From cb0d7cd19db5ae81682e6fb6c307d7b8075111af Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Tue, 13 Feb 2024 15:43:21 +0100 Subject: [PATCH 001/980] ENH: Add center/ljust/rjust/zfill ufuncs for unicode and bytes --- numpy/_core/code_generators/generate_umath.py | 21 +- .../_core/code_generators/ufunc_docstrings.py | 141 +++++++++++ numpy/_core/src/umath/string_buffer.h | 77 ++++++ numpy/_core/src/umath/string_ufuncs.cpp | 221 ++++++++++++++++++ numpy/_core/strings.py | 166 ++++++------- numpy/_core/tests/test_strings.py | 86 +++++++ numpy/_core/umath.py | 2 +- 7 files changed, 633 insertions(+), 81 deletions(-) diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index ec5a153dd439..b72d13d11c6c 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1280,7 +1280,26 @@ def english_upper(s): docstrings.get('numpy._core.umath._expandtabs'), None, ), - +'_center': + Ufunc(3, 1, None, + docstrings.get('numpy._core.umath._center'), + None, + ), +'_ljust': + Ufunc(3, 1, None, + docstrings.get('numpy._core.umath._ljust'), + None, + ), +'_rjust': + Ufunc(3, 1, None, + docstrings.get('numpy._core.umath._rjust'), + None, + ), +'_zfill': + Ufunc(2, 1, None, + docstrings.get('numpy._core.umath._zfill'), + None, + ), } def indent(st, spaces): diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 6a8946be3dee..83c73a195f95 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -4885,3 +4885,144 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', '_expandtabs_length', '') add_newdoc('numpy._core.umath', '_expandtabs', '') + +add_newdoc('numpy._core.umath', '_center', + """ + Return a copy of `x1` with its elements centered in a string of + length `x2`. + + Parameters + ---------- + x1 : array_like, with `np.bytes_` or `np.str_` dtype + + x2 : array_lie, with any integer dtype + The length of the resulting strings + x3 : array_like, with `np.bytes_` or `np.str_` dtype + The padding character to use. + $PARAMS + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input + types + $OUT_SCALAR_2 + + See Also + -------- + str.center + + Notes + ----- + This function is intended to work with arrays of strings. The + fill character is not applied to numeric types. + + Examples + -------- + >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c + array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> np.strings.center(c, width=9) + array([' a1b2 ', ' 1b2a ', ' b2a1 ', ' 2a1b '], dtype='>> np.strings.center(c, width=9, fillchar='*') + array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='>> np.strings.center(c, width=1) + array(['a', '1', 'b', '2'], dtype='>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.ljust(c, width=3) + array(['aAa', ' a', 'abB'], dtype='>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.rjust(a, width=3) + array(['aAa', ' a', 'abB'], dtype='>> np.strings.zfill('1', 3) + array('001', dtype=' buf, npy_int64 tabsize, Buffer out) } +enum class JUSTPOSITION { + CENTER, LEFT, RIGHT +}; + +template +static inline npy_intp +string_pad(Buffer buf, npy_int64 width, Buffer fill, JUSTPOSITION pos, Buffer out) +{ + size_t finalwidth = width > 0 ? 
width : 0; + size_t len = buf.num_codepoints(); + if (len >= finalwidth) { + buf.buffer_memcpy(out, len); + out.buffer_fill_with_zeros_after_index(len); + return (npy_intp) len; + } + + size_t left, right; + if (pos == JUSTPOSITION::CENTER) { + size_t pad = finalwidth - len; + left = pad / 2 + (pad & finalwidth & 1); + right = pad - left; + } + else if (pos == JUSTPOSITION::LEFT) { + left = 0; + right = finalwidth - len; + } + else { + left = finalwidth - len; + right = 0; + } + + if (left == 0 && right == 0) { + buf.buffer_memcpy(out, len); + out.buffer_fill_with_zeros_after_index(len); + return len; + } + + if (left > PY_SSIZE_T_MAX - len || right > PY_SSIZE_T_MAX - (left + len)) { + npy_gil_error(PyExc_OverflowError, "padded string is too long"); + return -1; + } + + if (left > 0) { + out += out.buffer_memset(*fill, left); + } + buf.buffer_memcpy(out, len); + out += len; + if (right > 0) { + out.buffer_memset(*fill, right); + } + return left + len + right; +} + + +template +static inline npy_intp +string_zfill(Buffer buf, npy_int64 width, Buffer out) +{ + size_t finalwidth = width > 0 ? width : 0; + + char fill = '0'; + Buffer fillchar(&fill, 1); + npy_intp new_len = string_pad(buf, width, fillchar, JUSTPOSITION::RIGHT, out); + + size_t offset = finalwidth - buf.num_codepoints(); + Buffer tmp = buf + offset; + + npy_ucs4 c = *tmp; + if (c == '+' || c == '-') { + buf.buffer_memset(c, 1); + tmp.buffer_memset(fill, 1); + } + + return new_len; +} + + #endif /* _NPY_CORE_SRC_UMATH_STRING_BUFFER_H_ */ diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index a5686c884fc3..3ac5749038f4 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -507,6 +507,73 @@ string_expandtabs_loop(PyArrayMethod_Context *context, } +template +static int +string_center_ljust_rjust_loop(PyArrayMethod_Context *context, + char *const data[], npy_intp const dimensions[], + npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) +{ + JUSTPOSITION pos = *(JUSTPOSITION *)(context->method->static_data); + int elsize1 = context->descriptors[0]->elsize; + int elsize3 = context->descriptors[2]->elsize; + int outsize = context->descriptors[3]->elsize; + + char *in1 = data[0]; + char *in2 = data[1]; + char *in3 = data[2]; + char *out = data[3]; + + npy_intp N = dimensions[0]; + + while (N--) { + Buffer buf(in1, elsize1); + Buffer fill(in3, elsize3); + Buffer outbuf(out, outsize); + size_t len = string_pad(buf, *(npy_int64 *)in2, fill, pos, outbuf); + if (len < 0) { + return -1; + } + + in1 += strides[0]; + in2 += strides[1]; + in3 += strides[2]; + out += strides[3]; + } + + return 0; +} + + +template +static int +string_zfill_loop(PyArrayMethod_Context *context, + char *const data[], npy_intp const dimensions[], + npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) +{ + int elsize = context->descriptors[0]->elsize; + int outsize = context->descriptors[2]->elsize; + + char *in1 = data[0]; + char *in2 = data[1]; + char *out = data[2]; + + npy_intp N = dimensions[0]; + + while (N--) { + Buffer buf(in1, elsize); + Buffer outbuf(out, outsize); + npy_intp newlen = string_zfill(buf, *(npy_int64 *)in2, outbuf); + outbuf.buffer_fill_with_zeros_after_index(newlen); + + in1 += strides[0]; + in2 += strides[1]; + out += strides[2]; + } + + return 0; +} + + /* Resolve descriptors & promoter functions */ static NPY_CASTING @@ -769,6 +836,109 @@ string_expandtabs_resolve_descriptors( } +static int +string_center_ljust_rjust_promoter(PyObject 
*NPY_UNUSED(ufunc), + PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + Py_INCREF(op_dtypes[0]); + new_op_dtypes[0] = op_dtypes[0]; + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); + Py_INCREF(op_dtypes[0]); + new_op_dtypes[2] = op_dtypes[0]; + Py_INCREF(op_dtypes[0]); + new_op_dtypes[3] = op_dtypes[0]; + return 0; +} + + +static NPY_CASTING +string_center_ljust_rjust_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), + PyArray_Descr *given_descrs[5], + PyArray_Descr *loop_descrs[5], + npy_intp *NPY_UNUSED(view_offset)) +{ + if (given_descrs[3] == NULL) { + PyErr_SetString( + PyExc_TypeError, + "The 'out' kwarg is necessary. Use the version in numpy.strings without it."); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); + if (loop_descrs[0] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]); + if (loop_descrs[1] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[2] = NPY_DT_CALL_ensure_canonical(given_descrs[2]); + if (loop_descrs[2] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[3] = NPY_DT_CALL_ensure_canonical(given_descrs[3]); + if (loop_descrs[3] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + return NPY_NO_CASTING; +} + + +static int +string_zfill_promoter(PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + Py_INCREF(op_dtypes[0]); + new_op_dtypes[0] = op_dtypes[0]; + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); + Py_INCREF(op_dtypes[0]); + new_op_dtypes[2] = op_dtypes[0]; + return 0; +} + + +static NPY_CASTING +string_zfill_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), + PyArray_Descr *given_descrs[3], + PyArray_Descr *loop_descrs[3], + npy_intp *NPY_UNUSED(view_offset)) +{ + if (given_descrs[2] == NULL) { + PyErr_SetString( + PyExc_TypeError, + "The 'out' kwarg is necessary. Use numpy.strings.zfill without it."); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); + if (loop_descrs[0] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]); + if (loop_descrs[1] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[2] = NPY_DT_CALL_ensure_canonical(given_descrs[2]); + if (loop_descrs[2] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + return NPY_NO_CASTING; +} + + /* * Machinery to add the string loops to the existing ufuncs. 
*/ @@ -1284,6 +1454,57 @@ init_string_ufuncs(PyObject *umath) return -1; } + dtypes[0] = dtypes[2] = dtypes[3] = NPY_OBJECT; + dtypes[1] = NPY_INT64; + + const char *center_ljust_rjust_names[] = { + "_center", "_ljust", "_rjust" + }; + + static JUSTPOSITION padpositions[] = { + JUSTPOSITION::CENTER, JUSTPOSITION::LEFT, JUSTPOSITION::RIGHT + }; + + for (int i = 0; i < 3; i++) { + if (init_ufunc( + umath, center_ljust_rjust_names[i], 3, 1, dtypes, ENCODING::ASCII, + string_center_ljust_rjust_loop, + string_center_ljust_rjust_resolve_descriptors, + &padpositions[i]) < 0) { + return -1; + } + if (init_ufunc( + umath, center_ljust_rjust_names[i], 3, 1, dtypes, ENCODING::UTF32, + string_center_ljust_rjust_loop, + string_center_ljust_rjust_resolve_descriptors, + &padpositions[i]) < 0) { + return -1; + } + if (init_promoter(umath, center_ljust_rjust_names[i], 3, 1, + string_center_ljust_rjust_promoter) < 0) { + return -1; + } + } + + dtypes[0] = NPY_OBJECT; + dtypes[1] = NPY_INT64; + dtypes[2] = NPY_OBJECT; + if (init_ufunc( + umath, "_zfill", 2, 1, dtypes, ENCODING::ASCII, + string_zfill_loop, + string_zfill_resolve_descriptors, NULL) < 0) { + return -1; + } + if (init_ufunc( + umath, "_zfill", 2, 1, dtypes, ENCODING::UTF32, + string_zfill_loop, + string_zfill_resolve_descriptors, NULL) < 0) { + return -1; + } + if (init_promoter(umath, "_zfill", 2, 1, string_zfill_promoter) < 0) { + return -1; + } + return 0; } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 78e39add631a..cbb8156b6a9a 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -37,6 +37,10 @@ _replace, _expandtabs_length, _expandtabs, + _center, + _ljust, + _rjust, + _zfill, ) @@ -46,12 +50,13 @@ "add", "multiply", "isalpha", "isdigit", "isspace", "isalnum", "islower", "isupper", "istitle", "isdecimal", "isnumeric", "str_len", "find", "rfind", "index", "rindex", "count", "startswith", "endswith", "lstrip", - "rstrip", "strip", "replace", "expandtabs", + "rstrip", "strip", "replace", "expandtabs", "center", "ljust", "rjust", + "zfill", # _vec_string - Will gradually become ufuncs as well - "mod", "decode", "encode", "center", "ljust", "rjust", "zfill", "upper", - "lower", "swapcase", "capitalize", "title", "join", "split", "rsplit", - "splitlines", "partition", "rpartition", "translate", + "mod", "decode", "encode", "upper", "lower", "swapcase", "capitalize", + "title", "join", "split", "rsplit", "splitlines", "partition", + "rpartition", "translate", ] @@ -573,15 +578,13 @@ def center(a, width, fillchar=' '): Return a copy of `a` with its elements centered in a string of length `width`. - Calls :meth:`str.center` element-wise. - Parameters ---------- a : array_like, with `np.bytes_` or `np.str_` dtype - width : int + width : array_like, with any integer dtype The length of the resulting strings - fillchar : str or unicode, optional + fillchar : array_like, with `np.bytes_` or `np.str_` dtype, optional The padding character to use (default is space). 
Returns @@ -611,13 +614,17 @@ def center(a, width, fillchar=' '): array(['a', '1', 'b', '2'], dtype='>> np.strings.zfill('1', 3) + array('001', dtype='>> np.strings.zfill('1', 3) - array('001', dtype=' Date: Fri, 1 Mar 2024 19:11:17 +0100 Subject: [PATCH 002/980] Address feedback --- numpy/_core/code_generators/ufunc_docstrings.py | 13 +++++-------- numpy/_core/src/umath/string_buffer.h | 15 +++++++-------- numpy/_core/src/umath/string_ufuncs.cpp | 4 ++++ numpy/_core/strings.py | 2 ++ numpy/_core/tests/test_strings.py | 1 + 5 files changed, 19 insertions(+), 16 deletions(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 83c73a195f95..e84e326e74c1 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -4895,7 +4895,7 @@ def add_newdoc(place, name, doc): ---------- x1 : array_like, with `np.bytes_` or `np.str_` dtype - x2 : array_lie, with any integer dtype + x2 : array_like, with any integer dtype The length of the resulting strings x3 : array_like, with `np.bytes_` or `np.str_` dtype The padding character to use. @@ -4911,11 +4911,6 @@ def add_newdoc(place, name, doc): See Also -------- str.center - - Notes - ----- - This function is intended to work with arrays of strings. The - fill character is not applied to numeric types. Examples -------- @@ -4939,7 +4934,7 @@ def add_newdoc(place, name, doc): ---------- x1 : array_like, with `np.bytes_` or `np.str_` dtype - x2 : array_lie, with any integer dtype + x2 : array_like, with any integer dtype The length of the resulting strings x3 : array_like, with `np.bytes_` or `np.str_` dtype The padding character to use. @@ -4972,7 +4967,7 @@ def add_newdoc(place, name, doc): ---------- x1 : array_like, with `np.bytes_` or `np.str_` dtype - x2 : array_lie, with any integer dtype + x2 : array_like, with any integer dtype The length of the resulting strings x3 : array_like, with `np.bytes_` or `np.str_` dtype The padding character to use. 
@@ -5024,5 +5019,7 @@ def add_newdoc(place, name, doc): -------- >>> np.strings.zfill('1', 3) array('001', dtype='>> np.strings.zfill('-1', 3) + array('-01', dtype=' buf, npy_int64 width, Buffer fill, JUSTPOSITION pos, right = 0; } - if (left == 0 && right == 0) { - buf.buffer_memcpy(out, len); - out.buffer_fill_with_zeros_after_index(len); - return len; - } + assert(left >= 0 || right >= 0); if (left > PY_SSIZE_T_MAX - len || right > PY_SSIZE_T_MAX - (left + len)) { npy_gil_error(PyExc_OverflowError, "padded string is too long"); @@ -1555,7 +1551,7 @@ string_pad(Buffer buf, npy_int64 width, Buffer fill, JUSTPOSITION pos, if (right > 0) { out.buffer_memset(*fill, right); } - return left + len + right; + return finalwidth; } @@ -1568,14 +1564,17 @@ string_zfill(Buffer buf, npy_int64 width, Buffer out) char fill = '0'; Buffer fillchar(&fill, 1); npy_intp new_len = string_pad(buf, width, fillchar, JUSTPOSITION::RIGHT, out); + if (new_len == -1) { + return -1; + } size_t offset = finalwidth - buf.num_codepoints(); - Buffer tmp = buf + offset; + Buffer tmp = out + offset; npy_ucs4 c = *tmp; if (c == '+' || c == '-') { - buf.buffer_memset(c, 1); tmp.buffer_memset(fill, 1); + out.buffer_memset(c, 1); } return new_len; diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 3ac5749038f4..5441114980bc 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -533,6 +533,7 @@ string_center_ljust_rjust_loop(PyArrayMethod_Context *context, if (len < 0) { return -1; } + outbuf.buffer_fill_with_zeros_after_index(len); in1 += strides[0]; in2 += strides[1]; @@ -563,6 +564,9 @@ string_zfill_loop(PyArrayMethod_Context *context, Buffer buf(in1, elsize); Buffer outbuf(out, outsize); npy_intp newlen = string_zfill(buf, *(npy_int64 *)in2, outbuf); + if (newlen < 0) { + return -1; + } outbuf.buffer_fill_with_zeros_after_index(newlen); in1 += strides[0]; diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index cbb8156b6a9a..374e9a0a36ad 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -739,6 +739,8 @@ def zfill(a, width): -------- >>> np.strings.zfill('1', 3) array('001', dtype='>> np.strings.zfill('-1', 3) + array('-01', dtype=' Date: Fri, 1 Mar 2024 19:14:32 +0100 Subject: [PATCH 003/980] Fix lint error --- numpy/_core/tests/test_strings.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index c1f161c44166..58bf61dcb219 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -925,7 +925,6 @@ def test_expandtabs(self, buf, res, dt): assert_array_equal(np.strings.expandtabs(buf), res) - @pytest.mark.parametrize("dt", [ "U", pytest.param("T", marks=pytest.mark.xfail( From 38627f0cc06e8942612aa19e6198d717818ed52b Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Fri, 1 Mar 2024 20:05:59 +0100 Subject: [PATCH 004/980] Fix passing char to buffer struct --- numpy/_core/src/umath/string_buffer.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 11073f8bed3a..15590bc7c067 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -1561,8 +1561,9 @@ string_zfill(Buffer buf, npy_int64 width, Buffer out) { size_t finalwidth = width > 0 ? 
width : 0; - char fill = '0'; - Buffer fillchar(&fill, 1); + npy_ucs4 fill = '0'; + Buffer fillchar(&fill, 4); // max codepoint size is 4 bytes + npy_intp new_len = string_pad(buf, width, fillchar, JUSTPOSITION::RIGHT, out); if (new_len == -1) { return -1; From 7fefb40b85ba5be33ffe63574a5521c1759ce04b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 1 Mar 2024 14:01:04 -0700 Subject: [PATCH 005/980] NEP: NEP 55 updates and add @mhvk as an author --- .../nep-0055-arena-string-memory-layout.svg | 2008 +---------------- .../nep-0055-heap-string-memory-layout.svg | 1996 +--------------- .../nep-0055-short-string-memory-layout.svg | 1385 +----------- doc/neps/nep-0055-string_dtype.rst | 62 +- 4 files changed, 46 insertions(+), 5405 deletions(-) diff --git a/doc/neps/_static/nep-0055-arena-string-memory-layout.svg b/doc/neps/_static/nep-0055-arena-string-memory-layout.svg index 44a2bbb8d5ce..03b1c560df93 100644 --- a/doc/neps/_static/nep-0055-arena-string-memory-layout.svg +++ b/doc/neps/_static/nep-0055-arena-string-memory-layout.svg @@ -1,2004 +1,4 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + +
[minified draw.io SVG omitted — figure: "Arena-allocated String" memory layout. Labels: offset (size_t) pointing into the arena allocator's buffer; size_and_flags (size_t) packing a 7-byte size with a flags char; the arena bytes store a one-byte size (0x1C) followed by the string contents ('N', 'u', 'm', 'p', ...).]
\ No newline at end of file diff --git a/doc/neps/_static/nep-0055-heap-string-memory-layout.svg b/doc/neps/_static/nep-0055-heap-string-memory-layout.svg index 05813deeb0e7..97e97f41ea66 100644 --- a/doc/neps/_static/nep-0055-heap-string-memory-layout.svg +++ b/doc/neps/_static/nep-0055-heap-string-memory-layout.svg @@ -1,1992 +1,4 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + +
[minified draw.io SVG omitted — figure: "Heap-allocated String" memory layout. Labels: offset (uintptr_t) holding a heap address (0x4d3d3d3) in the PyMem raw allocator domain; size_and_flags (size_t) packing a 7-byte size with a flags char; the heap buffer stores the string contents ('N', 'u', 'm', 'p', 'y', ' ', 'i', 's', ...).]
\ No newline at end of file diff --git a/doc/neps/_static/nep-0055-short-string-memory-layout.svg b/doc/neps/_static/nep-0055-short-string-memory-layout.svg index 1a35f59b31e6..973e69b96e8e 100644 --- a/doc/neps/_static/nep-0055-short-string-memory-layout.svg +++ b/doc/neps/_static/nep-0055-short-string-memory-layout.svg @@ -1,1381 +1,4 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + +
[minified draw.io SVG omitted — figure: "Short String" memory layout. Labels: direct_buffer (char[15]) holding 'Hello world'; size_and_flags (char) = 0x6b, with the size (11) stored as a 4-bit uint and the flags in the remaining bits.]
\ No newline at end of file diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst index e2803c8d9d35..463329843e15 100644 --- a/doc/neps/nep-0055-string_dtype.rst +++ b/doc/neps/nep-0055-string_dtype.rst @@ -6,7 +6,8 @@ NEP 55 — Add a UTF-8 variable-width string DType to NumPy :Author: Nathan Goldbaum :Author: Warren Weckesser -:Status: Accepted +:Author: Marten van Kerkwijk +:Status: Final :Type: Standards Track :Created: 2023-06-29 :Updated: 2024-01-18 @@ -905,10 +906,10 @@ endian-dependent layouts of these structs is an implementation detail and is not publicly exposed in the API. Whether or not a string is stored directly on the arena buffer or in the heap is -signaled by setting the ``NPY_STRING_SHORT`` flag on the string data. Because -the maximum size of a heap-allocated string is limited to the size of the -largest 7-byte unsized integer, this flag can never be set for a valid heap -string. +signaled by setting the ``NPY_OUTSIDE_ARENA`` and ``NPY_STRING_LONG`` flags on +the string data. Because the maximum size of a heap-allocated string is limited +to the size of the largest 7-byte unsized integer, these flags can never be set +for a valid heap string. See :ref:`memorylayoutexamples` for some visual examples of strings in each of these memory layouts. @@ -956,20 +957,29 @@ exponentially expanding buffer, with an expansion factor of 1.25. Each string entry in the arena is prepended by a size, stored either in a ``char`` or a ``size_t``, depending on the length of the string. Strings with lengths between 16 or 8 (depending on architecture) and 255 are stored with a -``char`` size. We refer to these as "medium" strings internally and strings -stored this way have the ``NPY_STRING_MEDIUM`` flag set. This choice reduces the -overhead for storing smaller strings on the heap by 7 bytes per medium-length -string. +``char`` size. We refer to these as "medium" strings internally. This choice +reduces the overhead for storing smaller strings on the heap by 7 bytes per +medium-length string. Strings in the arena with lengths longer than 255 bytes +have the ``NPY_STRING_LONG`` flag set. If the contents of a packed string are freed and then assigned to a new string with the same size or smaller than the string that was originally stored in the -packed string, the existing short string or arena allocation is re-used, with -padding zeros written to the end of the subset of the buffer reserved for the -string. If the string is enlarged, the existing space in the arena buffer cannot -be used, so instead we resort to allocating space directly on the heap via -``malloc`` and the ``NPY_STRING_ON_HEAP`` flag is set. Any pre-existing flags -are kept set to allow future use of the string to determine if there is space in -the arena buffer allocated for the string for possible re-use. +packed string, the existing short string or arena allocation is re-used. There +is one exception however, when a string in the arena is overwritten with a short +string, the arena metadata is lost and the arena allocation cannot be re-used. + +If the string is enlarged, the existing space in the arena buffer cannot be +used, so instead we resort to allocating space directly on the heap via +``malloc`` and the ``NPY_STRING_OUTSIDE_ARENA`` and ``NPY_STRING_LONG`` flags +are set. Note that ``NPY_STRING_LONG`` can be set even for strings with lengths +less than 255 bytes in this case. 
Any pre-existing flags are kept set to allow +future use of the string to determine if there is space in the arena buffer +allocated for the string for possible re-use. + +No matter where it is stored, once a string is initialized is marked with the +``NPY_STRING_INITIALIZED`` flag. This lets us clearly distinguish between an +unitialized empty string and a string that has been mutated into the empty +string. The size of the allocation is stored in the arena to allow reuse of the arena allocation if a string is mutated. In principle we could disallow re-use of the @@ -1022,13 +1032,7 @@ Freeing Strings Existing strings must be freed before discarding or re-using a packed string. The API is constructed to require this for all strings, even for short strings with no heap allocations. In all cases, all data in the packed string -are zeroed out, except for the flags, which are preserved except as noted below. - -For strings with data living in the arena allocation, the data for the string in -the arena buffer are zeroed out and the ``NPY_STRING_ARENA_FREED`` flag is set -on the packed string to indicate there is space in the arena for a later re-use -of the packed string. Heap strings have their heap allocation freed and the -``NPY_STRING_ON_HEAP`` flag removed. +are zeroed out, except for the flags, which are preserved. .. _memorylayoutexamples: @@ -1044,8 +1048,8 @@ Short strings store string data directly in the array buffer. On little-endian architectures, the string data appear first, followed by a single byte that allows space for four flags and stores the size of the string as an unsigned integer in the final 4 bits. In this example, the string contents are -"Hello world", with a size of 11. The only flag set indicates that this is a -short string. +"Hello world", with a size of 11. The flags indicate this string is stored +outside the arena and is initialized. .. image:: _static/nep-0055-arena-string-memory-layout.svg @@ -1060,7 +1064,7 @@ a "medium"-length string and the size requires only one byte in the arena allocation. An arena string larger than 255 bytes would need 8 bytes in the arena to store the size in a ``size_t``. The only flag set indicates that this is a such "medium"-length string with a size that fits in a ``unsigned -char``. Arena strings that are longer than 255 bytes have no flags set. +char``. The only flag set indicates this string is initialized. .. image:: _static/nep-0055-heap-string-memory-layout.svg @@ -1068,8 +1072,10 @@ Heap strings store string data in a buffer returned by ``PyMem_RawMalloc`` and instead of storing an offset into an arena buffer, directly store the address of the heap address returned by ``malloc``. In this example, the string contents are "Numpy is a very cool library" and are stored at heap address -``0x4d3d3d3``. The string has one flag set, indicating that the allocation lives -directly on the heap rather than in the arena buffer. +``0x4d3d3d3``. The string has three flags set, indicating it is a "Long" string +(e.g. not a short string) stored outside the arena, and is initialized. Note +that if this string were stored inside the arena, it would not have the long +string flag set. 
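None of the flag and layout bookkeeping described above is visible from Python; assignments simply work, and entries migrate between the short-string, arena, and heap layouts as needed. A minimal sketch of the intended user-level behavior, using only the ``StringDType`` API this NEP proposes (the sample strings are made up, and exact reprs may differ)::

    >>> import numpy as np
    >>> from numpy.dtypes import StringDType
    >>> arr = np.array(["hello world", "x" * 300], dtype=StringDType())
    >>> # enlarging an entry moves it off the short-string path transparently
    >>> arr[0] = "now too long for the short-string layout"
    >>> arr[0]
    'now too long for the short-string layout'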
Empty Strings and Missing Data ++++++++++++++++++++++++++++++ From 98c8e5c8d582ed96b7e0b4eb2b0b3be7ed1cea23 Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Mon, 4 Mar 2024 15:13:08 +0100 Subject: [PATCH 006/980] Address more feedback; fix fill in zfill and improve docstrings/doctests --- .../_core/code_generators/ufunc_docstrings.py | 24 +++++++++-------- numpy/_core/src/umath/string_buffer.h | 21 +++++++-------- numpy/_core/src/umath/string_ufuncs.cpp | 2 +- numpy/_core/strings.py | 26 ++++++++++--------- 4 files changed, 38 insertions(+), 35 deletions(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index e84e326e74c1..d76acd17cd2f 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -4896,7 +4896,7 @@ def add_newdoc(place, name, doc): x1 : array_like, with `np.bytes_` or `np.str_` dtype x2 : array_like, with any integer dtype - The length of the resulting strings + The length of the resulting strings, unless ``width < str_len(a)``. x3 : array_like, with `np.bytes_` or `np.str_` dtype The padding character to use. $PARAMS @@ -4921,7 +4921,7 @@ def add_newdoc(place, name, doc): >>> np.strings.center(c, width=9, fillchar='*') array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='>> np.strings.center(c, width=1) - array(['a', '1', 'b', '2'], dtype='>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.ljust(c, width=3) - array(['aAa', ' a', 'abB'], dtype='>> np.strings.ljust(c, width=9) + array(['aAaAaA ', ' aA ', 'abBABba '], dtype='>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) +>>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.rjust(a, width=3) - array(['aAa', ' a', 'abB'], dtype='>> np.strings.rjust(a, width=9) + array([' aAaAaA', ' aA ', ' abBABba'], dtype='>> np.strings.zfill('1', 3) - array('001', dtype='>> np.strings.zfill('-1', 3) - array('-01', dtype='>> np.strings.zfill(['1', '-1', '+1'], 3) + array(['001', '-01', '+01'], dtype=' static inline npy_intp -string_pad(Buffer buf, npy_int64 width, Buffer fill, JUSTPOSITION pos, Buffer out) +string_pad(Buffer buf, npy_int64 width, npy_ucs4 fill, JUSTPOSITION pos, Buffer out) { size_t finalwidth = width > 0 ? width : 0; + if (finalwidth > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, "padded string is too long"); + return -1; + } + size_t len = buf.num_codepoints(); if (len >= finalwidth) { buf.buffer_memcpy(out, len); @@ -1537,19 +1542,15 @@ string_pad(Buffer buf, npy_int64 width, Buffer fill, JUSTPOSITION pos, } assert(left >= 0 || right >= 0); - - if (left > PY_SSIZE_T_MAX - len || right > PY_SSIZE_T_MAX - (left + len)) { - npy_gil_error(PyExc_OverflowError, "padded string is too long"); - return -1; - } + assert(left <= PY_SSIZE_T_MAX - len && right <= PY_SSIZE_T_MAX - (left + len)); if (left > 0) { - out += out.buffer_memset(*fill, left); + out += out.buffer_memset(fill, left); } buf.buffer_memcpy(out, len); out += len; if (right > 0) { - out.buffer_memset(*fill, right); + out.buffer_memset(fill, right); } return finalwidth; } @@ -1562,9 +1563,7 @@ string_zfill(Buffer buf, npy_int64 width, Buffer out) size_t finalwidth = width > 0 ? 
width : 0; npy_ucs4 fill = '0'; - Buffer fillchar(&fill, 4); // max codepoint size is 4 bytes - - npy_intp new_len = string_pad(buf, width, fillchar, JUSTPOSITION::RIGHT, out); + npy_intp new_len = string_pad(buf, width, fill, JUSTPOSITION::RIGHT, out); if (new_len == -1) { return -1; } diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 5441114980bc..faa5583e52fb 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -529,7 +529,7 @@ string_center_ljust_rjust_loop(PyArrayMethod_Context *context, Buffer buf(in1, elsize1); Buffer fill(in3, elsize3); Buffer outbuf(out, outsize); - size_t len = string_pad(buf, *(npy_int64 *)in2, fill, pos, outbuf); + size_t len = string_pad(buf, *(npy_int64 *)in2, *fill, pos, outbuf); if (len < 0) { return -1; } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 374e9a0a36ad..69dbbf234aeb 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -583,7 +583,7 @@ def center(a, width, fillchar=' '): a : array_like, with `np.bytes_` or `np.str_` dtype width : array_like, with any integer dtype - The length of the resulting strings + The length of the resulting strings, unless ``width < str_len(a)``. fillchar : array_like, with `np.bytes_` or `np.str_` dtype, optional The padding character to use (default is space). @@ -596,7 +596,7 @@ def center(a, width, fillchar=' '): See Also -------- str.center - + Notes ----- This function is intended to work with arrays of strings. The @@ -611,7 +611,7 @@ def center(a, width, fillchar=' '): >>> np.strings.center(c, width=9, fillchar='*') array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='>> np.strings.center(c, width=1) - array(['a', '1', 'b', '2'], dtype='>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.ljust(c, width=3) - array(['aAa', ' a', 'abB'], dtype='>> np.strings.ljust(c, width=9) + array(['aAaAaA ', ' aA ', 'abBABba '], dtype='>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.rjust(a, width=3) - array(['aAa', ' a', 'abB'], dtype='>> np.strings.rjust(a, width=9) + array([' aAaAaA', ' aA ', ' abBABba'], dtype='>> np.strings.zfill('1', 3) - array('001', dtype='>> np.strings.zfill('-1', 3) - array('-01', dtype='>> np.strings.zfill(['1', '-1', '+1'], 3) + array(['001', '-01', '+01'], dtype=' Date: Mon, 4 Mar 2024 16:52:37 +0100 Subject: [PATCH 007/980] Add loops for mixed types for center/ljust/rjust --- numpy/_core/src/umath/string_ufuncs.cpp | 108 +++++++++++++++++++++--- numpy/_core/strings.py | 50 ++++++++--- numpy/_core/tests/test_strings.py | 89 +++++++++++++++++++ 3 files changed, 223 insertions(+), 24 deletions(-) diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index faa5583e52fb..6b5377ceb180 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -507,7 +507,7 @@ string_expandtabs_loop(PyArrayMethod_Context *context, } -template +template static int string_center_ljust_rjust_loop(PyArrayMethod_Context *context, char *const data[], npy_intp const dimensions[], @@ -526,9 +526,9 @@ string_center_ljust_rjust_loop(PyArrayMethod_Context *context, npy_intp N = dimensions[0]; while (N--) { - Buffer buf(in1, elsize1); - Buffer fill(in3, elsize3); - Buffer outbuf(out, outsize); + Buffer buf(in1, elsize1); + Buffer fill(in3, elsize3); + Buffer outbuf(out, outsize); size_t len = string_pad(buf, *(npy_int64 *)in2, *fill, pos, outbuf); if (len < 0) { return -1; @@ 
-1160,6 +1160,67 @@ init_ufunc(PyObject *umath, const char *name, int nin, int nout, } +/* + * This is a variant of init_ufunc that allows for mixed string dtypes + * in its parameters. Instead of having NPY_OBJECT be a sentinel for a + * fixed dtype, here the typenums are always the correct ones. + */ +static int +init_mixed_type_ufunc(PyObject *umath, const char *name, int nin, int nout, + NPY_TYPES *typenums, PyArrayMethod_StridedLoop loop, + PyArrayMethod_ResolveDescriptors resolve_descriptors, + void *static_data) +{ + int res = -1; + + PyArray_DTypeMeta **dtypes = (PyArray_DTypeMeta **) PyMem_Malloc( + (nin + nout) * sizeof(PyArray_DTypeMeta *)); + if (dtypes == NULL) { + PyErr_NoMemory(); + return -1; + } + + for (int i = 0; i < nin+nout; i++) { + dtypes[i] = PyArray_DTypeFromTypeNum(typenums[i]); + } + + PyType_Slot slots[4]; + slots[0] = {NPY_METH_strided_loop, nullptr}; + slots[1] = {_NPY_METH_static_data, static_data}; + slots[3] = {0, nullptr}; + if (resolve_descriptors != NULL) { + slots[2] = {NPY_METH_resolve_descriptors, (void *) resolve_descriptors}; + } + else { + slots[2] = {0, nullptr}; + } + + char loop_name[256] = {0}; + snprintf(loop_name, sizeof(loop_name), "templated_string_%s", name); + + PyArrayMethod_Spec spec = {}; + spec.name = loop_name; + spec.nin = nin; + spec.nout = nout; + spec.dtypes = dtypes; + spec.slots = slots; + spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + if (add_loop(umath, name, &spec, loop) < 0) { + goto finish; + } + + res = 0; + finish: + for (int i = 0; i < nin+nout; i++) { + Py_DECREF(dtypes[i]); + } + PyMem_Free((void *) dtypes); + return res; +} + + + NPY_NO_EXPORT int init_string_ufuncs(PyObject *umath) { @@ -1458,7 +1519,6 @@ init_string_ufuncs(PyObject *umath) return -1; } - dtypes[0] = dtypes[2] = dtypes[3] = NPY_OBJECT; dtypes[1] = NPY_INT64; const char *center_ljust_rjust_names[] = { @@ -1470,16 +1530,42 @@ init_string_ufuncs(PyObject *umath) }; for (int i = 0; i < 3; i++) { - if (init_ufunc( - umath, center_ljust_rjust_names[i], 3, 1, dtypes, ENCODING::ASCII, - string_center_ljust_rjust_loop, + dtypes[0] = NPY_STRING; + dtypes[2] = NPY_STRING; + dtypes[3] = NPY_STRING; + if (init_mixed_type_ufunc( + umath, center_ljust_rjust_names[i], 3, 1, dtypes, + string_center_ljust_rjust_loop, string_center_ljust_rjust_resolve_descriptors, &padpositions[i]) < 0) { return -1; } - if (init_ufunc( - umath, center_ljust_rjust_names[i], 3, 1, dtypes, ENCODING::UTF32, - string_center_ljust_rjust_loop, + dtypes[0] = NPY_STRING; + dtypes[2] = NPY_UNICODE; + dtypes[3] = NPY_STRING; + if (init_mixed_type_ufunc( + umath, center_ljust_rjust_names[i], 3, 1, dtypes, + string_center_ljust_rjust_loop, + string_center_ljust_rjust_resolve_descriptors, + &padpositions[i]) < 0) { + return -1; + } + dtypes[0] = NPY_UNICODE; + dtypes[2] = NPY_UNICODE; + dtypes[3] = NPY_UNICODE; + if (init_mixed_type_ufunc( + umath, center_ljust_rjust_names[i], 3, 1, dtypes, + string_center_ljust_rjust_loop, + string_center_ljust_rjust_resolve_descriptors, + &padpositions[i]) < 0) { + return -1; + } + dtypes[0] = NPY_UNICODE; + dtypes[2] = NPY_STRING; + dtypes[3] = NPY_UNICODE; + if (init_mixed_type_ufunc( + umath, center_ljust_rjust_names[i], 3, 1, dtypes, + string_center_ljust_rjust_loop, string_center_ljust_rjust_resolve_descriptors, &padpositions[i]) < 0) { return -1; diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 69dbbf234aeb..edf86e98a576 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -590,8 +590,7 @@ def center(a, width, fillchar=' 
'): Returns ------- out : ndarray - Output array of str or unicode, depending on input - types + Output array of str or unicode, depending on the type of ``a`` See Also -------- @@ -599,8 +598,9 @@ def center(a, width, fillchar=' '): Notes ----- - This function is intended to work with arrays of strings. The - fill character is not applied to numeric types. + While it is possible for ``a`` and ``fillchar`` to have different dtypes, + passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S", + will result in silent errors (resulting buffer might have wrong data). Examples -------- @@ -616,14 +616,18 @@ def center(a, width, fillchar=' '): """ a = np.asanyarray(a) width = np.maximum(str_len(a), width) - fillchar = np.asanyarray(fillchar, dtype=f"{a.dtype.char}1") + fillchar = np.asanyarray(fillchar) + if np.any(str_len(fillchar) != 1): + raise TypeError( + "The fill character must be exactly one character long") + + shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) if a.dtype.char == "T": - shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) out = np.empty_like(a, shape=shape) else: out_dtype = f"{a.dtype.char}{width.max()}" - out = np.empty_like(a, shape=a.shape, dtype=out_dtype) + out = np.empty_like(a, shape=shape, dtype=out_dtype) return _center(a, width, fillchar, out=out) @@ -650,6 +654,12 @@ def ljust(a, width, fillchar=' '): -------- str.ljust + Notes + ----- + While it is possible for ``a`` and ``fillchar`` to have different dtypes, + passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S", + will result in silent errors (resulting buffer might have wrong data). + Examples -------- >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) @@ -661,14 +671,18 @@ def ljust(a, width, fillchar=' '): """ a = np.asanyarray(a) width = np.maximum(str_len(a), width) + fillchar = np.asanyarray(fillchar) + if np.any(str_len(fillchar) != 1): + raise TypeError( + "The fill character must be exactly one character long") + shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) if a.dtype.char == "T": - shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) out = np.empty_like(a, shape=shape) else: out_dtype = f"{a.dtype.char}{width.max()}" - out = np.empty_like(a, shape=a.shape, dtype=out_dtype) + out = np.empty_like(a, shape=shape, dtype=out_dtype) return _ljust(a, width, fillchar, out=out) @@ -695,6 +709,12 @@ def rjust(a, width, fillchar=' '): -------- str.rjust + Notes + ----- + While it is possible for ``a`` and ``fillchar`` to have different dtypes, + passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S", + will result in silent errors (resulting buffer might have wrong data). 
+ Examples -------- >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) @@ -706,14 +726,18 @@ def rjust(a, width, fillchar=' '): """ a = np.asanyarray(a) width = np.maximum(str_len(a), width) + fillchar = np.asanyarray(fillchar) + if np.any(str_len(fillchar) != 1): + raise TypeError( + "The fill character must be exactly one character long") + shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) if a.dtype.char == "T": - shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) out = np.empty_like(a, shape=shape) else: out_dtype = f"{a.dtype.char}{width.max()}" - out = np.empty_like(a, shape=a.shape, dtype=out_dtype) + out = np.empty_like(a, shape=shape, dtype=out_dtype) return _rjust(a, width, fillchar, out=out) @@ -748,12 +772,12 @@ def zfill(a, width): a = np.asanyarray(a) width = np.maximum(str_len(a), width) + shape = np.broadcast_shapes(a.shape, width.shape) if a.dtype.char == "T": - shape = np.broadcast_shapes(a.shape, width.shape) out = np.empty_like(a, shape=shape) else: out_dtype = f"{a.dtype.char}{width.max()}" - out = np.empty_like(a, shape=a.shape, dtype=out_dtype) + out = np.empty_like(a, shape=shape, dtype=out_dtype) return _zfill(a, width, out=out) diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 58bf61dcb219..427d14e4cb11 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -716,6 +716,27 @@ def test_expandtabs_raises_overflow(self, dt): with pytest.raises(OverflowError, match="new string is too long"): np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize) + def test_center_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, + match="The fill character must be exactly one character long"): + np.strings.center(buf, 10, fill) + + def test_ljust_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, + match="The fill character must be exactly one character long"): + np.strings.ljust(buf, 10, fill) + + def test_rjust_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, + match="The fill character must be exactly one character long"): + np.strings.rjust(buf, 10, fill) + @pytest.mark.parametrize("dt", [ "S", @@ -943,6 +964,74 @@ def test_center(self, buf, width, fillchar, res, dt): res = np.array(res, dtype=dt) assert_array_equal(np.strings.center(buf, width, fillchar), res) + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0010FFFF', 'x\U0010FFFF'), + ('x', 3, '\U0010FFFF', '\U0010FFFFx\U0010FFFF'), + ('x', 4, '\U0010FFFF', '\U0010FFFFx\U0010FFFF\U0010FFFF'), + ]) + def test_center(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.center(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0010FFFF', 'x\U0010FFFF'), + ('x', 3, '\U0010FFFF', 'x\U0010FFFF\U0010FFFF'), + ('x', 4, '\U0010FFFF', 'x\U0010FFFF\U0010FFFF\U0010FFFF'), + ]) + def test_ljust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.ljust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, 
'\U0010FFFF', '\U0010FFFFx'), + ('x', 3, '\U0010FFFF', '\U0010FFFF\U0010FFFFx'), + ('x', 4, '\U0010FFFF', '\U0010FFFF\U0010FFFF\U0010FFFFx'), + ]) + def test_rjust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.rjust(buf, width, fillchar), res) + + +class TestMixedTypeMethods: + def test_center(self): + u = np.array("😊", dtype="U") + s = np.array("*", dtype="S") + res = np.array("*😊*", dtype="U") + assert_array_equal(np.strings.center(u, 3, s), res) + + u = np.array("*", dtype="U") + s = np.array("s", dtype="S") + res = np.array("*s*", dtype="S") + assert_array_equal(np.strings.center(s, 3, u), res) + + def test_ljust(self): + u = np.array("😊", dtype="U") + s = np.array("*", dtype="S") + res = np.array("😊**", dtype="U") + assert_array_equal(np.strings.ljust(u, 3, s), res) + + u = np.array("*", dtype="U") + s = np.array("s", dtype="S") + res = np.array("s**", dtype="S") + assert_array_equal(np.strings.ljust(s, 3, u), res) + + def test_rjust(self): + u = np.array("😊", dtype="U") + s = np.array("*", dtype="S") + res = np.array("**😊", dtype="U") + assert_array_equal(np.strings.rjust(u, 3, s), res) + + u = np.array("*", dtype="U") + s = np.array("s", dtype="S") + res = np.array("**s", dtype="S") + assert_array_equal(np.strings.rjust(s, 3, u), res) + class TestUnicodeOnlyMethodsRaiseWithBytes: def test_isdecimal_raises(self): From f9bb1d71b41c8f825f11b68db7f24b65b45ac282 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 5 Mar 2024 09:58:15 -0700 Subject: [PATCH 008/980] NEP: respond to marten's comments --- doc/neps/nep-0055-string_dtype.rst | 55 +++++++++++++++--------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst index 463329843e15..e6bc6a36a6a3 100644 --- a/doc/neps/nep-0055-string_dtype.rst +++ b/doc/neps/nep-0055-string_dtype.rst @@ -147,9 +147,6 @@ we propose to: types related to string support, enabling a migration path for a future deprecation of ``np.char``. -* An update to the ``npy`` and ``npz`` file formats to allow storage of - arbitrary-length sidecar data. - The following is out of scope for this work: * Changing DType inference for string data. @@ -162,6 +159,9 @@ The following is out of scope for this work: * Implement SIMD optimizations for string operations. +* An update to the ``npy`` and ``npz`` file formats to allow storage of + arbitrary-length sidecar data. + While we're explicitly ruling out implementing these items as part of this work, adding a new string DType helps set up future work that does implement some of these items. @@ -407,7 +407,7 @@ Missing data can be represented using a sentinel: >>> np.isnan(arr) array([False, True, False]) >>> np.empty(3, dtype=dt) - array([nan, nan, nan]) + array(['', '', '']) We only propose supporting user-provided sentinels. By default, empty arrays will be populated with empty strings: @@ -563,14 +563,14 @@ be populated with string ufuncs: True We feel ``np.strings`` is a more intuitive name than ``np.char``, and eventually -will replace ``np.char`` once downstream libraries that conform to SPEC-0 can -safely switch to ``np.strings`` without needing any logic conditional on the NumPy -version. 
+will replace ``np.char`` once the minimum NumPy version supported by downstream +libraries per SPEC-0 is new enough that they can safely switch to ``np.strings`` +without needing any logic conditional on the NumPy version. Serialization ************* -Since string data are stored outside the array buffer, serialization top the +Since string data are stored outside the array buffer, serialization to the ``npy`` format would requires a format revision to support storing variable-width sidecare data. Rather than doing this as part of this effort, we do not plan on supporting serialization to the ``npy`` or ``npz`` format without @@ -972,11 +972,11 @@ If the string is enlarged, the existing space in the arena buffer cannot be used, so instead we resort to allocating space directly on the heap via ``malloc`` and the ``NPY_STRING_OUTSIDE_ARENA`` and ``NPY_STRING_LONG`` flags are set. Note that ``NPY_STRING_LONG`` can be set even for strings with lengths -less than 255 bytes in this case. Any pre-existing flags are kept set to allow -future use of the string to determine if there is space in the arena buffer -allocated for the string for possible re-use. +less than 255 bytes in this case. Since the heap address overwrites the arena +offset, and future string replacements will be stored on the heap or directly +in the array buffer as a short string. -No matter where it is stored, once a string is initialized is marked with the +No matter where it is stored, once a string is initialized it is marked with the ``NPY_STRING_INITIALIZED`` flag. This lets us clearly distinguish between an unitialized empty string and a string that has been mutated into the empty string. @@ -1062,9 +1062,8 @@ re-use of the arena allocation if a string is mutated. Also note that because the length of the string is small enough to fit in an ``unsigned char``, this is a "medium"-length string and the size requires only one byte in the arena allocation. An arena string larger than 255 bytes would need 8 bytes in the -arena to store the size in a ``size_t``. The only flag set indicates that this -is a such "medium"-length string with a size that fits in a ``unsigned -char``. The only flag set indicates this string is initialized. +arena to store the size in a ``size_t``. The only flag set indicates this string +is initialized. .. image:: _static/nep-0055-heap-string-memory-layout.svg @@ -1072,26 +1071,28 @@ Heap strings store string data in a buffer returned by ``PyMem_RawMalloc`` and instead of storing an offset into an arena buffer, directly store the address of the heap address returned by ``malloc``. In this example, the string contents are "Numpy is a very cool library" and are stored at heap address -``0x4d3d3d3``. The string has three flags set, indicating it is a "Long" string +``0x4d3d3d3``. The string has three flags set, indicating it is a "long" string (e.g. not a short string) stored outside the arena, and is initialized. Note that if this string were stored inside the arena, it would not have the long -string flag set. +string flag set because it requires less than 256 bytes to store. Empty Strings and Missing Data ++++++++++++++++++++++++++++++ The layout we have chosen has the benefit that newly created array buffer returned by ``calloc`` will be an array filled with empty strings by -construction, since a string with no flags set is a heap string with size -zero. 
This is not the only valid representation of an empty string, since other -flags may be set to indicate that the missing string is associated with a -pre-existing short string or arena string. Missing strings will have an -identical representation, except they will always have a flag, -``NPY_STRING_MISSING`` set in the flags field. Users will need to check if a -string is null before accessing an unpacked string buffer and we have set up the -C API in such a way as to force null-checking whenever a string is -unpacked. Both missing and empty strings are stored directly in the array buffer -and do not require additional heap storage. +construction, since a string with no flags set is an uninitialized zero-length +arena string. This is not the only valid representation of an empty string, since other +flags may be set to indicate that the empty string is associated with a +pre-existing short string or arena string. + +Missing strings will have an identical representation, except they will always +have a flag, ``NPY_STRING_MISSING`` set in the flags field. Users will need to +check if a string is null before accessing an unpacked string buffer and we have +set up the C API in such a way as to force null-checking whenever a string is +unpacked. Both missing and empty strings can be detected based on data in the +packed string representation and do not require corresponding room in the arena +allocation or extra heap allocations. Related work ------------ From 38594d2938e3aae903253e905efd0db6cfee28f1 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 5 Mar 2024 14:17:04 -0700 Subject: [PATCH 009/980] NEP: respond to marten's subsequent comments --- doc/neps/nep-0055-string_dtype.rst | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst index e6bc6a36a6a3..6417898d2ec6 100644 --- a/doc/neps/nep-0055-string_dtype.rst +++ b/doc/neps/nep-0055-string_dtype.rst @@ -455,23 +455,12 @@ following the behavior of sorting an array containing ``nan``. String Sentinels ++++++++++++++++ -A string missing data value is an instance of ``str`` or subtype of ``str`` and -will be used as the default value for empty arrays: +A string missing data value is an instance of ``str`` or subtype of ``str``. - >>> arr = np.empty(3, dtype=StringDType(na_object='missing')) - >>> arr - array(['missing', 'missing', 'missing']) - -If such an array is passed to a string operation or a cast, "missing" entries -will be treated as if they have a value given by the string sentinel: - - >>> np.char.upper(arr) - array(['MISSING', 'MISSING', 'MISSING']) - -Comparison operations will similarly use the sentinel value directly for missing -entries. This is the primary usage of this pattern we've found in downstream -code, where a missing data sentinel like ``"__nan__"`` is passed to a low-level -sorting or partitioning algorithm. +Operations will use the sentinel value directly for missing entries. This is the +primary usage of this pattern we've found in downstream code, where a missing +data sentinel like ``"__nan__"`` is passed to a low-level sorting or +partitioning algorithm. 
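A sketch of that pattern with the API proposed in this NEP (the sentinel and data values are chosen for illustration, and the exact repr may differ)::

    >>> import numpy as np
    >>> from numpy.dtypes import StringDType
    >>> dt = StringDType(na_object="__nan__")
    >>> arr = np.array(["b", "__nan__", "a"], dtype=dt)
    >>> np.sort(arr)  # the sentinel participates in comparisons as a plain string
    array(['__nan__', 'a', 'b'], dtype=StringDType(na_object='__nan__'))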
Other Sentinels
+++++++++++++++
 
@@ -564,8 +553,9 @@ be populated with string ufuncs:
 
 We feel ``np.strings`` is a more intuitive name than ``np.char``, and eventually
 will replace ``np.char`` once the minimum NumPy version supported by downstream
-libraries per SPEC-0 is new enough that they can safely switch to ``np.strings``
-without needing any logic conditional on the NumPy version.
+libraries per `SPEC-0 <https://scientific-python.org/specs/spec-0000/>`_ is new
+enough that they can safely switch to ``np.strings`` without needing any logic
+conditional on the NumPy version.
 
 Serialization
 *************

From e4c7e0220077f440deb9c0abfcd75b3b56798493 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Wed, 6 Mar 2024 17:18:00 -0700
Subject: [PATCH 010/980] ENH: WIP version of stringdtype ljust, rjust, and center

---
 .../_core/src/multiarray/stringdtype/dtype.c  |   3 +-
 numpy/_core/src/umath/string_buffer.h         |  13 +-
 numpy/_core/src/umath/stringdtype_ufuncs.cpp  | 264 +++++++++++++++++-
 numpy/_core/strings.py                        |   3 +
 4 files changed, 277 insertions(+), 6 deletions(-)

diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c
index 36c616f5bc13..3b7ef2bc68cf 100644
--- a/numpy/_core/src/multiarray/stringdtype/dtype.c
+++ b/numpy/_core/src/multiarray/stringdtype/dtype.c
@@ -930,8 +930,7 @@ load_new_string(npy_packed_static_string *out, npy_static_string *out_ss,
                       "Failed to allocate string in %s", err_context);
         return -1;
     }
-    int is_null = NpyString_load(allocator, out_pss, out_ss);
-    if (is_null == -1) {
+    if (NpyString_load(allocator, out_pss, out_ss) == -1) {
         npy_gil_error(PyExc_MemoryError,
                       "Failed to load string in %s", err_context);
         return -1;
diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h
index d1c15a0d3ad1..31c2e5b8293c 100644
--- a/numpy/_core/src/umath/string_buffer.h
+++ b/numpy/_core/src/umath/string_buffer.h
@@ -1519,10 +1519,19 @@ string_pad(Buffer buf, npy_int64 width, npy_ucs4 fill, JUSTPOSITION pos, Bu
         return -1;
     }
 
-    size_t len = buf.num_codepoints();
+    size_t len;
+    switch (enc) {
+        case ENCODING::ASCII:
+        case ENCODING::UTF32:
+            len = buf.num_codepoints();
+            break;
+        case ENCODING::UTF8:
+            len = buf.after - buf.buf;
+            break;
+    }
+
     if (len >= finalwidth) {
         buf.buffer_memcpy(out, len);
-        out.buffer_fill_with_zeros_after_index(len);
         return (npy_intp) len;
     }
 
diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp
index d617cf56cbe5..048ee46b303f 100644
--- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp
+++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp
@@ -148,7 +148,7 @@ static int multiply_loop_core(
             buf = (char *)PyMem_RawMalloc(newsize);
             if (buf == NULL) {
                 npy_gil_error(PyExc_MemoryError,
-                          "Failed to allocate string in multiply");
+                              "Failed to allocate string in multiply");
                 goto fail;
             }
         }
@@ -172,7 +172,7 @@ static int multiply_loop_core(
         if (descrs[0] == descrs[1]) {
             if (NpyString_pack(oallocator, ops, buf, newsize) < 0) {
                 npy_gil_error(PyExc_MemoryError,
-                          "Failed to pack string in multiply");
+                              "Failed to pack string in multiply");
                 goto fail;
             }
 
@@ -1575,7 +1575,214 @@ string_expandtabs_strided_loop(PyArrayMethod_Context *context,
     return -1;
 }
 
+static NPY_CASTING
+center_ljust_rjust_resolve_descriptors(
+        struct PyArrayMethodObject_tag *NPY_UNUSED(method),
+        PyArray_DTypeMeta *dtypes[], PyArray_Descr *given_descrs[],
+        PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset))
+{
+    PyArray_StringDTypeObject *input_descr = (PyArray_StringDTypeObject *)given_descrs[0];
+    
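/* The operands here are (input, width, fillchar, out); the input and
+     * fill descriptors must be equal StringDType instances, which the
+     * _eq_comparison check below enforces. */
+    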
PyArray_StringDTypeObject *fill_descr = (PyArray_StringDTypeObject *)given_descrs[2]; + + int eq_res = _eq_comparison(input_descr->coerce, fill_descr->coerce, + input_descr->na_object, fill_descr->na_object); + + if (eq_res < 0) { + return (NPY_CASTING)-1; + } + + if (eq_res != 1) { + PyErr_SetString(PyExc_TypeError, + "Can only text justification operations with equal" + "StringDType instances."); + return (NPY_CASTING)-1; + } + + Py_INCREF(given_descrs[0]); + loop_descrs[0] = given_descrs[0]; + Py_INCREF(given_descrs[1]); + loop_descrs[1] = given_descrs[1]; + Py_INCREF(given_descrs[2]); + loop_descrs[2] = given_descrs[2]; + + PyArray_Descr *out_descr = NULL; + + if (given_descrs[3] == NULL) { + out_descr = (PyArray_Descr *)new_stringdtype_instance( + ((PyArray_StringDTypeObject *)given_descrs[1])->na_object, + ((PyArray_StringDTypeObject *)given_descrs[1])->coerce, + NULL); + + if (out_descr == NULL) { + return (NPY_CASTING)-1; + } + } + else { + Py_INCREF(given_descrs[3]); + out_descr = given_descrs[3]; + } + + loop_descrs[3] = out_descr; + + return NPY_NO_CASTING; +} + + +static const char* CENTER_NAME = "center"; +static const char* LJUST_NAME = "ljust"; +static const char* RJUST_NAME = "rjust"; + +template +static int +center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, + char *const data[], + npy_intp const dimensions[], + npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata)) +{ + PyArray_StringDTypeObject *s1descr = (PyArray_StringDTypeObject *)context->descriptors[0]; + int has_null = s1descr->na_object != NULL; + int has_nan_na = s1descr->has_nan_na; + int has_string_na = s1descr->has_string_na; + const npy_static_string *default_string = &s1descr->default_string; + npy_intp N = dimensions[0]; + char *in1 = data[0]; + char *in2 = data[1]; + char *in3 = data[2]; + char *out = data[3]; + npy_intp in1_stride = strides[0]; + npy_intp in2_stride = strides[1]; + npy_intp in3_stride = strides[2]; + npy_intp out_stride = strides[3]; + + npy_string_allocator *allocators[4] = {}; + NpyString_acquire_allocators(4, context->descriptors, allocators); + npy_string_allocator *s1allocator = allocators[0]; + // allocators[1] is NULL + npy_string_allocator *s2allocator = allocators[2]; + npy_string_allocator *oallocator = allocators[3]; + JUSTPOSITION pos = *(JUSTPOSITION *)(context->method->static_data); + const char* ufunc_name = NULL; + + switch (pos) { + case JUSTPOSITION::CENTER: + ufunc_name = CENTER_NAME; + break; + case JUSTPOSITION::LEFT: + ufunc_name = LJUST_NAME; + break; + case JUSTPOSITION::RIGHT: + ufunc_name = RJUST_NAME; + break; + } + + while (N--) { + const npy_packed_static_string *ps1 = (npy_packed_static_string *)in1; + npy_static_string s1 = {0, NULL}; + int s1_isnull = NpyString_load(s1allocator, ps1, &s1); + const npy_packed_static_string *ps2 = (npy_packed_static_string *)in3; + npy_static_string s2 = {0, NULL}; + int s2_isnull = NpyString_load(s2allocator, ps2, &s2); + npy_static_string os = {0, NULL}; + npy_packed_static_string *ops = (npy_packed_static_string *)out; + if (s1_isnull == -1 || s2_isnull == -1) { + npy_gil_error(PyExc_MemoryError, "Failed to load string in %s", + ufunc_name); + goto fail; + } + if (NPY_UNLIKELY(s1_isnull || s2_isnull)) { + if (has_nan_na) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in %s", + ufunc_name); + goto fail; + } + goto next_step; + } + else if (has_string_na || !has_null) { + if (s1_isnull) { + s1 = *default_string; + } + if (s2_isnull) { + 
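/* like s1 above, a null fill string falls back to the
+                     * dtype's default (empty) string */
+                    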
s2 = *default_string;
+                }
+            }
+            else {
+                npy_gil_error(PyExc_ValueError,
+                              "Cannot %s null that is not a nan-like value",
+                              ufunc_name);
+                goto fail;
+            }
+        }
+        {
+            char *buf = NULL;
+            npy_intp newsize;
+            int overflowed = npy_mul_sizes_with_overflow(
+                    &newsize,
+                    (npy_intp)num_bytes_for_utf8_character((unsigned char *)s2.buf),
+                    (npy_intp)*(npy_int64*)in2);
+
+            if (overflowed) {
+                npy_gil_error(PyExc_MemoryError,
+                              "Failed to allocate string in %s", ufunc_name);
+                goto fail;
+            }
+
+            if (context->descriptors[0] == context->descriptors[3]) {
+                // in-place
+                buf = (char *)PyMem_RawMalloc(newsize);
+                if (buf == NULL) {
+                    npy_gil_error(PyExc_MemoryError,
+                                  "Failed to allocate string in %s", ufunc_name);
+                    goto fail;
+                }
+            }
+            else {
+                if (load_new_string(ops, &os, newsize, oallocator, ufunc_name) < 0) {
+                    goto fail;
+                }
+                /* explicitly discard const; initializing new buffer */
+                buf = (char *)os.buf;
+            }
+
+            Buffer inbuf((char *)s1.buf, s1.size);
+            Buffer fill((char *)s2.buf, s2.size);
+            Buffer outbuf(buf, newsize);
+
+            npy_intp len = string_pad(inbuf, (npy_int64)newsize, *fill, pos, outbuf);
+
+            if (len < 0) {
+                return -1;
+            }
+
+            // in-place operations need to clean up temp buffer
+            if (context->descriptors[0] == context->descriptors[3]) {
+                if (NpyString_pack(oallocator, ops, buf, newsize) < 0) {
+                    npy_gil_error(PyExc_MemoryError,
+                                  "Failed to pack string in %s", ufunc_name);
+                    goto fail;
+                }
+
+                PyMem_RawFree(buf);
+            }
+        }
+      next_step:
+
+        in1 += in1_stride;
+        in2 += in2_stride;
+        in3 += in3_stride;
+        out += out_stride;
+    }
+
+    NpyString_release_allocators(4, allocators);
+    return 0;
+
+  fail:
+    NpyString_release_allocators(4, allocators);
+    return -1;
+}

NPY_NO_EXPORT int
string_inputs_promoter(
@@ -2222,5 +2429,58 @@ init_stringdtype_ufuncs(PyObject *umath)
         return -1;
     }
 
+    PyArray_DTypeMeta *center_ljust_rjust_dtypes[] = {
+            &PyArray_StringDType,
+            &PyArray_Int64DType,
+            &PyArray_StringDType,
+            &PyArray_StringDType,
+    };
+
+    static const char* center_ljust_rjust_names[3] = {
+        "_center", "_ljust", "_rjust"
+    };
+
+    static JUSTPOSITION positions[3] = {
+            JUSTPOSITION::CENTER, JUSTPOSITION::LEFT, JUSTPOSITION::RIGHT
+    };
+
+    for (int i=0; i<3; i++) {
+        if (init_ufunc(umath, center_ljust_rjust_names[i],
+                       center_ljust_rjust_dtypes,
+                       &center_ljust_rjust_resolve_descriptors,
+                       &center_ljust_rjust_strided_loop<ENCODING::UTF8>, 3, 1, NPY_NO_CASTING,
+                       (NPY_ARRAYMETHOD_FLAGS) 0, &positions[i]) < 0) {
+            return -1;
+        }
+
+        PyArray_DTypeMeta *int_promoter_dtypes[] = {
+                &PyArray_StringDType,
+                (PyArray_DTypeMeta *)Py_None,
+                &PyArray_StringDType,
+                &PyArray_StringDType,
+        };
+
+        if (add_promoter(umath, center_ljust_rjust_names[i],
+                         int_promoter_dtypes, 4,
+                         string_multiply_promoter) < 0) {
+            return -1;
+        }
+
+        PyArray_DTypeMeta *unicode_promoter_dtypes[] = {
+                &PyArray_StringDType,
+                (PyArray_DTypeMeta *)Py_None,
+                &PyArray_UnicodeDType,
+                &PyArray_StringDType,
+        };
+
+        if (add_promoter(umath, center_ljust_rjust_names[i],
+                         unicode_promoter_dtypes, 4,
+                         string_multiply_promoter) < 0) {
+            return -1;
+        }
+    }
+
+
+
     return 0;
 }

diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py
index edf86e98a576..106d40f77f3c 100644
--- a/numpy/_core/strings.py
+++ b/numpy/_core/strings.py
@@ -625,6 +625,7 @@ def center(a, width, fillchar=' '):
     shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape)
     if a.dtype.char == "T":
         out = np.empty_like(a, shape=shape)
+        fillchar = fillchar.astype(a.dtype)
     else:
         out_dtype = f"{a.dtype.char}{width.max()}"
         out = np.empty_like(a, shape=shape, dtype=out_dtype)
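
A hedged aside (illustrative only, not part of the patch; it assumes NumPy 2.x
exposes ``numpy.dtypes.StringDType``): the cast added above makes the fill
argument carry the same dtype instance as the input before the ufunc loop runs.

    >>> import numpy as np
    >>> from numpy.dtypes import StringDType
    >>> a = np.array(["abc", "d"], dtype=StringDType())
    >>> fillchar = np.asarray("*").astype(a.dtype)  # mirrors the cast above
    >>> fillchar.dtype == a.dtype
    True

@@ -680,6 +681,7 @@ def 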
ljust(a, width, fillchar=' '): shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) if a.dtype.char == "T": out = np.empty_like(a, shape=shape) + fillchar = fillchar.astype(a.dtype) else: out_dtype = f"{a.dtype.char}{width.max()}" out = np.empty_like(a, shape=shape, dtype=out_dtype) @@ -735,6 +737,7 @@ def rjust(a, width, fillchar=' '): shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) if a.dtype.char == "T": out = np.empty_like(a, shape=shape) + fillchar = fillchar.astype(a.dtype) else: out_dtype = f"{a.dtype.char}{width.max()}" out = np.empty_like(a, shape=shape, dtype=out_dtype) From 16dee9859a5e0790592375cc1862a309d7486e04 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 8 Mar 2024 11:09:25 -0700 Subject: [PATCH 011/980] MAINT: fix compilation after rebasing --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 3 +-- numpy/_core/tests/test_stringdtype.py | 10 ++++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 9681f731680a..d72c911f8295 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1606,8 +1606,7 @@ center_ljust_rjust_resolve_descriptors( if (given_descrs[3] == NULL) { out_descr = (PyArray_Descr *)new_stringdtype_instance( ((PyArray_StringDTypeObject *)given_descrs[1])->na_object, - ((PyArray_StringDTypeObject *)given_descrs[1])->coerce, - NULL); + ((PyArray_StringDTypeObject *)given_descrs[1])->coerce); if (out_descr == NULL) { return (NPY_CASTING)-1; diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index b856c667c021..ff00b9488dc2 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1206,22 +1206,32 @@ def test_binary(string_array, unicode_array, function_name, args): def test_strip(string_array, unicode_array): + print("rjs") + rjs = np.char.rjust(string_array, 1000) rju = np.char.rjust(unicode_array, 1000) + print("ljs") + ljs = np.char.ljust(string_array, 1000) lju = np.char.ljust(unicode_array, 1000) + print("lstrip") + assert_array_equal( np.char.lstrip(rjs), np.char.lstrip(rju).astype(StringDType()), ) + print("rstrip") + assert_array_equal( np.char.rstrip(ljs), np.char.rstrip(lju).astype(StringDType()), ) + print("strip") + assert_array_equal( np.char.strip(ljs), np.char.strip(lju).astype(StringDType()), From 275ea5ac63f4620190f32d4f01605265e6a37abb Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 8 Mar 2024 11:25:55 -0700 Subject: [PATCH 012/980] BUG: fix UTF-8 heap overflow in ljust/rjust/center --- numpy/_core/src/umath/string_buffer.h | 2 +- numpy/_core/tests/test_stringdtype.py | 12 +----------- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 31c2e5b8293c..c1829ae5df5b 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -1557,7 +1557,7 @@ string_pad(Buffer buf, npy_int64 width, npy_ucs4 fill, JUSTPOSITION pos, Bu out += out.buffer_memset(fill, left); } buf.buffer_memcpy(out, len); - out += len; + out.advance_chars_or_bytes(len); if (right > 0) { out.buffer_memset(fill, right); } diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index ff00b9488dc2..551a8c10268a 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1205,33 +1205,23 @@ 
def test_binary(string_array, unicode_array, function_name, args): assert 0 -def test_strip(string_array, unicode_array): - print("rjs") - +def test_strip_ljust_rjust_consistency(string_array, unicode_array): rjs = np.char.rjust(string_array, 1000) rju = np.char.rjust(unicode_array, 1000) - print("ljs") - ljs = np.char.ljust(string_array, 1000) lju = np.char.ljust(unicode_array, 1000) - print("lstrip") - assert_array_equal( np.char.lstrip(rjs), np.char.lstrip(rju).astype(StringDType()), ) - print("rstrip") - assert_array_equal( np.char.rstrip(ljs), np.char.rstrip(lju).astype(StringDType()), ) - print("strip") - assert_array_equal( np.char.strip(ljs), np.char.strip(lju).astype(StringDType()), From dbc0a9083b7aee580f09cdb746977b25030d572c Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 8 Mar 2024 13:06:43 -0700 Subject: [PATCH 013/980] BUG: Fix bugs in WIP ljust/rjust/center implementation --- numpy/_core/src/umath/string_buffer.h | 33 ++++++------ numpy/_core/src/umath/stringdtype_ufuncs.cpp | 12 +++-- numpy/_core/tests/test_stringdtype.py | 8 +-- numpy/_core/tests/test_strings.py | 57 +++++--------------- 4 files changed, 43 insertions(+), 67 deletions(-) diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index c1829ae5df5b..70e275f72a90 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -1519,34 +1519,34 @@ string_pad(Buffer buf, npy_int64 width, npy_ucs4 fill, JUSTPOSITION pos, Bu return -1; } + size_t len_codepoints = buf.num_codepoints(); + size_t len_bytes = buf.after - buf.buf; + size_t len; - switch (enc) { - case ENCODING::ASCII: - case ENCODING::UTF32: - len = buf.num_codepoints(); - break; - case ENCODING::UTF8: - len = buf.after - buf.buf; - break; + if (enc == ENCODING::UTF8) { + len = len_bytes; + } + else { + len = len_codepoints; } - if (len >= finalwidth) { + if (len_codepoints >= finalwidth) { buf.buffer_memcpy(out, len); return (npy_intp) len; } size_t left, right; if (pos == JUSTPOSITION::CENTER) { - size_t pad = finalwidth - len; + size_t pad = finalwidth - len_codepoints; left = pad / 2 + (pad & finalwidth & 1); right = pad - left; } else if (pos == JUSTPOSITION::LEFT) { left = 0; - right = finalwidth - len; + right = finalwidth - len_codepoints; } else { - left = finalwidth - len; + left = finalwidth - len_codepoints; right = 0; } @@ -1554,13 +1554,16 @@ string_pad(Buffer buf, npy_int64 width, npy_ucs4 fill, JUSTPOSITION pos, Bu assert(left <= PY_SSIZE_T_MAX - len && right <= PY_SSIZE_T_MAX - (left + len)); if (left > 0) { - out += out.buffer_memset(fill, left); + out.advance_chars_or_bytes(out.buffer_memset(fill, left)); } + buf.buffer_memcpy(out, len); - out.advance_chars_or_bytes(len); + out += len_codepoints; + if (right > 0) { - out.buffer_memset(fill, right); + out.advance_chars_or_bytes(out.buffer_memset(fill, right)); } + return finalwidth; } diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index d72c911f8295..ae3381f287d2 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1712,12 +1712,16 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, } } { + Buffer inbuf((char *)s1.buf, s1.size); + Buffer fill((char *)s2.buf, s2.size); + char *buf = NULL; npy_intp newsize; int overflowed = npy_mul_sizes_with_overflow( - &newsize, + &(newsize), (npy_intp)num_bytes_for_utf8_character((unsigned char *)s2.buf), - (npy_intp)*(npy_int64*)in2); + 
(npy_intp)*(npy_int64*)in2 - inbuf.num_codepoints()); + newsize += s1.size; if (overflowed) { npy_gil_error(PyExc_MemoryError, @@ -1742,11 +1746,9 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, buf = (char *)os.buf; } - Buffer inbuf((char *)s1.buf, s1.size); - Buffer fill((char *)s2.buf, s2.size); Buffer outbuf(buf, newsize); - npy_intp len = string_pad(inbuf, (npy_int64)newsize, *fill, pos, outbuf); + npy_intp len = string_pad(inbuf, *(npy_int64*)in2, *fill, pos, outbuf); if (len < 0) { return -1; diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 551a8c10268a..5c488093e208 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1100,23 +1100,23 @@ def test_unary(string_array, unicode_array, function_name): ("add", (None, None)), ("multiply", (None, 2)), ("mod", ("format: %s", None)), - pytest.param("center", (None, 25), marks=unicode_bug_fail), + ("center", (None, 25)), ("count", (None, "A")), ("encode", (None, "UTF-8")), ("endswith", (None, "lo")), ("find", (None, "A")), ("index", (None, "e")), ("join", ("-", None)), - pytest.param("ljust", (None, 12), marks=unicode_bug_fail), + ("ljust", (None, 12)), ("partition", (None, "A")), ("replace", (None, "A", "B")), ("rfind", (None, "A")), ("rindex", (None, "e")), - pytest.param("rjust", (None, 12), marks=unicode_bug_fail), + ("rjust", (None, 12)), ("rpartition", (None, "A")), ("split", (None, "A")), ("startswith", (None, "A")), - pytest.param("zfill", (None, 12), marks=unicode_bug_fail), + ("zfill", (None, 12)), ] PASSES_THROUGH_NAN_NULLS = [ diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 427d14e4cb11..7d9c3a6af27c 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -716,36 +716,26 @@ def test_expandtabs_raises_overflow(self, dt): with pytest.raises(OverflowError, match="new string is too long"): np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize) + FILL_ERROR = "The fill character must be exactly one character long" + def test_center_raises_multiple_character_fill(self, dt): buf = np.array("abc", dtype=dt) fill = np.array("**", dtype=dt) - with pytest.raises(TypeError, - match="The fill character must be exactly one character long"): + with pytest.raises(TypeError, match=self.FILL_ERROR): np.strings.center(buf, 10, fill) def test_ljust_raises_multiple_character_fill(self, dt): buf = np.array("abc", dtype=dt) fill = np.array("**", dtype=dt) - with pytest.raises(TypeError, - match="The fill character must be exactly one character long"): + with pytest.raises(TypeError, match=self.FILL_ERROR): np.strings.ljust(buf, 10, fill) def test_rjust_raises_multiple_character_fill(self, dt): buf = np.array("abc", dtype=dt) fill = np.array("**", dtype=dt) - with pytest.raises(TypeError, - match="The fill character must be exactly one character long"): + with pytest.raises(TypeError, match=self.FILL_ERROR): np.strings.rjust(buf, 10, fill) - -@pytest.mark.parametrize("dt", [ - "S", - "U", - pytest.param("T", marks=pytest.mark.xfail( - reason="SrtingDType support to be implemented in a follow-up commit", - strict=True)), -]) -class TestMethodsWithoutStringDTypeSupport: @pytest.mark.parametrize("buf,width,fillchar,res", [ ('abc', 10, ' ', ' abc '), ('abc', 6, ' ', ' abc '), @@ -945,29 +935,10 @@ def test_expandtabs(self, buf, res, dt): res = np.array(res, dtype=dt) assert_array_equal(np.strings.expandtabs(buf), res) - -@pytest.mark.parametrize("dt", [ - "U", - 
pytest.param("T", marks=pytest.mark.xfail( - reason="SrtingDType support to be implemented in a follow-up commit", - strict=True)), -]) -class TestMethodsWithUnicodeWithoutStringDTypeSupport: - @pytest.mark.parametrize("buf,width,fillchar,res", [ - ('x', 2, '\U0010FFFF', 'x\U0010FFFF'), - ('x', 3, '\U0010FFFF', '\U0010FFFFx\U0010FFFF'), - ('x', 4, '\U0010FFFF', '\U0010FFFFx\U0010FFFF\U0010FFFF'), - ]) - def test_center(self, buf, width, fillchar, res, dt): - buf = np.array(buf, dtype=dt) - fillchar = np.array(fillchar, dtype=dt) - res = np.array(res, dtype=dt) - assert_array_equal(np.strings.center(buf, width, fillchar), res) - @pytest.mark.parametrize("buf,width,fillchar,res", [ - ('x', 2, '\U0010FFFF', 'x\U0010FFFF'), - ('x', 3, '\U0010FFFF', '\U0010FFFFx\U0010FFFF'), - ('x', 4, '\U0010FFFF', '\U0010FFFFx\U0010FFFF\U0010FFFF'), + ('x', 2, '\U0001044E', 'x\U0001044E'), + ('x', 3, '\U0001044E', '\U0001044Ex\U0001044E'), + ('x', 4, '\U0001044E', '\U0001044Ex\U0001044E\U0001044E'), ]) def test_center(self, buf, width, fillchar, res, dt): buf = np.array(buf, dtype=dt) @@ -976,9 +947,9 @@ def test_center(self, buf, width, fillchar, res, dt): assert_array_equal(np.strings.center(buf, width, fillchar), res) @pytest.mark.parametrize("buf,width,fillchar,res", [ - ('x', 2, '\U0010FFFF', 'x\U0010FFFF'), - ('x', 3, '\U0010FFFF', 'x\U0010FFFF\U0010FFFF'), - ('x', 4, '\U0010FFFF', 'x\U0010FFFF\U0010FFFF\U0010FFFF'), + ('x', 2, '\U0001044E', 'x\U0001044E'), + ('x', 3, '\U0001044E', 'x\U0001044E\U0001044E'), + ('x', 4, '\U0001044E', 'x\U0001044E\U0001044E\U0001044E'), ]) def test_ljust(self, buf, width, fillchar, res, dt): buf = np.array(buf, dtype=dt) @@ -987,9 +958,9 @@ def test_ljust(self, buf, width, fillchar, res, dt): assert_array_equal(np.strings.ljust(buf, width, fillchar), res) @pytest.mark.parametrize("buf,width,fillchar,res", [ - ('x', 2, '\U0010FFFF', '\U0010FFFFx'), - ('x', 3, '\U0010FFFF', '\U0010FFFF\U0010FFFFx'), - ('x', 4, '\U0010FFFF', '\U0010FFFF\U0010FFFF\U0010FFFFx'), + ('x', 2, '\U0001044E', '\U0001044Ex'), + ('x', 3, '\U0001044E', '\U0001044E\U0001044Ex'), + ('x', 4, '\U0001044E', '\U0001044E\U0001044E\U0001044Ex'), ]) def test_rjust(self, buf, width, fillchar, res, dt): buf = np.array(buf, dtype=dt) From f3d7536b5a0e6766b4b6e8b568655ac914774528 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 8 Mar 2024 13:06:58 -0700 Subject: [PATCH 014/980] ENH: add utf8 zfill implementation --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 136 +++++++++++++++++++ 1 file changed, 136 insertions(+) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index ae3381f287d2..d02f44d219b5 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1781,6 +1781,121 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, return -1; } +static int +zfill_strided_loop(PyArrayMethod_Context *context, + char *const data[], npy_intp const dimensions[], + npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) +{ + PyArray_StringDTypeObject *idescr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + npy_intp N = dimensions[0]; + char *in1 = data[0]; + char *in2 = data[1]; + char *out = data[2]; + npy_intp in1_stride = strides[0]; + npy_intp in2_stride = strides[1]; + npy_intp out_stride = strides[2]; + + npy_string_allocator *allocators[3] = {}; + NpyString_acquire_allocators(3, context->descriptors, allocators); + npy_string_allocator *iallocator = allocators[0]; + // 
allocators[1] is NULL + npy_string_allocator *oallocator = allocators[2]; + int has_null = idescr->na_object != NULL; + int has_nan_na = idescr->has_nan_na; + int has_string_na = idescr->has_string_na; + const npy_static_string *default_string = &idescr->default_string; + + while (N--) { + npy_static_string is = {0, NULL}; + const npy_packed_static_string *ips = + (npy_packed_static_string *)in1; + npy_static_string os = {0, NULL}; + npy_packed_static_string *ops = (npy_packed_static_string *)out; + int is_isnull = NpyString_load(iallocator, ips, &is); + if (is_isnull == -1) { + npy_gil_error(PyExc_MemoryError, + "Failed to load string in zfill"); + goto fail; + } + else if (is_isnull) { + if (has_nan_na) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in zfill"); + goto fail; + } + + goto next_step; + } + else if (has_string_na || !has_null) { + is = *(npy_static_string *)default_string; + } + else { + npy_gil_error(PyExc_TypeError, + "Cannot zfill null string that is not a nan-like " + "value"); + goto fail; + } + } + { + Buffer inbuf((char *)is.buf, is.size); + size_t in_codepoints = inbuf.num_codepoints(); + size_t width = (size_t)*(npy_int64 *)in2; + // number of leading one-byte characters plus the size of the + // original string + size_t outsize = (width - in_codepoints) + is.size; + char *buf = NULL; + if (context->descriptors[0] == context->descriptors[2]) { + // in-place + buf = (char *)PyMem_RawMalloc(outsize); + if (buf == NULL) { + npy_gil_error(PyExc_MemoryError, + "Failed to allocate string in zfill"); + goto fail; + } + } + else { + if (load_new_string(ops, &os, outsize, oallocator, "zfill") < 0) { + goto fail; + } + /* explicitly discard const; initializing new buffer */ + buf = (char *)os.buf; + } + + Buffer outbuf(buf, outsize); + if (string_zfill(inbuf, (npy_int64)width, outbuf) < 0) { + goto fail; + } + + // in-place operations need to clean up temp buffer + if (context->descriptors[0] == context->descriptors[2]) { + if (NpyString_pack(oallocator, ops, buf, outsize) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to pack string in zfill"); + goto fail; + } + + PyMem_RawFree(buf); + } + + } + + next_step: + + in1 += in1_stride; + in2 += in2_stride; + out += out_stride; + } + + NpyString_release_allocators(3, allocators); + return 0; + +fail: + NpyString_release_allocators(3, allocators); + return -1; +} + NPY_NO_EXPORT int string_inputs_promoter( PyObject *ufunc_obj, PyArray_DTypeMeta *op_dtypes[], @@ -2477,7 +2592,28 @@ init_stringdtype_ufuncs(PyObject *umath) } } + PyArray_DTypeMeta *zfill_dtypes[] = { + &PyArray_StringDType, + &PyArray_Int64DType, + &PyArray_StringDType, + }; + if (init_ufunc(umath, "_zfill", zfill_dtypes, multiply_resolve_descriptors, + zfill_strided_loop, 2, 1, NPY_NO_CASTING, + (NPY_ARRAYMETHOD_FLAGS) 0, NULL) < 0) { + return -1; + } + + PyArray_DTypeMeta *int_promoter_dtypes[] = { + &PyArray_StringDType, + (PyArray_DTypeMeta *)Py_None, + &PyArray_StringDType, + }; + + if (add_promoter(umath, "_zfill", int_promoter_dtypes, 3, + string_multiply_promoter) < 0) { + return -1; + } return 0; } From 69feeb343eda76b18fb80b33907ea9ade9426142 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 8 Mar 2024 13:23:35 -0700 Subject: [PATCH 015/980] REL: Begin NumPy 2.1.0 development (#25968) From 74e320e3c1ff6761f316efe46ffc430337ec3b5f Mon Sep 17 00:00:00 2001 From: Yuki K Date: Sat, 9 Mar 2024 01:14:04 +0000 Subject: [PATCH 016/980] DOC: Fix a note section markup in ``dtype.rst`` 
[skip cirrus] [skip azp] [skip actions]
---
 doc/source/reference/c-api/dtype.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst
index 1d521e39a832..8412219a79e1 100644
--- a/doc/source/reference/c-api/dtype.rst
+++ b/doc/source/reference/c-api/dtype.rst
@@ -390,7 +390,7 @@ to the front of the integer name.
     This is the correct integer for lengths or indexing.  In practice this is
     normally the size of a pointer, but this is not guaranteed.
 
-    ..note::
+    .. note::
         Before NumPy 2.0, this was the same as ``Py_intptr_t``.  While a
         better match, this did not match actual usage in practice.  On the
         Python side, we still support ``np.dtype('p')`` to fetch a dtype

From 7b9dede257171bdfafdd4a6fa43ac4df806b1103 Mon Sep 17 00:00:00 2001
From: Yuki K
Date: Sat, 9 Mar 2024 03:55:22 +0000
Subject: [PATCH 017/980] DOC: Fix module setting of ``MaskedArray``

Set module of ``MaskedArray`` to ``numpy.ma`` because
``numpy.ma.core.MaskedArray`` is an internal alias that is not documented.

ref: gh-13114

[skip cirrus] [skip azp] [skip actions]
---
 numpy/ma/core.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 8316b481e827..c67582265e4d 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -37,6 +37,7 @@
 from numpy import array as narray, expand_dims, iinfo, finfo
 from numpy._core.numeric import normalize_axis_tuple
 from numpy._utils._inspect import getargspec, formatargspec
+from numpy._utils import set_module
 
 
 __all__ = [
@@ -2636,7 +2637,7 @@ class MaskedIterator:
     >>> x = np.ma.array(arange(6).reshape(2, 3))
    >>> fl = x.flat
    >>> type(fl)
-    <class 'numpy.ma.core.MaskedIterator'>
+    <class 'numpy.ma.MaskedIterator'>
    >>> for item in fl:
    ...     print(item)
    ...
@@ -2717,6 +2718,7 @@ def __next__(self):
        return d
 
 
+@set_module("numpy.ma")
 class MaskedArray(ndarray):
    """
    An array class with possibly masked values.
@@ -8378,7 +8380,7 @@ def asarray(a, dtype=None, order=None):
           mask=False,
     fill_value=1e+20)
    >>> type(np.ma.asarray(x))
-    <class 'numpy.ma.core.MaskedArray'>
+    <class 'numpy.ma.MaskedArray'>
 
    """
    order = order or 'C'
@@ -8425,7 +8427,7 @@ def asanyarray(a, dtype=None):
           mask=False,
     fill_value=1e+20)
    >>> type(np.ma.asanyarray(x))
-    <class 'numpy.ma.core.MaskedArray'>
+    <class 'numpy.ma.MaskedArray'>
 
    """
    # workaround for #8666, to preserve identity. Ideally the bottom line

From 7934d5b2963e1eaab45645af24eb050122cdf3ee Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Fri, 8 Mar 2024 13:52:56 -0700
Subject: [PATCH 018/980] MAINT: Prepare main for NumPy 2.1.0 development

---
 doc/source/release.rst                    |  1 +
 doc/source/release/2.1.0-notes.rst        | 19 +++++++++++++++++++
 numpy/_core/code_generators/cversions.txt |  2 ++
 pavement.py                               |  2 +-
 pyproject.toml                            |  2 +-
 5 files changed, 24 insertions(+), 2 deletions(-)
 create mode 100644 doc/source/release/2.1.0-notes.rst

diff --git a/doc/source/release.rst b/doc/source/release.rst
index 41eeac87bf64..5226ef89a764 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -5,6 +5,7 @@ Release notes
 .. toctree::
     :maxdepth: 2
 
+    2.1.0
     2.0.0
     1.26.4
     1.26.3
diff --git a/doc/source/release/2.1.0-notes.rst b/doc/source/release/2.1.0-notes.rst
new file mode 100644
index 000000000000..d0b0b6f1b785
--- /dev/null
+++ b/doc/source/release/2.1.0-notes.rst
@@ -0,0 +1,19 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 2.1.0 Release Notes
+==========================
+
+
+Highlights
+==========
+
+*We'll choose highlights for this release near the end of the release cycle.*
+
+
+.. if release snippets have been incorporated already, uncomment the following
+   line (leave the ``.. include::`` directive)
+
+.. 
**Content from release note snippets in doc/release/upcoming_changes:** + +.. include:: notes-towncrier.rst diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index f91f616585a3..ccba8a1c25b3 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -70,7 +70,9 @@ 0x00000010 = 04a7bf1e65350926a0e528798da263c0 # Version 17 (NumPy 1.25) No actual change. +# Version 17 (NumPy 1.26) No change 0x00000011 = ca1aebdad799358149567d9d93cbca09 # Version 18 (NumPy 2.0.0) +# Version 18 (NumPy 2.1.0) No change 0x00000012 = 2b8f1f4da822491ff030b2b37dff07e3 diff --git a/pavement.py b/pavement.py index 3a52db2e6555..f205f1f40839 100644 --- a/pavement.py +++ b/pavement.py @@ -38,7 +38,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.0.0-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.1.0-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index 036137c36da7..6be12513c3ef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.0.0.dev0" +version = "2.1.0.dev0" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From 0cfa38f5862958d27a6afd48090a12a551c8b44a Mon Sep 17 00:00:00 2001 From: mattip Date: Fri, 8 Mar 2024 09:21:58 +0200 Subject: [PATCH 019/980] CI: update build image [skip actions][skip azp][skip cirrus] --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8f9fa8c9fed0..85c842b813bc 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.4 + - image: cimg/python:3.11.8 working_directory: ~/repo From 05cb05e74c20263841c43813d46ef8e094691252 Mon Sep 17 00:00:00 2001 From: mattip Date: Fri, 8 Mar 2024 14:05:54 +0200 Subject: [PATCH 020/980] DOC: make nitpick mode cleaner [skip actions][skip azp][skip cirrus] --- doc/source/conf.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/source/conf.py b/doc/source/conf.py index 1e734c0134bc..b30fe3c9978a 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -585,3 +585,12 @@ class NumPyLexer(CLexer): breathe_projects = dict(numpy=os.path.join("..", "build", "doxygen", "xml")) breathe_default_project = "numpy" breathe_default_members = ("members", "undoc-members", "protected-members") + +# See https://github.com/breathe-doc/breathe/issues/696 +nitpick_ignore = [ + ('c:identifier', 'FILE'), + ('c:identifier', 'size_t'), + ('c:identifier', 'PyHeapTypeObject'), +] + + From 5dbf807a7c98c8e1458c733b98d69397d1955890 Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 10 Mar 2024 12:42:33 +0200 Subject: [PATCH 021/980] DOC: fix references [skip actions][skip azp][skip cirrus] --- doc/source/user/how-to-io.rst | 2 +- numpy/_core/records.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user/how-to-io.rst b/doc/source/user/how-to-io.rst index 9b3a71fa40bb..ca4abcd13746 100644 --- a/doc/source/user/how-to-io.rst +++ b/doc/source/user/how-to-io.rst @@ -327,7 +327,7 @@ created with NumPy 1.26. 
Convert from a pandas DataFrame to a NumPy array ================================================ -See :meth:`pandas.DataFrame.to_numpy`. +See :meth:`pandas.Series.to_numpy`. Save/restore using `~numpy.ndarray.tofile` and `~numpy.fromfile` ================================================================ diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 79755e09bb40..8bdeec15c6d2 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -583,7 +583,7 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None, ``arrayList[0]``. formats, names, titles, aligned, byteorder : If `dtype` is ``None``, these arguments are passed to - `numpy.format_parser` to construct a dtype. See that function for + `numpy.rec.format_parser` to construct a dtype. See that function for detailed documentation. Returns From 3746939c8862510bba5ee98da9e5349db3fa7c01 Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 10 Mar 2024 15:37:35 +0200 Subject: [PATCH 022/980] DOC: override inherited Mapping docstrings in NpzFile --- numpy/lib/_npyio_impl.py | 58 +++++++++++++++++++++++++++++----------- 1 file changed, 43 insertions(+), 15 deletions(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 8fef65e7f6ab..24d377c9df06 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -278,6 +278,34 @@ def __repr__(self): array_names += "..." return f"NpzFile {filename!r} with keys: {array_names}" + # Work around problems with the docstrings in the Mapping methods + # They contain a `->`, which confuses the type annotation interpretations + # of sphinx-docs. See gh-25964 + + def get(self, key, default=None, /): + """ + D.get(k,[,d]) returns D[k] if k in D, else d. d defaults to None. + """ + return dict.get(self, key, default) + + def items(self): + """ + D.items() returns a set-like object providing a view on the items + """ + return dict.items(self) + + def keys(self): + """ + D.keys() returns a set-like object providing a view on the keys + """ + return dict.keys(self) + + def values(self): + """ + D.values() returns a set-like object providing a view on the values + """ + return dict.values(self) + @set_module('numpy') def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, @@ -487,9 +515,9 @@ def save(file, arr, allow_pickle=True, fix_imports=True): arr : array_like Array data to be saved. allow_pickle : bool, optional - Allow saving object arrays using Python pickles. Reasons for + Allow saving object arrays using Python pickles. Reasons for disallowing pickles include security (loading pickled data can execute - arbitrary code) and portability (pickled objects may not be loadable + arbitrary code) and portability (pickled objects may not be loadable on different Python installations, for example if the stored objects require libraries that are not available, and not all pickled data is compatible between Python 2 and Python 3). @@ -1814,10 +1842,10 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, .. versionadded:: 1.10.0 encoding : str, optional Encoding used to decode the inputfile. Does not apply when `fname` - is a file object. The special value 'bytes' enables backward + is a file object. The special value 'bytes' enables backward compatibility workarounds that ensure that you receive byte arrays - when possible and passes latin1 encoded strings to converters. - Override this value to receive unicode arrays and pass strings + when possible and passes latin1 encoded strings to converters. 
+ Override this value to receive unicode arrays and pass strings as input to converters. If set to None the system default is used. The default value is 'bytes'. @@ -1854,7 +1882,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, * Individual values are not stripped of spaces by default. When using a custom converter, make sure the function does remove spaces. * Custom converters may receive unexpected values due to dtype - discovery. + discovery. References ---------- @@ -2127,7 +2155,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, except ValueError: # We couldn't find it: the name must have been dropped continue - # Redefine the key if it's a column number + # Redefine the key if it's a column number # and usecols is defined if usecols: try: @@ -2161,23 +2189,23 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, if len(dtype_flat) > 1: # Flexible type : get a converter from each dtype zipit = zip(dtype_flat, missing_values, filling_values) - converters = [StringConverter(dt, + converters = [StringConverter(dt, locked=True, - missing_values=miss, + missing_values=miss, default=fill) for (dt, miss, fill) in zipit] else: # Set to a default converter (but w/ different missing values) zipit = zip(missing_values, filling_values) - converters = [StringConverter(dtype, + converters = [StringConverter(dtype, locked=True, - missing_values=miss, + missing_values=miss, default=fill) for (miss, fill) in zipit] # Update the converters to use the user-defined ones uc_update = [] for (j, conv) in user_converters.items(): - # If the converter is specified by column names, + # If the converter is specified by column names, # use the index instead if _is_string_like(j): try: @@ -2201,8 +2229,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, if conv is bytes: user_conv = asbytes elif byte_converters: - # Converters may use decode to workaround numpy's old - # behavior, so encode the string again before passing + # Converters may use decode to workaround numpy's old + # behavior, so encode the string again before passing # to the user converter. def tobytes_first(x, conv): if type(x) is bytes: @@ -2338,7 +2366,7 @@ def tobytes_first(x, conv): "argument is deprecated. Set the encoding, use None for the " "system default.", np.exceptions.VisibleDeprecationWarning, stacklevel=2) - + def encode_unicode_cols(row_tup): row = list(row_tup) for i in strcolidx: From 2e134c16d94fee77077d28942d513bdb8156f3ea Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 10 Mar 2024 15:38:08 +0200 Subject: [PATCH 023/980] DOC: enforce nit-picky mode --- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 85c842b813bc..646ee52633d3 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -74,7 +74,7 @@ jobs: . venv/bin/activate cd doc # Don't use -q, show warning summary" - SPHINXOPTS="-j2 -n" make -e html || echo "ignoring errors for now, see gh-13114" + SPHINXOPTS="-j2 -n" make -e html if [[ $(find build/html -type f | wc -l) -lt 1000 ]]; then echo "doc build failed: build/html is empty" exit -1 @@ -85,7 +85,7 @@ jobs: command: | . 
venv/bin/activate cd doc/neps - SPHINXOPTS="-j2 -q" make -e html + SPHINXOPTS="-j2 -n" make -e html || echo "ignoring errors for now" - store_artifacts: path: doc/build/html/ From 8df4cfb425fa91c0192ef976688213fa6335db7c Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 10 Mar 2024 16:54:33 +0200 Subject: [PATCH 024/980] fix, add more tests [skip cirrus] --- numpy/lib/_npyio_impl.py | 8 ++++---- numpy/lib/tests/test_io.py | 4 ++++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 24d377c9df06..8986b94fd500 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -286,25 +286,25 @@ def get(self, key, default=None, /): """ D.get(k,[,d]) returns D[k] if k in D, else d. d defaults to None. """ - return dict.get(self, key, default) + return Mapping.get(self, key, default) def items(self): """ D.items() returns a set-like object providing a view on the items """ - return dict.items(self) + return Mapping.items(self) def keys(self): """ D.keys() returns a set-like object providing a view on the keys """ - return dict.keys(self) + return Mapping.keys(self) def values(self): """ D.values() returns a set-like object providing a view on the values """ - return dict.values(self) + return Mapping.values(self) @set_module('numpy') diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 44664c2df891..38ded1f26cda 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -2760,12 +2760,16 @@ def test_npzfile_dict(): assert_(f in ['x', 'y']) assert_equal(a.shape, (3, 3)) + for a in z.values(): + assert_equal(a.shape, (3, 3)) + assert_(len(z.items()) == 2) for f in z: assert_(f in ['x', 'y']) assert_('x' in z.keys()) + assert (z.get('x') == z['x']).all() @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") From abc7060488853ce5c02ef6562a82307f70b3b286 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sun, 10 Mar 2024 12:30:46 +1100 Subject: [PATCH 025/980] BLD: push a tag builds a wheel --- .github/workflows/wheels.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 734dd635b549..b373df862c30 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -26,6 +26,9 @@ on: branches: - main - maintenance/** + push: + tags: + - v* workflow_dispatch: concurrency: From 5c2280073e80c66e7a5327b3a223f4f95bdb2acd Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Mon, 11 Mar 2024 08:42:26 +1100 Subject: [PATCH 026/980] BLD: omit pp39-macosx_arm64 from matrix [wheel build] --- .github/workflows/wheels.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 734dd635b549..e2d5563f3e91 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -85,9 +85,11 @@ jobs: python: ["cp39", "cp310", "cp311", "cp312", "pp39"] exclude: # Don't build PyPy 32-bit windows - - buildplat: [windows-2019, win32] + - buildplat: [windows-2019, win32, ""] python: "pp39" - - buildplat: [ ubuntu-20.04, musllinux_x86_64 ] + - buildplat: [ ubuntu-20.04, musllinux_x86_64, "" ] + python: "pp39" + - buildplat: [ macos-14, macosx_arm64, accelerate ] python: "pp39" env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} From 740ca3a389eedd0c452cc2383a912e80f399354d Mon Sep 17 00:00:00 2001 From: Liang Yan Date: Mon, 11 Mar 2024 13:50:18 +0800 Subject: [PATCH 027/980] DOC: Remove unused parameter description The name and 
slot parameters are not used. So remove description. [skip actions][skip azp][skip cirrus] Signed-off-by: Liang Yan --- numpy/_core/src/multiarray/array_method.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index b262c1f263c2..9e4f3a55fba9 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -412,11 +412,9 @@ PyArrayMethod_FromSpec(PyArrayMethod_Spec *spec) /** * Create a new ArrayMethod (internal version). * - * @param name A name for the individual method, may be NULL. * @param spec A filled context object to pass generic information about * the method (such as usually needing the API, and the DTypes). * Unused fields must be NULL. - * @param slots Slots with the correct pair of IDs and (function) pointers. * @param private Some slots are currently considered private, if not true, * these will be rejected. * From ebb8f6ef51f9b6cf7a869c18dfe20865078c0048 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 11 Mar 2024 08:14:38 +0100 Subject: [PATCH 028/980] CI: clean up some unused `choco install` invocations - `unzip` is (I think) a left-over from having to unzip OpenBLAS tarballs (we have wheels for that now) - `ninja` is already installed from PyPI, it's in `test_requirements.txt` The `rtools43` path cleanup helps readability. Rtools is already installed on the CI runner image, and only used for testing (f2py tests in "full" mode). [skip cirrus] [skip circle] [skip actions] --- azure-steps-windows.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml index cc458723f28f..0baf374e1e3f 100644 --- a/azure-steps-windows.yml +++ b/azure-steps-windows.yml @@ -14,10 +14,7 @@ steps: displayName: 'Install dependencies; some are optional to avoid test skips' - powershell: | - choco install -y --stoponfirstfailure unzip choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite - choco install --stoponfirstfailure ninja - echo "##vso[task.setvariable variable=RTOOLS43_HOME]c:\rtools43" displayName: 'Install utilities' - powershell: | @@ -42,7 +39,7 @@ steps: - powershell: | cd tools # avoid root dir to not pick up source tree # Get a gfortran onto the path for f2py tests - $env:PATH = "$env:RTOOLS43_HOME\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" + $env:PATH = "c:\\rtools43\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" If ( $env:TEST_MODE -eq "full" ) { pytest --pyargs numpy -rsx --junitxml=junit/test-results.xml } else { From e24abaff004ce1d6b556e1be6c9aae0506696645 Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Mon, 11 Mar 2024 15:33:35 +0100 Subject: [PATCH 029/980] Error when non-ascii fillchar is used with ascii buffer --- numpy/_core/src/umath/string_ufuncs.cpp | 6 ++- numpy/_core/strings.py | 12 +++--- numpy/_core/tests/test_strings.py | 51 ++++++++++++++++--------- 3 files changed, 44 insertions(+), 25 deletions(-) diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 6b5377ceb180..337c8f65ba1e 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -529,7 +529,11 @@ string_center_ljust_rjust_loop(PyArrayMethod_Context *context, Buffer buf(in1, elsize1); Buffer fill(in3, elsize3); Buffer outbuf(out, outsize); - size_t len = string_pad(buf, *(npy_int64 *)in2, *fill, pos, outbuf); + if (bufferenc == ENCODING::ASCII && fillenc == 
ENCODING::UTF32 && *fill > 0x7F) { + npy_gil_error(PyExc_ValueError, "non-ascii fill character is not allowed when buffer is ascii"); + return -1; + } + npy_intp len = string_pad(buf, *(npy_int64 *)in2, *fill, pos, outbuf); if (len < 0) { return -1; } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 106d40f77f3c..f64f334407ce 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -599,8 +599,8 @@ def center(a, width, fillchar=' '): Notes ----- While it is possible for ``a`` and ``fillchar`` to have different dtypes, - passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S", - will result in silent errors (resulting buffer might have wrong data). + passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S" + is not allowed, and a ``ValueError`` is raised. Examples -------- @@ -658,8 +658,8 @@ def ljust(a, width, fillchar=' '): Notes ----- While it is possible for ``a`` and ``fillchar`` to have different dtypes, - passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S", - will result in silent errors (resulting buffer might have wrong data). + passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S" + is not allowed, and a ``ValueError`` is raised. Examples -------- @@ -714,8 +714,8 @@ def rjust(a, width, fillchar=' '): Notes ----- While it is possible for ``a`` and ``fillchar`` to have different dtypes, - passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S", - will result in silent errors (resulting buffer might have wrong data). + passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S" + is not allowed, and a ``ValueError`` is raised. Examples -------- diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 7d9c3a6af27c..6658e6c1780b 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -971,37 +971,52 @@ def test_rjust(self, buf, width, fillchar, res, dt): class TestMixedTypeMethods: def test_center(self): - u = np.array("😊", dtype="U") - s = np.array("*", dtype="S") + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") res = np.array("*😊*", dtype="U") - assert_array_equal(np.strings.center(u, 3, s), res) + assert_array_equal(np.strings.center(buf, 3, fill), res) - u = np.array("*", dtype="U") - s = np.array("s", dtype="S") + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") res = np.array("*s*", dtype="S") - assert_array_equal(np.strings.center(s, 3, u), res) + assert_array_equal(np.strings.center(buf, 3, fill), res) + + with pytest.raises(ValueError, match="non-ascii fill character"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.center(buf, 3, fill) def test_ljust(self): - u = np.array("😊", dtype="U") - s = np.array("*", dtype="S") + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") res = np.array("😊**", dtype="U") - assert_array_equal(np.strings.ljust(u, 3, s), res) + assert_array_equal(np.strings.ljust(buf, 3, fill), res) - u = np.array("*", dtype="U") - s = np.array("s", dtype="S") + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") res = np.array("s**", dtype="S") - assert_array_equal(np.strings.ljust(s, 3, u), res) + assert_array_equal(np.strings.ljust(buf, 3, fill), res) + + with pytest.raises(ValueError, match="non-ascii fill character"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.ljust(buf, 3, fill) def test_rjust(self): - u = np.array("😊", dtype="U") - 
s = np.array("*", dtype="S") + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") res = np.array("**😊", dtype="U") - assert_array_equal(np.strings.rjust(u, 3, s), res) + assert_array_equal(np.strings.rjust(buf, 3, fill), res) - u = np.array("*", dtype="U") - s = np.array("s", dtype="S") + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") res = np.array("**s", dtype="S") - assert_array_equal(np.strings.rjust(s, 3, u), res) + assert_array_equal(np.strings.rjust(buf, 3, fill), res) + + with pytest.raises(ValueError, match="non-ascii fill character"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.rjust(buf, 3, fill) class TestUnicodeOnlyMethodsRaiseWithBytes: From da4d2b0ff4e9be3e82aa860dfc2a0fedcda64692 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 11 Mar 2024 17:06:02 +0100 Subject: [PATCH 030/980] CI: don't use `fetch-tags` in wheel build jobs This failed in combination with tag-based triggers: - issue: gh-25979 - cause: https://github.com/numpy/numpy/pull/25981#issuecomment-1987653985 More importantly, we don't need to fetch either the tags or the whole commit history, because: (a) we don't derive the version number from the last tag, but from parsing pyproject.toml (b) we don't use a commit count, but only yyyymmdd.git-hash See `numpy/_build_utils/gitversion.py` Done only in the wheel build job right now, because that's where the problem is and we're in a hurry to solve it on the 2.0.x branch. However, this change should be fine everywhere. [skip cirrus] [skip circle] [skip azp] --- .github/workflows/wheels.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 942e7ea91a26..7955b71de9fd 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -103,11 +103,6 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: true - # versioneer.py requires the latest tag to be reachable. Here we - # fetch the complete history to get access to the tags. - # A shallow clone can work when the following issue is resolved: - # https://github.com/actions/checkout/issues/338 - fetch-tags: true - name: Setup MSVC (32-bit) if: ${{ matrix.buildplat[1] == 'win32' }} @@ -217,11 +212,6 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: true - # versioneer.py requires the latest tag to be reachable. Here we - # fetch the complete history to get access to the tags. 
-      # A shallow clone can work when the following issue is resolved:
-      # https://github.com/actions/checkout/issues/338
-      fetch-tags: true
      # Used to push the built wheels
      - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
        with:

From 9d0518ca58aabc9db33f4d4e594d145e8a3bf569 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Mon, 11 Mar 2024 11:15:24 -0600
Subject: [PATCH 031/980] MAINT: respond to review comments, try to fix tests

---
 .../_core/code_generators/ufunc_docstrings.py |  2 +-
 numpy/_core/src/umath/stringdtype_ufuncs.cpp  | 29 ++------
 numpy/_core/strings.py                        | 73 +++++++++----------
 3 files changed, 41 insertions(+), 63 deletions(-)

diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py
index d76acd17cd2f..711db8d2e6a4 100644
--- a/numpy/_core/code_generators/ufunc_docstrings.py
+++ b/numpy/_core/code_generators/ufunc_docstrings.py
@@ -4987,7 +4987,7 @@ def add_newdoc(place, name, doc):
 
     Examples
     --------
->>> a = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> a = np.array(['aAaAaA', '  aA  ', 'abBABba'])
     >>> np.strings.rjust(a, width=3)
     array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
     >>> np.strings.rjust(a, width=9)
diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp
index d02f44d219b5..d2622859ef23 100644
--- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp
+++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp
@@ -1589,7 +1589,7 @@ center_ljust_rjust_resolve_descriptors(
 
     if (eq_res != 1) {
         PyErr_SetString(PyExc_TypeError,
-                "Can only text justification operations with equal"
+                "Can only do text justification operations with equal"
                 "StringDType instances.");
         return (NPY_CASTING)-1;
     }
@@ -1623,11 +1623,6 @@ center_ljust_rjust_resolve_descriptors(
 }
 
 
-static const char* CENTER_NAME = "center";
-static const char* LJUST_NAME = "ljust";
-static const char* RJUST_NAME = "rjust";
-
-template <ENCODING enc>
 static int
 center_ljust_rjust_strided_loop(PyArrayMethod_Context *context,
                                char *const data[],
@@ -1658,19 +1653,7 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context,
     npy_string_allocator *oallocator = allocators[3];
 
     JUSTPOSITION pos = *(JUSTPOSITION *)(context->method->static_data);
-    const char* ufunc_name = NULL;
-
-    switch (pos) {
-        case JUSTPOSITION::CENTER:
-            ufunc_name = CENTER_NAME;
-            break;
-        case JUSTPOSITION::LEFT:
-            ufunc_name = LJUST_NAME;
-            break;
-        case JUSTPOSITION::RIGHT:
-            ufunc_name = RJUST_NAME;
-            break;
-    }
+    const char* ufunc_name = ((PyUFuncObject *)context->caller)->name;
 
     while (N--) {
         const npy_packed_static_string *ps1 = (npy_packed_static_string *)in1;
@@ -1712,8 +1695,8 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context,
             }
         }
         {
-            Buffer<enc> inbuf((char *)s1.buf, s1.size);
-            Buffer<enc> fill((char *)s2.buf, s2.size);
+            Buffer<ENCODING::UTF8> inbuf((char *)s1.buf, s1.size);
+            Buffer<ENCODING::UTF8> fill((char *)s2.buf, s2.size);
 
             char *buf = NULL;
             npy_intp newsize;
@@ -2560,7 +2543,7 @@ init_stringdtype_ufuncs(PyObject *umath)
         if (init_ufunc(umath, center_ljust_rjust_names[i],
                        center_ljust_rjust_dtypes,
                        &center_ljust_rjust_resolve_descriptors,
-                       &center_ljust_rjust_strided_loop<ENCODING::UTF8>, 3, 1, NPY_NO_CASTING,
+                       &center_ljust_rjust_strided_loop, 3, 1, NPY_NO_CASTING,
                        (NPY_ARRAYMETHOD_FLAGS) 0, &positions[i]) < 0) {
            return -1;
        }
diff --git 
a/numpy/_core/strings.py b/numpy/_core/strings.py index f64f334407ce..d77340b25b2e 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -555,7 +555,7 @@ def expandtabs(a, tabsize=8): Examples -------- - >>> a = np.array(['\t\tHello\tworld']) + >>> a = np.array(['\t\tHello\tworld']) >>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP array([' Hello world'], dtype=' Date: Mon, 11 Mar 2024 12:09:03 -0600 Subject: [PATCH 032/980] MNT: fix match string in test_strings --- numpy/_core/tests/test_strings.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 6658e6c1780b..25d605d2ce6e 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -981,7 +981,7 @@ def test_center(self): res = np.array("*s*", dtype="S") assert_array_equal(np.strings.center(buf, 3, fill), res) - with pytest.raises(ValueError, match="non-ascii fill character"): + with pytest.raises(ValueError, match="'ascii' codec can't encode"): buf = np.array("s", dtype="S") fill = np.array("😊", dtype="U") np.strings.center(buf, 3, fill) @@ -997,7 +997,7 @@ def test_ljust(self): res = np.array("s**", dtype="S") assert_array_equal(np.strings.ljust(buf, 3, fill), res) - with pytest.raises(ValueError, match="non-ascii fill character"): + with pytest.raises(ValueError, match="'ascii' codec can't encode"): buf = np.array("s", dtype="S") fill = np.array("😊", dtype="U") np.strings.ljust(buf, 3, fill) @@ -1013,7 +1013,7 @@ def test_rjust(self): res = np.array("**s", dtype="S") assert_array_equal(np.strings.rjust(buf, 3, fill), res) - with pytest.raises(ValueError, match="non-ascii fill character"): + with pytest.raises(ValueError, match="'ascii' codec can't encode"): buf = np.array("s", dtype="S") fill = np.array("😊", dtype="U") np.strings.rjust(buf, 3, fill) From 1301571ca2775d08831e4b465c5848403ab40199 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20B=C3=A1rias?= Date: Fri, 8 Mar 2024 22:52:48 +0000 Subject: [PATCH 033/980] BUG: raise error for negative-sized dtype Closes #25860 --- numpy/_core/src/multiarray/descriptor.c | 5 +++++ numpy/_core/tests/test_dtype.py | 3 +++ numpy/_core/tests/test_numeric.py | 3 +-- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index c77b380e9386..8a8e432d82ee 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -1813,6 +1813,11 @@ _convert_from_str(PyObject *obj, int align) /* Parse the integer, make sure it's the rest of the string */ elsize = (int)strtol(type + 1, &typeend, 10); + /* Make sure size is not negative */ + if (elsize < 0) { + goto fail; + } + if (typeend - type == len) { kind = type[0]; diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 664f4e028151..0595fb60240f 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -96,6 +96,9 @@ def test_invalid_types(self): assert_raises(TypeError, np.dtype, 'q8') assert_raises(TypeError, np.dtype, 'Q8') + # Make sure negative-sized dtype raises an error + assert_raises(TypeError, np.dtype, 'S-1') + def test_richcompare_invalid_dtype_equality(self): # Make sure objects that cannot be converted to valid # dtypes results in False/True when compared to valid dtypes. 
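For reviewers, an illustrative doctest-style sketch of what the new `elsize < 0` check buys; this is not part of the patch, and the exact exception text is an assumption that may differ between builds:

    >>> import numpy as np
    >>> np.dtype('S-1')   # negative size now rejected while parsing the spec
    Traceback (most recent call last):
        ...
    TypeError: data type 'S-1' not understood

Before this change the negative size could slip through `_convert_from_str` and only surface later as an allocation failure, which is why the `test_numeric.py` hunk that follows stops expecting `_ArrayMemoryError`.
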
diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 3acbb20a1619..4dfc45cb180a 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -8,7 +8,6 @@ import numpy as np from numpy._core import umath, sctypes -from numpy._core._exceptions import _ArrayMemoryError from numpy._core.numerictypes import obj2sctype from numpy._core.arrayprint import set_string_function from numpy.exceptions import AxisError @@ -3345,7 +3344,7 @@ class MyNDArray(np.ndarray): assert_(type(b) is not MyNDArray) # Test invalid dtype - with assert_raises(_ArrayMemoryError): + with assert_raises(TypeError): a = np.array(b"abc") like_function(a, dtype="S-1", **fill_kwarg) From 695733ad16491a78506f7a9e94faea057eb56a1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20B=C3=A1rias?= Date: Mon, 11 Mar 2024 19:51:22 +0000 Subject: [PATCH 034/980] TST: add two new tests for negative-sized dtypes --- numpy/_core/tests/test_dtype.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 0595fb60240f..eb4f915ee452 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -98,6 +98,8 @@ def test_invalid_types(self): # Make sure negative-sized dtype raises an error assert_raises(TypeError, np.dtype, 'S-1') + assert_raises(TypeError, np.dtype, 'U-1') + assert_raises(TypeError, np.dtype, 'V-1') def test_richcompare_invalid_dtype_equality(self): # Make sure objects that cannot be converted to valid From 5096e24ae8806873953ab7ba916d64ef24995b7d Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 11 Mar 2024 23:43:28 +0200 Subject: [PATCH 035/980] BUG: fix kwarg handling in assert_warn [skip cirrus][skip azp] --- numpy/testing/_private/utils.py | 4 +++- numpy/testing/tests/test_utils.py | 13 +++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index bae98964f9d4..88fbb51a1dd3 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1949,8 +1949,10 @@ def assert_warns(warning_class, *args, **kwargs): >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) >>> assert ret == 16 """ - if not args: + if not args and not kwargs: return _assert_warns_context(warning_class) + elif len(args) < 1: + raise RuntimeError("assert_warns called without args") func = args[0] args = args[1:] diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 36f9c1617f44..b9e4f03ab208 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -1035,6 +1035,19 @@ def no_warnings(): assert_equal(before_filters, after_filters, "assert_warns does not preserver warnings state") + def test_args(self): + def f(a=0, b=1): + warnings.warn("yo") + return a + b + + assert assert_warns(UserWarning, f, b=20) == 20 + + with pytest.raises(RuntimeError) as exc: + # assert_warns cannot do regexp matching, use pytest.warn + with assert_warns(UserWarning, match="A"): + warnings.warn("B", UserWarning) + assert "assert_warns" in str(exc) + def test_warn_wrong_warning(self): def f(): warnings.warn("yo", DeprecationWarning) From b081b4d388f72df59a7862f98d11bdfd3d0884e3 Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 12 Mar 2024 08:38:47 +0200 Subject: [PATCH 036/980] improve error message (from review) [skip cirrus][skip azp] --- numpy/testing/_private/utils.py | 7 ++++++- numpy/testing/tests/test_utils.py | 12 ++++++++++-- 
2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 88fbb51a1dd3..8e33f319b11f 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1952,7 +1952,12 @@ def assert_warns(warning_class, *args, **kwargs): if not args and not kwargs: return _assert_warns_context(warning_class) elif len(args) < 1: - raise RuntimeError("assert_warns called without args") + if "match" in kwargs: + raise RuntimeError( + "assert_warns does not use 'match' kwarg, " + "use pytest.warns instead" + ) + raise RuntimeError("assert_warns(...) needs at least one arg") func = args[0] args = args[1:] diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index b9e4f03ab208..247bbeaec6f7 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -1043,10 +1043,18 @@ def f(a=0, b=1): assert assert_warns(UserWarning, f, b=20) == 20 with pytest.raises(RuntimeError) as exc: - # assert_warns cannot do regexp matching, use pytest.warn + # assert_warns cannot do regexp matching, use pytest.warns with assert_warns(UserWarning, match="A"): warnings.warn("B", UserWarning) - assert "assert_warns" in str(exc) + assert "assert_warns" in str(exc) + assert "pytest.warns" in str(exc) + + with pytest.raises(RuntimeError) as exc: + # assert_warns cannot do regexp matching, use pytest.warns + with assert_warns(UserWarning, wrong="A"): + warnings.warn("B", UserWarning) + assert "assert_warns" in str(exc) + assert "pytest.warns" not in str(exc) def test_warn_wrong_warning(self): def f(): From cd9937b2deb17d05cf5a879d80c3a072baf35fc8 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Tue, 12 Mar 2024 10:44:35 +0000 Subject: [PATCH 037/980] BUG: Filter out broken Highway platform --- numpy/_core/meson.build | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 5679b826aa6e..05bc00c6d3da 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -96,7 +96,10 @@ if use_svml endif endif -use_highway = not get_option('disable-highway') +use_highway = not get_option('disable-highway') and not ( + host_machine.system() == 'darwin' and + cpu_family == 'aarch64' +) if use_highway and not fs.exists('src/highway/README.md') error('Missing the `highway` git submodule! Run `git submodule update --init` to fix this.') endif From c8df8d2aa239a318f3bcd57daf2fd265657e1114 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Mar 2024 17:41:53 +0000 Subject: [PATCH 038/980] MAINT: Bump pypa/cibuildwheel from 2.16.5 to 2.17.0 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.16.5 to 2.17.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/ce3fb7832089eb3e723a0a99cab7f3eaccf074fd...8d945475ac4b1aac4ae08b2fd27db9917158b6ce) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 7955b71de9fd..e387ca1a9c02 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -149,7 +149,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@ce3fb7832089eb3e723a0a99cab7f3eaccf074fd # v2.16.5 + uses: pypa/cibuildwheel@8d945475ac4b1aac4ae08b2fd27db9917158b6ce # v2.17.0 env: CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From caddddab086dbd5ed32647cc169670969a9eefeb Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 12 Mar 2024 12:08:32 -0600 Subject: [PATCH 039/980] DOC: indicate stringdtype support in ufunc docstrings --- .../_core/code_generators/ufunc_docstrings.py | 22 +++--- numpy/_core/strings.py | 74 +++++++++---------- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 711db8d2e6a4..534ed44406ba 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -4448,7 +4448,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x : array_like, with `np.bytes_` or `np.str_` dtype + x : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype $PARAMS Returns @@ -4477,7 +4477,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x : array_like, with `np.bytes_` or `np.str_` dtype + x : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype $PARAMS Returns @@ -4507,7 +4507,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x : array_like, with `np.bytes_` or `np.str_` dtype + x : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype $PARAMS Returns @@ -4537,7 +4537,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x : array_like, with `np.bytes_` or `np.str_` dtype + x : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype $PARAMS Returns @@ -4893,11 +4893,11 @@ def add_newdoc(place, name, doc): Parameters ---------- - x1 : array_like, with `np.bytes_` or `np.str_` dtype + x1 : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype x2 : array_like, with any integer dtype The length of the resulting strings, unless ``width < str_len(a)``. - x3 : array_like, with `np.bytes_` or `np.str_` dtype + x3 : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype The padding character to use. $PARAMS @@ -4932,11 +4932,11 @@ def add_newdoc(place, name, doc): Parameters ---------- - x1 : array_like, with `np.bytes_` or `np.str_` dtype + x1 : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype x2 : array_like, with any integer dtype The length of the resulting strings, unless ``width < str_len(a)``. - x3 : array_like, with `np.bytes_` or `np.str_` dtype + x3 : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype The padding character to use. $PARAMS @@ -4967,11 +4967,11 @@ def add_newdoc(place, name, doc): Parameters ---------- - x1 : array_like, with `np.bytes_` or `np.str_` dtype + x1 : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype x2 : array_like, with any integer dtype The length of the resulting strings, unless ``width < str_len(a)``. - x3 : array_like, with `np.bytes_` or `np.str_` dtype + x3 : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype The padding character to use. 
$PARAMS @@ -5003,7 +5003,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x1 : array_like, with `np.bytes_` or `np.str_` dtype + x1 : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype x2 : array_like, with any integer dtype Width of string to left-fill elements in `a`. diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index d77340b25b2e..0b9de873d929 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -118,7 +118,7 @@ def multiply(a, i): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype i : array_like, with any integer dtype @@ -200,7 +200,7 @@ def find(a, sub, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype sub : array_like, with `np.bytes_` or `np.str_` dtype The substring to search for. @@ -236,9 +236,9 @@ def rfind(a, sub, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - sub : array_like, with `np.bytes_` or `np.str_` dtype + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype The substring to search for. start, end : array_like, with any integer dtype @@ -264,9 +264,9 @@ def index(a, sub, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - sub : array_like, with `np.bytes_` or `np.str_` dtype + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype start, end : array_like, with any integer dtype, optional @@ -330,9 +330,9 @@ def count(a, sub, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - sub : array_like, with `np.bytes_` or `np.str_` dtype + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype The substring to search for. start, end : array_like, with any integer dtype @@ -373,9 +373,9 @@ def startswith(a, prefix, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - prefix : array_like, with `np.bytes_` or `np.str_` dtype + prefix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype start, end : array_like, with any integer dtype With ``start``, test beginning at that position. With ``end``, @@ -402,9 +402,9 @@ def endswith(a, suffix, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - suffix : array_like, with `np.bytes_` or `np.str_` dtype + suffix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype start, end : array_like, with any integer dtype With ``start``, test beginning at that position. 
With ``end``, @@ -444,7 +444,7 @@ def decode(a, encoding=None, errors=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``bytes_`` dtype encoding : str, optional The name of an encoding @@ -490,7 +490,7 @@ def encode(a, encoding=None, errors=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``StringDType`` or ``str_`` dtype encoding : str, optional The name of an encoding @@ -538,7 +538,7 @@ def expandtabs(a, tabsize=8): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array tabsize : int, optional Replace tabs with `tabsize` number of spaces. If not given defaults @@ -579,12 +579,12 @@ def center(a, width, fillchar=' '): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype width : array_like, with any integer dtype The length of the resulting strings, unless ``width < str_len(a)``. - fillchar : array_like, with `np.bytes_` or `np.str_` dtype, optional - The padding character to use (default is space). + fillchar : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Optional padding character to use (default is space). Returns ------- @@ -637,12 +637,12 @@ def ljust(a, width, fillchar=' '): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype width : array_like, with any integer dtype The length of the resulting strings, unless ``width < str_len(a)``. - fillchar : array_like, with `np.bytes_` or `np.str_` dtype, optional - The character to use for padding (default is space). + fillchar : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Optional character to use for padding (default is space). Returns ------- @@ -692,12 +692,12 @@ def rjust(a, width, fillchar=' '): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype width : array_like, with any integer dtype The length of the resulting strings, unless ``width < str_len(a)``. - fillchar : array_like, with `np.bytes_` or `np.str_` dtype, optional - The character to use for padding + fillchar : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Optional padding character to use (default is space). Returns ------- @@ -748,7 +748,7 @@ def zfill(a, width): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype width : array_like, with any integer dtype Width of string to left-fill elements in `a`. @@ -919,7 +919,7 @@ def upper(a): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array. Returns @@ -953,7 +953,7 @@ def lower(a): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array. Returns @@ -988,7 +988,7 @@ def swapcase(a): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array. 
Returns @@ -1025,7 +1025,7 @@ def capitalize(a): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array of strings to capitalize. Returns @@ -1065,7 +1065,7 @@ def title(a): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array. Returns @@ -1154,8 +1154,8 @@ def join(sep, seq): Parameters ---------- - sep : array_like, with `np.bytes_` or `np.str_` dtype - seq : array_like, with `np.bytes_` or `np.str_` dtype + sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + seq : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Returns ------- @@ -1188,7 +1188,7 @@ def split(a, sep=None, maxsplit=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype sep : str or unicode, optional If `sep` is not specified or None, any whitespace string is a @@ -1234,7 +1234,7 @@ def rsplit(a, sep=None, maxsplit=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype sep : str or unicode, optional If `sep` is not specified or None, any whitespace string @@ -1274,7 +1274,7 @@ def splitlines(a, keepends=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype keepends : bool, optional Line breaks are not included in the resulting list unless @@ -1308,7 +1308,7 @@ def partition(a, sep): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array sep : {str, unicode} Separator to split each string element in `a`. @@ -1349,7 +1349,7 @@ def rpartition(a, sep): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array sep : str or unicode Right-most separator to split each element in array. From e87df0e2c72d68f88a2d9485e9a11b110bf1e814 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 12 Mar 2024 12:12:04 -0600 Subject: [PATCH 040/980] DOC: add release note for #25908 --- doc/release/upcoming_changes/25908.improvement.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/25908.improvement.rst diff --git a/doc/release/upcoming_changes/25908.improvement.rst b/doc/release/upcoming_changes/25908.improvement.rst new file mode 100644 index 000000000000..ad7a2cdfdc2e --- /dev/null +++ b/doc/release/upcoming_changes/25908.improvement.rst @@ -0,0 +1,5 @@ +``center``, ``ljust``, ``rjust``, and ``zfill`` are now implemented using ufuncs +-------------------------------------------------------------------------------- + +The text justification functions in `numpy.strings` are now implemented using +ufuncs under the hood and should be significantly faster. 
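To make the release note concrete, here is a doctest-style sketch of the now-ufunc-backed justification functions; it is illustrative only and assumes a build with the earlier patches in this series applied:

    >>> import numpy as np
    >>> a = np.array(['a1b2', '1b2a'])
    >>> np.strings.center(a, 8, '*')   # dispatches to the _center ufunc
    array(['**a1b2**', '**1b2a**'], dtype='<U8')
    >>> np.strings.zfill(['-15', '7'], 5)
    array(['-0015', '00007'], dtype='<U5')

Because these are ufuncs, `width` and `fillchar` broadcast like ordinary ufunc operands, so a single call can mix per-element widths and fill characters.
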
From 0321fbf87da4a8257a0bb6ef97c3e673d07e83a4 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 12 Mar 2024 13:10:10 -0600 Subject: [PATCH 041/980] TST: remove usage of ProcessPoolExecutor in stringdtype tests --- numpy/_core/tests/test_stringdtype.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 5c488093e208..f35130d55d63 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -362,13 +362,6 @@ def test_isnan(dtype, string_list): assert not np.any(np.isnan(sarr)) -def _pickle_load(filename): - with open(filename, "rb") as f: - res = pickle.load(f) - - return res - -@pytest.mark.skipif(IS_WASM, reason="no threading support in wasm") def test_pickle(dtype, string_list): arr = np.array(string_list, dtype=dtype) @@ -381,15 +374,6 @@ def test_pickle(dtype, string_list): assert_array_equal(res[0], arr) assert res[1] == dtype - # load the pickle in a subprocess to ensure the string data are - # actually stored in the pickle file - with concurrent.futures.ProcessPoolExecutor() as executor: - e = executor.submit(_pickle_load, f.name) - res = e.result() - - assert_array_equal(res[0], arr) - assert res[1] == dtype - os.remove(f.name) From 414aa34920f94ac2637182bafff9dddca538e1c6 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 11 Mar 2024 17:21:55 -0600 Subject: [PATCH 042/980] MAINT: Remove sdist task from pavement.py The sdist task is now taken over by meson. The task also depended on getting the version from setup.py, which is gone. That could be fixed using tomli, but there is no point in doing so. [skip actions] [skip circle] [skip azp] [skip cirrus] --- pavement.py | 64 ----------------------------------------------------- 1 file changed, 64 deletions(-) diff --git a/pavement.py b/pavement.py index f205f1f40839..43dc28675eb9 100644 --- a/pavement.py +++ b/pavement.py @@ -50,70 +50,6 @@ installersdir=os.path.join("release", "installers")),) -#------------------------ -# Get the release version -#------------------------ - -sys.path.insert(0, os.path.dirname(__file__)) -try: - from setup import FULLVERSION -finally: - sys.path.pop(0) - - -#-------------------------- -# Source distribution stuff -#-------------------------- -def tarball_name(ftype='gztar'): - """Generate source distribution name - - Parameters - ---------- - ftype : {'zip', 'gztar'} - Type of archive, default is 'gztar'. - - """ - root = f'numpy-{FULLVERSION}' - if ftype == 'gztar': - return root + '.tar.gz' - elif ftype == 'zip': - return root + '.zip' - raise ValueError(f"Unknown type {type}") - - -@task -def sdist(options): - """Make source distributions. - - Parameters - ---------- - options : - Set by ``task`` decorator. - - """ - # First clean the repo and update submodules (for up-to-date doc html theme - # and Sphinx extensions) - sh('git clean -xdf') - sh('git submodule init') - sh('git submodule update') - - # To be sure to bypass paver when building sdist... paver + numpy.distutils - # do not play well together. - # Cython is run over all Cython files in setup.py, so generated C files - # will be included. 
- sh('python3 setup.py sdist --formats=gztar,zip') - - # Copy the superpack into installers dir - idirs = options.installers.installersdir - if not os.path.exists(idirs): - os.makedirs(idirs) - - for ftype in ['gztar', 'zip']: - source = os.path.join('dist', tarball_name(ftype)) - target = os.path.join(idirs, tarball_name(ftype)) - shutil.copy(source, target) - - #------------- # README stuff #------------- From 000c418b23765577c69b764e856a8eb8f428f2e1 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 12 Mar 2024 20:53:32 +0100 Subject: [PATCH 043/980] DOC: mention the `exceptions` namespace in the 2.0.0 release notes Closes gh-25966 [skip actions] [skip azp] [skip cirrus] (cherry picked from commit 6ed2008de7adf33752e5641eb017ec9c06ae4034) --- doc/source/release/2.0.0-notes.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst index e43e54fb9cbc..216c38bb1538 100644 --- a/doc/source/release/2.0.0-notes.rst +++ b/doc/source/release/2.0.0-notes.rst @@ -149,6 +149,10 @@ NumPy 2.0 Python API removals (`gh-24321 `__) +* Warnings and exceptions present in `numpy.exceptions` (e.g, + `~numpy.exceptions.ComplexWarning`, + `~numpy.exceptions.VisibleDeprecationWarning`) are no longer exposed in the + main namespace. * Multiple niche enums, expired members and functions have been removed from the main namespace, such as: ``ERR_*``, ``SHIFT_*``, ``np.fastCopyAndTranspose``, ``np.kernel_version``, ``np.numarray``, ``np.oldnumeric`` and ``np.set_numeric_ops``. From 5ec8a3dda069e4fe3398e96324c0b833dfc50df8 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 12 Mar 2024 14:38:48 -0600 Subject: [PATCH 044/980] ENH: install StringDType promoter for add --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 41 ++++++++++++++++---- numpy/_core/tests/test_stringdtype.py | 10 +++++ 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index d2622859ef23..ed636fdf674e 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1043,9 +1043,9 @@ string_startswith_endswith_strided_loop(PyArrayMethod_Context *context, } static int -strip_chars_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], - PyArray_DTypeMeta *new_op_dtypes[]) +all_strings_promoter(PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *new_op_dtypes[]) { new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_StringDType); @@ -2312,6 +2312,28 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } + PyArray_DTypeMeta *rall_strings_promoter_dtypes[] = { + &PyArray_StringDType, + &PyArray_UnicodeDType, + &PyArray_StringDType, + }; + + if (add_promoter(umath, "add", rall_strings_promoter_dtypes, 3, + all_strings_promoter) < 0) { + return -1; + } + + PyArray_DTypeMeta *lall_strings_promoter_dtypes[] = { + &PyArray_UnicodeDType, + &PyArray_StringDType, + &PyArray_StringDType, + }; + + if (add_promoter(umath, "add", lall_strings_promoter_dtypes, 3, + all_strings_promoter) < 0) { + return -1; + } + INIT_MULTIPLY(Int64, int64); INIT_MULTIPLY(UInt64, uint64); @@ -2446,10 +2468,6 @@ init_stringdtype_ufuncs(PyObject *umath) "_lstrip_chars", "_rstrip_chars", "_strip_chars", }; - PyArray_DTypeMeta *strip_chars_promoter_dtypes[] = { - &PyArray_StringDType, 
&PyArray_UnicodeDType, &PyArray_StringDType - }; - for (int i=0; i<3; i++) { if (init_ufunc(umath, strip_chars_names[i], strip_chars_dtypes, &strip_chars_resolve_descriptors, @@ -2460,7 +2478,14 @@ init_stringdtype_ufuncs(PyObject *umath) } if (add_promoter(umath, strip_chars_names[i], - strip_chars_promoter_dtypes, 3, strip_chars_promoter) < 0) { + rall_strings_promoter_dtypes, 3, + all_strings_promoter) < 0) { + return -1; + } + + if (add_promoter(umath, strip_chars_names[i], + lall_strings_promoter_dtypes, 3, + all_strings_promoter) < 0) { return -1; } } diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 5c488093e208..812ce0917164 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -730,6 +730,16 @@ def test_ufunc_add(dtype, string_list, other_strings, use_out): np.add(arr1, arr2) +def test_add_promoter(string_list): + arr = np.array(string_list, dtype=StringDType()) + lresult = np.array(["hello" + s for s in string_list], dtype=StringDType()) + rresult = np.array([s + "hello" for s in string_list], dtype=StringDType()) + + for op in ["hello", np.str_("hello"), np.array(["hello"])]: + assert_array_equal(op + arr, lresult) + assert_array_equal(arr + op, rresult) + + @pytest.mark.parametrize("use_out", [True, False]) @pytest.mark.parametrize("other", [2, [2, 1, 3, 4, 1, 3]]) @pytest.mark.parametrize( From 535b2da5532163381bec3c67b2d65cf51e736a62 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 13 Mar 2024 09:05:25 +0100 Subject: [PATCH 045/980] MAINT: remove the now-unused `NPY_NO_SIGNAL` The `npy_interrupt.h` header that needed it was removed in gh-23919 for 2.0.0. --- numpy/_core/include/numpy/_numpyconfig.h.in | 1 - numpy/_core/meson.build | 3 --- 2 files changed, 4 deletions(-) diff --git a/numpy/_core/include/numpy/_numpyconfig.h.in b/numpy/_core/include/numpy/_numpyconfig.h.in index 0491877e3164..1ced266ecf58 100644 --- a/numpy/_core/include/numpy/_numpyconfig.h.in +++ b/numpy/_core/include/numpy/_numpyconfig.h.in @@ -19,7 +19,6 @@ #mesondefine NPY_USE_C99_FORMATS -#mesondefine NPY_NO_SIGNAL #mesondefine NPY_NO_SMP #mesondefine NPY_VISIBILITY_HIDDEN diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 05bc00c6d3da..61f42da21009 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -491,9 +491,6 @@ endif if cc.has_header('sys/endian.h') cdata.set10('NPY_HAVE_SYS_ENDIAN_H', true) endif -if is_windows - cdata.set10('NPY_NO_SIGNAL', true) -endif # Command-line switch; distutils build checked for `NPY_NOSMP` env var instead # TODO: document this (search for NPY_NOSMP in C API docs) cdata.set10('NPY_NO_SMP', get_option('disable-threading')) From 5f7fab9bd337bc016ed1636905dc2394c63bae48 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 13 Mar 2024 09:45:36 +0100 Subject: [PATCH 046/980] MAINT: remove now-unused `NPY_USE_C99_FORMAT` The need for this define was removed with gh-24888. A code search shows no external usages, and the few times this shows up on the issue tracker it's always defined to `1`. 
[skip cirrus] [skip azp] [skip circle] --- numpy/_core/include/numpy/_numpyconfig.h.in | 2 -- numpy/_core/meson.build | 5 ----- 2 files changed, 7 deletions(-) diff --git a/numpy/_core/include/numpy/_numpyconfig.h.in b/numpy/_core/include/numpy/_numpyconfig.h.in index 1ced266ecf58..665003112957 100644 --- a/numpy/_core/include/numpy/_numpyconfig.h.in +++ b/numpy/_core/include/numpy/_numpyconfig.h.in @@ -17,8 +17,6 @@ #mesondefine NPY_SIZEOF_PY_LONG_LONG #mesondefine NPY_SIZEOF_LONGLONG -#mesondefine NPY_USE_C99_FORMATS - #mesondefine NPY_NO_SMP #mesondefine NPY_VISIBILITY_HIDDEN diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 61f42da21009..0e4ee8b3d6a9 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -495,11 +495,6 @@ endif # TODO: document this (search for NPY_NOSMP in C API docs) cdata.set10('NPY_NO_SMP', get_option('disable-threading')) -# Check whether we can use inttypes (C99) formats -if cc.has_header_symbol('inttypes.h', 'PRIdPTR') - cdata.set10('NPY_USE_C99_FORMATS', true) -endif - visibility_hidden = '' if cc.has_function_attribute('visibility:hidden') and host_machine.system() != 'cygwin' visibility_hidden = '__attribute__((visibility("hidden")))' From 80871f55cb4e39d752a41e39c49bb862d7f57248 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 13 Mar 2024 12:39:22 +0100 Subject: [PATCH 047/980] MAINT: handle `NPY_ALLOW_THREADS` and related build option better The option to disable threading in CPython was removed a long time ago (see cpython#3385); `WITH_THREAD` was kept defined for backwards compatibility, but can never change - so best to remove checking for it. The docs still mentioned the old `NPY_NOSMP` environment variable, support for which was removed with the move to Meson. Instead, there is a `disable-threading` build option, so document that. --- doc/source/reference/c-api/array.rst | 9 +++------ numpy/_core/include/numpy/_numpyconfig.h.in | 5 +++++ numpy/_core/include/numpy/ndarraytypes.h | 4 ++-- numpy/_core/meson.build | 4 ++-- numpy/_core/src/common/python_xerbla.c | 6 ------ numpy/linalg/lapack_lite/python_xerbla.c | 6 ------ 6 files changed, 12 insertions(+), 22 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 84549012e95b..447871e644cf 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -4032,15 +4032,12 @@ variables), the GIL should be released so that other Python threads can run while the time-consuming calculations are performed. This can be accomplished using two groups of macros. Typically, if one macro in a group is used in a code block, all of them must be used in the same -code block. Currently, :c:data:`NPY_ALLOW_THREADS` is defined to the -python-defined :c:data:`WITH_THREADS` constant unless the environment -variable ``NPY_NOSMP`` is set in which case -:c:data:`NPY_ALLOW_THREADS` is defined to be 0. +code block. :c:data:`NPY_ALLOW_THREADS` is true (defined as ``1``) unless the +build option ``-Ddisable-threading`` is set to ``true`` - in which case +:c:data:`NPY_ALLOW_THREADS` is false (``0``). .. c:macro:: NPY_ALLOW_THREADS -.. 
c:macro:: WITH_THREADS - Group 1 ^^^^^^^ diff --git a/numpy/_core/include/numpy/_numpyconfig.h.in b/numpy/_core/include/numpy/_numpyconfig.h.in index 665003112957..79b2ee3449a5 100644 --- a/numpy/_core/include/numpy/_numpyconfig.h.in +++ b/numpy/_core/include/numpy/_numpyconfig.h.in @@ -17,6 +17,11 @@ #mesondefine NPY_SIZEOF_PY_LONG_LONG #mesondefine NPY_SIZEOF_LONGLONG +/* + * Defined to 1 or 0. Note that Pyodide hardcodes NPY_NO_SMP (and other defines + * in this header) for better cross-compilation, so don't rename them without a + * good reason. + */ #mesondefine NPY_NO_SMP #mesondefine NPY_VISIBILITY_HIDDEN diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 31aa3e4d330e..09d3816feb52 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -8,8 +8,8 @@ #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP +/* Always allow threading unless it was explicitly disabled at build time */ +#if !NPY_NO_SMP #define NPY_ALLOW_THREADS 1 #else #define NPY_ALLOW_THREADS 0 diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 0e4ee8b3d6a9..8fc377aeedde 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -491,8 +491,8 @@ endif if cc.has_header('sys/endian.h') cdata.set10('NPY_HAVE_SYS_ENDIAN_H', true) endif -# Command-line switch; distutils build checked for `NPY_NOSMP` env var instead -# TODO: document this (search for NPY_NOSMP in C API docs) +# Build-time option to disable threading is stored and exposed in numpyconfig.h +# Note: SMP is an old acronym for threading (Symmetric/Shared-memory MultiProcessing) cdata.set10('NPY_NO_SMP', get_option('disable-threading')) visibility_hidden = '' diff --git a/numpy/_core/src/common/python_xerbla.c b/numpy/_core/src/common/python_xerbla.c index 37a41408be22..71a4c81edbf1 100644 --- a/numpy/_core/src/common/python_xerbla.c +++ b/numpy/_core/src/common/python_xerbla.c @@ -28,22 +28,16 @@ CBLAS_INT BLAS_FUNC(xerbla)(char *srname, CBLAS_INT *info) char buf[sizeof(format) + 6 + 4]; /* 6 for name, 4 for param. num. */ int len = 0; /* length of subroutine name*/ -#ifdef WITH_THREAD PyGILState_STATE save; -#endif while( len<6 && srname[len]!='\0' ) len++; while( len && srname[len-1]==' ' ) len--; -#ifdef WITH_THREAD save = PyGILState_Ensure(); -#endif PyOS_snprintf(buf, sizeof(buf), format, len, srname, (int)*info); PyErr_SetString(PyExc_ValueError, buf); -#ifdef WITH_THREAD PyGILState_Release(save); -#endif return 0; } diff --git a/numpy/linalg/lapack_lite/python_xerbla.c b/numpy/linalg/lapack_lite/python_xerbla.c index 37a41408be22..71a4c81edbf1 100644 --- a/numpy/linalg/lapack_lite/python_xerbla.c +++ b/numpy/linalg/lapack_lite/python_xerbla.c @@ -28,22 +28,16 @@ CBLAS_INT BLAS_FUNC(xerbla)(char *srname, CBLAS_INT *info) char buf[sizeof(format) + 6 + 4]; /* 6 for name, 4 for param. num. 
*/ int len = 0; /* length of subroutine name*/ -#ifdef WITH_THREAD PyGILState_STATE save; -#endif while( len<6 && srname[len]!='\0' ) len++; while( len && srname[len-1]==' ' ) len--; -#ifdef WITH_THREAD save = PyGILState_Ensure(); -#endif PyOS_snprintf(buf, sizeof(buf), format, len, srname, (int)*info); PyErr_SetString(PyExc_ValueError, buf); -#ifdef WITH_THREAD PyGILState_Release(save); -#endif return 0; } From fb570eb0620db2b35db8e04bd98967bfc53ed677 Mon Sep 17 00:00:00 2001 From: Raquel Braunschweig Date: Fri, 8 Mar 2024 22:42:10 +0000 Subject: [PATCH 048/980] BUG: Fixes np.put receiving empty array issues Closes #25744 refactor: update error message --- numpy/_core/src/multiarray/item_selection.c | 6 ++++++ numpy/_core/tests/test_multiarray.py | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 7f7d8394d6f3..3698b8fc532d 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -384,6 +384,12 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, return NULL; } + if (PyArray_Size((PyObject *)self) == 0) { + PyErr_SetString(PyExc_IndexError, + "put: cannot do a put on an empty array"); + return NULL; + } + if (PyArray_FailUnlessWriteable(self, "put: output array") < 0) { return NULL; } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 4a75d96fc06e..fcf7be5443d4 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -3501,6 +3501,12 @@ def test_put(self): bad_array = [1, 2, 3] assert_raises(TypeError, np.put, bad_array, [0, 2], 5) + # when calling np.put, make sure an + # IndexError is raised if the + # array is empty + empty_array = np.asarray(list()) + assert_raises(IndexError, np.put, empty_array, 1, 1, mode="wrap") + def test_ravel(self): a = np.array([[0, 1], [2, 3]]) assert_equal(a.ravel(), [0, 1, 2, 3]) From 1b5b3ae1decaf4a937682b6149777677e7c556b2 Mon Sep 17 00:00:00 2001 From: Raquel Braunschweig Date: Sat, 9 Mar 2024 21:02:10 +0000 Subject: [PATCH 049/980] BUG: update if condition to allow empty indices --- numpy/_core/src/multiarray/item_selection.c | 11 +++++------ numpy/_core/tests/test_multiarray.py | 1 + 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 3698b8fc532d..98b334659643 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -384,12 +384,6 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, return NULL; } - if (PyArray_Size((PyObject *)self) == 0) { - PyErr_SetString(PyExc_IndexError, - "put: cannot do a put on an empty array"); - return NULL; - } - if (PyArray_FailUnlessWriteable(self, "put: output array") < 0) { return NULL; } @@ -400,6 +394,11 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, goto fail; } ni = PyArray_SIZE(indices); + if ((ni > 0) && (PyArray_Size((PyObject *)self) == 0)) { + PyErr_SetString(PyExc_IndexError, + "put: cannot do a non empty put on an empty array"); + return NULL; + } Py_INCREF(PyArray_DESCR(self)); values = (PyArrayObject *)PyArray_FromAny(values0, PyArray_DESCR(self), 0, 0, NPY_ARRAY_DEFAULT | NPY_ARRAY_FORCECAST, NULL); diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index fcf7be5443d4..57f42cacc19e 100644 --- 
a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -3506,6 +3506,7 @@ def test_put(self): # array is empty empty_array = np.asarray(list()) assert_raises(IndexError, np.put, empty_array, 1, 1, mode="wrap") + assert_raises(IndexError, np.put, empty_array, 1, 1, mode="clip") def test_ravel(self): a = np.array([[0, 1], [2, 3]]) From 45107b0c67960f816fa0ddb0428a25cb88eda42a Mon Sep 17 00:00:00 2001 From: Raquel Braunschweig Date: Wed, 13 Mar 2024 20:27:54 +0000 Subject: [PATCH 050/980] TST: Update tests to assert specific error message in np.put --- numpy/_core/src/multiarray/item_selection.c | 2 +- numpy/_core/tests/test_multiarray.py | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 98b334659643..37e33c28a944 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -396,7 +396,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, ni = PyArray_SIZE(indices); if ((ni > 0) && (PyArray_Size((PyObject *)self) == 0)) { PyErr_SetString(PyExc_IndexError, - "put: cannot do a non empty put on an empty array"); + "cannot replace elements of an empty array"); return NULL; } Py_INCREF(PyArray_DESCR(self)); diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 57f42cacc19e..2f757298029f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -3505,8 +3505,13 @@ def test_put(self): # IndexError is raised if the # array is empty empty_array = np.asarray(list()) - assert_raises(IndexError, np.put, empty_array, 1, 1, mode="wrap") - assert_raises(IndexError, np.put, empty_array, 1, 1, mode="clip") + with pytest.raises(IndexError, + match="cannot replace elements of an empty array"): + np.put(empty_array, 1, 1, mode="wrap") + with pytest.raises(IndexError, + match="cannot replace elements of an empty array"): + np.put(empty_array, 1, 1, mode="clip") + def test_ravel(self): a = np.array([[0, 1], [2, 3]]) From a542d44ed496ae6dd824fe79999fcfedf5f931c6 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 13 Mar 2024 14:24:19 -0600 Subject: [PATCH 051/980] MAINT: avoid use of flexible array member in public header --- numpy/_core/include/numpy/ndarraytypes.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 09d3816feb52..4c938d921043 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -1298,9 +1298,11 @@ typedef struct { * growing structs (as of Cython 3.0.6). It also allows NPY_MAXARGS * to be runtime dependent. */ -#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD - PyArrayIterObject *iters[64]; /* 64 is NPY_MAXARGS */ -#else /* not internal build */ +#if (defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) || defined(__cplusplus) + /* 64 is NPY_MAXARGS for numpy 2.0 or newer. We can't use a flexible + array member in C++ so use the internal size there. 
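
Two illustrative doctest-style sketches to close out this stretch of the series; neither is part of a patch. First, the `np.put` behavior pinned down by patches 048-050: the error string is the one introduced above, while empty index lists deliberately remain a no-op:

    >>> import numpy as np
    >>> empty = np.asarray([])
    >>> np.put(empty, 1, 1, mode='wrap')
    Traceback (most recent call last):
        ...
    IndexError: cannot replace elements of an empty array
    >>> np.put(empty, [], [])   # ni == 0, so the new check is skipped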
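Second, the `np.power` square fast path from patches 058-060: with a scalar exponent the inner loop sees a zero stride on the second operand (`steps[1] == 0`), and for an exponent of exactly `2.0` it computes `in1 * in1` instead of calling `npy_pow`; array-valued exponents keep the generic path. A sketch, assuming a build with those patches:

    >>> import numpy as np
    >>> x = np.linspace(0.0, 4.0, 5, dtype=np.float32)
    >>> np.power(x, 2.0)   # scalar exponent -> zero-stride fast path
    array([ 0.,  1.,  4.,  9., 16.], dtype=float32)
    >>> np.array_equal(np.power(x, 2.0), x * x)
    True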
*/ + PyArrayIterObject *iters[64]; +#else PyArrayIterObject *iters[]; #endif } PyArrayMultiIterObject; From 52579948475729d4c87c0484aa1cbfb93b6ea692 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 14 Mar 2024 12:39:21 -0600 Subject: [PATCH 052/980] BUG: raise error trying to coerce timedelta64('NaT') --- numpy/_core/src/multiarray/stringdtype/dtype.c | 3 +++ numpy/_core/tests/test_stringdtype.py | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 98f137d1a624..62b537eaf9db 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -638,6 +638,9 @@ stringdtype_is_known_scalar_type(PyArray_DTypeMeta *NPY_UNUSED(cls), if (pytype == &PyDatetimeArrType_Type) { return 1; } + if (pytype == &PyTimedeltaArrType_Type) { + return 1; + } return 0; } diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index f35130d55d63..7cf613fad34d 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -900,6 +900,12 @@ def test_nat_casts(): np.array([output_object]*arr.size, dtype=dtype)) +def test_nat_conversion(): + for nat in [np.datetime64("NaT", "s"), np.timedelta64("NaT", "s")]: + with pytest.raises(ValueError, match="string coercion is disabled"): + np.array(["a", nat], dtype=StringDType(coerce=False)) + + def test_growing_strings(dtype): # growing a string leads to a heap allocation, this tests to make sure # we do that bookkeeping correctly for all possible starting cases From 3c7af2a233c413648f08bcbc35924d77b987e144 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 14 Mar 2024 13:09:46 -0600 Subject: [PATCH 053/980] BUG: add missing error handling in string to int cast internals --- numpy/_core/src/multiarray/stringdtype/casts.c | 3 +++ numpy/_core/tests/test_stringdtype.py | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/numpy/_core/src/multiarray/stringdtype/casts.c b/numpy/_core/src/multiarray/stringdtype/casts.c index e6819e3212dd..6cb648624d85 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.c +++ b/numpy/_core/src/multiarray/stringdtype/casts.c @@ -571,6 +571,9 @@ string_to_pylong(char *in, int has_null, { PyObject *val_obj = non_nullable_string_to_pystring( in, has_null, default_string, allocator); + if (val_obj == NULL) { + return NULL; + } // interpret as an integer in base 10 PyObject *pylong_value = PyLong_FromUnicodeObject(val_obj, 10); Py_DECREF(val_obj); diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index f35130d55d63..4a32c28644dd 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -546,6 +546,10 @@ def test_sized_integer_casts(bitsize, signed): with pytest.raises(OverflowError): np.array(oob, dtype="T").astype(idtype) + with pytest.raises(ValueError): + np.array(["1", np.nan, "3"], + dtype=StringDType(na_object=np.nan)).astype(idtype) + @pytest.mark.parametrize("typename", ["byte", "short", "int", "longlong"]) @pytest.mark.parametrize("signed", ["", "u"]) From c7db12031c03557e97bab40501ef1b5b39045ec6 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 14 Mar 2024 12:54:16 -0600 Subject: [PATCH 054/980] BUG: fix reference count leak in __array__ internals --- numpy/_core/src/multiarray/ctors.c | 8 +++++++- numpy/_core/tests/test_multiarray.py | 19 +++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff 
--git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index a475f3986759..4c9d76991296 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2466,6 +2466,9 @@ PyArray_FromArrayAttr_int( if (new == NULL) { if (npy_ma_str_array_err_msg_substr == NULL) { + Py_DECREF(array_meth); + Py_DECREF(args); + Py_DECREF(kwargs); return NULL; } PyObject *type, *value, *traceback; @@ -2481,6 +2484,7 @@ PyArray_FromArrayAttr_int( "__array__ should implement 'dtype' and " "'copy' keywords", 1) < 0) { Py_DECREF(str_value); + Py_DECREF(array_meth); Py_DECREF(args); Py_DECREF(kwargs); return NULL; @@ -2490,6 +2494,7 @@ PyArray_FromArrayAttr_int( new = PyObject_Call(array_meth, args, kwargs); if (new == NULL) { Py_DECREF(str_value); + Py_DECREF(array_meth); Py_DECREF(args); Py_DECREF(kwargs); return NULL; @@ -2500,15 +2505,16 @@ PyArray_FromArrayAttr_int( } if (new == NULL) { PyErr_Restore(type, value, traceback); + Py_DECREF(array_meth); Py_DECREF(args); Py_DECREF(kwargs); return NULL; } } + Py_DECREF(array_meth); Py_DECREF(args); Py_DECREF(kwargs); - Py_DECREF(array_meth); if (!PyArray_Check(new)) { PyErr_SetString(PyExc_ValueError, diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 2f757298029f..cdc418897bcc 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -8492,6 +8492,25 @@ def __array__(self, dtype=None): "and 'copy' keywords")): np.array(a, copy=False) + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test__array__reference_leak(self): + class NotAnArray: + def __array__(self): + raise NotImplementedError() + + x = NotAnArray() + + refcount = sys.getrefcount(x) + + try: + np.array(x) + except NotImplementedError: + pass + + gc.collect() + + assert refcount == sys.getrefcount(x) + @pytest.mark.parametrize( "arr", [np.ones(()), np.arange(81).reshape((9, 9))]) @pytest.mark.parametrize("order1", ["C", "F", None]) From 6e11618a5750e674184bea767b3bf18d4ccebba4 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 14 Mar 2024 14:23:50 -0600 Subject: [PATCH 055/980] MNT: eliminate branching and duplication in is_known_scalar_type --- numpy/_core/src/multiarray/dtypemeta.c | 23 ++-- .../_core/src/multiarray/stringdtype/dtype.c | 107 +++++------------- 2 files changed, 33 insertions(+), 97 deletions(-) diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 626b3bde1032..acee68bad54f 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -838,22 +838,13 @@ python_builtins_are_known_scalar_types( * This is necessary only for python scalar classes which we discover * as valid DTypes. 
*/ - if (pytype == &PyFloat_Type) { - return 1; - } - if (pytype == &PyLong_Type) { - return 1; - } - if (pytype == &PyBool_Type) { - return 1; - } - if (pytype == &PyComplex_Type) { - return 1; - } - if (pytype == &PyUnicode_Type) { - return 1; - } - if (pytype == &PyBytes_Type) { + if (pytype == &PyFloat_Type || + pytype == &PyLong_Type || + pytype == &PyBool_Type || + pytype == &PyComplex_Type || + pytype == &PyUnicode_Type || + pytype == &PyBytes_Type) + { return 1; } return 0; diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 62b537eaf9db..a7b31dd4800e 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -554,91 +554,36 @@ stringdtype_get_clear_loop(void *NPY_UNUSED(traverse_context), } static int -stringdtype_is_known_scalar_type(PyArray_DTypeMeta *NPY_UNUSED(cls), +stringdtype_is_known_scalar_type(PyArray_DTypeMeta *cls, PyTypeObject *pytype) { - if (pytype == &PyFloat_Type) { + if (python_builtins_are_known_scalar_types(cls, pytype)) { return 1; } - if (pytype == &PyLong_Type) { - return 1; - } - if (pytype == &PyBool_Type) { - return 1; - } - if (pytype == &PyComplex_Type) { - return 1; - } - if (pytype == &PyUnicode_Type) { - return 1; - } - if (pytype == &PyBytes_Type) { - return 1; - } - if (pytype == &PyBoolArrType_Type) { - return 1; - } - if (pytype == &PyByteArrType_Type) { - return 1; - } - if (pytype == &PyShortArrType_Type) { - return 1; - } - if (pytype == &PyIntArrType_Type) { - return 1; - } - if (pytype == &PyLongArrType_Type) { - return 1; - } - if (pytype == &PyLongLongArrType_Type) { - return 1; - } - if (pytype == &PyUByteArrType_Type) { - return 1; - } - if (pytype == &PyUShortArrType_Type) { - return 1; - } - if (pytype == &PyUIntArrType_Type) { - return 1; - } - if (pytype == &PyULongArrType_Type) { - return 1; - } - if (pytype == &PyULongLongArrType_Type) { - return 1; - } - if (pytype == &PyHalfArrType_Type) { - return 1; - } - if (pytype == &PyFloatArrType_Type) { - return 1; - } - if (pytype == &PyDoubleArrType_Type) { - return 1; - } - if (pytype == &PyLongDoubleArrType_Type) { - return 1; - } - if (pytype == &PyCFloatArrType_Type) { - return 1; - } - if (pytype == &PyCDoubleArrType_Type) { - return 1; - } - if (pytype == &PyCLongDoubleArrType_Type) { - return 1; - } - if (pytype == &PyIntpArrType_Type) { - return 1; - } - if (pytype == &PyUIntpArrType_Type) { - return 1; - } - if (pytype == &PyDatetimeArrType_Type) { - return 1; - } - if (pytype == &PyTimedeltaArrType_Type) { + // accept every built-in numpy dtype + else if (pytype == &PyBoolArrType_Type || + pytype == &PyByteArrType_Type || + pytype == &PyShortArrType_Type || + pytype == &PyIntArrType_Type || + pytype == &PyLongArrType_Type || + pytype == &PyLongLongArrType_Type || + pytype == &PyUByteArrType_Type || + pytype == &PyUShortArrType_Type || + pytype == &PyUIntArrType_Type || + pytype == &PyULongArrType_Type || + pytype == &PyULongLongArrType_Type || + pytype == &PyHalfArrType_Type || + pytype == &PyFloatArrType_Type || + pytype == &PyDoubleArrType_Type || + pytype == &PyLongDoubleArrType_Type || + pytype == &PyCFloatArrType_Type || + pytype == &PyCDoubleArrType_Type || + pytype == &PyCLongDoubleArrType_Type || + pytype == &PyIntpArrType_Type || + pytype == &PyUIntpArrType_Type || + pytype == &PyDatetimeArrType_Type || + pytype == &PyTimedeltaArrType_Type) + { return 1; } return 0; From c943291e215b26ec07e5588b213624d46ab8ddaf Mon Sep 17 00:00:00 2001 From: 
Lysandros Nikolaou Date: Fri, 15 Mar 2024 10:58:10 +0100 Subject: [PATCH 056/980] MAINT: Remove partition and split-like functions from numpy.strings - Temporary removal of these functions until their behavior has been discussed further. --- numpy/_core/defchararray.py | 8 ++++++++ numpy/_core/strings.py | 22 +++++++++++++--------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 96dec7543101..44754a747cec 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -24,6 +24,14 @@ from numpy._core import overrides from numpy.strings import * from numpy.strings import multiply as strings_multiply +from numpy._core.strings import ( + _partition as partition, + _rpartition as rpartition, + _split as split, + _rsplit as rsplit, + _splitlines as splitlines, + _join as join, +) __all__ = [ 'equal', 'not_equal', 'greater_equal', 'less_equal', diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 0b9de873d929..2eceb2957166 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -54,9 +54,13 @@ "zfill", # _vec_string - Will gradually become ufuncs as well - "mod", "decode", "encode", "upper", "lower", "swapcase", "capitalize", - "title", "join", "split", "rsplit", "splitlines", "partition", - "rpartition", "translate", + "upper", "lower", "swapcase", "capitalize", "title", + + # _vec_string - Will probably not become ufuncs + "mod", "decode", "encode", "translate", + + # Removed from namespace until behavior has been crystalized + # "join", "split", "rsplit", "splitlines", "partition", "rpartition", ] @@ -1145,7 +1149,7 @@ def replace(a, old, new, count=-1): return _replace(arr, old, new, counts, out=out) -def join(sep, seq): +def _join(sep, seq): """ Return a string which is the concatenation of the strings in the sequence `seq`. @@ -1179,7 +1183,7 @@ def join(sep, seq): _vec_string(sep, np.object_, 'join', (seq,)), seq) -def split(a, sep=None, maxsplit=None): +def _split(a, sep=None, maxsplit=None): """ For each element in `a`, return a list of the words in the string, using `sep` as the delimiter string. @@ -1222,7 +1226,7 @@ def split(a, sep=None, maxsplit=None): a, np.object_, 'split', [sep] + _clean_args(maxsplit)) -def rsplit(a, sep=None, maxsplit=None): +def _rsplit(a, sep=None, maxsplit=None): """ For each element in `a`, return a list of the words in the string, using `sep` as the delimiter string. @@ -1265,7 +1269,7 @@ def rsplit(a, sep=None, maxsplit=None): a, np.object_, 'rsplit', [sep] + _clean_args(maxsplit)) -def splitlines(a, keepends=None): +def _splitlines(a, keepends=None): """ For each element in `a`, return a list of the lines in the element, breaking at line boundaries. @@ -1294,7 +1298,7 @@ def splitlines(a, keepends=None): a, np.object_, 'splitlines', _clean_args(keepends)) -def partition(a, sep): +def _partition(a, sep): """ Partition each element in `a` around `sep`. @@ -1335,7 +1339,7 @@ def partition(a, sep): _vec_string(a, np.object_, 'partition', (sep,)), a) -def rpartition(a, sep): +def _rpartition(a, sep): """ Partition (split) each element around the right-most separator. 
From 6f857506295e343344c14a87cf83b3fe52de3edc Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Fri, 15 Mar 2024 11:35:51 +0100 Subject: [PATCH 057/980] Skip doctests for removed functions --- numpy/_core/strings.py | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 2eceb2957166..baf4cb1e2cf1 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -1172,11 +1172,11 @@ def _join(sep, seq): Examples -------- - >>> np.strings.join('-', 'osd') - array('o-s-d', dtype='>> np.strings.join('-', 'osd') # doctest: +SKIP + array('o-s-d', dtype='>> np.strings.join(['-', '.'], ['ghc', 'osd']) - array(['g-h-c', 'o.s.d'], dtype='>> np.strings.join(['-', '.'], ['ghc', 'osd']) # doctest: +SKIP + array(['g-h-c', 'o.s.d'], dtype='>> x = np.array("Numpy is nice!") - >>> np.strings.split(x, " ") - array(list(['Numpy', 'is', 'nice!']), dtype=object) + >>> np.strings.split(x, " ") # doctest: +SKIP + array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP - >>> np.strings.split(x, " ", 1) - array(list(['Numpy', 'is nice!']), dtype=object) + >>> np.strings.split(x, " ", 1) # doctest: +SKIP + array(list(['Numpy', 'is nice!']), dtype=object) # doctest: +SKIP See Also -------- @@ -1259,8 +1259,9 @@ def _rsplit(a, sep=None, maxsplit=None): Examples -------- >>> a = np.array(['aAaAaA', 'abBABba']) - >>> np.strings.rsplit(a, 'A') - array([list(['a', 'a', 'a', '']), list(['abB', 'Bba'])], dtype=object) + >>> np.strings.rsplit(a, 'A') # doctest: +SKIP + array([list(['a', 'a', 'a', '']), # doctest: +SKIP + list(['abB', 'Bba'])], dtype=object) # doctest: +SKIP """ # This will return an array of lists of different sizes, so we @@ -1327,8 +1328,8 @@ def _partition(a, sep): Examples -------- >>> x = np.array(["Numpy is nice!"]) - >>> np.strings.partition(x, " ") - array([['Numpy', ' ', 'is nice!']], dtype='>> np.strings.partition(x, " ") # doctest: +SKIP + array([['Numpy', ' ', 'is nice!']], dtype='>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> np.strings.rpartition(a, 'A') - array([['aAaAa', 'A', ''], - [' a', 'A', ' '], - ['abB', 'A', 'Bba']], dtype='>> np.strings.rpartition(a, 'A') # doctest: +SKIP + array([['aAaAa', 'A', ''], # doctest: +SKIP + [' a', 'A', ' '], # doctest: +SKIP + ['abB', 'A', 'Bba']], dtype=' Date: Sat, 16 Mar 2024 21:18:57 +0100 Subject: [PATCH 058/980] ENH: Optimize np.power(x, 2) for float type --- numpy/_core/src/umath/fast_loop_macros.h | 10 ++++++++ .../src/umath/loops_umath_fp.dispatch.c.src | 24 ++++++++++++++++--- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/fast_loop_macros.h b/numpy/_core/src/umath/fast_loop_macros.h index b8c1926b2f7e..7545fe40fb3e 100644 --- a/numpy/_core/src/umath/fast_loop_macros.h +++ b/numpy/_core/src/umath/fast_loop_macros.h @@ -90,6 +90,16 @@ abs_ptrdiff(char *a, char *b) BINARY_DEFS\ BINARY_LOOP_SLIDING +/** (ip1, ip2) -> (op1), for case ip2 has zero stride*/ +#define BINARY_DEFS_ZERO_STRIDE\ + char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\ + npy_intp is1 = steps[0], os1 = steps[2];\ + npy_intp n = dimensions[0];\ + npy_intp i; + +#define BINARY_LOOP_SLIDING_ZERO_STRIDE \ + for (i = 0; i < n; i++, ip1 += is1, op1 += os1) + /** (ip1, ip2) -> (op1, op2) */ #define BINARY_LOOP_TWO_OUT\ char *ip1 = args[0], *ip2 = args[1], *op1 = args[2], *op2 = args[3];\ diff --git a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src index 
2390fb989190..e5405d78ac34 100644 --- a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src @@ -251,10 +251,28 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) return; } #endif - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; + if (steps[1]==0) { + BINARY_DEFS_ZERO_STRIDE const @type@ in2 = *(@type@ *)ip2; - *(@type@ *)op1 = npy_@intrin@@vsub@(in1, in2); + if (in2 == 2.0) { + BINARY_LOOP_SLIDING_ZERO_STRIDE { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = in1 * in1; + } + } + else { + BINARY_LOOP_SLIDING_ZERO_STRIDE { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = npy_@intrin@@vsub@(in1, in2); + } + } + } + else { + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *(@type@ *)op1 = npy_@intrin@@vsub@(in1, in2); + } } } /**end repeat1**/ From 852bfec6b714ab0f067c9bd58c16809e026ce955 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sat, 16 Mar 2024 21:25:37 +0100 Subject: [PATCH 059/980] fix macro --- numpy/_core/src/umath/fast_loop_macros.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/fast_loop_macros.h b/numpy/_core/src/umath/fast_loop_macros.h index 7545fe40fb3e..366facb5a100 100644 --- a/numpy/_core/src/umath/fast_loop_macros.h +++ b/numpy/_core/src/umath/fast_loop_macros.h @@ -95,7 +95,7 @@ abs_ptrdiff(char *a, char *b) char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\ npy_intp is1 = steps[0], os1 = steps[2];\ npy_intp n = dimensions[0];\ - npy_intp i; + npy_intp i;\ #define BINARY_LOOP_SLIDING_ZERO_STRIDE \ for (i = 0; i < n; i++, ip1 += is1, op1 += os1) From a4e208d39de323827226427c9c854796c6d31b12 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sat, 16 Mar 2024 21:52:09 +0100 Subject: [PATCH 060/980] apply only to power and not arctan2 --- .../_core/src/umath/loops_umath_fp.dispatch.c.src | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src index e5405d78ac34..476286fb259a 100644 --- a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src @@ -230,6 +230,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) /**begin repeat1 * #func = power, arctan2# * #intrin = pow, atan2# + * #ispower = 1, 0# */ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) @@ -251,6 +252,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) return; } #endif +#if @ispower@ if (steps[1]==0) { BINARY_DEFS_ZERO_STRIDE const @type@ in2 = *(@type@ *)ip2; @@ -266,13 +268,13 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) *(@type@ *)op1 = npy_@intrin@@vsub@(in1, in2); } } + return; } - else { - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *(@type@ *)op1 = npy_@intrin@@vsub@(in1, in2); - } +#endif + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *(@type@ *)op1 = npy_@intrin@@vsub@(in1, in2); } } /**end repeat1**/ From c42e3506b7daae0ff0b2f73e1ceb26b0a9f9af02 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 18 Mar 2024 12:25:31 +0100 Subject: [PATCH 061/980] MAINT: Add some const qualifiers to new DType related API These are mostly for the places where we pass arrays. 
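Before the full diff, a compact sketch of how the const-qualified API reads from a DType author's perspective (hypothetical `my_resolve_descriptors`, written against the updated `PyArrayMethod_ResolveDescriptors` typedef shown below; the `dtypes` and `given_descrs` arrays may be read but not reassigned, and only `loop_descrs` is filled in):

    static NPY_CASTING
    my_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method),
            PyArray_DTypeMeta *const dtypes[],
            PyArray_Descr *const given_descrs[],
            PyArray_Descr *loop_descrs[],
            npy_intp *NPY_UNUSED(view_offset))
    {
        /* Reading the const arrays is fine; only loop_descrs is written. */
        Py_INCREF(given_descrs[0]);
        loop_descrs[0] = given_descrs[0];
        Py_INCREF(given_descrs[0]);
        loop_descrs[1] = given_descrs[0];
        return NPY_NO_CASTING;
    }

The few spots later in the series that cast the const away (around `validate_casting` and the resolve-dtypes path in `ufunc_object.c`) are the flip side: internal code that legitimately fills these arrays keeps doing so without changing the old public signatures.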
--- numpy/_core/include/numpy/dtype_api.h | 24 +++++++++++------------ numpy/_core/include/numpy/npy_2_compat.h | 6 +++--- numpy/_core/src/multiarray/array_method.c | 6 +++--- numpy/_core/src/multiarray/dtypemeta.c | 2 +- numpy/_core/src/multiarray/dtypemeta.h | 2 +- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index f21d0e6558f3..c35577fbbcad 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -106,7 +106,7 @@ typedef struct PyArrayMethod_Context_tag { struct PyArrayMethodObject_tag *method; /* Operand descriptors, filled in by resolve_descriptors */ - PyArray_Descr **descriptors; + PyArray_Descr *const *descriptors; /* Structure may grow (this is harmless for DType authors) */ } PyArrayMethod_Context; @@ -159,9 +159,9 @@ typedef NPY_CASTING (PyArrayMethod_ResolveDescriptors)( /* "method" is currently opaque (necessary e.g. to wrap Python) */ struct PyArrayMethodObject_tag *method, /* DTypes the method was created for */ - PyArray_DTypeMeta **dtypes, + PyArray_DTypeMeta *const *dtypes, /* Input descriptors (instances). Outputs may be NULL. */ - PyArray_Descr **given_descrs, + PyArray_Descr *const *given_descrs, /* Exact loop descriptors to use, must not hold references on error */ PyArray_Descr **loop_descrs, npy_intp *view_offset); @@ -177,9 +177,9 @@ typedef NPY_CASTING (PyArrayMethod_ResolveDescriptors)( */ typedef NPY_CASTING (PyArrayMethod_ResolveDescriptorsWithScalar)( struct PyArrayMethodObject_tag *method, - PyArray_DTypeMeta **dtypes, + PyArray_DTypeMeta *const *dtypes, /* Unlike above, these can have any DType and we may allow NULL. */ - PyArray_Descr **given_descrs, + PyArray_Descr *const *given_descrs, /* * Input scalars or NULL. Only ever passed for python scalars. * WARNING: In some cases, a loop may be explicitly selected and the @@ -227,7 +227,7 @@ typedef int (PyArrayMethod_GetLoop)( */ typedef int (PyArrayMethod_GetReductionInitial)( PyArrayMethod_Context *context, npy_bool reduction_is_empty, - char *initial); + void *initial); /* * The following functions are only used by the wrapping array method defined @@ -256,8 +256,8 @@ typedef int (PyArrayMethod_GetReductionInitial)( * `resolve_descriptors`, so that it can be filled there if not NULL.) */ typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout, - PyArray_DTypeMeta *wrapped_dtypes[], - PyArray_Descr *given_descrs[], PyArray_Descr *new_descrs[]); + PyArray_DTypeMeta *const wrapped_dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *new_descrs[]); /** * The function to convert the actual loop descriptors (as returned by the @@ -278,7 +278,7 @@ typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout, * @returns 0 on success, -1 on failure. 
*/ typedef int (PyArrayMethod_TranslateLoopDescriptors)(int nin, int nout, - PyArray_DTypeMeta *new_dtypes[], PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const new_dtypes[], PyArray_Descr *const given_descrs[], PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]); @@ -303,7 +303,7 @@ typedef int (PyArrayMethod_TranslateLoopDescriptors)(int nin, int nout, * */ typedef int (PyArrayMethod_TraverseLoop)( - void *traverse_context, PyArray_Descr *descr, char *data, + void *traverse_context, const PyArray_Descr *descr, char *data, npy_intp size, npy_intp stride, NpyAuxData *auxdata); @@ -317,7 +317,7 @@ typedef int (PyArrayMethod_TraverseLoop)( * */ typedef int (PyArrayMethod_GetTraverseLoop)( - void *traverse_context, PyArray_Descr *descr, + void *traverse_context, const PyArray_Descr *descr, int aligned, npy_intp fixed_stride, PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags); @@ -334,7 +334,7 @@ typedef int (PyArrayMethod_GetTraverseLoop)( * (There are potential use-cases, these are currently unsupported.) */ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]); /* diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h index 1d6d512f95b5..4e56f5678c0a 100644 --- a/numpy/_core/include/numpy/npy_2_compat.h +++ b/numpy/_core/include/numpy/npy_2_compat.h @@ -220,19 +220,19 @@ DESCR_ACCESSOR(C_METADATA, c_metadata, NpyAuxData *, 1) #if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION static inline PyArray_ArrFuncs * - PyDataType_GetArrFuncs(PyArray_Descr *descr) + PyDataType_GetArrFuncs(const PyArray_Descr *descr) { return _PyDataType_GetArrFuncs(descr); } #elif NPY_ABI_VERSION < 0x02000000 static inline PyArray_ArrFuncs * - PyDataType_GetArrFuncs(PyArray_Descr *descr) + PyDataType_GetArrFuncs(const PyArray_Descr *descr) { return descr->f; } #else static inline PyArray_ArrFuncs * - PyDataType_GetArrFuncs(PyArray_Descr *descr) + PyDataType_GetArrFuncs(const PyArray_Descr *descr) { if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { return _PyDataType_GetArrFuncs(descr); diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index 9e4f3a55fba9..ac8a73aea005 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -59,8 +59,8 @@ static NPY_CASTING default_resolve_descriptors( PyArrayMethodObject *method, - PyArray_DTypeMeta **dtypes, - PyArray_Descr **input_descrs, + PyArray_DTypeMeta *const *dtypes, + PyArray_Descr *const *input_descrs, PyArray_Descr **output_descrs, npy_intp *view_offset) { @@ -139,7 +139,7 @@ npy_default_get_strided_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; PyArrayMethodObject *meth = context->method; *flags = meth->flags & NPY_METH_RUNTIME_FLAGS; *out_transferdata = NULL; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index acee68bad54f..dd8ba326b98f 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -1400,7 +1400,7 @@ PyArray_DTypeMeta *_Void_dtype = NULL; * for convenience as we are 
allowed to access the `DType` slots directly. */ NPY_NO_EXPORT PyArray_ArrFuncs * -_PyDataType_GetArrFuncs(PyArray_Descr *descr) +_PyDataType_GetArrFuncs(const PyArray_Descr *descr) { return PyDataType_GetArrFuncs(descr); } diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index 667f9280eb13..3315a7f80770 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -261,7 +261,7 @@ extern PyArray_DTypeMeta PyArray_StringDType; /* Internal version see dtypmeta.c for more information. */ static inline PyArray_ArrFuncs * -PyDataType_GetArrFuncs(PyArray_Descr *descr) +PyDataType_GetArrFuncs(const PyArray_Descr *descr) { return &NPY_DT_SLOTS(NPY_DTYPE(descr))->f; } From b22d092c70ae73ce3e239f89d07765481b671cf0 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 18 Mar 2024 13:22:49 +0100 Subject: [PATCH 062/980] MAINT: Fixup compiler warnings/errors based on const changes --- numpy/_core/src/multiarray/convert_datatype.c | 44 ++++++------ numpy/_core/src/multiarray/convert_datatype.h | 8 +-- numpy/_core/src/multiarray/datetime.c | 6 +- numpy/_core/src/multiarray/dtype_traversal.c | 32 ++++----- numpy/_core/src/multiarray/dtype_traversal.h | 10 +-- .../_core/src/multiarray/stringdtype/casts.c | 6 +- .../_core/src/multiarray/stringdtype/dtype.c | 2 +- .../multiarray/stringdtype/static_string.c | 2 +- numpy/_core/src/umath/_scaled_float_dtype.c | 8 +-- numpy/_core/src/umath/legacy_array_method.c | 12 ++-- numpy/_core/src/umath/legacy_array_method.h | 2 +- numpy/_core/src/umath/string_ufuncs.cpp | 46 ++++++------ numpy/_core/src/umath/stringdtype_ufuncs.cpp | 70 +++++++++++-------- numpy/_core/src/umath/ufunc_object.c | 8 ++- numpy/_core/src/umath/ufunc_type_resolution.c | 6 +- numpy/_core/src/umath/ufunc_type_resolution.h | 2 +- numpy/_core/src/umath/wrapping_array_method.c | 8 +-- 17 files changed, 141 insertions(+), 131 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 2225ee94859c..92e6964fc21f 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -2440,8 +2440,8 @@ PyArray_AddCastingImplementation_FromSpec(PyArrayMethod_Spec *spec, int private) NPY_NO_EXPORT NPY_CASTING legacy_same_dtype_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[2]), + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -2483,7 +2483,7 @@ legacy_cast_get_strided_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; int out_needs_api = 0; *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; @@ -2507,8 +2507,8 @@ legacy_cast_get_strided_loop( NPY_NO_EXPORT NPY_CASTING simple_cast_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -2548,7 +2548,7 @@ get_byteswap_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; 
assert(descrs[0]->kind == descrs[1]->kind); assert(descrs[0]->elsize == descrs[1]->elsize); int itemsize = descrs[0]->elsize; @@ -2727,8 +2727,8 @@ PyArray_InitializeNumericCasts(void) static int cast_to_string_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *NPY_UNUSED(view_offset)) { @@ -2879,8 +2879,8 @@ add_other_to_and_from_string_cast( NPY_NO_EXPORT NPY_CASTING string_to_string_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[2]), + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -2932,7 +2932,7 @@ string_to_string_get_loop( NPY_ARRAYMETHOD_FLAGS *flags) { int unicode_swap = 0; - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; assert(NPY_DTYPE(descrs[0]) == NPY_DTYPE(descrs[1])); *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; @@ -3033,7 +3033,7 @@ PyArray_InitializeStringCasts(void) */ static NPY_CASTING cast_to_void_dtype_class( - PyArray_Descr **given_descrs, PyArray_Descr **loop_descrs, + PyArray_Descr *const *given_descrs, PyArray_Descr **loop_descrs, npy_intp *view_offset) { /* `dtype="V"` means unstructured currently (compare final path) */ @@ -3058,8 +3058,8 @@ cast_to_void_dtype_class( static NPY_CASTING nonstructured_to_structured_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[2]), + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -3251,8 +3251,8 @@ PyArray_GetGenericToVoidCastingImpl(void) static NPY_CASTING structured_to_nonstructured_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -3521,8 +3521,8 @@ can_cast_fields_safety( static NPY_CASTING void_to_void_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -3720,8 +3720,8 @@ PyArray_InitializeVoidToVoidCast(void) static NPY_CASTING object_to_any_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *NPY_UNUSED(view_offset)) { @@ -3794,8 +3794,8 @@ PyArray_GetObjectToGenericCastingImpl(void) static NPY_CASTING any_to_object_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *NPY_UNUSED(view_offset)) { diff --git a/numpy/_core/src/multiarray/convert_datatype.h b/numpy/_core/src/multiarray/convert_datatype.h index b32b637d8e55..8dccbab6f8c0 100644 --- a/numpy/_core/src/multiarray/convert_datatype.h +++ 
b/numpy/_core/src/multiarray/convert_datatype.h @@ -109,8 +109,8 @@ PyArray_CheckCastSafety(NPY_CASTING casting, NPY_NO_EXPORT NPY_CASTING legacy_same_dtype_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset); @@ -124,8 +124,8 @@ legacy_cast_get_strided_loop( NPY_NO_EXPORT NPY_CASTING simple_cast_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *input_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const input_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset); diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 7397381daf91..b340cf6cf496 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -3780,7 +3780,7 @@ time_to_time_get_loop( { int requires_wrap = 0; int inner_aligned = aligned; - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; *flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; PyArray_DatetimeMetaData *meta1 = get_datetime_metadata_from_dtype(descrs[0]); @@ -3929,7 +3929,7 @@ datetime_to_string_get_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; if (descrs[1]->type_num == NPY_STRING) { @@ -3989,7 +3989,7 @@ string_to_datetime_cast_get_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; if (descrs[0]->type_num == NPY_STRING) { diff --git a/numpy/_core/src/multiarray/dtype_traversal.c b/numpy/_core/src/multiarray/dtype_traversal.c index 0402ad2c084d..3e20c8c85c1a 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.c +++ b/numpy/_core/src/multiarray/dtype_traversal.c @@ -32,7 +32,7 @@ typedef int get_traverse_func_function( - void *traverse_context, PyArray_Descr *dtype, int aligned, + void *traverse_context, const PyArray_Descr *dtype, int aligned, npy_intp stride, NPY_traverse_info *clear_info, NPY_ARRAYMETHOD_FLAGS *flags); @@ -42,7 +42,7 @@ typedef int get_traverse_func_function( static int get_clear_function( - void *traverse_context, PyArray_Descr *dtype, int aligned, + void *traverse_context, const PyArray_Descr *dtype, int aligned, npy_intp stride, NPY_traverse_info *clear_info, NPY_ARRAYMETHOD_FLAGS *flags) { @@ -99,7 +99,7 @@ PyArray_GetClearFunction( static int get_zerofill_function( - void *traverse_context, PyArray_Descr *dtype, int aligned, + void *traverse_context, const PyArray_Descr *dtype, int aligned, npy_intp stride, NPY_traverse_info *zerofill_info, NPY_ARRAYMETHOD_FLAGS *flags) { @@ -136,7 +136,7 @@ get_zerofill_function( static int clear_object_strided_loop( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), char *data, npy_intp size, npy_intp stride, NpyAuxData *NPY_UNUSED(auxdata)) { @@ -156,7 +156,7 @@ clear_object_strided_loop( NPY_NO_EXPORT int npy_get_clear_object_strided_loop( - void *NPY_UNUSED(traverse_context), 
PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), int NPY_UNUSED(aligned), npy_intp NPY_UNUSED(fixed_stride), PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags) @@ -171,7 +171,7 @@ npy_get_clear_object_strided_loop( static int fill_zero_object_strided_loop( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), char *data, npy_intp size, npy_intp stride, NpyAuxData *NPY_UNUSED(auxdata)) { @@ -188,7 +188,7 @@ fill_zero_object_strided_loop( NPY_NO_EXPORT int npy_object_get_fill_zero_loop(void *NPY_UNUSED(traverse_context), - PyArray_Descr *NPY_UNUSED(descr), + const PyArray_Descr *NPY_UNUSED(descr), int NPY_UNUSED(aligned), npy_intp NPY_UNUSED(fixed_stride), PyArrayMethod_TraverseLoop **out_loop, @@ -275,7 +275,7 @@ fields_traverse_data_clone(NpyAuxData *data) static int traverse_fields_function( - void *traverse_context, _PyArray_LegacyDescr *NPY_UNUSED(descr), + void *traverse_context, const _PyArray_LegacyDescr *NPY_UNUSED(descr), char *data, npy_intp N, npy_intp stride, NpyAuxData *auxdata) { @@ -315,7 +315,7 @@ traverse_fields_function( static int get_fields_traverse_function( - void *traverse_context, _PyArray_LegacyDescr *dtype, int NPY_UNUSED(aligned), + void *traverse_context, const _PyArray_LegacyDescr *dtype, int NPY_UNUSED(aligned), npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags, get_traverse_func_function *get_traverse_func) @@ -431,14 +431,14 @@ subarray_traverse_data_clone(NpyAuxData *data) static int traverse_subarray_func( - void *traverse_context, PyArray_Descr *NPY_UNUSED(descr), + void *traverse_context, const PyArray_Descr *NPY_UNUSED(descr), char *data, npy_intp N, npy_intp stride, NpyAuxData *auxdata) { subarray_traverse_data *subarr_data = (subarray_traverse_data *)auxdata; PyArrayMethod_TraverseLoop *func = subarr_data->info.func; - PyArray_Descr *sub_descr = subarr_data->info.descr; + const PyArray_Descr *sub_descr = subarr_data->info.descr; npy_intp sub_N = subarr_data->count; NpyAuxData *sub_auxdata = subarr_data->info.auxdata; npy_intp sub_stride = sub_descr->elsize; @@ -456,7 +456,7 @@ traverse_subarray_func( static int get_subarray_traverse_func( - void *traverse_context, PyArray_Descr *dtype, int aligned, + void *traverse_context, const PyArray_Descr *dtype, int aligned, npy_intp size, npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags, get_traverse_func_function *get_traverse_func) @@ -493,7 +493,7 @@ get_subarray_traverse_func( static int clear_no_op( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), char *NPY_UNUSED(data), npy_intp NPY_UNUSED(size), npy_intp NPY_UNUSED(stride), NpyAuxData *NPY_UNUSED(auxdata)) { @@ -502,7 +502,7 @@ clear_no_op( NPY_NO_EXPORT int npy_get_clear_void_and_legacy_user_dtype_loop( - void *traverse_context, _PyArray_LegacyDescr *dtype, int aligned, + void *traverse_context, const _PyArray_LegacyDescr *dtype, int aligned, npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags) { @@ -569,7 +569,7 @@ npy_get_clear_void_and_legacy_user_dtype_loop( static int zerofill_fields_function( - void *traverse_context, _PyArray_LegacyDescr *descr, + void *traverse_context, 
const _PyArray_LegacyDescr *descr, char *data, npy_intp N, npy_intp stride, NpyAuxData *auxdata) { @@ -598,7 +598,7 @@ zerofill_fields_function( */ NPY_NO_EXPORT int npy_get_zerofill_void_and_legacy_user_dtype_loop( - void *traverse_context, _PyArray_LegacyDescr *dtype, int aligned, + void *traverse_context, const _PyArray_LegacyDescr *dtype, int aligned, npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags) { diff --git a/numpy/_core/src/multiarray/dtype_traversal.h b/numpy/_core/src/multiarray/dtype_traversal.h index bd3918ba4b65..7a06328cb2e0 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.h +++ b/numpy/_core/src/multiarray/dtype_traversal.h @@ -7,14 +7,14 @@ NPY_NO_EXPORT int npy_get_clear_object_strided_loop( - void *traverse_context, PyArray_Descr *descr, int aligned, + void *traverse_context, const PyArray_Descr *descr, int aligned, npy_intp fixed_stride, PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_traversedata, NPY_ARRAYMETHOD_FLAGS *flags); NPY_NO_EXPORT int npy_get_clear_void_and_legacy_user_dtype_loop( - void *traverse_context, _PyArray_LegacyDescr *descr, int aligned, + void *traverse_context, const _PyArray_LegacyDescr *descr, int aligned, npy_intp fixed_stride, PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_traversedata, NPY_ARRAYMETHOD_FLAGS *flags); @@ -23,14 +23,14 @@ npy_get_clear_void_and_legacy_user_dtype_loop( NPY_NO_EXPORT int npy_object_get_fill_zero_loop( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), int NPY_UNUSED(aligned), npy_intp NPY_UNUSED(fixed_stride), PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **NPY_UNUSED(out_auxdata), NPY_ARRAYMETHOD_FLAGS *flags); NPY_NO_EXPORT int npy_get_zerofill_void_and_legacy_user_dtype_loop( - void *traverse_context, _PyArray_LegacyDescr *dtype, int aligned, + void *traverse_context, const _PyArray_LegacyDescr *dtype, int aligned, npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags); @@ -40,7 +40,7 @@ npy_get_zerofill_void_and_legacy_user_dtype_loop( typedef struct { PyArrayMethod_TraverseLoop *func; NpyAuxData *auxdata; - PyArray_Descr *descr; + const PyArray_Descr *descr; } NPY_traverse_info; diff --git a/numpy/_core/src/multiarray/stringdtype/casts.c b/numpy/_core/src/multiarray/stringdtype/casts.c index 6cb648624d85..e3e7c5fde872 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.c +++ b/numpy/_core/src/multiarray/stringdtype/casts.c @@ -156,7 +156,7 @@ unicode_to_string(PyArrayMethod_Context *context, char *const data[], npy_intp const dimensions[], npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; PyArray_StringDTypeObject *sdescr = (PyArray_StringDTypeObject *)descrs[1]; npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); @@ -1672,7 +1672,7 @@ void_to_string(PyArrayMethod_Context *context, char *const data[], npy_intp const dimensions[], npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)descrs[1]; npy_string_allocator *allocator = NpyString_acquire_allocator(descr); @@ -1802,7 +1802,7 @@ bytes_to_string(PyArrayMethod_Context *context, char *const data[], 
npy_intp const dimensions[], npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)descrs[1]; npy_string_allocator *allocator = NpyString_acquire_allocator(descr); diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index a7b31dd4800e..f5488db87e5e 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -517,7 +517,7 @@ stringdtype_ensure_canonical(PyArray_StringDTypeObject *self) static int stringdtype_clear_loop(void *NPY_UNUSED(traverse_context), - PyArray_Descr *descr, char *data, npy_intp size, + const PyArray_Descr *descr, char *data, npy_intp size, npy_intp stride, NpyAuxData *NPY_UNUSED(auxdata)) { PyArray_StringDTypeObject *sdescr = (PyArray_StringDTypeObject *)descr; diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index 85f499c3c3ae..ab0911709c75 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -318,7 +318,7 @@ NpyString_acquire_allocator(PyArray_StringDTypeObject *descr) */ NPY_NO_EXPORT void NpyString_acquire_allocators(size_t n_descriptors, - PyArray_Descr *descrs[], + PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) { for (size_t i=0; imethod->legacy_initial, context->descriptors[0]->elsize); @@ -266,7 +266,7 @@ copy_cached_initial( static int get_initial_from_ufunc( PyArrayMethod_Context *context, npy_bool reduction_is_empty, - char *initial) + void *initial) { if (context->caller == NULL || !PyObject_TypeCheck(context->caller, &PyUFunc_Type)) { diff --git a/numpy/_core/src/umath/legacy_array_method.h b/numpy/_core/src/umath/legacy_array_method.h index 750de06c7992..82eeb04a0a15 100644 --- a/numpy/_core/src/umath/legacy_array_method.h +++ b/numpy/_core/src/umath/legacy_array_method.h @@ -28,7 +28,7 @@ get_wrapped_legacy_ufunc_loop(PyArrayMethod_Context *context, NPY_NO_EXPORT NPY_CASTING wrapped_legacy_resolve_descriptors(PyArrayMethodObject *, - PyArray_DTypeMeta **, PyArray_Descr **, PyArray_Descr **, npy_intp *); + PyArray_DTypeMeta *const *, PyArray_Descr *const *, PyArray_Descr **, npy_intp *); #ifdef __cplusplus } diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 337c8f65ba1e..858493471f09 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -587,8 +587,8 @@ string_zfill_loop(PyArrayMethod_Context *context, static NPY_CASTING string_addition_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { @@ -615,8 +615,8 @@ string_addition_resolve_descriptors( static NPY_CASTING string_multiply_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { @@ -649,8 +649,8 @@ string_multiply_resolve_descriptors( static NPY_CASTING 
string_strip_whitespace_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[2]), + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *NPY_UNUSED(view_offset)) { @@ -669,8 +669,8 @@ string_strip_whitespace_resolve_descriptors( static NPY_CASTING string_strip_chars_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { @@ -693,7 +693,7 @@ string_strip_chars_resolve_descriptors( static int string_findlike_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -709,7 +709,7 @@ string_findlike_promoter(PyObject *NPY_UNUSED(ufunc), static int string_replace_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -732,8 +732,8 @@ string_replace_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_replace_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[5]), - PyArray_Descr *given_descrs[5], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[5]), + PyArray_Descr *const given_descrs[5], PyArray_Descr *loop_descrs[5], npy_intp *NPY_UNUSED(view_offset)) { @@ -769,7 +769,7 @@ string_replace_resolve_descriptors( static int string_startswith_endswith_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -785,7 +785,7 @@ string_startswith_endswith_promoter(PyObject *NPY_UNUSED(ufunc), static int string_expandtabs_length_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -798,7 +798,7 @@ string_expandtabs_length_promoter(PyObject *NPY_UNUSED(ufunc), static int string_expandtabs_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -813,8 +813,8 @@ string_expandtabs_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_expandtabs_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { @@ -846,7 +846,7 @@ string_expandtabs_resolve_descriptors( static int string_center_ljust_rjust_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], 
PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -863,8 +863,8 @@ string_center_ljust_rjust_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_center_ljust_rjust_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[5], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[5], PyArray_Descr *loop_descrs[5], npy_intp *NPY_UNUSED(view_offset)) { @@ -901,7 +901,7 @@ string_center_ljust_rjust_resolve_descriptors( static int string_zfill_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -916,8 +916,8 @@ string_zfill_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_zfill_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index ed636fdf674e..7c087aea7e97 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -43,7 +43,7 @@ static NPY_CASTING multiply_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *dtypes[], PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const dtypes[], PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { PyArray_Descr *ldescr = given_descrs[0]; @@ -239,8 +239,8 @@ static int multiply_left_strided_loop( static NPY_CASTING binary_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { @@ -556,7 +556,8 @@ string_comparison_strided_loop(PyArrayMethod_Context *context, char *const data[ static NPY_CASTING string_comparison_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { Py_INCREF(given_descrs[0]); @@ -602,7 +603,8 @@ string_isnan_strided_loop(PyArrayMethod_Context *context, char *const data[], static NPY_CASTING string_bool_output_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { Py_INCREF(given_descrs[0]); @@ -615,7 +617,8 @@ string_bool_output_resolve_descriptors( static NPY_CASTING string_intp_output_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { 
Py_INCREF(given_descrs[0]); @@ -761,7 +764,8 @@ string_strlen_strided_loop(PyArrayMethod_Context *context, char *const data[], static int string_findlike_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); @@ -775,8 +779,8 @@ string_findlike_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_findlike_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { @@ -821,7 +825,8 @@ string_findlike_resolve_descriptors( static int string_startswith_endswith_promoter( PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); @@ -835,8 +840,8 @@ string_startswith_endswith_promoter( static NPY_CASTING string_startswith_endswith_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { @@ -1044,7 +1049,8 @@ string_startswith_endswith_strided_loop(PyArrayMethod_Context *context, static int all_strings_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); @@ -1056,8 +1062,8 @@ all_strings_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING strip_chars_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { @@ -1171,8 +1177,8 @@ string_lrstrip_chars_strided_loop( static NPY_CASTING strip_whitespace_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { @@ -1278,7 +1284,8 @@ string_lrstrip_whitespace_strided_loop( static int string_replace_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); @@ -1291,8 +1298,8 @@ string_replace_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING replace_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr 
*loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { @@ -1459,8 +1466,8 @@ string_replace_strided_loop( static NPY_CASTING expandtabs_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { @@ -1574,7 +1581,7 @@ string_expandtabs_strided_loop(PyArrayMethod_Context *context, static NPY_CASTING center_ljust_rjust_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *dtypes[], PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const dtypes[], PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { PyArray_StringDTypeObject *input_descr = (PyArray_StringDTypeObject *)given_descrs[0]; @@ -1881,8 +1888,8 @@ zfill_strided_loop(PyArrayMethod_Context *context, NPY_NO_EXPORT int string_inputs_promoter( - PyObject *ufunc_obj, PyArray_DTypeMeta *op_dtypes[], - PyArray_DTypeMeta *signature[], + PyObject *ufunc_obj, PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[], PyArray_DTypeMeta *final_dtype, PyArray_DTypeMeta *result_dtype) @@ -1914,8 +1921,8 @@ string_inputs_promoter( static int string_object_bool_output_promoter( - PyObject *ufunc, PyArray_DTypeMeta *op_dtypes[], - PyArray_DTypeMeta *signature[], + PyObject *ufunc, PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { return string_inputs_promoter( @@ -1925,8 +1932,8 @@ string_object_bool_output_promoter( static int string_unicode_bool_output_promoter( - PyObject *ufunc, PyArray_DTypeMeta *op_dtypes[], - PyArray_DTypeMeta *signature[], + PyObject *ufunc, PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { return string_inputs_promoter( @@ -2007,8 +2014,9 @@ is_integer_dtype(PyArray_DTypeMeta *DType) static int -string_multiply_promoter(PyObject *ufunc_obj, PyArray_DTypeMeta *op_dtypes[], - PyArray_DTypeMeta *signature[], +string_multiply_promoter(PyObject *ufunc_obj, + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { PyUFuncObject *ufunc = (PyUFuncObject *)ufunc_obj; diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 609cab5882f6..3eac49a56959 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -1011,9 +1011,11 @@ try_trivial_single_output_loop(PyArrayMethod_Context *context, */ static inline int validate_casting(PyArrayMethodObject *method, PyUFuncObject *ufunc, - PyArrayObject *ops[], PyArray_Descr *descriptors[], + PyArrayObject *ops[], PyArray_Descr *const descriptors_const[], NPY_CASTING casting) { + /* Cast away const to not change old public `PyUFunc_ValidateCasting`. 
*/ + PyArray_Descr **descriptors = (PyArray_Descr **)descriptors_const; if (method->resolve_descriptors == &wrapped_legacy_resolve_descriptors) { /* * In this case the legacy type resolution was definitely called @@ -1091,7 +1093,7 @@ execute_ufunc_loop(PyArrayMethod_Context *context, int masked, NpyIter *iter = NpyIter_AdvancedNew(nop + masked, op, iter_flags, order, NPY_UNSAFE_CASTING, - op_flags, context->descriptors, + op_flags, (PyArray_Descr **)context->descriptors, -1, NULL, NULL, buffersize); if (iter == NULL) { return -1; @@ -6243,7 +6245,7 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, context->descriptors = call_info->_descrs; for (int i=0; i < ufunc->nargs; i++) { Py_INCREF(operation_descrs[i]); - context->descriptors[i] = operation_descrs[i]; + ((PyArray_Descr **)context->descriptors)[i] = operation_descrs[i]; } result = PyTuple_Pack(2, result_dtype_tuple, capsule); diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index 4975d41147ea..f9962a9b4e32 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -226,7 +226,7 @@ NPY_NO_EXPORT int PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING casting, PyArrayObject **operands, - PyArray_Descr **dtypes) + PyArray_Descr *const *dtypes) { int i, nin = ufunc->nin, nop = nin + ufunc->nout; @@ -1471,7 +1471,7 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, static int find_userloop(PyUFuncObject *ufunc, - PyArray_Descr **dtypes, + PyArray_Descr *const *dtypes, PyUFuncGenericFunction *out_innerloop, void **out_innerloopdata) { @@ -1535,7 +1535,7 @@ find_userloop(PyUFuncObject *ufunc, NPY_NO_EXPORT int PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc, - PyArray_Descr **dtypes, + PyArray_Descr *const *dtypes, PyUFuncGenericFunction *out_innerloop, void **out_innerloopdata, int *out_needs_api) diff --git a/numpy/_core/src/umath/ufunc_type_resolution.h b/numpy/_core/src/umath/ufunc_type_resolution.h index 84a2593f44c4..3f8e7505ea39 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.h +++ b/numpy/_core/src/umath/ufunc_type_resolution.h @@ -134,7 +134,7 @@ type_tuple_type_resolver(PyUFuncObject *self, NPY_NO_EXPORT int PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc, - PyArray_Descr **dtypes, + PyArray_Descr *const *dtypes, PyUFuncGenericFunction *out_innerloop, void **out_innerloopdata, int *out_needs_api); diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index 3f3228237c21..ee1db62abf68 100644 --- a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -36,8 +36,8 @@ static NPY_CASTING wrapping_method_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[], - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *view_offset) { @@ -158,8 +158,8 @@ wrapping_method_get_loop( auxdata->orig_context.caller = context->caller; if (context->method->translate_given_descrs( - nin, nout, context->method->wrapped_dtypes, - context->descriptors, auxdata->orig_context.descriptors) < 0) { + nin, nout, context->method->wrapped_dtypes, context->descriptors, + (PyArray_Descr **)auxdata->orig_context.descriptors) < 0) { NPY_AUXDATA_FREE((NpyAuxData *)auxdata); return -1; } From 87f1fcb0f3821ed85d14f983e6fe93dd191a0107 Mon Sep 17 00:00:00 2001 From: Sebastian 
Berg Date: Mon, 18 Mar 2024 13:28:22 +0100 Subject: [PATCH 063/980] DOC: Also update docs for changes --- doc/source/reference/c-api/array.rst | 11 ++++++----- doc/source/reference/c-api/types-and-structures.rst | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 447871e644cf..d0f4b039a048 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1682,8 +1682,8 @@ the functions that must be implemented for each slot. .. c:type:: NPY_CASTING (PyArrayMethod_ResolveDescriptors)( \ struct PyArrayMethodObject_tag *method, \ - PyArray_DTypeMeta **dtypes, \ - PyArray_Descr **given_descrs, \ + PyArray_DTypeMeta *const *dtypes, \ + PyArray_Descr *const *given_descrs, \ PyArray_Descr **loop_descrs, \ npy_intp *view_offset) @@ -1857,7 +1857,7 @@ Typedefs for functions that users of the ArrayMethod API can implement are described below. .. c:type:: int (PyArrayMethod_TraverseLoop)( \ - void *traverse_context, PyArray_Descr *descr, char *data, \ + void *traverse_context, const PyArray_Descr *descr, char *data, \ npy_intp size, npy_intp stride, NpyAuxData *auxdata) A traverse loop working on a single array. This is similar to the general @@ -1880,7 +1880,7 @@ described below. passed through in the future (for structured dtypes). .. c:type:: int (PyArrayMethod_GetTraverseLoop)( \ - void *traverse_context, PyArray_Descr *descr, \ + void *traverse_context, const PyArray_Descr *descr, \ int aligned, npy_intp fixed_stride, \ PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata, \ NPY_ARRAYMETHOD_FLAGS *flags) @@ -1920,7 +1920,8 @@ with the rest of the ArrayMethod API. attempt a new search for a matching loop/promoter. .. c:type:: int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, \ - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], \ + PyArray_DTypeMeta *const op_dtypes[], \ + PyArray_DTypeMeta *const signature[], \ PyArray_DTypeMeta *new_op_dtypes[]) Type of the promoter function, which must be wrapped into a diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index df32b3dfcd60..d29115680ef2 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -728,7 +728,7 @@ PyArrayMethod_Context and PyArrayMethod_Spec typedef struct { PyObject *caller; struct PyArrayMethodObject_tag *method; - PyArray_Descr **descriptors; + PyArray_Descr *const *descriptors; } PyArrayMethod_Context .. c:member:: PyObject *caller From 550f7d782e53ddf04aa9bc0de03207b85288b3c2 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 18 Mar 2024 13:36:04 +0100 Subject: [PATCH 064/980] MAINT: Add a few more const to function parameters (safe) --- doc/source/reference/c-api/array.rst | 4 ++-- numpy/_core/src/multiarray/stringdtype/static_string.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index d0f4b039a048..1e67f97917d5 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -3387,7 +3387,7 @@ Data Type Promotion and Inspection ---------------------------------- .. c:function:: PyArray_DTypeMeta *PyArray_CommonDType( \ - PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) + const PyArray_DTypeMeta *dtype1, const PyArray_DTypeMeta *dtype2) This function defines the common DType operator. 
Note that the common DType will not be ``object`` (unless one of the DTypes is ``object``). Similar to @@ -3414,7 +3414,7 @@ Data Type Promotion and Inspection For example promoting ``float16`` with any other float, integer, or unsigned integer again gives a floating point number. -.. c:function:: PyArray_Descr *PyArray_GetDefaultDescr(PyArray_DTypeMeta *DType) +.. c:function:: PyArray_Descr *PyArray_GetDefaultDescr(const PyArray_DTypeMeta *DType) Given a DType class, returns the default instance (descriptor). This checks for a ``singleton`` first and only calls the ``default_descr`` function if diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index ab0911709c75..c9b5620211dc 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -286,7 +286,7 @@ NpyString_free_allocator(npy_string_allocator *allocator) * allocator mutex is held, as doing so may cause deadlocks. */ NPY_NO_EXPORT npy_string_allocator * -NpyString_acquire_allocator(PyArray_StringDTypeObject *descr) +NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) { if (!PyThread_acquire_lock(descr->allocator->allocator_lock, NOWAIT_LOCK)) { PyThread_acquire_lock(descr->allocator->allocator_lock, WAIT_LOCK); From 557c0a9cdfb2b7d3a9241fe91ea9ab4e427b1494 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 18 Mar 2024 14:23:00 +0100 Subject: [PATCH 065/980] MAINT: Make PyArrayMultiIterObject struct "smaller" This inverts the lie, on C++ we claim the struct is only fixed to 32 iterators, while in reality such a limit doesn't exist. The reason is that Cython is unhappy if it finds a smaller struct at runtime (on NumPy 1.x). If there is more oddities around this, we could probably even detect Cython... --- doc/source/reference/c-api/types-and-structures.rst | 2 +- numpy/_core/include/numpy/ndarraytypes.h | 11 ++++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index df32b3dfcd60..865ae836269b 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -1286,7 +1286,7 @@ PyArrayMultiIter_Type and PyArrayMultiIterObject npy_intp index; int nd; npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS]; - PyArrayIterObject *iters[NPY_MAXDIMS_LEGACY_ITERS]; + PyArrayIterObject *iters[]; } PyArrayMultiIterObject; .. c:macro: PyObject_HEAD diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 4c938d921043..95821b0baff2 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -1298,10 +1298,15 @@ typedef struct { * growing structs (as of Cython 3.0.6). It also allows NPY_MAXARGS * to be runtime dependent. */ -#if (defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) || defined(__cplusplus) - /* 64 is NPY_MAXARGS for numpy 2.0 or newer. We can't use a flexible - array member in C++ so use the internal size there. */ +#if (defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) PyArrayIterObject *iters[64]; +#elif defined(__cplusplus) + /* + * C++ doesn't stricly support flexible members and gives compilers + * warnings (pedantic only), so we lie. We can't make it 64 because + * then Cython is unhappy (larger struct at runtime is OK smaller not). 
+ */ + PyArrayIterObject *iters[32]; #else PyArrayIterObject *iters[]; #endif From 5d0d97273c19c8cfa3cd658298120240d7078ef5 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 18 Mar 2024 09:13:02 -0600 Subject: [PATCH 066/980] DOC: note stringdtype output support in np.strings docstrings --- .../_core/code_generators/ufunc_docstrings.py | 13 ++-- numpy/_core/strings.py | 76 +++++++++++-------- 2 files changed, 54 insertions(+), 35 deletions(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 534ed44406ba..d214ffbccb55 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -4904,8 +4904,8 @@ def add_newdoc(place, name, doc): Returns ------- out : ndarray - Output array of str or unicode, depending on input - types + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types $OUT_SCALAR_2 See Also @@ -4943,7 +4943,8 @@ def add_newdoc(place, name, doc): Returns ------- out : ndarray - Output array of str or unicode, depending on input type + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input type $OUT_SCALAR_2 See Also @@ -4978,7 +4979,8 @@ def add_newdoc(place, name, doc): Returns ------- out : ndarray - Output array of str or unicode, depending on input type + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input type $OUT_SCALAR_2 See Also @@ -5012,7 +5014,8 @@ def add_newdoc(place, name, doc): Returns ------- out : ndarray - Output array of str or unicode, depending on input type + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input type $OUT_SCALAR_2 See Also diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index baf4cb1e2cf1..c79c7db494ff 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -129,7 +129,8 @@ def multiply(a, i): Returns ------- out : ndarray - Output array of str or unicode, depending on input types + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types Examples -------- @@ -189,7 +190,8 @@ def mod(a, values): Returns ------- out : ndarray - Output array of str or unicode, depending on input types + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types """ return _to_bytes_or_str_array( @@ -310,7 +312,7 @@ def rindex(a, sub, start=0, end=None): Returns ------- out : ndarray - Output array of ints. + Output array of ints. 
See Also -------- @@ -551,7 +553,8 @@ def expandtabs(a, tabsize=8): Returns ------- out : ndarray - Output array of str or unicode, depending on input type + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input type See Also -------- @@ -593,7 +596,8 @@ def center(a, width, fillchar=' '): Returns ------- out : ndarray - Output array of str or unicode, depending on the type of ``a`` + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -651,7 +655,8 @@ def ljust(a, width, fillchar=' '): Returns ------- out : ndarray - Output array of str or unicode, depending on input type + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -706,7 +711,8 @@ def rjust(a, width, fillchar=' '): Returns ------- out : ndarray - Output array of str or unicode, depending on input type + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -760,7 +766,8 @@ def zfill(a, width): Returns ------- out : ndarray - Output array of str or unicode, depending on input type + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input type See Also -------- @@ -802,7 +809,8 @@ def lstrip(a, chars=None): Returns ------- out : ndarray - Output array of ``bytes_`` or ``str_`` dtype + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -847,7 +855,8 @@ def rstrip(a, chars=None): Returns ------- out : ndarray - Output array of ``bytes_`` or ``str_`` dtype + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -887,7 +896,8 @@ def strip(a, chars=None): Returns ------- out : ndarray - Output array of ``bytes_`` or ``str_`` dtype + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -928,8 +938,9 @@ def upper(a): Returns ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -962,8 +973,9 @@ def lower(a): Returns ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -997,8 +1009,9 @@ def swapcase(a): Returns ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -1035,8 +1048,8 @@ def capitalize(a): Returns ------- out : ndarray - Output array of str or unicode, depending on input - types + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -1075,7 +1088,8 @@ def title(a): Returns ------- out : ndarray - Output array of str or unicode, depending on input type + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -1113,7 +1127,8 @@ def replace(a, old, new, count=-1): Returns ------- out : ndarray - Output array of ``str_`` or ``bytes_`` dtype + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -1164,7 +1179,8 @@ def _join(sep, seq): Returns ------- 
out : ndarray - Output array of str or unicode, depending on input types + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -1250,7 +1266,7 @@ def _rsplit(a, sep=None, maxsplit=None): Returns ------- out : ndarray - Array of list objects + Array of list objects See Also -------- @@ -1320,10 +1336,10 @@ def _partition(a, sep): Returns ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type. - The output array will have an extra dimension with 3 - elements per input element. + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types. The output array will have an extra + dimension with 3 elements per input element. Examples -------- @@ -1362,9 +1378,9 @@ def _rpartition(a, sep): Returns ------- out : ndarray - Output array of string or unicode, depending on input - type. The output array will have an extra dimension with - 3 elements per input element. + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types. The output array will have an extra + dimension with 3 elements per input element. See Also -------- From 8ad2ef9116d6de20d6542ca5933fa48a48d38604 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 18 Mar 2024 15:57:43 +0100 Subject: [PATCH 067/980] BUG: Allow the new string dtype summation to work It may be that it would be better to indicate reductions more clearly, but this seemed like an easy and explicit fix. Does not quite close the issue, since we probably shouldn't promote old-style strings. --- numpy/_core/src/umath/ufunc_object.c | 7 +++++++ numpy/_core/tests/test_stringdtype.py | 12 ++++++++++++ 2 files changed, 19 insertions(+) diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 609cab5882f6..334d45fe07a1 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -2407,6 +2407,13 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, out_descrs[0], out_descrs[1], out_descrs[2]); goto fail; } + /* + * After checking that they are equivalent, we enforce the use of the out + * one (which the user should have defined). (Needed by string dtype) + */ + Py_INCREF(out_descrs[2]); + Py_SETREF(out_descrs[0], out_descrs[2]); + /* TODO: This really should _not_ be unsafe casting (same above)!
*/ if (validate_casting(ufuncimpl, ufunc, ops, out_descrs, casting) < 0) { goto fail; diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 7a3e210e02c2..4e3ee325250e 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -718,6 +718,18 @@ def test_ufunc_add(dtype, string_list, other_strings, use_out): np.add(arr1, arr2) +def test_ufunc_add_reduce(dtype): + values = ["a", "this is a long string", "c"] + arr = np.array(values, dtype=dtype) + out = np.empty((), dtype=dtype) + + expected = np.array("".join(values), dtype=dtype) + assert_array_equal(np.add.reduce(arr), expected) + + np.add.reduce(arr, out=out) + assert_array_equal(out, expected) + + def test_add_promoter(string_list): arr = np.array(string_list, dtype=StringDType()) lresult = np.array(["hello" + s for s in string_list], dtype=StringDType()) From 23c4a04d795c26796af14af8efbb62ccc97c5f70 Mon Sep 17 00:00:00 2001 From: adrinjalali Date: Mon, 18 Mar 2024 19:28:14 +0100 Subject: [PATCH 068/980] DOC clarifications on debugging numpy --- doc/source/dev/development_environment.rst | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index 6164eef4db26..ec19aabb44c2 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -247,7 +247,10 @@ of Python is encouraged, see :ref:`advanced_debugging`. In terms of debugging, NumPy also needs to be built in a debug mode. You need to use ``debug`` build type and disable optimizations to make sure ``-O0`` flag is used -during object building. To generate source-level debug information within the build process run:: +during object building. Note that NumPy should NOT be installed in your environment +before you build with the ``spin build`` command. + +To generate source-level debug information within the build process run:: $ spin build --clean -- -Dbuildtype=debug -Ddisable-optimization=true @@ -271,13 +274,14 @@ you want to debug. For instance ``mytest.py``:: x = np.arange(5) np.empty_like(x) -Now, you can run:: +Note that your test file needs to be outside the NumPy clone you have. 
Now, you can +run:: - $ spin gdb mytest.py + $ spin gdb /path/to/mytest.py In case you are using clang toolchain:: - $ lldb python mytest.py + $ lldb python /path/to/mytest.py And then in the debugger:: From 7641d8d0eba9f528c2d2bd2485eb7f6d8c0b1471 Mon Sep 17 00:00:00 2001 From: Adrin Jalali Date: Mon, 18 Mar 2024 20:34:39 +0100 Subject: [PATCH 069/980] Update doc/source/dev/development_environment.rst Co-authored-by: Nathan Goldbaum --- doc/source/dev/development_environment.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index ec19aabb44c2..b1cc7d96ffe2 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -281,7 +281,7 @@ run:: In case you are using clang toolchain:: - $ lldb python /path/to/mytest.py + $ spin lldb /path/to/mytest.py And then in the debugger:: From 9be279c507a0bd276089dbb4763d029fabfe7c79 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 18 Mar 2024 14:13:00 -0600 Subject: [PATCH 070/980] BUG: fix logic error in stringdtype maximum/minimum ufunc --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 4 +++- numpy/_core/tests/test_stringdtype.py | 6 ++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index ed636fdf674e..d78e756d62b9 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -429,7 +429,7 @@ minimum_maximum_strided_loop(PyArrayMethod_Context *context, char *const data[], npy_packed_static_string *sout = (npy_packed_static_string *)out; int cmp = _compare(in1, in2, in1_descr, in2_descr); if (cmp == 0 && (in1 == out || in2 == out)) { - continue; + goto next_step; } if ((cmp < 0) ^ invert) { // if in and out are the same address, do nothing to avoid a @@ -449,6 +449,8 @@ minimum_maximum_strided_loop(PyArrayMethod_Context *context, char *const data[], } } } + + next_step: in1 += in1_stride; in2 += in2_stride; out += out_stride; diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 4e3ee325250e..f87dc317b1d1 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -668,6 +668,12 @@ def test_ufuncs_minmax(string_list, ufunc_name, func, use_out): res = ufunc(arr, arr) assert_array_equal(uarr, res) + assert_array_equal(getattr(arr, ufunc_name)(), func(string_list)) + + +def test_max_regression(): + arr = np.array(['y', 'y', 'z'], dtype="T") + assert arr.max() == 'z' @pytest.mark.parametrize("use_out", [True, False]) From b74dff864db696f7f9b93da7c5a3c37655089798 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Mar 2024 17:22:34 +0000 Subject: [PATCH 071/980] MAINT: Bump actions/cache from 4.0.1 to 4.0.2 Bumps [actions/cache](https://github.com/actions/cache) from 4.0.1 to 4.0.2. - [Release notes](https://github.com/actions/cache/releases) - [Commits](https://github.com/actions/cache/compare/v4.0.1...v4.0.2) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux_qemu.yml | 2 +- .github/workflows/macos.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 967e16b327a9..d4d6fe4a4989 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -108,7 +108,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@v4.0.1 + uses: actions/cache@v4.0.2 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index d0d7605221f4..e962dde9d5bb 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -44,7 +44,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -68,7 +68,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. - name: Cache conda environment - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From 56dab5053eedbd0a34b8c5aa8bf66c46a8b15cb5 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Wed, 20 Mar 2024 08:51:46 +0200 Subject: [PATCH 072/980] BUG: adapt cython files to new complex declarations (#26080) Fixes #26029. Declare npy_cfloat and friends as empty structs. Declare cfloat_t and friends as ctypedef float complex cfloat_t, without being directly connected to npy_cfloat; this still allows using the .imag and .real attributes, which Cython wraps with macros. Add a test for "BUG: changing the complex struct has implications for Cython" (#26029); it passes since we use C compilation for Cython. The in-place += will fail on C++. Resync the two Cython pxd files. At what point do we declare NumPy requires Cython 3? I checked that SciPy and Cython can use the pxd file. --------- Co-authored-by: Sebastian Berg Co-authored-by: Lysandros Nikolaou --- doc/source/numpy_2_0_migration_guide.rst | 7 + numpy/__init__.cython-30.pxd | 35 ++-- numpy/__init__.pxd | 204 ++++++++++++++++--- numpy/_core/tests/examples/cython/checks.pyx | 10 +- numpy/_core/tests/test_cython.py | 18 +- 5 files changed, 216 insertions(+), 58 deletions(-) diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 0bbd68242524..4a569d612bf4 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -214,6 +214,13 @@ added for setting the real or imaginary part. The underlying type remains a struct under C++ (all of the above still remains valid). +This has implications for Cython. It is recommended to always use the native +typedefs ``cfloat_t``, ``cdouble_t``, ``clongdouble_t`` rather than the NumPy +types ``npy_cfloat``, etc., unless you have to interface with C code written +using the NumPy types. You can still write Cython code using the ``c.real`` and +``c.imag`` attributes (using the native typedefs), but you can no longer use +in-place operators ``c.imag += 1`` in Cython's C++ mode.
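As a quick illustration of the pattern recommended above, here is a minimal Cython sketch (the function name is hypothetical; it mirrors the ``inc2_cfloat_struct`` test added further down in this patch, and the in-place form assumes the extension is compiled in C mode)::

    cimport numpy as cnp

    def bump_complex(cnp.ndarray[cnp.cdouble_t] arr):
        # Plain assignment through the native typedef's .real/.imag
        # attributes works in both C and C++ mode.
        arr[0].real = arr[0].real + 1
        arr[0].imag = arr[0].imag + 1
        # The in-place form compiles only in C mode.
        arr[0].imag += 1

Called on ``np.array([1 + 1j], dtype="D")`` this leaves ``2 + 3j`` in ``arr[0]``.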
+ Changes to namespaces ===================== diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 1afbe3d8ebd0..744a50956b56 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -68,36 +68,28 @@ cdef extern from "numpy/arrayobject.h": ctypedef long double npy_float128 ctypedef struct npy_cfloat: - float real - float imag + pass ctypedef struct npy_cdouble: - double real - double imag + pass ctypedef struct npy_clongdouble: - long double real - long double imag + pass ctypedef struct npy_complex64: - float real - float imag + pass ctypedef struct npy_complex128: - double real - double imag + pass ctypedef struct npy_complex160: - long double real - long double imag + pass ctypedef struct npy_complex192: - long double real - long double imag + pass ctypedef struct npy_complex256: - long double real - long double imag + pass ctypedef struct PyArray_Dims: npy_intp *ptr @@ -562,7 +554,6 @@ cdef extern from "numpy/arrayobject.h": object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) void PyArray_FILLWBYTE(ndarray, int val) - npy_intp PyArray_REFCOUNT(object) object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) bint PyArray_EquivByteorders(int b1, int b2) nogil @@ -808,11 +799,10 @@ ctypedef npy_double float_t ctypedef npy_double double_t ctypedef npy_longdouble longdouble_t -ctypedef npy_cfloat cfloat_t -ctypedef npy_cdouble cdouble_t -ctypedef npy_clongdouble clongdouble_t - -ctypedef npy_cdouble complex_t +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t cdef inline object PyArray_MultiIterNew1(a): return PyArray_MultiIterNew(1, a) @@ -851,6 +841,7 @@ cdef extern from "numpy/ndarraytypes.h": int64_t year int32_t month, day, hour, min, sec, us, ps, as + cdef extern from "numpy/arrayscalars.h": # abstract types diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index 096714f6d7cd..aebb71fffa9c 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -16,13 +16,27 @@ from cpython.buffer cimport PyObject_GetBuffer from cpython.type cimport type cimport libc.stdio as stdio + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.pxd" */ + """ + + cdef extern from "Python.h": ctypedef int Py_intptr_t bint PyObject_TypeCheck(object obj, PyTypeObject* type) cdef extern from "numpy/arrayobject.h": - ctypedef Py_intptr_t npy_intp - ctypedef size_t npy_uintp + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) 
+ ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp ctypedef unsigned char npy_bool @@ -63,36 +77,28 @@ cdef extern from "numpy/arrayobject.h": ctypedef long double npy_float128 ctypedef struct npy_cfloat: - float real - float imag + pass ctypedef struct npy_cdouble: - double real - double imag + pass ctypedef struct npy_clongdouble: - long double real - long double imag + pass ctypedef struct npy_complex64: - float real - float imag + pass ctypedef struct npy_complex128: - double real - double imag + pass ctypedef struct npy_complex160: - long double real - long double imag + pass ctypedef struct npy_complex192: - long double real - long double imag + pass ctypedef struct npy_complex256: - long double real - long double imag + pass ctypedef struct PyArray_Dims: npy_intp *ptr @@ -154,7 +160,7 @@ cdef extern from "numpy/arrayobject.h": NPY_COMPLEX512 NPY_INTP - NPY_DEFAULT_INT + NPY_DEFAULT_INT # Not a compile time constant (normally)! ctypedef enum NPY_ORDER: NPY_ANYORDER @@ -350,7 +356,10 @@ cdef extern from "numpy/arrayobject.h": PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 npy_intp PyArray_ITEMSIZE(ndarray) nogil int PyArray_TYPE(ndarray arr) nogil @@ -371,7 +380,6 @@ cdef extern from "numpy/arrayobject.h": bint PyTypeNum_ISOBJECT(int) nogil npy_intp PyDataType_ELSIZE(dtype) nogil - void PyDataType_SET_ELSIZE(dtype, npy_intp) nogil npy_intp PyDataType_ALIGNMENT(dtype) nogil PyObject* PyDataType_METADATA(dtype) nogil PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil @@ -501,6 +509,12 @@ cdef extern from "numpy/arrayobject.h": void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil # Functions from __multiarray_api.h @@ -700,11 +714,10 @@ ctypedef npy_double float_t ctypedef npy_double double_t ctypedef npy_longdouble longdouble_t -ctypedef npy_cfloat cfloat_t -ctypedef npy_cdouble cdouble_t -ctypedef npy_clongdouble clongdouble_t - -ctypedef npy_cdouble complex_t +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t cdef inline object PyArray_MultiIterNew1(a): return PyArray_MultiIterNew(1, a) @@ -939,13 +952,6 @@ cdef inline int import_ufunc() except -1: except Exception: raise ImportError("numpy._core.umath failed to import") -cdef extern from *: - # Leave a marker that the NumPy declarations came from this file - # See https://github.com/cython/cython/issues/3573 - """ - /* NumPy API declarations from "numpy/__init__.pxd" */ - """ - cdef inline bint is_timedelta64_object(object obj): """ @@ -999,3 +1005,137 @@ cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: returns the unit part of the dtype for a numpy datetime64 
object. """ return (obj).obmeta.base + + +# Iterator API added in v1.6 +ctypedef int (*NpyIter_IterNextFunc)(NpyIter* it) noexcept nogil +ctypedef void (*NpyIter_GetMultiIndexFunc)(NpyIter* it, npy_intp* outcoords) noexcept nogil + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* 
it, npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index 4cf264ea521d..b51ab128053f 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -253,4 +253,12 @@ def compile_fillwithbyte(): dims = (1, 2) pos = cnp.PyArray_ZEROS(2, dims, cnp.NPY_UINT8, 0) cnp.PyArray_FILLWBYTE(pos, 1) - return pos \ No newline at end of file + return pos + +def inc2_cfloat_struct(cnp.ndarray[cnp.cfloat_t] arr): + # This works since we compile in C mode, it will fail in cpp mode + arr[1].real += 1 + arr[1].imag += 1 + # This works in both modes + arr[1].real = arr[1].real + 1 + arr[1].imag = arr[1].imag + 1 diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 3d9ac2927a33..e2bde4ded0eb 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -61,9 +61,13 @@ def install_temp(tmpdir_factory): ) try: subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir) - except subprocess.CalledProcessError as p: - print(f"{p.stdout=}") - print(f"{p.stderr=}") + except subprocess.CalledProcessError: + print("----------------") + print("meson 
build failed when doing") + print(f"'meson setup --native-file {native_file} {srcdir}'") + print(f"'meson compile -vv'") + print(f"in {build_dir}") + print("----------------") raise sys.path.append(str(build_dir)) @@ -274,3 +278,11 @@ def test_fillwithbytes(install_temp): arr = checks.compile_fillwithbyte() assert_array_equal(arr, np.ones((1, 2))) + + +def test_complex(install_temp): + from checks import inc2_cfloat_struct + + arr = np.array([0, 10+10j], dtype="F") + inc2_cfloat_struct(arr) + assert arr[1] == (12 + 12j) From 9d49b0d17046fc6e2ba128c462665964d9efcbb7 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 20 Mar 2024 15:07:58 +0100 Subject: [PATCH 073/980] API: Require reduce promoters to start with None to match But additionally, we add an (implicit) fallback promoter for the reduction case that fills in the first dtype with the second one. --- numpy/_core/src/umath/dispatching.c | 99 ++++++++++++++++------------- 1 file changed, 56 insertions(+), 43 deletions(-) diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index 26cc66c3a898..fbba26ea247b 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -274,21 +274,20 @@ resolve_implementation_info(PyUFuncObject *ufunc, /* Unspecified out always matches (see below for inputs) */ continue; } + assert(i == 0); /* - * This is a reduce-like operation, which always have the form - * `(res_DType, op_DType, res_DType)`. If the first and last - * dtype of the loops match, this should be reduce-compatible. + * This is a reduce-like operation, we enforce that these + * register with None as the first DType. If a reduction + * uses the same DType, we will do that promotion. + * A `(res_DType, op_DType, res_DType)` pattern can make sense + * in other context as well and could be confusing. */ - if (PyTuple_GET_ITEM(curr_dtypes, 0) - == PyTuple_GET_ITEM(curr_dtypes, 2)) { + if (PyTuple_GET_ITEM(curr_dtypes, 0) == Py_None) { continue; } - /* - * This should be a reduce, but doesn't follow the reduce - * pattern. So (for now?) consider this not a match. - */ + /* Otherwise, this is not considered a match */ matches = NPY_FALSE; - continue; + break; } if (resolver_dtype == (PyArray_DTypeMeta *)Py_None) { @@ -488,7 +487,7 @@ resolve_implementation_info(PyUFuncObject *ufunc, * those defined by the `signature` unmodified). */ static PyObject * -call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *promoter, +call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *info, PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], PyArrayObject *const operands[]) { @@ -498,37 +497,51 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *promoter, int promoter_result; PyArray_DTypeMeta *new_op_dtypes[NPY_MAXARGS]; - if (PyCapsule_CheckExact(promoter)) { - /* We could also go the other way and wrap up the python function... */ - PyArrayMethod_PromoterFunction *promoter_function = PyCapsule_GetPointer( - promoter, "numpy._ufunc_promoter"); - if (promoter_function == NULL) { + if (info != NULL) { + PyObject *promoter = PyTuple_GET_ITEM(info, 1); + if (PyCapsule_CheckExact(promoter)) { + /* We could also go the other way and wrap up the python function... 
*/ + PyArrayMethod_PromoterFunction *promoter_function = PyCapsule_GetPointer( + promoter, "numpy._ufunc_promoter"); + if (promoter_function == NULL) { + return NULL; + } + promoter_result = promoter_function((PyObject *)ufunc, + op_dtypes, signature, new_op_dtypes); + } + else { + PyErr_SetString(PyExc_NotImplementedError, + "Calling python functions for promotion is not implemented."); return NULL; } - promoter_result = promoter_function((PyObject *)ufunc, - op_dtypes, signature, new_op_dtypes); - } - else { - PyErr_SetString(PyExc_NotImplementedError, - "Calling python functions for promotion is not implemented."); - return NULL; - } - if (promoter_result < 0) { - return NULL; - } - /* - * If none of the dtypes changes, we would recurse infinitely, abort. - * (Of course it is nevertheless possible to recurse infinitely.) - */ - int dtypes_changed = 0; - for (int i = 0; i < nargs; i++) { - if (new_op_dtypes[i] != op_dtypes[i]) { - dtypes_changed = 1; - break; + if (promoter_result < 0) { + return NULL; + } + /* + * If none of the dtypes changes, we would recurse infinitely, abort. + * (Of course it is nevertheless possible to recurse infinitely.) + * + * TODO: We could allow users to signal this directly and also move + * the call to be (almost immediate). That would call it + * unnecessarily sometimes, but may allow additional flexibility. + */ + int dtypes_changed = 0; + for (int i = 0; i < nargs; i++) { + if (new_op_dtypes[i] != op_dtypes[i]) { + dtypes_changed = 1; + break; + } + } + if (!dtypes_changed) { + goto finish; } } - if (!dtypes_changed) { - goto finish; + else { + /* Reduction special path */ + new_op_dtypes[0] = NPY_DT_NewRef(op_dtypes[1]); + new_op_dtypes[1] = NPY_DT_NewRef(op_dtypes[1]); + Py_XINCREF(op_dtypes[2]); + new_op_dtypes[2] = op_dtypes[2]; } /* @@ -788,13 +801,13 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, /* * At this point `info` is NULL if there is no matching loop, or it is - * a promoter that needs to be used/called: + * a promoter that needs to be used/called. + * TODO: It may be nice to find a better reduce-solution, but this way + * it is a True fallback (not registered so lowest priority) */ - if (info != NULL) { - PyObject *promoter = PyTuple_GET_ITEM(info, 1); - + if (info != NULL || op_dtypes[0] == NULL) { info = call_promoter_and_recurse(ufunc, - promoter, op_dtypes, signature, ops); + info, op_dtypes, signature, ops); if (info == NULL && PyErr_Occurred()) { return NULL; } From a5e4adf8fe863a827ff9c6146bbd8251d4e96dfd Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 20 Mar 2024 15:21:12 +0100 Subject: [PATCH 074/980] TST: Test new reduce promoter rule for string addition --- numpy/_core/tests/test_stringdtype.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index f87dc317b1d1..66eae9096d7d 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -746,6 +746,15 @@ def test_add_promoter(string_list): assert_array_equal(arr + op, rresult) +def test_add_promoter_reduce(): + # Exact TypeError could change, but ensure StringDtype doesn't match + with pytest.raises(TypeError, match="the resolved dtypes are not"): + np.add.reduce(np.array(["a", "b"], dtype="U")) + + # On the other hand, using `dtype=T` in the *ufunc* should work. 
+ np.add.reduce(np.array(["a", "b"], dtype="U"), dtype=np.dtypes.StringDType) + + @pytest.mark.parametrize("use_out", [True, False]) @pytest.mark.parametrize("other", [2, [2, 1, 3, 4, 1, 3]]) @pytest.mark.parametrize( From e4f2e41fefff6f3b7e1adf5ac13aec90a887fb04 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 20 Mar 2024 15:30:11 +0100 Subject: [PATCH 075/980] BUG: Fix small reduce bug and test string multiply-reduce --- numpy/_core/src/umath/reduction.c | 5 +++-- numpy/_core/tests/test_stringdtype.py | 10 ++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index 5a938eaedb85..46466418e417 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -21,6 +21,7 @@ #include "array_coercion.h" #include "array_method.h" #include "ctors.h" +#include "refcount.h" #include "numpy/ufuncobject.h" #include "lowlevel_strided_loops.h" @@ -438,7 +439,7 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, Py_INCREF(result); if (initial_buf != NULL && PyDataType_REFCHK(PyArray_DESCR(result))) { - PyArray_Item_XDECREF(initial_buf, PyArray_DESCR(result)); + PyArray_ClearBuffer(PyArray_DESCR(result), initial_buf, 0, 1, 1); } PyMem_FREE(initial_buf); NPY_AUXDATA_FREE(auxdata); @@ -450,7 +451,7 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, fail: if (initial_buf != NULL && PyDataType_REFCHK(op_dtypes[0])) { - PyArray_Item_XDECREF(initial_buf, op_dtypes[0]); + PyArray_ClearBuffer(op_dtypes[0], initial_buf, 0, 1, 1); } PyMem_FREE(initial_buf); NPY_AUXDATA_FREE(auxdata); diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 66eae9096d7d..de41506f653a 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -755,6 +755,16 @@ def test_add_promoter_reduce(): np.add.reduce(np.array(["a", "b"], dtype="U"), dtype=np.dtypes.StringDType) +def test_multiply_reduce(): + # At the time of writing (NumPy 2.0) this is very limited (and rather + # ridiculous anyway). But it works and actually makes some sense... + # (NumPy does not allow non-scalar initial values) + repeats = np.array([2, 3, 4]) + val = "school-🚌" + res = np.multiply.reduce(repeats, initial=val, dtype=np.dtypes.StringDType) + assert res == val * np.prod(repeats) + + @pytest.mark.parametrize("use_out", [True, False]) @pytest.mark.parametrize("other", [2, [2, 1, 3, 4, 1, 3]]) @pytest.mark.parametrize( From f84ea1316259bb9a58912f8a06c74e1acde458ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 20 Mar 2024 14:17:10 +0100 Subject: [PATCH 076/980] TYP: Adjust `np.random.integers` and `np.random.randint` --- numpy/random/_generator.pyi | 100 +++++++ numpy/random/mtrand.pyi | 111 +++++++- numpy/typing/tests/data/reveal/random.pyi | 329 +++++++++++----------- 3 files changed, 374 insertions(+), 166 deletions(-) diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 5dc2ebf6c1ef..e6a02b5ad147 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -210,6 +210,7 @@ class Generator: self, low: int, high: None | int = ..., + size: None = ..., ) -> int: ... @overload def integers( # type: ignore[misc] @@ -221,6 +222,15 @@ class Generator: endpoint: bool = ..., ) -> bool: ... @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: type[np.bool] = ..., + endpoint: bool = ..., + ) -> np.bool: ... 
+ @overload def integers( # type: ignore[misc] self, low: int, @@ -230,6 +240,96 @@ class Generator: endpoint: bool = ..., ) -> int: ... @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + endpoint: bool = ..., + ) -> uint8: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + endpoint: bool = ..., + ) -> uint16: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + endpoint: bool = ..., + ) -> uint32: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., + endpoint: bool = ..., + ) -> uint: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + endpoint: bool = ..., + ) -> uint64: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + endpoint: bool = ..., + ) -> int8: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + endpoint: bool = ..., + ) -> int16: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + endpoint: bool = ..., + ) -> int32: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., + endpoint: bool = ..., + ) -> int_: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + endpoint: bool = ..., + ) -> int64: ... + @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index dcbc91292647..5d260b8e5624 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -11,12 +11,14 @@ from numpy import ( int16, int32, int64, + int_, long, - ulong, uint8, uint16, uint32, uint64, + uint, + ulong, ) from numpy.random.bit_generator import BitGenerator from numpy._typing import ( @@ -34,6 +36,7 @@ from numpy._typing import ( _Int16Codes, _Int32Codes, _Int64Codes, + _IntCodes, _LongCodes, _ShapeLike, _SingleCodes, @@ -42,6 +45,7 @@ from numpy._typing import ( _UInt16Codes, _UInt32Codes, _UInt64Codes, + _UIntCodes, _ULongCodes, ) @@ -114,6 +118,7 @@ class RandomState: self, low: int, high: None | int = ..., + size: None = ..., ) -> int: ... @overload def randint( # type: ignore[misc] @@ -124,6 +129,14 @@ class RandomState: dtype: type[bool] = ..., ) -> bool: ... 
@overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: type[np.bool] = ..., + ) -> np.bool: ... + @overload def randint( # type: ignore[misc] self, low: int, @@ -132,6 +145,102 @@ class RandomState: dtype: type[int] = ..., ) -> int: ... @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + ) -> uint8: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + ) -> uint16: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + ) -> uint32: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., + ) -> uint: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., + ) -> ulong: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + ) -> uint64: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + ) -> int8: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + ) -> int16: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + ) -> int32: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., + ) -> int_: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., + ) -> long: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + ) -> int64: ... 
+ @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index 42a24936b903..3074033bfc65 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -530,12 +530,10 @@ assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -# TODO: Commented out tests are currently incorrectly typed as arrays rather -# than scalars. -#assert_type(def_gen.integers(2, dtype=np.bool), np.bool) -#assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) -#assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) -#assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) +assert_type(def_gen.integers(2, dtype=np.bool), np.bool) +assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) +assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) +assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) @@ -549,10 +547,10 @@ I_u1_low_like: list[int] = [0] I_u1_high_open: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) I_u1_high_closed: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) -# assert_type(def_gen.integers(256, dtype="u1"), np.uint8) -# assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) -# assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) -# assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) +assert_type(def_gen.integers(256, dtype="u1"), np.uint8) +assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) +assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) assert_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) @@ -561,10 +559,10 @@ assert_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True), npt.N assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -# assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) -# assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) -# assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) -# assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) +assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) +assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) +assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) assert_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) 
assert_type(def_gen.integers(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) @@ -573,10 +571,10 @@ assert_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True), np assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -# assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) -# assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) -# assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) -# assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) +assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) +assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) +assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) @@ -590,10 +588,10 @@ I_u2_low_like: list[int] = [0] I_u2_high_open: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) I_u2_high_closed: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) -# assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) -# assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) -# assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) -# assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) +assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) +assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) @@ -602,10 +600,10 @@ assert_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True), npt.N assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -# assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) -# assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) -# assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) -# assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) +assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) +assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_low, 
I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) @@ -614,10 +612,10 @@ assert_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True), n assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -# assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) -# assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16) -# assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) -# assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) +assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16) +assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) @@ -631,10 +629,10 @@ I_u4_low_like: list[int] = [0] I_u4_high_open: npt.NDArray[np.uint32] = np.array([4294967295], dtype=np.uint32) I_u4_high_closed: npt.NDArray[np.uint32] = np.array([4294967295], dtype=np.uint32) -# assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) -# assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_) -# assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) -# assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) +assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) +assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_) +assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) +assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) @@ -644,10 +642,10 @@ assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -# assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) -# assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) -# assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) -# assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) +assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) +assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) @@ -656,10 +654,10 @@ 
assert_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True), npt.N assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -# assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) -# assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) -# assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) -# assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) +assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) +assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) @@ -668,10 +666,10 @@ assert_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True), n assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -# assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) -# assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32) -# assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32) -# assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) +assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32) +assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) @@ -680,10 +678,10 @@ assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -# assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) -# assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint) -# assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) -# assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) +assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) +assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint) +assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) +assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) 
assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) @@ -697,10 +695,10 @@ I_u8_low_like: list[int] = [0] I_u8_high_open: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) I_u8_high_closed: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) -# assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), np.uint64) -# assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64) +assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64) assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) @@ -709,10 +707,10 @@ assert_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True), npt.N assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -# assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) -# assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) +assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) @@ -721,10 +719,10 @@ assert_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True), n assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -# assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64) -# assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) +assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(def_gen.integers(0, 
18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) @@ -738,10 +736,10 @@ I_i1_low_like: list[int] = [-128] I_i1_high_open: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) I_i1_high_closed: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) -# assert_type(def_gen.integers(128, dtype="i1"), np.int8) -# assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) -# assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) -# assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8) +assert_type(def_gen.integers(128, dtype="i1"), np.int8) +assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) +assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8) assert_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) @@ -750,10 +748,10 @@ assert_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True), npt.N assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -# assert_type(def_gen.integers(128, dtype="int8"), np.int8) -# assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) -# assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) -# assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) +assert_type(def_gen.integers(128, dtype="int8"), np.int8) +assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) +assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) assert_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) @@ -762,10 +760,10 @@ assert_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True), npt assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -# assert_type(def_gen.integers(128, dtype=np.int8), np.int8) -# assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) -# assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) -# assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8) +assert_type(def_gen.integers(128, dtype=np.int8), np.int8) +assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) +assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) 
+assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8) assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) @@ -779,10 +777,10 @@ I_i2_low_like: list[int] = [-32768] I_i2_high_open: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) I_i2_high_closed: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) -# assert_type(def_gen.integers(32768, dtype="i2"), np.int16) -# assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) -# assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16) -# assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) +assert_type(def_gen.integers(32768, dtype="i2"), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) +assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) @@ -791,10 +789,10 @@ assert_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True), npt.N assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -# assert_type(def_gen.integers(32768, dtype="int16"), np.int16) -# assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) -# assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) -# assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) +assert_type(def_gen.integers(32768, dtype="int16"), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) +assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) @@ -803,10 +801,10 @@ assert_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True), np assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -# assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) -# assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) -# assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), np.int16) -# assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16) +assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) +assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), 
np.int16) assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) @@ -820,10 +818,10 @@ I_i4_low_like: list[int] = [-2147483648] I_i4_high_open: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) I_i4_high_closed: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) -# assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) -# assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) +assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) +assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) @@ -832,10 +830,10 @@ assert_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True), npt.N assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -# assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32) -# assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) +assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32) +assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) @@ -844,10 +842,10 @@ assert_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True), np assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -# assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) -# assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) +assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) 
+assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) @@ -861,10 +859,10 @@ I_i8_low_like: list[int] = [-9223372036854775808] I_i8_high_open: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) I_i8_high_closed: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) -# assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) -# assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) +assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) @@ -873,10 +871,10 @@ assert_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True), npt.N assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -# assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) -# assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) +assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) @@ -885,10 +883,10 @@ assert_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True), np assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -# assert_type(def_gen.integers(9223372036854775808, 
dtype=np.int64), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) -# assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) +assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) @@ -1324,163 +1322,164 @@ assert_type(random_st.randint(I_bool_high_open, dtype=bool), npt.NDArray[np.bool assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -# TODO: Commented out type incorrectly indicates an array return: -# assert_type(random_st.randint(2, dtype=np.bool), np.bool) -# assert_type(random_st.randint(0, 2, dtype=np.bool), np.bool) +assert_type(random_st.randint(2, dtype=np.bool), np.bool) +assert_type(random_st.randint(0, 2, dtype=np.bool), np.bool) assert_type(random_st.randint(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -# assert_type(random_st.randint(256, dtype="u1"), np.uint16) -# assert_type(random_st.randint(0, 256, dtype="u1"), np.uint16) +assert_type(random_st.randint(256, dtype="u1"), np.uint8) +assert_type(random_st.randint(0, 256, dtype="u1"), np.uint8) assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -# assert_type(random_st.randint(256, dtype="uint8"), np.uint16) -# assert_type(random_st.randint(0, 256, dtype="uint8"), np.uint16) +assert_type(random_st.randint(256, dtype="uint8"), np.uint8) +assert_type(random_st.randint(0, 256, dtype="uint8"), np.uint8) assert_type(random_st.randint(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -# assert_type(random_st.randint(256, dtype=np.uint8), np.uint16) -# assert_type(random_st.randint(0, 256, dtype=np.uint8), np.uint16) +assert_type(random_st.randint(256, dtype=np.uint8), np.uint8) +assert_type(random_st.randint(0, 256, dtype=np.uint8), np.uint8) assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -# assert_type(random_st.randint(65536, dtype="u2"), np.uint16) -# 
assert_type(random_st.randint(0, 65536, dtype="u2"), np.uint16) +assert_type(random_st.randint(65536, dtype="u2"), np.uint16) +assert_type(random_st.randint(0, 65536, dtype="u2"), np.uint16) assert_type(random_st.randint(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -# assert_type(random_st.randint(65536, dtype="uint16"), np.uint16) -# assert_type(random_st.randint(0, 65536, dtype="uint16"), np.uint16) +assert_type(random_st.randint(65536, dtype="uint16"), np.uint16) +assert_type(random_st.randint(0, 65536, dtype="uint16"), np.uint16) assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -# assert_type(random_st.randint(65536, dtype=np.uint16), np.uint16) -# assert_type(random_st.randint(0, 65536, dtype=np.uint16), np.uint16) +assert_type(random_st.randint(65536, dtype=np.uint16), np.uint16) +assert_type(random_st.randint(0, 65536, dtype=np.uint16), np.uint16) assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -# assert_type(random_st.randint(4294967296, dtype="u4"), np.uint32) -# assert_type(random_st.randint(0, 4294967296, dtype="u4"), np.uint32) +assert_type(random_st.randint(4294967296, dtype="u4"), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype="u4"), np.uint32) assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) assert_type(random_st.randint(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -# assert_type(random_st.randint(4294967296, dtype="uint32"), np.uint32) -# assert_type(random_st.randint(0, 4294967296, dtype="uint32"), np.uint32) +assert_type(random_st.randint(4294967296, dtype="uint32"), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype="uint32"), np.uint32) assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -# assert_type(random_st.randint(4294967296, dtype=np.uint32), np.uint32) -# assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), np.uint32) +assert_type(random_st.randint(4294967296, dtype=np.uint32), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), np.uint32) assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -# assert_type(random_st.randint(4294967296, dtype=np.uint), np.uint) -# assert_type(random_st.randint(0, 4294967296, dtype=np.uint), np.uint) +assert_type(random_st.randint(4294967296, dtype=np.uint), np.uint) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint), np.uint) 
assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -# assert_type(random_st.randint(18446744073709551616, dtype="u8"), np.uint64) -# assert_type(random_st.randint(0, 18446744073709551616, dtype="u8"), np.uint64) +assert_type(random_st.randint(18446744073709551616, dtype="u8"), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype="u8"), np.uint64) assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -# assert_type(random_st.randint(18446744073709551616, dtype="uint64"), np.uint64) -# assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), np.uint64) +assert_type(random_st.randint(18446744073709551616, dtype="uint64"), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), np.uint64) assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -# assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), np.uint64) -# assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), np.uint64) assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -# assert_type(random_st.randint(128, dtype="i1"), np.int8) -# assert_type(random_st.randint(-128, 128, dtype="i1"), np.int8) +assert_type(random_st.randint(128, dtype="i1"), np.int8) +assert_type(random_st.randint(-128, 128, dtype="i1"), np.int8) assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -# assert_type(random_st.randint(128, dtype="int8"), np.int8) -# assert_type(random_st.randint(-128, 128, dtype="int8"), np.int8) +assert_type(random_st.randint(128, dtype="int8"), np.int8) +assert_type(random_st.randint(-128, 128, dtype="int8"), np.int8) assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -# assert_type(random_st.randint(128, dtype=np.int8), np.int8) -# assert_type(random_st.randint(-128, 128, dtype=np.int8), np.int8) +assert_type(random_st.randint(128, dtype=np.int8), np.int8) +assert_type(random_st.randint(-128, 128, dtype=np.int8), np.int8) assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) 
assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -# assert_type(random_st.randint(32768, dtype="i2"), np.int16) -# assert_type(random_st.randint(-32768, 32768, dtype="i2"), np.int16) +assert_type(random_st.randint(32768, dtype="i2"), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype="i2"), np.int16) assert_type(random_st.randint(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -# assert_type(random_st.randint(32768, dtype="int16"), np.int16) -# assert_type(random_st.randint(-32768, 32768, dtype="int16"), np.int16) + +assert_type(random_st.randint(32768, dtype="int16"), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype="int16"), np.int16) assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -# assert_type(random_st.randint(32768, dtype=np.int16), np.int16) -# assert_type(random_st.randint(-32768, 32768, dtype=np.int16), np.int16) + +assert_type(random_st.randint(32768, dtype=np.int16), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype=np.int16), np.int16) assert_type(random_st.randint(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -# assert_type(random_st.randint(2147483648, dtype="i4"), np.int32) -# assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), np.int32) +assert_type(random_st.randint(2147483648, dtype="i4"), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), np.int32) assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -# assert_type(random_st.randint(2147483648, dtype="int32"), np.int32) -# assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), np.int32) +assert_type(random_st.randint(2147483648, dtype="int32"), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), np.int32) assert_type(random_st.randint(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -# assert_type(random_st.randint(2147483648, dtype=np.int32), np.int32) -# assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), np.int32) +assert_type(random_st.randint(2147483648, dtype=np.int32), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), np.int32) assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -# assert_type(random_st.randint(2147483648, dtype=np.int_), np.int_) -# 
assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_), np.int_) +assert_type(random_st.randint(2147483648, dtype=np.int_), np.int_) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_), np.int_) assert_type(random_st.randint(I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -# assert_type(random_st.randint(9223372036854775808, dtype="i8"), np.int64) -# assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) +assert_type(random_st.randint(9223372036854775808, dtype="i8"), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) assert_type(random_st.randint(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -# assert_type(random_st.randint(9223372036854775808, dtype="int64"), np.int64) -# assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) +assert_type(random_st.randint(9223372036854775808, dtype="int64"), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) assert_type(random_st.randint(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -# assert_type(random_st.randint(9223372036854775808, dtype=np.int64), np.int64) -# assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) +assert_type(random_st.randint(9223372036854775808, dtype=np.int64), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) assert_type(random_st.randint(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) From e42437cb0562e8c85816f8a5217293b35971f8d3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Mar 2024 17:41:23 +0000 Subject: [PATCH 077/980] MAINT: Bump actions/dependency-review-action from 4.1.3 to 4.2.3 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.1.3 to 4.2.3. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/9129d7d40b8c12c1ed0f60400d00c92d437adcce...0fa40c3c10055986a88de3baa0d6ec17c5a894b3) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot]
---
 .github/workflows/dependency-review.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
index d18b1a0b18ef..f55dacad957d 100644
--- a/.github/workflows/dependency-review.yml
+++ b/.github/workflows/dependency-review.yml
@@ -17,4 +17,4 @@ jobs:
     - name: 'Checkout Repository'
       uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
     - name: 'Dependency Review'
-      uses: actions/dependency-review-action@9129d7d40b8c12c1ed0f60400d00c92d437adcce # v4.1.3
+      uses: actions/dependency-review-action@0fa40c3c10055986a88de3baa0d6ec17c5a894b3 # v4.2.3

From 6bac54efcb5a4b6b099607f6b4372b5f20e3e715 Mon Sep 17 00:00:00 2001
From: Yevhen Amelin
Date: Thu, 21 Mar 2024 18:01:25 +0200
Subject: [PATCH 078/980] DOC: fix typo in
 doc/source/user/absolute_beginners.rst (#26099)

---
 doc/source/user/absolute_beginners.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst
index 93b009628571..f1804b20bea3 100644
--- a/doc/source/user/absolute_beginners.rst
+++ b/doc/source/user/absolute_beginners.rst
@@ -97,7 +97,7 @@ array".
 Most NumPy arrays have some restrictions. For instance:
 
 - All elements of the array must be of the same type of data.
-- Once created, the total size of the the array can't change.
+- Once created, the total size of the array can't change.
 - The shape must be "rectangular", not "jagged"; e.g., each row of a
   two-dimensional array must have the same number of columns.

From 83867fb04279cb81e8bf2fe23c2db597231cf70b Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Thu, 21 Mar 2024 20:55:54 +0100
Subject: [PATCH 079/980] API: Default to hidden visibility for API tables

This adds a new ``NPY_API_SYMBOL_ATTRIBUTE`` but defaults to using hidden
visibility (always the case on windows!).

That actually makes the situation "worse" for ``eigenpy`` to some degree,
since it forces them to adapt, but it also allows them to decide to just
roll with it (by actually exporting the table on windows as well).

Since it aligns windows and linux, it seems like a good idea?  OTOH, who
knows how many projects get away with just not caring about windows...

I tried to reorganize the docs a bit on how to import the array API...
---
 doc/source/numpy_2_0_migration_guide.rst      |  15 ++
 doc/source/reference/c-api/array.rst          | 189 +++++++++++++-----
 doc/source/release/2.0.0-notes.rst            |  11 +
 .../code_generators/generate_numpy_api.py     |  13 +-
 .../code_generators/generate_ufunc_api.py     |   9 +-
 5 files changed, 181 insertions(+), 56 deletions(-)

diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst
index 4a569d612bf4..73a8497dffb0 100644
--- a/doc/source/numpy_2_0_migration_guide.rst
+++ b/doc/source/numpy_2_0_migration_guide.rst
@@ -221,6 +221,21 @@ using the NumPy types. You can still write cython code using the ``c.real`` and
 ``c.imag`` attributes (using the native typedefs), but you can no longer use
 in-place operators ``c.imag += 1`` in Cython's c++ mode.
 
+.. _api-table-visibility-change:
+
+Changes in table visibility (linking error)
+-------------------------------------------
+If you are experiencing linking errors related to ``PyArray_API`` or
+``PyArray_RUNTIME_VERSION``, it is because NumPy changed the default
+visibility of the API table to hidden (which was always the case on windows).
+You can use the :c:macro:`NPY_API_SYMBOL_ATTRIBUTE` to opt out of this change.
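+
+A minimal sketch of the opt-out (the empty define restores the NumPy 1.x
+behavior of a shared, visible table and must come before the first NumPy
+include in every file that includes the NumPy headers):
+
+.. code-block:: c
+
+    /* Empty define: restores NumPy 1.x symbol visibility (use with care). */
+    #define NPY_API_SYMBOL_ATTRIBUTE
+    #include "numpy/ndarrayobject.h"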
+However, we generally discourage linking across project boundaries because
+it breaks NumPy compatibility checks.
+
+If you are experiencing problems due to an upstream header including NumPy,
+the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before
+their header and import NumPy yourself based on :ref:`_including-the-c-api`.
+
 
 Changes to namespaces
 =====================
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 447871e644cf..3ab7a547fe68 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -3814,13 +3814,118 @@ Other conversions
    in the *vals* array. The sequence can be smaller then *maxvals* as the
    number of converted objects is returned.
 
+.. _including-the-c-api:
 
-Miscellaneous
--------------
+Including and importing the C API
+---------------------------------
+To use the NumPy C-API you typically need to include the
+``numpy/ndarrayobject.h`` header, and ``numpy/ufuncobject.h`` for some
+ufunc-related functionality (``arrayobject.h`` is an alias for
+``ndarrayobject.h``).
 
-Importing the API
-~~~~~~~~~~~~~~~~~
+These two headers export most relevant functionality.  In general, any
+project which uses the NumPy API must import NumPy using one of the
+functions ``PyArray_ImportNumPyAPI()`` or ``import_array()``.
+In some places, the functionality which requires ``import_array()`` is not
+needed, because you only need type definitions.  In this case, it is
+sufficient to include ``numpy/ndarraytypes.h``.
+
+For the typical Python project, multiple C or C++ files will be compiled into
+a single shared object (the Python C-module) and ``PyArray_ImportNumPyAPI()``
+should be called inside its module initialization.
+
+When you have a single C-file, this will consist of::
+
+.. code-block:: c
+
+    #include "numpy/ndarrayobject.h"
+
+    PyMODINIT_FUNC PyInit_my_module(void)
+    {
+        if (PyArray_ImportNumPyAPI() < 0) {
+            return NULL;
+        }
+        /* Other initialization code. */
+    }
+
+However, most projects will have additional C files which are all
+linked together into a single Python module.
+In this case, the helper C files typically do not have a canonical place
+where ``PyArray_ImportNumPyAPI`` should be called (although it is OK and
+fast to call it often).
+
+To solve this, NumPy provides the following pattern, where the main
+file is modified to define ``PY_ARRAY_UNIQUE_SYMBOL`` before the include:
+
+.. code-block:: c
+    /* Main module file */
+    #define PY_ARRAY_UNIQUE_SYMBOL MyModule
+    #include "numpy/ndarrayobject.h"
+
+    PyMODINIT_FUNC PyInit_my_module(void)
+    {
+        if (PyArray_ImportNumPyAPI() < 0) {
+            return NULL;
+        }
+        /* Other initialization code. */
+    }
+
+while the other files use:
+
+.. code-block:: C
+
+    /* Second file without any import */
+    #define NO_IMPORT_ARRAY
+    #define PY_ARRAY_UNIQUE_SYMBOL MyModule
+    #include "numpy/ndarrayobject.h"
+
+You can of course add the defines to a local header used throughout.
+You just have to make sure that the main file does *not* define
+``NO_IMPORT_ARRAY``.
+
+For ``numpy/ufuncobject.h`` the same logic applies, but the unique symbol
+mechanism is ``#define PY_UFUNC_UNIQUE_SYMBOL`` (both can match).
+
+Additionally, you will probably wish to add a
+``#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION``
+to avoid warnings about possible use of the old API.
+
+.. note::
+    If you are experiencing access violations, make sure that the NumPy API
+    was properly imported and the symbol ``PyArray_API`` is not ``NULL``.
+    When in a debugger, this symbol's actual name will be
+    ``PY_ARRAY_UNIQUE_SYMBOL``+``PyArray_API``, so for example
+    ``MyModulePyArray_API`` in the above.
+    (E.g. even a ``printf("%p\n", PyArray_API);`` just before the crash
+    can help.)
+
+
+Mechanism details and dynamic linking
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The main part of the mechanism is that NumPy needs to define a
+``void **PyArray_API`` table for you to look up all functions.
+This takes different routes depending on whether :c:macro:`NO_IMPORT_ARRAY`
+and :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` are defined:
+
+* If neither is defined, the C-API is declared as
+  ``static void **PyArray_API``, so it is only visible within the
+  compilation unit/file that includes ``numpy/arrayobject.h``.
+* If only ``PY_ARRAY_UNIQUE_SYMBOL`` is defined (it could be empty) then
+  it is declared as a non-static ``void **``, allowing it to be used
+  by other files which are linked.
+* If ``NO_IMPORT_ARRAY`` is defined, the table is declared as
+  ``extern void **``, meaning that it must be linked to a file which does not
+  use ``NO_IMPORT_ARRAY`.
+
+The ``PY_ARRAY_UNIQUE_SYMBOL`` mechanism additionally mangles the names to
+avoid conflicts.
+
+
+.. versionchanged::
+    The NumPy 2.0 headers declare the table with hidden visibility, so it
+    is not shared outside of a single shared object/dll (this was always
+    the case on Windows).
+    Please see :c:macro:`NPY_API_SYMBOL_ATTRIBUTE` for details.
 
 In order to make use of the C-API from another extension module, the
 :c:func:`import_array` function must be called. If the extension module is
@@ -3844,61 +3949,45 @@ the C-API is needed then some additional steps must be taken.
    module that will make use of the C-API. It imports the module where the
    function-pointer table is stored and points the correct variable to it.
+   This macro includes a ``return NULL;`` on error, so that
+   ``PyArray_ImportNumPyAPI()`` is preferable for custom error checking.
+   You may also see use of ``_import_array()`` (a function, not
+   a macro, but you may want to raise a better error if it fails) and
+   the variations ``import_array1(ret)`` which customizes the return value.
 
 .. c:macro:: PY_ARRAY_UNIQUE_SYMBOL
 
-.. c:macro:: NO_IMPORT_ARRAY
+.. c:macro:: NPY_API_SYMBOL_ATTRIBUTE
 
-   Using these #defines you can use the C-API in multiple files for a
-   single extension module. In each file you must define
-   :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` to some name that will hold the
-   C-API (*e.g.* myextension_ARRAY_API). This must be done **before**
-   including the numpy/arrayobject.h file. In the module
-   initialization routine you call :c:func:`import_array`. In addition,
-   in the files that do not have the module initialization
-   sub_routine define :c:macro:`NO_IMPORT_ARRAY` prior to including
-   numpy/arrayobject.h.
-
-   Suppose I have two files coolmodule.c and coolhelper.c which need
-   to be compiled and linked into a single extension module. Suppose
-   coolmodule.c contains the required initcool module initialization
-   function (with the import_array() function called). Then,
-   coolmodule.c would have at the top:
+   .. versionadded:: 2.0
 
-   .. code-block:: c
+   An additional attribute macro which can be used e.g. to share
+   visibility beyond shared object boundaries.
+   By default, NumPy adds the C visibility hidden attribute (if available):
+   ``void __attribute__((visibility("hidden"))) **PyArray_API;``.
+   You can change this by defining ``NPY_API_SYMBOL_ATTRIBUTE``, which will
+   make this:
+   ``void NPY_API_SYMBOL_ATTRIBUTE **PyArray_API;`` (with additional
+   name mangling via the unique symbol).
 
-      #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API
-      #include numpy/arrayobject.h
+   Adding an empty ``#define NPY_API_SYMBOL_ATTRIBUTE`` will have the same
+   behavior as NumPy 1.x.
 
-   On the other hand, coolhelper.c would contain at the top:
+   .. note::
+       Windows never had shared visibility, although you can use this macro
+       to achieve it. We generally discourage sharing the table across
+       shared object boundaries, since importing the array API includes
+       NumPy version checks.
+
+.. c:macro:: NO_IMPORT_ARRAY
+
+   Defining ``NO_IMPORT_ARRAY`` before the ``ndarrayobject.h`` include
+   indicates that the NumPy C API import is handled in a different file
+   and the import machinery will not be added here.
+   You must have one file without ``NO_IMPORT_ARRAY`` defined.
 
-   .. code-block:: c
 
-      #define NO_IMPORT_ARRAY
-      #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API
-      #include numpy/arrayobject.h
-
-   You can also put the common two last lines into an extension-local
-   header file as long as you make sure that NO_IMPORT_ARRAY is
-   #defined before #including that file.
-
-   Internally, these #defines work as follows:
-
-   * If neither is defined, the C-API is declared to be
-     ``static void**``, so it is only visible within the
-     compilation unit that #includes numpy/arrayobject.h.
-   * If :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is #defined, but
-     :c:macro:`NO_IMPORT_ARRAY` is not, the C-API is declared to
-     be ``void**``, so that it will also be visible to other
-     compilation units.
-   * If :c:macro:`NO_IMPORT_ARRAY` is #defined, regardless of
-     whether :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is, the C-API is
-     declared to be ``extern void**``, so it is expected to
-     be defined in another compilation unit.
-   * Whenever :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is #defined, it
-     also changes the name of the variable holding the C-API, which
-     defaults to ``PyArray_API``, to whatever the macro is
-     #defined to.
+Miscellaneous
+-------------
 
 Checking the API Version
 ~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst
index 216c38bb1538..1334b58ea742 100644
--- a/doc/source/release/2.0.0-notes.rst
+++ b/doc/source/release/2.0.0-notes.rst
@@ -655,6 +655,17 @@ Please see :ref:`migration_c_descr` for more information.
 
 (`gh-25943 <https://github.com/numpy/numpy/pull/25943>`__)
 
+API symbols now hidden but customizable
+---------------------------------------
+NumPy now defaults to hiding the API symbols it adds to enable use of the
+NumPy API.
+This means that by default you cannot dynamically fetch the NumPy API from
+another library (this was never possible on windows).
+Please see :ref:`api-table-visibility-change` and the new
+:c:macro:`NPY_API_SYMBOL_ATTRIBUTE`, which allows you to customize or
+revert this.
+
+(`gh-26103 <https://github.com/numpy/numpy/pull/26103>`__)
+
 NumPy 2.0 C API removals
 ========================
 
diff --git a/numpy/_core/code_generators/generate_numpy_api.py b/numpy/_core/code_generators/generate_numpy_api.py
index d69725e581aa..4d63a5fa6941 100644
--- a/numpy/_core/code_generators/generate_numpy_api.py
+++ b/numpy/_core/code_generators/generate_numpy_api.py
@@ -33,13 +33,18 @@
 _NPY_VERSION_CONCAT_HELPER(PY_ARRAY_UNIQUE_SYMBOL)
 #endif
 
+/* By default do not export API in an .so (was never the case on windows) */
+#ifndef NPY_API_SYMBOL_ATTRIBUTE
+    #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN
+#endif
+
 #if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
-extern void **PyArray_API;
-extern int PyArray_RUNTIME_VERSION;
+extern NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API;
+extern NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION;
 #else
 #if defined(PY_ARRAY_UNIQUE_SYMBOL)
-void **PyArray_API;
-int PyArray_RUNTIME_VERSION;
+NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API;
+NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION;
 #else
 static void **PyArray_API = NULL;
 static int PyArray_RUNTIME_VERSION = 0;
diff --git a/numpy/_core/code_generators/generate_ufunc_api.py b/numpy/_core/code_generators/generate_ufunc_api.py
index 2acced5d5619..4bdbbdb9abac 100644
--- a/numpy/_core/code_generators/generate_ufunc_api.py
+++ b/numpy/_core/code_generators/generate_ufunc_api.py
@@ -18,11 +18,16 @@
 #define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL
 #endif
 
+/* By default do not export API in an .so (was never the case on windows) */
+#ifndef NPY_API_SYMBOL_ATTRIBUTE
+    #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN
+#endif
+
 #if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC)
-extern void **PyUFunc_API;
+extern NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API;
 #else
 #if defined(PY_UFUNC_UNIQUE_SYMBOL)
-void **PyUFunc_API;
+NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API;
 #else
 static void **PyUFunc_API=NULL;
 #endif

From 3ac9316859b5e9d9b5ae97f06c18f6e626b3ffca Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Thu, 21 Mar 2024 14:35:07 -0600
Subject: [PATCH 080/980] MNT: install all-string promoter for multiply

---
 numpy/_core/src/umath/stringdtype_ufuncs.cpp | 12 ++++++++++++
 numpy/_core/tests/test_stringdtype.py        |  6 ++++++
 2 files changed, 18 insertions(+)

diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp
index d78e756d62b9..467770eca8fd 100644
--- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp
+++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp
@@ -2339,6 +2339,18 @@ init_stringdtype_ufuncs(PyObject *umath)
     INIT_MULTIPLY(Int64, int64);
     INIT_MULTIPLY(UInt64, uint64);
 
+    // This is needed so the generic promoters defined after this don't match
+    // for np.multiply(string_array, string_array)
+
+    PyArray_DTypeMeta *hdtypes[] = {
+        &PyArray_StringDType,
+        &PyArray_StringDType,
+        &PyArray_StringDType};
+
+    if (add_promoter(umath, "multiply", hdtypes, 3, string_multiply_promoter) < 0) {
+        return -1;
+    }
+
     // all other integer dtypes are handled with a generic promoter
 
     PyArray_DTypeMeta *rdtypes[] = {
diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py
index de41506f653a..dc646ddd427a 100644
--- a/numpy/_core/tests/test_stringdtype.py
+++ b/numpy/_core/tests/test_stringdtype.py
@@ -765,6 +765,12 @@ def test_multiply_reduce():
     assert res == val * np.prod(repeats)
 
 
+def test_multiply_two_string_raises():
+    arr = np.array(["hello", "world"])
+    with pytest.raises(np._core._exceptions._UFuncNoLoopError):
+        np.multiply(arr, arr)
+
+
 @pytest.mark.parametrize("use_out", [True, False])
 @pytest.mark.parametrize("other", [2, [2, 1, 3, 4, 1, 3]])
 @pytest.mark.parametrize(

From aa3a1834f1d62093c2fb41ce313c8724cdea5b4a Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Fri, 22 Mar 2024 10:40:59 +0100
Subject: [PATCH 081/980] DOC: Fixup formatting and stray space

---
 doc/source/reference/c-api/array.rst | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 3ab7a547fe68..2b17ecee38ea 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -3858,6 +3858,7 @@ To solve this, NumPy provides the following pattern, where the main
 file is modified to define ``PY_ARRAY_UNIQUE_SYMBOL`` before the include:
 
 .. code-block:: c
+
     /* Main module file */
     #define PY_ARRAY_UNIQUE_SYMBOL MyModule
     #include "numpy/ndarrayobject.h"
@@ -3953,7 +3954,7 @@ the C-API is needed then some additional steps must be taken.
    ``PyArray_ImportNumPyAPI()`` is preferable for custom error checking.
    You may also see use of ``_import_array()`` (a function, not
    a macro, but you may want to raise a better error if it fails) and
-   the variations ``import_array1(ret)`` which customizes the return value. 
+   the variations ``import_array1(ret)`` which customizes the return value.

From 9b48d1bbc9bee94c8c304755069af8ff8068b7ed Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Fri, 22 Mar 2024 11:30:49 +0100
Subject: [PATCH 082/980] DOC: Fix incorrect link

---
 doc/source/numpy_2_0_migration_guide.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst
index 73a8497dffb0..c2f1f0e9ba8b 100644
--- a/doc/source/numpy_2_0_migration_guide.rst
+++ b/doc/source/numpy_2_0_migration_guide.rst
@@ -234,7 +234,7 @@ it breaks NumPy compatibility checks.
 
 If you are experiencing problems due to an upstream header including NumPy,
 the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before
-their header and import NumPy yourself based on :ref:`_including-the-c-api`.
+their header and import NumPy yourself based on :ref:`including-the-c-api`.
 
 
 Changes to namespaces

From b48d3bfeb84fa0fba42ac1165a3e21ce0f8f8924 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Fri, 22 Mar 2024 12:09:14 +0100
Subject: [PATCH 083/980] DOC: Two more fixes...

---
 doc/source/reference/c-api/array.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 2b17ecee38ea..dee12ed86bf6 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -3834,7 +3834,7 @@ For the typical Python project, multiple C or C++ files will be compiled into
 a single shared object (the Python C-module) and ``PyArray_ImportNumPyAPI()``
 should be called inside its module initialization.
 
-When you have a single C-file, this will consist of::
+When you have a single C-file, this will consist of:
 
 .. code-block:: c
 
@@ -3917,7 +3917,7 @@ are defined:
   by other files which are linked.
 * If ``NO_IMPORT_ARRAY`` is defined, the table is declared as
   ``extern void **``, meaning that it must be linked to a file which does not
-  use ``NO_IMPORT_ARRAY`.
+  use ``NO_IMPORT_ARRAY``.
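As an aside, the "local header" variant that the new documentation mentions
(one shared header instead of repeating the defines in every file) could look
roughly like this; ``mymodule_numpy.h``, ``MYMODULE_NUMPY_H``,
``MyModule_ARRAY_API``, and ``MYMODULE_MAIN_FILE`` are hypothetical names,
not part of the NumPy API:

.. code-block:: c

    /* mymodule_numpy.h -- include this instead of numpy/ndarrayobject.h */
    #ifndef MYMODULE_NUMPY_H
    #define MYMODULE_NUMPY_H
    /* All files of the module share one mangled table symbol... */
    #define PY_ARRAY_UNIQUE_SYMBOL MyModule_ARRAY_API
    /* ...but only the main module file defines the table itself. */
    #ifndef MYMODULE_MAIN_FILE
    #define NO_IMPORT_ARRAY
    #endif
    #include "numpy/ndarrayobject.h"
    #endif

The main file would ``#define MYMODULE_MAIN_FILE`` before including this
header and call ``PyArray_ImportNumPyAPI()`` in its module initialization;
every other file includes the header unchanged.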
From cbfb2eaeb558c3fdb490e8b8a1f2a9fc63c35e55 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 22 Mar 2024 14:32:49 +0100 Subject: [PATCH 084/980] MAINT: Remove unnecessarily defensive code from dlpack deleter This is a tp_dealloc, they don't need to care about errors. Even the assert is absurdly defensive: Python will already set a SystemError anyway! --- numpy/_core/src/multiarray/dlpack.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index d26701df8c57..5029e87e87b2 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -38,28 +38,19 @@ static void dlpack_capsule_deleter(PyObject *self) { return; } - /* an exception may be in-flight, we must save it in case we create another one */ - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); - DLManagedTensor *managed = (DLManagedTensor *)PyCapsule_GetPointer(self, NPY_DLPACK_CAPSULE_NAME); if (managed == NULL) { PyErr_WriteUnraisable(self); - goto done; + return; } /* - * the spec says the deleter can be NULL if there is no way for the caller + * The spec says the deleter can be NULL if there is no way for the caller * to provide a reasonable destructor. */ if (managed->deleter) { managed->deleter(managed); - /* TODO: is the deleter allowed to set a python exception? */ - assert(!PyErr_Occurred()); } - -done: - PyErr_Restore(type, value, traceback); } /* used internally, almost identical to dlpack_capsule_deleter() */ From 65afaf8603da46418a2f2e1c408085322c31f91c Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 22 Mar 2024 10:58:17 -0600 Subject: [PATCH 085/980] TST: fix incorrect dtype in test --- numpy/_core/tests/test_stringdtype.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index dc646ddd427a..e38575dbc0dd 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -766,7 +766,7 @@ def test_multiply_reduce(): def test_multiply_two_string_raises(): - arr = np.array(["hello", "world"]) + arr = np.array(["hello", "world"], dtype="T") with pytest.raises(np._core._exceptions._UFuncNoLoopError): np.multiply(arr, arr) From 0b88f3bbda5366df76abc3ad45ce6c177b1c133d Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Fri, 22 Mar 2024 10:10:39 -0700 Subject: [PATCH 086/980] BLD: do not use -O3 flag for debug build --- numpy/_core/meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 8fc377aeedde..b1b7659b9eb1 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -347,7 +347,7 @@ max_opt = { 'msvc': ['/O2'], 'intel-cl': ['/O3'], }.get(compiler_id, ['-O3']) -max_opt = cc.has_multi_arguments(max_opt) ? max_opt : [] +max_opt = cc.has_multi_arguments(max_opt) and get_option('buildtype') != 'debug' ? max_opt : [] # Optional GCC compiler builtins and their call arguments. 
# If given, a required header and definition name (HAVE_ prepended)
From dcdc91f94a030f6a8fdcf666edf58fefc03c9513 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Fri, 22 Mar 2024 17:46:31 -0600
Subject: [PATCH 087/980] BUG: fix reference counting error in stringdtype
 setup

---
 numpy/_core/src/umath/stringdtype_ufuncs.cpp | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp
index 467770eca8fd..8ed0aa470c78 100644
--- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp
+++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp
@@ -2102,16 +2102,16 @@ add_promoter(PyObject *numpy, const char *ufunc_name,
 
     PyObject *DType_tuple = PyTuple_New(n_dtypes);
 
-    for (size_t i=0; i<n_dtypes; i++) {
From: Marten van Kerkwijk
Date: Fri, 22 Mar 2024 12:12:02 -0400
Subject: [PATCH 088/980] Revert "MNT: install all-string promoter for
 multiply"

This reverts commit 3ac9316859b5e9d9b5ae97f06c18f6e626b3ffca.

---
 numpy/_core/src/umath/stringdtype_ufuncs.cpp | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp
index 8ed0aa470c78..e595b8100a0c 100644
--- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp
+++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp
@@ -2339,18 +2339,6 @@ init_stringdtype_ufuncs(PyObject *umath)
     INIT_MULTIPLY(Int64, int64);
     INIT_MULTIPLY(UInt64, uint64);
 
-    // This is needed so the generic promoters defined after this don't match
-    // for np.multiply(string_array, string_array)
-
-    PyArray_DTypeMeta *hdtypes[] = {
-        &PyArray_StringDType,
-        &PyArray_StringDType,
-        &PyArray_StringDType};
-
-    if (add_promoter(umath, "multiply", hdtypes, 3, string_multiply_promoter) < 0) {
-        return -1;
-    }
-
     // all other integer dtypes are handled with a generic promoter
 
     PyArray_DTypeMeta *rdtypes[] = {
From 84e55dd4cb28394ed8699475d62191a5380cf346 Mon Sep 17 00:00:00 2001
From: Marten van Kerkwijk
Date: Fri, 22 Mar 2024 12:19:15 -0400
Subject: [PATCH 089/980] BUG: fix StringDType multiplication promoters

Ensure that they only promote integers. Also adds back the regression
test.
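For orientation, the user-visible behaviour the multiply patches converge on
looks roughly like the following (illustrative session, not part of the patch;
the exception type is the one asserted by the regression test, and the exact
array repr may differ):

```python
>>> import numpy as np
>>> arr = np.array(["hello", "world"], dtype="T")  # StringDType array
>>> arr * 3  # string * integer: matched by the integer-only promoter
array(['hellohellohello', 'worldworldworld'], dtype=StringDType())
>>> np.multiply(arr, arr)  # string * string: no loop is found
Traceback (most recent call last):
    ...
numpy._core._exceptions._UFuncNoLoopError: ...
```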
--- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index e595b8100a0c..1a96798fedfe 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -2343,7 +2343,7 @@ init_stringdtype_ufuncs(PyObject *umath) PyArray_DTypeMeta *rdtypes[] = { &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, + &PyArray_PyIntAbstractDType, &PyArray_StringDType}; if (add_promoter(umath, "multiply", rdtypes, 3, string_multiply_promoter) < 0) { @@ -2351,7 +2351,7 @@ init_stringdtype_ufuncs(PyObject *umath) } PyArray_DTypeMeta *ldtypes[] = { - (PyArray_DTypeMeta *)Py_None, + &PyArray_PyIntAbstractDType, &PyArray_StringDType, &PyArray_StringDType}; From 6711afc3a228f0d4b85950d2018049c60531f9d0 Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Fri, 22 Mar 2024 16:42:16 -0400 Subject: [PATCH 090/980] ENH: let numerical dtypes inherit from abstract ones --- numpy/_core/src/multiarray/abstractdtypes.c | 12 ++++++------ numpy/_core/src/multiarray/arraytypes.c.src | 7 +++++++ numpy/_core/src/multiarray/dtypemeta.c | 12 +++++++----- numpy/_core/src/multiarray/dtypemeta.h | 4 ++-- numpy/_core/src/multiarray/usertypes.c | 3 ++- 5 files changed, 24 insertions(+), 14 deletions(-) diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c index 3142411b2b61..0049fdcfc0ac 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -85,18 +85,12 @@ discover_descriptor_from_pycomplex( NPY_NO_EXPORT int initialize_and_map_pytypes_to_dtypes() { - ((PyTypeObject *)&PyArray_PyIntAbstractDType)->tp_base = &PyArrayDescr_Type; - PyArray_PyIntAbstractDType.scalar_type = &PyLong_Type; if (PyType_Ready((PyTypeObject *)&PyArray_PyIntAbstractDType) < 0) { return -1; } - ((PyTypeObject *)&PyArray_PyFloatAbstractDType)->tp_base = &PyArrayDescr_Type; - PyArray_PyFloatAbstractDType.scalar_type = &PyFloat_Type; if (PyType_Ready((PyTypeObject *)&PyArray_PyFloatAbstractDType) < 0) { return -1; } - ((PyTypeObject *)&PyArray_PyComplexAbstractDType)->tp_base = &PyArrayDescr_Type; - PyArray_PyComplexAbstractDType.scalar_type = &PyComplex_Type; if (PyType_Ready((PyTypeObject *)&PyArray_PyComplexAbstractDType) < 0) { return -1; } @@ -285,12 +279,14 @@ NPY_DType_Slots pyintabstractdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyIntAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) .tp_name = "numpy._IntegerAbstractDType", + .tp_base = &PyArrayDescr_Type, .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, .flags = NPY_DT_ABSTRACT, .dt_slots = &pyintabstractdtype_slots, + .scalar_type = &PyLong_Type, }; @@ -303,12 +299,14 @@ NPY_DType_Slots pyfloatabstractdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) .tp_name = "numpy._FloatAbstractDType", + .tp_base = &PyArrayDescr_Type, .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, .flags = NPY_DT_ABSTRACT, .dt_slots = &pyfloatabstractdtype_slots, + .scalar_type = &PyFloat_Type, }; @@ -321,10 +319,12 @@ NPY_DType_Slots pycomplexabstractdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) .tp_name = "numpy._ComplexAbstractDType", + 
.tp_base = &PyArrayDescr_Type, .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, .flags = NPY_DT_ABSTRACT, .dt_slots = &pycomplexabstractdtype_slots, + .scalar_type = &PyComplex_Type, }; diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index c266979c6f6f..404e92471fb2 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -18,6 +18,7 @@ #include "npy_config.h" #include "npy_sort.h" +#include "abstractdtypes.h" #include "common.h" #include "ctors.h" #include "convert_datatype.h" @@ -4351,10 +4352,16 @@ set_typeinfo(PyObject *dict) * CFloat, CDouble, CLongDouble, * Object, String, Unicode, Void, * Datetime, Timedelta# + * #scls = PyArrayDescr_Type, + * PyArray_PyIntAbstractDType*10, + * PyArray_PyFloatAbstractDType*4, + * PyArray_PyComplexAbstractDType*3, + * PyArrayDescr_Type*6 # */ if (dtypemeta_wrap_legacy_descriptor( _builtin_descrs[NPY_@NAME@], &_Py@Name@_ArrFuncs, + (PyTypeObject *)&@scls@, "numpy.dtypes." NPY_@NAME@_Name "DType", #ifdef NPY_@NAME@_alias "numpy.dtypes." NPY_@NAME@_Alias "DType" diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index acee68bad54f..ba897844dd0b 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -1063,8 +1063,9 @@ object_common_dtype( * @returns 0 on success, -1 on failure. */ NPY_NO_EXPORT int -dtypemeta_wrap_legacy_descriptor(_PyArray_LegacyDescr *descr, - PyArray_ArrFuncs *arr_funcs, const char *name, const char *alias) +dtypemeta_wrap_legacy_descriptor( + _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, + PyTypeObject *dtype_super_class, const char *name, const char *alias) { int has_type_set = Py_TYPE(descr) == &PyArrayDescr_Type; @@ -1118,7 +1119,7 @@ dtypemeta_wrap_legacy_descriptor(_PyArray_LegacyDescr *descr, .tp_name = NULL, /* set below */ .tp_basicsize = sizeof(_PyArray_LegacyDescr), .tp_flags = Py_TPFLAGS_DEFAULT, - .tp_base = &PyArrayDescr_Type, + .tp_base = NULL, /* set below */ .tp_new = (newfunc)legacy_dtype_default_new, .tp_doc = ( "DType class corresponding to the scalar type and dtype of " @@ -1131,11 +1132,12 @@ dtypemeta_wrap_legacy_descriptor(_PyArray_LegacyDescr *descr, /* Further fields are not common between DTypes */ }; memcpy(dtype_class, &prototype, sizeof(PyArray_DTypeMeta)); - /* Fix name of the Type*/ + /* Fix name and superclass of the Type*/ ((PyTypeObject *)dtype_class)->tp_name = name; + ((PyTypeObject *)dtype_class)->tp_base = dtype_super_class, dtype_class->dt_slots = dt_slots; - /* Let python finish the initialization (probably unnecessary) */ + /* Let python finish the initialization */ if (PyType_Ready((PyTypeObject *)dtype_class) < 0) { Py_DECREF(dtype_class); return -1; diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index 667f9280eb13..c1cfd050bdf2 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -155,8 +155,8 @@ python_builtins_are_known_scalar_types( NPY_NO_EXPORT int dtypemeta_wrap_legacy_descriptor( - _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, - const char *name, const char *alias); + _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, + PyTypeObject *dtype_super_class, const char *name, const char *alias); NPY_NO_EXPORT void initialize_legacy_dtypemeta_aliases(_PyArray_LegacyDescr **_builtin_descrs); diff --git 
a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 92325247a60c..6b87345eab75 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -306,7 +306,8 @@ PyArray_RegisterDataType(PyArray_DescrProto *descr_proto) descr->type_num = typenum; /* update prototype to notice duplicate registration */ descr_proto->type_num = typenum; - if (dtypemeta_wrap_legacy_descriptor(descr, descr_proto->f, name, NULL) < 0) { + if (dtypemeta_wrap_legacy_descriptor( + descr, descr_proto->f, &PyArrayDescr_Type, name, NULL) < 0) { descr->type_num = -1; NPY_NUMUSERTYPES--; /* Override the type, it might be wrong and then decref crashes */ From 019b7e29742c87146b49c97617363f6656591a82 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 23 Mar 2024 11:02:00 -0400 Subject: [PATCH 091/980] TST: Checks for ufuncs with abstract int promotor --- numpy/_core/tests/test_stringdtype.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index e38575dbc0dd..378d1886d26a 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1252,6 +1252,27 @@ def test_binary(string_array, unicode_array, function_name, args): assert 0 +@pytest.mark.parametrize("function, expected", [ + (np.strings.find, [[2, -1], [1, -1]]), + (np.strings.startswith, [[False, False], [True, False]])]) +@pytest.mark.parametrize("start, stop", [ + (1, 4), + (np.int8(1), np.int8(4)), + (np.array([1, 1], dtype='u2'), np.array([4, 4], dtype='u2'))]) +def test_non_default_start_stop(function, start, stop, expected): + a = np.array([["--🐍--", "--🦜--"], + ["-🐍---", "-🦜---"]], "T") + indx = function(a, "🐍", start, stop) + assert_array_equal(indx, expected) + + +@pytest.mark.parametrize("count", [2, np.int8(2), np.array([2, 2], 'u2')]) +def test_replace_non_default_repeat(count): + a = np.array(["🐍--", "🦜-🦜-"], "T") + result = np.strings.replace(a, "🦜-", "🦜†", count) + assert_array_equal(result, np.array(["🐍--", "🦜†🦜†"], "T")) + + def test_strip_ljust_rjust_consistency(string_array, unicode_array): rjs = np.char.rjust(string_array, 1000) rju = np.char.rjust(unicode_array, 1000) From bf02ee5c175c525be26bf8d41bd17e389e1bf484 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 23 Mar 2024 13:34:15 -0400 Subject: [PATCH 092/980] BUG,ENH: Have true abstract dtypes from which python ones inherit. This ensures comparisons for unequal integer dtype do not return object. 
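Concretely, resolving a comparison between two different integer dtypes now
promotes to the common integer type instead of falling back to object; the new
`test_resolve_dtypes_comparison` below pins this down, roughly (illustrative
session):

```python
>>> import numpy as np
>>> np.equal.resolve_dtypes((np.dtype("i4"), np.dtype("i8"), None))
(dtype('int64'), dtype('int64'), dtype('bool'))
```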
--- numpy/_core/src/multiarray/abstractdtypes.c | 64 ++++++++++++++++---- numpy/_core/src/multiarray/abstractdtypes.h | 3 + numpy/_core/src/multiarray/arraytypes.c.src | 6 +- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 10 +-- numpy/_core/tests/test_ufunc.py | 7 +++ 5 files changed, 70 insertions(+), 20 deletions(-) diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c index 0049fdcfc0ac..d8b460503c85 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -85,6 +85,15 @@ discover_descriptor_from_pycomplex( NPY_NO_EXPORT int initialize_and_map_pytypes_to_dtypes() { + if (PyType_Ready((PyTypeObject *)&PyArray_IntAbstractDType) < 0) { + return -1; + } + if (PyType_Ready((PyTypeObject *)&PyArray_FloatAbstractDType) < 0) { + return -1; + } + if (PyType_Ready((PyTypeObject *)&PyArray_ComplexAbstractDType) < 0) { + return -1; + } if (PyType_Ready((PyTypeObject *)&PyArray_PyIntAbstractDType) < 0) { return -1; } @@ -270,7 +279,18 @@ complex_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) * `Floating`, `Complex`, and `Integer` (both signed and unsigned). * They will have to be renamed and exposed in that capacity. */ -NPY_DType_Slots pyintabstractdtype_slots = { +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_IntAbstractDType = {{{ + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_name = "numpy._IntegerAbstractDType", + .tp_base = &PyArrayDescr_Type, + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + },}, + .type_num = -1, + .flags = NPY_DT_ABSTRACT, +}; + +NPY_DType_Slots pyintdtype_slots = { .discover_descr_from_pyobject = discover_descriptor_from_pyint, .default_descr = int_default_descriptor, .common_dtype = int_common_dtype, @@ -278,19 +298,29 @@ NPY_DType_Slots pyintabstractdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyIntAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._IntegerAbstractDType", - .tp_base = &PyArrayDescr_Type, + .tp_name = "numpy._PythonIntegerDType", + .tp_base = (PyTypeObject *)&PyArray_IntAbstractDType, .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, .flags = NPY_DT_ABSTRACT, - .dt_slots = &pyintabstractdtype_slots, + .dt_slots = &pyintdtype_slots, .scalar_type = &PyLong_Type, }; +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_FloatAbstractDType = {{{ + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_name = "numpy._FloatAbstractDType", + .tp_base = &PyArrayDescr_Type, + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + },}, + .type_num = -1, + .flags = NPY_DT_ABSTRACT, +}; -NPY_DType_Slots pyfloatabstractdtype_slots = { +NPY_DType_Slots pyfloatdtype_slots = { .discover_descr_from_pyobject = discover_descriptor_from_pyfloat, .default_descr = float_default_descriptor, .common_dtype = float_common_dtype, @@ -298,19 +328,29 @@ NPY_DType_Slots pyfloatabstractdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._FloatAbstractDType", - .tp_base = &PyArrayDescr_Type, + .tp_name = "numpy._PythonFloatDType", + .tp_base = (PyTypeObject *)&PyArray_FloatAbstractDType, .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, .flags = NPY_DT_ABSTRACT, - .dt_slots = &pyfloatabstractdtype_slots, + .dt_slots = &pyfloatdtype_slots, .scalar_type = &PyFloat_Type, }; +NPY_NO_EXPORT PyArray_DTypeMeta 
PyArray_ComplexAbstractDType = {{{ + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_name = "numpy._ComplexAbstractDType", + .tp_base = &PyArrayDescr_Type, + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + },}, + .type_num = -1, + .flags = NPY_DT_ABSTRACT, +}; -NPY_DType_Slots pycomplexabstractdtype_slots = { +NPY_DType_Slots pycomplexdtype_slots = { .discover_descr_from_pyobject = discover_descriptor_from_pycomplex, .default_descr = complex_default_descriptor, .common_dtype = complex_common_dtype, @@ -318,13 +358,13 @@ NPY_DType_Slots pycomplexabstractdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._ComplexAbstractDType", - .tp_base = &PyArrayDescr_Type, + .tp_name = "numpy._PythonComplexDType", + .tp_base = (PyTypeObject *)&PyArray_ComplexAbstractDType, .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, .flags = NPY_DT_ABSTRACT, - .dt_slots = &pycomplexabstractdtype_slots, + .dt_slots = &pycomplexdtype_slots, .scalar_type = &PyComplex_Type, }; diff --git a/numpy/_core/src/multiarray/abstractdtypes.h b/numpy/_core/src/multiarray/abstractdtypes.h index 212994a422ea..573520f26452 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.h +++ b/numpy/_core/src/multiarray/abstractdtypes.h @@ -14,6 +14,9 @@ extern "C" { * may be necessary to make them (partially) public, to allow user-defined * dtypes to perform value based casting. */ +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_IntAbstractDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_FloatAbstractDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_ComplexAbstractDType; NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyIntAbstractDType; NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyFloatAbstractDType; NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyComplexAbstractDType; diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 404e92471fb2..49701bf8499a 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -4353,9 +4353,9 @@ set_typeinfo(PyObject *dict) * Object, String, Unicode, Void, * Datetime, Timedelta# * #scls = PyArrayDescr_Type, - * PyArray_PyIntAbstractDType*10, - * PyArray_PyFloatAbstractDType*4, - * PyArray_PyComplexAbstractDType*3, + * PyArray_IntAbstractDType*10, + * PyArray_FloatAbstractDType*4, + * PyArray_ComplexAbstractDType*3, * PyArrayDescr_Type*6 # */ if (dtypemeta_wrap_legacy_descriptor( diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 1a96798fedfe..c3b9c47c96e1 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -2343,7 +2343,7 @@ init_stringdtype_ufuncs(PyObject *umath) PyArray_DTypeMeta *rdtypes[] = { &PyArray_StringDType, - &PyArray_PyIntAbstractDType, + &PyArray_IntAbstractDType, &PyArray_StringDType}; if (add_promoter(umath, "multiply", rdtypes, 3, string_multiply_promoter) < 0) { @@ -2351,7 +2351,7 @@ init_stringdtype_ufuncs(PyObject *umath) } PyArray_DTypeMeta *ldtypes[] = { - &PyArray_PyIntAbstractDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, &PyArray_StringDType}; @@ -2371,7 +2371,7 @@ init_stringdtype_ufuncs(PyObject *umath) PyArray_DTypeMeta *findlike_promoter_dtypes[] = { &PyArray_StringDType, &PyArray_UnicodeDType, - &PyArray_PyIntAbstractDType, &PyArray_PyIntAbstractDType, + 
&PyArray_IntAbstractDType, &PyArray_IntAbstractDType, &PyArray_DefaultIntDType, }; @@ -2412,7 +2412,7 @@ init_stringdtype_ufuncs(PyObject *umath) PyArray_DTypeMeta *startswith_endswith_promoter_dtypes[] = { &PyArray_StringDType, &PyArray_UnicodeDType, - &PyArray_PyIntAbstractDType, &PyArray_PyIntAbstractDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, &PyArray_BoolDType, }; @@ -2508,7 +2508,7 @@ init_stringdtype_ufuncs(PyObject *umath) PyArray_DTypeMeta *replace_promoter_pyint_dtypes[] = { &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, - &PyArray_PyIntAbstractDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, }; if (add_promoter(umath, "_replace", replace_promoter_pyint_dtypes, 5, diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 6bdfde016cb2..dfe20bc577a9 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2972,6 +2972,13 @@ def test_resolve_dtypes_basic(self): with pytest.raises(TypeError): np.add.resolve_dtypes((i4, f4, None), casting="no") + def test_resolve_dtypes_comparison(self): + i4 = np.dtype("i4") + i8 = np.dtype("i8") + b = np.dtype("?") + r = np.equal.resolve_dtypes((i4, i8, None)) + assert r == (i8, i8, b) + def test_weird_dtypes(self): S0 = np.dtype("S0") # S0 is often converted by NumPy to S1, but not here: From 6828191e9b74f65fe3f9c1e4546641049ead3686 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 23 Mar 2024 14:55:44 -0400 Subject: [PATCH 093/980] BUG: try to make windows happy --- numpy/_core/src/multiarray/abstractdtypes.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c index d8b460503c85..f92fe51b41cb 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -94,12 +94,21 @@ initialize_and_map_pytypes_to_dtypes() if (PyType_Ready((PyTypeObject *)&PyArray_ComplexAbstractDType) < 0) { return -1; } + ((PyTypeObject *)&PyArray_PyIntAbstractDType)->tp_base = + (PyTypeObject *)&PyArray_IntAbstractDType; + PyArray_PyIntAbstractDType.scalar_type = &PyLong_Type; if (PyType_Ready((PyTypeObject *)&PyArray_PyIntAbstractDType) < 0) { return -1; } + ((PyTypeObject *)&PyArray_PyFloatAbstractDType)->tp_base = + (PyTypeObject *)&PyArray_FloatAbstractDType; + PyArray_PyFloatAbstractDType.scalar_type = &PyFloat_Type; if (PyType_Ready((PyTypeObject *)&PyArray_PyFloatAbstractDType) < 0) { return -1; } + ((PyTypeObject *)&PyArray_PyComplexAbstractDType)->tp_base = + (PyTypeObject *)&PyArray_ComplexAbstractDType; + PyArray_PyComplexAbstractDType.scalar_type = &PyComplex_Type; if (PyType_Ready((PyTypeObject *)&PyArray_PyComplexAbstractDType) < 0) { return -1; } @@ -299,14 +308,12 @@ NPY_DType_Slots pyintdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyIntAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) .tp_name = "numpy._PythonIntegerDType", - .tp_base = (PyTypeObject *)&PyArray_IntAbstractDType, .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, .flags = NPY_DT_ABSTRACT, .dt_slots = &pyintdtype_slots, - .scalar_type = &PyLong_Type, }; NPY_NO_EXPORT PyArray_DTypeMeta PyArray_FloatAbstractDType = {{{ @@ -329,14 +336,12 @@ NPY_DType_Slots pyfloatdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) .tp_name = 
"numpy._PythonFloatDType", - .tp_base = (PyTypeObject *)&PyArray_FloatAbstractDType, .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, .flags = NPY_DT_ABSTRACT, .dt_slots = &pyfloatdtype_slots, - .scalar_type = &PyFloat_Type, }; NPY_NO_EXPORT PyArray_DTypeMeta PyArray_ComplexAbstractDType = {{{ @@ -359,12 +364,10 @@ NPY_DType_Slots pycomplexdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) .tp_name = "numpy._PythonComplexDType", - .tp_base = (PyTypeObject *)&PyArray_ComplexAbstractDType, .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, .flags = NPY_DT_ABSTRACT, .dt_slots = &pycomplexdtype_slots, - .scalar_type = &PyComplex_Type, }; From c9168356e5177fcfbcd9cf8625a3c186b04400bf Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 23 Mar 2024 16:18:05 -0400 Subject: [PATCH 094/980] MAINT: determine scalar input from it being of the scalar type --- numpy/_core/src/umath/ufunc_object.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 334d45fe07a1..665674e54310 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4063,17 +4063,15 @@ resolve_descriptors(int nop, original_dtypes[i] = PyArray_DTYPE(operands[i]); Py_INCREF(original_dtypes[i]); } - if (i < nin - && NPY_DT_is_abstract(signature[i]) - && inputs_tup != NULL) { - /* - * TODO: We may wish to allow any scalar here. Checking for - * abstract assumes this works out for Python scalars, - * which is the important case (especially for now). - * - * One possible check would be `DType->type == type(obj)`. - */ - input_scalars[i] = PyTuple_GET_ITEM(inputs_tup, i); + /* + * Check whether something is a scalar of the given type. + * We leave it to resolve_descriptors_with_scalars to deal + * with, e.g., only doing something special for python scalars. + */ + if (i < nin && inputs_tup != NULL) { + PyObject *input = PyTuple_GET_ITEM(inputs_tup, i); + input_scalars[i] = signature[i]->scalar_type == Py_TYPE(input) ? + input : NULL; } else { input_scalars[i] = NULL; From ea61be78cb48f99474b0e1fccb006bf4ca1d02fc Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 23 Mar 2024 16:31:36 -0400 Subject: [PATCH 095/980] MAINT: default common dtype should only care about types it knows --- numpy/_core/src/multiarray/dtypemeta.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index ba897844dd0b..ecceca2e7d03 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -911,10 +911,12 @@ static PyArray_DTypeMeta * default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) { assert(cls->type_num < NPY_NTYPES_LEGACY); - if (NPY_UNLIKELY(NPY_DT_is_abstract(other))) { + if (NPY_UNLIKELY(!NPY_DT_is_legacy(other))) { /* - * The abstract complex has a lower priority than the concrete inexact - * types to ensure the correct promotion with integers. + * Deal with the non-legacy types we understand: python scalars. + * These have lower priority than the concrete inexact types, but + * can change the type of the result (complex, float, int). + * If our own type if not numerical, signal not implemented. 
 */
         if (other == &PyArray_PyComplexAbstractDType) {
             if (PyTypeNum_ISCOMPLEX(cls->type_num)) {
                 Py_INCREF(cls);
                 return cls;
             }
@@ -947,8 +949,10 @@ default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
                 return cls;
             }
         }
+        Py_INCREF(Py_NotImplemented);
+        return (PyArray_DTypeMeta *)Py_NotImplemented;
     }
-    if (!NPY_DT_is_legacy(other) || other->type_num > cls->type_num) {
+    if (other->type_num > cls->type_num) {
         /*
          * Let the more generic (larger type number) DType handle this
          * (note that half is after all others, which works out here.)
From 0b498291cf131e1f40841aab715bd8aa391edbb0 Mon Sep 17 00:00:00 2001
From: Marten Henric van Kerkwijk
Date: Sat, 23 Mar 2024 16:45:54 -0400
Subject: [PATCH 096/980] MAINT: make python dtypes concrete (not abstract)

---
 numpy/_core/src/multiarray/abstractdtypes.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c
index f92fe51b41cb..cb5109d8ba2d 100644
--- a/numpy/_core/src/multiarray/abstractdtypes.c
+++ b/numpy/_core/src/multiarray/abstractdtypes.c
@@ -312,7 +312,6 @@ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyIntAbstractDType = {{{
     .tp_flags = Py_TPFLAGS_DEFAULT,
 },},
     .type_num = -1,
-    .flags = NPY_DT_ABSTRACT,
     .dt_slots = &pyintdtype_slots,
 };
 
@@ -340,7 +339,6 @@ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatAbstractDType = {{{
     .tp_flags = Py_TPFLAGS_DEFAULT,
 },},
     .type_num = -1,
-    .flags = NPY_DT_ABSTRACT,
     .dt_slots = &pyfloatdtype_slots,
 };
 
@@ -368,6 +366,5 @@ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexAbstractDType = {{{
     .tp_flags = Py_TPFLAGS_DEFAULT,
 },},
     .type_num = -1,
-    .flags = NPY_DT_ABSTRACT,
     .dt_slots = &pycomplexdtype_slots,
 };
From ff7b13efed8e467d4d784c17a667390c3b63a844 Mon Sep 17 00:00:00 2001
From: Marten Henric van Kerkwijk
Date: Sat, 23 Mar 2024 16:48:05 -0400
Subject: [PATCH 097/980] MAINT: use that legacy dtypes can never be abstract

---
 numpy/_core/src/umath/dispatching.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c
index fbba26ea247b..b7f2fb3d9caf 100644
--- a/numpy/_core/src/umath/dispatching.c
+++ b/numpy/_core/src/umath/dispatching.c
@@ -589,8 +589,7 @@ _make_new_typetup(
             none_count++;
         }
         else {
-            if (!NPY_DT_is_legacy(signature[i])
-                    || NPY_DT_is_abstract(signature[i])) {
+            if (!NPY_DT_is_legacy(signature[i])) {
                 /*
                  * The legacy type resolution can't deal with these.
                  * This path will return `None` or so in the future to
From 760a1892ce1505bd8620d3f79917b0f98b99db48 Mon Sep 17 00:00:00 2001
From: mattip
Date: Sun, 24 Mar 2024 11:56:34 +0200
Subject: [PATCH 098/980] BUG: update pocketfft to unconditionally disable use
 of aligned_alloc

---
 numpy/fft/pocketfft | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/fft/pocketfft b/numpy/fft/pocketfft
index 0f7aa1225b06..33ae5dc94c9c 160000
--- a/numpy/fft/pocketfft
+++ b/numpy/fft/pocketfft
@@ -1 +1 @@
-Subproject commit 0f7aa1225b065938fc263b7914df16b8c1cbc9d7
+Subproject commit 33ae5dc94c9cdc7f1c78346504a85de87cadaa12
From defe9defcab634c051994135cd687d643ff3d9d0 Mon Sep 17 00:00:00 2001
From: Hugo van Kemenade <1324225+hugovk@users.noreply.github.com>
Date: Sun, 24 Mar 2024 17:58:53 +0200
Subject: [PATCH 099/980] DOC: Bump pydata-sphinx-theme version

Fixes inaccessible contrast for a:visited links in admonitions.

Fixes #26058.
--- environment.yml | 2 +- requirements/doc_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 0690d6fdac6c..8a6561b7891a 100644 --- a/environment.yml +++ b/environment.yml @@ -35,7 +35,7 @@ dependencies: - scipy - pandas - matplotlib - - pydata-sphinx-theme=0.13.3 + - pydata-sphinx-theme=0.15.2 - doxygen # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index a642de83b4e3..de2180a9ac6c 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -1,7 +1,7 @@ # doxygen required, use apt-get or dnf sphinx>=4.5.0 numpydoc==1.4 -pydata-sphinx-theme==0.13.3 +pydata-sphinx-theme==0.15.2 sphinx-design scipy matplotlib From 79e5c870743f2dc532b4f02cebbfb36efb4206b0 Mon Sep 17 00:00:00 2001 From: Hugo van Kemenade <1324225+hugovk@users.noreply.github.com> Date: Sun, 24 Mar 2024 23:03:43 +0200 Subject: [PATCH 100/980] DOC: Remove '-j2' to fix docs build Caused by: WARNING: the pydata_sphinx_theme extension is not safe for parallel writing --- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 646ee52633d3..9a6d1784c487 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -74,7 +74,7 @@ jobs: . venv/bin/activate cd doc # Don't use -q, show warning summary" - SPHINXOPTS="-j2 -n" make -e html + SPHINXOPTS="-n" make -e html if [[ $(find build/html -type f | wc -l) -lt 1000 ]]; then echo "doc build failed: build/html is empty" exit -1 @@ -85,7 +85,7 @@ jobs: command: | . venv/bin/activate cd doc/neps - SPHINXOPTS="-j2 -n" make -e html || echo "ignoring errors for now" + SPHINXOPTS="-n" make -e html || echo "ignoring errors for now" - store_artifacts: path: doc/build/html/ From 60a23cf268c014a204b543702ab6c97a8b9492fb Mon Sep 17 00:00:00 2001 From: partev Date: Sun, 24 Mar 2024 21:04:54 -0400 Subject: [PATCH 101/980] DOC: Update absolute_beginners.rst fix a typo "Sorting an element ..." -> "Sorting an array ..." --- doc/source/user/absolute_beginners.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index f1804b20bea3..05c379f4cd4f 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -301,7 +301,7 @@ Adding, removing, and sorting elements ----- -Sorting an element is simple with ``np.sort()``. You can specify the axis, kind, +Sorting an array is simple with ``np.sort()``. You can specify the axis, kind, and order when you call the function. If you start with this array:: From 9b2722021f9917d6c5ad77da9b29519c7c22106c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gonzalo=20Tornar=C3=ADa?= Date: Mon, 11 Mar 2024 08:52:56 -0300 Subject: [PATCH 102/980] MAINT: add missing noexcept clauses (1/2) After https://github.com/cython/cython/pull/6087 it's much easier to figure out the missing noexcept clauses. 
Indeed, cython up to 3.0.9 has a warning that gives lots of false positives, but with the PR above (already merged in cython master and backported to 3.0.x) all the warnings are indeed cases of missing noexcept To test use this file `test_cimport.pyx`: ``` # cython: language_level=3 cimport numpy cimport numpy.random cimport numpy.random._bounded_integers cimport numpy.random._common cimport numpy.random.bit_generator cimport numpy.random.c_distributions ``` and build with `cython -X legacy_implicit_noexcept=True test_cimport.pyx` This commit applies cleanly to the 1.26.x branch and is meant to backport. The next commit fixes the remaining instances. --- numpy/__init__.cython-30.pxd | 24 ++++++++++++------------ numpy/random/_bounded_integers.pxd.in | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 744a50956b56..f0a2f7eb6f84 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -363,7 +363,7 @@ cdef extern from "numpy/arrayobject.h": # Instead, we use properties that map to the corresponding C-API functions. @property - cdef inline PyObject* base(self) nogil: + cdef inline PyObject* base(self) noexcept nogil: """Returns a borrowed reference to the object owning the data/memory. """ return PyArray_BASE(self) @@ -375,13 +375,13 @@ cdef extern from "numpy/arrayobject.h": return PyArray_DESCR(self) @property - cdef inline int ndim(self) nogil: + cdef inline int ndim(self) noexcept nogil: """Returns the number of dimensions in the array. """ return PyArray_NDIM(self) @property - cdef inline npy_intp *shape(self) nogil: + cdef inline npy_intp *shape(self) noexcept nogil: """Returns a pointer to the dimensions/shape of the array. The number of elements matches the number of dimensions of the array (ndim). Can return NULL for 0-dimensional arrays. @@ -389,20 +389,20 @@ cdef extern from "numpy/arrayobject.h": return PyArray_DIMS(self) @property - cdef inline npy_intp *strides(self) nogil: + cdef inline npy_intp *strides(self) noexcept nogil: """Returns a pointer to the strides of the array. The number of elements matches the number of dimensions of the array (ndim). """ return PyArray_STRIDES(self) @property - cdef inline npy_intp size(self) nogil: + cdef inline npy_intp size(self) noexcept nogil: """Returns the total size (in number of elements) of the array. """ return PyArray_SIZE(self) @property - cdef inline char* data(self) nogil: + cdef inline char* data(self) noexcept nogil: """The pointer to the data buffer as a char*. This is provided for legacy reasons to avoid direct struct field access. For new code that needs this access, you probably want to cast the result @@ -1007,7 +1007,7 @@ cdef extern from "numpy/ufuncobject.h": int _import_umath() except -1 -cdef inline void set_array_base(ndarray arr, object base): +cdef inline void set_array_base(ndarray arr, object base) except *: Py_INCREF(base) # important to do this before stealing the reference below! 
    PyArray_SetBaseObject(arr, base)
 
@@ -1038,7 +1038,7 @@ cdef inline int import_ufunc() except -1:
         raise ImportError("numpy._core.umath failed to import")
 
 
-cdef inline bint is_timedelta64_object(object obj):
+cdef inline bint is_timedelta64_object(object obj) noexcept:
     """
     Cython equivalent of `isinstance(obj, np.timedelta64)`
 
@@ -1053,7 +1053,7 @@ cdef inline bint is_timedelta64_object(object obj):
     return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
 
 
-cdef inline bint is_datetime64_object(object obj):
+cdef inline bint is_datetime64_object(object obj) noexcept:
     """
     Cython equivalent of `isinstance(obj, np.datetime64)`
 
@@ -1068,7 +1068,7 @@ cdef inline bint is_datetime64_object(object obj):
     return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
 
 
-cdef inline npy_datetime get_datetime64_value(object obj) nogil:
+cdef inline npy_datetime get_datetime64_value(object obj) noexcept nogil:
     """
     returns the int64 value underlying scalar numpy datetime64 object
 
@@ -1078,14 +1078,14 @@ cdef inline npy_datetime get_datetime64_value(object obj) nogil:
     return (<PyDatetimeScalarObject*>obj).obval
 
 
-cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:
+cdef inline npy_timedelta get_timedelta64_value(object obj) noexcept nogil:
     """
     returns the int64 value underlying scalar numpy timedelta64 object
     """
     return (<PyTimedeltaScalarObject*>obj).obval
 
 
-cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil:
+cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil:
     """
     returns the unit part of the dtype for a numpy datetime64 object.
     """
diff --git a/numpy/random/_bounded_integers.pxd.in b/numpy/random/_bounded_integers.pxd.in
index 5ae5a806715c..bdcb32a7e212 100644
--- a/numpy/random/_bounded_integers.pxd.in
+++ b/numpy/random/_bounded_integers.pxd.in
@@ -6,7 +6,7 @@ ctypedef np.npy_bool bool_t
 
 from numpy.random cimport bitgen_t
 
-cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
+cdef inline uint64_t _gen_mask(uint64_t max_val) noexcept nogil:
     """Mask generator for use in bounded random numbers"""
     # Smallest bit mask >= max
     cdef uint64_t mask = max_val
From 4045a60613728873729344c677c697e9a0b23726 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gonzalo=20Tornar=C3=ADa?=
Date: Sun, 24 Mar 2024 21:46:57 -0300
Subject: [PATCH 103/980] MAINT: add missing noexcept clauses (2/2)

This commit fixes the remaining instances. See description in previous
commit.

---
 numpy/__init__.cython-30.pxd | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd
index f0a2f7eb6f84..0270f0ee988f 100644
--- a/numpy/__init__.cython-30.pxd
+++ b/numpy/__init__.cython-30.pxd
@@ -283,11 +283,11 @@ cdef extern from "numpy/arrayobject.h":
         cdef int type_num
 
         @property
-        cdef inline npy_intp itemsize(self) nogil:
+        cdef inline npy_intp itemsize(self) noexcept nogil:
            return PyDataType_ELSIZE(self)
 
         @property
-        cdef inline npy_intp alignment(self) nogil:
+        cdef inline npy_intp alignment(self) noexcept nogil:
            return PyDataType_ALIGNMENT(self)
 
        # Use fields/names with care as they may be NULL. You must check
@@ -304,11 +304,11 @@ cdef extern from "numpy/arrayobject.h":
        # valid (the pointer can be NULL). Most users should access
        # this field via the inline helper method PyDataType_SHAPE.
@property - cdef inline PyArray_ArrayDescr* subarray(self) nogil: + cdef inline PyArray_ArrayDescr* subarray(self) noexcept nogil: return PyDataType_SUBARRAY(self) @property - cdef inline npy_uint64 flags(self) nogil: + cdef inline npy_uint64 flags(self) noexcept nogil: """The data types flags.""" return PyDataType_FLAGS(self) @@ -320,32 +320,32 @@ cdef extern from "numpy/arrayobject.h": ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: @property - cdef inline int numiter(self) nogil: + cdef inline int numiter(self) noexcept nogil: """The number of arrays that need to be broadcast to the same shape.""" return PyArray_MultiIter_NUMITER(self) @property - cdef inline npy_intp size(self) nogil: + cdef inline npy_intp size(self) noexcept nogil: """The total broadcasted size.""" return PyArray_MultiIter_SIZE(self) @property - cdef inline npy_intp index(self) nogil: + cdef inline npy_intp index(self) noexcept nogil: """The current (1-d) index into the broadcasted result.""" return PyArray_MultiIter_INDEX(self) @property - cdef inline int nd(self) nogil: + cdef inline int nd(self) noexcept nogil: """The number of dimensions in the broadcasted result.""" return PyArray_MultiIter_NDIM(self) @property - cdef inline npy_intp* dimensions(self) nogil: + cdef inline npy_intp* dimensions(self) noexcept nogil: """The shape of the broadcasted result.""" return PyArray_MultiIter_DIMS(self) @property - cdef inline void** iters(self) nogil: + cdef inline void** iters(self) noexcept nogil: """An array of iterator objects that holds the iterators for the arrays to be broadcast together. On return, the iterators are adjusted for broadcasting.""" return PyArray_MultiIter_ITERS(self) From 52544793ac04bf09c1d33808f559925e7f023c6c Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Mon, 25 Mar 2024 09:47:30 +0100 Subject: [PATCH 104/980] ENH: Optimize performance of np.atleast_1d --- numpy/_core/shape_base.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index 200d8e7c74d7..e7e593a0d50e 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -60,18 +60,18 @@ def atleast_1d(*arys): (array([1]), array([3, 4])) """ + if len(arys) == 1: + result = asanyarray(arys[0]) + if result.ndim == 0: + result = result.reshape(1) + return result res = [] for ary in arys: - ary = asanyarray(ary) - if ary.ndim == 0: - result = ary.reshape(1) - else: - result = ary + result = asanyarray(ary) + if result.ndim == 0: + result = result.reshape(1) res.append(result) - if len(res) == 1: - return res[0] - else: - return tuple(res) + return res def _atleast_2d_dispatcher(*arys): From 2e2ccee629de233efbd58ad226a1e2d722ee53af Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Mon, 25 Mar 2024 11:15:40 +0100 Subject: [PATCH 105/980] cast to tuple --- numpy/_core/shape_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index e7e593a0d50e..07f185ed0c10 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -71,7 +71,7 @@ def atleast_1d(*arys): if result.ndim == 0: result = result.reshape(1) res.append(result) - return res + return tuple(res) def _atleast_2d_dispatcher(*arys): From e2b0c96c6eea07bd61d8b79e7b2a4984ef4ac676 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 17:40:38 +0000 Subject: [PATCH 106/980] MAINT: Bump 
actions/dependency-review-action from 4.2.3 to 4.2.4 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.2.3 to 4.2.4. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/0fa40c3c10055986a88de3baa0d6ec17c5a894b3...733dd5d4a5203f238c33806593ec0f5fc5343d8c) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index f55dacad957d..ea774924332e 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -17,4 +17,4 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: 'Dependency Review' - uses: actions/dependency-review-action@0fa40c3c10055986a88de3baa0d6ec17c5a894b3 # v4.2.3 + uses: actions/dependency-review-action@733dd5d4a5203f238c33806593ec0f5fc5343d8c # v4.2.4 From e9cc084fa222a00746939de04d8f136f8ef13d2f Mon Sep 17 00:00:00 2001 From: Francisco Sousa Date: Mon, 25 Mar 2024 23:55:36 +0000 Subject: [PATCH 107/980] BUG: fixed datetime64[ns] conversion issue in numpy.vectorize, see #25936 --- numpy/lib/_function_base_impl.py | 2 +- numpy/lib/tests/test_function_base.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 65bc7c592b29..436495e7f3fb 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2330,7 +2330,7 @@ def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None, if char not in typecodes['All']: raise ValueError("Invalid otype specified: %s" % (char,)) elif iterable(otypes): - otypes = ''.join([_nx.dtype(x).char for x in otypes]) + otypes = [_nx.dtype(x) for x in otypes] elif otypes is not None: raise ValueError("Invalid otype specification") self.otypes = otypes diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index a6465019fae4..bdca07edd3e8 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1901,6 +1901,13 @@ def test_positional_regression_9477(self): r = f([2]) assert_equal(r.dtype, np.dtype('float64')) + def test_datetime_conversion(self): + otype = "datetime64[ns]" + arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], + dtype='datetime64[ns]') + assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)", + otypes=[otype])(arr), arr) + class TestLeaks: class A: From a4c76a0ea6170b37f7375ee83f385adcfa71c6c1 Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Tue, 26 Mar 2024 10:04:43 -0400 Subject: [PATCH 108/980] MAINT: rename python scalar dtypes (removing abstract) --- .../include/numpy/_public_dtype_api_table.h | 12 ++--- numpy/_core/src/multiarray/abstractdtypes.c | 50 +++++++++---------- numpy/_core/src/multiarray/abstractdtypes.h | 18 +++---- numpy/_core/src/multiarray/array_coercion.c | 4 +- numpy/_core/src/multiarray/array_converter.c | 4 +- numpy/_core/src/multiarray/common.c | 2 +- numpy/_core/src/multiarray/convert_datatype.c | 6 +-- numpy/_core/src/multiarray/dtypemeta.c | 6 +-- numpy/_core/src/multiarray/public_dtype_api.c | 6 +-- 
.../src/umath/special_integer_comparisons.cpp | 6 +-- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 2 +- numpy/_core/src/umath/ufunc_object.c | 12 ++--- 12 files changed, 63 insertions(+), 65 deletions(-) diff --git a/numpy/_core/include/numpy/_public_dtype_api_table.h b/numpy/_core/include/numpy/_public_dtype_api_table.h index 5fbbdd785e4e..e106386d35e2 100644 --- a/numpy/_core/include/numpy/_public_dtype_api_table.h +++ b/numpy/_core/include/numpy/_public_dtype_api_table.h @@ -61,13 +61,11 @@ /* Object/Void */ #define PyArray_ObjectDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[33]) #define PyArray_VoidDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[34]) -/* Abstract */ -#define PyArray_PyIntAbstractDType \ - (*(PyArray_DTypeMeta *)(PyArray_API + 320)[35]) -#define PyArray_PyFloatAbstractDType \ - (*(PyArray_DTypeMeta *)(PyArray_API + 320)[36]) -#define PyArray_PyComplexAbstractDType \ - (*(PyArray_DTypeMeta *)(PyArray_API + 320)[37]) +/* Python types (used as markers for scalars) */ +#define PyArray_PyLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[35]) +#define PyArray_PyFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[36]) +#define PyArray_PyComplexDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[37]) +/* Default integer type */ #define PyArray_DefaultIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[38]) /* New non-legacy DTypes follow in the order they were added */ #define PyArray_StringDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[39]) diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c index cb5109d8ba2d..754ea700bdb8 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -21,7 +21,7 @@ int_default_descriptor(PyArray_DTypeMeta* NPY_UNUSED(cls)) } static PyArray_Descr * -discover_descriptor_from_pyint( +discover_descriptor_from_pylong( PyArray_DTypeMeta *NPY_UNUSED(cls), PyObject *obj) { assert(PyLong_Check(obj)); @@ -94,36 +94,36 @@ initialize_and_map_pytypes_to_dtypes() if (PyType_Ready((PyTypeObject *)&PyArray_ComplexAbstractDType) < 0) { return -1; } - ((PyTypeObject *)&PyArray_PyIntAbstractDType)->tp_base = + ((PyTypeObject *)&PyArray_PyLongDType)->tp_base = (PyTypeObject *)&PyArray_IntAbstractDType; - PyArray_PyIntAbstractDType.scalar_type = &PyLong_Type; - if (PyType_Ready((PyTypeObject *)&PyArray_PyIntAbstractDType) < 0) { + PyArray_PyLongDType.scalar_type = &PyLong_Type; + if (PyType_Ready((PyTypeObject *)&PyArray_PyLongDType) < 0) { return -1; } - ((PyTypeObject *)&PyArray_PyFloatAbstractDType)->tp_base = + ((PyTypeObject *)&PyArray_PyFloatDType)->tp_base = (PyTypeObject *)&PyArray_FloatAbstractDType; - PyArray_PyFloatAbstractDType.scalar_type = &PyFloat_Type; - if (PyType_Ready((PyTypeObject *)&PyArray_PyFloatAbstractDType) < 0) { + PyArray_PyFloatDType.scalar_type = &PyFloat_Type; + if (PyType_Ready((PyTypeObject *)&PyArray_PyFloatDType) < 0) { return -1; } - ((PyTypeObject *)&PyArray_PyComplexAbstractDType)->tp_base = + ((PyTypeObject *)&PyArray_PyComplexDType)->tp_base = (PyTypeObject *)&PyArray_ComplexAbstractDType; - PyArray_PyComplexAbstractDType.scalar_type = &PyComplex_Type; - if (PyType_Ready((PyTypeObject *)&PyArray_PyComplexAbstractDType) < 0) { + PyArray_PyComplexDType.scalar_type = &PyComplex_Type; + if (PyType_Ready((PyTypeObject *)&PyArray_PyComplexDType) < 0) { return -1; } /* Register the new DTypes for discovery */ if (_PyArray_MapPyTypeToDType( - &PyArray_PyIntAbstractDType, &PyLong_Type, NPY_FALSE) < 0) { + 
&PyArray_PyLongDType, &PyLong_Type, NPY_FALSE) < 0) { return -1; } if (_PyArray_MapPyTypeToDType( - &PyArray_PyFloatAbstractDType, &PyFloat_Type, NPY_FALSE) < 0) { + &PyArray_PyFloatDType, &PyFloat_Type, NPY_FALSE) < 0) { return -1; } if (_PyArray_MapPyTypeToDType( - &PyArray_PyComplexAbstractDType, &PyComplex_Type, NPY_FALSE) < 0) { + &PyArray_PyComplexDType, &PyComplex_Type, NPY_FALSE) < 0) { return -1; } @@ -217,7 +217,7 @@ float_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) return NPY_DT_NewRef(&PyArray_DoubleDType); } } - else if (other == &PyArray_PyIntAbstractDType) { + else if (other == &PyArray_PyLongDType) { Py_INCREF(cls); return cls; } @@ -273,8 +273,8 @@ complex_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) return res; } - else if (other == &PyArray_PyIntAbstractDType || - other == &PyArray_PyFloatAbstractDType) { + else if (other == &PyArray_PyLongDType || + other == &PyArray_PyFloatDType) { Py_INCREF(cls); return cls; } @@ -299,20 +299,20 @@ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_IntAbstractDType = {{{ .flags = NPY_DT_ABSTRACT, }; -NPY_DType_Slots pyintdtype_slots = { - .discover_descr_from_pyobject = discover_descriptor_from_pyint, +NPY_DType_Slots pylongdtype_slots = { + .discover_descr_from_pyobject = discover_descriptor_from_pylong, .default_descr = int_default_descriptor, .common_dtype = int_common_dtype, }; -NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyIntAbstractDType = {{{ +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyLongDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._PythonIntegerDType", + .tp_name = "numpy._PyLongDType", .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, - .dt_slots = &pyintdtype_slots, + .dt_slots = &pylongdtype_slots, }; NPY_NO_EXPORT PyArray_DTypeMeta PyArray_FloatAbstractDType = {{{ @@ -332,9 +332,9 @@ NPY_DType_Slots pyfloatdtype_slots = { .common_dtype = float_common_dtype, }; -NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatAbstractDType = {{{ +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._PythonFloatDType", + .tp_name = "numpy._PyFloatDType", .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, @@ -359,9 +359,9 @@ NPY_DType_Slots pycomplexdtype_slots = { .common_dtype = complex_common_dtype, }; -NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexAbstractDType = {{{ +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._PythonComplexDType", + .tp_name = "numpy._PyComplexDType", .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, diff --git a/numpy/_core/src/multiarray/abstractdtypes.h b/numpy/_core/src/multiarray/abstractdtypes.h index 573520f26452..4533e99b635f 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.h +++ b/numpy/_core/src/multiarray/abstractdtypes.h @@ -17,9 +17,9 @@ extern "C" { NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_IntAbstractDType; NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_FloatAbstractDType; NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_ComplexAbstractDType; -NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyIntAbstractDType; -NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyFloatAbstractDType; -NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyComplexAbstractDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyLongDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyFloatDType; +NPY_NO_EXPORT 
extern PyArray_DTypeMeta PyArray_PyComplexDType; NPY_NO_EXPORT int initialize_and_map_pytypes_to_dtypes(void); @@ -51,8 +51,8 @@ npy_mark_tmp_array_if_pyscalar( && (PyArray_ISINTEGER(arr) || PyArray_ISOBJECT(arr))) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_INT; if (dtype != NULL) { - Py_INCREF(&PyArray_PyIntAbstractDType); - Py_SETREF(*dtype, &PyArray_PyIntAbstractDType); + Py_INCREF(&PyArray_PyLongDType); + Py_SETREF(*dtype, &PyArray_PyLongDType); } return 1; } @@ -60,8 +60,8 @@ npy_mark_tmp_array_if_pyscalar( && PyArray_TYPE(arr) == NPY_DOUBLE) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_FLOAT; if (dtype != NULL) { - Py_INCREF(&PyArray_PyFloatAbstractDType); - Py_SETREF(*dtype, &PyArray_PyFloatAbstractDType); + Py_INCREF(&PyArray_PyFloatDType); + Py_SETREF(*dtype, &PyArray_PyFloatDType); } return 1; } @@ -69,8 +69,8 @@ npy_mark_tmp_array_if_pyscalar( && PyArray_TYPE(arr) == NPY_CDOUBLE) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_COMPLEX; if (dtype != NULL) { - Py_INCREF(&PyArray_PyComplexAbstractDType); - Py_SETREF(*dtype, &PyArray_PyComplexAbstractDType); + Py_INCREF(&PyArray_PyComplexDType); + Py_SETREF(*dtype, &PyArray_PyComplexDType); } return 1; } diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index c2b924e093b5..f63dbbc77e1f 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -226,10 +226,10 @@ npy_discover_dtype_from_pytype(PyTypeObject *pytype) DType = Py_None; } else if (pytype == &PyFloat_Type) { - DType = (PyObject *)&PyArray_PyFloatAbstractDType; + DType = (PyObject *)&PyArray_PyFloatDType; } else if (pytype == &PyLong_Type) { - DType = (PyObject *)&PyArray_PyIntAbstractDType; + DType = (PyObject *)&PyArray_PyLongDType; } else { DType = PyDict_GetItem(_global_pytype_to_type_dict, diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index fd7ccd767056..8d31a8ac2a06 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -351,8 +351,8 @@ array_converter_result_type(PyArrayArrayConverterObject *self, "extra_dtype and ensure_inexact are mutually exclusive."); goto finish; } - Py_INCREF(&PyArray_PyFloatAbstractDType); - dt_info.dtype = &PyArray_PyFloatAbstractDType; + Py_INCREF(&PyArray_PyFloatDType); + dt_info.dtype = &PyArray_PyFloatDType; } if (dt_info.dtype != NULL) { diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 5804c9cc9148..6759fbf19b53 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -48,7 +48,7 @@ _array_find_python_scalar_type(PyObject *op) } else if (PyLong_Check(op)) { return NPY_DT_CALL_discover_descr_from_pyobject( - &PyArray_PyIntAbstractDType, op); + &PyArray_PyLongDType, op); } return NULL; } diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 2225ee94859c..58bc136e18e2 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -1796,17 +1796,17 @@ PyArray_ResultType( all_descriptors[i_all] = NULL; /* no descriptor for py-scalars */ if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_INT) { /* This could even be an object dtype here for large ints */ - all_DTypes[i_all] = &PyArray_PyIntAbstractDType; + all_DTypes[i_all] = &PyArray_PyLongDType; if (PyArray_TYPE(arrs[i]) != NPY_LONG) { 
/* Not a "normal" scalar, so we cannot avoid the legacy path */ all_pyscalar = 0; } } else if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_FLOAT) { - all_DTypes[i_all] = &PyArray_PyFloatAbstractDType; + all_DTypes[i_all] = &PyArray_PyFloatDType; } else if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_COMPLEX) { - all_DTypes[i_all] = &PyArray_PyComplexAbstractDType; + all_DTypes[i_all] = &PyArray_PyComplexDType; } else { all_descriptors[i_all] = PyArray_DTYPE(arrs[i]); diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index ecceca2e7d03..998fa4792f22 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -918,7 +918,7 @@ default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) * can change the type of the result (complex, float, int). * If our own type if not numerical, signal not implemented. */ - if (other == &PyArray_PyComplexAbstractDType) { + if (other == &PyArray_PyComplexDType) { if (PyTypeNum_ISCOMPLEX(cls->type_num)) { Py_INCREF(cls); return cls; @@ -933,14 +933,14 @@ default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) return NPY_DT_NewRef(&PyArray_CLongDoubleDType); } } - else if (other == &PyArray_PyFloatAbstractDType) { + else if (other == &PyArray_PyFloatDType) { if (PyTypeNum_ISCOMPLEX(cls->type_num) || PyTypeNum_ISFLOAT(cls->type_num)) { Py_INCREF(cls); return cls; } } - else if (other == &PyArray_PyIntAbstractDType) { + else if (other == &PyArray_PyLongDType) { if (PyTypeNum_ISCOMPLEX(cls->type_num) || PyTypeNum_ISFLOAT(cls->type_num) || PyTypeNum_ISINTEGER(cls->type_num) diff --git a/numpy/_core/src/multiarray/public_dtype_api.c b/numpy/_core/src/multiarray/public_dtype_api.c index 73ab8a6b9f92..49ae3d2cd77e 100644 --- a/numpy/_core/src/multiarray/public_dtype_api.c +++ b/numpy/_core/src/multiarray/public_dtype_api.c @@ -169,9 +169,9 @@ _fill_dtype_api(void *full_api_table[]) api_table[33] = &PyArray_ObjectDType; api_table[34] = &PyArray_VoidDType; /* Abstract */ - api_table[35] = &PyArray_PyIntAbstractDType; - api_table[36] = &PyArray_PyFloatAbstractDType; - api_table[37] = &PyArray_PyComplexAbstractDType; + api_table[35] = &PyArray_PyLongDType; + api_table[36] = &PyArray_PyFloatDType; + api_table[37] = &PyArray_PyComplexDType; api_table[38] = &PyArray_DefaultIntDType; /* Non-legacy DTypes that are built in to NumPy */ api_table[39] = &PyArray_StringDType; diff --git a/numpy/_core/src/umath/special_integer_comparisons.cpp b/numpy/_core/src/umath/special_integer_comparisons.cpp index 9e0c9481960b..05026be96e67 100644 --- a/numpy/_core/src/umath/special_integer_comparisons.cpp +++ b/numpy/_core/src/umath/special_integer_comparisons.cpp @@ -177,7 +177,7 @@ resolve_descriptors_with_scalars( { int value_range = 0; - npy_bool first_is_pyint = dtypes[0] == &PyArray_PyIntAbstractDType; + npy_bool first_is_pyint = dtypes[0] == &PyArray_PyLongDType; int arr_idx = first_is_pyint? 1 : 0; int scalar_idx = first_is_pyint? 0 : 1; PyObject *scalar = input_scalars[scalar_idx]; @@ -327,7 +327,7 @@ template static int add_dtype_loops(PyObject *umath, PyArrayMethod_Spec *spec, PyObject *info) { - PyArray_DTypeMeta *PyInt = &PyArray_PyIntAbstractDType; + PyArray_DTypeMeta *PyInt = &PyArray_PyLongDType; PyObject *name = PyUnicode_FromString(comp_name(comp)); if (name == nullptr) { @@ -441,7 +441,7 @@ init_special_int_comparisons(PyObject *umath) * `np.equal(2, 4)` (with two python integers) use an object loop. 
*/ PyObject *dtype_tuple = PyTuple_Pack(3, - &PyArray_PyIntAbstractDType, &PyArray_PyIntAbstractDType, Bool); + &PyArray_PyLongDType, &PyArray_PyLongDType, Bool); if (dtype_tuple == NULL) { goto finish; } diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index c3b9c47c96e1..9f9967590ce3 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1939,7 +1939,7 @@ string_unicode_bool_output_promoter( static int is_integer_dtype(PyArray_DTypeMeta *DType) { - if (DType == &PyArray_PyIntAbstractDType) { + if (DType == &PyArray_PyLongDType) { return 1; } else if (DType == &PyArray_Int8DType) { diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 665674e54310..ea053712b75f 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -6117,8 +6117,8 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, goto finish; } PyArray_ENABLEFLAGS(dummy_arrays[i], NPY_ARRAY_WAS_PYTHON_INT); - Py_INCREF(&PyArray_PyIntAbstractDType); - DTypes[i] = &PyArray_PyIntAbstractDType; + Py_INCREF(&PyArray_PyLongDType); + DTypes[i] = &PyArray_PyLongDType; promoting_pyscalars = NPY_TRUE; } else if (descr_obj == (PyObject *)&PyFloat_Type) { @@ -6128,8 +6128,8 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, goto finish; } PyArray_ENABLEFLAGS(dummy_arrays[i], NPY_ARRAY_WAS_PYTHON_FLOAT); - Py_INCREF(&PyArray_PyFloatAbstractDType); - DTypes[i] = &PyArray_PyFloatAbstractDType; + Py_INCREF(&PyArray_PyFloatDType); + DTypes[i] = &PyArray_PyFloatDType; promoting_pyscalars = NPY_TRUE; } else if (descr_obj == (PyObject *)&PyComplex_Type) { @@ -6139,8 +6139,8 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, goto finish; } PyArray_ENABLEFLAGS(dummy_arrays[i], NPY_ARRAY_WAS_PYTHON_COMPLEX); - Py_INCREF(&PyArray_PyComplexAbstractDType); - DTypes[i] = &PyArray_PyComplexAbstractDType; + Py_INCREF(&PyArray_PyComplexDType); + DTypes[i] = &PyArray_PyComplexDType; promoting_pyscalars = NPY_TRUE; } else if (descr_obj == Py_None) { From 9992c3a7802ae38e5766adc6af85c98962bbb92d Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Tue, 26 Mar 2024 10:24:40 -0400 Subject: [PATCH 109/980] MAINT: Make reason for delayed assignments to structs explicit --- numpy/_core/src/multiarray/abstractdtypes.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c index 754ea700bdb8..6822513f88dd 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -94,6 +94,10 @@ initialize_and_map_pytypes_to_dtypes() if (PyType_Ready((PyTypeObject *)&PyArray_ComplexAbstractDType) < 0) { return -1; } + /* + * Delayed assignments to avoid "error C2099: initializer is not a constant" + * in windows compilers. Can hopefully be done in structs in the future. + */ ((PyTypeObject *)&PyArray_PyLongDType)->tp_base = (PyTypeObject *)&PyArray_IntAbstractDType; PyArray_PyLongDType.scalar_type = &PyLong_Type; @@ -284,9 +288,9 @@ complex_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) /* - * TODO: These abstract DTypes also carry the dual role of representing - * `Floating`, `Complex`, and `Integer` (both signed and unsigned). - * They will have to be renamed and exposed in that capacity. 
+ * Define abstract numerical DTypes that all regular ones can inherit from + * (in arraytypes.c.src). + * Here, also define types corresponding to the python scalars. */ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_IntAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) @@ -308,11 +312,13 @@ NPY_DType_Slots pylongdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyLongDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) .tp_name = "numpy._PyLongDType", + .tp_base = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, .dt_slots = &pylongdtype_slots, + .scalar_type = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ }; NPY_NO_EXPORT PyArray_DTypeMeta PyArray_FloatAbstractDType = {{{ @@ -335,11 +341,13 @@ NPY_DType_Slots pyfloatdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) .tp_name = "numpy._PyFloatDType", + .tp_base = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, .dt_slots = &pyfloatdtype_slots, + .scalar_type = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ }; NPY_NO_EXPORT PyArray_DTypeMeta PyArray_ComplexAbstractDType = {{{ @@ -362,9 +370,11 @@ NPY_DType_Slots pycomplexdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) .tp_name = "numpy._PyComplexDType", + .tp_base = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, .dt_slots = &pycomplexdtype_slots, + .scalar_type = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ }; From ed4dcdc20fd2781cc3c1a4daf5bc54fa6a8d5f3e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Mar 2024 17:07:59 +0000 Subject: [PATCH 110/980] MAINT: Bump actions/setup-python from 5.0.0 to 5.1.0 Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.0.0 to 5.1.0. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/0a5c61591373683505ea898e09a3ea4f39ef2b9c...82c7e631bb3cdc910f68e0081d67478d79c6982d) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/linux.yml | 16 ++++++++-------- .github/workflows/linux_blas.yml | 12 ++++++------ .github/workflows/linux_compiler_sanitizers.yml | 2 +- .github/workflows/linux_simd.yml | 10 +++++----- .github/workflows/macos.yml | 2 +- .github/workflows/mypy.yml | 2 +- .github/workflows/wheels.yml | 4 ++-- .github/workflows/windows.yml | 4 ++-- 9 files changed, 27 insertions(+), 27 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 728a91f691b3..0c8274756b8c 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -42,7 +42,7 @@ jobs: - name: Set up Python ${{ env.PYTHON_VERSION }} id: setup-python - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: ${{ env.PYTHON_VERSION }} diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index d0df9834ad70..6bfe339a6499 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -37,7 +37,7 @@ jobs: with: submodules: recursive fetch-depth: 0 - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.9' - name: Install linter requirements @@ -58,7 +58,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.9' - uses: ./.github/meson_actions @@ -72,7 +72,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: 'pypy3.9-v7.3.12' - name: Setup using scipy-openblas @@ -119,7 +119,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.9' - name: Install build and test dependencies from PyPI @@ -156,7 +156,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.9' - name: Install build and benchmarking dependencies @@ -187,7 +187,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' - name: Install gfortran and setup OpenBLAS (sdist build) @@ -229,7 +229,7 @@ jobs: submodules: 'true' path: 'array-api-tests' - name: Set up Python - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -259,7 +259,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 
with: python-version: '3.11' - name: Install build and test dependencies from PyPI diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 598a1c784b62..3b23072dccfa 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -69,7 +69,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -196,7 +196,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -224,7 +224,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -284,7 +284,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -347,7 +347,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -383,7 +383,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' diff --git a/.github/workflows/linux_compiler_sanitizers.yml b/.github/workflows/linux_compiler_sanitizers.yml index 78e90122c348..b0a40f4551d5 100644 --- a/.github/workflows/linux_compiler_sanitizers.yml +++ b/.github/workflows/linux_compiler_sanitizers.yml @@ -30,7 +30,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: ${{ env.PYTHON_VERSION }} - name: Install dependencies diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 13ef2bffe005..7b5c22562b29 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -62,7 +62,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.9' - uses: ./.github/meson_actions @@ -79,7 +79,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.9' @@ -144,7 +144,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -158,7 +158,7 @@ jobs: with: submodules: recursive fetch-tags: 
true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -212,7 +212,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index e962dde9d5bb..7c759631c863 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -119,7 +119,7 @@ jobs: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.10' diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 6d0a25eb71c5..781bba2f1f0d 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -54,7 +54,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: ${{ matrix.os_python[1] }} - name: Install dependencies diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index e387ca1a9c02..21829f4596f7 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -123,7 +123,7 @@ jobs: if: runner.os == 'windows' # Used to push the built wheels - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: "3.x" @@ -213,7 +213,7 @@ jobs: with: submodules: true # Used to push the built wheels - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: # Build sdist on lowest supported Python python-version: "3.9" diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 38a6cf24b7e0..f6e0f1b3a5ca 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -31,7 +31,7 @@ jobs: fetch-tags: true - name: Setup Python - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -91,7 +91,7 @@ jobs: fetch-tags: true - name: Setup Python (32-bit) - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.10' architecture: 'x86' From 8c12121a50cd7d4b9240a01034977bde33f3741c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Mar 2024 17:08:04 +0000 Subject: [PATCH 111/980] MAINT: Bump actions/dependency-review-action from 4.2.4 to 4.2.5 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.2.4 to 4.2.5. 
- [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/733dd5d4a5203f238c33806593ec0f5fc5343d8c...5bbc3ba658137598168acb2ab73b21c432dd411b) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index ea774924332e..a64f75f2833f 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -17,4 +17,4 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: 'Dependency Review' - uses: actions/dependency-review-action@733dd5d4a5203f238c33806593ec0f5fc5343d8c # v4.2.4 + uses: actions/dependency-review-action@5bbc3ba658137598168acb2ab73b21c432dd411b # v4.2.5 From e1bf1d635016a0118f7d77bf8931071a2c74ad20 Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Tue, 26 Mar 2024 23:34:58 +0100 Subject: [PATCH 112/980] ENH: Add partition/rpartition ufunc for string dtypes (#26082) * ENH: Add partition/rpartition ufunc for string dtypes Closes #25993. * Fix doctests * Fix docstrings in ufunc_docstrings.py as well * Return array with the separators // optimize using find ufunc results * Address feedback * Fix chararray __array_finalize__ * ENH: add stringdtype partition/rpartition * BUG: remove unnecessary size_t cast * BUG: fix error handling and resource cleanup * MNT: refactor so stringdtype can combine find and partition * MNT: update signatures to reflect const API changes * MNT: simplfy fastsearch call * MNT: move variable binding out of inner loop * Fix error message about out; fix promoter * Remove unused import in defchararray; add assertion * BUG: don't use a user-provided descriptor to initialize a new stringdtype view * MNT: back out attempted fix for stringdtype view problem * MNT: address code review comments --------- Co-authored-by: Nathan Goldbaum --- numpy/_core/code_generators/generate_umath.py | 20 ++ .../_core/code_generators/ufunc_docstrings.py | 181 ++++++++++++++++ numpy/_core/defchararray.py | 93 +++++++- numpy/_core/src/umath/string_buffer.h | 42 ++++ numpy/_core/src/umath/string_ufuncs.cpp | 130 +++++++++++- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 198 ++++++++++++++++++ numpy/_core/strings.py | 137 ++++++++---- numpy/_core/tests/test_strings.py | 95 +++++++++ numpy/_core/umath.py | 3 +- 9 files changed, 850 insertions(+), 49 deletions(-) diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index b72d13d11c6c..b64624702db7 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1300,6 +1300,26 @@ def english_upper(s): docstrings.get('numpy._core.umath._zfill'), None, ), +'_partition_index': + Ufunc(3, 3, None, + docstrings.get('numpy._core.umath._partition_index'), + None, + ), +'_rpartition_index': + Ufunc(3, 3, None, + docstrings.get('numpy._core.umath._rpartition_index'), + None, + ), +'_partition': + Ufunc(2, 3, None, + docstrings.get('numpy._core.umath._partition'), + None, + ), +'_rpartition': + Ufunc(2, 3, None, + docstrings.get('numpy._core.umath._rpartition'), + None, + ), } def indent(st, spaces): diff --git 
a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index d214ffbccb55..a3e1965151f1 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -5028,3 +5028,184 @@ def add_newdoc(place, name, doc): array(['001', '-01', '+01'], dtype='>> x = np.array(["Numpy is nice!"]) + >>> np.strings.partition(x, " ") + (array(['Numpy'], dtype='>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.rpartition(a, 'A') + (array(['aAaAa', ' a', 'abB'], dtype='>> x = np.array(["Numpy is nice!"], dtype="T") + >>> np.strings.partition(x, " ") + (array(['Numpy'], dtype=StringDType()), + array([' '], dtype=StringDType()), + array(['is nice!'], dtype=StringDType())) + + """) + +add_newdoc('numpy._core.umath', '_rpartition', + """ + Partition each element in ``x1`` around the right-most separator, + ``x2``. + + For each element in ``x1``, split the element at the last + occurrence of ``x2`` at location ``x3``, and return a 3-tuple + containing the part before the separator, the separator itself, + and the part after the separator. If the separator is not found, + the third item of the tuple will contain the whole string, and + the first and second ones will be the empty string. + + Parameters + ---------- + x1 : array-like, with ``StringDType`` dtype + Input array + x2 : array-like, with ``StringDType`` dtype + Separator to split each string element in ``x1``. + + Returns + ------- + out : 3-tuple: + - ``StringDType`` array with the part before the separator + - ``StringDType`` array with the separator + - ``StringDType`` array with the part after the separator + + See Also + -------- + str.rpartition + + Examples + -------- + The ufunc is used most easily via ``np.strings.rpartition``, + which calls it after calculating the indices:: + + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba'], dtype="T") + >>> np.strings.rpartition(a, 'A') + (array(['aAaAa', ' a', 'abB'], dtype=StringDType()), + array(['A', 'A', 'A'], dtype=StringDType()), + array(['', ' ', 'Bba'], dtype=StringDType())) + + """) diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 44754a747cec..52a62791d382 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -17,16 +17,19 @@ """ import functools +import numpy as np from .._utils import set_module from .numerictypes import bytes_, str_, character from .numeric import ndarray, array as narray, asarray as asnarray from numpy._core.multiarray import compare_chararrays from numpy._core import overrides from numpy.strings import * -from numpy.strings import multiply as strings_multiply +from numpy.strings import ( + multiply as strings_multiply, + partition as strings_partition, + rpartition as strings_rpartition, +) from numpy._core.strings import ( - _partition as partition, - _rpartition as rpartition, _split as split, _rsplit as rsplit, _splitlines as splitlines, @@ -303,6 +306,88 @@ def multiply(a, i): raise ValueError("Can only multiply by integers") +def partition(a, sep): + """ + Partition each element in `a` around `sep`. + + Calls :meth:`str.partition` element-wise. + + For each element in `a`, split the element as the first + occurrence of `sep`, and return 3 strings containing the part + before the separator, the separator itself, and the part after + the separator. If the separator is not found, return 3 strings + containing the string itself, followed by two empty strings. 
+ + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array + sep : {str, unicode} + Separator to split each string element in `a`. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types. The output array will have an extra + dimension with 3 elements per input element. + + Examples + -------- + >>> x = np.array(["Numpy is nice!"]) + >>> np.char.partition(x, " ") + array([['Numpy', ' ', 'is nice!']], dtype='>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.char.rpartition(a, 'A') + array([['aAaAa', 'A', ''], + [' a', 'A', ' '], + ['abB', 'A', 'Bba']], dtype=' buf, npy_int64 width, Buffer out) } +template +static inline void +string_partition(Buffer buf1, Buffer buf2, npy_int64 idx, + Buffer out1, Buffer out2, Buffer out3, + npy_intp *final_len1, npy_intp *final_len2, npy_intp *final_len3, + STARTPOSITION pos) +{ + // StringDType uses a ufunc that implements the find-part as well + assert(enc != ENCODING::UTF8); + + size_t len1 = buf1.num_codepoints(); + size_t len2 = buf2.num_codepoints(); + + if (len2 == 0) { + npy_gil_error(PyExc_ValueError, "empty separator"); + *final_len1 = *final_len2 = *final_len3 = -1; + return; + } + + if (idx < 0) { + if (pos == STARTPOSITION::FRONT) { + buf1.buffer_memcpy(out1, len1); + *final_len1 = len1; + *final_len2 = *final_len3 = 0; + } + else { + buf1.buffer_memcpy(out3, len1); + *final_len1 = *final_len2 = 0; + *final_len3 = len1; + } + return; + } + + buf1.buffer_memcpy(out1, idx); + *final_len1 = idx; + buf2.buffer_memcpy(out2, len2); + *final_len2 = len2; + (buf1 + idx + len2).buffer_memcpy(out3, len1 - idx - len2); + *final_len3 = len1 - idx - len2; +} + + #endif /* _NPY_CORE_SRC_UMATH_STRING_BUFFER_H_ */ diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 858493471f09..2bc4ce20acd6 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -582,6 +582,57 @@ string_zfill_loop(PyArrayMethod_Context *context, } +template +static int +string_partition_index_loop(PyArrayMethod_Context *context, + char *const data[], npy_intp const dimensions[], + npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) +{ + STARTPOSITION startposition = *(STARTPOSITION *)(context->method->static_data); + int elsize1 = context->descriptors[0]->elsize; + int elsize2 = context->descriptors[1]->elsize; + int outsize1 = context->descriptors[3]->elsize; + int outsize2 = context->descriptors[4]->elsize; + int outsize3 = context->descriptors[5]->elsize; + + char *in1 = data[0]; + char *in2 = data[1]; + char *in3 = data[2]; + char *out1 = data[3]; + char *out2 = data[4]; + char *out3 = data[5]; + + npy_intp N = dimensions[0]; + + while (N--) { + Buffer buf1(in1, elsize1); + Buffer buf2(in2, elsize2); + Buffer outbuf1(out1, outsize1); + Buffer outbuf2(out2, outsize2); + Buffer outbuf3(out3, outsize3); + + npy_intp final_len1, final_len2, final_len3; + string_partition(buf1, buf2, *(npy_int64 *)in3, outbuf1, outbuf2, outbuf3, + &final_len1, &final_len2, &final_len3, startposition); + if (final_len1 < 0 || final_len2 < 0 || final_len3 < 0) { + return -1; + } + outbuf1.buffer_fill_with_zeros_after_index(final_len1); + outbuf2.buffer_fill_with_zeros_after_index(final_len2); + outbuf3.buffer_fill_with_zeros_after_index(final_len3); + + in1 += strides[0]; + in2 += strides[1]; + in3 += strides[2]; + out1 += strides[3]; + out2 += strides[4]; + out3 += strides[5]; + 
} + + return 0; +} + + /* Resolve descriptors & promoter functions */ static NPY_CASTING @@ -947,6 +998,55 @@ string_zfill_resolve_descriptors( } +static int +string_partition_promoter(PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + Py_INCREF(op_dtypes[0]); + new_op_dtypes[0] = op_dtypes[0]; + Py_INCREF(op_dtypes[1]); + new_op_dtypes[1] = op_dtypes[1]; + + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_Int64DType); + + Py_INCREF(op_dtypes[0]); + new_op_dtypes[3] = op_dtypes[0]; + Py_INCREF(op_dtypes[0]); + new_op_dtypes[4] = op_dtypes[0]; + Py_INCREF(op_dtypes[0]); + new_op_dtypes[5] = op_dtypes[0]; + return 0; +} + + +static NPY_CASTING +string_partition_resolve_descriptors( + PyArrayMethodObject *self, + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], + PyArray_Descr *loop_descrs[3], + npy_intp *NPY_UNUSED(view_offset)) +{ + if (!given_descrs[3] || !given_descrs[4] || !given_descrs[5]) { + PyErr_Format(PyExc_TypeError, + "The '%s' ufunc requires the 'out' keyword to be set. The " + "python wrapper in numpy.strings can be used without the " + "out keyword.", self->name); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + for (int i = 0; i < 6; i++) { + loop_descrs[i] = NPY_DT_CALL_ensure_canonical(given_descrs[i]); + if (!loop_descrs[i]) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + } + + return NPY_NO_CASTING; +} + + /* * Machinery to add the string loops to the existing ufuncs. */ @@ -1228,7 +1328,7 @@ init_mixed_type_ufunc(PyObject *umath, const char *name, int nin, int nout, NPY_NO_EXPORT int init_string_ufuncs(PyObject *umath) { - NPY_TYPES dtypes[] = {NPY_STRING, NPY_STRING, NPY_STRING, NPY_STRING, NPY_STRING}; + NPY_TYPES dtypes[] = {NPY_STRING, NPY_STRING, NPY_STRING, NPY_STRING, NPY_STRING, NPY_STRING}; if (init_comparison(umath) < 0) { return -1; @@ -1599,6 +1699,34 @@ init_string_ufuncs(PyObject *umath) return -1; } + dtypes[0] = dtypes[1] = dtypes[3] = dtypes[4] = dtypes[5] = NPY_OBJECT; + dtypes[2] = NPY_INT64; + + const char *partition_names[] = {"_partition_index", "_rpartition_index"}; + + static STARTPOSITION partition_startpositions[] = { + STARTPOSITION::FRONT, STARTPOSITION::BACK + }; + + for (int i = 0; i < 2; i++) { + if (init_ufunc( + umath, partition_names[i], 3, 3, dtypes, ENCODING::ASCII, + string_partition_index_loop, + string_partition_resolve_descriptors, &partition_startpositions[i]) < 0) { + return -1; + } + if (init_ufunc( + umath, partition_names[i], 3, 3, dtypes, ENCODING::UTF32, + string_partition_index_loop, + string_partition_resolve_descriptors, &partition_startpositions[i]) < 0) { + return -1; + } + if (init_promoter(umath, partition_names[i], 3, 3, + string_partition_promoter) < 0) { + return -1; + } + } + return 0; } diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 1f24bec59c63..2380fa9495bd 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1888,6 +1888,181 @@ zfill_strided_loop(PyArrayMethod_Context *context, return -1; } +static NPY_CASTING +string_partition_resolve_descriptors( + PyArrayMethodObject *self, + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], + PyArray_Descr *loop_descrs[3], + npy_intp *NPY_UNUSED(view_offset)) +{ + if (given_descrs[2] || given_descrs[3] || given_descrs[4]) { + PyErr_Format(PyExc_TypeError, "The StringDType '%s' ufunc does 
not " + "currently support the 'out' keyword", self->name); + return (NPY_CASTING)-1; + } + for (int i=0; i<2; i++) { + Py_INCREF(given_descrs[i]); + loop_descrs[i] = given_descrs[i]; + } + PyArray_StringDTypeObject *adescr = (PyArray_StringDTypeObject *)given_descrs[0]; + for (int i=2; i<5; i++) { + loop_descrs[i] = (PyArray_Descr *)new_stringdtype_instance( + adescr->na_object, adescr->coerce); + if (loop_descrs[i] == NULL) { + return (NPY_CASTING)-1; + } + } + + return NPY_NO_CASTING; +} + +NPY_NO_EXPORT int +string_partition_strided_loop( + PyArrayMethod_Context *context, + char *const data[], + npy_intp const dimensions[], + npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata)) +{ + STARTPOSITION startposition = *(STARTPOSITION *)(context->method->static_data); + int fastsearch_direction = + startposition == STARTPOSITION::FRONT ? FAST_SEARCH : FAST_RSEARCH; + + npy_intp N = dimensions[0]; + + char *in1 = data[0]; + char *in2 = data[1]; + char *out1 = data[2]; + char *out2 = data[3]; + char *out3 = data[4]; + + npy_intp in1_stride = strides[0]; + npy_intp in2_stride = strides[1]; + npy_intp out1_stride = strides[2]; + npy_intp out2_stride = strides[3]; + npy_intp out3_stride = strides[4]; + + npy_string_allocator *allocators[5] = {}; + NpyString_acquire_allocators(5, context->descriptors, allocators); + npy_string_allocator *in1allocator = allocators[0]; + npy_string_allocator *in2allocator = allocators[1]; + npy_string_allocator *out1allocator = allocators[2]; + npy_string_allocator *out2allocator = allocators[3]; + npy_string_allocator *out3allocator = allocators[4]; + + PyArray_StringDTypeObject *idescr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + int has_string_na = idescr->has_string_na; + const npy_static_string *default_string = &idescr->default_string; + + while (N--) { + const npy_packed_static_string *i1ps = (npy_packed_static_string *)in1; + npy_static_string i1s = {0, NULL}; + const npy_packed_static_string *i2ps = (npy_packed_static_string *)in2; + npy_static_string i2s = {0, NULL}; + + int i1_isnull = NpyString_load(in1allocator, i1ps, &i1s); + int i2_isnull = NpyString_load(in2allocator, i2ps, &i2s); + + if (i1_isnull == -1 || i2_isnull == -1) { + npy_gil_error(PyExc_MemoryError, "Failed to load string in %s", + ((PyUFuncObject *)context->caller)->name); + goto fail; + } + else if (NPY_UNLIKELY(i1_isnull || i2_isnull)) { + if (!has_string_na) { + npy_gil_error(PyExc_ValueError, + "Null values are not supported in %s", + ((PyUFuncObject *)context->caller)->name); + goto fail; + } + else { + if (i1_isnull) { + i1s = *default_string; + } + if (i2_isnull) { + i2s = *default_string; + } + } + } + + if (i2s.size == 0) { + npy_gil_error(PyExc_ValueError, "empty separator"); + goto fail; + } + + npy_intp idx = fastsearch((char *)i1s.buf, i1s.size, (char *)i2s.buf, i2s.size, -1, + fastsearch_direction); + + npy_intp out1_size, out2_size, out3_size; + + if (idx == -1) { + if (startposition == STARTPOSITION::FRONT) { + out1_size = i1s.size; + out2_size = out3_size = 0; + } + else { + out1_size = out2_size = 0; + out3_size = i1s.size; + } + } + else { + out1_size = idx; + out2_size = i2s.size; + out3_size = i1s.size - out2_size - out1_size; + } + + npy_packed_static_string *o1ps = (npy_packed_static_string *)out1; + npy_static_string o1s = {0, NULL}; + npy_packed_static_string *o2ps = (npy_packed_static_string *)out2; + npy_static_string o2s = {0, NULL}; + npy_packed_static_string *o3ps = (npy_packed_static_string *)out3; + npy_static_string o3s = {0, NULL}; + 
+ if (load_new_string(o1ps, &o1s, out1_size, out1allocator, + ((PyUFuncObject *)context->caller)->name) == -1) { + goto fail; + } + if (load_new_string(o2ps, &o2s, out2_size, out2allocator, + ((PyUFuncObject *)context->caller)->name) == -1) { + goto fail; + } + if (load_new_string(o3ps, &o3s, out3_size, out3allocator, + ((PyUFuncObject *)context->caller)->name) == -1) { + goto fail; + } + + if (idx == -1) { + if (startposition == STARTPOSITION::FRONT) { + memcpy((char *)o1s.buf, i1s.buf, out1_size); + } + else { + memcpy((char *)o3s.buf, i1s.buf, out3_size); + } + } + else { + memcpy((char *)o1s.buf, i1s.buf, out1_size); + memcpy((char *)o2s.buf, i2s.buf, out2_size); + memcpy((char *)o3s.buf, i1s.buf + out1_size + out2_size, out3_size); + } + + in1 += in1_stride; + in2 += in2_stride; + out1 += out1_stride; + out2 += out2_stride; + out3 += out3_stride; + } + + NpyString_release_allocators(5, allocators); + return 0; + + fail: + + NpyString_release_allocators(5, allocators); + return -1; +} + NPY_NO_EXPORT int string_inputs_promoter( PyObject *ufunc_obj, PyArray_DTypeMeta *const op_dtypes[], @@ -2645,5 +2820,28 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } + PyArray_DTypeMeta *partition_dtypes[] = { + &PyArray_StringDType, + &PyArray_StringDType, + &PyArray_StringDType, + &PyArray_StringDType, + &PyArray_StringDType + }; + + const char *partition_names[] = {"_partition", "_rpartition"}; + + static STARTPOSITION partition_startpositions[] = { + STARTPOSITION::FRONT, STARTPOSITION::BACK + }; + + for (int i=0; i<2; i++) { + if (init_ufunc(umath, partition_names[i], partition_dtypes, + string_partition_resolve_descriptors, + string_partition_strided_loop, 2, 3, NPY_NO_CASTING, + (NPY_ARRAYMETHOD_FLAGS) 0, &partition_startpositions[i]) < 0) { + return -1; + } + } + return 0; } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index c79c7db494ff..8707bed2ffbb 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -41,6 +41,10 @@ _ljust, _rjust, _zfill, + _partition, + _partition_index, + _rpartition, + _rpartition_index, ) @@ -51,7 +55,7 @@ "isupper", "istitle", "isdecimal", "isnumeric", "str_len", "find", "rfind", "index", "rindex", "count", "startswith", "endswith", "lstrip", "rstrip", "strip", "replace", "expandtabs", "center", "ljust", "rjust", - "zfill", + "zfill", "partition", "rpartition", # _vec_string - Will gradually become ufuncs as well "upper", "lower", "swapcase", "capitalize", "title", @@ -60,7 +64,7 @@ "mod", "decode", "encode", "translate", # Removed from namespace until behavior has been crystalized - # "join", "split", "rsplit", "splitlines", "partition", "rpartition", + # "join", "split", "rsplit", "splitlines", ] @@ -1315,72 +1319,98 @@ def _splitlines(a, keepends=None): a, np.object_, 'splitlines', _clean_args(keepends)) -def _partition(a, sep): +def partition(a, sep): """ - Partition each element in `a` around `sep`. + Partition each element in ``a`` around ``sep``. - Calls :meth:`str.partition` element-wise. - - For each element in `a`, split the element as the first - occurrence of `sep`, and return 3 strings containing the part + For each element in ``a``, split the element at the first + occurrence of ``sep``, and return a 3-tuple containing the part before the separator, the separator itself, and the part after - the separator. If the separator is not found, return 3 strings - containing the string itself, followed by two empty strings. + the separator. 
If the separator is not found, the first item of + the tuple will contain the whole string, and the second and third + ones will be the empty string. Parameters ---------- a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array - sep : {str, unicode} - Separator to split each string element in `a`. + sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Separator to split each string element in ``a``. Returns ------- - out : ndarray - Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, - depending on input types. The output array will have an extra - dimension with 3 elements per input element. + out : 3-tuple: + - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the + part before the separator + - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the + separator + - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the + part after the separator - Examples - -------- - >>> x = np.array(["Numpy is nice!"]) - >>> np.strings.partition(x, " ") # doctest: +SKIP - array([['Numpy', ' ', 'is nice!']], dtype='>> x = np.array(["Numpy is nice!"]) + >>> np.strings.partition(x, " ") + (array(['Numpy'], dtype='>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> np.strings.rpartition(a, 'A') # doctest: +SKIP - array([['aAaAa', 'A', ''], # doctest: +SKIP - [' a', 'A', ' '], # doctest: +SKIP - ['abB', 'A', 'Bba']], dtype='>> np.strings.rpartition(a, 'A') + (array(['aAaAa', ' a', 'abB'], dtype=' Date: Wed, 27 Mar 2024 10:39:24 +0100 Subject: [PATCH 113/980] BUG,MAINT: Fix __array__ bugs and simplify code This fixes that the UserWarning is incorrect, it needs to be a DeprecationWarning (it does not related to end-users). Instead, include the information about `copy=None` into the error message. It also fixes a bug: If `copy=` kwarg is unsupported we cannot guarantee no-copy, so we should raise, or did we discuss to "just warn"? That is also a latent bug: When `copy=True` and the fallback path is taken, then we must make a copy to be on the safe side. --- numpy/_core/src/multiarray/ctors.c | 160 ++++++++++++------ numpy/_core/src/multiarray/ctors.h | 3 + numpy/_core/src/multiarray/methods.c | 3 +- numpy/_core/src/multiarray/multiarraymodule.c | 3 +- numpy/_core/tests/test_multiarray.py | 15 +- 5 files changed, 123 insertions(+), 61 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 4c9d76991296..11dc10f7b528 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -39,6 +39,13 @@ #include "umathmodule.h" + +NPY_NO_EXPORT const char *npy_no_copy_err_msg = ( + "Unable to avoid copy while creating an array as requested.\n" + "If using `np.array(obj, copy=False)` use `np.asarray(obj)` " + "or `copy=None` to allow NumPy to make the copy.\n" + "This changed in NumPy 2. The suggested fix works on all versions."); + /* * Reading from a file or a string. * @@ -1637,9 +1644,8 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * If we got this far, we definitely have to create a copy, since we are * converting either from a scalar (cache == NULL) or a (nested) sequence. 
 */
-    if (flags & NPY_ARRAY_ENSURENOCOPY ) {
-        PyErr_SetString(PyExc_ValueError,
-                "Unable to avoid copy while creating an array.");
+    if (flags & NPY_ARRAY_ENSURENOCOPY) {
+        PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg);
         Py_DECREF(dtype);
         npy_free_coercion_cache(cache);
         return NULL;
@@ -1847,8 +1853,7 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr,
             && !PyArray_ElementStrides(obj)) {
         PyObject *ret;
         if (requires & NPY_ARRAY_ENSURENOCOPY) {
-            PyErr_SetString(PyExc_ValueError,
-                    "Unable to avoid copy while creating a new array.");
+            PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg);
             return NULL;
         }
         ret = PyArray_NewCopy((PyArrayObject *)obj, NPY_ANYORDER);
@@ -1926,8 +1931,7 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)

     if (copy) {
         if (flags & NPY_ARRAY_ENSURENOCOPY ) {
-            PyErr_SetString(PyExc_ValueError,
-                "Unable to avoid copy while creating an array from given array.");
+            PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg);
             Py_DECREF(newtype);
             return NULL;
         }
@@ -2405,6 +2409,63 @@ PyArray_FromInterface(PyObject *origin)
 }


+
+/*
+ * Returns -1 and an error set or 0 with the original error cleared, must
+ * be called with an error set.
+ */
+static inline int
+check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames)
+{
+    if (kwnames == NULL) {
+        return -1;  /* didn't pass kwnames, can't possibly be the reason */
+    }
+    if (!PyErr_ExceptionMatches(PyExc_TypeError)) {
+        return -1;
+    }
+
+    PyObject *type, *value, *traceback;
+    PyErr_Fetch(&type, &value, &traceback);
+    if (value == NULL) {
+        PyErr_Restore(type, value, traceback);
+        return -1;
+    }
+
+    /*
+     * In most cases, if we fail, we assume the error was unrelated to the
+     * copy kwarg and simply restore the original one.
+     */
+    PyObject *str_value = PyObject_Str(value);
+    if (str_value == NULL) {
+        PyErr_Restore(type, value, traceback);
+        return -1;
+    }
+    int copy_kwarg_unsupported = PyUnicode_Contains(
+            str_value, npy_ma_str_array_err_msg_substr);
+    Py_DECREF(str_value);
+    if (copy_kwarg_unsupported == -1) {
+        PyErr_Restore(type, value, traceback);
+        return -1;
+    }
+    if (copy_kwarg_unsupported) {
+        /*
+         * TODO: As of NumPy 2.0, this warning is only triggered with
+         * `copy=False`, allowing downstream to not notice it.
+         */
+        Py_DECREF(type);
+        Py_DECREF(value);
+        Py_XDECREF(traceback);
+        if (DEPRECATE("__array__ should implement the 'dtype' and "
+                      "'copy' keyword argument") < 0) {
+            return -1;
+        }
+        return 0;
+    }
+    PyErr_Restore(type, value, traceback);
+    return -1;
+}
+
+
 /**
  * Check for an __array__ attribute and call it when it exists.
  *
@@ -2447,74 +2508,61 @@ PyArray_FromArrayAttr_int(
         return Py_NotImplemented;
     }

-    PyObject *kwargs = PyDict_New();
+    static PyObject *kwnames_is_copy = NULL;
+    if (kwnames_is_copy == NULL) {
+        kwnames_is_copy = Py_BuildValue("(s)", "copy");
+        if (kwnames_is_copy == NULL) {
+            Py_DECREF(array_meth);
+            return NULL;
+        }
+    }
+
+    Py_ssize_t nargs = 0;
+    PyObject *arguments[2];
+    PyObject *kwnames = NULL;
+
+    if (descr != NULL) {
+        arguments[0] = (PyObject *)descr;
+        nargs++;
+    }

     /*
      * Only if the value of `copy` isn't the default one, we try to pass it
      * along; for backwards compatibility we then retry if it fails because the
      * signature of the __array__ method being called does not have `copy`.
      */
-    int copy_passed = 0;
     if (copy != -1) {
-        copy_passed = 1;
-        PyObject *copy_obj = copy == 1 ? Py_True : Py_False;
-        PyDict_SetItemString(kwargs, "copy", copy_obj);
+        kwnames = kwnames_is_copy;
+        arguments[nargs] = copy == 1 ? Py_True : Py_False;
     }

-    PyObject *args = descr != NULL ? PyTuple_Pack(1, descr) : PyTuple_New(0);
-
-    new = PyObject_Call(array_meth, args, kwargs);
+    int must_copy_but_copy_kwarg_unimplemented = 0;
+    new = PyObject_Vectorcall(array_meth, arguments, nargs, kwnames);
     if (new == NULL) {
-        if (npy_ma_str_array_err_msg_substr == NULL) {
+        if (check_or_clear_and_warn_error_if_due_to_copy_kwarg(kwnames) < 0) {
+            /* Error was not cleared (or a new error set) */
             Py_DECREF(array_meth);
-            Py_DECREF(args);
-            Py_DECREF(kwargs);
             return NULL;
         }
-        PyObject *type, *value, *traceback;
-        PyErr_Fetch(&type, &value, &traceback);
-        if (value != NULL) {
-            PyObject *str_value = PyObject_Str(value);
-            if (PyUnicode_Contains(
-                    str_value, npy_ma_str_array_err_msg_substr) > 0) {
-                Py_DECREF(type);
-                Py_DECREF(value);
-                Py_XDECREF(traceback);
-                if (PyErr_WarnEx(PyExc_UserWarning,
-                                 "__array__ should implement 'dtype' and "
-                                 "'copy' keywords", 1) < 0) {
-                    Py_DECREF(str_value);
-                    Py_DECREF(array_meth);
-                    Py_DECREF(args);
-                    Py_DECREF(kwargs);
-                    return NULL;
-                }
-                if (copy_passed) {  /* try again */
-                    PyDict_DelItemString(kwargs, "copy");
-                    new = PyObject_Call(array_meth, args, kwargs);
-                    if (new == NULL) {
-                        Py_DECREF(str_value);
-                        Py_DECREF(array_meth);
-                        Py_DECREF(args);
-                        Py_DECREF(kwargs);
-                        return NULL;
-                    }
-                }
-            }
-            Py_DECREF(str_value);
+        if (copy == 0) {
+            /* Cannot possibly avoid a copy, so error out. */
+            PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg);
+            Py_DECREF(array_meth);
+            return NULL;
         }
+        /*
+         * The error seems to have been due to passing copy. We try to see
+         * more precisely what the message is and may try again.
+         */
+        must_copy_but_copy_kwarg_unimplemented = 1;
+        new = PyObject_Vectorcall(array_meth, arguments, nargs, NULL);
         if (new == NULL) {
-            PyErr_Restore(type, value, traceback);
             Py_DECREF(array_meth);
-            Py_DECREF(args);
-            Py_DECREF(kwargs);
             return NULL;
         }
     }

     Py_DECREF(array_meth);
-    Py_DECREF(args);
-    Py_DECREF(kwargs);

     if (!PyArray_Check(new)) {
         PyErr_SetString(PyExc_ValueError,
@@ -2523,6 +2571,10 @@ PyArray_FromArrayAttr_int(
         Py_DECREF(new);
         return NULL;
     }
+    if (must_copy_but_copy_kwarg_unimplemented) {
+        /* TODO: As of NumPy 2.0 this path is only reachable by C-API.
*/ + Py_SETREF(new, PyArray_NewCopy((PyArrayObject *)new, NPY_KEEPORDER)); + } return new; } diff --git a/numpy/_core/src/multiarray/ctors.h b/numpy/_core/src/multiarray/ctors.h index d2577f83ef96..fa1cd72e1478 100644 --- a/numpy/_core/src/multiarray/ctors.h +++ b/numpy/_core/src/multiarray/ctors.h @@ -1,6 +1,9 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_CTORS_H_ #define NUMPY_CORE_SRC_MULTIARRAY_CTORS_H_ +extern NPY_NO_EXPORT const char *npy_no_copy_err_msg; + + NPY_NO_EXPORT PyObject * PyArray_NewFromDescr( PyTypeObject *subtype, PyArray_Descr *descr, int nd, diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index b61cbed4c957..62cc25e64c1b 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -980,8 +980,7 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) Py_DECREF(self); return ret; } else { // copy == NPY_COPY_NEVER - PyErr_SetString(PyExc_ValueError, - "Unable to avoid copy while creating an array from given array."); + PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); Py_DECREF(self); return NULL; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 3c153adb83a8..f8f4c1603420 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1586,8 +1586,7 @@ _array_fromobject_generic( } else { if (copy == NPY_COPY_NEVER) { - PyErr_SetString(PyExc_ValueError, - "Unable to avoid copy while creating a new array."); + PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); goto finish; } ret = (PyArrayObject *)PyArray_NewCopy(oparr, order); diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index cdc418897bcc..7747e431f64a 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -8488,9 +8488,18 @@ def __array__(self, dtype=None): assert_array_equal(arr, base_arr) assert arr is base_arr - with pytest.warns(UserWarning, match=("should implement 'dtype' " - "and 'copy' keywords")): - np.array(a, copy=False) + # As of NumPy 2, explicitly passing copy=True does not trigger passing + # it to __array__ (deprecation warning is not triggered). 
+        arr = np.array(a, copy=True)
+        assert_array_equal(arr, base_arr)
+        assert arr is not base_arr
+
+        # And passing copy=False gives a deprecation warning, but also raises
+        # an error:
+        with pytest.warns(DeprecationWarning, match="__array__.*'copy'"):
+            with pytest.raises(ValueError,
+                    match=r"Unable to avoid copy(.|\n)*changed in NumPy 2"):
+                np.array(a, copy=False)

     @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
     def test__array__reference_leak(self):

From 99e5f19dddb8a695762a51029c428d301beb3bef Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Wed, 27 Mar 2024 15:36:01 +0100
Subject: [PATCH 114/980] API: Expose abstract DTypes and add a doc (stub!),
 plus smaller tweaks

---
 .../reference/c-api/types-and-structures.rst  | 24 +++++++++++++++++++
 numpy/_core/code_generators/numpy_api.py      |  4 +++-
 .../include/numpy/_public_dtype_api_table.h   | 11 ++++++++-
 numpy/_core/src/multiarray/abstractdtypes.c   | 12 +++++-----
 numpy/_core/src/multiarray/dtypemeta.c        |  7 +++---
 numpy/_core/src/multiarray/public_dtype_api.c |  5 ++++
 6 files changed, 52 insertions(+), 11 deletions(-)

diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index 865ae836269b..96f4f0d1e47a 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -906,6 +906,30 @@ PyArray_DTypeMeta and PyArrayDTypeMeta_Spec
    of functions in the DType API. Slot IDs must be one of the
    DType slot IDs enumerated in :ref:`dtype-slots`.

+Exposed DType classes (``PyArray_DTypeMeta`` objects)
+------------------------------------------------------
+
+For use with promoters, NumPy exposes a number of DTypes following the
+pattern ``PyArray_<Name>DType`` corresponding to those found in `np.dtypes`.
+
+Additionally, the three DTypes ``PyArray_PyLongDType``,
+``PyArray_PyFloatDType``, and ``PyArray_PyComplexDType`` correspond to the
+Python scalar values. These cannot be used in all places, but they do allow,
+for example, the common-dtype operation, and implementing promotion with them
+may be necessary.
+
+Further, the following abstract DTypes are defined, which cover both the
+builtin NumPy ones and the Python ones; users can in principle subclass
+from them (this does not inherit any DType-specific functionality):
+* ``PyArray_IntAbstractDType``
+* ``PyArray_FloatAbstractDType``
+* ``PyArray_ComplexAbstractDType``
+
+.. warning::
+   As of NumPy 2.0, the *only* valid use for these DTypes is registering a
+   promoter conveniently to e.g. match "any integers" (and subclass checks).
+   Because of this, they are not exposed to Python.
+
 PyUFunc_Type and PyUFuncObject
 ------------------------------

diff --git a/numpy/_core/code_generators/numpy_api.py b/numpy/_core/code_generators/numpy_api.py
index 7dbaeff4940b..30e2222e557e 100644
--- a/numpy/_core/code_generators/numpy_api.py
+++ b/numpy/_core/code_generators/numpy_api.py
@@ -94,6 +94,7 @@ def get_annotations():
     # NOTE: The Slots 320-360 are defined in `_experimental_dtype_api.h`
     # and filled explicitly outside the code generator as the metaclass
     # makes them tricky to expose. (This may be refactored.)
+ # Slot 366, 367, 368 are the abstract DTypes # End 2.0 API } @@ -107,7 +108,8 @@ def get_annotations(): 103, 115, 117, 122, 163, 164, 171, 173, 197, 201, 202, 208, 219, 220, 221, 222, 223, 278, 291, 293, 294, 295, 301] - + list(range(320, 361)) # range reserved DType class slots + # range/slots reserved DType classes (see _public_dtype_api_table.h): + + list(range(320, 361)) + [366, 367, 368] ), 'PyArray_GetNDArrayCVersion': (0,), # Unused slot 40, was `PyArray_SetNumericOps` diff --git a/numpy/_core/include/numpy/_public_dtype_api_table.h b/numpy/_core/include/numpy/_public_dtype_api_table.h index e106386d35e2..51f390540627 100644 --- a/numpy/_core/include/numpy/_public_dtype_api_table.h +++ b/numpy/_core/include/numpy/_public_dtype_api_table.h @@ -4,6 +4,9 @@ * * These definitions are only relevant for the public API and we reserve * the slots 320-360 in the API table generation for this (currently). + * + * TODO: This file should be consolidated with the API table generation + * (although not sure the current generation is worth preserving). */ #ifndef NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ #define NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ @@ -69,7 +72,13 @@ #define PyArray_DefaultIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[38]) /* New non-legacy DTypes follow in the order they were added */ #define PyArray_StringDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[39]) -/* NOTE: offset 40 is free, after that a new range will need to be used */ + +/* NOTE: offset 40 is free */ + +/* Need to start with a larger offset again for the abstract classes: */ +#define PyArray_IntAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[366]) +#define PyArray_FloatAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[367]) +#define PyArray_ComplexAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[368]) #endif /* NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION */ diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c index 6822513f88dd..8d00084f0efe 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -294,7 +294,7 @@ complex_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) */ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_IntAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._IntegerAbstractDType", + .tp_name = "numpy.dtypes._IntegerAbstractDType", .tp_base = &PyArrayDescr_Type, .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, @@ -311,7 +311,7 @@ NPY_DType_Slots pylongdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyLongDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._PyLongDType", + .tp_name = "numpy.dtypes._PyLongDType", .tp_base = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, @@ -323,7 +323,7 @@ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyLongDType = {{{ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_FloatAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._FloatAbstractDType", + .tp_name = "numpy.dtypes._FloatAbstractDType", .tp_base = &PyArrayDescr_Type, .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, @@ -340,7 +340,7 @@ NPY_DType_Slots pyfloatdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._PyFloatDType", + .tp_name = "numpy.dtypes._PyFloatDType", 
.tp_base = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, @@ -352,7 +352,7 @@ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatDType = {{{ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_ComplexAbstractDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._ComplexAbstractDType", + .tp_name = "numpy.dtypes._ComplexAbstractDType", .tp_base = &PyArrayDescr_Type, .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, @@ -369,7 +369,7 @@ NPY_DType_Slots pycomplexdtype_slots = { NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._PyComplexDType", + .tp_name = "numpy.dtypes._PyComplexDType", .tp_base = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 998fa4792f22..a7cdb9a6ec0a 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -914,9 +914,10 @@ default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) if (NPY_UNLIKELY(!NPY_DT_is_legacy(other))) { /* * Deal with the non-legacy types we understand: python scalars. - * These have lower priority than the concrete inexact types, but - * can change the type of the result (complex, float, int). - * If our own type if not numerical, signal not implemented. + * These may have lower priority than the concrete inexact types, + * but can change the type of the result (complex, float, int). + * If our own DType is not numerical or has lower priority (e.g. + * integer but abstract one is float), signal not implemented. 
*/ if (other == &PyArray_PyComplexDType) { if (PyTypeNum_ISCOMPLEX(cls->type_num)) { diff --git a/numpy/_core/src/multiarray/public_dtype_api.c b/numpy/_core/src/multiarray/public_dtype_api.c index 49ae3d2cd77e..60dceae3275d 100644 --- a/numpy/_core/src/multiarray/public_dtype_api.c +++ b/numpy/_core/src/multiarray/public_dtype_api.c @@ -175,4 +175,9 @@ _fill_dtype_api(void *full_api_table[]) api_table[38] = &PyArray_DefaultIntDType; /* Non-legacy DTypes that are built in to NumPy */ api_table[39] = &PyArray_StringDType; + + /* Abstract ones added directly: */ + full_api_table[366] = &PyArray_IntAbstractDType; + full_api_table[367] = &PyArray_FloatAbstractDType; + full_api_table[368] = &PyArray_ComplexAbstractDType; } From ceee998f06df28de80df4ad53e73239e56c5c1d5 Mon Sep 17 00:00:00 2001 From: Hugo van Kemenade <1324225+hugovk@users.noreply.github.com> Date: Wed, 27 Mar 2024 16:45:42 +0200 Subject: [PATCH 115/980] Remove custom styling for div.admonition>.admonition-title::before and ::after --- doc/source/_static/numpy.css | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 4a489474d9d7..78b054f8fa4e 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -143,15 +143,13 @@ div.admonition-legacy { border-color: var(--pst-color-warning); } -.admonition-legacy.admonition>.admonition-title::before, -div.admonition>.admonition-title::before { +.admonition-legacy.admonition>.admonition-title::before { color: var(--pst-color-warning); content: var(--pst-icon-admonition-attention); background-color: var(--pst-color-warning); } -.admonition-legacy.admonition>.admonition-title::after, -div.admonition>.admonition-title::after { +.admonition-legacy.admonition>.admonition-title::after { color: var(--pst-color-warning); content: var(--pst-icon-admonition-default); } \ No newline at end of file From c9fbd088e2b6a08b3dae49bfd50e683dbbb2f7ba Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 27 Mar 2024 15:51:25 +0100 Subject: [PATCH 116/980] Address Marten's review comments --- numpy/_core/src/multiarray/ctors.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 11dc10f7b528..ac0a1a340c71 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -43,7 +43,7 @@ NPY_NO_EXPORT const char *npy_no_copy_err_msg = ( "Unable to avoid copy while creating an array as requested.\n" "If using `np.array(obj, copy=False)` use `np.asarray(obj)` " - "or `copy=None` to allow NumPy to make the copy.\n" + "to allow NumPy to make a copy when needed.\n" "This changed in NumPy 2. The suggested fix works on all versions."); /* @@ -2424,28 +2424,25 @@ check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames) return -1; } + /* + * In most cases, if we fail, we assume the error was unrelated to the + * copy kwarg and simply restore the original one. + */ PyObject *type, *value, *traceback; PyErr_Fetch(&type, &value, &traceback); if (value == NULL) { - PyErr_Restore(type, value, traceback); - return -1; + goto restore_error; } - /* - * In most cases, if we fail, we assume the error was unrelated to the - * copy kwarg and simply restore the original one. 
-     */
     PyObject *str_value = PyObject_Str(value);
     if (str_value == NULL) {
-        PyErr_Restore(type, value, traceback);
-        return -1;
+        goto restore_error;
     }
     int copy_kwarg_unsupported = PyUnicode_Contains(
             str_value, npy_ma_str_array_err_msg_substr);
     Py_DECREF(str_value);
     if (copy_kwarg_unsupported == -1) {
-        PyErr_Restore(type, value, traceback);
-        return -1;
+        goto restore_error;
     }
     if (copy_kwarg_unsupported) {
         /*
@@ -2461,6 +2458,8 @@ check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames)
         }
         return 0;
     }
+
+  restore_error:
     PyErr_Restore(type, value, traceback);
     return -1;
 }
@@ -2551,7 +2550,7 @@ PyArray_FromArrayAttr_int(
         return NULL;
     }
     /*
-     * The seems to have been due to passing copy. We try to see
+     * The error seems to have been due to passing copy. We try to see
      * more precisely what the message is and may try again.
      */
     must_copy_but_copy_kwarg_unimplemented = 1;

From 67282f12d13b3d47242d69ab486e5625a9183d28 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Wed, 27 Mar 2024 18:06:30 +0100
Subject: [PATCH 117/980] BUG: If unused-indices are at the end, fill API table with NULLs

Otherwise, the API table is too short!
---
 numpy/_core/code_generators/generate_numpy_api.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/numpy/_core/code_generators/generate_numpy_api.py b/numpy/_core/code_generators/generate_numpy_api.py
index d69725e581aa..9e86c9499546 100644
--- a/numpy/_core/code_generators/generate_numpy_api.py
+++ b/numpy/_core/code_generators/generate_numpy_api.py
@@ -227,6 +227,7 @@ def do_generate_api(targets, sources):
 
     # Check multiarray api indexes
     multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
+    unused_index_max = max(multiarray_api_index.get("__unused_indices__", [0]))
     genapi.check_api_dict(multiarray_api_index)
 
     numpyapi_list = genapi.get_api_functions('NUMPY_API',
@@ -278,6 +279,10 @@ def do_generate_api(targets, sources):
         init_list.append(api_item.array_api_define())
         module_list.append(api_item.internal_define())
 
+    # In case we end with a "hole", append more NULLs
+    while len(init_list) <= unused_index_max:
+        init_list.append("        NULL")
+
     # Write to header
     s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
     genapi.write_file(header_file, s)

From a703ff341df516eff831d0fea87f617ccae086ff Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Wed, 27 Mar 2024 14:04:08 -0600
Subject: [PATCH 118/980] BUG: introduce PyArray_ViewableTypes to fix issues around stringdtype views

---
 numpy/_core/_internal.py                      |  2 +-
 numpy/_core/src/multiarray/ctors.c            |  2 +-
 numpy/_core/src/multiarray/multiarraymodule.c | 22 ++++++++++++++++++-
 numpy/_core/src/multiarray/multiarraymodule.h |  3 +++
 numpy/_core/tests/test_stringdtype.py         | 17 ++++++++++++++
 5 files changed, 43 insertions(+), 3 deletions(-)

diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py
index 8d6dc04851b5..058e93644dec 100644
--- a/numpy/_core/_internal.py
+++ b/numpy/_core/_internal.py
@@ -560,7 +560,7 @@ def _view_is_safe(oldtype, newtype):
         return
 
     if newtype.hasobject or oldtype.hasobject:
-        raise TypeError("Cannot change data-type for object array.")
+        raise TypeError("Cannot change data-type for array of references.")
     return
 
diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c
index 4c9d76991296..322244e60c1e 100644
--- a/numpy/_core/src/multiarray/ctors.c
+++ b/numpy/_core/src/multiarray/ctors.c
@@ -1922,7 +1922,7 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
         /* If a writeable array was requested, and arr is not */
        ((flags & NPY_ARRAY_WRITEABLE) &&
         (!(arrflags & NPY_ARRAY_WRITEABLE))) ||
-        !PyArray_EquivTypes(oldtype, newtype);
+        !PyArray_ViewableTypes(oldtype, newtype);
 
     if (copy) {
         if (flags & NPY_ARRAY_ENSURENOCOPY ) {
diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c
index 3c153adb83a8..f559c13d9811 100644
--- a/numpy/_core/src/multiarray/multiarraymodule.c
+++ b/numpy/_core/src/multiarray/multiarraymodule.c
@@ -1481,6 +1481,26 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2)
 }
 
 
+/*
+ * This function returns true if a view can be safely created
+ * between the two types. This implies that PyArray_EquivTypes
+ * is true as well.
+ */
+NPY_NO_EXPORT unsigned char
+PyArray_ViewableTypes(PyArray_Descr *type1, PyArray_Descr *type2)
+{
+    if (!PyArray_EquivTypes(type1, type2)) {
+        return 0;
+    }
+    // a view of a stringdtype array has a corrupt arena, unless
+    // type1 and type2 are exactly the same object
+    if ((type1 != type2) && (type1->kind == 'T')) {
+        return 0;
+    }
+    return 1;
+}
+
+
 /*NUMPY_API*/
 NPY_NO_EXPORT unsigned char
 PyArray_EquivTypenums(int typenum1, int typenum2)
@@ -1609,7 +1629,7 @@ _array_fromobject_generic(
 
     /* One more chance for faster exit if user specified the dtype. */
     oldtype = PyArray_DESCR(oparr);
-    if (PyArray_EquivTypes(oldtype, dtype)) {
+    if (PyArray_ViewableTypes(oldtype, dtype)) {
         if (copy != NPY_COPY_ALWAYS && STRIDING_OK(oparr, order)) {
             if (oldtype == dtype) {
                 Py_INCREF(op);
diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h
index ba03d367eeb8..7e00e46ea2e3 100644
--- a/numpy/_core/src/multiarray/multiarraymodule.h
+++ b/numpy/_core/src/multiarray/multiarraymodule.h
@@ -21,4 +21,7 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert_if_no_array;
 NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_cpu;
 NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_err_msg_substr;
 
+NPY_NO_EXPORT unsigned char
+PyArray_ViewableTypes(PyArray_Descr *type1, PyArray_Descr *type2);
+
 #endif  /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */
diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py
index e38575dbc0dd..8d72df6401f3 100644
--- a/numpy/_core/tests/test_stringdtype.py
+++ b/numpy/_core/tests/test_stringdtype.py
@@ -458,6 +458,23 @@ def test_creation_functions():
     assert np.empty(3, dtype="T")[0] == ""
 
 
+def test_create_with_copy_none(string_list):
+    arr = np.array(string_list, dtype=StringDType())
+    arr_rev = np.array(string_list[::-1], dtype=StringDType())
+
+    arr_copy = np.array(arr, copy=None, dtype=arr_rev.dtype)
+    np.testing.assert_array_equal(arr, arr_copy)
+    assert arr_copy.base is None
+
+    with pytest.raises(ValueError, match="Unable to avoid copy"):
+        np.array(arr, copy=False, dtype=arr_rev.dtype)
+
+    arr_view = np.array(arr, copy=None, dtype=arr.dtype)
+    np.testing.assert_array_equal(arr_view, arr)
+    np.testing.assert_array_equal(arr_view[::-1], arr_rev)
+    assert arr_view is arr
+
+
 @pytest.mark.parametrize(
     "strings",
     [

From ceee998f06df28de80df4ad53e73239e56c5c1d5 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Wed, 27 Mar 2024 22:20:03 +0100
Subject: [PATCH 119/980] DOC: Include full link to migration guide and tweak nocopy error

---
 numpy/_core/src/multiarray/ctors.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c
index ac0a1a340c71..f8afb1e28b25 100644
--- a/numpy/_core/src/multiarray/ctors.c
+++ 
b/numpy/_core/src/multiarray/ctors.c @@ -42,9 +42,9 @@ NPY_NO_EXPORT const char *npy_no_copy_err_msg = ( "Unable to avoid copy while creating an array as requested.\n" - "If using `np.array(obj, copy=False)` use `np.asarray(obj)` " - "to allow NumPy to make a copy when needed.\n" - "This changed in NumPy 2. The suggested fix works on all versions."); + "If using `np.array(obj, copy=False)` replace it with `np.asarray(obj)` " + "to allow a copy when needed (no behavior change in NumPy 1.x).\n" + "For more details, see https://numpy.org/devdocs/numpy_2_0_migration_guide.html#adapting-to-changes-in-the-copy-keyword."); /* * Reading from a file or a string. From 973b99f340042128e8357d0e48de7b029ac798fa Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 27 Mar 2024 22:28:42 +0100 Subject: [PATCH 120/980] TST: Fixup test for changed error message... --- numpy/_core/tests/test_multiarray.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 7747e431f64a..88c17656a18b 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -8498,7 +8498,7 @@ def __array__(self, dtype=None): # an error: with pytest.warns(DeprecationWarning, match="__array__.*'copy'"): with pytest.raises(ValueError, - match=r"Unable to avoid copy(.|\n)*changed in NumPy 2"): + match=r"Unable to avoid copy(.|\n)*numpy_2_0_migration_guide.html"): np.array(a, copy=False) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") From ae4be90ae2383ed12c58609a428e2f3c7d63000a Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 27 Mar 2024 19:29:33 -0600 Subject: [PATCH 121/980] MNT: make PyArray_ViewableTypes use view_offset from casting setup --- numpy/_core/src/multiarray/convert_datatype.c | 5 ++- numpy/_core/src/multiarray/ctors.c | 8 +++- numpy/_core/src/multiarray/multiarraymodule.c | 39 ++++++++++++++++--- .../_core/src/multiarray/stringdtype/casts.c | 3 +- 4 files changed, 46 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 92e6964fc21f..5f0a8138c10e 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -466,6 +466,7 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl, * Check for less harmful non-standard returns. The following two returns * should never happen: * 1. No-casting must imply a view offset of 0. + * (with an exception for stringdtype) * 2. Equivalent-casting + 0 view offset is (usually) the definition * of a "no" cast. However, changing the order of fields can also * create descriptors that are not equivalent but views. 
@@ -473,7 +474,9 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl,
      * principle, casting `<i8` to `<i4` is a cast with 0 offset.
      */
     if (*view_offset != 0) {
-        assert(casting != NPY_NO_CASTING);
+        if (from->kind != 'T' || to->kind != 'T') {
+            assert(casting != NPY_NO_CASTING);
+        }
     }
     else {
         assert(casting != NPY_EQUIV_CASTING
diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c
index 322244e60c1e..2c8373a25aca 100644
--- a/numpy/_core/src/multiarray/ctors.c
+++ b/numpy/_core/src/multiarray/ctors.c
@@ -1908,6 +1908,12 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
     }
 
     arrflags = PyArray_FLAGS(arr);
+    unsigned char viewable = PyArray_ViewableTypes(oldtype, newtype);
+    if (viewable < 0) {
+        Py_DECREF(newtype);
+        return NULL;
+    }
+
     /* If a guaranteed copy was requested */
     copy = (flags & NPY_ARRAY_ENSURECOPY) ||
            /* If C contiguous was requested, and arr is not */
@@ -1922,7 +1928,7 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
            /* If a writeable array was requested, and arr is not */
            ((flags & NPY_ARRAY_WRITEABLE) &&
             (!(arrflags & NPY_ARRAY_WRITEABLE))) ||
-           !PyArray_ViewableTypes(oldtype, newtype);
+           !viewable;
 
     if (copy) {
         if (flags & NPY_ARRAY_ENSURENOCOPY ) {
diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c
index f559c13d9811..9c1157cfb87b 100644
--- a/numpy/_core/src/multiarray/multiarraymodule.c
+++ b/numpy/_core/src/multiarray/multiarraymodule.c
@@ -1489,15 +1489,38 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2)
 NPY_NO_EXPORT unsigned char
 PyArray_ViewableTypes(PyArray_Descr *type1, PyArray_Descr *type2)
 {
-    if (!PyArray_EquivTypes(type1, type2)) {
+    if (type1 == type2) {
+        return 1;
+    }
+
+    if (Py_TYPE(Py_TYPE(type1)) == &PyType_Type) {
+        /*
+         * 2021-12-17: This case is nonsense and should be removed eventually!
+         *
+         * boost::python has/had a bug effectively using EquivTypes with
+         * `type(arbitrary_obj)`. That is clearly wrong as that cannot be a
+         * `PyArray_Descr *`. We assume that `type(type(type(arbitrary_obj))`
+         * is always in practice `type` (this is the type of the metaclass),
+         * but for our descriptors, `type(type(descr))` is DTypeMeta.
+         *
+         * In that case, we just return False. There is a possibility that
+         * this actually _worked_ effectively (returning 1 sometimes).
+         * We ignore that possibility for simplicity; it really is not our bug.
+         */
         return 0;
     }
-    // a view of a stringdtype array has a corrupt arena, unless
-    // type1 and type2 are exactly the same object
-    if ((type1 != type2) && (type1->kind == 'T')) {
+
+    npy_intp view_offset;
+    NPY_CASTING safety = PyArray_GetCastInfo(type1, type2, NULL, &view_offset);
+    if (safety < 0) {
+        PyErr_Clear();
         return 0;
     }
-    return 1;
+    if (view_offset != 0) {
+        return 0;
+    }
+    /* If casting is "no casting" this dtypes are considered equivalent. */
+    return PyArray_MinCastSafety(safety, NPY_NO_CASTING) == NPY_NO_CASTING;
 }
 
 
@@ -1629,7 +1652,11 @@ _array_fromobject_generic(
 
     /* One more chance for faster exit if user specified the dtype.
*/ oldtype = PyArray_DESCR(oparr); - if (PyArray_ViewableTypes(oldtype, dtype)) { + unsigned char viewable = PyArray_ViewableTypes(oldtype, dtype); + if (viewable < 0) { + goto finish; + } + if (viewable) { if (copy != NPY_COPY_ALWAYS && STRIDING_OK(oparr, order)) { if (oldtype == dtype) { Py_INCREF(op); diff --git a/numpy/_core/src/multiarray/stringdtype/casts.c b/numpy/_core/src/multiarray/stringdtype/casts.c index e3e7c5fde872..52e76b16c4f7 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.c +++ b/numpy/_core/src/multiarray/stringdtype/casts.c @@ -79,7 +79,8 @@ string_to_string_resolve_descriptors(PyObject *NPY_UNUSED(self), return NPY_UNSAFE_CASTING; } - *view_offset = 0; + // views are only legal between descriptors that share allocators (e.g. the same object) + *view_offset = descr0->allocator != descr1->allocator; return NPY_NO_CASTING; } From 8bb97252c7a42bf8fa95fa688d0ac2c039a429c9 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 28 Mar 2024 12:14:00 +0100 Subject: [PATCH 122/980] MAINT: Escalate import warning to an import error Now that pybind11 has the 2.12 release we can escalate the warning to an error here. --- numpy/core/_multiarray_umath.py | 44 +++++++++++---------------------- 1 file changed, 14 insertions(+), 30 deletions(-) diff --git a/numpy/core/_multiarray_umath.py b/numpy/core/_multiarray_umath.py index a77e1557ba62..04cc88229aac 100644 --- a/numpy/core/_multiarray_umath.py +++ b/numpy/core/_multiarray_umath.py @@ -14,7 +14,7 @@ def __getattr__(attr_name): from ._utils import _raise_warning if attr_name in {"_ARRAY_API", "_UFUNC_API"}: - from numpy.version import short_version, release + from numpy.version import short_version import textwrap import traceback import sys @@ -22,42 +22,26 @@ def __getattr__(attr_name): msg = textwrap.dedent(f""" A module that was compiled using NumPy 1.x cannot be run in NumPy {short_version} as it may crash. To support both 1.x and 2.x - versions of NumPy, modules must be compiled against NumPy 2.0. + versions of NumPy, modules must be compiled with NumPy 2.0. + Some module may need to rebuild instead e.g. with 'pybind11>=2.12'. If you are a user of the module, the easiest solution will be to - either downgrade NumPy or update the failing module (if available). + downgrade to 'numpy<2' or try to upgrade the affected module. + We expect that some modules will need time to support NumPy 2. """) - if not release and short_version.startswith("2.0.0"): - # TODO: Can remove this after the release. - msg += textwrap.dedent("""\ - NOTE: When testing against pre-release versions of NumPy 2.0 - or building nightly wheels for it, it is necessary to ensure - the NumPy pre-release is used at build time. - The main way to ensure this is using no build isolation - and installing dependencies manually with NumPy. - - If your dependencies have the issue, check whether they - build nightly wheels build against NumPy 2.0. - - pybind11 note: If you see this message and do not see - any errors raised, it's possible this is due to a - package using an old version of pybind11 that should be - updated. - - """) - msg += "Traceback (most recent call last):" + tb_msg = "Traceback (most recent call last):" for line in traceback.format_stack()[:-1]: if "frozen importlib" in line: continue - msg += line - # Only print the message. This has two reasons (for now!): - # 1. Old NumPy replaced the error here making it never actually show - # in practice, thus raising alone would not be helpful. - # 2. 
pybind11 simply reaches into NumPy internals and requires a
-        #    new release that includes the fix. That is missing as of 2023-11.
-        #    But, it "conveniently" ignores the ABI version.
-        sys.stderr.write(msg)
+            tb_msg += line
+
+        # Also print the message (with traceback). This is because old versions
+        # of NumPy unfortunately set up the import to replace (and hide) the
+        # error. The traceback shouldn't be needed, but e.g. pytest plugins
+        # seem to swallow it and we should be failing anyway...
+        sys.stderr.write(msg + tb_msg)
+
+        raise ImportError(msg)
 
     ret = getattr(_multiarray_umath, attr_name, None)
     if ret is None:

From 3094cadd5299a1319262f509030a4277dbff36ac Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Thu, 28 Mar 2024 11:07:15 +0100
Subject: [PATCH 123/980] DOC: Also add the message to the "import error" guide so it can be found

---
 .../user/troubleshooting-importerror.rst | 28 +++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst
index 232f9f7e2bf2..c36f49ff56c7 100644
--- a/doc/source/user/troubleshooting-importerror.rst
+++ b/doc/source/user/troubleshooting-importerror.rst
@@ -183,6 +183,34 @@ that usually works is to upgrade the NumPy version::
 
     pip install numpy --upgrade
 
+
+Downstream ImportError or AttributeError
+========================================
+
+If you see a message such as::
+
+    A module that was compiled using NumPy 1.x cannot be run in
+    NumPy 2.0.0 as it may crash. To support both 1.x and 2.x
+    versions of NumPy, modules must be compiled with NumPy 2.0.
+    Some module may need to rebuild instead e.g. with 'pybind11>=2.12'.
+
+Either as an ``ImportError` or with::
+
+    AttributeError: _ARRAY_API not found
+
+Then you are using NumPy 2 together with a module that was built with NumPy 1.
+NumPy 2 made some changes that require rebuilding such modules to avoid
+possibly incorrect results or crashes.
+
+As the error message suggests, the easiest solution is likely to downgrade
+NumPy to ``numpy<2``.
+Alternatively, you can search the traceback (from the back) to find the first
+line that isn't inside NumPy to see which module needs to be updated.
+
+NumPy 2 was released in the first half of 2024 and especially smaller
+modules downstream are expected to need time to adapt and publish a new
+version.
+
+
 Segfaults or crashes
 ====================

From f883969af9d9d1496454cb45c070a12b98ad67f0 Mon Sep 17 00:00:00 2001
From: Fabian Vogt
Date: Thu, 28 Mar 2024 14:39:45 +0100
Subject: [PATCH 124/980] BUG: Fix test_impossible_feature_enable failing without BASELINE_FEAT

If the build has no baseline features set, the test ended up setting
e.g. NPY_ENABLE_CPU_FEATURES="ASIMDHP, None". This actually made the
execution succeed, as the warning for decoding "None" overrode the
error for the real feature.

Fix the error handling there by removing the erroneous "return 0;",
add a test for this, and avoid passing "None" by accident.
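
A rough reproduction sketch from Python (hedged: "ASIMDHP" here is only an
example of a feature name that must not be supported by the machine under
test, and the exact error text may differ per build):

    import os
    import subprocess
    import sys

    # NPY_ENABLE_CPU_FEATURES with an unsupported feature plus a garbage
    # token; before this fix the garbage token masked the real error.
    env = dict(os.environ, NPY_ENABLE_CPU_FEATURES="ASIMDHP, Foobar")
    result = subprocess.run(
        [sys.executable, "-c", "import numpy"],
        env=env, capture_output=True, text=True,
    )
    assert result.returncode != 0        # the import must now fail...
    assert "ASIMDHP" in result.stderr    # ...reporting the real feature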
--- numpy/_core/src/common/npy_cpu_features.c | 1 - numpy/_core/tests/test_cpu_features.py | 14 ++++++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 7a24cb01625b..ab2594311419 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -325,7 +325,6 @@ npy__cpu_check_env(int disable, const char *env) { ) < 0) { return -1; } - return 0; } #define NOTSUPP_BODY \ diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 9649be2fcc67..f4a85c54ca6a 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -311,8 +311,8 @@ def test_impossible_feature_enable(self): err_type = "RuntimeError" self._expect_error(msg, err_type) - # Ensure that only the bad feature gets reported - feats = f"{bad_feature}, {self.BASELINE_FEAT}" + # Ensure that it fails even when providing garbage in addition + feats = f"{bad_feature}, Foobar" self.env['NPY_ENABLE_CPU_FEATURES'] = feats msg = ( f"You cannot enable CPU features \\({bad_feature}\\), since they " @@ -320,6 +320,16 @@ def test_impossible_feature_enable(self): ) self._expect_error(msg, err_type) + if self.BASELINE_FEAT is not None: + # Ensure that only the bad feature gets reported + feats = f"{bad_feature}, {self.BASELINE_FEAT}" + self.env['NPY_ENABLE_CPU_FEATURES'] = feats + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since " + "they are not supported by your machine." + ) + self._expect_error(msg, err_type) + is_linux = sys.platform.startswith('linux') is_cygwin = sys.platform.startswith('cygwin') machine = platform.machine() From b7f6de962a26a17fe482901711f5e631dac49d8e Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 28 Mar 2024 13:59:55 -0600 Subject: [PATCH 125/980] MNT: Only assert if finalize_descr isn't defined --- numpy/_core/src/multiarray/convert_datatype.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 5f0a8138c10e..c35f899847a5 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -465,18 +465,18 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl, /* * Check for less harmful non-standard returns. The following two returns * should never happen: - * 1. No-casting must imply a view offset of 0. - * (with an exception for stringdtype) + * 1. No-casting must imply a view offset of 0 unless the DType + defines a finalization function, which implies it stores data + on the descriptor * 2. Equivalent-casting + 0 view offset is (usually) the definition * of a "no" cast. However, changing the order of fields can also * create descriptors that are not equivalent but views. * Note that unsafe casts can have a view offset. 
For example, in
      * principle, casting `<i8` to `<i4` is a cast with 0 offset.
      */
-    if (*view_offset != 0) {
-        if (from->kind != 'T' || to->kind != 'T') {
-            assert(casting != NPY_NO_CASTING);
-        }
+    if ((*view_offset != 0 &&
+         NPY_DT_SLOTS(NPY_DTYPE(from))->finalize_descr == NULL)) {
+        assert(casting != NPY_NO_CASTING);
     }
     else {
         assert(casting != NPY_EQUIV_CASTING

From 15b08c305d23bf07b240f8792aa17a1359da4a8d Mon Sep 17 00:00:00 2001
From: krisrosengreen
Date: Thu, 28 Mar 2024 22:25:09 +0100
Subject: [PATCH 126/980] BUG: Infinite Loop in numpy.base_repr

---
 numpy/_core/numeric.py            | 2 +-
 numpy/_core/tests/test_numeric.py | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py
index 429620da5359..d5116cee2756 100644
--- a/numpy/_core/numeric.py
+++ b/numpy/_core/numeric.py
@@ -2142,7 +2142,7 @@ def base_repr(number, base=2, padding=0):
     elif base < 2:
         raise ValueError("Bases less than 2 not handled in base_repr.")
 
-    num = abs(number)
+    num = abs(int(number))  # int() first: abs(np.int8(-128)) overflows
     res = []
     while num:
         res.append(digits[num % base])
diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py
index 4dfc45cb180a..33b92bc5d928 100644
--- a/numpy/_core/tests/test_numeric.py
+++ b/numpy/_core/tests/test_numeric.py
@@ -1969,6 +1969,9 @@ def test_base_range(self):
         with assert_raises(ValueError):
             np.base_repr(1, 37)
 
+    def test_min_int(self):
+        assert_equal(np.base_repr(np.int8(-128)), '-10000000')
+
 
 def _test_array_equal_parametrizations():
     """

From 79d35524eec3fabe116ede5b11a4ccffdca128d7 Mon Sep 17 00:00:00 2001
From: Kristoffer Pedersen
Date: Fri, 29 Mar 2024 08:58:54 +0100
Subject: [PATCH 127/980] Update to test function name

Co-authored-by: Pieter Eendebak
---
 numpy/_core/tests/test_numeric.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py
index 33b92bc5d928..422cb2baf85b 100644
--- a/numpy/_core/tests/test_numeric.py
+++ b/numpy/_core/tests/test_numeric.py
@@ -1969,7 +1969,7 @@ def test_base_range(self):
         with assert_raises(ValueError):
             np.base_repr(1, 37)
 
-    def test_min_int(self):
+    def test_minimal_signed_int(self):
         assert_equal(np.base_repr(np.int8(-128)), '-10000000')
 

From daaec826e47cc4b4087f07588a56521e58f8b32b Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Fri, 29 Mar 2024 07:29:50 -0600
Subject: [PATCH 128/980] MAINT: Fix missing backtick.

---
 doc/source/user/troubleshooting-importerror.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst
index c36f49ff56c7..adbc9d898846 100644
--- a/doc/source/user/troubleshooting-importerror.rst
+++ b/doc/source/user/troubleshooting-importerror.rst
@@ -194,7 +194,7 @@ If you see a message such as::
     versions of NumPy, modules must be compiled with NumPy 2.0.
     Some module may need to rebuild instead e.g. with 'pybind11>=2.12'.
 
-Either as an ``ImportError` or with:: +Either as an ``ImportError`` or with:: AttributeError: _ARRAY_API not found From aaff72ac8682c61021eb902010fbe9dd3d0c8180 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 29 Mar 2024 16:29:36 +0100 Subject: [PATCH 129/980] DOC: mention np.lib.NumPyVersion in the 2.0 migration guide Closes gh-26031 [skip azp] [skip actions] [skip cirrus] --- doc/source/numpy_2_0_migration_guide.rst | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 4a569d612bf4..2179fe8e8ca6 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -423,3 +423,24 @@ The :ref:`copy keyword behavior changes ` in 3. For any ``__array__`` method on a non-NumPy array-like object, a ``copy=None`` keyword can be added to the signature - this will work with older NumPy versions as well. + + +Writing numpy-version-dependent code +------------------------------------ + +It should be fairly rare to have to write code that explicitly branches on the +``numpy`` version - in most cases, code can be rewritten to be compatible with +1.x and 2.0 at the same time. However, if it is necessary, here is a suggested +code pattern to use, using `numpy.lib.NumpyVersion`:: + + # example with AxisError, which is no longer available in + # the main namespace in 2.0, and not available in the + # `exceptions` namespace in <1.25.0 (example uses <2.0.0b1 + # for illustrative purposes): + if np.lib.NumpyVersion(np.__version__) >= '2.0.0b1': + from numpy.exceptions import AxisError + else: + from numpy import AxisError + +This pattern will work correctly including with NumPy release candidates, which +is important during the 2.0.0 release period. From 2ea7ce0abb66ea1b9aa37d43223cecb2bcf9f571 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 19 Mar 2024 12:29:31 +0100 Subject: [PATCH 130/980] TST: update public API test/listing to reflect actual 2.0.x state --- numpy/tests/test_public_api.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 780e1fccb79e..618223705937 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -114,14 +114,14 @@ def test_NPY_NO_EXPORT(): "f2py", "fft", "lib", - "lib.format", # was this meant to be public? 
+ "lib.array_utils", + "lib.format", + "lib.introspect", "lib.mixins", - "lib.recfunctions", + "lib.npyio", + "lib.recfunctions", # note: still needs cleaning, was forgotten for 2.0 "lib.scimath", "lib.stride_tricks", - "lib.npyio", - "lib.introspect", - "lib.array_utils", "linalg", "ma", "ma.extras", @@ -134,11 +134,12 @@ def test_NPY_NO_EXPORT(): "polynomial.legendre", "polynomial.polynomial", "random", + "strings", "testing", "testing.overrides", "typing", "typing.mypy_plugin", - "version" # Should be removed for NumPy 2.0 + "version", ]] if sys.version_info < (3, 12): PUBLIC_MODULES += [ @@ -158,7 +159,6 @@ def test_NPY_NO_EXPORT(): "numpy.char", "numpy.emath", "numpy.rec", - "numpy.strings", ] @@ -535,7 +535,7 @@ def test_core_shims_coherence(): # no need to add it to np.core if ( member_name.startswith("_") - or member_name == "tests" + or member_name in ["tests", "strings"] or f"numpy.{member_name}" in PUBLIC_ALIASED_MODULES ): continue From 3b9159d3d24c035ff8fd70ccd01427ad1b7ea5ca Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 29 Mar 2024 17:23:39 +0100 Subject: [PATCH 131/980] DOC: document the ``numpy.version`` submodule - officially public now --- doc/source/reference/module_structure.rst | 2 ++ doc/source/reference/routines.version.rst | 38 +++++++++++++++++++++++ numpy/_build_utils/gitversion.py | 3 ++ 3 files changed, 43 insertions(+) create mode 100644 doc/source/reference/routines.version.rst diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 2db9de7f03a8..01a5bcff7fbc 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -35,6 +35,7 @@ Special-purpose namespaces - :ref:`numpy.emath ` - mathematical functions with automatic domain - :ref:`numpy.lib ` - utilities & functionality which do not fit the main namespace - :ref:`numpy.rec ` - record arrays (largely superseded by dataframe libraries) +- :ref:`numpy.version ` - small module with more detailed version info Legacy namespaces ================= @@ -67,6 +68,7 @@ and/or this code is deprecated or isn't reliable. numpy.emath numpy.lib numpy.rec + numpy.version numpy.char numpy.distutils numpy.f2py <../f2py/index> diff --git a/doc/source/reference/routines.version.rst b/doc/source/reference/routines.version.rst new file mode 100644 index 000000000000..84f3c3ef1175 --- /dev/null +++ b/doc/source/reference/routines.version.rst @@ -0,0 +1,38 @@ +.. currentmodule:: numpy.version + +.. _routines.version: + +******************* +Version information +******************* + +The ``numpy.version`` submodule includes several constants that expose more +detailed information about the exact version of the installed ``numpy`` +package: + +.. data:: version + + Version string for the installed package - matches ``numpy.__version__``. + +.. data:: full_version + + Version string - the same as ``numpy.version.version``. + +.. data:: short_version + + Version string without any local build identifiers. + + .. rubric:: Examples + + >>> np.__version__ + '2.1.0.dev0+git20240319.2ea7ce0' # may vary + >>> np.version.short_version + '2.1.0.dev0' + +.. data:: git_revision + + String containing the git hash of the commit from which ``numpy`` was built. + +.. data:: release + + ``True`` if this version is a ``numpy`` release, ``False`` if a dev version. 
diff --git a/numpy/_build_utils/gitversion.py b/numpy/_build_utils/gitversion.py index 4ee6e00bbd65..defc704c41eb 100644 --- a/numpy/_build_utils/gitversion.py +++ b/numpy/_build_utils/gitversion.py @@ -70,6 +70,9 @@ def git_version(version): # For NumPy 2.0, this should only have one field: `version` template = textwrap.dedent(f''' + """ + Module to expose more detailed version info for the installed `numpy` + """ version = "{version}" __version__ = version full_version = version From 03fd9f3d3b4e1925f9af34f0595aa69decde7efc Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 29 Mar 2024 17:29:19 +0100 Subject: [PATCH 132/980] TST: add a test that the `np.version` public content is as expected [skip azp] [skip cirrus] --- numpy/tests/test_numpy_version.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py index 61643426c8d7..d3abcb92c1c3 100644 --- a/numpy/tests/test_numpy_version.py +++ b/numpy/tests/test_numpy_version.py @@ -39,3 +39,16 @@ def test_short_version(): else: assert_(np.__version__.split("+")[0] == np.version.short_version, "short_version mismatch in development version") + + +def test_version_module(): + contents = set([s for s in dir(np.version) if not s.startswith('_')]) + expected = set([ + 'full_version', + 'git_revision', + 'release', + 'short_version', + 'version', + ]) + + assert contents == expected From 49ea209a712689f87a2b1e87ac629df322df7f5c Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 29 Mar 2024 13:02:35 -0600 Subject: [PATCH 133/980] MAINT: Fix failure in routines.version.rst --- doc/source/reference/routines.version.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/routines.version.rst b/doc/source/reference/routines.version.rst index 84f3c3ef1175..72c48a752cf6 100644 --- a/doc/source/reference/routines.version.rst +++ b/doc/source/reference/routines.version.rst @@ -27,7 +27,7 @@ package: >>> np.__version__ '2.1.0.dev0+git20240319.2ea7ce0' # may vary >>> np.version.short_version - '2.1.0.dev0' + '2.1.0.dev0' # may vary .. 
data:: git_revision From a914726f3b68f4b0d5526a051c3ba6ab1b19f55b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 29 Mar 2024 16:59:21 -0600 Subject: [PATCH 134/980] MAINT: refactor to pass casting safety in --- numpy/_core/src/multiarray/ctors.c | 10 ++- numpy/_core/src/multiarray/multiarraymodule.c | 72 +++++-------------- numpy/_core/src/multiarray/multiarraymodule.h | 6 +- .../_core/src/multiarray/stringdtype/casts.c | 4 +- numpy/_core/src/umath/ufunc_object.c | 11 +-- 5 files changed, 36 insertions(+), 67 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 2c8373a25aca..abe6a631ebbe 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1908,11 +1908,9 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) } arrflags = PyArray_FLAGS(arr); - unsigned char viewable = PyArray_ViewableTypes(oldtype, newtype); - if (viewable < 0) { - Py_DECREF(newtype); - return NULL; - } + npy_intp view_offset; + npy_intp is_safe = PyArray_SafeCast(oldtype, newtype, &view_offset, NPY_NO_CASTING, 1); + npy_intp view_safe = (is_safe && (view_offset == 0)); /* If a guaranteed copy was requested */ copy = (flags & NPY_ARRAY_ENSURECOPY) || @@ -1928,7 +1926,7 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) /* If a writeable array was requested, and arr is not */ ((flags & NPY_ARRAY_WRITEABLE) && (!(arrflags & NPY_ARRAY_WRITEABLE))) || - !viewable; + !view_safe; if (copy) { if (flags & NPY_ARRAY_ENSURENOCOPY ) { diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 9c1157cfb87b..dff272164398 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1449,23 +1449,6 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) return 1; } - if (Py_TYPE(Py_TYPE(type1)) == &PyType_Type) { - /* - * 2021-12-17: This case is nonsense and should be removed eventually! - * - * boost::python has/had a bug effectively using EquivTypes with - * `type(arbitrary_obj)`. That is clearly wrong as that cannot be a - * `PyArray_Descr *`. We assume that `type(type(type(arbitrary_obj))` - * is always in practice `type` (this is the type of the metaclass), - * but for our descriptors, `type(type(descr))` is DTypeMeta. - * - * In that case, we just return False. There is a possibility that - * this actually _worked_ effectively (returning 1 sometimes). - * We ignore that possibility for simplicity; it really is not our bug. - */ - return 0; - } - /* * Do not use PyArray_CanCastTypeTo because it supports legacy flexible * dtypes as input. @@ -1482,45 +1465,29 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) /* - * This function returns true if a view can be safely created - * between the two types. This implies that PyArray_EquivTypes - * is true as well. + * This function returns true if the two types can be safely cast at + * *minimum_safety* casting level. Sets the view_offset if that is set + * for the cast. If ignore_error is 1, errors in cast setup are ignored. 
*/ -NPY_NO_EXPORT unsigned char -PyArray_ViewableTypes(PyArray_Descr *type1, PyArray_Descr *type2) +NPY_NO_EXPORT npy_intp +PyArray_SafeCast(PyArray_Descr *type1, PyArray_Descr *type2, + npy_intp* view_offset, NPY_CASTING minimum_safety, + npy_intp ignore_error) { if (type1 == type2) { + *view_offset = 0; return 1; } - if (Py_TYPE(Py_TYPE(type1)) == &PyType_Type) { - /* - * 2021-12-17: This case is nonsense and should be removed eventually! - * - * boost::python has/had a bug effectively using EquivTypes with - * `type(arbitrary_obj)`. That is clearly wrong as that cannot be a - * `PyArray_Descr *`. We assume that `type(type(type(arbitrary_obj))` - * is always in practice `type` (this is the type of the metaclass), - * but for our descriptors, `type(type(descr))` is DTypeMeta. - * - * In that case, we just return False. There is a possibility that - * this actually _worked_ effectively (returning 1 sometimes). - * We ignore that possibility for simplicity; it really is not our bug. - */ - return 0; - } - - npy_intp view_offset; - NPY_CASTING safety = PyArray_GetCastInfo(type1, type2, NULL, &view_offset); + NPY_CASTING safety = PyArray_GetCastInfo(type1, type2, NULL, view_offset); if (safety < 0) { - PyErr_Clear(); - return 0; - } - if (view_offset != 0) { - return 0; + if (ignore_error) { + PyErr_Clear(); + return 0; + } + return -1; } - /* If casting is "no casting" this dtypes are considered equivalent. */ - return PyArray_MinCastSafety(safety, NPY_NO_CASTING) == NPY_NO_CASTING; + return PyArray_MinCastSafety(safety, minimum_safety) == minimum_safety; } @@ -1652,11 +1619,10 @@ _array_fromobject_generic( /* One more chance for faster exit if user specified the dtype. */ oldtype = PyArray_DESCR(oparr); - unsigned char viewable = PyArray_ViewableTypes(oldtype, dtype); - if (viewable < 0) { - goto finish; - } - if (viewable) { + npy_intp view_offset; + npy_intp is_safe = PyArray_SafeCast(oldtype, dtype, &view_offset, NPY_NO_CASTING, 1); + npy_intp view_safe = (is_safe && (view_offset == 0)); + if (view_safe) { if (copy != NPY_COPY_ALWAYS && STRIDING_OK(oparr, order)) { if (oldtype == dtype) { Py_INCREF(op); diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 7e00e46ea2e3..4aac241523d7 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -21,7 +21,9 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert_if_no_array; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_cpu; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_err_msg_substr; -NPY_NO_EXPORT unsigned char -PyArray_ViewableTypes(PyArray_Descr *type1, PyArray_Descr *type2); +NPY_NO_EXPORT npy_intp +PyArray_SafeCast(PyArray_Descr *type1, PyArray_Descr *type2, + npy_intp* view_offset, NPY_CASTING minimum_safety, + npy_intp ignore_errors); #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/src/multiarray/stringdtype/casts.c b/numpy/_core/src/multiarray/stringdtype/casts.c index 52e76b16c4f7..b896d09ace65 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.c +++ b/numpy/_core/src/multiarray/stringdtype/casts.c @@ -80,7 +80,9 @@ string_to_string_resolve_descriptors(PyObject *NPY_UNUSED(self), } // views are only legal between descriptors that share allocators (e.g. 
the same object) - *view_offset = descr0->allocator != descr1->allocator; + if (descr0->allocator == descr1->allocator) { + *view_offset = 0; + }; return NPY_NO_CASTING; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 63ef0d9080ca..0fabc37b070f 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -62,6 +62,7 @@ #include "legacy_array_method.h" #include "abstractdtypes.h" #include "mapping.h" +#include "multiarraymodule.h" /* TODO: Only for `NpyIter_GetTransferFlags` until it is public */ #define NPY_ITERATOR_IMPLEMENTATION_CODE @@ -789,9 +790,9 @@ check_for_trivial_loop(PyArrayMethodObject *ufuncimpl, if (dtypes[i] != PyArray_DESCR(op[i])) { npy_intp view_offset; - NPY_CASTING safety = PyArray_GetCastInfo( - PyArray_DESCR(op[i]), dtypes[i], NULL, &view_offset); - if (safety < 0 && PyErr_Occurred()) { + npy_intp is_safe = PyArray_SafeCast( + PyArray_DESCR(op[i]), dtypes[i], &view_offset, casting, 0); + if (is_safe < 0 && PyErr_Occurred()) { /* A proper error during a cast check, should be rare */ return -1; } @@ -806,8 +807,8 @@ check_for_trivial_loop(PyArrayMethodObject *ufuncimpl, * can force cast to bool) */ } - else if (PyArray_MinCastSafety(safety, casting) != casting) { - return 0; /* the cast is not safe enough */ + else if (is_safe != 1) { + return 0; /* there was a cast error or cast is not safe enough */ } } if (must_copy) { From 42bbfcea291f592a890ad1f60322196f7386a090 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 31 Mar 2024 11:34:36 -0600 Subject: [PATCH 135/980] MAINT: Update Pyodide to 0.25.1 Closes #26164. --- .github/workflows/emscripten.yml | 7 +-- ...environment-variable-pyodide-gh-4502.patch | 55 ------------------- 2 files changed, 1 insertion(+), 61 deletions(-) delete mode 100644 tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 0c8274756b8c..db980e27f2b6 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -23,7 +23,7 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' env: - PYODIDE_VERSION: 0.25.0 + PYODIDE_VERSION: 0.25.1 # PYTHON_VERSION and EMSCRIPTEN_VERSION are determined by PYODIDE_VERSION. 
  # The appropriate versions can be found in the Pyodide repodata.json
   # "info" field, or in Makefile.envs:
@@ -69,11 +69,6 @@ jobs:
           with open(env_file, "a") as myfile:
               myfile.write(f"PYODIDE_BUILD_PATH={pyodide_build_path}\n")
 
-      - name: Apply patch(es) for pyodide-build installation
-        run: |
-          ls -a ${{ env.PYODIDE_BUILD_PATH }}
-          patch -d "${{ env.PYODIDE_BUILD_PATH }}" -p1 < tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch
-
       - name: Build NumPy for Pyodide
         run: |
           pyodide build -Cbuild-dir=build -Csetup-args="--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross" -Csetup-args="-Dblas=none" -Csetup-args="-Dlapack=none"
diff --git a/tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch b/tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch
deleted file mode 100644
index f06ea4eead19..000000000000
--- a/tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From e08ebf0e90f632547c8ff5b396ec0c4ddd65aad4 Mon Sep 17 00:00:00 2001
-From: Gyeongjae Choi
-Date: Sat, 10 Feb 2024 03:28:01 +0900
-Subject: [PATCH] Update numpy to 1.26.4 and don't set MESON env variable
- (#4502)
-
-From meson-python 0.15, $MESON env variable is used to overwrite the meson binary
-path. We don't want that behavior.
----
- pypabuild.py | 22 +++++++++++++++-------
- 1 file changed, 15 insertions(+), 7 deletions(-)
-
-diff --git a/pypabuild.py b/pypabuild.py
-index 9d0107a8..6961b14e 100644
---- a/pypabuild.py
-+++ b/pypabuild.py
-@@ -40,6 +40,19 @@ AVOIDED_REQUIREMENTS = [
-     "patchelf",
- ]
- 
-+# corresponding env variables for symlinks
-+SYMLINK_ENV_VARS = {
-+    "cc": "CC",
-+    "c++": "CXX",
-+    "ld": "LD",
-+    "lld": "LLD",
-+    "ar": "AR",
-+    "gcc": "GCC",
-+    "ranlib": "RANLIB",
-+    "strip": "STRIP",
-+    "gfortran": "FC",  # https://mesonbuild.com/Reference-tables.html#compiler-and-linker-selection-variables
-+}
-+
- 
- def _gen_runner(
-     cross_build_env: Mapping[str, str],
-@@ -207,13 +220,8 @@ def make_command_wrapper_symlinks(symlink_dir: Path) -> dict[str, str]:
-             symlink_path.unlink()
- 
-         symlink_path.symlink_to(pywasmcross_exe)
--        if symlink == "c++":
--            var = "CXX"
--        elif symlink == "gfortran":
--            var = "FC"  # https://mesonbuild.com/Reference-tables.html#compiler-and-linker-selection-variables
--        else:
--            var = symlink.upper()
--        env[var] = str(symlink_path)
-+        if symlink in SYMLINK_ENV_VARS:
-+            env[SYMLINK_ENV_VARS[symlink]] = str(symlink_path)
- 
-         return env
- 
--- 
-2.39.3 (Apple Git-145)
-

From af099a77ec0101d541f2b604bf5a5846482aca13 Mon Sep 17 00:00:00 2001
From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com>
Date: Mon, 25 Mar 2024 23:58:59 +0530
Subject: [PATCH 136/980] CI: Push NumPy Pyodide wheels to Anaconda.org

This commit adds a schedule to push WASM wheels that are compiled via
the Emscripten toolchain and Pyodide ecosystem to NumPy's PyPI-like
index on Anaconda.org. The key changes here are:

1. A schedule has been added to the job
2. A workflow_dispatch trigger has been added to push the wheels
manually if needed
3. A step has been added that runs after the tests run and succeed,
which uses a repository secret, NUMPY_NIGHTLY_UPLOAD_TOKEN.

The artifacts can then be found at this link:
https://anaconda.org/scientific-python-nightly-wheels/numpy

The wheel uploads will not be attempted on forks or on workflow run
contexts outside of the provided condition(s) in the newly added step.
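
For reference, the manual path added here can also be exercised through
GitHub's REST workflow-dispatch endpoint; a hedged sketch, where GH_TOKEN
is a placeholder for a token with workflow permissions:

    import os
    import requests

    # Equivalent to pressing "Run workflow" with push_wheels=true.
    resp = requests.post(
        "https://api.github.com/repos/numpy/numpy/actions/workflows"
        "/emscripten.yml/dispatches",
        headers={
            "Authorization": f"Bearer {os.environ['GH_TOKEN']}",
            "Accept": "application/vnd.github+json",
        },
        json={"ref": "main", "inputs": {"push_wheels": "true"}},
    )
    resp.raise_for_status()  # GitHub returns 204 No Content on success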
[skip cirrus]
[skip circle]
[skip azp]
[skip travis]
---
 .github/workflows/emscripten.yml | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml
index db980e27f2b6..3346ce7b2e14 100644
--- a/.github/workflows/emscripten.yml
+++ b/.github/workflows/emscripten.yml
@@ -5,6 +5,26 @@ on:
     branches:
       - main
       - maintenance/**
+  # Note: this workflow gets triggered on the same schedule as the
+  # wheels.yml workflow, with the exception that this workflow runs
+  # the test suite for the Pyodide wheel too, prior to uploading it.
+  #
+  # Run on schedule to upload to Anaconda.org
+  schedule:
+    # ┌───────────── minute (0 - 59)
+    # │ ┌───────────── hour (0 - 23)
+    # │ │ ┌───────────── day of the month (1 - 31)
+    # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
+    # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
+    # │ │ │ │ │
+    - cron: "42 2 * * SUN,WED"
+  workflow_dispatch:
+    inputs:
+      push_wheels:
+        description: >
+          Push wheels to Anaconda.org if the build is successful; can be 'true' or 'false'. Default is 'false'. Warning: this will overwrite existing wheels.
+        required: false
+        default: 'false'
 
 env:
   FORCE_COLOR: 3
@@ -90,3 +110,15 @@ jobs:
           source .venv-pyodide/bin/activate
           cd ..
           pytest --pyargs numpy -m "not slow"
+
+      # Push to https://anaconda.org/scientific-python-nightly-wheels/numpy
+      # WARNING: this job will overwrite any existing WASM wheels.
+      - name: Push to Anaconda PyPI index
+        if: >-
+          (github.repository == 'numpy/numpy') &&
+          ((github.event_name == 'workflow_dispatch' && github.event.inputs.push_wheels == 'true') ||
+          (github.event_name == 'schedule'))
+        uses: scientific-python/upload-nightly-action@b67d7fcc0396e1128a474d1ab2b48aa94680f9fc # v0.5.0
+        with:
+          artifacts_path: dist/
+          anaconda_nightly_upload_token: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }}

From 1c8b03bf2c87f081eea211a5061e423285c548af Mon Sep 17 00:00:00 2001
From: partev
Date: Sun, 31 Mar 2024 22:32:45 -0400
Subject: [PATCH 137/980] DOC: Update absolute_beginners.rst (#26182)

fix a typo
---
 doc/source/user/absolute_beginners.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst
index 05c379f4cd4f..f294f384f7c1 100644
--- a/doc/source/user/absolute_beginners.rst
+++ b/doc/source/user/absolute_beginners.rst
@@ -1525,7 +1525,7 @@ If you want to store a single ndarray object, store it as a .npy file using
 save it as a .npz file using ``np.savez``. You can also save several arrays
 into a single file in compressed npz format with `savez_compressed`.
 
-It's easy to save and load and array with ``np.save()``. Just make sure to
+It's easy to save and load an array with ``np.save()``. Just make sure to
 specify the array you want to save and a file name. For example, if you
 create this array::

From 55abe417af376570df59928aa9584a33421cbeb4 Mon Sep 17 00:00:00 2001
From: Yang Liu
Date: Mon, 1 Apr 2024 15:39:09 +0800
Subject: [PATCH 138/980] TST: Use platform.machine() for improved portability
 on riscv64

Replace `platform.processor()` with `platform.machine()`, since the
latter is more portable. More specifically, `platform.processor()`
returns an empty string on Arch Linux, and this PR fixes the
corresponding test failure.
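
To illustrate the difference (output is machine- and distro-dependent; the
values in the comments are examples only):

    import platform

    # platform.processor() may legitimately be empty, e.g. on Arch Linux,
    # so checks keyed on it can silently take the wrong branch.
    print(repr(platform.processor()))   # e.g. '' or 'x86_64'

    # platform.machine() reliably reports the hardware architecture.
    print(platform.machine())           # e.g. 'riscv64', 'x86_64', 'aarch64'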
--- numpy/_core/tests/test_numeric.py | 2 +- numpy/_core/tests/test_umath.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 422cb2baf85b..7980973086f6 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -798,7 +798,7 @@ def setup_method(self): # Propagation of the RISC-V Unprivileged ISA for more details. # We disable the float32 sign test on riscv64 for -np.nan as the sign # of the NaN will be lost when it's converted to a float32. - if platform.processor() != 'riscv64': + if platform.machine() != 'riscv64': self.signf[3::6][self.ef[3::6]] = -np.nan self.signd[3::6][self.ed[3::6]] = -np.nan self.signf[4::6][self.ef[4::6]] = -0. diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index e01e6dd6346b..09942265f10d 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1844,7 +1844,7 @@ def test_fpclass(self, stride): assert_equal(np.isnan(arr_f64[::stride]), nan[::stride]) assert_equal(np.isinf(arr_f32[::stride]), inf[::stride]) assert_equal(np.isinf(arr_f64[::stride]), inf[::stride]) - if platform.processor() == 'riscv64': + if platform.machine() == 'riscv64': # On RISC-V, many operations that produce NaNs, such as converting # a -NaN from f64 to f32, return a canonical NaN. The canonical # NaNs are always positive. See section 11.3 NaN Generation and @@ -1881,7 +1881,7 @@ def test_fp_noncontiguous(self, dtype): ncontig_out = out[1::3] contig_in = np.array(ncontig_in) - if platform.processor() == 'riscv64': + if platform.machine() == 'riscv64': # Disable the -np.nan signbit tests on riscv64. See comments in # test_fpclass for more details. data_rv = np.copy(data) @@ -1920,7 +1920,7 @@ def test_fp_noncontiguous(self, dtype): finite_split = np.array(np.array_split(finite, 2)) assert_equal(np.isnan(data_split), nan_split) assert_equal(np.isinf(data_split), inf_split) - if platform.processor() == 'riscv64': + if platform.machine() == 'riscv64': data_split_rv = np.array(np.array_split(data_rv, 2)) assert_equal(np.signbit(data_split_rv), sign_split) else: From 77b59140791123ab9cdd168117c375ecf8ae4d65 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 1 Apr 2024 09:52:40 -0600 Subject: [PATCH 139/980] MNT: add pythoncapi-compat as a git submodule --- .gitmodules | 3 +++ numpy/_core/src/common/pythoncapi-compat | 1 + 2 files changed, 4 insertions(+) create mode 160000 numpy/_core/src/common/pythoncapi-compat diff --git a/.gitmodules b/.gitmodules index 3934afe4500c..4aa48f5bac5c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -16,3 +16,6 @@ [submodule "numpy/fft/pocketfft"] path = numpy/fft/pocketfft url = https://github.com/mreineck/pocketfft +[submodule "numpy/_core/src/common/pythoncapi-compat"] + path = numpy/_core/src/common/pythoncapi-compat + url = https://github.com/python/pythoncapi-compat diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat new file mode 160000 index 000000000000..f66799130acd --- /dev/null +++ b/numpy/_core/src/common/pythoncapi-compat @@ -0,0 +1 @@ +Subproject commit f66799130acd8843802185553dadf0e300c5fe05 From 0ab2f25052f5c18106a8bd656b2417e8e7a9a743 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 1 Apr 2024 10:50:16 -0600 Subject: [PATCH 140/980] MNT: include pythoncapi_compat.h in npy_pycompat.h --- numpy/_core/src/common/npy_pycompat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index ce6c34fa1333..67d4f6f625a0 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -2,7 +2,7 @@ #define NUMPY_CORE_SRC_COMMON_NPY_PYCOMPAT_H_ #include "numpy/npy_3kcompat.h" - +#include "pythoncapi-compat/pythoncapi_compat.h" /* * In Python 3.10a7 (or b1), python started using the identity for the hash From 917e4b039c3f44c8e3e09b624e3f447022c41e02 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 1 Apr 2024 10:50:35 -0600 Subject: [PATCH 141/980] MNT: remove unnecessary npy_pycompat.h includes --- numpy/_core/src/common/array_assign.c | 2 +- numpy/_core/src/common/npy_argparse.c | 5 +++-- numpy/_core/src/common/npy_longdouble.c | 2 +- numpy/_core/src/common/numpyos.c | 2 +- numpy/_core/src/common/ucsnarrow.c | 2 +- numpy/_core/src/common/ufunc_override.c | 2 +- numpy/_core/src/dummymodule.c | 2 -- numpy/_core/src/multiarray/_multiarray_tests.c.src | 2 +- numpy/_core/src/multiarray/array_assign_array.c | 2 +- numpy/_core/src/multiarray/array_assign_scalar.c | 2 +- numpy/_core/src/multiarray/array_converter.c | 2 +- numpy/_core/src/multiarray/arrayfunction_override.c | 2 +- numpy/_core/src/multiarray/calculation.c | 2 +- numpy/_core/src/multiarray/common.c | 2 +- numpy/_core/src/multiarray/conversion_utils.c | 2 +- numpy/_core/src/multiarray/convert.c | 2 +- numpy/_core/src/multiarray/ctors.c | 2 +- numpy/_core/src/multiarray/datetime.c | 2 +- numpy/_core/src/multiarray/datetime_busday.c | 2 +- numpy/_core/src/multiarray/datetime_busdaycal.c | 2 +- numpy/_core/src/multiarray/datetime_strings.c | 2 +- numpy/_core/src/multiarray/descriptor.c | 2 +- numpy/_core/src/multiarray/dragon4.h | 2 +- numpy/_core/src/multiarray/dtype_transfer.c | 2 +- numpy/_core/src/multiarray/dtypemeta.c | 2 +- numpy/_core/src/multiarray/einsum.c.src | 2 +- numpy/_core/src/multiarray/flagsobject.c | 2 +- numpy/_core/src/multiarray/getset.c | 2 +- numpy/_core/src/multiarray/hashdescr.c | 2 +- numpy/_core/src/multiarray/item_selection.c | 2 +- numpy/_core/src/multiarray/iterators.c | 2 +- numpy/_core/src/multiarray/nditer_impl.h | 2 +- numpy/_core/src/multiarray/nditer_pywrap.c | 2 +- numpy/_core/src/multiarray/number.c | 2 +- numpy/_core/src/multiarray/refcount.c | 2 +- numpy/_core/src/multiarray/scalarapi.c | 2 +- numpy/_core/src/multiarray/sequence.c | 2 +- numpy/_core/src/multiarray/shape.c | 2 +- numpy/_core/src/multiarray/usertypes.c | 2 +- numpy/_core/src/umath/_umath_tests.c.src | 2 +- numpy/_core/src/umath/extobj.c | 2 +- numpy/_core/src/umath/funcs.inc.src | 2 +- numpy/_core/src/umath/matmul.c.src | 2 +- numpy/_core/src/umath/override.c | 4 ++-- numpy/_core/src/umath/reduction.c | 2 +- numpy/_core/src/umath/scalarmath.c.src | 2 +- numpy/_core/src/umath/ufunc_object.c | 2 +- numpy/_core/src/umath/ufunc_type_resolution.c | 4 +++- numpy/linalg/umath_linalg.cpp | 2 -- 49 files changed, 52 insertions(+), 53 deletions(-) diff --git a/numpy/_core/src/common/array_assign.c b/numpy/_core/src/common/array_assign.c index 4e154b7fc7b0..3c6d2f14cb65 100644 --- a/numpy/_core/src/common/array_assign.c +++ b/numpy/_core/src/common/array_assign.c @@ -15,7 +15,7 @@ #include #include "npy_config.h" -#include "npy_pycompat.h" + #include "shape.h" diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index 59858f6207bb..2be17483ec28 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ -5,8 +5,9 @@ #include #include 
"numpy/ndarraytypes.h" +#include "numpy/npy_2_compat.h" #include "npy_argparse.h" -#include "npy_pycompat.h" + #include "npy_import.h" #include "arrayfunction_override.h" @@ -195,7 +196,7 @@ initialize_keywords(const char *funcname, } if (i >= npositional_only) { int i_kwarg = i - npositional_only; - cache->kw_strings[i_kwarg] = PyUString_InternFromString(name); + cache->kw_strings[i_kwarg] = PyUnicode_InternFromString(name); if (cache->kw_strings[i_kwarg] == NULL) { va_end(va); goto error; diff --git a/numpy/_core/src/common/npy_longdouble.c b/numpy/_core/src/common/npy_longdouble.c index 38dfd325c685..ce80a9ae2bc3 100644 --- a/numpy/_core/src/common/npy_longdouble.c +++ b/numpy/_core/src/common/npy_longdouble.c @@ -6,7 +6,7 @@ #include "numpy/ndarraytypes.h" #include "numpy/npy_math.h" -#include "npy_pycompat.h" + #include "numpyos.h" /* diff --git a/numpy/_core/src/common/numpyos.c b/numpy/_core/src/common/numpyos.c index fb69e2587ee9..319f5dcc395f 100644 --- a/numpy/_core/src/common/numpyos.c +++ b/numpy/_core/src/common/numpyos.c @@ -9,7 +9,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #if defined(HAVE_STRTOLD_L) && !defined(_GNU_SOURCE) # define _GNU_SOURCE diff --git a/numpy/_core/src/common/ucsnarrow.c b/numpy/_core/src/common/ucsnarrow.c index 4bea4beee384..203e02fbb3dd 100644 --- a/numpy/_core/src/common/ucsnarrow.c +++ b/numpy/_core/src/common/ucsnarrow.c @@ -9,7 +9,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "ctors.h" /* diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index 4fb4d4b3edda..c9b5d0e68f82 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -1,7 +1,7 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE -#include "npy_pycompat.h" +#include "numpy/ndarraytypes.h" #include "get_attr_string.h" #include "npy_import.h" #include "ufunc_override.h" diff --git a/numpy/_core/src/dummymodule.c b/numpy/_core/src/dummymodule.c index 7284ffd68545..2f293d6c4cd6 100644 --- a/numpy/_core/src/dummymodule.c +++ b/numpy/_core/src/dummymodule.c @@ -10,8 +10,6 @@ #define PY_SSIZE_T_CLEAN #include -#include "npy_pycompat.h" - static struct PyMethodDef methods[] = { {NULL, NULL, 0, NULL} }; diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 606405dbfdda..7ce182c2343f 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -58,7 +58,7 @@ IsPythonScalar(PyObject * dummy, PyObject *args) } } -#include "npy_pycompat.h" + /** Function to test calling via ctypes */ diff --git a/numpy/_core/src/multiarray/array_assign_array.c b/numpy/_core/src/multiarray/array_assign_array.c index 687757190d00..8886d1cacb40 100644 --- a/numpy/_core/src/multiarray/array_assign_array.c +++ b/numpy/_core/src/multiarray/array_assign_array.c @@ -17,7 +17,7 @@ #include "numpy/npy_math.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "convert_datatype.h" #include "methods.h" diff --git a/numpy/_core/src/multiarray/array_assign_scalar.c b/numpy/_core/src/multiarray/array_assign_scalar.c index d1919fafb4bd..6818c1aa2a1b 100644 --- a/numpy/_core/src/multiarray/array_assign_scalar.c +++ b/numpy/_core/src/multiarray/array_assign_scalar.c @@ -17,7 +17,7 @@ #include "numpy/npy_math.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "convert_datatype.h" #include "methods.h" diff --git 
a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index fd7ccd767056..22e4e8bb3cb9 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -26,7 +26,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "array_assign.h" #include "common.h" diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index 707024f94c04..20223e1449fb 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -4,7 +4,7 @@ #include #include "structmember.h" -#include "npy_pycompat.h" +#include "numpy/ndarraytypes.h" #include "get_attr_string.h" #include "npy_import.h" #include "multiarraymodule.h" diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c index 73d8ba58bd05..cf77ce90902d 100644 --- a/numpy/_core/src/multiarray/calculation.c +++ b/numpy/_core/src/multiarray/calculation.c @@ -11,7 +11,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "common.h" #include "number.h" diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 5804c9cc9148..20495a0585be 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -7,7 +7,7 @@ #include "numpy/arrayobject.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "common.h" #include "abstractdtypes.h" diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index d58fee3823ee..ac1fb3ae38dc 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -10,7 +10,7 @@ #include "numpy/npy_math.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "common.h" #include "arraytypes.h" diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index c6b164d7f4e9..aad40cab9593 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -9,7 +9,7 @@ #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" -#include "npy_pycompat.h" + #include "common.h" #include "arrayobject.h" diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 4c9d76991296..324027659f52 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -14,7 +14,7 @@ #include "npy_config.h" #include "npy_ctypes.h" -#include "npy_pycompat.h" + #include "multiarraymodule.h" #include "common.h" diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index b340cf6cf496..474c048db6cf 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -16,7 +16,7 @@ #include "numpyos.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "common.h" #include "numpy/arrayscalars.h" diff --git a/numpy/_core/src/multiarray/datetime_busday.c b/numpy/_core/src/multiarray/datetime_busday.c index 93ed0972ec98..73c88811a0a9 100644 --- a/numpy/_core/src/multiarray/datetime_busday.c +++ b/numpy/_core/src/multiarray/datetime_busday.c @@ -15,7 +15,7 @@ #include #include "npy_config.h" -#include "npy_pycompat.h" + #include "numpy/arrayscalars.h" #include "lowlevel_strided_loops.h" diff --git a/numpy/_core/src/multiarray/datetime_busdaycal.c b/numpy/_core/src/multiarray/datetime_busdaycal.c index 880efe934c09..3a7e3a383dca 100644 --- 
a/numpy/_core/src/multiarray/datetime_busdaycal.c +++ b/numpy/_core/src/multiarray/datetime_busdaycal.c @@ -17,7 +17,7 @@ #include "numpy/arrayscalars.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "common.h" #include "lowlevel_strided_loops.h" diff --git a/numpy/_core/src/multiarray/datetime_strings.c b/numpy/_core/src/multiarray/datetime_strings.c index 090277e16939..f92eec3f5a59 100644 --- a/numpy/_core/src/multiarray/datetime_strings.c +++ b/numpy/_core/src/multiarray/datetime_strings.c @@ -16,7 +16,7 @@ #include "numpy/arrayobject.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "numpy/arrayscalars.h" #include "convert_datatype.h" diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 8a8e432d82ee..94f40fb42ca0 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -13,7 +13,7 @@ #include "npy_config.h" #include "npy_ctypes.h" #include "npy_import.h" -#include "npy_pycompat.h" + #include "_datetime.h" #include "common.h" diff --git a/numpy/_core/src/multiarray/dragon4.h b/numpy/_core/src/multiarray/dragon4.h index 0e29c42e3c09..8986c1672e71 100644 --- a/numpy/_core/src/multiarray/dragon4.h +++ b/numpy/_core/src/multiarray/dragon4.h @@ -38,7 +38,7 @@ #define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "numpy/arrayscalars.h" /* Half binary format */ diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index 513b2e6be478..d7a5e80800b6 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -21,7 +21,7 @@ #include "numpy/npy_math.h" #include "lowlevel_strided_loops.h" -#include "npy_pycompat.h" + #include "convert_datatype.h" #include "ctors.h" diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index dd8ba326b98f..4aadfd902545 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -9,7 +9,7 @@ #include #include #include -#include "npy_pycompat.h" + #include "npy_import.h" #include "abstractdtypes.h" diff --git a/numpy/_core/src/multiarray/einsum.c.src b/numpy/_core/src/multiarray/einsum.c.src index cf84c5a7629c..81d3f3e1d79b 100644 --- a/numpy/_core/src/multiarray/einsum.c.src +++ b/numpy/_core/src/multiarray/einsum.c.src @@ -16,7 +16,7 @@ #define _MULTIARRAYMODULE #include #include -#include + #include //PyArray_AssignRawScalar #include diff --git a/numpy/_core/src/multiarray/flagsobject.c b/numpy/_core/src/multiarray/flagsobject.c index 4751b2a8bfed..8257727030c0 100644 --- a/numpy/_core/src/multiarray/flagsobject.c +++ b/numpy/_core/src/multiarray/flagsobject.c @@ -12,7 +12,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "array_assign.h" #include "common.h" diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index d53fc53601d6..d18463f27bb5 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -9,7 +9,7 @@ #include "numpy/arrayobject.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "npy_import.h" #include "common.h" diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index 925179e30a53..f570caf1588f 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -8,7 +8,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" 
+ #include "hashdescr.h" diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 37e33c28a944..df05e4aa8aa5 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -13,7 +13,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "multiarraymodule.h" #include "common.h" diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 68881a1f004f..2806670d3e07 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -11,7 +11,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "arrayobject.h" #include "iterators.h" diff --git a/numpy/_core/src/multiarray/nditer_impl.h b/numpy/_core/src/multiarray/nditer_impl.h index 0332d78ec913..790ddcb11f83 100644 --- a/numpy/_core/src/multiarray/nditer_impl.h +++ b/numpy/_core/src/multiarray/nditer_impl.h @@ -18,7 +18,7 @@ #include #include "numpy/arrayobject.h" -#include "npy_pycompat.h" + #include "convert_datatype.h" #include "lowlevel_strided_loops.h" diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 94dd526ceb6c..ad20194f308f 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -15,7 +15,7 @@ #include "numpy/arrayobject.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "alloc.h" #include "common.h" #include "conversion_utils.h" diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index d42d23a281ea..7b4d6f21f45c 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -8,7 +8,7 @@ #include "numpy/arrayobject.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "npy_import.h" #include "common.h" #include "number.h" diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index df16452ab283..1bc693532646 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -21,7 +21,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + /* * Helper function to clear a strided memory (normally or always contiguous) diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index a4ed781142ad..9ca83d8a57f5 100644 --- a/numpy/_core/src/multiarray/scalarapi.c +++ b/numpy/_core/src/multiarray/scalarapi.c @@ -12,7 +12,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "array_coercion.h" #include "ctors.h" diff --git a/numpy/_core/src/multiarray/sequence.c b/numpy/_core/src/multiarray/sequence.c index 7bdd64d27e5f..4c94bb798072 100644 --- a/numpy/_core/src/multiarray/sequence.c +++ b/numpy/_core/src/multiarray/sequence.c @@ -10,7 +10,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "common.h" #include "mapping.h" diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index ede7a617e00b..6ca8ff85e218 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -12,7 +12,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "arraywrap.h" #include "ctors.h" diff --git a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 92325247a60c..5e794a8490ca 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -34,7 +34,7 @@ maintainer email: oliphant.travis@ieee.org #include 
"common.h" -#include "npy_pycompat.h" + #include "usertypes.h" #include "dtypemeta.h" diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src index 2479c9c9279c..ac45ae92afbc 100644 --- a/numpy/_core/src/umath/_umath_tests.c.src +++ b/numpy/_core/src/umath/_umath_tests.c.src @@ -19,7 +19,7 @@ #include "numpy/ndarrayobject.h" #include "numpy/npy_math.h" -#include "npy_pycompat.h" + #include "npy_config.h" #include "npy_cpu_features.h" diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index aea145de81f9..d32feaaa31da 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -6,7 +6,7 @@ #include #include "npy_config.h" -#include "npy_pycompat.h" + #include "npy_argparse.h" #include "conversion_utils.h" diff --git a/numpy/_core/src/umath/funcs.inc.src b/numpy/_core/src/umath/funcs.inc.src index 6cd9448d025b..df81c835034a 100644 --- a/numpy/_core/src/umath/funcs.inc.src +++ b/numpy/_core/src/umath/funcs.inc.src @@ -7,7 +7,7 @@ */ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include "npy_pycompat.h" + #include "npy_import.h" diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index cdae9d1d22a5..37f990f970ed 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -14,7 +14,7 @@ #include "numpy/halffloat.h" #include "lowlevel_strided_loops.h" -#include "npy_pycompat.h" + #include "npy_cblas.h" #include "arraytypes.h" /* For TYPE_dot functions */ diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index bb29fcdf7c52..f03a253fe7c2 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -1,7 +1,7 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define NO_IMPORT_ARRAY +#define NO_IMPORT_ -#include "npy_pycompat.h" +#include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" #include "npy_import.h" diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index 46466418e417..548530e1ca3b 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -16,7 +16,7 @@ #include "npy_config.h" #include "numpy/arrayobject.h" -#include "npy_pycompat.h" + #include "array_assign.h" #include "array_coercion.h" #include "array_method.h" diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index 40ba7d65f05c..c04a8f248cd3 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -19,7 +19,7 @@ #include "numpy/npy_math.h" #include "npy_import.h" -#include "npy_pycompat.h" + #include "numpy/halffloat.h" #include "templ_common.h" diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 63ef0d9080ca..3585dd8ace21 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -33,7 +33,7 @@ #include #include "npy_config.h" -#include "npy_pycompat.h" + #include "npy_argparse.h" #include "numpy/arrayobject.h" diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index f9962a9b4e32..abb8c5ac7e07 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -33,9 +33,11 @@ #endif #include "npy_config.h" -#include "npy_pycompat.h" + +#include "numpy/npy_common.h" #include "npy_import.h" +#include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" #include "ufunc_type_resolution.h" 
#include "ufunc_object.h" diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index cd5ddddaa8d7..ecbca7f4d3ea 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -13,8 +13,6 @@ #include "numpy/ufuncobject.h" #include "numpy/npy_math.h" -#include "npy_pycompat.h" - #include "npy_config.h" #include "npy_cblas.h" From c74a960af165b55c6dd7986e45435bc2cd296ded Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 1 Apr 2024 11:26:21 -0600 Subject: [PATCH 142/980] MNT: skip pythoncapi-comat in check_installed_files --- tools/check_installed_files.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/check_installed_files.py b/tools/check_installed_files.py index 9e97f903d65a..7f78d5b1c7d6 100644 --- a/tools/check_installed_files.py +++ b/tools/check_installed_files.py @@ -79,6 +79,11 @@ def get_files(dir_to_check, kind='test'): k: v for k, v in files.items() if not k.startswith('distutils') } + # ignore python files in vendored pythoncapi-compat submodule + files = { + k: v for k, v in files.items() if 'pythoncapi-compat' not in k + } + return files From 3849c4bf15303c80d33e1e80a4401d1aab8e082d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 1 Apr 2024 12:42:09 -0600 Subject: [PATCH 143/980] BUG: fix reference counting error in wrapping_method_resolve_descriptors --- numpy/_core/src/umath/wrapping_array_method.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index ee1db62abf68..fe065e175027 100644 --- a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -54,7 +54,7 @@ wrapping_method_resolve_descriptors( self->wrapped_meth, self->wrapped_dtypes, orig_given_descrs, orig_loop_descrs, view_offset); for (int i = 0; i < nargs; i++) { - Py_XDECREF(orig_given_descrs); + Py_XDECREF(orig_given_descrs[i]); } if (casting < 0) { return -1; @@ -62,7 +62,7 @@ wrapping_method_resolve_descriptors( int res = self->translate_loop_descrs( nin, nout, dtypes, given_descrs, orig_loop_descrs, loop_descrs); for (int i = 0; i < nargs; i++) { - Py_DECREF(orig_given_descrs); + Py_DECREF(orig_loop_descrs[i]); } if (res < 0) { return -1; From e16ad3b6f307522172cb6b513a430981d1854b05 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 1 Apr 2024 14:31:54 -0600 Subject: [PATCH 144/980] MNT: respond to review comments --- numpy/_core/src/multiarray/convert_datatype.c | 29 +++++++++++++++++++ numpy/_core/src/multiarray/convert_datatype.h | 5 ++++ numpy/_core/src/multiarray/ctors.c | 17 ++++++----- numpy/_core/src/multiarray/multiarraymodule.c | 27 ----------------- numpy/_core/src/multiarray/multiarraymodule.h | 5 ---- numpy/_core/src/umath/ufunc_object.c | 1 - numpy/_core/tests/test_stringdtype.py | 6 ++++ 7 files changed, 50 insertions(+), 40 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index c35f899847a5..9765a4041e06 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -651,6 +651,35 @@ PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) } +/* + * This function returns true if the two types can be safely cast at + * *minimum_safety* casting level. Sets the *view_offset* if that is set + * for the cast. 
If ignore_error is set and cast setup fails, the error indicator is + * cleared and false is returned; otherwise the error indicator is left + * set and -1 is returned. + */ +NPY_NO_EXPORT npy_intp +PyArray_SafeCast(PyArray_Descr *type1, PyArray_Descr *type2, + npy_intp* view_offset, NPY_CASTING minimum_safety, + npy_intp ignore_error) +{ + if (type1 == type2) { + *view_offset = 0; + return 1; + } + + NPY_CASTING safety = PyArray_GetCastInfo(type1, type2, NULL, view_offset); + if (safety < 0) { + if (ignore_error) { + PyErr_Clear(); + return 0; + } + return -1; + } + return PyArray_MinCastSafety(safety, minimum_safety) == minimum_safety; +} + + /* Provides an ordering for the dtype 'kind' character codes */ NPY_NO_EXPORT int dtype_kind_to_ordering(char kind) diff --git a/numpy/_core/src/multiarray/convert_datatype.h b/numpy/_core/src/multiarray/convert_datatype.h index 8dccbab6f8c0..d1493e6997bd 100644 --- a/numpy/_core/src/multiarray/convert_datatype.h +++ b/numpy/_core/src/multiarray/convert_datatype.h @@ -102,6 +102,11 @@ PyArray_GetCastInfo( PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype, npy_intp *view_offset); +NPY_NO_EXPORT npy_intp +PyArray_SafeCast(PyArray_Descr *type1, PyArray_Descr *type2, + npy_intp* view_offset, NPY_CASTING minimum_safety, + npy_intp ignore_errors); + NPY_NO_EXPORT int PyArray_CheckCastSafety(NPY_CASTING casting, PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype); diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index abe6a631ebbe..f897afb5d4c0 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1908,12 +1908,10 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) } arrflags = PyArray_FLAGS(arr); - npy_intp view_offset; - npy_intp is_safe = PyArray_SafeCast(oldtype, newtype, &view_offset, NPY_NO_CASTING, 1); - npy_intp view_safe = (is_safe && (view_offset == 0)); - /* If a guaranteed copy was requested */ - copy = (flags & NPY_ARRAY_ENSURECOPY) || + + copy = /* If a guaranteed copy was requested */ + (flags & NPY_ARRAY_ENSURECOPY) || /* If C contiguous was requested, and arr is not */ ((flags & NPY_ARRAY_C_CONTIGUOUS) && (!(arrflags & NPY_ARRAY_C_CONTIGUOUS))) || @@ -1925,8 +1923,13 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) (!(arrflags & NPY_ARRAY_F_CONTIGUOUS))) || /* If a writeable array was requested, and arr is not */ ((flags & NPY_ARRAY_WRITEABLE) && - (!(arrflags & NPY_ARRAY_WRITEABLE))) || - !view_safe; + (!(arrflags & NPY_ARRAY_WRITEABLE))); + + if (!copy) { + npy_intp view_offset; + npy_intp is_safe = PyArray_SafeCast(oldtype, newtype, &view_offset, NPY_NO_CASTING, 1); + copy = copy || !(is_safe && (view_offset == 0)); + } if (copy) { if (flags & NPY_ARRAY_ENSURENOCOPY ) { diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index dff272164398..0df294be14dc 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1464,33 +1464,6 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) } -/* - * This function returns true if the two types can be safely cast at - * *minimum_safety* casting level. Sets the view_offset if that is set - * for the cast. If ignore_error is 1, errors in cast setup are ignored.
- */ -NPY_NO_EXPORT npy_intp -PyArray_SafeCast(PyArray_Descr *type1, PyArray_Descr *type2, - npy_intp* view_offset, NPY_CASTING minimum_safety, - npy_intp ignore_error) -{ - if (type1 == type2) { - *view_offset = 0; - return 1; - } - - NPY_CASTING safety = PyArray_GetCastInfo(type1, type2, NULL, view_offset); - if (safety < 0) { - if (ignore_error) { - PyErr_Clear(); - return 0; - } - return -1; - } - return PyArray_MinCastSafety(safety, minimum_safety) == minimum_safety; -} - - /*NUMPY_API*/ NPY_NO_EXPORT unsigned char PyArray_EquivTypenums(int typenum1, int typenum2) diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 4aac241523d7..ba03d367eeb8 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -21,9 +21,4 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert_if_no_array; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_cpu; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_err_msg_substr; -NPY_NO_EXPORT npy_intp -PyArray_SafeCast(PyArray_Descr *type1, PyArray_Descr *type2, - npy_intp* view_offset, NPY_CASTING minimum_safety, - npy_intp ignore_errors); - #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 0fabc37b070f..d1f4eb30b5a8 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -62,7 +62,6 @@ #include "legacy_array_method.h" #include "abstractdtypes.h" #include "mapping.h" -#include "multiarraymodule.h" /* TODO: Only for `NpyIter_GetTransferFlags` until it is public */ #define NPY_ITERATOR_IMPLEMENTATION_CODE diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 8d72df6401f3..cc11ad237d47 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -460,8 +460,13 @@ def test_creation_functions(): def test_create_with_copy_none(string_list): arr = np.array(string_list, dtype=StringDType()) + # create another stringdtype array with an arena that has a different + # in-memory layout than the first array arr_rev = np.array(string_list[::-1], dtype=StringDType()) + # this should create a copy and the resulting array + # shouldn't share an allocator or arena with arr_rev, despite + # explicitly passing arr_rev.dtype arr_copy = np.array(arr, copy=None, dtype=arr_rev.dtype) np.testing.assert_array_equal(arr, arr_copy) assert arr_copy.base is None @@ -469,6 +474,7 @@ def test_create_with_copy_none(string_list): with pytest.raises(ValueError, match="Unable to avoid copy"): np.array(arr, copy=False, dtype=arr_rev.dtype) + # because we're using arr's dtype instance, the view is safe arr_view = np.array(arr, copy=None, dtype=arr.dtype) np.testing.assert_array_equal(arr, arr) np.testing.assert_array_equal(arr_view[::-1], arr_rev) From 54e3a83ff64cfe4691b7a2cd0156bd659cd1cb6d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 1 Apr 2024 14:36:33 -0600 Subject: [PATCH 145/980] MNT: check for valid views by comparing with NPY_MIN_INTP --- numpy/_core/src/multiarray/ctors.c | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index f897afb5d4c0..3c0e11ddc749 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1928,7 +1928,7 @@ 
PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) if (!copy) { npy_intp view_offset; npy_intp is_safe = PyArray_SafeCast(oldtype, newtype, &view_offset, NPY_NO_CASTING, 1); - copy = copy || !(is_safe && (view_offset == 0)); + copy = copy || !(is_safe && (view_offset != NPY_MIN_INTP)); } if (copy) { diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 0df294be14dc..b8b777697fe3 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1594,7 +1594,7 @@ _array_fromobject_generic( oldtype = PyArray_DESCR(oparr); npy_intp view_offset; npy_intp is_safe = PyArray_SafeCast(oldtype, dtype, &view_offset, NPY_NO_CASTING, 1); - npy_intp view_safe = (is_safe && (view_offset == 0)); + npy_intp view_safe = (is_safe && (view_offset != NPY_MIN_INTP)); if (view_safe) { if (copy != NPY_COPY_ALWAYS && STRIDING_OK(oparr, order)) { if (oldtype == dtype) { From 6373b850aa1baf676b14646e9cc76831eefee056 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 1 Apr 2024 15:18:24 -0600 Subject: [PATCH 146/980] MNT: tiny optimization --- numpy/_core/src/multiarray/ctors.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 3c0e11ddc749..836ed493e1e6 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1928,7 +1928,7 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) if (!copy) { npy_intp view_offset; npy_intp is_safe = PyArray_SafeCast(oldtype, newtype, &view_offset, NPY_NO_CASTING, 1); - copy = copy || !(is_safe && (view_offset != NPY_MIN_INTP)); + copy = !(is_safe && (view_offset != NPY_MIN_INTP)); } if (copy) { From 86b94dc66b4308c8082d1eb94422bfcf3c01de95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Tue, 2 Apr 2024 10:16:31 +0200 Subject: [PATCH 147/980] DOC: Mention copy=True for __array__ in migration guide [skip azp] [skip cirrus] [skip actions] --- doc/source/numpy_2_0_migration_guide.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 2179fe8e8ca6..2b345942db21 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -422,7 +422,9 @@ The :ref:`copy keyword behavior changes ` in of how to do so. 3. For any ``__array__`` method on a non-NumPy array-like object, a ``copy=None`` keyword can be added to the signature - this will work with - older NumPy versions as well. + older NumPy versions as well. If the ``copy`` keyword is handled in + the ``__array__`` method implementation, then for ``copy=True`` a new + copy must always be returned.
Writing numpy-version-dependent code From 31411afd7c88ea3bdedf8f8afaf321102ba742a8 Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Wed, 3 Apr 2024 00:49:08 +0530 Subject: [PATCH 148/980] CI: Shorten workflow dispatch input description [skip cirrus] [skip circle] [skip azp] [skip travis] --- .github/workflows/emscripten.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 3346ce7b2e14..5406e8329129 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -21,8 +21,10 @@ on: workflow_dispatch: inputs: push_wheels: + # Can be 'true' or 'false'. Default is 'false'. + # Warning: this will overwrite existing wheels. description: > - Push wheels to Anaconda.org if the build is successful, can be 'true' or 'false'. Default is 'false'. Warning: this will overwrite existing wheels. + Push wheels to Anaconda.org if the build succeeds required: false default: 'false' From 970476737080a053d9be3d6f4479bb1b5cd17f19 Mon Sep 17 00:00:00 2001 From: carlosilva10260 <121122527+carlosilva10260@users.noreply.github.com> Date: Wed, 3 Apr 2024 12:11:42 +0100 Subject: [PATCH 149/980] BUG: masked array division should ignore all FPEs in mask calculation (#26135) * BUG: masked array division broken with np.seterr(under=raise) #25810 * STY: Remove unnecessary brackets Closes gh-25810 --------- Co-authored-by: Sebastian Berg --- numpy/ma/core.py | 2 +- numpy/ma/tests/test_core.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index c67582265e4d..89b4f07031d5 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -877,7 +877,7 @@ def __call__(self, a, b): self.tolerance = np.finfo(float).tiny # don't call ma ufuncs from __array_wrap__ which would fail for scalars a, b = np.asarray(a), np.asarray(b) - with np.errstate(invalid='ignore'): + with np.errstate(all='ignore'): return umath.absolute(a) * self.tolerance >= umath.absolute(b) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 99e869685e60..58d787226e84 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -2581,6 +2581,13 @@ def test_no_masked_nan_warnings(self): # also check that allclose uses ma ufuncs, to avoid warning allclose(m, 0.5) + def test_masked_array_underflow(self): + x = np.arange(0, 3, 0.1) + X = np.ma.array(x) + with np.errstate(under="raise"): + X2 = X/2.0 + np.testing.assert_array_equal(X2, x/2) + class TestMaskedArrayInPlaceArithmetic: # Test MaskedArray Arithmetic From 1bceaf893336e50e936d80db996c860d3a35ea64 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 3 Apr 2024 08:07:17 -0600 Subject: [PATCH 150/980] MNT: respond to review comments --- numpy/_core/meson.build | 5 +++++ numpy/_core/src/umath/override.c | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 8fc377aeedde..658725af9216 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -123,6 +123,11 @@ if use_intel_sort and not fs.exists('src/npysort/x86-simd-sort/README.md') error('Missing the `x86-simd-sort` git submodule! Run `git submodule update --init` to fix this.') endif +if not fs.exists('src/common/pythoncapi-compat') + error('Missing the `pythoncapi-compat` git submodule! ' + + 'Run `git submodule update --init` to fix this.') +endif + # Check sizes of types. 
Note, some of these landed in config.h before, but were # unused. So clean that up and only define the NPY_SIZEOF flavors rather than # the SIZEOF ones diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index f03a253fe7c2..88d05abddc50 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -1,5 +1,5 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define NO_IMPORT_ +#define NO_IMPORT_ARRAY #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" From 2b17c13cf7311c2d4385a4eead2785e295dc58b1 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 3 Apr 2024 09:56:10 -0600 Subject: [PATCH 151/980] TST: account for immortal objects in test_iter_refcount --- numpy/_core/tests/test_nditer.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 8ce81503ff87..75451c4a2987 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -1,4 +1,5 @@ import sys +import sysconfig import pytest import textwrap @@ -13,6 +14,7 @@ IS_WASM, HAS_REFCOUNT, suppress_warnings, break_cycles ) +NOGIL_BUILD = bool(sysconfig.get_config_var('Py_GIL_DISABLED')) def iter_multi_index(i): ret = [] @@ -68,7 +70,9 @@ def test_iter_refcount(): rc2_dt = sys.getrefcount(dt) it2 = it.copy() assert_(sys.getrefcount(a) > rc2_a) - assert_(sys.getrefcount(dt) > rc2_dt) + if not NOGIL_BUILD: + # np.dtype('f4') is immortal in the nogil build + assert_(sys.getrefcount(dt) > rc2_dt) it = None assert_equal(sys.getrefcount(a), rc2_a) assert_equal(sys.getrefcount(dt), rc2_dt) From dc04ad52216a4fb781eaa31bc923049daef0443a Mon Sep 17 00:00:00 2001 From: Lucas Colley Date: Wed, 3 Apr 2024 18:45:07 +0100 Subject: [PATCH 152/980] NEP: add NEP 56 mailing list resolution (#26155) * NEP: add NEP 56 mailing list resolution [skip cirrus] [skip azp] [skip travis] * NEP: mark NEP 56 as Final [skip azp] [skip travis] [skip cirrus] --- doc/neps/nep-0056-array-api-main-namespace.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/neps/nep-0056-array-api-main-namespace.rst b/doc/neps/nep-0056-array-api-main-namespace.rst index ee1190c0ceff..028370877466 100644 --- a/doc/neps/nep-0056-array-api-main-namespace.rst +++ b/doc/neps/nep-0056-array-api-main-namespace.rst @@ -7,11 +7,11 @@ NEP 56 — Array API standard support in NumPy's main namespace :Author: Ralf Gommers :Author: Mateusz Sokół :Author: Nathan Goldbaum -:Status: Accepted +:Status: Final :Replaces: 30, 31, 37, 47 :Type: Standards Track :Created: 2023-12-19 -:Resolution: TODO mailing list link (after acceptance) +:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/message/Z6AA5CL47NHBNEPTFWYOTSUVSRDGHYPN/ Abstract From 0d7b977136523b6bfacc6a8e8671fbd35907c325 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Thu, 4 Apr 2024 11:43:53 +0200 Subject: [PATCH 153/980] API: Readd np.bool_ typing stub --- numpy/__init__.pyi | 2 ++ numpy/_core/tests/_natype.py | 8 ++++---- numpy/_core/tests/test_multiarray.py | 4 ++-- numpy/_core/tests/test_stringdtype.py | 2 +- numpy/lib/tests/test_function_base.py | 4 ++-- numpy/typing/tests/data/pass/scalars.py | 5 +++-- numpy/typing/tests/data/reveal/numerictypes.pyi | 1 + numpy/typing/tests/data/reveal/scalars.pyi | 1 + 8 files changed, 16 insertions(+), 11 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 51103aaa991f..fb29a758dce5 100644 --- a/numpy/__init__.pyi +++ 
b/numpy/__init__.pyi @@ -2817,6 +2817,8 @@ class bool(generic): __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] +bool_ = bool + class object_(generic): def __init__(self, value: object = ..., /) -> None: ... @property diff --git a/numpy/_core/tests/_natype.py b/numpy/_core/tests/_natype.py index b8a99833758d..07a8fc474c36 100644 --- a/numpy/_core/tests/_natype.py +++ b/numpy/_core/tests/_natype.py @@ -15,7 +15,7 @@ def method(self, other): if ( other is pd_NA or isinstance(other, (str, bytes)) - or isinstance(other, (numbers.Number, np.bool_)) + or isinstance(other, (numbers.Number, np.bool)) or util.is_array(other) and not other.shape ): @@ -119,7 +119,7 @@ def __reduce__(self): def __pow__(self, other): if other is pd_NA: return pd_NA - elif isinstance(other, (numbers.Number, np.bool_)): + elif isinstance(other, (numbers.Number, np.bool)): if other == 0: # returning positive is correct for +/- 0. return type(other)(1) @@ -133,7 +133,7 @@ def __pow__(self, other): def __rpow__(self, other): if other is pd_NA: return pd_NA - elif isinstance(other, (numbers.Number, np.bool_)): + elif isinstance(other, (numbers.Number, np.bool)): if other == 1: return other else: @@ -170,7 +170,7 @@ def __xor__(self, other): __rxor__ = __xor__ __array_priority__ = 1000 - _HANDLED_TYPES = (np.ndarray, numbers.Number, str, np.bool_) + _HANDLED_TYPES = (np.ndarray, numbers.Number, str, np.bool) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): types = self._HANDLED_TYPES + (NAType,) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 88c17656a18b..e0b8593604a2 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1890,8 +1890,8 @@ def test_any_where(self): ["i8", "U10", "object", "datetime64[ms]"]) def test_any_and_all_result_dtype(self, dtype): arr = np.ones(3, dtype=dtype) - assert arr.any().dtype == np.bool_ - assert arr.all().dtype == np.bool_ + assert arr.any().dtype == np.bool + assert arr.all().dtype == np.bool def test_any_and_all_object_dtype(self): # (seberg) Not sure we should even allow dtype here, but it is. 
diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index d45e3b01f6f7..10de20b4b3f6 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -356,7 +356,7 @@ def test_isnan(dtype, string_list): # isnan is only true when na_object is a NaN assert_array_equal( np.isnan(sarr), - np.array([0] * len(string_list) + [1], dtype=np.bool_), + np.array([0] * len(string_list) + [1], dtype=np.bool), ) else: assert not np.any(np.isnan(sarr)) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index bdca07edd3e8..3a517f5c93bf 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -254,8 +254,8 @@ def test_nd(self): @pytest.mark.parametrize("dtype", ["i8", "U10", "object", "datetime64[ms]"]) def test_any_and_all_result_dtype(dtype): arr = np.ones(3, dtype=dtype) - assert np.any(arr).dtype == np.bool_ - assert np.all(arr).dtype == np.bool_ + assert np.any(arr).dtype == np.bool + assert np.all(arr).dtype == np.bool class TestCopy: diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index 7b8931f607eb..53caf7ff817d 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -5,6 +5,7 @@ import numpy as np b = np.bool() +b_ = np.bool_() u8 = np.uint64() i8 = np.int64() f8 = np.float64() @@ -121,7 +122,7 @@ def __float__(self) -> float: u8 = np.uint64() f8 = np.float64() c16 = np.complex128() -b_ = np.bool() +b = np.bool() td = np.timedelta64() U = np.str_("1") S = np.bytes_("1") @@ -130,7 +131,7 @@ def __float__(self) -> float: int(i8) int(u8) int(f8) -int(b_) +int(b) int(td) int(U) int(S) diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index 091aa7e5ab06..9f094ba72e3c 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -48,6 +48,7 @@ assert_type(np.ScalarType[0], type[int]) assert_type(np.ScalarType[3], type[bool]) assert_type(np.ScalarType[8], type[np.csingle]) assert_type(np.ScalarType[10], type[np.clongdouble]) +assert_type(np.bool_, type[np.bool]) assert_type(np.typecodes["Character"], Literal["c"]) assert_type(np.typecodes["Complex"], Literal["FDG"]) diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index 47c08997a0e3..95775e9a8dbe 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -50,6 +50,7 @@ assert_type(V[["field1", "field2"]], np.void) V[0] = 5 # Aliases +assert_type(np.bool_(), np.bool) assert_type(np.byte(), np.byte) assert_type(np.short(), np.short) assert_type(np.intc(), np.intc) From e0194de1c943120c261bd4d7c9a752e69a6f17e1 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 4 Apr 2024 12:54:10 +0200 Subject: [PATCH 154/980] add sqrt --- .../src/umath/loops_umath_fp.dispatch.c.src | 75 +++++++++++++++---- 1 file changed, 59 insertions(+), 16 deletions(-) diff --git a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src index 476286fb259a..2e38e575b9e2 100644 --- a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src @@ -226,15 +226,34 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) * #type = npy_double, npy_float# * #vsub = , f# * #sfx = f64, f32# + * #sqrt = sqrt, sqrtf# */ /**begin repeat1 
- * #func = power, arctan2# - * #intrin = pow, atan2# - * #ispower = 1, 0# + * #func = power# + * #intrin = pow# */ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { + int stride_zero = steps[1]==0; + if (stride_zero) { + BINARY_DEFS_ZERO_STRIDE + const @type@ in2 = *(@type@ *)ip2; + if (in2 == 2.0) { + BINARY_LOOP_SLIDING_ZERO_STRIDE { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = in1 * in1; + } + return; + } + else if (in2 == 0.5) { + BINARY_LOOP_SLIDING_ZERO_STRIDE { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 =@sqrt@(in1); + } + return; + } + } #if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) const @type@ *src1 = (@type@*)args[0]; const @type@ *src2 = (@type@*)args[1]; @@ -252,24 +271,47 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) return; } #endif -#if @ispower@ - if (steps[1]==0) { + if (stride_zero) { + // this maybe a bit faster than the generic loop, but not sure BINARY_DEFS_ZERO_STRIDE const @type@ in2 = *(@type@ *)ip2; - if (in2 == 2.0) { - BINARY_LOOP_SLIDING_ZERO_STRIDE { - const @type@ in1 = *(@type@ *)ip1; - *(@type@ *)op1 = in1 * in1; - } - } - else { - BINARY_LOOP_SLIDING_ZERO_STRIDE { - const @type@ in1 = *(@type@ *)ip1; - *(@type@ *)op1 = npy_@intrin@@vsub@(in1, in2); - } + BINARY_LOOP_SLIDING_ZERO_STRIDE { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = npy_@intrin@@vsub@(in1, in2); } return; } + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *(@type@ *)op1 = npy_@intrin@@vsub@(in1, in2); + } +} +/**end repeat1**/ + +/**begin repeat1 + * #func = arctan2# + * #intrin = atan2# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +{ +#if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) + const @type@ *src1 = (@type@*)args[0]; + const @type@ *src2 = (@type@*)args[1]; + @type@ *dst = (@type@*)args[2]; + const int lsize = sizeof(src1[0]); + const npy_intp ssrc1 = steps[0] / lsize; + const npy_intp ssrc2 = steps[1] / lsize; + const npy_intp sdst = steps[2] / lsize; + const npy_intp len = dimensions[0]; + assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); + if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && !is_mem_overlap(src2, steps[1], dst, steps[2], len) && + npyv_loadable_stride_@sfx@(ssrc1) && npyv_loadable_stride_@sfx@(ssrc2) && + npyv_storable_stride_@sfx@(sdst)) { + simd_@intrin@_@sfx@(src1, ssrc1, src2, ssrc2, dst, sdst, len); + return; + } #endif BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; @@ -278,4 +320,5 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) } } /**end repeat1**/ + /**end repeat**/ From 6ac0f2ddb69ab97020a8fc136b3bf52651b0fd16 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Thu, 4 Apr 2024 09:27:35 -0700 Subject: [PATCH 155/980] Add benchmarks for np.power(x,2) and np.power(0.5) --- benchmarks/benchmarks/bench_ufunc.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 2ac820bc2e5c..dcc5fcbd08c6 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -559,21 +559,22 @@ def time_add_reduce_arg_parsing(self, arg_pack): np.add.reduce(*arg_pack.args, **arg_pack.kwargs) class BinaryBench(Benchmark): - def 
setup(self): + params = [np.float32, np.float64] + param_names = ['dtype'] + + def setup(self, dtype): N = 1000000 - self.a32 = np.random.rand(N).astype(np.float32) - self.b32 = np.random.rand(N).astype(np.float32) - self.a64 = np.random.rand(N).astype(np.float64) - self.b64 = np.random.rand(N).astype(np.float64) + self.a = np.random.rand(N).astype(dtype) + self.b = np.random.rand(N).astype(dtype) - def time_pow_32(self): - np.power(self.a32, self.b32) + def time_pow(self, dtype): + np.power(self.a, self.b) - def time_pow_64(self): - np.power(self.a64, self.b64) + def time_pow_2(self, dtype): + np.power(self.a, 2.0) - def time_atan2_32(self): - np.arctan2(self.a32, self.b32) + def time_pow_half(self, dtype): + np.power(self.a, 0.5) - def time_atan2_64(self): - np.arctan2(self.a64, self.b64) + def time_atan2(self, dtype): + np.arctan2(self.a, self.b) From f8d9e5402c3d085d0eac4c3d5ca2138cd3044355 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 4 Apr 2024 23:23:03 +0200 Subject: [PATCH 156/980] add tests for the fast paths --- numpy/_core/tests/test_umath.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index e01e6dd6346b..32c000501fa2 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1235,6 +1235,16 @@ def test_float_to_inf_power(self): r = np.array([1, 1, np.inf, 0, np.inf, 0, np.inf, 0], dt) assert_equal(np.power(a, b), r) + def test_power_fast_paths(self): + # gh-26055 + for dt in [np.float32, np.float64]: + expected = np.array([0.0, 1.21, 4., 1.44e+26, np.inf, np.inf]) + a = np.array([0, 1.1, 2, 12e12, np.inf, -np.inf], dt) + assert_equal(np.power(a, 2.), expected.astype(dt)) + + expected = np.sqrt(a) + assert_equal(np.power(a, 0.5), expected.astype(dt)) + class TestFloat_power: def test_type_conversion(self): From 3831d69bc0e9284c4dfa4022ba1e614ce89c31b3 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 4 Apr 2024 23:26:17 +0200 Subject: [PATCH 157/980] review comments --- numpy/_core/src/umath/loops_umath_fp.dispatch.c.src | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src index 2e38e575b9e2..7c379e1612ce 100644 --- a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src @@ -271,16 +271,6 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) return; } #endif - if (stride_zero) { - // this maybe a bit faster than the generic loop, but not sure - BINARY_DEFS_ZERO_STRIDE - const @type@ in2 = *(@type@ *)ip2; - BINARY_LOOP_SLIDING_ZERO_STRIDE { - const @type@ in1 = *(@type@ *)ip1; - *(@type@ *)op1 = npy_@intrin@@vsub@(in1, in2); - } - return; - } BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; From 93fd0a5772d67a848b3871a9870893db9a36affd Mon Sep 17 00:00:00 2001 From: Thomas A Caswell Date: Thu, 4 Apr 2024 13:26:05 -0400 Subject: [PATCH 158/980] MNT: update pythoncapi-compat Needed for current cpython main branch --- numpy/_core/src/common/pythoncapi-compat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index f66799130acd..9bfd8de33c76 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit f66799130acd8843802185553dadf0e300c5fe05 +Subproject commit
9bfd8de33c763d026fd2568ea302bd76b1046fba From c5d3e29662722df586bf075151e84abf31b0e428 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 5 Apr 2024 10:51:13 +0200 Subject: [PATCH 159/980] review comments --- numpy/_core/tests/test_umath.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 32c000501fa2..491c1a54853c 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1238,12 +1238,15 @@ def test_float_to_inf_power(self): def test_power_fast_paths(self): # gh-26055 for dt in [np.float32, np.float64]: + a = np.array([0, 1.1, 2, 12e12, -10., np.inf, -np.inf], dt) + expected = np.array([0.0, 1.21, 4., 1.44e+26, np.inf, np.inf]) - a = np.array([0, 1.1, 2, 12e12, np.inf, -np.inf], dt) - assert_equal(np.power(a, 2.), expected.astype(dt)) + result = np.power(a, 2.) + assert_array_max_ulp(result, expected.astype(dt), maxulp=1) expected = np.sqrt(a) - assert_equal(np.power(a, 0.5), expected.astype(dt)) + result = np.power(a, 0.5) + assert_array_max_ulp(result, expected.astype(dt), maxulp=1) class TestFloat_power: From 620b562756efb5dedefa2ded94ef43ec89f86ea4 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 5 Apr 2024 14:45:08 +0200 Subject: [PATCH 160/980] tests --- numpy/_core/tests/test_umath.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 491c1a54853c..429c3543add8 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1240,13 +1240,14 @@ def test_power_fast_paths(self): for dt in [np.float32, np.float64]: a = np.array([0, 1.1, 2, 12e12, -10., np.inf, -np.inf], dt) - expected = np.array([0.0, 1.21, 4., 1.44e+26, np.inf, np.inf]) + expected = np.array([0.0, 1.21, 4., 1.44e+26, 100, np.inf, np.inf]) result = np.power(a, 2.) assert_array_max_ulp(result, expected.astype(dt), maxulp=1) expected = np.sqrt(a) result = np.power(a, 0.5) - assert_array_max_ulp(result, expected.astype(dt), maxulp=1) + # needs to be fixed! + assert_array_max_ulp(result[:-1], expected[:-1].astype(dt), maxulp=1) class TestFloat_power: From d1caa616cb965eb5a364e2d9c0b10d9c99c76acd Mon Sep 17 00:00:00 2001 From: Yang Liu Date: Sun, 7 Apr 2024 13:29:05 +0800 Subject: [PATCH 161/980] ENH: Enable RVV CPU feature detection Enhanced compile-time and runtime CPU feature detection for RISC-V vector extension, which will aid future RVV support. 
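As an illustrative sketch (not part of the patch; it assumes a riscv64 Linux build of NumPy), the runtime detection added here surfaces through NumPy's existing CPU-feature introspection:

    import numpy as np
    from numpy._core._multiarray_umath import __cpu_features__

    # __cpu_features__ maps feature names to runtime-detected booleans;
    # with this change it should gain an 'RVV' entry on riscv64.
    print(__cpu_features__.get('RVV'))

    # np.show_runtime() also summarizes the detected CPU features for
    # the running interpreter.
    np.show_runtime()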
--- meson_cpu/main_config.h.in | 4 ++++ meson_cpu/meson.build | 4 ++++ meson_cpu/riscv64/meson.build | 8 ++++++++ numpy/_core/src/common/npy_cpu_features.c | 25 ++++++++++++++++++++++- numpy/_core/src/common/npy_cpu_features.h | 3 +++ numpy/distutils/checks/cpu_rvv.c | 13 ++++++++++++ 6 files changed, 56 insertions(+), 1 deletion(-) create mode 100644 meson_cpu/riscv64/meson.build create mode 100644 numpy/distutils/checks/cpu_rvv.c diff --git a/meson_cpu/main_config.h.in b/meson_cpu/main_config.h.in index c1fc2de349e0..0952adf67353 100644 --- a/meson_cpu/main_config.h.in +++ b/meson_cpu/main_config.h.in @@ -385,4 +385,8 @@ #ifdef @P@HAVE_NEON #include <arm_neon.h> #endif + +#ifdef @P@HAVE_RVV + #include <riscv_vector.h> +#endif #endif // @P@_CPU_DISPATCHER_CONF_H_ diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build index f96d9c315ea6..3afc54cae415 100644 --- a/meson_cpu/meson.build +++ b/meson_cpu/meson.build @@ -75,12 +75,14 @@ subdir('x86') subdir('ppc64') subdir('s390x') subdir('arm') +subdir('riscv64') CPU_FEATURES = {} CPU_FEATURES += ARM_FEATURES CPU_FEATURES += X86_FEATURES CPU_FEATURES += PPC64_FEATURES CPU_FEATURES += S390X_FEATURES +CPU_FEATURES += RV64_FEATURES # Parse the requested baseline (CPU_CONF_BASELINE) and dispatch features # (CPU_CONF_DISPATCH). @@ -93,6 +95,7 @@ min_features = { 's390x': [], 'arm': [], 'aarch64': [ASIMD], + 'riscv64': [], 'wasm32': [], }.get(cpu_family, []) if host_machine.endian() == 'little' and cpu_family == 'ppc64' @@ -107,6 +110,7 @@ max_features_dict = { 's390x': S390X_FEATURES, 'arm': ARM_FEATURES, 'aarch64': ARM_FEATURES, + 'riscv64': RV64_FEATURES, 'wasm32': {}, }.get(cpu_family, {}) max_features = [] diff --git a/meson_cpu/riscv64/meson.build b/meson_cpu/riscv64/meson.build new file mode 100644 index 000000000000..3f930f39e27e --- /dev/null +++ b/meson_cpu/riscv64/meson.build @@ -0,0 +1,8 @@ +source_root = meson.project_source_root() +mod_features = import('features') + +RVV = mod_features.new( + 'RVV', 1, args: ['-march=rv64gcv'], + test_code: files(source_root + '/numpy/distutils/checks/cpu_rvv.c')[0], +) +RV64_FEATURES = {'RVV': RVV} diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 7a24cb01625b..795d40f13524 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -119,7 +119,8 @@ static struct { {NPY_CPU_FEATURE_ASIMDHP, "ASIMDHP"}, {NPY_CPU_FEATURE_ASIMDDP, "ASIMDDP"}, {NPY_CPU_FEATURE_ASIMDFHM, "ASIMDFHM"}, - {NPY_CPU_FEATURE_SVE, "SVE"}}; + {NPY_CPU_FEATURE_SVE, "SVE"}, + {NPY_CPU_FEATURE_RVV, "RVV"}}; NPY_VISIBILITY_HIDDEN PyObject * @@ -813,6 +814,28 @@ npy__cpu_init_features(void) #endif } +/************** RISC-V 64 ***************/ + +#elif defined(__riscv) && __riscv_xlen == 64 + +#include <sys/auxv.h> + +#ifndef HWCAP_RVV + // https://github.com/torvalds/linux/blob/v6.8/arch/riscv/include/uapi/asm/hwcap.h#L24 + #define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A')) +#endif + +static void +npy__cpu_init_features(void) +{ + memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); + + unsigned int hwcap = getauxval(AT_HWCAP); + if (hwcap & COMPAT_HWCAP_ISA_V) { + npy__cpu_have[NPY_CPU_FEATURE_RVV] = 1; + } +} + /*********** Unsupported ARCH ***********/ #else static void diff --git a/numpy/_core/src/common/npy_cpu_features.h b/numpy/_core/src/common/npy_cpu_features.h index 83522b933785..d1e9d7e60d9f 100644 --- a/numpy/_core/src/common/npy_cpu_features.h +++ b/numpy/_core/src/common/npy_cpu_features.h @@ -98,6 +98,9 @@ enum npy_cpu_features //
Vector-Enhancements Facility 2 NPY_CPU_FEATURE_VXE2 = 352, + // RISC-V + NPY_CPU_FEATURE_RVV = 400, + NPY_CPU_FEATURE_MAX }; diff --git a/numpy/distutils/checks/cpu_rvv.c b/numpy/distutils/checks/cpu_rvv.c new file mode 100644 index 000000000000..45545d88dcd1 --- /dev/null +++ b/numpy/distutils/checks/cpu_rvv.c @@ -0,0 +1,13 @@ +#ifndef __riscv_vector + #error RVV not supported +#endif + +#include <riscv_vector.h> + +int main(void) +{ + size_t vlmax = __riscv_vsetvlmax_e32m1(); + vuint32m1_t a = __riscv_vmv_v_x_u32m1(0, vlmax); + vuint32m1_t b = __riscv_vadd_vv_u32m1(a, a, vlmax); + return __riscv_vmv_x_s_u32m1_u32(b); +} From d7409326b14b2862a738a02f6c5ff53081456b5d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 8 Apr 2024 08:52:19 -0600 Subject: [PATCH 162/980] MNT: update pythoncapi_compat to 01341ac --- numpy/_core/src/common/pythoncapi-compat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 9bfd8de33c76..01341acbbef0 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 9bfd8de33c763d026fd2568ea302bd76b1046fba +Subproject commit 01341acbbef0ca85cf2fa31b63307ddf4d9a87fb From 44fddc29d4fcd43570d885c85662bd1727c3a637 Mon Sep 17 00:00:00 2001 From: Leo Singer Date: Mon, 8 Apr 2024 16:58:45 -0400 Subject: [PATCH 163/980] DOC: fix typo in _add_newdoc_ufunc docstring (#26232) --- numpy/_core/_add_newdocs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 0a0322ae292a..38462e19eaaf 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -4743,7 +4743,7 @@ Notes ----- This method allocates memory for new_docstring on - the heap. Technically this creates a mempory leak, since this + the heap. Technically this creates a memory leak, since this memory will not be reclaimed until the end of the program even if the ufunc itself is removed. However this will only be a problem if the user is repeatedly creating ufuncs with From 912fb6184b02ab1f80cd711d0f45c9ed095f2bbd Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 9 Apr 2024 12:13:13 +0200 Subject: [PATCH 164/980] DOC: Move documentation from 2.0 Notes/migration guide to release fragment --- doc/release/upcoming_changes/26103.c_api.rst | 15 +++++++++++++++ doc/source/numpy_2_0_migration_guide.rst | 15 --------------- doc/source/release/2.0.0-notes.rst | 11 ----------- 3 files changed, 15 insertions(+), 26 deletions(-) create mode 100644 doc/release/upcoming_changes/26103.c_api.rst diff --git a/doc/release/upcoming_changes/26103.c_api.rst b/doc/release/upcoming_changes/26103.c_api.rst new file mode 100644 index 000000000000..9d0d998e2dfc --- /dev/null +++ b/doc/release/upcoming_changes/26103.c_api.rst @@ -0,0 +1,15 @@ +API symbols now hidden but customizable +--------------------------------------- +NumPy now defaults to hide the API symbols it adds to allow all NumPy API +usage. +This means that by default you cannot dynamically fetch the NumPy API from +another library (this was never possible on windows). + +If you are experiencing linking errors related to ``PyArray_API`` or +``PyArray_RUNTIME_VERSION``, you can define the +:c:macro:`NPY_API_SYMBOL_ATTRIBUTE` to opt-out of this change.
+ +If you are experiencing problems due to an upstream header including NumPy, +the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before +their header and import NumPy yourself based on :ref:`including-the-c-api`. + diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index c2f1f0e9ba8b..4a569d612bf4 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -221,21 +221,6 @@ using the NumPy types. You can still write cython code using the ``c.real`` and ``c.imag`` attributes (using the native typedefs), but you can no longer use in-place operators ``c.imag += 1`` in Cython's c++ mode. -.. _api-table-visibility-change: - -Changes in table visibility (linking error) -------------------------------------------- -If you are experiencing linking errors related to ``PyArray_API`` or -``PyArray_RUNTIME_VERSION``, NumPy changed the default visibility to hidden -(which was always the case on windows). -You can use the :c:macro:`NPY_API_SYMBOL_ATTRIBUTE` to opt-out of this change. -However, we generally discourage linking across project boundaries because -it breaks NumPy compatibility checks. - -If you are experiencing problems due to an upstream header including NumPy, -the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before -their header and import NumPy yourself based on :ref:`including-the-c-api`. - Changes to namespaces ===================== diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst index 1334b58ea742..216c38bb1538 100644 --- a/doc/source/release/2.0.0-notes.rst +++ b/doc/source/release/2.0.0-notes.rst @@ -655,17 +655,6 @@ Please see :ref:`migration_c_descr` for more information. (`gh-25943 <https://github.com/numpy/numpy/pull/25943>`__) -API symbols now hidden but customizable ---------------------------------------- -NumPy now defaults to hide the API symbols it adds to allow all NumPy API -usage. -This means that by default you cannot dynamically fetch the NumPy API from -another library (this was never possible on windows). -Please see :ref:`api-table-visibility-change` and the new -:c:macro:`NPY_API_SYMBOL_ATTRIBUTE` which allows to customize/revert this. - -(`gh-26103 <https://github.com/numpy/numpy/pull/26103>`__) - NumPy 2.0 C API removals ======================== From f021ce3b1993f974e9eb54076a2feadf85eea958 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Tue, 9 Apr 2024 12:20:13 +0200 Subject: [PATCH 165/980] DOC: add versionadded for copy keyword in np.asarray docstring --- numpy/_core/_add_newdocs.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 38462e19eaaf..c306f27bef59 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -948,6 +948,8 @@ the other requirements (``dtype``, ``order``, etc.). For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. Default: ``None``. + + .. versionadded:: 2.0.0 ${ARRAY_FUNCTION_LIKE} ..
versionadded:: 1.20.0 From 723678f7d6163ad531783934e27072784e73ed1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Tue, 9 Apr 2024 11:39:17 +0200 Subject: [PATCH 166/980] MAINT: Update array-api-tests job --- .github/workflows/linux.yml | 4 +-- tools/ci/array-api-skips.txt | 52 +++++++++--------------------------- 2 files changed, 13 insertions(+), 43 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 6bfe339a6499..597b9ae6538b 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -225,7 +225,7 @@ jobs: uses: actions/checkout@v4 with: repository: data-apis/array-api-tests - ref: '9afe8c709d81f005c98d383c82ad5e1c2cd8166c' # Latest commit as of 2023-11-24 + ref: '3cf8ef654c456d9fd1633d64e67b4470465940e9' # Latest commit as of 2024-04-09 submodules: 'true' path: 'array-api-tests' - name: Set up Python @@ -246,8 +246,6 @@ jobs: PYTHONWARNINGS: 'ignore::UserWarning::,ignore::DeprecationWarning::,ignore::RuntimeWarning::' run: | cd ${GITHUB_WORKSPACE}/array-api-tests - # remove once https://github.com/data-apis/array-api-tests/pull/217 is merged - touch pytest.ini pytest array_api_tests -v -c pytest.ini --ci --max-examples=2 --derandomize --disable-deadline --skips-file ${GITHUB_WORKSPACE}/tools/ci/array-api-skips.txt custom_checks: diff --git a/tools/ci/array-api-skips.txt b/tools/ci/array-api-skips.txt index fec7750098c5..44b9ec3b0a90 100644 --- a/tools/ci/array-api-skips.txt +++ b/tools/ci/array-api-skips.txt @@ -1,50 +1,22 @@ -# 'unique_inverse' output array is 1-D for 0-D input -array_api_tests/test_set_functions.py::test_unique_all -array_api_tests/test_set_functions.py::test_unique_inverse - -# https://github.com/numpy/numpy/issues/21213 -array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] -array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] -# noted diversions from spec -array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] -array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] -array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] -array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] -array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] -array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] -array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] -array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] -array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] -array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] -array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] 
-array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] -array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] -array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] -array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] -array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] -array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] -array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] - -# fft test suite is buggy as of 83f0bcdc -array_api_tests/test_fft.py - # finfo return type misalignment array_api_tests/test_data_type_functions.py::test_finfo[float32] -# a few misalignments -array_api_tests/test_operators_and_elementwise_functions.py -array_api_tests/test_signatures.py::test_func_signature[std] -array_api_tests/test_signatures.py::test_func_signature[var] -array_api_tests/test_signatures.py::test_func_signature[asarray] -array_api_tests/test_signatures.py::test_func_signature[reshape] -array_api_tests/test_signatures.py::test_array_method_signature[__array_namespace__] +# out.dtype=float32, but should be int16 +# dtype('float16') not found +array_api_tests/test_operators_and_elementwise_functions.py::test_ceil +array_api_tests/test_operators_and_elementwise_functions.py::test_floor +array_api_tests/test_operators_and_elementwise_functions.py::test_trunc -# missing 'copy' keyword argument, 'newshape' should be named 'shape' +# 'newshape' should be named 'shape' array_api_tests/test_signatures.py::test_func_signature[reshape] # missing 'descending' keyword arguments array_api_tests/test_signatures.py::test_func_signature[argsort] array_api_tests/test_signatures.py::test_func_signature[sort] -# assertionError: out.dtype=float32, but should be float64 [sum(float32)] -array_api_tests/test_statistical_functions.py::test_sum +# nonzero for 0D should error +array_api_tests/test_searching_functions.py::test_nonzero_zerodim_error + +# TODO: check why in CI `inspect.signature(np.vecdot)` returns (*arg, **kwarg) +# instead of raising ValueError. mtsokol: couldn't reproduce locally +array_api_tests/test_signatures.py::test_func_signature[vecdot] From 816df7eb831ed01f2efdbeff5f4784f04f53528f Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 9 Apr 2024 14:49:25 +0200 Subject: [PATCH 167/980] DOC: Fixup intp/uintp documentation for ssize_t/size_t changes This fixes up the docs with slightly verbose/repetive explanations. Closes gh-26092 --- doc/source/reference/arrays.scalars.rst | 29 ++++++++++++------- doc/source/reference/c-api/dtype.rst | 38 ++++++++++++++++++------- 2 files changed, 46 insertions(+), 21 deletions(-) diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index 7789a47221b5..11b3bdc16c6c 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -37,9 +37,8 @@ of the flexible itemsize array types (:class:`str_`, **Figure:** Hierarchy of type objects representing the array data types. 
Not shown are the two integer types :class:`intp` and - :class:`uintp` which just point to the integer type that holds a - pointer for the platform. All the number types can be obtained - using bit-width names as well. + :class:`uintp` which are used for indexing (the same as the + default integer since NumPy 2). .. TODO - use something like this instead of the diagram above, as it generates @@ -377,21 +376,29 @@ are also provided. Alias for the signed integer type (one of `numpy.byte`, `numpy.short`, `numpy.intc`, `numpy.int_`, `numpy.long` and `numpy.longlong`) - that is the same size as a pointer. + that is used as a default integer and for indexing. - Compatible with the C ``intptr_t``. + Compatible with the C ``Py_ssize_t``. - :Character code: ``'p'`` + :Character code: ``'n'`` + + .. versionchanged:: 2.0 + Before NumPy 2, this had the same size as a pointer. In practice this + is almost always identical, but the character code ``'p'`` maps to the C + ``intptr_t``. The character code ``'n'`` was added in NumPy 2.0. .. attribute:: uintp - Alias for the unsigned integer type (one of `numpy.ubyte`, `numpy.ushort`, - `numpy.uintc`, `numpy.uint`, `numpy.ulong` and `numpy.ulonglong`) - that is the same size as a pointer. + Alias for the unsigned integer type that is the same size as ``intp``. + + Compatible with the C ``size_t``. - Compatible with the C ``uintptr_t``. + :Character code: ``'N'`` - :Character code: ``'P'`` + .. versionchanged:: 2.0 + Before NumPy 2, this had the same size as a pointer. In practice this + is almost always identical, but the character code ``'P'`` maps to the C + ``uintptr_t``. The character code ``'N'`` was added in NumPy 2.0. .. autoclass:: numpy.float16 diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst index 8412219a79e1..ce23c51aa9ea 100644 --- a/doc/source/reference/c-api/dtype.rst +++ b/doc/source/reference/c-api/dtype.rst @@ -170,14 +170,26 @@ Enumerated types .. c:enumerator:: NPY_INTP - The enumeration value for a signed integer type which is the same - size as a (void \*) pointer. This is the type used by all + The enumeration value for a signed integer of type ``Py_ssize_t`` + (same as ``ssize_t`` if defined). This is the type used by all arrays of indices. + .. versionchanged:: 2.0 + Previously, this was the same as ``intptr_t`` (same size as a + pointer). In practice, this is identical except on very niche + platforms. + You can use the ``'p'`` character code for the pointer meaning. + .. c:enumerator:: NPY_UINTP - The enumeration value for an unsigned integer type which is the - same size as a (void \*) pointer. + The enumeration value for an unsigned integer type that is identical + to a ``size_t``. + + .. versionchanged:: 2.0 + Previously, this was the same as ``uintptr_t`` (same size as a + pointer). In practice, this is identical except on very niche + platforms. + You can use the ``'P'`` character code for the pointer meaning. .. c:enumerator:: NPY_MASK @@ -287,14 +299,20 @@ all platforms for all the kinds of numeric types. Commonly 8-, 16-, types are available. -Integer that can hold a pointer -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Further integer aliases +~~~~~~~~~~~~~~~~~~~~~~~ -The constants **NPY_INTP** and **NPY_UINTP** refer to an -enumerated integer type that is large enough to hold a pointer on the -platform. Index arrays should always be converted to **NPY_INTP** -, because the dimension of the array is of type npy_intp. 
+The constants **NPY_INTP** and **NPY_UINTP** refer to an ``Py_ssize_t`` +and ``size_t``. +Although in practice normally true, these types are strictly speaking not +pointer sized and the character codes ``'p'`` and ``'P'`` can be used for +pointer sized integers. +(Before NumPy 2, ``intp`` was pointer size, but this almost never matched +the actual use, which is the reason for the name.) +Since NumPy 2, **NPY_DEFAULT_INT** is additionally defined. +The value of the macro is runtime dependent: Since NumPy 2, it maps to +``NPY_INTP`` while on earlier versions it maps to ``NPY_LONG``. C-type names ------------ From 21b0d4cf11be1e01c33c8ff4e7ff2081ef580ad9 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 9 Apr 2024 12:25:46 -0600 Subject: [PATCH 168/980] MNT: migrate PyList_GetItem usages to PyList_GetItemRef --- numpy/_core/src/umath/dispatching.c | 7 +++++-- numpy/_core/src/umath/wrapping_array_method.c | 4 +++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index b7f2fb3d9caf..1cf915aee3b4 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -45,6 +45,7 @@ #include "numpy/ndarraytypes.h" #include "numpy/npy_3kcompat.h" #include "common.h" +#include "npy_pycompat.h" #include "dispatching.h" #include "dtypemeta.h" @@ -121,8 +122,9 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate) PyObject *loops = ufunc->_loops; Py_ssize_t length = PyList_Size(loops); for (Py_ssize_t i = 0; i < length; i++) { - PyObject *item = PyList_GetItem(loops, i); + PyObject *item = PyList_GetItemRef(loops, i); PyObject *cur_DType_tuple = PyTuple_GetItem(item, 0); + Py_DECREF(item); int cmp = PyObject_RichCompareBool(cur_DType_tuple, DType_tuple, Py_EQ); if (cmp < 0) { return -1; @@ -1277,8 +1279,9 @@ get_info_no_cast(PyUFuncObject *ufunc, PyArray_DTypeMeta *op_dtype, PyObject *loops = ufunc->_loops; Py_ssize_t length = PyList_Size(loops); for (Py_ssize_t i = 0; i < length; i++) { - PyObject *item = PyList_GetItem(loops, i); + PyObject *item = PyList_GetItemRef(loops, i); PyObject *cur_DType_tuple = PyTuple_GetItem(item, 0); + Py_DECREF(item); int cmp = PyObject_RichCompareBool(cur_DType_tuple, t_dtypes, Py_EQ); if (cmp < 0) { diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index fe065e175027..756f09507954 100644 --- a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -26,6 +26,7 @@ #include "numpy/ndarraytypes.h" +#include "npy_pycompat.h" #include "common.h" #include "array_method.h" #include "legacy_array_method.h" @@ -250,8 +251,9 @@ PyUFunc_AddWrappingLoop(PyObject *ufunc_obj, PyObject *loops = ufunc->_loops; Py_ssize_t length = PyList_Size(loops); for (Py_ssize_t i = 0; i < length; i++) { - PyObject *item = PyList_GetItem(loops, i); + PyObject *item = PyList_GetItemRef(loops, i); PyObject *cur_DType_tuple = PyTuple_GetItem(item, 0); + Py_DECREF(item); int cmp = PyObject_RichCompareBool(cur_DType_tuple, wrapped_dt_tuple, Py_EQ); if (cmp < 0) { goto finish; From b0df15023f98caaf05fb4d009f086cb5623b0433 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Tue, 9 Apr 2024 18:27:42 +0000 Subject: [PATCH 169/980] MAINT: Robust string meson template substitution *Slightly robust Closes gh-26107 --- numpy/f2py/_backends/_meson.py | 10 +++++----- numpy/f2py/_backends/meson.build.template | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff 
--git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py index 20df79a1c71d..d4b650857e74 100644 --- a/numpy/f2py/_backends/_meson.py +++ b/numpy/f2py/_backends/_meson.py @@ -73,8 +73,8 @@ def initialize_template(self) -> None: self.substitutions["python"] = self.python_exe def sources_substitution(self) -> None: - self.substitutions["source_list"] = f",\n{self.indent}".join( - [f"{self.indent}'{source}'," for source in self.sources] + self.substitutions["source_list"] = ",\n".join( + [f"{self.indent}'''{source}'''," for source in self.sources] ) def deps_substitution(self) -> None: @@ -85,7 +85,7 @@ def deps_substitution(self) -> None: def libraries_substitution(self) -> None: self.substitutions["lib_dir_declarations"] = "\n".join( [ - f"lib_dir_{i} = declare_dependency(link_args : ['-L{lib_dir}'])" + f"lib_dir_{i} = declare_dependency(link_args : ['''-L{lib_dir}'''])" for i, lib_dir in enumerate(self.library_dirs) ] ) @@ -106,7 +106,7 @@ def libraries_substitution(self) -> None: def include_substitution(self) -> None: self.substitutions["inc_list"] = f",\n{self.indent}".join( - [f"{self.indent}'{inc}'," for inc in self.include_dirs] + [f"{self.indent}'''{inc}'''," for inc in self.include_dirs] ) def generate_meson_build(self): @@ -114,7 +114,7 @@ def generate_meson_build(self): node() template = Template(self.meson_build_template()) meson_build = template.substitute(self.substitutions) - meson_build = re.sub(r',,', ',', meson_build) + meson_build = re.sub(r",,", ",", meson_build) return meson_build diff --git a/numpy/f2py/_backends/meson.build.template b/numpy/f2py/_backends/meson.build.template index 8e34fdc8d4d6..092b1112c262 100644 --- a/numpy/f2py/_backends/meson.build.template +++ b/numpy/f2py/_backends/meson.build.template @@ -8,7 +8,7 @@ project('${modulename}', ]) fc = meson.get_compiler('fortran') -py = import('python').find_installation('${python}', pure: false) +py = import('python').find_installation('''${python}''', pure: false) py_dep = py.dependency() incdir_numpy = run_command(py, From 360caa7c7cddd54de867b90c0d0fd9780c3d479c Mon Sep 17 00:00:00 2001 From: Leo Singer Date: Mon, 8 Apr 2024 10:55:53 -0400 Subject: [PATCH 170/980] MAINT: utilize ufunc API const correctness internally When internally defining ufuncs, declare data and types as const to take advantage of the const correctness of the API. See #23847. 
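As an illustration only (no such ufunc is part of this diff), a minimal
registration written against the const-qualified API now reads:

    static void
    double_square(char **args, npy_intp const *dimensions,
                  npy_intp const *steps, void *NPY_UNUSED(data))
    {
        npy_intp n = dimensions[0];
        char *in = args[0], *out = args[1];
        for (npy_intp i = 0; i < n; i++, in += steps[0], out += steps[1]) {
            *(npy_double *)out = *(npy_double *)in * *(npy_double *)in;
        }
    }

    static PyUFuncGenericFunction square_funcs[] = {&double_square};
    /* both tables can now be declared const to match the API */
    static const char square_types[] = {NPY_DOUBLE, NPY_DOUBLE};
    static void *const square_data[] = {NULL};

    /* inside the module init function: */
    PyObject *square = PyUFunc_FromFuncAndData(
            square_funcs, square_data, square_types,
            1, 1, 1, PyUFunc_None, "square", "x*x, elementwise", 0);

String literals can likewise be passed for ``name`` and ``doc`` without
casts, as the removal of the ``(char*)`` casts below shows.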
--- numpy/_core/code_generators/generate_umath.py | 2 +- numpy/_core/src/umath/_operand_flag_tests.c | 4 +-- numpy/_core/src/umath/_rational_tests.c | 18 ++++++------ numpy/_core/src/umath/_umath_tests.c.src | 28 +++++++++---------- numpy/fft/_pocketfft_umath.cpp | 10 +++---- numpy/linalg/umath_linalg.cpp | 28 +++++++++---------- 6 files changed, 45 insertions(+), 45 deletions(-) diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index b64624702db7..06871a44b37f 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1450,7 +1450,7 @@ def make_arrays(funcdict): % (name, funcnames)) code1list.append("static void * %s_data[] = {%s};" % (name, datanames)) - code1list.append("static char %s_signatures[] = {%s};" + code1list.append("static const char %s_signatures[] = {%s};" % (name, signames)) uf.empty = False else: diff --git a/numpy/_core/src/umath/_operand_flag_tests.c b/numpy/_core/src/umath/_operand_flag_tests.c index a674dfa560b7..11b74af72d28 100644 --- a/numpy/_core/src/umath/_operand_flag_tests.c +++ b/numpy/_core/src/umath/_operand_flag_tests.c @@ -36,9 +36,9 @@ inplace_add(char **args, npy_intp const *dimensions, npy_intp const *steps, void PyUFuncGenericFunction funcs[1] = {&inplace_add}; /* These are the input and return dtypes of logit.*/ -static char types[2] = {NPY_INTP, NPY_INTP}; +static const char types[2] = {NPY_INTP, NPY_INTP}; -static void *data[1] = {NULL}; +static void *const data[1] = {NULL}; static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, diff --git a/numpy/_core/src/umath/_rational_tests.c b/numpy/_core/src/umath/_rational_tests.c index 80acf38354ae..aa4250e4efc8 100644 --- a/numpy/_core/src/umath/_rational_tests.c +++ b/numpy/_core/src/umath/_rational_tests.c @@ -1273,8 +1273,8 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { { int types2[3] = {npy_rational,npy_rational,npy_rational}; PyObject* gufunc = PyUFunc_FromFuncAndDataAndSignature(0,0,0,0,2,1, - PyUFunc_None,(char*)"matrix_multiply", - (char*)"return result of multiplying two matrices of rationals", + PyUFunc_None,"matrix_multiply", + "return result of multiplying two matrices of rationals", 0,"(m,n),(n,p)->(m,p)"); if (!gufunc) { goto fail; @@ -1291,8 +1291,8 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { int types3[3] = {NPY_INT64,NPY_INT64,npy_rational}; PyObject* ufunc = PyUFunc_FromFuncAndData(0,0,0,0,2,1, - PyUFunc_None,(char*)"test_add", - (char*)"add two matrices of int64 and return rational matrix",0); + PyUFunc_None,"test_add", + "add two matrices of int64 and return rational matrix",0); if (!ufunc) { goto fail; } @@ -1306,8 +1306,8 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { /* Create test ufunc with rational types using RegisterLoopForDescr */ { PyObject* ufunc = PyUFunc_FromFuncAndData(0,0,0,0,2,1, - PyUFunc_None,(char*)"test_add_rationals", - (char*)"add two matrices of rationals and return rational matrix",0); + PyUFunc_None,"test_add_rationals", + "add two matrices of rationals and return rational matrix",0); PyArray_Descr* types[3] = {npyrational_descr, npyrational_descr, npyrational_descr}; @@ -1326,7 +1326,7 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { #define NEW_UNARY_UFUNC(name,type,doc) { \ int types[2] = {npy_rational,type}; \ PyObject* ufunc = PyUFunc_FromFuncAndData(0,0,0,0,1,1, \ - PyUFunc_None,(char*)#name,(char*)doc,0); \ + PyUFunc_None,#name,doc,0); \ if (!ufunc) { \ goto fail; \ } \ @@ -1345,8 +1345,8 @@ PyMODINIT_FUNC 
PyInit__rational_tests(void) { static const char types[3] = {type,type,type}; \ static void* data[1] = {0}; \ PyObject* ufunc = PyUFunc_FromFuncAndData( \ - (PyUFuncGenericFunction*)func, data,(char*)types, \ - 1,2,1,PyUFunc_One,(char*)#name,(char*)doc,0); \ + (PyUFuncGenericFunction*)func, data,types, \ + 1,2,1,PyUFunc_One,#name,doc,0); \ if (!ufunc) { \ goto fail; \ } \ diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src index ac45ae92afbc..a16a915c09d5 100644 --- a/numpy/_core/src/umath/_umath_tests.c.src +++ b/numpy/_core/src/umath/_umath_tests.c.src @@ -422,29 +422,29 @@ defdict = { */ static PyUFuncGenericFunction always_error_functions[] = { always_error_loop }; -static void *always_error_data[] = { (void *)NULL }; -static char always_error_signatures[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; +static void *const always_error_data[] = { (void *)NULL }; +static const char always_error_signatures[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; static PyUFuncGenericFunction inner1d_functions[] = { INTP_inner1d, DOUBLE_inner1d }; -static void *inner1d_data[] = { (void *)NULL, (void *)NULL }; -static char inner1d_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; +static void *const inner1d_data[] = { (void *)NULL, (void *)NULL }; +static const char inner1d_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; static PyUFuncGenericFunction innerwt_functions[] = { INTP_innerwt, DOUBLE_innerwt }; -static void *innerwt_data[] = { (void *)NULL, (void *)NULL }; -static char innerwt_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; +static void *const innerwt_data[] = { (void *)NULL, (void *)NULL }; +static const char innerwt_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; static PyUFuncGenericFunction matrix_multiply_functions[] = { INTP_matrix_multiply, FLOAT_matrix_multiply, DOUBLE_matrix_multiply }; -static void *matrix_multiply_data[] = { (void *)NULL, (void *)NULL, (void *)NULL }; -static char matrix_multiply_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; +static void *const matrix_multiply_data[] = { (void *)NULL, (void *)NULL, (void *)NULL }; +static const char matrix_multiply_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; static PyUFuncGenericFunction cross1d_functions[] = { INTP_cross1d, DOUBLE_cross1d }; -static void *cross1d_data[] = { (void *)NULL, (void *)NULL }; -static char cross1d_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; +static void *const cross1d_data[] = { (void *)NULL, (void *)NULL }; +static const char cross1d_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; static PyUFuncGenericFunction euclidean_pdist_functions[] = { FLOAT_euclidean_pdist, DOUBLE_euclidean_pdist }; -static void *eucldiean_pdist_data[] = { (void *)NULL, (void *)NULL }; -static char euclidean_pdist_signatures[] = { NPY_FLOAT, NPY_FLOAT, +static void *const eucldiean_pdist_data[] = { (void *)NULL, (void *)NULL }; +static const char euclidean_pdist_signatures[] = { NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE }; static PyUFuncGenericFunction cumsum_functions[] = { INTP_cumsum, DOUBLE_cumsum }; -static void *cumsum_data[] = { (void *)NULL, (void *)NULL }; -static char 
cumsum_signatures[] = { NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE }; +static void *const cumsum_data[] = { (void *)NULL, (void *)NULL }; +static const char cumsum_signatures[] = { NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE }; static int diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index 127ebfdb6149..7b42f8edc97b 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -297,17 +297,17 @@ static PyUFuncGenericFunction fft_functions[] = { wrap_legacy_cpp_ufunc>, wrap_legacy_cpp_ufunc> }; -static char fft_types[] = { +static const char fft_types[] = { NPY_CDOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT, NPY_CLONGDOUBLE, NPY_LONGDOUBLE, NPY_CLONGDOUBLE }; -static void *fft_data[] = { +static void *const fft_data[] = { (void*)&pocketfft::FORWARD, (void*)&pocketfft::FORWARD, (void*)&pocketfft::FORWARD }; -static void *ifft_data[] = { +static void *const ifft_data[] = { (void*)&pocketfft::BACKWARD, (void*)&pocketfft::BACKWARD, (void*)&pocketfft::BACKWARD @@ -323,7 +323,7 @@ static PyUFuncGenericFunction rfft_n_odd_functions[] = { wrap_legacy_cpp_ufunc>, wrap_legacy_cpp_ufunc> }; -static char rfft_types[] = { +static const char rfft_types[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_FLOAT, NPY_FLOAT, NPY_CFLOAT, NPY_LONGDOUBLE, NPY_LONGDOUBLE, NPY_CLONGDOUBLE @@ -334,7 +334,7 @@ static PyUFuncGenericFunction irfft_functions[] = { wrap_legacy_cpp_ufunc>, wrap_legacy_cpp_ufunc> }; -static char irfft_types[] = { +static const char irfft_types[] = { NPY_CDOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_FLOAT, NPY_CLONGDOUBLE, NPY_LONGDOUBLE, NPY_LONGDOUBLE diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index ecbca7f4d3ea..4b0bb30cb56f 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -4215,14 +4215,14 @@ GUFUNC_FUNC_ARRAY_REAL_COMPLEX__(lstsq); GUFUNC_FUNC_ARRAY_EIG(eig); GUFUNC_FUNC_ARRAY_EIG(eigvals); -static char equal_2_types[] = { +static const char equal_2_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_CDOUBLE, NPY_CDOUBLE }; -static char equal_3_types[] = { +static const char equal_3_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT, @@ -4230,47 +4230,47 @@ static char equal_3_types[] = { }; /* second result is logdet, that will always be a REAL */ -static char slogdet_types[] = { +static const char slogdet_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_CDOUBLE, NPY_CDOUBLE, NPY_DOUBLE }; -static char eigh_types[] = { +static const char eigh_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT, NPY_CDOUBLE, NPY_DOUBLE, NPY_CDOUBLE }; -static char eighvals_types[] = { +static const char eighvals_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_CDOUBLE, NPY_DOUBLE }; -static char eig_types[] = { +static const char eig_types[] = { NPY_FLOAT, NPY_CFLOAT, NPY_CFLOAT, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE }; -static char eigvals_types[] = { +static const char eigvals_types[] = { NPY_FLOAT, NPY_CFLOAT, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE }; -static char svd_1_1_types[] = { +static const char svd_1_1_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_CDOUBLE, NPY_DOUBLE }; -static char 
svd_1_3_types[] = { +static const char svd_1_3_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT, @@ -4278,25 +4278,25 @@ static char svd_1_3_types[] = { }; /* A, tau */ -static char qr_r_raw_types[] = { +static const char qr_r_raw_types[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, }; /* A, tau, q */ -static char qr_reduced_types[] = { +static const char qr_reduced_types[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, }; /* A, tau, q */ -static char qr_complete_types[] = { +static const char qr_complete_types[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, }; /* A, b, rcond, x, resid, rank, s, */ -static char lstsq_types[] = { +static const char lstsq_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_INT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_INT, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_INT, NPY_FLOAT, @@ -4311,7 +4311,7 @@ typedef struct gufunc_descriptor_struct { int nin; int nout; PyUFuncGenericFunction *funcs; - char *types; + const char *types; } GUFUNC_DESCRIPTOR_t; GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { From 04a6fe2f9d032f76273871c130f3fc4b51c1e8f5 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 9 Apr 2024 14:44:26 -0600 Subject: [PATCH 171/980] MNT: disable the allocator cache for nogil builds --- numpy/_core/src/multiarray/alloc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 0487fad1a942..2f3c82bc5909 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -96,11 +96,13 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, assert((esz == 1 && cache == datacache) || (esz == sizeof(npy_intp) && cache == dimcache)); assert(PyGILState_Check()); +#ifndef Py_GIL_DISABLED if (nelem < msz) { if (cache[nelem].available > 0) { return cache[nelem].ptrs[--(cache[nelem].available)]; } } +#endif p = alloc(nelem * esz); if (p) { #ifdef _PyPyGC_AddMemoryPressure @@ -131,12 +133,14 @@ _npy_free_cache(void * p, npy_uintp nelem, npy_uint msz, cache_bucket * cache, void (*dealloc)(void *)) { assert(PyGILState_Check()); +#ifndef Py_GIL_DISABLED if (p != NULL && nelem < msz) { if (cache[nelem].available < NCACHE) { cache[nelem].ptrs[cache[nelem].available++] = p; return; } } +#endif dealloc(p); } From cad9171f9ec3b5ddd79a51077196a57a22380b3b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 9 Apr 2024 16:31:29 -0600 Subject: [PATCH 172/980] TST: skip limited API test on nogil python build (#26229) --- numpy/_core/tests/test_limited_api.py | 6 +++++- numpy/_core/tests/test_nditer.py | 6 ++---- numpy/testing/_private/utils.py | 3 ++- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index c1b2cfcbaff9..f88164b9db91 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -5,7 +5,7 @@ import sysconfig import pytest -from numpy.testing import IS_WASM, IS_PYPY +from numpy.testing import IS_WASM, IS_PYPY, NOGIL_BUILD # This import is copied from random.tests.test_extending try: @@ -67,6 +67,10 @@ def install_temp(tmpdir_factory): "and Py_REF_DEBUG" ), ) +@pytest.mark.xfail( + NOGIL_BUILD, + reason="Py_GIL_DISABLED builds do not currently support the limited 
API", +) @pytest.mark.skipif(IS_PYPY, reason="no support for limited API in PyPy") def test_limited_api(install_temp): """Test building a third-party C extension with the limited API diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 75451c4a2987..91c50e4ba408 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -1,5 +1,4 @@ import sys -import sysconfig import pytest import textwrap @@ -11,11 +10,10 @@ from numpy import array, arange, nditer, all from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_raises, - IS_WASM, HAS_REFCOUNT, suppress_warnings, break_cycles + IS_WASM, HAS_REFCOUNT, suppress_warnings, break_cycles, + NOGIL_BUILD ) -NOGIL_BUILD = bool(sysconfig.get_config_var('Py_GIL_DISABLED')) - def iter_multi_index(i): ret = [] while not i.finished: diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 8e33f319b11f..4570cbf01420 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -39,7 +39,7 @@ 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', - '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE' + '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE', 'NOGIL_BUILD' ] @@ -68,6 +68,7 @@ class KnownFailureException(Exception): if 'musl' in _v: IS_MUSL = True +NOGIL_BUILD = bool(sysconfig.get_config_var("Py_GIL_DISABLED")) def assert_(val, msg=''): """ From 2452ff4f7d2cf7ede3b50945fce2d4fe194f7a0e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 7 Apr 2024 13:34:55 -0600 Subject: [PATCH 173/980] MAINT: Drop Python 3.9 Drop Python 3.9 from the supported Python versions and CI. Python 3.9 is still used in the Cygwin build test because Cygwin does not yet support 3.10. 
--- .github/workflows/linux.yml | 10 +++++----- .github/workflows/linux_simd.yml | 8 ++++---- .github/workflows/mypy.yml | 2 +- .github/workflows/wheels.yml | 10 +++++----- INSTALL.rst | 4 ++-- azure-pipelines.yml | 8 ++++---- building_with_meson.md | 5 ++--- pyproject.toml | 3 +-- tools/ci/cirrus_arm.yml | 11 ++++++----- tools/ci/cirrus_wheels.yml | 9 ++------- tools/ci/run_32_bit_linux_docker.sh | 2 +- 11 files changed, 33 insertions(+), 39 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 6bfe339a6499..48d487e27d7f 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -39,7 +39,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: - python-version: '3.9' + python-version: '3.10' - name: Install linter requirements run: python -m pip install -r requirements/linter_requirements.txt @@ -60,7 +60,7 @@ jobs: fetch-tags: true - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: - python-version: '3.9' + python-version: '3.10' - uses: ./.github/meson_actions pypy: @@ -74,7 +74,7 @@ jobs: fetch-tags: true - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: - python-version: 'pypy3.9-v7.3.12' + python-version: 'pypy3.10-v7.3.15' - name: Setup using scipy-openblas run: | python -m pip install -r requirements/ci_requirements.txt @@ -121,7 +121,7 @@ jobs: fetch-tags: true - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: - python-version: '3.9' + python-version: '3.10' - name: Install build and test dependencies from PyPI run: | pip install -r requirements/build_requirements.txt @@ -158,7 +158,7 @@ jobs: fetch-tags: true - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: - python-version: '3.9' + python-version: '3.10' - name: Install build and benchmarking dependencies run: | sudo apt-get update diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 7b5c22562b29..f2f50fbe4684 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -64,7 +64,7 @@ jobs: fetch-tags: true - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: - python-version: '3.9' + python-version: '3.10' - uses: ./.github/meson_actions name: Build/Test @@ -81,7 +81,7 @@ jobs: fetch-tags: true - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: - python-version: '3.9' + python-version: '3.10' - name: Install GCC/8/9 run: | @@ -117,7 +117,7 @@ jobs: - [ "without optimizations", "-Dallow-noblas=true -Ddisable-optimization=true", - "3.12-dev" + "3.12" ] - [ "native", @@ -132,7 +132,7 @@ jobs: - [ "without avx512/avx2/fma3", "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C", - "3.9" + "3.10" ] env: diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 781bba2f1f0d..be9874a9f7eb 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -48,7 +48,7 @@ jobs: os_python: - [ubuntu-latest, '3.12'] - [windows-2019, '3.11'] - - [macos-12, '3.9'] + - [macos-12, '3.10'] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 21829f4596f7..eb9919528b91 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -85,15 +85,15 @@ jobs: - [macos-14, macosx_arm64, accelerate] # always use 
accelerate - [windows-2019, win_amd64, ""] - [windows-2019, win32, ""] - python: ["cp39", "cp310", "cp311", "cp312", "pp39"] + python: ["cp310", "cp311", "cp312", "pp310"] exclude: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32, ""] - python: "pp39" + python: "pp310" - buildplat: [ ubuntu-20.04, musllinux_x86_64, "" ] - python: "pp39" + python: "pp310" - buildplat: [ macos-14, macosx_arm64, accelerate ] - python: "pp39" + python: "pp310" env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} @@ -216,7 +216,7 @@ jobs: - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: # Build sdist on lowest supported Python - python-version: "3.9" + python-version: "3.10" - name: Build sdist run: | python -m pip install -U pip build diff --git a/INSTALL.rst b/INSTALL.rst index e305e29facdd..e5f598f153d6 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -14,13 +14,13 @@ Prerequisites Building NumPy requires the following installed software: -1) Python__ 3.9.x or newer. +1) Python__ 3.10.x or newer. Please note that the Python development headers also need to be installed, e.g., on Debian/Ubuntu one needs to install both `python3` and `python3-dev`. On Windows and macOS this is normally not an issue. -2) Cython >= 3.0 +2) Cython >= 3.0.6 3) pytest__ (optional) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 29fb0f7a8974..2393a96d3f86 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -44,7 +44,7 @@ stages: steps: - task: UsePythonVersion@0 inputs: - versionSpec: '3.9' + versionSpec: '3.10' addToPath: true architecture: 'x64' - script: >- @@ -57,7 +57,7 @@ stages: displayName: 'Run Lint Checks' failOnStderr: true - - job: Linux_Python_39_32bit_full_with_asserts + - job: Linux_Python_310_32bit_full_with_asserts pool: vmImage: 'ubuntu-20.04' steps: @@ -89,8 +89,8 @@ stages: TEST_MODE: full BITS: 64 _USE_BLAS_ILP64: '1' - PyPy39-64bit-fast: - PYTHON_VERSION: 'pypy3.9' + PyPy310-64bit-fast: + PYTHON_VERSION: 'pypy3.10' PYTHON_ARCH: 'x64' TEST_MODE: fast BITS: 64 diff --git a/building_with_meson.md b/building_with_meson.md index ec7625b0d2c3..6498d3659bb0 100644 --- a/building_with_meson.md +++ b/building_with_meson.md @@ -1,9 +1,8 @@ # Building with Meson _Note: this is for early adopters. It has been tested on Linux and macOS, and -with Python 3.9-3.12. Windows will be tested soon. There is one CI job to keep -the build stable. This may have rough edges, please open an issue if you run -into a problem._ +with Python 3.10-3.12. There is one CI job to keep the build stable. This may +have rough edges, please open an issue if you run into a problem._ ### Developer build diff --git a/pyproject.toml b/pyproject.toml index 6be12513c3ef..ec34720b6564 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ authors = [{name = "Travis E. 
Oliphant et al."}] maintainers = [ {name = "NumPy Developers", email="numpy-discussion@python.org"}, ] -requires-python = ">=3.9" +requires-python = ">=3.10" readme = "README.md" classifiers = [ 'Development Status :: 5 - Production/Stable', @@ -26,7 +26,6 @@ classifiers = [ 'Programming Language :: C', 'Programming Language :: Python', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml index cbf99c9dace6..3b48089dcc08 100644 --- a/tools/ci/cirrus_arm.yml +++ b/tools/ci/cirrus_arm.yml @@ -74,6 +74,7 @@ freebsd_test_task: install_devtools_script: | pkg install -y git bash ninja ccache blas cblas lapack pkgconf + pkg install -y python311 <<: *MODIFIED_CLONE @@ -86,22 +87,22 @@ freebsd_test_task: prepare_env_script: | # Create a venv (the `source` command needs bash, not the default sh shell) chsh -s /usr/local/bin/bash - python -m venv .venv + python3.11 -m venv .venv source .venv/bin/activate # Minimal build and test requirements - python -m pip install -U pip - python -m pip install meson-python Cython pytest hypothesis + python3.11 -m pip install -U pip + python3.11 -m pip install meson-python Cython pytest hypothesis build_script: | chsh -s /usr/local/bin/bash source .venv/bin/activate - python -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false" + python3.11 -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false" test_script: | chsh -s /usr/local/bin/bash source .venv/bin/activate cd tools - python -m pytest --pyargs numpy -m "not slow" + python3.11 -m pytest --pyargs numpy -m "not slow" ccache -s on_failure: diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index bf44a8b72704..8705bd9b9cbd 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -23,10 +23,6 @@ linux_aarch64_task: # build in a matrix because building and testing all four wheels in a # single task takes longer than 60 mins (the default time limit for a # cirrus-ci task). 
- - env: - CIRRUS_CLONE_SUBMODULES: true - CIBW_BUILD: cp39-* - EXPECT_CPU_FEATURES: NEON NEON_FP16 NEON_VFPV4 ASIMD ASIMDHP ASIMDDP ASIMDFHM - env: CIRRUS_CLONE_SUBMODULES: true CIBW_BUILD: cp310-* @@ -35,7 +31,6 @@ linux_aarch64_task: CIBW_BUILD: cp311-* - env: CIRRUS_CLONE_SUBMODULES: true - CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: cp312-* initial_setup_script: | @@ -64,10 +59,10 @@ macosx_arm64_task: matrix: - env: CIRRUS_CLONE_SUBMODULES: true - CIBW_BUILD: cp39-* cp310-* + CIBW_BUILD: cp310-* cp311 - env: CIRRUS_CLONE_SUBMODULES: true - CIBW_BUILD: cp311-* cp312-* + CIBW_BUILD: cp312-* env: PATH: /usr/local/lib:/usr/local/include:$PATH CIBW_ARCHS: arm64 diff --git a/tools/ci/run_32_bit_linux_docker.sh b/tools/ci/run_32_bit_linux_docker.sh index b1cf4391e550..5e5e8bae4f96 100644 --- a/tools/ci/run_32_bit_linux_docker.sh +++ b/tools/ci/run_32_bit_linux_docker.sh @@ -2,7 +2,7 @@ set -xe git config --global --add safe.directory /numpy cd /numpy -/opt/python/cp39-cp39/bin/python -mvenv venv +/opt/python/cp310-cp310/bin/python -mvenv venv source venv/bin/activate pip install -r requirements/ci32_requirements.txt python3 -m pip install -r requirements/test_requirements.txt From 1f35b27d866e929a418e0f33bc3353f901986b5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Tue, 9 Apr 2024 18:35:01 +0200 Subject: [PATCH 174/980] DOC: Update __array__ copy keyword docs --- doc/source/numpy_2_0_migration_guide.rst | 31 +++++++++++++----------- doc/source/user/basics.dispatch.rst | 16 ++++++++++++ numpy/_core/_add_newdocs.py | 7 +++--- numpy/_core/src/multiarray/ctors.c | 5 ++-- 4 files changed, 40 insertions(+), 19 deletions(-) diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 2b345942db21..2e43a7d7c087 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -412,20 +412,23 @@ The :ref:`copy keyword behavior changes ` in `~numpy.asarray`, `~numpy.array` and `ndarray.__array__ ` may require these changes: -1. Code using ``np.array(..., copy=False)`` can in most cases be changed to - ``np.asarray(...)``. Older code tended to use ``np.array`` like this because - it had less overhead than the default ``np.asarray`` copy-if-needed - behavior. This is no longer true, and ``np.asarray`` is the preferred function. -2. For code that explicitly needs to pass ``None``/``False`` meaning "copy if - needed" in a way that's compatible with NumPy 1.x and 2.x, see - `scipy#20172 `__ for an example - of how to do so. -3. For any ``__array__`` method on a non-NumPy array-like object, a - ``copy=None`` keyword can be added to the signature - this will work with - older NumPy versions as well. If ``copy`` keyword is considered in - the ``__array__`` method implementation, then for ``copy=True`` always - return a new copy. - +* Code using ``np.array(..., copy=False)`` can in most cases be changed to + ``np.asarray(...)``. Older code tended to use ``np.array`` like this because + it had less overhead than the default ``np.asarray`` copy-if-needed + behavior. This is no longer true, and ``np.asarray`` is the preferred function. +* For code that explicitly needs to pass ``None``/``False`` meaning "copy if + needed" in a way that's compatible with NumPy 1.x and 2.x, see + `scipy#20172 `__ for an example + of how to do so. 
+* For any ``__array__`` method on a non-NumPy array-like object, ``dtype=None`` + and ``copy=None`` keywords must be added to the signature - this will work with older + NumPy versions as well (although older numpy versions will never pass in ``copy`` keyword). + If the keywords are actually used in the ``__array__`` method implementation, then for: + + * ``copy=True`` and any ``dtype`` value always return a new copy, + * ``copy=None`` create a copy if required (for example by ``dtype``), + * ``copy=False`` a copy must never be made. If a copy is needed to return a numpy array + or satisfy ``dtype``, then raise an exception (``ValueError``). Writing numpy-version-dependent code ------------------------------------ diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index 29b9eae06481..daea7474aa1a 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -23,6 +23,10 @@ example that has rather narrow utility but illustrates the concepts involved. ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" ... def __array__(self, dtype=None, copy=None): +... if copy is False: +... raise ValueError( +... "`copy=False` isn't supported. A copy is always created." +... ) ... return self._i * np.eye(self._N, dtype=dtype) Our custom array can be instantiated like: @@ -85,6 +89,10 @@ For this example we will only handle the method ``__call__`` ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" ... def __array__(self, dtype=None, copy=None): +... if copy is False: +... raise ValueError( +... "`copy=False` isn't supported. A copy is always created." +... ) ... return self._i * np.eye(self._N, dtype=dtype) ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... if method == '__call__': @@ -136,6 +144,10 @@ conveniently by inheriting from the mixin ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" ... def __array__(self, dtype=None, copy=None): +... if copy is False: +... raise ValueError( +... "`copy=False` isn't supported. A copy is always created." +... ) ... return self._i * np.eye(self._N, dtype=dtype) ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... if method == '__call__': @@ -174,6 +186,10 @@ functions to our custom variants. ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" ... def __array__(self, dtype=None, copy=None): +... if copy is False: +... raise ValueError( +... "`copy=False` isn't supported. A copy is always created." +... ) ... return self._i * np.eye(self._N, dtype=dtype) ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... if method == '__call__': diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 38462e19eaaf..c3631218e4d2 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -2943,10 +2943,11 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array__', """ - a.__array__([dtype], /, *, copy=None) + a.__array__([dtype], *, copy=None) - For ``dtype`` parameter it returns either a new reference to self if - ``dtype`` is not given or a new array of provided data type if ``dtype`` + For ``dtype`` parameter it returns a new reference to self if + ``dtype`` is not given or it matches array's data type. + A new array of provided data type is returned if ``dtype`` is different from the current data type of the array. 
For ``copy`` parameter it returns a new reference to self if ``copy=False`` or ``copy=None`` and copying isn't enforced by ``dtype`` diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 8d176446772e..b5871c8b04f8 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2459,8 +2459,9 @@ check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames) Py_DECREF(type); Py_DECREF(value); Py_XDECREF(traceback); - if (DEPRECATE("__array__ should implement the 'dtype' and " - "'copy' keyword argument") < 0) { + if (DEPRECATE("__array__ implementation doesn't accept a copy keyword, " + "so passing copy=False failed. __array__ must implement " + "'dtype' and 'copy' keyword arguments.") < 0) { return -1; } return 0; From bff75cfb0631814b2d2efa0228487e3f90778564 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 10 Apr 2024 18:12:22 +0200 Subject: [PATCH 175/980] DOC: Add _dunder_array reference --- doc/source/numpy_2_0_migration_guide.rst | 2 +- doc/source/user/basics.interoperability.rst | 2 ++ numpy/_core/_add_newdocs.py | 3 +++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 2e43a7d7c087..8cd02628497c 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -423,7 +423,7 @@ The :ref:`copy keyword behavior changes ` in * For any ``__array__`` method on a non-NumPy array-like object, ``dtype=None`` and ``copy=None`` keywords must be added to the signature - this will work with older NumPy versions as well (although older numpy versions will never pass in ``copy`` keyword). - If the keywords are actually used in the ``__array__`` method implementation, then for: + If the keywords are added to the ``__array__`` signature, then for: * ``copy=True`` and any ``dtype`` value always return a new copy, * ``copy=None`` create a copy if required (for example by ``dtype``), diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst index e0faf0c052c9..ca0c39d7081f 100644 --- a/doc/source/user/basics.interoperability.rst +++ b/doc/source/user/basics.interoperability.rst @@ -113,6 +113,8 @@ We can check that ``arr`` and ``new_arr`` share the same data buffer: array([1000, 2, 3, 4]) +.. _dunder_array.interface: + The ``__array__()`` method ~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index c3631218e4d2..94392f3adf94 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -2954,6 +2954,9 @@ parameter. The method returns a new array for ``copy=True``, regardless of ``dtype`` parameter. + A more detailed explanation of the ``__array__`` interface + can be found in :ref:`dunder_array.interface`. 
+ """)) From 482199c99ee6694b29ba02165bbabae1ed3f1e80 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 11 Apr 2024 06:09:09 +1000 Subject: [PATCH 176/980] BLD: update to OpenBLAS 0.3.27 --- requirements/ci32_requirements.txt | 3 +-- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 0484e5084474..796f5e549c7a 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,4 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.26.0.4 - +scipy-openblas32==0.3.27.0.0 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 9ac795a626a6..9ca49dcd9884 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.26.0.4 -scipy-openblas64==0.3.26.0.4 +scipy-openblas32==0.3.27.0.0 +scipy-openblas64==0.3.27.0.0 From de8aee4e16aee7bb6c3eccf01e1fb32000e6ef68 Mon Sep 17 00:00:00 2001 From: Devyani Chavan <104618068+devyanic11@users.noreply.github.com> Date: Thu, 11 Apr 2024 10:57:49 +0530 Subject: [PATCH 177/980] DOC: Update numpy.any example (#26235) * Update docstring example to give different values along axis=0 and axis=1 --------- Co-authored-by: Ross Barnowski Co-authored-by: Sebastian Berg --- numpy/_core/fromnumeric.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 0ef50471b9c4..54f9bf5877d6 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -2469,8 +2469,9 @@ def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): >>> np.any([[True, False], [True, True]]) True - >>> np.any([[True, False], [False, False]], axis=0) - array([ True, False]) + >>> np.any([[True, False, True ], + ... 
[False, False, False]], axis=0)
+ array([ True, False, True])
 
 >>> np.any([-1, 0, 5])
 True
 
From c8cdda22367ad96e719a916eb30418cb72e7c1d6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?=
Date: Fri, 5 Apr 2024 13:04:57 +0200
Subject: [PATCH 178/980] API: Enforce one copy for __array__ when copy=True

---
 numpy/_core/src/multiarray/array_coercion.c | 26 +++++++++--
 numpy/_core/src/multiarray/array_coercion.h |  2 +-
 numpy/_core/src/multiarray/arrayobject.c    |  2 +-
 numpy/_core/src/multiarray/common.c         |  2 +-
 numpy/_core/src/multiarray/ctors.c          | 30 +++++++++----
 numpy/_core/src/multiarray/ctors.h          |  4 +-
 numpy/_core/tests/test_array_coercion.py    |  4 +-
 numpy/_core/tests/test_multiarray.py        | 48 +++++++++++++++++----
 numpy/_core/tests/test_protocols.py         |  4 +-
 9 files changed, 94 insertions(+), 28 deletions(-)

diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c
index f63dbbc77e1f..3d4174c0a87f 100644
--- a/numpy/_core/src/multiarray/array_coercion.c
+++ b/numpy/_core/src/multiarray/array_coercion.c
@@ -99,6 +99,7 @@ enum _dtype_discovery_flags {
     DISCOVER_TUPLES_AS_ELEMENTS = 1 << 4,
     MAX_DIMS_WAS_REACHED = 1 << 5,
     DESCRIPTOR_WAS_SET = 1 << 6,
+    COPY_WAS_CREATED = 1 << 7,
 };
 
 
@@ -1027,8 +1028,9 @@ PyArray_DiscoverDTypeAndShape_Recursive(
             /* __array__ may be passed the requested descriptor if provided */
             requested_descr = *out_descr;
         }
+        int was_copied = 0;
         arr = (PyArrayObject *)_array_from_array_like(obj,
-                requested_descr, 0, NULL, copy);
+                requested_descr, 0, NULL, copy, &was_copied);
         if (arr == NULL) {
             return -1;
         }
@@ -1036,6 +1038,9 @@ PyArray_DiscoverDTypeAndShape_Recursive(
             Py_DECREF(arr);
             arr = NULL;
         }
+        if (was_copied == 1) {
+            *flags |= COPY_WAS_CREATED;
+        }
     }
     if (arr != NULL) {
         /*
@@ -1170,6 +1175,15 @@ PyArray_DiscoverDTypeAndShape_Recursive(
         return -1;
     }
 
+    /*
+     * For a sequence we need to make a copy of the final aggregate anyway.
+     * There's no need to pass explicit `copy=True`, so we switch
+     * to `copy=None` (copy if needed).
+     */
+    if (copy == 1) {
+        copy = -1;
+    }
+
     /* Recursive call for each sequence item */
     for (Py_ssize_t i = 0; i < size; i++) {
         max_dims = PyArray_DiscoverDTypeAndShape_Recursive(
@@ -1217,6 +1231,8 @@
  *        to choose a default.
  * @param copy Specifies the copy behavior. -1 is corresponds to copy=None,
  *        0 to copy=False, and 1 to copy=True in the Python API.
+ * @param was_copied Set to 1 if it can be assumed that a copy was made
+ *        by implementor.
  * @return dimensions of the discovered object or -1 on error.
  *         WARNING: If (and only if) the output is a single array, the ndim
  *         returned _can_ exceed the maximum allowed number of dimensions.
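A minimal Python sketch may help make the ``copy`` rules above concrete. The class here is hypothetical (it only mirrors the ``DiagonalArray`` used in the ``basics.dispatch.rst`` examples above, and is not part of NumPy), and the one-copy behavior noted in the last lines assumes a NumPy build that includes this patch::

    import numpy as np

    class DiagonalArray:
        def __init__(self, n, value):
            self._n = n
            self._value = value

        def __array__(self, dtype=None, copy=None):
            if copy is False:
                # materializing the diagonal always allocates, so a
                # zero-copy view can never be returned
                raise ValueError(
                    "`copy=False` isn't supported. A copy is always created."
                )
            # a fresh ndarray is built on every call, which satisfies
            # both copy=True and copy=None (copy if needed)
            return self._value * np.eye(self._n, dtype=dtype)

    # np.array passes copy=True through to __array__ and trusts that the
    # returned array is already a copy, so no second copy is made:
    arr = np.array(DiagonalArray(3, 2.0), copy=True)

    # copy=False raises, because a copy is unavoidable here:
    # np.array(DiagonalArray(3, 2.0), copy=False)  -> ValueError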
@@ -1396,7 +1416,7 @@ _discover_array_parameters(PyObject *NPY_UNUSED(self), int ndim = PyArray_DiscoverDTypeAndShape( obj, NPY_MAXDIMS, shape, &coercion_cache, - dt_info.dtype, dt_info.descr, (PyArray_Descr **)&out_dtype, 0); + dt_info.dtype, dt_info.descr, (PyArray_Descr **)&out_dtype, 0, NULL); Py_XDECREF(dt_info.dtype); Py_XDECREF(dt_info.descr); if (ndim < 0) { diff --git a/numpy/_core/src/multiarray/array_coercion.h b/numpy/_core/src/multiarray/array_coercion.h index e6639ba1bba9..0fca0702641b 100644 --- a/numpy/_core/src/multiarray/array_coercion.h +++ b/numpy/_core/src/multiarray/array_coercion.h @@ -40,7 +40,7 @@ PyArray_DiscoverDTypeAndShape( npy_intp out_shape[NPY_MAXDIMS], coercion_cache_obj **coercion_cache, PyArray_DTypeMeta *fixed_DType, PyArray_Descr *requested_descr, - PyArray_Descr **out_descr, int copy); + PyArray_Descr **out_descr, int copy, int *was_copied); NPY_NO_EXPORT PyObject * _discover_array_parameters(PyObject *NPY_UNUSED(self), diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 3001f84edf05..5139bc8b4f00 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -251,7 +251,7 @@ PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object) */ ndim = PyArray_DiscoverDTypeAndShape(src_object, PyArray_NDIM(dest), dims, &cache, - NPY_DTYPE(PyArray_DESCR(dest)), PyArray_DESCR(dest), &dtype, 1); + NPY_DTYPE(PyArray_DESCR(dest)), PyArray_DESCR(dest), &dtype, 1, NULL); if (ndim < 0) { return -1; } diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index c7fcbd42b46a..655122ff7f09 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -119,7 +119,7 @@ PyArray_DTypeFromObject(PyObject *obj, int maxdims, PyArray_Descr **out_dtype) int ndim; ndim = PyArray_DiscoverDTypeAndShape( - obj, maxdims, shape, &cache, NULL, NULL, out_dtype, 1); + obj, maxdims, shape, &cache, NULL, NULL, out_dtype, 1, NULL); if (ndim < 0) { return -1; } diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index b5871c8b04f8..1c98ca33e2b6 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1429,6 +1429,8 @@ _array_from_buffer_3118(PyObject *memoryview) * @param writeable whether the result must be writeable. * @param context Unused parameter, must be NULL (should be removed later). * @param copy Specifies the copy behavior. + * @param was_copied Set to 1 if it can be assumed that a copy was made + * by implementor. * * @returns The array object, Py_NotImplemented if op is not array-like, * or NULL with an error set. 
(A new reference to Py_NotImplemented @@ -1437,7 +1439,7 @@ _array_from_buffer_3118(PyObject *memoryview) NPY_NO_EXPORT PyObject * _array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype, npy_bool writeable, PyObject *context, - int copy) { + int copy, int *was_copied) { PyObject* tmp; /* @@ -1485,7 +1487,7 @@ _array_from_array_like(PyObject *op, } if (tmp == Py_NotImplemented) { - tmp = PyArray_FromArrayAttr_int(op, requested_dtype, copy); + tmp = PyArray_FromArrayAttr_int(op, requested_dtype, copy, was_copied); if (tmp == NULL) { return NULL; } @@ -1572,13 +1574,16 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, // Default is copy = None int copy = -1; + int was_copied = 0; if (flags & NPY_ARRAY_ENSURENOCOPY) { copy = 0; + } else if (flags & NPY_ARRAY_ENSURECOPY) { + copy = 1; } ndim = PyArray_DiscoverDTypeAndShape( - op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, copy); + op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, copy, &was_copied); if (ndim < 0) { return NULL; @@ -1615,6 +1620,10 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, assert(cache->converted_obj == op); arr = (PyArrayObject *)(cache->arr_or_sequence); /* we may need to cast or assert flags (e.g. copy) */ + if (was_copied == 1 && flags & NPY_ARRAY_ENSURECOPY) { + flags = flags & ~NPY_ARRAY_ENSURECOPY; + flags = flags | NPY_ARRAY_ENSURENOCOPY; + } PyObject *res = PyArray_FromArray(arr, dtype, flags); npy_unlink_coercion_cache(cache); return res; @@ -1937,7 +1946,7 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) } if (copy) { - if (flags & NPY_ARRAY_ENSURENOCOPY ) { + if (flags & NPY_ARRAY_ENSURENOCOPY) { PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); Py_DECREF(newtype); return NULL; @@ -2486,12 +2495,14 @@ check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames) * NOTE: For copy == -1 it passes `op.__array__(copy=None)`, * for copy == 0, `op.__array__(copy=False)`, and * for copy == 1, `op.__array__(copy=True). + * @param was_copied Set to 1 if it can be assumed that a copy was made + * by implementor. * @returns NotImplemented if `__array__` is not defined or a NumPy array * (or subclass). On error, return NULL. */ NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr_int( - PyObject *op, PyArray_Descr *descr, int copy) + PyObject *op, PyArray_Descr *descr, int copy, int *was_copied) { PyObject *new; PyObject *array_meth; @@ -2578,10 +2589,11 @@ PyArray_FromArrayAttr_int( Py_DECREF(new); return NULL; } - if (must_copy_but_copy_kwarg_unimplemented) { - /* TODO: As of NumPy 2.0 this path is only reachable by C-API. 
*/ - Py_SETREF(new, PyArray_NewCopy((PyArrayObject *)new, NPY_KEEPORDER)); + if (was_copied != NULL && copy == 1 && must_copy_but_copy_kwarg_unimplemented == 0) { + /* We can assume that a copy was made */ + *was_copied = 1; } + return new; } @@ -2596,7 +2608,7 @@ PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context) return NULL; } - return PyArray_FromArrayAttr_int(op, typecode, 0); + return PyArray_FromArrayAttr_int(op, typecode, 0, NULL); } diff --git a/numpy/_core/src/multiarray/ctors.h b/numpy/_core/src/multiarray/ctors.h index fa1cd72e1478..a629a4ee1c44 100644 --- a/numpy/_core/src/multiarray/ctors.h +++ b/numpy/_core/src/multiarray/ctors.h @@ -54,7 +54,7 @@ PyArray_New( NPY_NO_EXPORT PyObject * _array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype, npy_bool writeable, PyObject *context, - int copy); + int copy, int *was_copied); NPY_NO_EXPORT PyObject * PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, @@ -85,7 +85,7 @@ PyArray_FromInterface(PyObject *input); NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr_int( - PyObject *op, PyArray_Descr *descr, int copy); + PyObject *op, PyArray_Descr *descr, int copy, int *was_copied); NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 726e8d8252a8..a88873fb7fc5 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -54,7 +54,9 @@ def __init__(self, a): self.a = a def __array__(self, dtype=None, copy=None): - return self.a + if dtype is None: + return self.a + return self.a.astype(dtype) yield param(ArrayDunder, id="__array__") diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index e0b8593604a2..896c7dfb23ba 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -8452,10 +8452,9 @@ def __array__(self, dtype=None, copy=None): for copy in self.true_vals: res = np.array(arr, copy=copy) assert_array_equal(res, base_arr) - # An additional copy is currently forced by numpy in this case, - # you could argue, numpy does not trust the ArrayLike. This - # may be open for change: - assert res is not base_arr + # An additional copy is no longer forced by NumPy in this case. + # NumPy trusts the ArrayLike made a copy: + assert res is base_arr for copy in self.if_needed_vals + self.false_vals: res = np.array(arr, copy=copy) @@ -8488,9 +8487,11 @@ def __array__(self, dtype=None): assert_array_equal(arr, base_arr) assert arr is base_arr - # As of NumPy 2, explicitly passing copy=True does not trigger passing - # it to __array__ (deprecation warning is not triggered). - arr = np.array(a, copy=True) + # As of NumPy 2.1, explicitly passing copy=True does trigger passing + # it to __array__ (deprecation warning is triggered). 
+ with pytest.warns(DeprecationWarning, + match="__array__.*should implement.*'copy'"): + arr = np.array(a, copy=True) assert_array_equal(arr, base_arr) assert arr is not base_arr @@ -8501,10 +8502,41 @@ def __array__(self, dtype=None): match=r"Unable to avoid copy(.|\n)*numpy_2_0_migration_guide.html"): np.array(a, copy=False) + @pytest.mark.skipif(IS_PYPY, reason="PyPy copies differently") + def test___array__copy_once(self): + size = 100 + base_arr = np.zeros((size, size)) + copy_arr = np.zeros((size, size)) + + class ArrayRandom: + def __init__(self): + self.true_passed = False + + def __array__(self, dtype=None, copy=None): + if copy: + self.true_passed = True + return copy_arr + else: + return base_arr + + arr_random = ArrayRandom() + first_copy = np.array(arr_random, copy=True) + assert arr_random.true_passed + assert first_copy is copy_arr + + arr_random = ArrayRandom() + no_copy = np.array(arr_random, copy=False) + assert not arr_random.true_passed + assert no_copy is base_arr + + arr_random = ArrayRandom() + _ = np.array([arr_random], copy=True) + assert not arr_random.true_passed + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test__array__reference_leak(self): class NotAnArray: - def __array__(self): + def __array__(self, dtype=None, copy=None): raise NotImplementedError() x = NotAnArray() diff --git a/numpy/_core/tests/test_protocols.py b/numpy/_core/tests/test_protocols.py index 7cab1223bfe1..1709629fa89b 100644 --- a/numpy/_core/tests/test_protocols.py +++ b/numpy/_core/tests/test_protocols.py @@ -35,8 +35,8 @@ def test_array_called(): class Wrapper: val = '0' * 100 - def __array__(self, result=None, copy=None): - return np.array([self.val], dtype=object) + def __array__(self, dtype=None, copy=None): + return np.array([self.val], dtype=dtype, copy=copy) wrapped = Wrapper() From ea983c6dd2612b6e9a313942a13a03c08233faf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 10 Apr 2024 16:58:09 +0200 Subject: [PATCH 179/980] Adjust was_copied variable names --- numpy/_core/src/multiarray/array_coercion.c | 20 +++++++------- numpy/_core/src/multiarray/array_coercion.h | 2 +- numpy/_core/src/multiarray/ctors.c | 29 ++++++++++++--------- numpy/_core/src/multiarray/ctors.h | 6 ++--- numpy/_core/tests/test_multiarray.py | 1 - 5 files changed, 30 insertions(+), 28 deletions(-) diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index 3d4174c0a87f..1b36deca95c2 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -99,7 +99,7 @@ enum _dtype_discovery_flags { DISCOVER_TUPLES_AS_ELEMENTS = 1 << 4, MAX_DIMS_WAS_REACHED = 1 << 5, DESCRIPTOR_WAS_SET = 1 << 6, - COPY_WAS_CREATED = 1 << 7, + COPY_WAS_CREATED_BY__ARRAY__ = 1 << 7, }; @@ -1028,9 +1028,9 @@ PyArray_DiscoverDTypeAndShape_Recursive( /* __array__ may be passed the requested descriptor if provided */ requested_descr = *out_descr; } - int was_copied = 0; + int was_copied_by__array__ = 0; arr = (PyArrayObject *)_array_from_array_like(obj, - requested_descr, 0, NULL, copy, &was_copied); + requested_descr, 0, NULL, copy, &was_copied_by__array__); if (arr == NULL) { return -1; } @@ -1038,8 +1038,8 @@ PyArray_DiscoverDTypeAndShape_Recursive( Py_DECREF(arr); arr = NULL; } - if (was_copied == 1) { - *flags |= COPY_WAS_CREATED; + if (was_copied_by__array__ == 1) { + *flags |= COPY_WAS_CREATED_BY__ARRAY__; } } if (arr != NULL) { @@ -1231,8 +1231,8 @@ 
PyArray_DiscoverDTypeAndShape_Recursive( * to choose a default. * @param copy Specifies the copy behavior. -1 is corresponds to copy=None, * 0 to copy=False, and 1 to copy=True in the Python API. - * @param was_copied Set to 1 if it can be assumed that a copy was made - * by implementor. + * @param was_copied_by__array__ Set to 1 if it can be assumed that a copy was + * made by implementor. * @return dimensions of the discovered object or -1 on error. * WARNING: If (and only if) the output is a single array, the ndim * returned _can_ exceed the maximum allowed number of dimensions. @@ -1245,7 +1245,7 @@ PyArray_DiscoverDTypeAndShape( npy_intp out_shape[NPY_MAXDIMS], coercion_cache_obj **coercion_cache, PyArray_DTypeMeta *fixed_DType, PyArray_Descr *requested_descr, - PyArray_Descr **out_descr, int copy, int *was_copied) + PyArray_Descr **out_descr, int copy, int *was_copied_by__array__) { coercion_cache_obj **coercion_cache_head = coercion_cache; *coercion_cache = NULL; @@ -1298,8 +1298,8 @@ PyArray_DiscoverDTypeAndShape( goto fail; } - if (was_copied != NULL && flags & COPY_WAS_CREATED) { - *was_copied = 1; + if (was_copied_by__array__ != NULL && flags & COPY_WAS_CREATED_BY__ARRAY__) { + *was_copied_by__array__ = 1; } if (NPY_UNLIKELY(flags & FOUND_RAGGED_ARRAY)) { diff --git a/numpy/_core/src/multiarray/array_coercion.h b/numpy/_core/src/multiarray/array_coercion.h index 0fca0702641b..d8f72903a67c 100644 --- a/numpy/_core/src/multiarray/array_coercion.h +++ b/numpy/_core/src/multiarray/array_coercion.h @@ -40,7 +40,7 @@ PyArray_DiscoverDTypeAndShape( npy_intp out_shape[NPY_MAXDIMS], coercion_cache_obj **coercion_cache, PyArray_DTypeMeta *fixed_DType, PyArray_Descr *requested_descr, - PyArray_Descr **out_descr, int copy, int *was_copied); + PyArray_Descr **out_descr, int copy, int *was_copied_by__array__); NPY_NO_EXPORT PyObject * _discover_array_parameters(PyObject *NPY_UNUSED(self), diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 1c98ca33e2b6..163f63f8d2cf 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1429,8 +1429,8 @@ _array_from_buffer_3118(PyObject *memoryview) * @param writeable whether the result must be writeable. * @param context Unused parameter, must be NULL (should be removed later). * @param copy Specifies the copy behavior. - * @param was_copied Set to 1 if it can be assumed that a copy was made - * by implementor. + * @param was_copied_by__array__ Set to 1 if it can be assumed that a copy + * was made by implementor. * * @returns The array object, Py_NotImplemented if op is not array-like, * or NULL with an error set. 
(A new reference to Py_NotImplemented @@ -1439,7 +1439,7 @@ _array_from_buffer_3118(PyObject *memoryview) NPY_NO_EXPORT PyObject * _array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype, npy_bool writeable, PyObject *context, - int copy, int *was_copied) { + int copy, int *was_copied_by__array__) { PyObject* tmp; /* @@ -1487,7 +1487,8 @@ _array_from_array_like(PyObject *op, } if (tmp == Py_NotImplemented) { - tmp = PyArray_FromArrayAttr_int(op, requested_dtype, copy, was_copied); + tmp = PyArray_FromArrayAttr_int( + op, requested_dtype, copy, was_copied_by__array__); if (tmp == NULL) { return NULL; } @@ -1574,7 +1575,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, // Default is copy = None int copy = -1; - int was_copied = 0; + int was_copied_by__array__ = 0; if (flags & NPY_ARRAY_ENSURENOCOPY) { copy = 0; @@ -1583,7 +1584,8 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, } ndim = PyArray_DiscoverDTypeAndShape( - op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, copy, &was_copied); + op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, + copy, &was_copied_by__array__); if (ndim < 0) { return NULL; @@ -1620,7 +1622,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, assert(cache->converted_obj == op); arr = (PyArrayObject *)(cache->arr_or_sequence); /* we may need to cast or assert flags (e.g. copy) */ - if (was_copied == 1 && flags & NPY_ARRAY_ENSURECOPY) { + if (was_copied_by__array__ == 1 && flags & NPY_ARRAY_ENSURECOPY) { flags = flags & ~NPY_ARRAY_ENSURECOPY; flags = flags | NPY_ARRAY_ENSURENOCOPY; } @@ -2495,14 +2497,14 @@ check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames) * NOTE: For copy == -1 it passes `op.__array__(copy=None)`, * for copy == 0, `op.__array__(copy=False)`, and * for copy == 1, `op.__array__(copy=True). - * @param was_copied Set to 1 if it can be assumed that a copy was made - * by implementor. + * @param was_copied_by__array__ Set to 1 if it can be assumed that a copy + * was made by implementor. * @returns NotImplemented if `__array__` is not defined or a NumPy array * (or subclass). On error, return NULL. 
*/ NPY_NO_EXPORT PyObject * -PyArray_FromArrayAttr_int( - PyObject *op, PyArray_Descr *descr, int copy, int *was_copied) +PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, + int *was_copied_by__array__) { PyObject *new; PyObject *array_meth; @@ -2589,9 +2591,10 @@ PyArray_FromArrayAttr_int( Py_DECREF(new); return NULL; } - if (was_copied != NULL && copy == 1 && must_copy_but_copy_kwarg_unimplemented == 0) { + if (was_copied_by__array__ != NULL && copy == 1 && + must_copy_but_copy_kwarg_unimplemented == 0) { /* We can assume that a copy was made */ - *was_copied = 1; + *was_copied_by__array__ = 1; } return new; diff --git a/numpy/_core/src/multiarray/ctors.h b/numpy/_core/src/multiarray/ctors.h index a629a4ee1c44..094589968b66 100644 --- a/numpy/_core/src/multiarray/ctors.h +++ b/numpy/_core/src/multiarray/ctors.h @@ -54,7 +54,7 @@ PyArray_New( NPY_NO_EXPORT PyObject * _array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype, npy_bool writeable, PyObject *context, - int copy, int *was_copied); + int copy, int *was_copied_by__array__); NPY_NO_EXPORT PyObject * PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, @@ -84,8 +84,8 @@ NPY_NO_EXPORT PyObject * PyArray_FromInterface(PyObject *input); NPY_NO_EXPORT PyObject * -PyArray_FromArrayAttr_int( - PyObject *op, PyArray_Descr *descr, int copy, int *was_copied); +PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, + int *was_copied_by__array__); NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 896c7dfb23ba..7252a21ce331 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -8502,7 +8502,6 @@ def __array__(self, dtype=None): match=r"Unable to avoid copy(.|\n)*numpy_2_0_migration_guide.html"): np.array(a, copy=False) - @pytest.mark.skipif(IS_PYPY, reason="PyPy copies differently") def test___array__copy_once(self): size = 100 base_arr = np.zeros((size, size)) From 6c7b6f20cd791b9df8000fe92f31fb669db8b1ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Thu, 11 Apr 2024 12:06:04 +0200 Subject: [PATCH 180/980] Apply review comments --- numpy/_core/src/multiarray/ctors.c | 4 ++-- numpy/_core/tests/test_multiarray.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 163f63f8d2cf..519e9888f613 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1622,9 +1622,8 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, assert(cache->converted_obj == op); arr = (PyArrayObject *)(cache->arr_or_sequence); /* we may need to cast or assert flags (e.g. 
copy) */ - if (was_copied_by__array__ == 1 && flags & NPY_ARRAY_ENSURECOPY) { + if (was_copied_by__array__ == 1) { flags = flags & ~NPY_ARRAY_ENSURECOPY; - flags = flags | NPY_ARRAY_ENSURENOCOPY; } PyObject *res = PyArray_FromArray(arr, dtype, flags); npy_unlink_coercion_cache(cache); @@ -2591,6 +2590,7 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, Py_DECREF(new); return NULL; } + /* TODO: Remove was_copied_by__array__ argument */ if (was_copied_by__array__ != NULL && copy == 1 && must_copy_but_copy_kwarg_unimplemented == 0) { /* We can assume that a copy was made */ diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 7252a21ce331..04084c35af2c 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -8532,6 +8532,11 @@ def __array__(self, dtype=None, copy=None): _ = np.array([arr_random], copy=True) assert not arr_random.true_passed + arr_random = ArrayRandom() + second_copy = np.array(arr_random, copy=True, order="F") + assert arr_random.true_passed + assert not second_copy is copy_arr + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test__array__reference_leak(self): class NotAnArray: From 3549902ecb6b550c89c840604082bc6b84f64d6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Thu, 11 Apr 2024 12:08:15 +0200 Subject: [PATCH 181/980] linting --- numpy/_core/tests/test_multiarray.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 04084c35af2c..99cb9453c6ae 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -8490,7 +8490,7 @@ def __array__(self, dtype=None): # As of NumPy 2.1, explicitly passing copy=True does trigger passing # it to __array__ (deprecation warning is triggered). 
with pytest.warns(DeprecationWarning,
-                match="__array__.*should implement.*'copy'"):
+                match="__array__.*must implement.*'copy'"):
             arr = np.array(a, copy=True)
         assert_array_equal(arr, base_arr)
         assert arr is not base_arr
@@ -8535,7 +8535,7 @@ def __array__(self, dtype=None, copy=None):
         arr_random = ArrayRandom()
         second_copy = np.array(arr_random, copy=True, order="F")
         assert arr_random.true_passed
-        assert not second_copy is copy_arr
+        assert second_copy is not copy_arr
 
From 1b9cf8bb512c5cd9d287892c05d1fece8b3a163b Mon Sep 17 00:00:00 2001
From: Matti Picus
Date: Fri, 12 Apr 2024 00:04:05 +1000
Subject: [PATCH 182/980] use 0.3.27.0.1

---
 requirements/ci32_requirements.txt | 2 +-
 requirements/ci_requirements.txt   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt
index 796f5e549c7a..0b7d0e63ff33 100644
--- a/requirements/ci32_requirements.txt
+++ b/requirements/ci32_requirements.txt
@@ -1,3 +1,3 @@
 spin
 # Keep this in sync with ci_requirements.txt
-scipy-openblas32==0.3.27.0.0
+scipy-openblas32==0.3.27.0.1
diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt
index 9ca49dcd9884..dd2725e6dba4 100644
--- a/requirements/ci_requirements.txt
+++ b/requirements/ci_requirements.txt
@@ -1,4 +1,4 @@
 spin
 # Keep this in sync with ci32_requirements.txt
-scipy-openblas32==0.3.27.0.0
-scipy-openblas64==0.3.27.0.0
+scipy-openblas32==0.3.27.0.1
+scipy-openblas64==0.3.27.0.1

From 97356bc6f0d6538389a9eef475d883a0f4024c2a Mon Sep 17 00:00:00 2001
From: Leo Singer
Date: Thu, 11 Apr 2024 20:27:19 -0400
Subject: [PATCH 183/980] MAINT: fix typo

---
 numpy/_core/include/numpy/npy_2_compat.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h
index 4e56f5678c0a..50e637f79223 100644
--- a/numpy/_core/include/numpy/npy_2_compat.h
+++ b/numpy/_core/include/numpy/npy_2_compat.h
@@ -46,7 +46,7 @@
     #error "The NumPy 2 compat header requires `import_array()` for which " \
           "the `ndarraytypes.h` header include is not sufficient. Please " \
           "include it after `numpy/ndarrayobject.h` or similar.\n" \
-          "To simplify includsion, you may use `PyArray_ImportNumPy()` " \
+          "To simplify inclusion, you may use `PyArray_ImportNumPy()` " \
           "which is defined in the compat header and is lightweight (can be)."
 #endif

From d6d293c9d277af83059ca79e4ff655f288904838 Mon Sep 17 00:00:00 2001
From: Leo Singer
Date: Thu, 11 Apr 2024 20:35:20 -0400
Subject: [PATCH 184/980] MAINT: fix typo in #include example

---
 doc/source/reference/c-api/array.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 1e67f97917d5..8de6839f83b5 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -3869,7 +3869,7 @@ the C-API is needed then some additional steps must be taken.
 
    .. code-block:: c
 
       #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API
-      #include numpy/arrayobject.h
+      #include <numpy/arrayobject.h>
 
    On the other hand, coolhelper.c would contain at the top:
 
@@ -3877,7 +3877,7 @@
      #define NO_IMPORT_ARRAY
      #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API
-     #include numpy/arrayobject.h
+     #include <numpy/arrayobject.h>
 
   You can also put the common two last lines into an extension-local
  header file as long as you make sure that NO_IMPORT_ARRAY is

From f7312ad27a3c936e73ac14386c8a8827827eda78 Mon Sep 17 00:00:00 2001
From: Matthias Bussonnier
Date: Fri, 12 Apr 2024 10:20:42 +0200
Subject: [PATCH 185/980] MAINT: Update URL in nep 0014 - domain change

See https://github.com/python3statement/python3statement.github.io/issues/292

To avoid having to pay for the domain indefinitely the devs are now
redirecting toward github pages. Be proactive and start switching domains.

---
 doc/neps/nep-0014-dropping-python2.7-proposal.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/neps/nep-0014-dropping-python2.7-proposal.rst b/doc/neps/nep-0014-dropping-python2.7-proposal.rst
index e14a173e2032..e08c3caf0ddc 100644
--- a/doc/neps/nep-0014-dropping-python2.7-proposal.rst
+++ b/doc/neps/nep-0014-dropping-python2.7-proposal.rst
@@ -52,6 +52,6 @@ to Python3 only, see the python3-statement_.
 For more information on porting your code to run on Python 3, see
 the python3-howto_.
 
-.. _python3-statement: https://python3statement.org/
+.. _python3-statement: https://python3statement.github.io/
 .. _python3-howto: https://docs.python.org/3/howto/pyporting.html

From 9796349b455b95a99a7083a71fea8e204c260cb5 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Fri, 12 Apr 2024 08:43:57 -0600
Subject: [PATCH 186/980] BUG: ensure np.vectorize doesn't truncate fixed-width strings

---
 numpy/_core/tests/test_regression.py | 6 ++++++
 numpy/lib/_function_base_impl.py     | 8 +++++++-
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py
index 17d589af582a..826415f9ce00 100644
--- a/numpy/_core/tests/test_regression.py
+++ b/numpy/_core/tests/test_regression.py
@@ -2614,3 +2614,9 @@ def test_logspace_base_does_not_determine_dtype(self):
                                  base=np.array([10.0]))
         with pytest.raises(AssertionError, match="not almost equal"):
             assert_almost_equal(out2, expected)
+
+    def test_vectorize_fixed_width_string(self):
+        arr = np.array(["SOme wOrd DŽ ß ᾛ ΣΣ ffi⁵Å Ç Ⅰ"]).astype(np.str_)
+        f = str.casefold
+        res = np.vectorize(f, otypes=[arr.dtype])(arr)
+        assert res.dtype == "U30"
diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py
index 436495e7f3fb..ce661155dc4f 100644
--- a/numpy/lib/_function_base_impl.py
+++ b/numpy/lib/_function_base_impl.py
@@ -2132,6 +2132,12 @@ def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes,
     return arrays
 
 
+def _get_vectorize_dtype(dtype):
+    if dtype.char in "SU":
+        return dtype.char
+    return dtype
+
+
 @set_module('numpy')
 class vectorize:
     """
@@ -2330,7 +2336,7 @@ def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None,
                 if char not in typecodes['All']:
                     raise ValueError("Invalid otype specified: %s" % (char,))
         elif iterable(otypes):
-            otypes = [_nx.dtype(x) for x in otypes]
+            otypes = [_get_vectorize_dtype(_nx.dtype(x)) for x in otypes]
         elif otypes is not None:
             raise ValueError("Invalid otype specification")
         self.otypes = otypes

From 229de79186e7c6ff650462c896cb0a95c76311e1 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Thu, 11 Apr 2024 13:48:23 -0600
Subject: [PATCH 187/980] MNT: fix copy/paste error for NA type extracted from
 Pandas

---
 numpy/_core/tests/_natype.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/numpy/_core/tests/_natype.py b/numpy/_core/tests/_natype.py
index 07a8fc474c36..e529e548cf1e 100644
--- a/numpy/_core/tests/_natype.py
+++ b/numpy/_core/tests/_natype.py
@@ -16,7 +16,7 @@ def method(self, other):
             other is pd_NA
             or isinstance(other, (str, bytes))
             or isinstance(other, (numbers.Number, np.bool))
-            or util.is_array(other)
+            or isinstance(other, np.ndarray)
             and not other.shape
         ):
             # Need the other.shape clause to handle NumPy scalars,
@@ -27,7 +27,7 @@ def method(self, other):
             else:
                 return pd_NA
 
-        elif util.is_array(other):
+        elif isinstance(other, np.ndarray):
             out = np.empty(other.shape, dtype=object)
             out[:] = pd_NA
 
@@ -36,14 +36,14 @@ def method(self, other):
             else:
                 return out
 
-        elif is_cmp and isinstance(other, (date, time, timedelta)):
+        elif is_cmp and isinstance(other, (np.datetime64, np.timedelta64)):
             return pd_NA
 
-        elif isinstance(other, date):
+        elif isinstance(other, np.datetime64):
             if name in ["__sub__", "__rsub__"]:
                 return pd_NA
 
-        elif isinstance(other, timedelta):
+        elif isinstance(other, np.timedelta64):
             if name in ["__sub__", "__rsub__", "__add__", "__radd__"]:
                 return pd_NA
 
From 9817861a67241e7f1a71c1c4dc3c835aad32dfe4 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Thu, 11 Apr 2024 13:54:49 -0600
Subject: [PATCH 188/980] ENH: introduce 'compatible' stringdtype instances

---
 doc/neps/nep-0055-string_dtype.rst            | 32 +++-
 .../_core/src/multiarray/stringdtype/dtype.c  | 35 ++--
 .../_core/src/multiarray/stringdtype/dtype.h  |  3 +
 numpy/_core/src/umath/stringdtype_ufuncs.cpp  | 154 +++++-------------
 numpy/_core/tests/test_stringdtype.py         | 57 ++++++-
 5 files changed, 145 insertions(+), 136 deletions(-)

diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst
index 6417898d2ec6..2e3f3cbf03c4 100644
--- a/doc/neps/nep-0055-string_dtype.rst
+++ b/doc/neps/nep-0055-string_dtype.rst
@@ -534,11 +534,33 @@ future NumPy or a downstream library may add locale-aware sorting, case
 folding, and normalization for NumPy unicode strings arrays, but we are not
 proposing adding these features at this time.
 
-Two ``StringDType`` instances are considered identical if they are created with
-the same ``na_object`` and ``coerce`` parameter. We propose checking for unequal
-``StringDType`` instances in the ``resolve_descriptors`` function of binary
-ufuncs that take two string arrays and raising an error if an operation is
-performed with unequal ``StringDType`` instances.
+Two ``StringDType`` instances are considered equal if they are created with the
+same ``na_object`` and ``coerce`` parameter. For ufuncs that accept more than
+one string argument we also introduce the concept of "compatible"
+``StringDType`` instances. We allow distinct DType instances to be used in ufunc
+operations together if they have the same ``na_object`` or if only one
+or the other DType has an ``na_object`` explicitly set. We do not consider
+string coercion for determining whether instances are compatible, although if
+the result of the operation is a string, the result will inherit the stricter
+string coercion setting of the original operands.
+
+This notion of "compatible" instances will be enforced in the
+``resolve_descriptors`` function of binary ufuncs. This choice makes it easier
+to work with non-default ``StringDType`` instances, because python strings are
+coerced to the default ``StringDType`` instance, so the following idiomatic
+expression is allowed::
+
+  >>> arr = np.array(["hello", "world"], dtype=StringDType(na_object=None))
+  >>> arr + "!"
+ array(['hello!', 'world!'], dtype=StringDType(na_object=None)) + +If we only considered equality of ``StringDType`` instances, this would +be an error, making for an awkward user experience. If the operands have +distinct ``na_object`` settings, NumPy will raise an error because the choice +for the result DType is ambiguous:: + + >>> arr + np.array("!", dtype=StringDType(na_object="")) + TypeError: Cannot find common instance for incompatible dtype instances ``np.strings`` namespace ************************ diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index f5488db87e5e..c09d09cf4b41 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -144,14 +144,21 @@ new_stringdtype_instance(PyObject *na_object, int coerce) return NULL; } -static int -na_eq_cmp(PyObject *a, PyObject *b) { +NPY_NO_EXPORT int +na_eq_cmp(PyObject *a, PyObject *b, int coerce_nulls) { if (a == b) { // catches None and other singletons like Pandas.NA return 1; } if (a == NULL || b == NULL) { - return 0; + if (coerce_nulls) { + // an object with an explictly set NA object is considered + // compatible for binary operations to one with no explicitly set NA + return 1; + } + else { + return 0; + } } if (PyFloat_Check(a) && PyFloat_Check(b)) { // nan check catches np.nan and float('nan') @@ -182,7 +189,7 @@ _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona) if (scoerce != ocoerce) { return 0; } - return na_eq_cmp(sna, ona); + return na_eq_cmp(sna, ona, 0); } /* @@ -190,21 +197,21 @@ _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona) * with a mix of different dtypes (for example when creating an array * from a list of scalars). */ -static PyArray_StringDTypeObject * +NPY_NO_EXPORT PyArray_StringDTypeObject * common_instance(PyArray_StringDTypeObject *dtype1, PyArray_StringDTypeObject *dtype2) { - int eq = _eq_comparison(dtype1->coerce, dtype2->coerce, dtype1->na_object, - dtype2->na_object); + int eq = na_eq_cmp(dtype1->na_object, dtype2->na_object, 1); if (eq <= 0) { PyErr_SetString( - PyExc_ValueError, - "Cannot find common instance for unequal dtype instances"); + PyExc_TypeError, + "Cannot find common instance for incompatible dtype instances"); return NULL; } return (PyArray_StringDTypeObject *)new_stringdtype_instance( - dtype1->na_object, dtype1->coerce); + dtype1->na_object != NULL ? dtype1->na_object : dtype2->na_object, + !((dtype1->coerce == 0) || (dtype2->coerce == 0))); } /* @@ -280,7 +287,7 @@ stringdtype_setitem(PyArray_StringDTypeObject *descr, PyObject *obj, char **data { npy_packed_static_string *sdata = (npy_packed_static_string *)dataptr; - int is_cmp = 0; + int na_cmp = 0; // borrow reference PyObject *na_object = descr->na_object; @@ -294,8 +301,8 @@ stringdtype_setitem(PyArray_StringDTypeObject *descr, PyObject *obj, char **data // so we do the comparison before acquiring the allocator. 
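The compatibility rule spelled out in the NEP text above can be exercised directly from Python. A short sketch, assuming a NumPy build that includes this change::

    import numpy as np
    from numpy.dtypes import StringDType

    arr = np.array(["hello", "world"], dtype=StringDType(na_object=None))

    # Python strings coerce to the default StringDType instance, which has
    # no explicitly set na_object, so mixing it with arr is allowed and the
    # result keeps na_object=None:
    print(arr + "!")

    # Two instances with *different* explicitly set na_objects are
    # ambiguous, so the operation raises TypeError:
    other = np.array(["!", "?"], dtype=StringDType(na_object=""))
    try:
        arr + other
    except TypeError as exc:
        print(exc)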
if (na_object != NULL) { - is_cmp = na_eq_cmp(obj, na_object); - if (is_cmp == -1) { + na_cmp = na_eq_cmp(obj, na_object, 1); + if (na_cmp == -1) { return -1; } } @@ -303,7 +310,7 @@ stringdtype_setitem(PyArray_StringDTypeObject *descr, PyObject *obj, char **data npy_string_allocator *allocator = NpyString_acquire_allocator(descr); if (na_object != NULL) { - if (is_cmp) { + if (na_cmp) { if (NpyString_pack_null(allocator, sdata) < 0) { PyErr_SetString(PyExc_MemoryError, "Failed to pack null string during StringDType " diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.h b/numpy/_core/src/multiarray/stringdtype/dtype.h index 278513fe8f12..3fe196a5d358 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.h +++ b/numpy/_core/src/multiarray/stringdtype/dtype.h @@ -49,6 +49,9 @@ stringdtype_finalize_descr(PyArray_Descr *dtype); NPY_NO_EXPORT int _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona); +NPY_NO_EXPORT PyArray_StringDTypeObject * +common_instance(PyArray_StringDTypeObject *dtype1, PyArray_StringDTypeObject *dtype2); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index de6ac4ebdfa3..1d9482e50c4d 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -246,20 +246,9 @@ binary_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; + PyArray_StringDTypeObject *common_descr = common_instance(descr1, descr2); - // _eq_comparison has a short-circuit pointer comparison fast path, - // so no need to check here - int eq_res = _eq_comparison(descr1->coerce, descr2->coerce, - descr1->na_object, descr2->na_object); - - if (eq_res < 0) { - return (NPY_CASTING)-1; - } - - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "Can only do binary operations with equal StringDType " - "instances."); + if (common_descr == NULL) { return (NPY_CASTING)-1; } @@ -272,8 +261,7 @@ binary_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), if (given_descrs[2] == NULL) { out_descr = (PyArray_Descr *)new_stringdtype_instance( - ((PyArray_StringDTypeObject *)given_descrs[1])->na_object, - ((PyArray_StringDTypeObject *)given_descrs[1])->coerce); + common_descr->na_object, common_descr->coerce); if (out_descr == NULL) { return (NPY_CASTING)-1; @@ -562,6 +550,14 @@ string_comparison_resolve_descriptors( PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { + PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; + PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; + PyArray_StringDTypeObject *common_descr = common_instance(descr1, descr2); + + if (common_descr == NULL) { + return (NPY_CASTING)-1; + } + Py_INCREF(given_descrs[0]); loop_descrs[0] = given_descrs[0]; Py_INCREF(given_descrs[1]); @@ -788,20 +784,9 @@ string_findlike_resolve_descriptors( { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; + PyArray_StringDTypeObject *common_descr = common_instance(descr1, descr2); - // _eq_comparison has a short-circuit pointer comparison fast path, - // so no need to check here - int eq_res = _eq_comparison(descr1->coerce, 
descr2->coerce, - descr1->na_object, descr2->na_object); - - if (eq_res < 0) { - return (NPY_CASTING)-1; - } - - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "Can only do binary operations with equal StringDType " - "instances."); + if (common_descr == NULL) { return (NPY_CASTING)-1; } @@ -849,20 +834,9 @@ string_startswith_endswith_resolve_descriptors( { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; + PyArray_StringDTypeObject *common_descr = common_instance(descr1, descr2); - // _eq_comparison has a short-circuit pointer comparison fast path, so - // no need to do it here - int eq_res = _eq_comparison(descr1->coerce, descr2->coerce, - descr1->na_object, descr2->na_object); - - if (eq_res < 0) { - return (NPY_CASTING)-1; - } - - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "Can only do binary operations with equal StringDType " - "instances."); + if (common_descr == NULL) { return (NPY_CASTING)-1; } @@ -1061,46 +1035,6 @@ all_strings_promoter(PyObject *NPY_UNUSED(ufunc), return 0; } -static NPY_CASTING -strip_chars_resolve_descriptors( - struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), - PyArray_Descr *const given_descrs[], - PyArray_Descr *loop_descrs[], - npy_intp *NPY_UNUSED(view_offset)) -{ - Py_INCREF(given_descrs[0]); - loop_descrs[0] = given_descrs[0]; - - // we don't actually care about the null behavior of the second argument, - // so no need to check if the first two descrs are equal like in - // binary_resolve_descriptors - - Py_INCREF(given_descrs[1]); - loop_descrs[1] = given_descrs[1]; - - PyArray_Descr *out_descr = NULL; - - if (given_descrs[2] == NULL) { - out_descr = (PyArray_Descr *)new_stringdtype_instance( - ((PyArray_StringDTypeObject *)given_descrs[0])->na_object, - ((PyArray_StringDTypeObject *)given_descrs[0])->coerce); - - if (out_descr == NULL) { - return (NPY_CASTING)-1; - } - } - else { - Py_INCREF(given_descrs[2]); - out_descr = given_descrs[2]; - } - - loop_descrs[2] = out_descr; - - return NPY_NO_CASTING; -} - - NPY_NO_EXPORT int string_lrstrip_chars_strided_loop( PyArrayMethod_Context *context, char *const data[], @@ -1309,21 +1243,10 @@ replace_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; PyArray_StringDTypeObject *descr3 = (PyArray_StringDTypeObject *)given_descrs[2]; - // _eq_comparison has a short-circuit pointer comparison fast path, so - // no need to do it here - int eq_res = (_eq_comparison(descr1->coerce, descr2->coerce, - descr1->na_object, descr2->na_object) && - _eq_comparison(descr1->coerce, descr3->coerce, - descr1->na_object, descr3->na_object)); - - if (eq_res < 0) { - return (NPY_CASTING)-1; - } + PyArray_StringDTypeObject *common_descr = common_instance( + common_instance(descr1, descr2), descr3); - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "String replace is only supported with equal StringDType " - "instances."); + if (common_descr == NULL) { return (NPY_CASTING)-1; } @@ -1340,8 +1263,7 @@ replace_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), if (given_descrs[4] == NULL) { out_descr = (PyArray_Descr *)new_stringdtype_instance( - ((PyArray_StringDTypeObject *)given_descrs[0])->na_object, - ((PyArray_StringDTypeObject *)given_descrs[0])->coerce); + common_descr->na_object, common_descr->coerce); if 
(out_descr == NULL) { return (NPY_CASTING)-1; @@ -1588,18 +1510,9 @@ center_ljust_rjust_resolve_descriptors( { PyArray_StringDTypeObject *input_descr = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *fill_descr = (PyArray_StringDTypeObject *)given_descrs[2]; + PyArray_StringDTypeObject *common_descr = common_instance(input_descr, fill_descr); - int eq_res = _eq_comparison(input_descr->coerce, fill_descr->coerce, - input_descr->na_object, fill_descr->na_object); - - if (eq_res < 0) { - return (NPY_CASTING)-1; - } - - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "Can only do text justification operations with equal" - "StringDType instances."); + if (common_descr == NULL) { return (NPY_CASTING)-1; } @@ -1614,8 +1527,7 @@ center_ljust_rjust_resolve_descriptors( if (given_descrs[3] == NULL) { out_descr = (PyArray_Descr *)new_stringdtype_instance( - ((PyArray_StringDTypeObject *)given_descrs[1])->na_object, - ((PyArray_StringDTypeObject *)given_descrs[1])->coerce); + common_descr->na_object, common_descr->coerce); if (out_descr == NULL) { return (NPY_CASTING)-1; @@ -1888,6 +1800,7 @@ zfill_strided_loop(PyArrayMethod_Context *context, return -1; } + static NPY_CASTING string_partition_resolve_descriptors( PyArrayMethodObject *self, @@ -1901,14 +1814,23 @@ string_partition_resolve_descriptors( "currently support the 'out' keyword", self->name); return (NPY_CASTING)-1; } - for (int i=0; i<2; i++) { - Py_INCREF(given_descrs[i]); - loop_descrs[i] = given_descrs[i]; + + PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; + PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; + PyArray_StringDTypeObject *common_descr = common_instance(descr1, descr2); + + if (common_descr == NULL) { + return (NPY_CASTING)-1; } - PyArray_StringDTypeObject *adescr = (PyArray_StringDTypeObject *)given_descrs[0]; + + Py_INCREF(given_descrs[0]); + loop_descrs[0] = given_descrs[0]; + Py_INCREF(given_descrs[1]); + loop_descrs[1] = given_descrs[1]; + for (int i=2; i<5; i++) { loop_descrs[i] = (PyArray_Descr *)new_stringdtype_instance( - adescr->na_object, adescr->coerce); + common_descr->na_object, common_descr->coerce); if (loop_descrs[i] == NULL) { return (NPY_CASTING)-1; } @@ -2655,7 +2577,7 @@ init_stringdtype_ufuncs(PyObject *umath) for (int i=0; i<3; i++) { if (init_ufunc(umath, strip_chars_names[i], strip_chars_dtypes, - &strip_chars_resolve_descriptors, + &binary_resolve_descriptors, &string_lrstrip_chars_strided_loop, 2, 1, NPY_NO_CASTING, (NPY_ARRAYMETHOD_FLAGS) 0, &strip_types[i]) < 0) { diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 10de20b4b3f6..7ece12a2a3a5 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -225,7 +225,22 @@ def test_self_casts(dtype, dtype2, strings): else: arr.astype(dtype2, casting="safe") - assert_array_equal(arr[:-1], newarr[:-1]) + if hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"): + na1 = dtype.na_object + na2 = dtype2.na_object + if na1 is na2: + assert_array_equal(arr[:-1], newarr[:-1]) + else: + # comparisons between arrays with distinct NA objects + # aren't allowed + if ((na1 is pd_NA or na2 is pd_NA or + (na1 != na2 and not ((na1 != na1) and (na2 != na2))))): + with pytest.raises(TypeError): + arr[:-1] == newarr[:-1] + else: + assert_array_equal(arr[:-1], newarr[:-1]) + else: + assert_array_equal(arr[:-1], newarr[:-1]) @pytest.mark.parametrize( @@ -1324,6 +1339,46 @@ def 
test_strip_ljust_rjust_consistency(string_array, unicode_array): ) +def test_unset_na_coercion(): + # a dtype instance with an unset na object is compatible + # with a dtype that has one set + + # this tests uses the "add" ufunc but all ufuncs that accept more + # than one string argument and produce a string should behave this way + # TODO: generalize to more ufuncs + inp = ["hello", "world"] + arr = np.array(inp, dtype=StringDType(na_object=None)) + for op_dtype in [None, StringDType(), StringDType(coerce=False), + StringDType(na_object=None)]: + if op_dtype is None: + op = "2" + else: + op = np.array("2", dtype=op_dtype) + res = arr + op + assert_array_equal(res, ["hello2", "world2"]) + + # dtype instances with distinct explicitly set NA objects are incompatible + for op_dtype in [StringDType(na_object=pd_NA), StringDType(na_object="")]: + op = np.array("2", dtype=op_dtype) + with pytest.raises(TypeError): + arr + op + + # comparisons only consider the na_object + for op_dtype in [None, StringDType(), StringDType(coerce=True), + StringDType(na_object=None)]: + if op_dtype is None: + op = inp + else: + op = np.array(inp, dtype=op_dtype) + assert_array_equal(arr, op) + + for op_dtype in [StringDType(na_object=pd_NA), + StringDType(na_object=np.nan)]: + op = np.array(inp, dtype=op_dtype) + with pytest.raises(TypeError): + arr == op + + class TestImplementation: """Check that strings are stored in the arena when possible. From b356e04665d2527d83c7efe07b42870fdbf80ba6 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 12 Apr 2024 13:56:43 -0600 Subject: [PATCH 189/980] MNT: refactor stringdtype compatibility checking out of common_instance --- .../_core/src/multiarray/stringdtype/dtype.c | 70 +++++++++++++------ .../_core/src/multiarray/stringdtype/dtype.h | 8 ++- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 59 +++++++++++----- 3 files changed, 96 insertions(+), 41 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index c09d09cf4b41..5b8da60e811f 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -145,20 +145,13 @@ new_stringdtype_instance(PyObject *na_object, int coerce) } NPY_NO_EXPORT int -na_eq_cmp(PyObject *a, PyObject *b, int coerce_nulls) { +na_eq_cmp(PyObject *a, PyObject *b) { if (a == b) { // catches None and other singletons like Pandas.NA return 1; } if (a == NULL || b == NULL) { - if (coerce_nulls) { - // an object with an explictly set NA object is considered - // compatible for binary operations to one with no explicitly set NA - return 1; - } - else { - return 0; - } + return 0; } if (PyFloat_Check(a) && PyFloat_Check(b)) { // nan check catches np.nan and float('nan') @@ -189,7 +182,42 @@ _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona) if (scoerce != ocoerce) { return 0; } - return na_eq_cmp(sna, ona, 0); + return na_eq_cmp(sna, ona); +} + +// currently this can only return 1 or -1, the latter indicating that the +// error indicator is set +NPY_NO_EXPORT int +stringdtype_compatible_na(PyObject *na1, PyObject *na2) { + if ((na1 == NULL) != (na2 == NULL)) { + return 1; + } + + int na_eq = na_eq_cmp(na1, na2); + + if (na_eq < 0) { + return -1; + } + else if (na_eq == 0) { + PyErr_Format(PyExc_TypeError, + "Cannot find a compatible null string value for " + "null strings '%R' and '%R'", na1, na2); + return -1; + } + return 1; +} + +NPY_NO_EXPORT int +stringdtype_compatible_settings(PyObject *na1, PyObject *na2, 
PyObject **out_na, + int coerce1, int coerce2, int *out_coerce) { + int compatible = stringdtype_compatible_na(na1, na2); + if (compatible == -1) { + return -1; + } + *out_na = (na1 ? na1 : na2); + *out_coerce = (coerce1 && coerce2); + + return 0; } /* @@ -197,21 +225,23 @@ _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona) * with a mix of different dtypes (for example when creating an array * from a list of scalars). */ -NPY_NO_EXPORT PyArray_StringDTypeObject * +static PyArray_StringDTypeObject * common_instance(PyArray_StringDTypeObject *dtype1, PyArray_StringDTypeObject *dtype2) { - int eq = na_eq_cmp(dtype1->na_object, dtype2->na_object, 1); - - if (eq <= 0) { - PyErr_SetString( - PyExc_TypeError, - "Cannot find common instance for incompatible dtype instances"); + int out_coerce = 1; + PyObject *out_na_object = NULL; + + if (stringdtype_compatible_settings( + dtype1->na_object, dtype2->na_object, &out_na_object, + dtype1->coerce, dtype2->coerce, &out_coerce) == -1) { + PyErr_Format(PyExc_TypeError, + "Cannot find common instance for incompatible dtypes " + "'%R' and '%R'", (PyObject *)dtype1, (PyObject *)dtype2); return NULL; } return (PyArray_StringDTypeObject *)new_stringdtype_instance( - dtype1->na_object != NULL ? dtype1->na_object : dtype2->na_object, - !((dtype1->coerce == 0) || (dtype2->coerce == 0))); + out_na_object, out_coerce); } /* @@ -301,7 +331,7 @@ stringdtype_setitem(PyArray_StringDTypeObject *descr, PyObject *obj, char **data // so we do the comparison before acquiring the allocator. if (na_object != NULL) { - na_cmp = na_eq_cmp(obj, na_object, 1); + na_cmp = na_eq_cmp(obj, na_object); if (na_cmp == -1) { return -1; } diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.h b/numpy/_core/src/multiarray/stringdtype/dtype.h index 3fe196a5d358..d187dbd4e7bb 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.h +++ b/numpy/_core/src/multiarray/stringdtype/dtype.h @@ -49,8 +49,12 @@ stringdtype_finalize_descr(PyArray_Descr *dtype); NPY_NO_EXPORT int _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona); -NPY_NO_EXPORT PyArray_StringDTypeObject * -common_instance(PyArray_StringDTypeObject *dtype1, PyArray_StringDTypeObject *dtype2); +NPY_NO_EXPORT int +stringdtype_compatible_settings(PyObject *na1, PyObject *na2, PyObject **out_na, + int coerce1, int coerce2, int *out_coerce); + +NPY_NO_EXPORT int +stringdtype_compatible_na(PyObject *na1, PyObject *na2); #ifdef __cplusplus } diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 1d9482e50c4d..a0fa85ca219e 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -246,9 +246,12 @@ binary_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - PyArray_StringDTypeObject *common_descr = common_instance(descr1, descr2); + int out_coerce = 1; + PyObject *out_na_object = NULL; - if (common_descr == NULL) { + if (stringdtype_compatible_settings( + descr1->na_object, descr2->na_object, &out_na_object, + descr1->coerce, descr2->coerce, &out_coerce) == -1) { return (NPY_CASTING)-1; } @@ -261,7 +264,7 @@ binary_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), if (given_descrs[2] == NULL) { out_descr = (PyArray_Descr *)new_stringdtype_instance( - common_descr->na_object, 
common_descr->coerce); + out_na_object, out_coerce); if (out_descr == NULL) { return (NPY_CASTING)-1; @@ -552,9 +555,8 @@ string_comparison_resolve_descriptors( { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - PyArray_StringDTypeObject *common_descr = common_instance(descr1, descr2); - if (common_descr == NULL) { + if (stringdtype_compatible_na(descr1->na_object, descr2->na_object) == -1) { return (NPY_CASTING)-1; } @@ -784,9 +786,12 @@ string_findlike_resolve_descriptors( { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - PyArray_StringDTypeObject *common_descr = common_instance(descr1, descr2); + int out_coerce = 1; + PyObject *out_na_object = NULL; - if (common_descr == NULL) { + if (stringdtype_compatible_settings( + descr1->na_object, descr2->na_object, &out_na_object, + descr1->coerce, descr2->coerce, &out_coerce) == -1) { return (NPY_CASTING)-1; } @@ -834,9 +839,12 @@ string_startswith_endswith_resolve_descriptors( { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - PyArray_StringDTypeObject *common_descr = common_instance(descr1, descr2); + int out_coerce = 1; + PyObject *out_na_object = NULL; - if (common_descr == NULL) { + if (stringdtype_compatible_settings( + descr1->na_object, descr2->na_object, &out_na_object, + descr1->coerce, descr2->coerce, &out_coerce) == -1) { return (NPY_CASTING)-1; } @@ -1242,11 +1250,18 @@ replace_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; PyArray_StringDTypeObject *descr3 = (PyArray_StringDTypeObject *)given_descrs[2]; + int out_coerce = 1; + PyObject *out_na_object = NULL; - PyArray_StringDTypeObject *common_descr = common_instance( - common_instance(descr1, descr2), descr3); + if (stringdtype_compatible_settings( + descr1->na_object, descr2->na_object, &out_na_object, + descr1->coerce, descr2->coerce, &out_coerce) == -1) { + return (NPY_CASTING)-1; + } - if (common_descr == NULL) { + if (stringdtype_compatible_settings( + out_na_object, descr3->na_object, &out_na_object, + out_coerce, descr3->coerce, &out_coerce) == -1) { return (NPY_CASTING)-1; } @@ -1263,7 +1278,7 @@ replace_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), if (given_descrs[4] == NULL) { out_descr = (PyArray_Descr *)new_stringdtype_instance( - common_descr->na_object, common_descr->coerce); + out_na_object, out_coerce); if (out_descr == NULL) { return (NPY_CASTING)-1; @@ -1510,9 +1525,12 @@ center_ljust_rjust_resolve_descriptors( { PyArray_StringDTypeObject *input_descr = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *fill_descr = (PyArray_StringDTypeObject *)given_descrs[2]; - PyArray_StringDTypeObject *common_descr = common_instance(input_descr, fill_descr); + int out_coerce = 1; + PyObject *out_na_object = NULL; - if (common_descr == NULL) { + if (stringdtype_compatible_settings( + input_descr->na_object, fill_descr->na_object, &out_na_object, + input_descr->coerce, fill_descr->coerce, &out_coerce) == -1) { return (NPY_CASTING)-1; } @@ -1527,7 +1545,7 @@ center_ljust_rjust_resolve_descriptors( 
if (given_descrs[3] == NULL) { out_descr = (PyArray_Descr *)new_stringdtype_instance( - common_descr->na_object, common_descr->coerce); + out_na_object, out_coerce); if (out_descr == NULL) { return (NPY_CASTING)-1; @@ -1817,9 +1835,12 @@ string_partition_resolve_descriptors( PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - PyArray_StringDTypeObject *common_descr = common_instance(descr1, descr2); + int out_coerce = 1; + PyObject *out_na_object = NULL; - if (common_descr == NULL) { + if (stringdtype_compatible_settings( + descr1->na_object, descr2->na_object, &out_na_object, + descr1->coerce, descr2->coerce, &out_coerce) == -1) { return (NPY_CASTING)-1; } @@ -1830,7 +1851,7 @@ string_partition_resolve_descriptors( for (int i=2; i<5; i++) { loop_descrs[i] = (PyArray_Descr *)new_stringdtype_instance( - common_descr->na_object, common_descr->coerce); + out_na_object, out_coerce); if (loop_descrs[i] == NULL) { return (NPY_CASTING)-1; } From 53a4e756a3b08b7699e2fc52bdb1f67e10f1a966 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 12 Apr 2024 14:26:36 -0600 Subject: [PATCH 190/980] MNT: refactor tortured logic in test --- numpy/_core/tests/test_stringdtype.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 7ece12a2a3a5..55fa4e18558f 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -228,19 +228,16 @@ def test_self_casts(dtype, dtype2, strings): if hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"): na1 = dtype.na_object na2 = dtype2.na_object - if na1 is na2: - assert_array_equal(arr[:-1], newarr[:-1]) - else: - # comparisons between arrays with distinct NA objects - # aren't allowed - if ((na1 is pd_NA or na2 is pd_NA or - (na1 != na2 and not ((na1 != na1) and (na2 != na2))))): - with pytest.raises(TypeError): - arr[:-1] == newarr[:-1] - else: - assert_array_equal(arr[:-1], newarr[:-1]) - else: - assert_array_equal(arr[:-1], newarr[:-1]) + if ((na1 is not na2 and + # check for pd_NA first because bool(pd_NA) is an error + ((na1 is pd_NA or na2 is pd_NA) or + # the second check is a NaN check, spelled this way + # to avoid errors from math.isnan and np.isnan + (na1 != na2 and not (na1 != na1 and na2 != na2))))): + with pytest.raises(TypeError): + arr[:-1] == newarr[:-1] + return + assert_array_equal(arr[:-1], newarr[:-1]) @pytest.mark.parametrize( From 9ba3cd373b508dbd5b58f9261c5fbd7ea7e58f64 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 12 Apr 2024 14:38:27 -0600 Subject: [PATCH 191/980] MNT: refactor stringdtype_setitem following marten's suggestion --- numpy/_core/src/multiarray/stringdtype/dtype.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 5b8da60e811f..3bcb9120a260 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -317,24 +317,16 @@ stringdtype_setitem(PyArray_StringDTypeObject *descr, PyObject *obj, char **data { npy_packed_static_string *sdata = (npy_packed_static_string *)dataptr; - int na_cmp = 0; - // borrow reference PyObject *na_object = descr->na_object; - // Note there are two different na_object != NULL checks here. - // - // Do not refactor this! 
- // // We need the result of the comparison after acquiring the allocator, but // cannot use functions requiring the GIL when the allocator is acquired, // so we do the comparison before acquiring the allocator. - if (na_object != NULL) { - na_cmp = na_eq_cmp(obj, na_object); - if (na_cmp == -1) { - return -1; - } + int na_cmp = na_eq_cmp(obj, na_object); + if (na_cmp == -1) { + return -1; } npy_string_allocator *allocator = NpyString_acquire_allocator(descr); From e723d39fb39b7a7513635e0d4eb074fd5e4e3b99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Fri, 12 Apr 2024 15:42:24 +0200 Subject: [PATCH 192/980] API: Disallow 0D input arrays in nonzero --- .../upcoming_changes/26268.expired.rst | 1 + numpy/_core/src/multiarray/item_selection.c | 43 +++---------------- numpy/_core/tests/test_deprecations.py | 7 --- numpy/_core/tests/test_numeric.py | 16 +++---- tools/ci/array-api-skips.txt | 3 -- 5 files changed, 14 insertions(+), 56 deletions(-) create mode 100644 doc/release/upcoming_changes/26268.expired.rst diff --git a/doc/release/upcoming_changes/26268.expired.rst b/doc/release/upcoming_changes/26268.expired.rst new file mode 100644 index 000000000000..932fdbfae6d7 --- /dev/null +++ b/doc/release/upcoming_changes/26268.expired.rst @@ -0,0 +1 @@ +* Scalars and 0D arrays are disallowed for `numpy.nonzero` and `numpy.ndarray.nonzero`. diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index df05e4aa8aa5..3ddc6abf2251 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -2797,6 +2797,13 @@ NPY_NO_EXPORT PyObject * PyArray_Nonzero(PyArrayObject *self) { int i, ndim = PyArray_NDIM(self); + if (ndim == 0) { + PyErr_SetString(PyExc_ValueError, + "Calling nonzero on 0d arrays is not allowed. " + "Use np.atleast_1d(scalar).nonzero() instead."); + return NULL; + } + PyArrayObject *ret = NULL; PyObject *ret_tuple; npy_intp ret_dims[2]; @@ -2818,42 +2825,6 @@ PyArray_Nonzero(PyArrayObject *self) nonzero = PyDataType_GetArrFuncs(dtype)->nonzero; needs_api = PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI); - /* Special case - nonzero(zero_d) is nonzero(atleast_1d(zero_d)) */ - if (ndim == 0) { - char const* msg; - if (PyArray_ISBOOL(self)) { - msg = - "Calling nonzero on 0d arrays is deprecated, as it behaves " - "surprisingly. Use `atleast_1d(cond).nonzero()` if the old " - "behavior was intended. If the context of this warning is of " - "the form `arr[nonzero(cond)]`, just use `arr[cond]`."; - } - else { - msg = - "Calling nonzero on 0d arrays is deprecated, as it behaves " - "surprisingly. Use `atleast_1d(arr).nonzero()` if the old " - "behavior was intended."; - } - if (DEPRECATE(msg) < 0) { - return NULL; - } - - static npy_intp const zero_dim_shape[1] = {1}; - static npy_intp const zero_dim_strides[1] = {0}; - - Py_INCREF(PyArray_DESCR(self)); /* array creation steals reference */ - PyArrayObject *self_1d = (PyArrayObject *)PyArray_NewFromDescrAndBase( - Py_TYPE(self), PyArray_DESCR(self), - 1, zero_dim_shape, zero_dim_strides, PyArray_BYTES(self), - PyArray_FLAGS(self), (PyObject *)self, (PyObject *)self); - if (self_1d == NULL) { - return NULL; - } - ret_tuple = PyArray_Nonzero(self_1d); - Py_DECREF(self_1d); - return ret_tuple; - } - /* * First count the number of non-zeros in 'self'. 
      */
diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py
index f0d4d533cd92..f0fc5645122a 100644
--- a/numpy/_core/tests/test_deprecations.py
+++ b/numpy/_core/tests/test_deprecations.py
@@ -283,13 +283,6 @@ def test_deprecate_unparsable_string(self, invalid_str):
         assert_array_equal(res, x)
 
 
-class TestNonZero(_DeprecationTestCase):
-    # 2019-05-26, 1.17.0
-    def test_zerod(self):
-        self.assert_deprecated(lambda: np.nonzero(np.array(0)))
-        self.assert_deprecated(lambda: np.nonzero(np.array(1)))
-
-
 class TestToString(_DeprecationTestCase):
     # 2020-03-06 1.19.0
     message = re.escape("tostring() is deprecated. Use tobytes() instead.")
diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py
index 7980973086f6..6daa6bea5b9d 100644
--- a/numpy/_core/tests/test_numeric.py
+++ b/numpy/_core/tests/test_numeric.py
@@ -1567,16 +1567,12 @@ def test_nonzero_trivial(self):
         assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1)
         assert_equal(np.nonzero(np.array([1])), ([0],))
 
-    def test_nonzero_zerod(self):
-        assert_equal(np.count_nonzero(np.array(0)), 0)
-        assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)
-        with assert_warns(DeprecationWarning):
-            assert_equal(np.nonzero(np.array(0)), ([],))
-
-        assert_equal(np.count_nonzero(np.array(1)), 1)
-        assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)
-        with assert_warns(DeprecationWarning):
-            assert_equal(np.nonzero(np.array(1)), ([0],))
+    def test_nonzero_zerodim(self):
+        err_msg = "Calling nonzero on 0d arrays is not allowed"
+        with assert_raises_regex(ValueError, err_msg):
+            np.nonzero(np.array(0))
+        with assert_raises_regex(ValueError, err_msg):
+            np.array(1).nonzero()
 
     def test_nonzero_onedim(self):
         x = np.array([1, 0, 2, -1, 0, 0, 8])
diff --git a/tools/ci/array-api-skips.txt b/tools/ci/array-api-skips.txt
index 44b9ec3b0a90..002f862241c0 100644
--- a/tools/ci/array-api-skips.txt
+++ b/tools/ci/array-api-skips.txt
@@ -14,9 +14,6 @@ array_api_tests/test_signatures.py::test_func_signature[reshape]
 array_api_tests/test_signatures.py::test_func_signature[argsort]
 array_api_tests/test_signatures.py::test_func_signature[sort]
 
-# nonzero for 0D should error
-array_api_tests/test_searching_functions.py::test_nonzero_zerodim_error
-
 # TODO: check why in CI `inspect.signature(np.vecdot)` returns (*arg, **kwarg)
 # instead of raising ValueError. mtsokol: couldn't reproduce locally
 array_api_tests/test_signatures.py::test_func_signature[vecdot]

From 38bf07f3f6584b02b6df8ae3ab3ccc4be075d9ed Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Mon, 15 Apr 2024 17:35:51 +0200
Subject: [PATCH 193/980] BUG: Workaround for Intel Compiler mask conversion
 bug
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Encountered a specific bug with the Intel Compiler where `_cvtmask64_u64`
comparisons against `-1` trigger erroneous optimizations. This bug affects
equality (`==`) and inequality (`!=`) comparisons crucial for operations
like `np.logical_or`.

The issue originates from the compiler’s optimizer, which mistakenly
duplicates the last vector comparison instruction (targeting `zmm`) onto
`ymm`. It then incorrectly performs a bitwise XOR between the masks from
the duplicate and original instructions, leading to wrong results.

This patch implements a workaround to bypass this behavior.
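
For reference, the user-visible failure mode is a boolean reduction giving
the wrong answer. A minimal sketch of the kind of check affected
(illustrative sizes only; the full reproducer is in gh-26197, and results
are only wrong on ICC AVX-512 builds without this workaround):

```
import numpy as np

# Boolean reductions funnel through the boolean-to-bitmask conversion
# patched below; on an affected build they could report False even
# though a True element is present.
a = np.zeros(1 << 16, dtype=bool)
a[-1] = True
assert np.logical_or.reduce(a)  # must be True on a correct build
assert a.any()                  # equivalent reduction for boolean input
```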
--- .../_core/src/common/simd/avx512/conversion.h | 34 ++++++++++++++++--- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/common/simd/avx512/conversion.h b/numpy/_core/src/common/simd/avx512/conversion.h index 474aee446b6a..3b29b6729f20 100644 --- a/numpy/_core/src/common/simd/avx512/conversion.h +++ b/numpy/_core/src/common/simd/avx512/conversion.h @@ -131,20 +131,44 @@ npyv_pack_b8_b64(npyv_b64 a, npyv_b64 b, npyv_b64 c, npyv_b64 d, __mmask16 gh = _mm512_kunpackb((__mmask16)h, (__mmask16)g); return npyv_pack_b8_b32(ab, cd, ef, gh); } - +/* + * A compiler bug workaround on Intel Compiler Classic. + * The bug manifests specifically when the + * scalar result of _cvtmask64_u64 is compared against the constant -1. This + * comparison uniquely triggers a bug under conditions of equality (==) or + * inequality (!=) checks, which are typically used in reduction operations like + * np.logical_or. + * + * The underlying issue arises from the compiler's optimizer. When the last + * vector comparison instruction operates on zmm, the optimizer erroneously + * emits a duplicate of this instruction but on the lower half register ymm. It + * then performs a bitwise XOR operation between the mask produced by this + * duplicated instruction and the mask from the original comparison instruction. + * This erroneous behavior leads to incorrect results. + * + * See https://github.com/numpy/numpy/issues/26197#issuecomment-2056750975 + */ +#ifdef __INTEL_COMPILER +#define NPYV__VOLATILE_CVTMASK64 volatile +#else +#define NPYV__VOLATILE_CVTMASK64 +#endif // convert boolean vectors to integer bitfield -NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) -{ +NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) { #ifdef NPY_HAVE_AVX512BW_MASK - return (npy_uint64)_cvtmask64_u64(a); + npy_uint64 NPYV__VOLATILE_CVTMASK64 t = (npy_uint64)_cvtmask64_u64(a); + return t; #elif defined(NPY_HAVE_AVX512BW) - return (npy_uint64)a; + npy_uint64 NPYV__VOLATILE_CVTMASK64 t = (npy_uint64)a; + return t; #else int mask_lo = _mm256_movemask_epi8(npyv512_lower_si256(a)); int mask_hi = _mm256_movemask_epi8(npyv512_higher_si256(a)); return (unsigned)mask_lo | ((npy_uint64)(unsigned)mask_hi << 32); #endif } +#undef NPYV__VOLATILE_CVTMASK64 + NPY_FINLINE npy_uint64 npyv_tobits_b16(npyv_b16 a) { #ifdef NPY_HAVE_AVX512BW_MASK From a39ed175755f026ebbb233c202a6d7a70c883654 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 15 Apr 2024 13:17:22 -0600 Subject: [PATCH 194/980] MAINT: respond to marten's comments --- .../_core/src/multiarray/stringdtype/dtype.c | 55 +++++++------------ .../_core/src/multiarray/stringdtype/dtype.h | 6 +- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 47 ++++++---------- 3 files changed, 39 insertions(+), 69 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 3bcb9120a260..d4fc068093ae 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -144,7 +144,7 @@ new_stringdtype_instance(PyObject *na_object, int coerce) return NULL; } -NPY_NO_EXPORT int +static int na_eq_cmp(PyObject *a, PyObject *b) { if (a == b) { // catches None and other singletons like Pandas.NA @@ -185,41 +185,30 @@ _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona) return na_eq_cmp(sna, ona); } -// currently this can only return 1 or -1, the latter indicating that the -// error indicator is set +// Currently this can only return 1 or -1, the latter indicating 
that the +// error indicator is set. Pass in out_na if you want to figure out which +// na is valid. NPY_NO_EXPORT int -stringdtype_compatible_na(PyObject *na1, PyObject *na2) { - if ((na1 == NULL) != (na2 == NULL)) { - return 1; - } - - int na_eq = na_eq_cmp(na1, na2); +stringdtype_compatible_na(PyObject *na1, PyObject *na2, PyObject **out_na) { + if ((na1 != NULL) && (na2 != NULL)) { + int na_eq = na_eq_cmp(na1, na2); - if (na_eq < 0) { - return -1; + if (na_eq < 0) { + return -1; + } + else if (na_eq == 0) { + PyErr_Format(PyExc_TypeError, + "Cannot find a compatible null string value for " + "null strings '%R' and '%R'", na1, na2); + return -1; + } } - else if (na_eq == 0) { - PyErr_Format(PyExc_TypeError, - "Cannot find a compatible null string value for " - "null strings '%R' and '%R'", na1, na2); - return -1; + if (out_na != NULL) { + *out_na = na1 ? na1 : na2; } return 1; } -NPY_NO_EXPORT int -stringdtype_compatible_settings(PyObject *na1, PyObject *na2, PyObject **out_na, - int coerce1, int coerce2, int *out_coerce) { - int compatible = stringdtype_compatible_na(na1, na2); - if (compatible == -1) { - return -1; - } - *out_na = (na1 ? na1 : na2); - *out_coerce = (coerce1 && coerce2); - - return 0; -} - /* * This is used to determine the correct dtype to return when dealing * with a mix of different dtypes (for example when creating an array @@ -228,12 +217,10 @@ stringdtype_compatible_settings(PyObject *na1, PyObject *na2, PyObject **out_na, static PyArray_StringDTypeObject * common_instance(PyArray_StringDTypeObject *dtype1, PyArray_StringDTypeObject *dtype2) { - int out_coerce = 1; PyObject *out_na_object = NULL; - if (stringdtype_compatible_settings( - dtype1->na_object, dtype2->na_object, &out_na_object, - dtype1->coerce, dtype2->coerce, &out_coerce) == -1) { + if (stringdtype_compatible_na( + dtype1->na_object, dtype2->na_object, &out_na_object) == -1) { PyErr_Format(PyExc_TypeError, "Cannot find common instance for incompatible dtypes " "'%R' and '%R'", (PyObject *)dtype1, (PyObject *)dtype2); @@ -241,7 +228,7 @@ common_instance(PyArray_StringDTypeObject *dtype1, PyArray_StringDTypeObject *dt } return (PyArray_StringDTypeObject *)new_stringdtype_instance( - out_na_object, out_coerce); + out_na_object, dtype1->coerce && dtype1->coerce); } /* diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.h b/numpy/_core/src/multiarray/stringdtype/dtype.h index d187dbd4e7bb..2c2719602c32 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.h +++ b/numpy/_core/src/multiarray/stringdtype/dtype.h @@ -50,11 +50,7 @@ NPY_NO_EXPORT int _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona); NPY_NO_EXPORT int -stringdtype_compatible_settings(PyObject *na1, PyObject *na2, PyObject **out_na, - int coerce1, int coerce2, int *out_coerce); - -NPY_NO_EXPORT int -stringdtype_compatible_na(PyObject *na1, PyObject *na2); +stringdtype_compatible_na(PyObject *na1, PyObject *na2, PyObject **out_na); #ifdef __cplusplus } diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index a0fa85ca219e..6153d8cd5a93 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -246,12 +246,11 @@ binary_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - int out_coerce = 1; + int out_coerce = descr1->coerce && 
descr1->coerce; PyObject *out_na_object = NULL; - if (stringdtype_compatible_settings( - descr1->na_object, descr2->na_object, &out_na_object, - descr1->coerce, descr2->coerce, &out_coerce) == -1) { + if (stringdtype_compatible_na( + descr1->na_object, descr2->na_object, &out_na_object) == -1) { return (NPY_CASTING)-1; } @@ -556,7 +555,7 @@ string_comparison_resolve_descriptors( PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - if (stringdtype_compatible_na(descr1->na_object, descr2->na_object) == -1) { + if (stringdtype_compatible_na(descr1->na_object, descr2->na_object, NULL) == -1) { return (NPY_CASTING)-1; } @@ -786,12 +785,8 @@ string_findlike_resolve_descriptors( { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - int out_coerce = 1; - PyObject *out_na_object = NULL; - if (stringdtype_compatible_settings( - descr1->na_object, descr2->na_object, &out_na_object, - descr1->coerce, descr2->coerce, &out_coerce) == -1) { + if (stringdtype_compatible_na(descr1->na_object, descr2->na_object, NULL) == -1) { return (NPY_CASTING)-1; } @@ -839,12 +834,8 @@ string_startswith_endswith_resolve_descriptors( { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - int out_coerce = 1; - PyObject *out_na_object = NULL; - if (stringdtype_compatible_settings( - descr1->na_object, descr2->na_object, &out_na_object, - descr1->coerce, descr2->coerce, &out_coerce) == -1) { + if (stringdtype_compatible_na(descr1->na_object, descr2->na_object, NULL) == -1) { return (NPY_CASTING)-1; } @@ -1250,18 +1241,16 @@ replace_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; PyArray_StringDTypeObject *descr3 = (PyArray_StringDTypeObject *)given_descrs[2]; - int out_coerce = 1; + int out_coerce = descr1->coerce && descr2->coerce && descr3->coerce; PyObject *out_na_object = NULL; - if (stringdtype_compatible_settings( - descr1->na_object, descr2->na_object, &out_na_object, - descr1->coerce, descr2->coerce, &out_coerce) == -1) { + if (stringdtype_compatible_na( + descr1->na_object, descr2->na_object, &out_na_object) == -1) { return (NPY_CASTING)-1; } - if (stringdtype_compatible_settings( - out_na_object, descr3->na_object, &out_na_object, - out_coerce, descr3->coerce, &out_coerce) == -1) { + if (stringdtype_compatible_na( + out_na_object, descr3->na_object, &out_na_object) == -1) { return (NPY_CASTING)-1; } @@ -1525,12 +1514,11 @@ center_ljust_rjust_resolve_descriptors( { PyArray_StringDTypeObject *input_descr = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *fill_descr = (PyArray_StringDTypeObject *)given_descrs[2]; - int out_coerce = 1; + int out_coerce = input_descr->coerce && fill_descr->coerce; PyObject *out_na_object = NULL; - if (stringdtype_compatible_settings( - input_descr->na_object, fill_descr->na_object, &out_na_object, - input_descr->coerce, fill_descr->coerce, &out_coerce) == -1) { + if (stringdtype_compatible_na( + input_descr->na_object, fill_descr->na_object, &out_na_object) == -1) { return (NPY_CASTING)-1; } @@ -1835,12 +1823,11 @@ 
string_partition_resolve_descriptors( PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - int out_coerce = 1; + int out_coerce = descr1->coerce && descr2->coerce; PyObject *out_na_object = NULL; - if (stringdtype_compatible_settings( - descr1->na_object, descr2->na_object, &out_na_object, - descr1->coerce, descr2->coerce, &out_coerce) == -1) { + if (stringdtype_compatible_na( + descr1->na_object, descr2->na_object, &out_na_object) == -1) { return (NPY_CASTING)-1; } From 70b0fc498bad6eeee59c4ab975157fa5e4f85de5 Mon Sep 17 00:00:00 2001 From: Christopher Titchen <109701765+christopher-titchen@users.noreply.github.com> Date: Thu, 28 Mar 2024 13:39:56 +0000 Subject: [PATCH 195/980] ENH: :zap: Cast NOT mask output of ma.extras._covhelper to float. --- numpy/ma/extras.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 743f4bead446..770f75bc45da 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1569,7 +1569,14 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): tup = (None, slice(None)) # if y is None: - xnotmask = np.logical_not(xmask).astype(int) + # Check if we can guarantee that the integers in the (N - ddof) normalisation + # can be accurately represented with single-precision before computing the dot + # product. + if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: + xnm_dtype = np.float64 + else: + xnm_dtype = np.float32 + xnotmask = np.logical_not(xmask).astype(xnm_dtype) else: y = array(y, copy=False, ndmin=2, dtype=float) ymask = ma.getmaskarray(y) @@ -1584,7 +1591,16 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): x._sharedmask = False y._sharedmask = False x = ma.concatenate((x, y), axis) - xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int) + # Check if we can guarantee that the integers in the (N - ddof) normalisation + # can be accurately represented with single-precision before computing the dot + # product. + if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: + xnm_dtype = np.float64 + else: + xnm_dtype = np.float32 + xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype( + xnm_dtype + ) x -= x.mean(axis=rowvar)[tup] return (x, xnotmask, rowvar) From 5374ed30b94ae4737b42f8b3a169ea41788596e2 Mon Sep 17 00:00:00 2001 From: Christopher Titchen <109701765+christopher-titchen@users.noreply.github.com> Date: Thu, 28 Mar 2024 13:44:51 +0000 Subject: [PATCH 196/980] ENH: :zap: Decrease wall time of ma.cov function. --- numpy/ma/extras.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 770f75bc45da..f6fd96f2cb22 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1687,11 +1687,17 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) if not rowvar: - fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof - result = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + fact = np.dot(xnotmask.T, xnotmask) + mask = np.equal(fact, 0) + fact -= ddof + data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact + result = ma.masked_array(data, mask=mask).squeeze() else: - fact = np.dot(xnotmask, xnotmask.T) * 1. 
- ddof - result = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + fact = np.dot(xnotmask, xnotmask.T) + mask = np.equal(fact, 0) + fact -= ddof + data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact + result = ma.masked_array(data, mask=mask).squeeze() return result From 818279d931eec94809ea0b0b02c7a96a6b172ec2 Mon Sep 17 00:00:00 2001 From: Christopher Titchen <109701765+christopher-titchen@users.noreply.github.com> Date: Thu, 28 Mar 2024 14:57:37 +0000 Subject: [PATCH 197/980] MAINT: :recycle: Change instantiation of ma.cov result. --- numpy/ma/extras.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index f6fd96f2cb22..fd774626f531 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1691,13 +1691,13 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): mask = np.equal(fact, 0) fact -= ddof data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact - result = ma.masked_array(data, mask=mask).squeeze() + result = ma.array(data, mask=mask).squeeze() else: fact = np.dot(xnotmask, xnotmask.T) mask = np.equal(fact, 0) fact -= ddof data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact - result = ma.masked_array(data, mask=mask).squeeze() + result = ma.array(data, mask=mask).squeeze() return result From e2b423f771fd23501454970223f8e69419e4d8af Mon Sep 17 00:00:00 2001 From: Christopher Titchen <109701765+christopher-titchen@users.noreply.github.com> Date: Thu, 28 Mar 2024 15:08:08 +0000 Subject: [PATCH 198/980] ENH: :zap: Decrease wall time of ma.corrcoef function. --- numpy/ma/extras.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index fd774626f531..38e193b9df61 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1770,11 +1770,15 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) # Compute the covariance matrix if not rowvar: - fact = np.dot(xnotmask.T, xnotmask) * 1. - c = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + fact = np.dot(xnotmask.T, xnotmask) + mask = np.equal(fact, 0) + data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact + c = ma.array(data, mask=mask).squeeze() else: - fact = np.dot(xnotmask, xnotmask.T) * 1. - c = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + fact = np.dot(xnotmask, xnotmask.T) + mask = np.equal(fact, 0) + data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact + c = ma.array(data, mask=mask).squeeze() # Check whether we have a scalar try: diag = ma.diagonal(c) From 5b68f23ddb6db8a50734a01a534f0be0b6ab2075 Mon Sep 17 00:00:00 2001 From: Christopher Titchen <109701765+christopher-titchen@users.noreply.github.com> Date: Thu, 28 Mar 2024 15:28:13 +0000 Subject: [PATCH 199/980] MAINT: :construction: Change instantiations in ma.extras for masks. 
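
A note for readers following the `cov` changes above: the `fact` array whose
zeros get masked is the dot product of the not-mask with itself, which
counts, for every pair of rows, how many columns are simultaneously
unmasked, i.e. exactly the per-entry (N - ddof) normalisation. A standalone
sketch of that counting trick (illustrative data, not the library code):

```
import numpy as np

x = np.ma.masked_invalid(
    [[1.0, 2.0, np.nan, 4.0],
     [1.0, np.nan, 3.0, 4.0]]
)
notmask = (~np.ma.getmaskarray(x)).astype(np.float32)

# pair_counts[i, j]: number of columns where rows i and j are both valid
pair_counts = np.dot(notmask, notmask.T)
print(pair_counts)
# [[3. 2.]
#  [2. 3.]]
```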
--- numpy/ma/extras.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 38e193b9df61..e3f2bb79bd4e 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1576,7 +1576,7 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): xnm_dtype = np.float64 else: xnm_dtype = np.float32 - xnotmask = np.logical_not(xmask).astype(xnm_dtype) + xnotmask = np.logical_not(xmask, dtype=xnm_dtype) else: y = array(y, copy=False, ndmin=2, dtype=float) ymask = ma.getmaskarray(y) @@ -1598,9 +1598,7 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): xnm_dtype = np.float64 else: xnm_dtype = np.float32 - xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype( - xnm_dtype - ) + xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis), dtype=xnm_dtype) x -= x.mean(axis=rowvar)[tup] return (x, xnotmask, rowvar) @@ -1688,13 +1686,13 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) if not rowvar: fact = np.dot(xnotmask.T, xnotmask) - mask = np.equal(fact, 0) + mask = np.equal(fact, 0, dtype=bool) fact -= ddof data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact result = ma.array(data, mask=mask).squeeze() else: fact = np.dot(xnotmask, xnotmask.T) - mask = np.equal(fact, 0) + mask = np.equal(fact, 0, dtype=bool) fact -= ddof data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact result = ma.array(data, mask=mask).squeeze() @@ -1771,12 +1769,12 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, # Compute the covariance matrix if not rowvar: fact = np.dot(xnotmask.T, xnotmask) - mask = np.equal(fact, 0) + mask = np.equal(fact, 0, dtype=bool) data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact c = ma.array(data, mask=mask).squeeze() else: fact = np.dot(xnotmask, xnotmask.T) - mask = np.equal(fact, 0) + mask = np.equal(fact, 0, dtype=bool) data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact c = ma.array(data, mask=mask).squeeze() # Check whether we have a scalar From 529ec1056996956e682c874bbf401d239b265f74 Mon Sep 17 00:00:00 2001 From: Christopher Titchen <109701765+christopher-titchen@users.noreply.github.com> Date: Thu, 28 Mar 2024 16:29:36 +0000 Subject: [PATCH 200/980] REV: :rewind: Revert changes in ma.extras._covhelper for casting. 
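
A note on the `2 ** 24` cutoff that `_covhelper` keeps after this revert:
IEEE single precision has a 24-bit significand, so float32 represents every
integer exactly only up to 2**24; beyond that, the integer counts
accumulated by the dot product could silently lose precision. A quick check
of the boundary:

```
import numpy as np

# the last point where float32 still resolves adjacent integers
assert np.float32(2**24) - np.float32(2**24 - 1) == 1.0
# one step past it, an increment is lost to rounding
assert np.float32(2**24) + np.float32(1) == np.float32(2**24)
```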
--- numpy/ma/extras.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index e3f2bb79bd4e..e33376a3bc09 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1576,7 +1576,7 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): xnm_dtype = np.float64 else: xnm_dtype = np.float32 - xnotmask = np.logical_not(xmask, dtype=xnm_dtype) + xnotmask = np.logical_not(xmask).astype(xnm_dtype) else: y = array(y, copy=False, ndmin=2, dtype=float) ymask = ma.getmaskarray(y) @@ -1598,7 +1598,9 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): xnm_dtype = np.float64 else: xnm_dtype = np.float32 - xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis), dtype=xnm_dtype) + xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype( + xnm_dtype + ) x -= x.mean(axis=rowvar)[tup] return (x, xnotmask, rowvar) From 91a5e73112a10ee6c766b08df6f735cc5e2df583 Mon Sep 17 00:00:00 2001 From: Christopher Titchen <109701765+christopher-titchen@users.noreply.github.com> Date: Thu, 28 Mar 2024 17:19:10 +0000 Subject: [PATCH 201/980] TST: :white_check_mark: Test ma.extras._covhelper changes. --- numpy/ma/tests/test_extras.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index ad6bdf38f45c..4c5c9e1343f5 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -29,7 +29,7 @@ ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols, mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin, - diagflat, ndenumerate, stack, vstack + diagflat, ndenumerate, stack, vstack, _covhelper ) @@ -1287,6 +1287,22 @@ class TestCov: def setup_method(self): self.data = array(np.random.rand(12)) + def test_covhelper(self): + x = self.data + # Test not mask output type is a float. + assert_(_covhelper(x, rowvar=True)[1].dtype, np.float32) + assert_(_covhelper(x, y=x, rowvar=False)[1].dtype, np.float32) + # Test not mask output is equal after casting to float. + mask = x > 0.5 + assert_array_equal( + _covhelper(np.ma.masked_array(x, mask), rowvar=True)[1].astype(bool), + ~mask.reshape(1, -1), + ) + assert_array_equal( + _covhelper(np.ma.masked_array(x, mask), y=x, rowvar=False)[1].astype(bool), + np.vstack((~mask, ~mask)), + ) + def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values x = self.data From 72859f525c22fb47c46e49d8f28d1a116aa375e1 Mon Sep 17 00:00:00 2001 From: Christopher Titchen <109701765+christopher-titchen@users.noreply.github.com> Date: Mon, 8 Apr 2024 21:45:13 +0100 Subject: [PATCH 202/980] ENH: :zap: :rewind: Decrease wall time of ma.corrcoef function further. 
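
The rewrite below derives the correlation from the covariance through the
identity corr[i, j] = cov[i, j] / (std[i] * std[j]), reading the standard
deviations off the covariance diagonal instead of recomputing pairwise
denominators in a Python loop. A sketch of the identity on unmasked data,
checked against `np.corrcoef`:

```
import numpy as np

x = np.array([[1.0, 2.0, 3.0, 5.0],
              [2.0, 1.0, 7.0, 3.0]])
c = np.cov(x)
std = np.sqrt(np.diag(c))
assert np.allclose(c / np.outer(std, std), np.corrcoef(x))
```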
--- numpy/ma/extras.py | 58 ++++++++++++---------------------------------- 1 file changed, 15 insertions(+), 43 deletions(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index e33376a3bc09..be956a3232f3 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1687,16 +1687,16 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) if not rowvar: - fact = np.dot(xnotmask.T, xnotmask) - mask = np.equal(fact, 0, dtype=bool) - fact -= ddof - data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact + fact = np.dot(xnotmask.T, xnotmask) - ddof + mask = np.less_equal(fact, 0, dtype=bool) + with np.errstate(divide="ignore", invalid="ignore"): + data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact result = ma.array(data, mask=mask).squeeze() else: - fact = np.dot(xnotmask, xnotmask.T) - mask = np.equal(fact, 0, dtype=bool) - fact -= ddof - data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact + fact = np.dot(xnotmask, xnotmask.T) - ddof + mask = np.less_equal(fact, 0, dtype=bool) + with np.errstate(divide="ignore", invalid="ignore"): + data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact result = ma.array(data, mask=mask).squeeze() return result @@ -1766,43 +1766,15 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, if bias is not np._NoValue or ddof is not np._NoValue: # 2015-03-15, 1.10 warnings.warn(msg, DeprecationWarning, stacklevel=2) - # Get the data - (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) - # Compute the covariance matrix - if not rowvar: - fact = np.dot(xnotmask.T, xnotmask) - mask = np.equal(fact, 0, dtype=bool) - data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact - c = ma.array(data, mask=mask).squeeze() - else: - fact = np.dot(xnotmask, xnotmask.T) - mask = np.equal(fact, 0, dtype=bool) - data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact - c = ma.array(data, mask=mask).squeeze() - # Check whether we have a scalar + # Estimate the covariance matrix. + corr = cov(x, y, rowvar, allow_masked=allow_masked) + # The non-masked version returns a masked value for a scalar. 
try: - diag = ma.diagonal(c) + std = ma.sqrt(ma.diagonal(corr)) except ValueError: - return 1 - # - if xnotmask.all(): - _denom = ma.sqrt(ma.multiply.outer(diag, diag)) - else: - _denom = diagflat(diag) - _denom._sharedmask = False # We know return is always a copy - n = x.shape[1 - rowvar] - if rowvar: - for i in range(n - 1): - for j in range(i + 1, n): - _x = mask_cols(vstack((x[i], x[j]))).var(axis=1) - _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) - else: - for i in range(n - 1): - for j in range(i + 1, n): - _x = mask_cols( - vstack((x[:, i], x[:, j]))).var(axis=1) - _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) - return c / _denom + return ma.MaskedConstant() + corr /= ma.multiply.outer(std, std) + return corr #####-------------------------------------------------------------------------- #---- --- Concatenation helpers --- From 8ce170f6fc4a7324c824adcb25869d0e5b6ac437 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 15 Apr 2024 13:39:14 -0600 Subject: [PATCH 203/980] TST: run the smoke tests on more python versions --- .github/workflows/linux.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 545ff978a83c..fc89608358ac 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -53,6 +53,9 @@ jobs: runs-on: ubuntu-latest env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" + strategy: + matrix: + version: ["3.10", "3.11", "3.12", "3.13-dev"] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: @@ -60,7 +63,7 @@ jobs: fetch-tags: true - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: - python-version: '3.10' + python-version: ${{ matrix.version }} - uses: ./.github/meson_actions pypy: From 019ae0206eb40c80720da553408c637b1e0ff7e2 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 15 Apr 2024 14:18:40 -0600 Subject: [PATCH 204/980] MNT: respond to minor comments from marten --- numpy/_core/src/multiarray/stringdtype/dtype.c | 4 ++-- numpy/_core/tests/test_stringdtype.py | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index d4fc068093ae..9ba48e26bac4 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -185,7 +185,7 @@ _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona) return na_eq_cmp(sna, ona); } -// Currently this can only return 1 or -1, the latter indicating that the +// Currently this can only return 0 or -1, the latter indicating that the // error indicator is set. Pass in out_na if you want to figure out which // na is valid. NPY_NO_EXPORT int @@ -206,7 +206,7 @@ stringdtype_compatible_na(PyObject *na1, PyObject *na2, PyObject **out_na) { if (out_na != NULL) { *out_na = na1 ? 
na1 : na2; } - return 1; + return 0; } /* diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 55fa4e18558f..3d5ad7737081 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1340,8 +1340,9 @@ def test_unset_na_coercion(): # a dtype instance with an unset na object is compatible # with a dtype that has one set - # this tests uses the "add" ufunc but all ufuncs that accept more - # than one string argument and produce a string should behave this way + # this test uses the "add" and "equal" ufunc but all ufuncs that + # accept more than one string argument and produce a string should + # behave this way # TODO: generalize to more ufuncs inp = ["hello", "world"] arr = np.array(inp, dtype=StringDType(na_object=None)) From a23e26647de9e66ce248e024c694ad3b46ebd4c6 Mon Sep 17 00:00:00 2001 From: Christopher Titchen <109701765+christopher-titchen@users.noreply.github.com> Date: Tue, 16 Apr 2024 00:12:11 +0100 Subject: [PATCH 205/980] MAINT: :rotating_light: Fix linter warnings. --- numpy/ma/extras.py | 12 ++++++------ numpy/ma/tests/test_extras.py | 8 ++++++-- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index be956a3232f3..f6f036e4a8ac 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1569,9 +1569,9 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): tup = (None, slice(None)) # if y is None: - # Check if we can guarantee that the integers in the (N - ddof) normalisation - # can be accurately represented with single-precision before computing the dot - # product. + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision + # before computing the dot product. if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: xnm_dtype = np.float64 else: @@ -1591,9 +1591,9 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): x._sharedmask = False y._sharedmask = False x = ma.concatenate((x, y), axis) - # Check if we can guarantee that the integers in the (N - ddof) normalisation - # can be accurately represented with single-precision before computing the dot - # product. + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision + # before computing the dot product. if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: xnm_dtype = np.float64 else: diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 4c5c9e1343f5..eb7821a74e3d 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -1295,11 +1295,15 @@ def test_covhelper(self): # Test not mask output is equal after casting to float. mask = x > 0.5 assert_array_equal( - _covhelper(np.ma.masked_array(x, mask), rowvar=True)[1].astype(bool), + _covhelper( + np.ma.masked_array(x, mask), rowvar=True + )[1].astype(bool), ~mask.reshape(1, -1), ) assert_array_equal( - _covhelper(np.ma.masked_array(x, mask), y=x, rowvar=False)[1].astype(bool), + _covhelper( + np.ma.masked_array(x, mask), y=x, rowvar=False + )[1].astype(bool), np.vstack((~mask, ~mask)), ) From 84f283a5ab2564a50c4e7a295d36280fc579930e Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 16 Apr 2024 06:59:39 +0200 Subject: [PATCH 206/980] BLD: ensure libnpymath and highway static libs use hidden visibility The effect this has is that symbols don't get re-exported as public by Python extension modules that link with these shared libraries. 
E.g., building on macOS arm64 with Clang changes the set of exported symbols
for `_multiarray_umath` from:
```
% dyld_info -exports build/numpy/_core/_multiarray_umath.cpython-39-darwin.so
build/numpy/_core/_multiarray_umath.cpython-39-darwin.so [arm64]:
    -exports:
        offset      symbol
        0x00112BA4  _PyInit__multiarray_umath
        0x001C955C  _npy_spacingf
        0x001C95F8  _npy_spacing
        ...
```
to:
```
build/numpy/_core/_multiarray_umath.cpython-311-darwin.so [arm64]:
    -exports:
        offset      symbol
        0x0010B8F8  _PyInit__multiarray_umath
```

This works for all compilers that support GNU-style
`__attribute__((visibility("hidden"))`, which both GCC and Clang do.

Note that the `libnpyrandom` static library is left alone here. Trying to
change visibility there breaks a test for CFFI, because that test is
accessing private symbols. This is clearly wrong, but explicitly documented
at https://numpy.org/devdocs/reference/random/extending.html#cffi. So
leaving that alone here. `libnpyrandom` isn't used by SciPy anymore and may
well have zero users left, so it's not critical.
---
 numpy/_core/meson.build | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build
index f4ac5cd421c0..f9d7434353fe 100644
--- a/numpy/_core/meson.build
+++ b/numpy/_core/meson.build
@@ -112,7 +112,8 @@ if use_highway
     ],
     cpp_args: '-DTOOLCHAIN_MISS_ASM_HWCAP_H',
     include_directories: ['src/highway'],
-    install: false
+    install: false,
+    gnu_symbol_visibility: 'hidden',
   )
 else
   highway_lib = []
@@ -561,6 +562,7 @@ npymath_lib = static_library('npymath',
   install_dir: np_dir / '_core/lib',
   name_prefix: name_prefix_staticlib,
   name_suffix: name_suffix_staticlib,
+  gnu_symbol_visibility: 'hidden',
 )
 
 dir_separator = '/'

From eb3db92e1e2636f045d31d4af5d74f99d1d39533 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?=
Date: Tue, 16 Apr 2024 10:25:17 +0200
Subject: [PATCH 207/980] Add boolean scalar error message

---
 numpy/_core/src/multiarray/item_selection.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c
index 3ddc6abf2251..656688bda2fc 100644
--- a/numpy/_core/src/multiarray/item_selection.c
+++ b/numpy/_core/src/multiarray/item_selection.c
@@ -2798,9 +2798,19 @@ PyArray_Nonzero(PyArrayObject *self)
 {
     int i, ndim = PyArray_NDIM(self);
     if (ndim == 0) {
-        PyErr_SetString(PyExc_ValueError,
-                "Calling nonzero on 0d arrays is not allowed. "
-                "Use np.atleast_1d(scalar).nonzero() instead.");
+        char const* msg;
+        if (PyArray_ISBOOL(self)) {
+            msg =
+                "Calling nonzero on 0d arrays is not allowed. "
+                "Use np.atleast_1d(scalar).nonzero() instead. "
+                "If the context of this error is of the form "
+                "`arr[nonzero(cond)]`, just use `arr[cond]`.";
+        } else {
+            msg =
+                "Calling nonzero on 0d arrays is not allowed. 
" + "Use np.atleast_1d(scalar).nonzero() instead."; + } + PyErr_SetString(PyExc_ValueError, msg); return NULL; } From d567754fae4fb603b4e33b491d84fea6659ba31c Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Tue, 16 Apr 2024 11:10:15 +0100 Subject: [PATCH 208/980] BUG: Ensure seed sequences are restored through pickling (#26260) Explicity store and restore seed sequence closes #26234 --- * BUG: Ensure seed sequences are restored through pickling Explicity store and restore seed sequence closes #26234 * CLN: Simplify refactor Make more use of set and getstate to avoid changes in the pickling functions * BUG: Correct behavior for legacy pickles Add test for legacy pickles Include pickles for tests * MAINT: Correct types for pickle related functions * REF: Switch from string to type * REF: Swtich to returning bit generators Explicitly return bit generator rather than ctor --- numpy/random/_generator.pyi | 9 ++-- numpy/random/_generator.pyx | 16 +++--- numpy/random/_pickle.py | 33 +++++++----- numpy/random/bit_generator.pyi | 12 +++-- numpy/random/bit_generator.pyx | 21 ++++++-- numpy/random/meson.build | 3 ++ numpy/random/mtrand.pyi | 2 +- numpy/random/mtrand.pyx | 9 ++-- .../tests/data/generator_pcg64_np121.pkl.gz | Bin 0 -> 203 bytes .../tests/data/generator_pcg64_np126.pkl.gz | Bin 0 -> 208 bytes numpy/random/tests/data/sfc64_np126.pkl.gz | Bin 0 -> 290 bytes numpy/random/tests/test_direct.py | 47 +++++++++++++++- numpy/random/tests/test_generator_mt19937.py | 51 ++++++++++++++++-- numpy/random/tests/test_randomstate.py | 4 +- numpy/typing/tests/data/pass/random.py | 4 +- numpy/typing/tests/data/reveal/random.pyi | 4 +- 16 files changed, 167 insertions(+), 48 deletions(-) create mode 100644 numpy/random/tests/data/generator_pcg64_np121.pkl.gz create mode 100644 numpy/random/tests/data/generator_pcg64_np126.pkl.gz create mode 100644 numpy/random/tests/data/sfc64_np126.pkl.gz diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index e6a02b5ad147..16a0e5e0ff8d 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -68,9 +68,12 @@ class Generator: def __init__(self, bit_generator: BitGenerator) -> None: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... - def __getstate__(self) -> dict[str, Any]: ... - def __setstate__(self, state: dict[str, Any]) -> None: ... - def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]: ... + def __getstate__(self) -> None: ... + def __setstate__(self, state: dict[str, Any] | None) -> None: ... + def __reduce__(self) -> tuple[ + Callable[[BitGenerator], Generator], + tuple[BitGenerator], + None]: ... @property def bit_generator(self) -> BitGenerator: ... def spawn(self, n_children: int) -> list[Generator]: ... 
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index ab8a15555ae3..641d665a8eaa 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -214,17 +214,19 @@ cdef class Generator: # Pickling support: def __getstate__(self): - return self.bit_generator.state + return None - def __setstate__(self, state): - self.bit_generator.state = state + def __setstate__(self, bit_gen): + if isinstance(bit_gen, dict): + # Legacy path + # Prior to 2.0.x only the state of the underlying bit generator + # was preserved and any seed sequence information was lost + self.bit_generator.state = bit_gen def __reduce__(self): - ctor, name_tpl, state = self._bit_generator.__reduce__() - from ._pickle import __generator_ctor - # Requirements of __generator_ctor are (name, ctor) - return __generator_ctor, (name_tpl[0], ctor), state + # Requirements of __generator_ctor are (bit_generator, ) + return __generator_ctor, (self._bit_generator, ), None @property def bit_generator(self): diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py index 073993726eb3..842bd441a502 100644 --- a/numpy/random/_pickle.py +++ b/numpy/random/_pickle.py @@ -1,3 +1,4 @@ +from .bit_generator import BitGenerator from .mtrand import RandomState from ._philox import Philox from ._pcg64 import PCG64, PCG64DXSM @@ -14,27 +15,30 @@ } -def __bit_generator_ctor(bit_generator_name='MT19937'): +def __bit_generator_ctor(bit_generator: str | type[BitGenerator] = 'MT19937'): """ Pickling helper function that returns a bit generator object Parameters ---------- - bit_generator_name : str - String containing the name of the BitGenerator + bit_generator : type[BitGenerator] or str + BitGenerator class or string containing the name of the BitGenerator Returns ------- - bit_generator : BitGenerator + BitGenerator BitGenerator instance """ - if bit_generator_name in BitGenerators: - bit_generator = BitGenerators[bit_generator_name] + if isinstance(bit_generator, type): + bit_gen_class = bit_generator + elif bit_generator in BitGenerators: + bit_gen_class = BitGenerators[bit_generator] else: - raise ValueError(str(bit_generator_name) + ' is not a known ' - 'BitGenerator module.') + raise ValueError( + str(bit_generator) + ' is not a known BitGenerator module.' + ) - return bit_generator() + return bit_gen_class() def __generator_ctor(bit_generator_name="MT19937", @@ -44,8 +48,9 @@ def __generator_ctor(bit_generator_name="MT19937", Parameters ---------- - bit_generator_name : str - String containing the core BitGenerator's name + bit_generator_name : str or BitGenerator + String containing the core BitGenerator's name or a + BitGenerator instance bit_generator_ctor : callable, optional Callable function that takes bit_generator_name as its only argument and returns an instantized bit generator. 
@@ -55,6 +60,9 @@ def __generator_ctor(bit_generator_name="MT19937", rg : Generator Generator using the named core BitGenerator """ + if isinstance(bit_generator_name, BitGenerator): + return Generator(bit_generator_name) + # Legacy path that uses a bit generator name and ctor return Generator(bit_generator_ctor(bit_generator_name)) @@ -76,5 +84,6 @@ def __randomstate_ctor(bit_generator_name="MT19937", rs : RandomState Legacy RandomState using the named core BitGenerator """ - + if isinstance(bit_generator_name, BitGenerator): + return RandomState(bit_generator_name) return RandomState(bit_generator_ctor(bit_generator_name)) diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 4556658efff4..d99278e861ea 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -92,11 +92,17 @@ class SeedSequence(ISpawnableSeedSequence): class BitGenerator(abc.ABC): lock: Lock def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... - def __getstate__(self) -> dict[str, Any]: ... - def __setstate__(self, state: dict[str, Any]) -> None: ... + def __getstate__(self) -> tuple[dict[str, Any], ISeedSequence]: ... + def __setstate__( + self, state_seed_seq: dict[str, Any] | tuple[dict[str, Any], ISeedSequence] + ) -> None: ... def __reduce__( self, - ) -> tuple[Callable[[str], BitGenerator], tuple[str], tuple[dict[str, Any]]]: ... + ) -> tuple[ + Callable[[str], BitGenerator], + tuple[str], + tuple[dict[str, Any], ISeedSequence] + ]: ... @abc.abstractmethod @property def state(self) -> Mapping[str, Any]: ... diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index e49902f5c330..c999e6e32794 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -537,14 +537,27 @@ cdef class BitGenerator(): # Pickling support: def __getstate__(self): - return self.state + return self.state, self._seed_seq - def __setstate__(self, state): - self.state = state + def __setstate__(self, state_seed_seq): + + if isinstance(state_seed_seq, dict): + # Legacy path + # Prior to 2.0.x only the state of the underlying bit generator + # was preserved and any seed sequence information was lost + self.state = state_seed_seq + else: + self._seed_seq = state_seed_seq[1] + self.state = state_seed_seq[0] def __reduce__(self): from ._pickle import __bit_generator_ctor - return __bit_generator_ctor, (self.state['bit_generator'],), self.state + + return ( + __bit_generator_ctor, + (type(self), ), + (self.state, self._seed_seq) + ) @property def state(self): diff --git a/numpy/random/meson.build b/numpy/random/meson.build index 2da23a168b8a..103b07545d65 100644 --- a/numpy/random/meson.build +++ b/numpy/random/meson.build @@ -139,6 +139,9 @@ py.install_sources( 'tests/data/philox-testset-2.csv', 'tests/data/sfc64-testset-1.csv', 'tests/data/sfc64-testset-2.csv', + 'tests/data/sfc64_np126.pkl.gz', + 'tests/data/generator_pcg64_np126.pkl.gz', + 'tests/data/generator_pcg64_np121.pkl.gz', ], subdir: 'numpy/random/tests/data' ) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 5d260b8e5624..dbd3cd609495 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -73,7 +73,7 @@ class RandomState: def __str__(self) -> str: ... def __getstate__(self) -> dict[str, Any]: ... def __setstate__(self, state: dict[str, Any]) -> None: ... - def __reduce__(self) -> tuple[Callable[[str], RandomState], tuple[str], dict[str, Any]]: ... 
+ def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ... @overload def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index d67e4533f663..b42b0a7764b8 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -205,10 +205,13 @@ cdef class RandomState: self.set_state(state) def __reduce__(self): - ctor, name_tpl, _ = self._bit_generator.__reduce__() - from ._pickle import __randomstate_ctor - return __randomstate_ctor, (name_tpl[0], ctor), self.get_state(legacy=False) + # The third argument containing the state is required here since + # RandomState contains state information in addition to the state + # contained in the bit generator that describes the Gaussian + # generator. This argument is passed to __setstate__ after the + # RandomState is created. + return __randomstate_ctor, (self._bit_generator, ), self.get_state(legacy=False) cdef _initialize_bit_generator(self, bit_generator): self._bit_generator = bit_generator diff --git a/numpy/random/tests/data/generator_pcg64_np121.pkl.gz b/numpy/random/tests/data/generator_pcg64_np121.pkl.gz new file mode 100644 index 0000000000000000000000000000000000000000..b7ad03d8e63b9a7f024952f35db2827a7393592b GIT binary patch [base85 data for the new pickle fixtures generator_pcg64_np121.pkl.gz, generator_pcg64_np126.pkl.gz and sfc64_np126.pkl.gz omitted] diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index fa2ae866beeb..12c2f1d5ab57 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -298,6 +298,24 @@ def test_pickle(self): aa = pickle.loads(pickle.dumps(ss)) assert_equal(ss.state, aa.state) + def test_pickle_preserves_seed_sequence(self): + # GH 26234 + # Add explicit test that bit generators preserve seed sequences + import pickle + + bit_generator = self.bit_generator(*self.data1['seed']) + ss = bit_generator.seed_seq + bg_plk = pickle.loads(pickle.dumps(bit_generator)) + ss_plk = bg_plk.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.pool, ss_plk.pool) + + bit_generator.seed_seq.spawn(10) + bg_plk = pickle.loads(pickle.dumps(bit_generator)) + ss_plk = bg_plk.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.n_children_spawned, ss_plk.n_children_spawned) + def test_invalid_state_type(self): bit_generator = self.bit_generator(*self.data1['seed']) with pytest.raises(TypeError): @@ -349,8 +367,9 @@ def test_getstate(self): bit_generator = self.bit_generator(*self.data1['seed']) state = bit_generator.state alt_state = bit_generator.__getstate__() - assert_state_equal(state, alt_state) - + assert isinstance(alt_state, tuple) + assert_state_equal(state, alt_state[0]) + assert isinstance(alt_state[1], SeedSequence) class TestPhilox(Base): @classmethod @@ -413,6 +432,7 @@ def test_advange_large(self): assert state["state"] == advanced_state + class TestPCG64DXSM(Base): @classmethod def setup_class(cls): @@ -502,6 +522,29 @@ def
setup_class(cls): cls.invalid_init_types = [(3.2,), ([None],), (1, None)] cls.invalid_init_values = [(-1,)] + def test_legacy_pickle(self): + # Pickling format was changed in 2.0.x + import gzip + import pickle + + expected_state = np.array( + [ + 9957867060933711493, + 532597980065565856, + 14769588338631205282, + 13 + ], + dtype=np.uint64 + ) + + base_path = os.path.split(os.path.abspath(__file__))[0] + pkl_file = os.path.join(base_path, "data", "sfc64_np126.pkl.gz") + with gzip.open(pkl_file) as gz: + sfc = pickle.load(gz) + + assert isinstance(sfc, SFC64) + assert_equal(sfc.state["state"]["state"], expected_state) + class TestDefaultRNG: def test_seed(self): diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index a0bee225d20b..aca1ccde24af 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1,3 +1,4 @@ +import os.path import sys import hashlib @@ -2738,10 +2739,48 @@ def test_generator_ctor_old_style_pickle(): rg = np.random.Generator(np.random.PCG64DXSM(0)) rg.standard_normal(1) # Directly call reduce which is used in pickling - ctor, args, state_a = rg.__reduce__() + ctor, (bit_gen, ), _ = rg.__reduce__() # Simulate unpickling an old pickle that only has the name - assert args[:1] == ("PCG64DXSM",) - b = ctor(*args[:1]) - b.bit_generator.state = state_a + assert bit_gen.__class__.__name__ == "PCG64DXSM" + b = ctor(*("PCG64DXSM",)) + b.bit_generator.state = bit_gen.state state_b = b.bit_generator.state - assert state_a == state_b + assert bit_gen.state == state_b + + +def test_pickle_preserves_seed_sequence(): + # GH 26234 + # Add explicit test that bit generators preserve seed sequences + import pickle + + rg = np.random.Generator(np.random.PCG64DXSM(20240411)) + ss = rg.bit_generator.seed_seq + rg_plk = pickle.loads(pickle.dumps(rg)) + ss_plk = rg_plk.bit_generator.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.pool, ss_plk.pool) + + rg.bit_generator.seed_seq.spawn(10) + rg_plk = pickle.loads(pickle.dumps(rg)) + ss_plk = rg_plk.bit_generator.seed_seq + assert_equal(ss.state, ss_plk.state) + + +@pytest.mark.parametrize("version", [121, 126]) +def test_legacy_pickle(version): + # Pickling format was changed in 1.22.x and in 2.0.x + import pickle + import gzip + + base_path = os.path.split(os.path.abspath(__file__))[0] + pkl_file = os.path.join( + base_path, "data", f"generator_pcg64_np{version}.pkl.gz" + ) + with gzip.open(pkl_file) as gz: + rg = pickle.load(gz) + state = rg.bit_generator.state['state'] + + assert isinstance(rg, Generator) + assert isinstance(rg.bit_generator, np.random.PCG64) + assert state['state'] == 35399562948360463058890781895381311971 + assert state['inc'] == 87136372517582989555478159403783844777 diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index aa24936bae2b..5121a684f693 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -2052,8 +2052,8 @@ def test_randomstate_ctor_old_style_pickle(): # Directly call reduce which is used in pickling ctor, args, state_a = rs.__reduce__() # Simulate unpickling an old pickle that only has the name - assert args[:1] == ("MT19937",) - b = ctor(*args[:1]) + assert args[0].__class__.__name__ == "MT19937" + b = ctor(*("MT19937",)) b.set_state(state_a) state_b = b.get_state(legacy=False) diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py
index 40b88ce4dfe4..69afb28c48ec 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -911,9 +911,7 @@ def_gen.__str__() def_gen.__repr__() -def_gen_state: dict[str, Any] -def_gen_state = def_gen.__getstate__() -def_gen.__setstate__(def_gen_state) +def_gen.__setstate__(dict(def_gen.bit_generator.state)) # RandomState random_st: np.random.RandomState = np.random.RandomState() diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index 3074033bfc65..b31b4b56f870 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -953,9 +953,7 @@ assert_type(def_gen.shuffle(D_2D, axis=1), None) assert_type(np.random.Generator(pcg64), np.random.Generator) assert_type(def_gen.__str__(), str) assert_type(def_gen.__repr__(), str) -def_gen_state = def_gen.__getstate__() -assert_type(def_gen_state, dict[str, Any]) -assert_type(def_gen.__setstate__(def_gen_state), None) +assert_type(def_gen.__setstate__(dict(def_gen.bit_generator.state)), None) # RandomState random_st: np.random.RandomState = np.random.RandomState() From 3ea613cfc35659e5b093859a6c69fb057f8d6cfa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E0=A8=97=E0=A8=97=E0=A8=A8=E0=A8=A6=E0=A9=80=E0=A8=AA=20?= =?UTF-8?q?=E0=A8=B8=E0=A8=BF=E0=A9=B0=E0=A8=98=20=28Gagandeep=20Singh=29?= Date: Tue, 16 Apr 2024 18:06:33 +0530 Subject: [PATCH 209/980] BLD: use install-tags to optionally install tests (#26274) - Add the 'tests' install tag to all test-related files - Move `test_pyinstaller.py` under a `tests` folder to match standard test layout of other directories - Extend `tools/check_installation.py` to check that all installed files have tags, and that omitting 'tests' is complete - Add a CI step in the "custom checks" job to use the extra `check_installation.py` checks - Shift docstring of private function in `_multiarray_tests.so` to a code comment Co-authored-by: Ralf Gommers --- .github/workflows/linux.yml | 5 ++ numpy/_core/_add_newdocs.py | 33 +----------- numpy/_core/meson.build | 4 +- .../src/multiarray/_multiarray_tests.c.src | 30 ++++++++++- numpy/_pyinstaller/tests/__init__.py | 0 .../{ => tests}/pyinstaller-smoke.py | 0 .../{ => tests}/test_pyinstaller.py | 0 numpy/fft/meson.build | 3 +- numpy/linalg/meson.build | 3 +- numpy/meson.build | 8 ++- numpy/random/meson.build | 6 ++- pyproject.toml | 3 ++ tools/check_installed_files.py | 52 ++++++++++++++----- 13 files changed, 95 insertions(+), 52 deletions(-) create mode 100644 numpy/_pyinstaller/tests/__init__.py rename numpy/_pyinstaller/{ => tests}/pyinstaller-smoke.py (100%) rename numpy/_pyinstaller/{ => tests}/test_pyinstaller.py (100%) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index fc89608358ac..3c59b639baa1 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -282,3 +282,8 @@ jobs: run: | # Need the explicit `bash -c` here because `grep` returns exit code 1 for no matches bash -c "! vulture . 
--min-confidence 100 --exclude doc/,numpy/distutils/,vendored-meson/ | grep 'unreachable'" + - name: Check usage of install_tag + run: | + rm -rf build-install + ./vendored-meson/meson/meson.py install -C build --destdir ../build-install --tags=runtime,python-runtime,devel + python tools/check_installed_files.py $(find ./build-install -path '*/site-packages/numpy') --no-tests diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 8b9910723104..bdd193400b05 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -2034,7 +2034,7 @@ To enable and control broadcasting, use an ellipsis. Default NumPy-style broadcasting is done by adding an ellipsis to the left of each term, like ``np.einsum('...ii->...i', a)``. - ``np.einsum('...i->...', a)`` is like + ``np.einsum('...i->...', a)`` is like :py:func:`np.sum(a, axis=-1) ` for array ``a`` of any shape. To take the trace along the first and last axes, you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix @@ -4871,37 +4871,6 @@ See `global_state` for more information. """) -add_newdoc('numpy._core._multiarray_tests', 'format_float_OSprintf_g', - """ - format_float_OSprintf_g(val, precision) - - Print a floating point scalar using the system's printf function, - equivalent to: - - printf("%.*g", precision, val); - - for half/float/double, or replacing 'g' by 'Lg' for longdouble. This - method is designed to help cross-validate the format_float_* methods. - - Parameters - ---------- - val : python float or numpy floating scalar - Value to format. - - precision : non-negative integer, optional - Precision given to printf. - - Returns - ------- - rep : string - The string representation of the floating point value - - See Also - -------- - format_float_scientific - format_float_positional - """) - ############################################################################## # diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index f4ac5cd421c0..a5bfe3a6a6c7 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -718,6 +718,7 @@ py.extension_module('_multiarray_tests', gnu_symbol_visibility: 'default', install: true, subdir: 'numpy/_core', + install_tag: 'tests' ) _umath_tests_mtargets = mod_features.multi_targets( @@ -754,6 +755,7 @@ foreach gen: test_modules_src install: true, subdir: 'numpy/_core', link_with: gen[2], + install_tag: 'tests' ) endforeach @@ -1329,4 +1331,4 @@ py.install_sources( ) subdir('include') -install_subdir('tests', install_dir: np_dir / '_core', install_tag: 'python-runtime') +install_subdir('tests', install_dir: np_dir / '_core', install_tag: 'tests') diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 7ce182c2343f..fbd5fc445a2c 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -1992,7 +1992,35 @@ PrintFloat_Printf_g(PyObject *obj, int precision) return PyUnicode_FromString(str); } - +/* + * format_float_OSprintf_g(val, precision) + * + * Print a floating point scalar using the system's printf function, + * equivalent to: + * + * printf("%.*g", precision, val); + * + * for half/float/double, or replacing 'g' by 'Lg' for longdouble. This + * method is designed to help cross-validate the format_float_* methods. + * + * Parameters + * ---------- + * val : python float or numpy floating scalar + * Value to format. + * + * precision : non-negative integer, optional + * Precision given to printf. 
+ * + * Returns + * ------- + * rep : string + * The string representation of the floating point value + * + * See Also + * -------- + * format_float_scientific + * format_float_positional + */ static PyObject * printf_float_g(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { diff --git a/numpy/_pyinstaller/tests/__init__.py b/numpy/_pyinstaller/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/_pyinstaller/pyinstaller-smoke.py b/numpy/_pyinstaller/tests/pyinstaller-smoke.py similarity index 100% rename from numpy/_pyinstaller/pyinstaller-smoke.py rename to numpy/_pyinstaller/tests/pyinstaller-smoke.py diff --git a/numpy/_pyinstaller/test_pyinstaller.py b/numpy/_pyinstaller/tests/test_pyinstaller.py similarity index 100% rename from numpy/_pyinstaller/test_pyinstaller.py rename to numpy/_pyinstaller/tests/test_pyinstaller.py diff --git a/numpy/fft/meson.build b/numpy/fft/meson.build index 854a9a0a4d6f..751b5dc74d30 100644 --- a/numpy/fft/meson.build +++ b/numpy/fft/meson.build @@ -34,5 +34,6 @@ py.install_sources( 'tests/test_helper.py', 'tests/test_pocketfft.py', ], - subdir: 'numpy/fft/tests' + subdir: 'numpy/fft/tests', + install_tag: 'tests' ) diff --git a/numpy/linalg/meson.build b/numpy/linalg/meson.build index 104808ab5a1d..740c9f56c6fa 100644 --- a/numpy/linalg/meson.build +++ b/numpy/linalg/meson.build @@ -57,5 +57,6 @@ py.install_sources( 'tests/test_linalg.py', 'tests/test_regression.py', ], - subdir: 'numpy/linalg/tests' + subdir: 'numpy/linalg/tests', + install_tag: 'tests' ) diff --git a/numpy/meson.build b/numpy/meson.build index 80fa720b82e6..1d35e7dc4fec 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -273,7 +273,6 @@ pure_subdirs = [ 'matrixlib', 'polynomial', 'testing', - 'tests', 'typing', 'rec', 'char', @@ -312,9 +311,14 @@ else endif foreach subdir: pure_subdirs - install_subdir(subdir, install_dir: np_dir, install_tag: 'python-runtime') + install_subdir(subdir, install_dir: np_dir, install_tag: 'python-runtime', exclude_directories: ['tests']) + if fs.is_dir(subdir/'tests') + install_subdir(subdir/'tests', install_dir: np_dir/subdir, install_tag: 'tests') + endif endforeach +install_subdir('tests', install_dir: np_dir, install_tag: 'tests') + compilers = { 'C': cc, 'CPP': cpp, diff --git a/numpy/random/meson.build b/numpy/random/meson.build index 103b07545d65..1c90fb5866f2 100644 --- a/numpy/random/meson.build +++ b/numpy/random/meson.build @@ -123,7 +123,8 @@ py.install_sources( 'tests/test_seed_sequence.py', 'tests/test_smoke.py', ], - subdir: 'numpy/random/tests' + subdir: 'numpy/random/tests', + install_tag: 'tests' ) py.install_sources( @@ -143,7 +144,8 @@ py.install_sources( 'tests/data/generator_pcg64_np126.pkl.gz', 'tests/data/generator_pcg64_np121.pkl.gz', ], - subdir: 'numpy/random/tests/data' + subdir: 'numpy/random/tests/data', + install_tag: 'tests' ) py.install_sources( diff --git a/pyproject.toml b/pyproject.toml index ec34720b6564..4e069fee8972 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -183,6 +183,9 @@ repair-wheel-command = "" [tool.meson-python] meson = 'vendored-meson/meson/meson.py' +[tool.meson-python.args] +install = ['--tags=runtime,python-runtime,tests,devel'] + [tool.spin] package = 'numpy' diff --git a/tools/check_installed_files.py b/tools/check_installed_files.py index 7f78d5b1c7d6..c45a046b1ca2 100644 --- a/tools/check_installed_files.py +++ b/tools/check_installed_files.py @@ -21,6 +21,7 @@ import os import glob import sys +import json CUR_DIR = 
os.path.abspath(os.path.join(os.path.dirname(__file__))) @@ -34,7 +35,7 @@ } -def main(install_dir): +def main(install_dir, tests_check): INSTALLED_DIR = os.path.join(ROOT_DIR, install_dir) if not os.path.exists(INSTALLED_DIR): raise ValueError( @@ -44,14 +45,20 @@ def main(install_dir): numpy_test_files = get_files(NUMPY_DIR, kind='test') installed_test_files = get_files(INSTALLED_DIR, kind='test') - # Check test files detected in repo are installed - for test_file in numpy_test_files.keys(): - if test_file not in installed_test_files.keys(): - raise Exception( - "%s is not installed" % numpy_test_files[test_file] - ) - - print("----------- All the test files were installed --------------") + if tests_check == "--no-tests": + if len(installed_test_files) > 0: + raise Exception("Test files aren't expected to be installed in %s" + ", found %s" % (INSTALLED_DIR, installed_test_files)) + print("----------- No test files were installed --------------") + else: + # Check test files detected in repo are installed + for test_file in numpy_test_files.keys(): + if test_file not in installed_test_files.keys(): + raise Exception( + "%s is not installed" % numpy_test_files[test_file] + ) + + print("----------- All the test files were installed --------------") numpy_pyi_files = get_files(NUMPY_DIR, kind='stub') installed_pyi_files = get_files(INSTALLED_DIR, kind='stub') @@ -59,9 +66,13 @@ def main(install_dir): # Check *.pyi files detected in repo are installed for pyi_file in numpy_pyi_files.keys(): if pyi_file not in installed_pyi_files.keys(): + if (tests_check == "--no-tests" and + "tests" in numpy_pyi_files[pyi_file]): + continue raise Exception("%s is not installed" % numpy_pyi_files[pyi_file]) - print("----------- All the .pyi files were installed --------------") + print("----------- All the necessary .pyi files " + "were installed --------------") def get_files(dir_to_check, kind='test'): @@ -88,9 +99,26 @@ def get_files(dir_to_check, kind='test'): if __name__ == '__main__': - if not len(sys.argv) == 2: + if len(sys.argv) < 2: raise ValueError("Incorrect number of input arguments, need " "check_installation.py relpath/to/installed/numpy") install_dir = sys.argv[1] - main(install_dir) + tests_check = "" + if len(sys.argv) >= 3: + tests_check = sys.argv[2] + main(install_dir, tests_check) + + all_tags = set() + + with open(os.path.join('build', 'meson-info', + 'intro-install_plan.json'), 'r') as f: + targets = json.load(f) + + for key in targets.keys(): + for values in list(targets[key].values()): + if not values['tag'] in all_tags: + all_tags.add(values['tag']) + + if all_tags != set(['runtime', 'python-runtime', 'devel', 'tests']): + raise AssertionError(f"Found unexpected install tag: {all_tags}") From eb4c33d7cee1ad3efeb17e27398875038b72a906 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Tue, 16 Apr 2024 17:41:42 -0300 Subject: [PATCH 210/980] DOC: Clean up CSS and adapt to new PyData Sphinx Theme version [skip azp] [skip cirrus] --- .../upcoming_changes/25908.improvement.rst | 5 - doc/source/_static/numpy.css | 139 +++--------------- doc/source/conf.py | 40 ++--- doc/source/index.rst | 23 +-- 4 files changed, 61 insertions(+), 146 deletions(-) delete mode 100644 doc/release/upcoming_changes/25908.improvement.rst diff --git a/doc/release/upcoming_changes/25908.improvement.rst b/doc/release/upcoming_changes/25908.improvement.rst deleted file mode 100644 index ad7a2cdfdc2e..000000000000 --- a/doc/release/upcoming_changes/25908.improvement.rst +++ /dev/null @@ 
-1,5 +0,0 @@ -``center``, ``ljust``, ``rjust``, and ``zfill`` are now implemented using ufuncs --------------------------------------------------------------------------------- - -The text justification functions in `numpy.strings` are now implemented using -ufuncs under the hood and should be significantly faster. diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 78b054f8fa4e..9207173728b2 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -9,132 +9,42 @@ body { font-family: 'Open Sans', sans-serif; + font-size: medium; } -pre, code { - font-size: 100%; - line-height: 155%; -} - -h1 { - font-family: "Lato", sans-serif; - color: #013243; /* warm black */ -} - -h2 { - color: #4d77cf; /* han blue */ - letter-spacing: -.03em; -} - -h3 { - color: #013243; /* warm black */ - letter-spacing: -.03em; -} - -/* Style the active version button. - -- dev: orange -- stable: green -- old, PR: red - -Colors from: - -Wong, B. Points of view: Color blindness. -Nat Methods 8, 441 (2011). https://doi.org/10.1038/nmeth.1618 -*/ +/* Version switcher colors from PyData Sphinx Theme */ -/* If the active version has the name "dev", style it orange */ -#version_switcher_button[data-active-version-name*="dev"] { - background-color: #E69F00; - border-color: #E69F00; - color:#000000; -} - -/* green for `stable` */ -#version_switcher_button[data-active-version-name*="stable"] { - background-color: #009E73; - border-color: #009E73; -} - -/* red for `old` */ -#version_switcher_button:not([data-active-version-name*="stable"], [data-active-version-name*="dev"], [data-active-version-name=""]) { - background-color: #980F0F; - border-color: #980F0F; +.version-switcher__button[data-active-version-name*="dev"] { + background-color: var(--pst-color-warning); + border-color: var(--pst-color-warning); + opacity: 0.9; } -/* Main page overview cards */ - -.sd-card { - background: #fff; - border-radius: 0; - padding: 30px 10px 20px 10px; - margin: 10px 0px; +.version-switcher__button:not([data-active-version-name*="stable"]):not([data-active-version-name*="dev"]):not([data-active-version-name*="pull"]) { + background-color: var(--pst-color-danger); + border-color: var(--pst-color-danger); + opacity: 0.9; } -.sd-card .sd-card-header { - text-align: center; +button.btn.version-switcher__button, +button.btn.version-switcher__button:hover { + color: black; } -.sd-card .sd-card-header .sd-card-text { - margin: 0px; -} +/* Main index page overview cards */ .sd-card .sd-card-img-top { - height: 52px; - width: 52px; + height: 60px; + width: 60px; margin-left: auto; margin-right: auto; + margin-top: 10px; } -.sd-card .sd-card-header { - border: none; - background-color: white; - color: #150458 !important; - font-size: var(--pst-font-size-h5); - font-weight: bold; - padding: 2.5rem 0rem 0.5rem 0rem; -} - -.sd-card .sd-card-footer { - border: none; - background-color: white; -} - -.sd-card .sd-card-footer .sd-card-text { - max-width: 220px; - margin-left: auto; - margin-right: auto; -} +/* Main index page overview images */ -/* Dark theme tweaking */ html[data-theme=dark] .sd-card img[src*='.svg'] { - filter: invert(0.82) brightness(0.8) contrast(1.2); -} - -/* Main index page overview cards */ -html[data-theme=dark] .sd-card { - background-color:var(--pst-color-background); -} - -html[data-theme=dark] .sd-shadow-sm { - box-shadow: 0 .1rem 1rem rgba(250, 250, 250, .6) !important -} - -html[data-theme=dark] .sd-card .sd-card-header { - background-color:var(--pst-color-background); - 
color: #150458 !important; -} - -html[data-theme=dark] .sd-card .sd-card-footer { - background-color:var(--pst-color-background); -} - -html[data-theme=dark] h1 { - color: var(--pst-color-primary); -} - -html[data-theme=dark] h3 { - color: #0a6774; + filter: invert(0.82) brightness(0.8) contrast(1.2); } /* Legacy admonition */ @@ -143,13 +53,12 @@ div.admonition-legacy { border-color: var(--pst-color-warning); } -.admonition-legacy.admonition>.admonition-title::before { +.admonition>.admonition-title::after, +div.admonition>.admonition-title::after { color: var(--pst-color-warning); - content: var(--pst-icon-admonition-attention); - background-color: var(--pst-color-warning); } -.admonition-legacy.admonition>.admonition-title::after { - color: var(--pst-color-warning); - content: var(--pst-icon-admonition-default); +.admonition>.admonition-title, +div.admonition>.admonition-title { + background-color: var(--pst-color-warning-bg); } \ No newline at end of file diff --git a/doc/source/conf.py b/doc/source/conf.py index b30fe3c9978a..53721e4b511a 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -239,23 +239,29 @@ def setup(app): switcher_version = f"{version}" html_theme_options = { - "logo": { - "image_light": "_static/numpylogo.svg", - "image_dark": "_static/numpylogo_dark.svg", - }, - "github_url": "https://github.com/numpy/numpy", - "collapse_navigation": True, - "external_links": [ - {"name": "Learn", "url": "https://numpy.org/numpy-tutorials/"}, - {"name": "NEPs", "url": "https://numpy.org/neps"} - ], - "header_links_before_dropdown": 6, - # Add light/dark mode and documentation version switcher: - "navbar_end": ["theme-switcher", "version-switcher", "navbar-icon-links"], - "switcher": { - "version_match": switcher_version, - "json_url": "https://numpy.org/doc/_static/versions.json", - }, + "logo": { + "image_light": "_static/numpylogo.svg", + "image_dark": "_static/numpylogo_dark.svg", + }, + "github_url": "https://github.com/numpy/numpy", + "collapse_navigation": True, + "external_links": [ + {"name": "Learn", "url": "https://numpy.org/numpy-tutorials/"}, + {"name": "NEPs", "url": "https://numpy.org/neps"}, + ], + "header_links_before_dropdown": 6, + # Add light/dark mode and documentation version switcher: + "navbar_end": ["theme-switcher", "version-switcher", "navbar-icon-links"], + "navbar_persistent": [], + "switcher": { + "version_match": switcher_version, + "json_url": "https://numpy.org/doc/_static/versions.json", + }, +} + +html_sidebars = { + "index": "search-button-field", + "**": ["search-button-field", "sidebar-nav-bs"], } html_title = "%s v%s Manual" % (project, version) diff --git a/doc/source/index.rst b/doc/source/index.rst index b80d65ce2c4e..02f3a8dc12b0 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -21,10 +21,10 @@ NumPy documentation `Historical versions of documentation `_ **Useful links**: -`Installation `_ | -`Source Repository `_ | -`Issue Tracker `_ | -`Q&A Support `_ | +`Installation `_ | +`Source Repository `_ | +`Issue Tracker `_ | +`Q&A Support `_ | `Mailing List `_ NumPy is the fundamental package for scientific computing in Python. It is a @@ -36,13 +36,15 @@ basic statistical operations, random simulation and much more. -.. grid:: 2 +.. grid:: 1 1 2 2 + :gutter: 2 3 4 4 .. grid-item-card:: :img-top: ../source/_static/index-images/getting_started.svg + :text-align: center Getting started - ^^^^^^^^^^^^^^^ + ^^^ New to NumPy? Check out the Absolute Beginner's Guide. 
It contains an introduction to NumPy's main concepts and links to additional tutorials. @@ -58,9 +60,10 @@ basic statistical operations, random simulation and much more. .. grid-item-card:: :img-top: ../source/_static/index-images/user_guide.svg + :text-align: center User guide - ^^^^^^^^^^ + ^^^ The user guide provides in-depth information on the key concepts of NumPy with useful background information and explanation. @@ -76,9 +79,10 @@ basic statistical operations, random simulation and much more. .. grid-item-card:: :img-top: ../source/_static/index-images/api.svg + :text-align: center API reference - ^^^^^^^^^^^^^ + ^^^ The reference guide contains a detailed description of the functions, modules, and objects included in NumPy. The reference describes how the @@ -96,9 +100,10 @@ basic statistical operations, random simulation and much more. .. grid-item-card:: :img-top: ../source/_static/index-images/contributor.svg + :text-align: center Contributor's guide - ^^^^^^^^^^^^^^^^^^^ + ^^^ Want to add to the codebase? Can help add translation or a flowchart to the documentation? The contributing guidelines will guide you through the From 08f8bc7b9a6c510deaa7b509042025be2a67ffbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Tue, 16 Apr 2024 17:55:41 -0300 Subject: [PATCH 211/980] DOC: Add search button and fix version switcher dev label [skip azp] [skip cirrus] --- doc/source/_static/numpy.css | 2 +- doc/source/conf.py | 7 +------ 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 9207173728b2..b2d34a3ab063 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -14,7 +14,7 @@ body { /* Version switcher colors from PyData Sphinx Theme */ -.version-switcher__button[data-active-version-name*="dev"] { +.version-switcher__button[data-active-version-name*="devdocs"] { background-color: var(--pst-color-warning); border-color: var(--pst-color-warning); opacity: 0.9; diff --git a/doc/source/conf.py b/doc/source/conf.py index 53721e4b511a..bd9008f06aca 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -251,7 +251,7 @@ def setup(app): ], "header_links_before_dropdown": 6, # Add light/dark mode and documentation version switcher: - "navbar_end": ["theme-switcher", "version-switcher", "navbar-icon-links"], + "navbar_end": ["search-button", "theme-switcher", "version-switcher", "navbar-icon-links"], "navbar_persistent": [], "switcher": { "version_match": switcher_version, @@ -259,11 +259,6 @@ def setup(app): }, } -html_sidebars = { - "index": "search-button-field", - "**": ["search-button-field", "sidebar-nav-bs"], -} - html_title = "%s v%s Manual" % (project, version) html_static_path = ['_static'] html_last_updated_fmt = '%b %d, %Y' From a8f17c4785f78cc4cbc8ccb6598a09827c856694 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Tue, 16 Apr 2024 17:58:15 -0300 Subject: [PATCH 212/980] DOC: Lint conf.py file [skip azp] [skip cirrus] --- doc/source/conf.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index bd9008f06aca..f57646c3a19e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -251,7 +251,12 @@ def setup(app): ], "header_links_before_dropdown": 6, # Add light/dark mode and documentation version switcher: - "navbar_end": ["search-button", "theme-switcher", "version-switcher", "navbar-icon-links"], + "navbar_end": [ + "search-button", + 
"theme-switcher", + "version-switcher", + "navbar-icon-links" + ], "navbar_persistent": [], "switcher": { "version_match": switcher_version, From 130a04626afaa275e5672a8da51b2531ce165043 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 16 Apr 2024 15:34:25 -0600 Subject: [PATCH 213/980] CI: add llvm/clang sanitizer tests --- .github/workflows/linux_compiler_sanitizers.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/linux_compiler_sanitizers.yml b/.github/workflows/linux_compiler_sanitizers.yml index b0a40f4551d5..d54dd1415950 100644 --- a/.github/workflows/linux_compiler_sanitizers.yml +++ b/.github/workflows/linux_compiler_sanitizers.yml @@ -21,7 +21,7 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - gcc_sanitizers: + clang_sanitizers: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest @@ -35,6 +35,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Install dependencies run: | + sudo apt update + sudo apt install -y llvm libstdc++-12-dev pip install -r requirements/build_requirements.txt pip install -r requirements/ci_requirements.txt - name: Build @@ -43,7 +45,7 @@ jobs: TERM: xterm-256color PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas run: - spin build --with-scipy-openblas=32 -- --werror -Db_sanitize=address,undefined + CC=clang CXX=clang++ spin build --with-scipy-openblas=32 -- -Db_sanitize=address,undefined - name: Test shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: @@ -52,5 +54,5 @@ jobs: pip install pytest pytest-xdist hypothesis typing_extensions ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1:halt_on_error=1 \ UBSAN_OPTIONS=halt_on_error=0 \ - LD_PRELOAD=$(gcc --print-file-name=libasan.so) \ + LD_PRELOAD=$(clang --print-file-name=libclang_rt.asan-x86_64.so) \ python -m spin test -- -v -s From 8e4414dc84d52b841b64257fb7252618a2f8b687 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Tue, 16 Apr 2024 18:58:37 -0300 Subject: [PATCH 214/980] DOC: Make version switcher font size smaller This helps adjust the flow of the navbar items. [skip azp][skip cirrus] --- doc/source/_static/numpy.css | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index b2d34a3ab063..203a282436ee 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -26,9 +26,14 @@ body { opacity: 0.9; } +.version-switcher__menu a.list-group-item { + font-size: small; +} + button.btn.version-switcher__button, button.btn.version-switcher__button:hover { color: black; + font-size: small; } /* Main index page overview cards */ From a1e66ec99657a275afa33183b90b3ea319e40870 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 16 Apr 2024 21:58:08 -0600 Subject: [PATCH 215/980] MAINT: Pin sphinx to version 7.2.6 Sphinx >= 7.3.0 breaks breathe. Closes #26298. 
--- requirements/doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index a642de83b4e3..d9976a0d6312 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -1,5 +1,5 @@ # doxygen required, use apt-get or dnf -sphinx>=4.5.0 +sphinx==7.2.6 numpydoc==1.4 pydata-sphinx-theme==0.13.3 sphinx-design From 50a705cb9c6f4c755c9793f719d35ebdfe574978 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 17 Apr 2024 02:05:13 -0600 Subject: [PATCH 216/980] MNT: replace _PyDict_GetItemStringWithError with PyDict_GetItemStringRef (#26282) Replaces all usages of the private _PyDict_GetItemStringWithError Python C API function with PyDict_GetItemStringRef. The latter is available in the python 3.13 C API and via pythoncapi-compat in older python versions. The new function has the same semantics as PyDict_GetItemRef but takes a UTF-8 C string instead of a python object. Like _PyDict_GetItemStringWithError, it does not suppress errors. It also has the nice improvement that the return type is now int and it signals success or failure. This lets me re-structure control flow a bit and avoid PyErr_Occurred() calls. I used the new PyDict_ContainsString function when all we care about is whether a key is in the dict. I also used Py_SETREF in a few places that were using the unsafe decref-then-set pattern. See #26159 for the relevant tracking issue. --- * MNT: replace _PyDict_GetItemStringWithError with PyDict_GetItemStringRef * MNT: fix issue spotted in code review * MNT: fix two more issues spotted in code review * MNT: more missing decrefs spotted in code review * MNT: delete unnecessary decrefs (covered by goto fail logic) --- numpy/_core/src/common/ufunc_override.c | 17 +++---- numpy/_core/src/multiarray/ctors.c | 67 ++++++++++++++----------- numpy/_core/src/multiarray/number.c | 17 ++++--- numpy/_core/src/umath/override.c | 15 +++--- numpy/_core/src/umath/ufunc_object.c | 9 ++-- numpy/_core/src/umath/umathmodule.c | 19 ++++--- 6 files changed, 82 insertions(+), 62 deletions(-) diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index c9b5d0e68f82..dd7706d41475 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -2,6 +2,7 @@ #define _MULTIARRAYMODULE #include "numpy/ndarraytypes.h" +#include "npy_pycompat.h" #include "get_attr_string.h" #include "npy_import.h" #include "ufunc_override.h" @@ -99,12 +100,11 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject * *out_kwd_obj = NULL; return -1; } - /* borrowed reference */ - *out_kwd_obj = _PyDict_GetItemStringWithError(kwds, "out"); - if (*out_kwd_obj == NULL) { - if (PyErr_Occurred()) { - return -1; - } + int result = PyDict_GetItemStringRef(kwds, "out", out_kwd_obj); + if (result == -1) { + return -1; + } + else if (result == 0) { Py_INCREF(Py_None); *out_kwd_obj = Py_None; return 0; @@ -118,15 +118,14 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject * seq = PySequence_Fast(*out_kwd_obj, "Could not convert object to sequence"); if (seq == NULL) { - *out_kwd_obj = NULL; + Py_CLEAR(*out_kwd_obj); return -1; } *out_objs = PySequence_Fast_ITEMS(seq); - *out_kwd_obj = seq; + Py_SETREF(*out_kwd_obj, seq); return PySequence_Fast_GET_SIZE(seq); } else { - Py_INCREF(*out_kwd_obj); *out_objs = out_kwd_obj; return 1; } diff --git a/numpy/_core/src/multiarray/ctors.c
b/numpy/_core/src/multiarray/ctors.c index b5871c8b04f8..ebcd40094277 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -12,7 +12,7 @@ #include "numpy/npy_math.h" #include "npy_config.h" - +#include "npy_pycompat.h" #include "npy_ctypes.h" #include "multiarraymodule.h" @@ -2180,10 +2180,10 @@ PyArray_FromInterface(PyObject *origin) } /* Get type string from interface specification */ - attr = _PyDict_GetItemStringWithError(iface, "typestr"); - if (attr == NULL) { + int result = PyDict_GetItemStringRef(iface, "typestr", &attr); + if (result <= 0) { Py_DECREF(iface); - if (!PyErr_Occurred()) { + if (result == 0) { PyErr_SetString(PyExc_ValueError, "Missing __array_interface__ typestr"); } @@ -2207,43 +2207,47 @@ PyArray_FromInterface(PyObject *origin) * the 'descr' attribute. */ if (dtype->type_num == NPY_VOID) { - PyObject *descr = _PyDict_GetItemStringWithError(iface, "descr"); - if (descr == NULL && PyErr_Occurred()) { + PyObject *descr = NULL; + result = PyDict_GetItemStringRef(iface, "descr", &descr); + if (result == -1) { goto fail; } PyArray_Descr *new_dtype = NULL; - if (descr != NULL) { + if (result == 1) { int is_default = _is_default_descr(descr, attr); if (is_default < 0) { + Py_DECREF(descr); goto fail; } if (!is_default) { if (PyArray_DescrConverter2(descr, &new_dtype) != NPY_SUCCEED) { + Py_DECREF(descr); goto fail; } if (new_dtype != NULL) { - Py_DECREF(dtype); - dtype = new_dtype; + Py_SETREF(dtype, new_dtype); } } - } - + Py_DECREF(descr); } + Py_CLEAR(attr); /* Get shape tuple from interface specification */ - attr = _PyDict_GetItemStringWithError(iface, "shape"); - if (attr == NULL) { - if (PyErr_Occurred()) { - return NULL; - } + result = PyDict_GetItemStringRef(iface, "shape", &attr); + if (result < 0) { + return NULL; + } + if (result == 0) { /* Shape must be specified when 'data' is specified */ - PyObject *data = _PyDict_GetItemStringWithError(iface, "data"); - if (data == NULL && PyErr_Occurred()) { + int result = PyDict_ContainsString(iface, "data"); + if (result < 0) { + Py_DECREF(attr); return NULL; } - else if (data != NULL) { + else if (result == 1) { Py_DECREF(iface); + Py_DECREF(attr); PyErr_SetString(PyExc_ValueError, "Missing __array_interface__ shape"); return NULL; @@ -2271,10 +2275,11 @@ PyArray_FromInterface(PyObject *origin) } } } + Py_CLEAR(attr); /* Get data buffer from interface specification */ - attr = _PyDict_GetItemStringWithError(iface, "data"); - if (attr == NULL && PyErr_Occurred()){ + result = PyDict_GetItemStringRef(iface, "data", &attr); + if (result == -1){ return NULL; } @@ -2337,20 +2342,24 @@ PyArray_FromInterface(PyObject *origin) PyBuffer_Release(&view); /* Get offset number from interface specification */ - attr = _PyDict_GetItemStringWithError(iface, "offset"); - if (attr == NULL && PyErr_Occurred()) { + PyObject *offset = NULL; + result = PyDict_GetItemStringRef(iface, "offset", &offset); + if (result == -1) { goto fail; } - else if (attr) { - npy_longlong num = PyLong_AsLongLong(attr); + else if (result == 1) { + npy_longlong num = PyLong_AsLongLong(offset); if (error_converting(num)) { PyErr_SetString(PyExc_TypeError, "__array_interface__ offset must be an integer"); + Py_DECREF(offset); goto fail; } data += num; + Py_DECREF(offset); } } + Py_CLEAR(attr); ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, dtype, @@ -2376,11 +2385,11 @@ PyArray_FromInterface(PyObject *origin) goto fail; } } - attr = _PyDict_GetItemStringWithError(iface, "strides"); - if (attr == NULL && 
PyErr_Occurred()){ + result = PyDict_GetItemStringRef(iface, "strides", &attr); + if (result == -1){ return NULL; } - if (attr != NULL && attr != Py_None) { + if (result == 1 && attr != Py_None) { if (!PyTuple_Check(attr)) { PyErr_SetString(PyExc_TypeError, "strides must be a tuple"); @@ -2404,12 +2413,14 @@ PyArray_FromInterface(PyObject *origin) if (n) { memcpy(PyArray_STRIDES(ret), strides, n*sizeof(npy_intp)); } + Py_DECREF(attr); } PyArray_UpdateFlags(ret, NPY_ARRAY_UPDATE_ALL); Py_DECREF(iface); return (PyObject *)ret; fail: + Py_XDECREF(attr); Py_XDECREF(dtype); Py_XDECREF(iface); return NULL; diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index 7b4d6f21f45c..9532662b327a 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -8,7 +8,7 @@ #include "numpy/arrayobject.h" #include "npy_config.h" - +#include "npy_pycompat.h" #include "npy_import.h" #include "common.h" #include "number.h" @@ -60,24 +60,25 @@ array_inplace_matrix_multiply(PyArrayObject *m1, PyObject *m2); * Those not present will not be changed */ -/* FIXME - macro contains a return */ -#define SET(op) temp = _PyDict_GetItemStringWithError(dict, #op); \ - if (temp == NULL && PyErr_Occurred()) { \ +/* FIXME - macro contains returns */ +#define SET(op) \ + res = PyDict_GetItemStringRef(dict, #op, &temp); \ + if (res == -1) { \ return -1; \ } \ - else if (temp != NULL) { \ + else if (res == 1) { \ if (!(PyCallable_Check(temp))) { \ + Py_DECREF(temp); \ return -1; \ } \ - Py_INCREF(temp); \ - Py_XDECREF(n_ops.op); \ - n_ops.op = temp; \ + Py_XSETREF(n_ops.op, temp); \ } NPY_NO_EXPORT int _PyArray_SetNumericOps(PyObject *dict) { PyObject *temp = NULL; + int res; SET(add); SET(subtract); SET(multiply); diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index 88d05abddc50..d10b86be7b57 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -4,7 +4,7 @@ #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" #include "npy_import.h" - +#include "npy_pycompat.h" #include "override.h" #include "ufunc_override.h" @@ -148,18 +148,17 @@ static int normalize_signature_keyword(PyObject *normal_kwds) { /* If the keywords include `sig` rename to `signature`. */ - PyObject* obj = _PyDict_GetItemStringWithError(normal_kwds, "sig"); - if (obj == NULL && PyErr_Occurred()) { + PyObject* obj = NULL; + int result = PyDict_GetItemStringRef(normal_kwds, "sig", &obj); + if (result == -1) { return -1; } - if (obj != NULL) { - /* - * No INCREF or DECREF needed: got a borrowed reference above, - * and, unlike e.g. PyList_SetItem, PyDict_SetItem INCREF's it. 
- */ + if (result == 1) { if (PyDict_SetItemString(normal_kwds, "signature", obj) < 0) { + Py_DECREF(obj); return -1; } + Py_DECREF(obj); if (PyDict_DelItemString(normal_kwds, "sig") < 0) { return -1; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 5c4f0d6cc293..c58d417773ba 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -33,7 +33,7 @@ #include #include "npy_config.h" - +#include "npy_pycompat.h" #include "npy_argparse.h" #include "numpy/arrayobject.h" @@ -142,8 +142,10 @@ PyUFunc_clearfperr() NPY_NO_EXPORT int set_matmul_flags(PyObject *d) { - PyObject *matmul = _PyDict_GetItemStringWithError(d, "matmul"); - if (matmul == NULL) { + PyObject *matmul = NULL; + int result = PyDict_GetItemStringRef(d, "matmul", &matmul); + if (result <= 0) { + // caller sets an error if one isn't already set return -1; } /* @@ -162,6 +164,7 @@ set_matmul_flags(PyObject *d) NPY_ITER_UPDATEIFCOPY | NPY_UFUNC_DEFAULT_OUTPUT_FLAGS) & ~NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE; + Py_DECREF(matmul); return 0; } diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index b8b920b50137..7c774f9fffc3 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -21,6 +21,7 @@ #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "numpy/npy_3kcompat.h" +#include "npy_pycompat.h" #include "abstract.h" #include "numpy/npy_math.h" @@ -303,29 +304,35 @@ int initumath(PyObject *m) * TODO: This should probably be done at a better place, or even in the * code generator directly. */ - s = _PyDict_GetItemStringWithError(d, "logical_and"); - if (s == NULL) { + int res = PyDict_GetItemStringRef(d, "logical_and", &s); + if (res <= 0) { return -1; } if (install_logical_ufunc_promoter(s) < 0) { + Py_DECREF(s); return -1; } + Py_DECREF(s); - s = _PyDict_GetItemStringWithError(d, "logical_or"); - if (s == NULL) { + res = PyDict_GetItemStringRef(d, "logical_or", &s); + if (res <= 0) { return -1; } if (install_logical_ufunc_promoter(s) < 0) { + Py_DECREF(s); return -1; } + Py_DECREF(s); - s = _PyDict_GetItemStringWithError(d, "logical_xor"); - if (s == NULL) { + res = PyDict_GetItemStringRef(d, "logical_xor", &s); + if (res <= 0) { return -1; } if (install_logical_ufunc_promoter(s) < 0) { + Py_DECREF(s); return -1; } + Py_DECREF(s); if (init_string_ufuncs(d) < 0) { return -1; From 4e82c893ad3777d25a5e92dd3effdd710e9e69cd Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 18 Apr 2024 06:27:49 +1000 Subject: [PATCH 217/980] BLD: use newer openblas wheels [wheel build] --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 0b7d0e63ff33..5e4a90f5fcbd 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.27.0.1 +scipy-openblas32==0.3.27.44.2 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index dd2725e6dba4..b9b98beea466 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.27.0.1 -scipy-openblas64==0.3.27.0.1 +scipy-openblas32==0.3.27.44.2 +scipy-openblas64==0.3.27.44.2 From 24e5db315de689bf56a74dcd2ed5e07df9c1b2f6 Mon Sep 
17 00:00:00 2001 From: Matti Picus Date: Thu, 18 Apr 2024 11:51:58 +1000 Subject: [PATCH 218/980] BLD: use newer openblas wheels [wheel build] --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 5e4a90f5fcbd..e134b0dae82e 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.27.44.2 +scipy-openblas32==0.3.27.44.3 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index b9b98beea466..f688bfb6eb3a 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.27.44.2 -scipy-openblas64==0.3.27.44.2 +scipy-openblas32==0.3.27.44.3 +scipy-openblas64==0.3.27.44.3 From 1807e08acf7fb945dd6de31d89ccc03a37451c4b Mon Sep 17 00:00:00 2001 From: Tuhin Sharma Date: Thu, 18 Apr 2024 14:51:44 +0530 Subject: [PATCH 219/980] DOC: add explanation of dtype to parameter values for np.append --- numpy/lib/_function_base_impl.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index ce661155dc4f..7cb67f341734 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5715,7 +5715,10 @@ def append(arr, values, axis=None): These values are appended to a copy of `arr`. It must be of the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is not specified, `values` can be any shape and will be - flattened before use. + flattened before use. If the dtype of `values` differs from the + dtype of `arr`, the two dtypes are compared to find a common + dtype to which both can be safely coerced. For example, `int64` + and `float64` can both be cast to `float64`. axis : int, optional The axis along which `values` are appended. If `axis` is not given, both `arr` and `values` are flattened before use. From c63969c6e1d58e791632aacfb88ecae465d6dcfc Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Thu, 11 Oct 2018 18:13:49 +0200 Subject: [PATCH 220/980] ENH: When histogramming data with integer dtype, force bin width >= 1. Bins of width < 1 don't make sense for integer data, they just add a bunch of spurious, unpopulated bins. (Perhaps an even better improvement would be to make sure that, when using integer data, the binwidth is also integer, so that each bin always covers the same number of possible values, but I guess that's possibly a more domain-specific issue.) Before the PR: In [1]: np.histogram_bin_edges(np.tile(np.arange(10), 1000), "auto") Out[1]: array([0. , 0.45, 0.9 , 1.35, 1.8 , 2.25, 2.7 , 3.15, 3.6 , 4.05, 4.5 , 4.95, 5.4 , 5.85, 6.3 , 6.75, 7.2 , 7.65, 8.1 , 8.55, 9.
]) After: In [1]: np.histogram_bin_edges(np.tile(np.arange(10), 1000), "auto") Out[1]: array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) --- .../upcoming_changes/12150.improvement.rst | 5 ++++ numpy/lib/_histograms_impl.py | 5 ++++ numpy/lib/tests/test_histograms.py | 26 ++++++++++++++++++- 3 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 doc/release/upcoming_changes/12150.improvement.rst diff --git a/doc/release/upcoming_changes/12150.improvement.rst b/doc/release/upcoming_changes/12150.improvement.rst new file mode 100644 index 000000000000..f73a6d2aaa28 --- /dev/null +++ b/doc/release/upcoming_changes/12150.improvement.rst @@ -0,0 +1,5 @@ +``histogram`` auto-binning now returns bin sizes >=1 for integer input data +--------------------------------------------------------------------------- +For integer input data, bin sizes smaller than 1 result in spurious empty +bins. This is now avoided when the number of bins is computed using one of the +algorithms provided by `histogram_bin_edges`. diff --git a/numpy/lib/_histograms_impl.py b/numpy/lib/_histograms_impl.py index a091d41a84c8..1439f49592fa 100644 --- a/numpy/lib/_histograms_impl.py +++ b/numpy/lib/_histograms_impl.py @@ -410,6 +410,8 @@ def _get_bin_edges(a, bins, range, weights): # Do not call selectors on empty arrays width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge)) if width: + if np.issubdtype(a.dtype, np.integer) and width < 1: + width = 1 n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width)) else: # Width can be zero for some estimators, e.g. FD when @@ -625,6 +627,9 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): The simplest and fastest estimator. Only takes into account the data size. + Additionally, if the data is of integer dtype, then the binwidth will never + be less than 1. + Examples -------- >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index 89758706d78f..09a1a5ab709d 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -469,7 +469,7 @@ def test_small(self): 'doane': 3, 'sqrt': 2, 'stone': 1}} for testlen, expectedResults in small_dat.items(): - testdat = np.arange(testlen) + testdat = np.arange(testlen).astype(float) for estimator, expbins in expectedResults.items(): a, b = np.histogram(testdat, estimator) assert_equal(len(a), expbins, err_msg="For the {0} estimator " @@ -592,6 +592,30 @@ def test_signed_integer_data(self, bins): assert_array_equal(hist, hist32) assert_array_equal(edges, edges32) + @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', + 'stone', 'rice', 'sturges']) + def test_integer(self, bins): + """ + Test that bin width for integer data is at least 1. + """ + with suppress_warnings() as sup: + if bins == 'stone': + sup.filter(RuntimeWarning) + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins), + np.arange(9)) + + def test_integer_non_auto(self): + """ + Test that the bin-width>=1 requirement *only* applies to auto binning. 
+ """ + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), 16), + np.arange(17) / 2) + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), [.1, .2]), + [.1, .2]) + def test_simple_weighted(self): """ Check that weighted data raises a TypeError From e99f012026ab832364e7594d1112533a2b8acf6a Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 18 Apr 2024 16:17:22 +0200 Subject: [PATCH 221/980] DOC: Fix versionadded/changed and slightly tweak --- doc/source/reference/c-api/array.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 26c650c5e669..7d1296cba8fe 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -3923,9 +3923,8 @@ are defined: The ``PY_ARRAY_UNIQUE_SYMBOL`` mechanism additionally mangles the names to avoid conflicts. - .. versionchanged:: - NumPy 2.0 exports the headers to avoid sharing the table outside of a + NumPy 2.1 changed the headers to avoid sharing the table outside of a single shared object/dll (this was always the case on Windows). Please see :c:macro:`NPY_API_SYMBOL_ATTRIBUTE` for details. @@ -3961,7 +3960,7 @@ the C-API is needed then some additional steps must be taken. .. c:macro:: NPY_API_SYMBOL_ATTRIBUTE - .. versionadded:: 2.0 + .. versionadded:: 2.1 An additional symbol which can be used to share e.g. visibility beyond shared object boundaries. From fd3a52b57d64ad9fb5dc4cc8bf3fe0a72da98286 Mon Sep 17 00:00:00 2001 From: Christopher Sidebottom Date: Thu, 18 Apr 2024 15:25:16 +0100 Subject: [PATCH 222/980] ENH: Bump Highway to HEAD and remove platform filter (#26273) This results in a pretty nice boost on the affected platforms --- numpy/_core/meson.build | 7 ++----- numpy/_core/src/highway | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index a5bfe3a6a6c7..1e60fa05cd95 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -96,10 +96,7 @@ if use_svml endif endif -use_highway = not get_option('disable-highway') and not ( - host_machine.system() == 'darwin' and - cpu_family == 'aarch64' -) +use_highway = not get_option('disable-highway') if use_highway and not fs.exists('src/highway/README.md') error('Missing the `highway` git submodule!
Run `git submodule update --init` to fix this.') endif @@ -108,7 +105,7 @@ if use_highway highway_lib = static_library('highway', [ # required for hwy::Abort symbol - 'src/highway/hwy/targets.cc' + 'src/highway/hwy/abort.cc' ], cpp_args: '-DTOOLCHAIN_MISS_ASM_HWCAP_H', include_directories: ['src/highway'], diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 58b52a717469..3af6ba57bf82 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 58b52a717469e62b2d9b8eaa2f5dddb44d4a4cbf +Subproject commit 3af6ba57bf82c861870f92f0483149439007d652 From ab719e1bdb5d73cabe7c063f45f21fd295bd2f9d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 16 Apr 2024 12:44:49 -0600 Subject: [PATCH 223/980] MNT: disable the coercion cache for the nogil build --- numpy/_core/src/multiarray/array_coercion.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index f63dbbc77e1f..443239274ea3 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -614,10 +614,13 @@ update_shape(int curr_ndim, int *max_ndim, return success; } - +#ifndef Py_GIL_DISABLED #define COERCION_CACHE_CACHE_SIZE 5 static int _coercion_cache_num = 0; static coercion_cache_obj *_coercion_cache_cache[COERCION_CACHE_CACHE_SIZE]; +#else +#define COERCION_CACHE_CACHE_SIZE 0 +#endif /* * Steals a reference to the object. @@ -628,11 +631,14 @@ npy_new_coercion_cache( coercion_cache_obj ***next_ptr, int ndim) { coercion_cache_obj *cache; +#if COERCION_CACHE_CACHE_SIZE > 0 if (_coercion_cache_num > 0) { _coercion_cache_num--; cache = _coercion_cache_cache[_coercion_cache_num]; } - else { + else +#endif + { cache = PyMem_Malloc(sizeof(coercion_cache_obj)); } if (cache == NULL) { @@ -661,11 +667,14 @@ npy_unlink_coercion_cache(coercion_cache_obj *current) { coercion_cache_obj *next = current->next; Py_DECREF(current->arr_or_sequence); +#if COERCION_CACHE_CACHE_SIZE > 0 if (_coercion_cache_num < COERCION_CACHE_CACHE_SIZE) { _coercion_cache_cache[_coercion_cache_num] = current; _coercion_cache_num++; } - else { + else +#endif + { PyMem_Free(current); } return next; From b3b5109100e5880ce03ca93a112b25bca6578ddb Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 18 Apr 2024 09:09:08 -0600 Subject: [PATCH 224/980] TST: add a test in a new test_multithreading module --- numpy/_core/tests/test_multithreading.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 numpy/_core/tests/test_multithreading.py diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py new file mode 100644 index 000000000000..0e35408ef6b2 --- /dev/null +++ b/numpy/_core/tests/test_multithreading.py @@ -0,0 +1,15 @@ +import concurrent.futures + +import numpy as np + + +def test_parallel_errstate_creation(): + # if the coercion cache is enabled and not thread-safe, creating + # RandomState instances simultaneously leads to a data race + def func(seed): + np.random.RandomState(seed) + + with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: + futures = [tpe.submit(func, i) for i in range(500)] + for f in futures: + f.result() From 1d976fc448cedc1033f9f928daed5fdeac7738ea Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 18 Apr 2024 10:06:14 -0600 Subject: [PATCH 225/980] TST: skip multithreading tests on WASM --- numpy/_core/tests/test_multithreading.py | 6 ++++++ 1 file 
changed, 6 insertions(+) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 0e35408ef6b2..8999a18a39ff 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -1,6 +1,12 @@ import concurrent.futures import numpy as np +import pytest + +from numpy.testing import IS_WASM + +if IS_WASM: + pytest.skip(allow_module_level=True, reason="no threading support in wasm") def test_parallel_errstate_creation(): From 265be4a2250f4f93b748b8e17345509fef6b2cf0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Apr 2024 17:43:00 +0000 Subject: [PATCH 226/980] MAINT: Bump actions/upload-artifact from 4.3.1 to 4.3.2 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.1 to 4.3.2. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/5d5d22a31266ced268874388b861e4b58bb5c2f3...1746f4ab65b179e0ea60a494b83293b640dd5bba) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/cygwin.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 5bc30262db01..ebf3792cc153 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -62,7 +62,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4.3.2 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index aef8222d9ea7..a3b73bf0b209 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. 
- name: "Upload artifact"
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+ uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4.3.2
with: name: SARIF file path: results.sarif
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index eb9919528b91..691006a64157 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -154,7 +154,7 @@ jobs: CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }}
- - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+ - uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4.3.2
with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl @@ -235,7 +235,7 @@ jobs: python -mpip install twine twine check dist/*
- - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+ - uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4.3.2
with: name: sdist path: ./dist/*
From 26df13cc6cb9cc6aae64d3bbd412d54d0aa079b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Thu, 18 Apr 2024 19:23:54 -0300 Subject: [PATCH 227/980] DOC: Follow-up fixes for new theme

Fixes spacing between logo and navbar section titles, and admonition colors. [skip azp] [skip cirrus]
--- doc/source/_static/numpy.css | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 203a282436ee..180dec530649 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -12,6 +12,13 @@ body { font-size: medium; }
+/* Making sure the navbar shows correctly in one line
+ Reduces the space between the top-left logo and the navbar section titles */
+
+.col-lg-3 {
+ width: 15%;
+}
+
/* Version switcher colors from PyData Sphinx Theme */ .version-switcher__button[data-active-version-name*="devdocs"] { @@ -58,12 +65,10 @@ div.admonition-legacy { border-color: var(--pst-color-warning); }
-.admonition>.admonition-title::after,
-div.admonition>.admonition-title::after {
+div.admonition-legacy>.admonition-title::after {
color: var(--pst-color-warning); }
-.admonition>.admonition-title,
-div.admonition>.admonition-title {
+div.admonition-legacy>.admonition-title {
background-color: var(--pst-color-warning-bg); } \ No newline at end of file
From 509985c5aa6420c83348ded13cdcab455093e449 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Fri, 19 Apr 2024 01:25:43 -0700 Subject: [PATCH 228/980] MAINT: address improper error handling and cleanup for `spin` (#26304)

I just came across that while trying to benchmark docs. It seems that when this was moved from subprocess.run to spin.util.run the error handling was not updated. spin.util.run seems to always return a CompletedProcess, and it has the stderr and return code. (Tested locally by changing towncrier to `false`.) While at it I removed an unused import and reformatted the pyproject.toml a tiny bit to only have 1 command per line for readability.
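A minimal standard-library sketch of the pattern this commit switches to: inspect the return code on the returned CompletedProcess instead of catching SubprocessError. This is illustrative only and not part of the patch; the command run here is a stand-in.

    import subprocess
    import sys

    # Run a command that fails; subprocess.run (like spin.util.run with
    # sys_exit=False) does not raise for a nonzero exit status.
    p = subprocess.run(
        [sys.executable, "-c", "import sys; sys.exit(3)"],
        capture_output=True,
        encoding="utf-8",
    )
    if p.returncode != 0:
        # stderr and the return code live on the completed process object
        raise RuntimeError(f"command failed ({p.returncode}): {p.stderr}")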
--- .spin/cmds.py | 16 +++------------- pyproject.toml | 6 ++++-- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 11e2b1b0e2d3..8bcdfe4c64ca 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -1,11 +1,7 @@ import os import shutil -import sys -import argparse -import tempfile import pathlib import shutil -import json import pathlib import importlib import subprocess @@ -621,16 +617,10 @@ def notes(ctx, version_override): ) # towncrier build --version 2.1 --yes cmd = ["towncrier", "build", "--version", version, "--yes"] - try: - p = util.run( - cmd=cmd, - sys_exit=False, - output=True, - encoding="utf-8" - ) - except subprocess.SubprocessError as e: + p = util.run(cmd=cmd, sys_exit=False, output=True, encoding="utf-8") + if p.returncode != 0: raise click.ClickException( - f"`towncrier` failed returned {e.returncode} with error `{e.stderr}`" + f"`towncrier` failed returned {p.returncode} with error `{p.stderr}`" ) output_path = project_config['tool.towncrier.filename'].format(version=version) diff --git a/pyproject.toml b/pyproject.toml index 4e069fee8972..b4df3c36d71f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -201,8 +201,10 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:lint", ] "Environments" = [ - "spin.cmds.meson.run", ".spin/cmds.py:ipython", - ".spin/cmds.py:python", "spin.cmds.meson.gdb", + "spin.cmds.meson.run", + ".spin/cmds.py:ipython", + ".spin/cmds.py:python", + "spin.cmds.meson.gdb", "spin.cmds.meson.lldb" ] "Documentation" = [ From 1dbc2535a08706c999cd5ee498cc46777863021e Mon Sep 17 00:00:00 2001 From: Tuhin Sharma Date: Fri, 19 Apr 2024 17:25:05 +0530 Subject: [PATCH 229/980] DOC: added markdown formatting for values and arr [skip azp][skip cirrus][skip actions] --- numpy/lib/_function_base_impl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 7cb67f341734..ead4fe9523a2 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5715,8 +5715,8 @@ def append(arr, values, axis=None): These values are appended to a copy of `arr`. It must be of the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is not specified, `values` can be any shape and will be - flattened before use. If dtype of values is different from the - dtype of arr then their dtypes are compared to figure out the + flattened before use. If dtype of `values` is different from the + dtype of `arr` then their dtypes are compared to figure out the common dtype they can both be safely coerced to. For e.g. `int64` and `float64` can both go to `float64`. 
axis : int, optional
From 320d036a2658bcc82d37095afdd58226e3c7f8da Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 19 Apr 2024 20:40:01 +0200 Subject: [PATCH 230/980] remove fast path for sqrt
--- numpy/_core/src/umath/loops_umath_fp.dispatch.c.src | 7 ------- numpy/_core/tests/test_umath.py | 1 - 2 files changed, 8 deletions(-)
diff --git a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src index 7c379e1612ce..8b6b06c4199a 100644 --- a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src @@ -246,13 +246,6 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) } return; }
- else if (in2 == 0.5) {
- BINARY_LOOP_SLIDING_ZERO_STRIDE {
- const @type@ in1 = *(@type@ *)ip1;
- *(@type@ *)op1 =@sqrt@(in1);
- }
- return;
- }
} #if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) const @type@ *src1 = (@type@*)args[0];
diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 429c3543add8..be3cdf2e8e06 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1246,7 +1246,6 @@ def test_power_fast_paths(self): expected = np.sqrt(a) result = np.power(a, 0.5)
- # needs to be fixed!
assert_array_max_ulp(result[:-1], expected[:-1].astype(dt), maxulp=1)
From 9d5683490db3f00fe4400f1d5b2772fda6fdc62b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 19 Apr 2024 14:18:16 -0600 Subject: [PATCH 231/980] BUG: use PyArray_SafeCast in array_astype
--- numpy/_core/src/multiarray/methods.c | 18 +++++++++++------- numpy/_core/tests/test_stringdtype.py | 7 +++++++ 2 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 62cc25e64c1b..adc1e53e24ab 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -811,8 +811,8 @@ array_astype(PyArrayObject *self, /* * If the memory layout matches and, data types are equivalent,
- * and it's not a subtype if subok is False, then we
- * can skip the copy.
+ * it's not a subtype if subok is False, and if the cast says
+ * a view is possible, we can skip the copy.
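+ * ("A view is possible" here means PyArray_SafeCast, called with
+ * NPY_NO_CASTING below, reports a view_offset other than NPY_MIN_INTP,
+ * i.e. the existing data buffer can be reinterpreted without copying.)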
*/ if (forcecopy != NPY_AS_TYPE_COPY_ALWAYS && (order == NPY_KEEPORDER || @@ -823,11 +823,15 @@ array_astype(PyArrayObject *self, PyArray_IS_C_CONTIGUOUS(self)) || (order == NPY_FORTRANORDER && PyArray_IS_F_CONTIGUOUS(self))) && - (subok || PyArray_CheckExact(self)) && - PyArray_EquivTypes(dtype, PyArray_DESCR(self))) { - Py_DECREF(dtype); - Py_INCREF(self); - return (PyObject *)self; + (subok || PyArray_CheckExact(self))) { + npy_intp view_offset; + npy_intp is_safe = PyArray_SafeCast(dtype, PyArray_DESCR(self), + &view_offset, NPY_NO_CASTING, 1); + if (is_safe && (view_offset != NPY_MIN_INTP)) { + Py_DECREF(dtype); + Py_INCREF(self); + return (PyObject *)self; + } } if (!PyArray_CanCastArrayTo(self, dtype, casting)) { diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 3d5ad7737081..b8176afc0c01 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -493,6 +493,13 @@ def test_create_with_copy_none(string_list): assert arr_view is arr +def test_astype_copy_false(): + orig_dt = StringDType() + arr = np.array(["hello", "world"], dtype=StringDType()) + assert not arr.astype(StringDType(coerce=False), copy=False).dtype.coerce + + assert arr.astype(orig_dt, copy=False).dtype is orig_dt + @pytest.mark.parametrize( "strings", [ From 9f6e842eb2121e0e29efbdba9cd7b892b7904849 Mon Sep 17 00:00:00 2001 From: KIU Shueng Chuan Date: Sat, 20 Apr 2024 12:02:52 +0800 Subject: [PATCH 232/980] asv: use os dependent pathsep when pre-pending to PATH --- .spin/cmds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 8bcdfe4c64ca..fbd876be9132 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -309,7 +309,7 @@ def _run_asv(cmd): '/usr/local/lib/ccache', '/usr/local/lib/f90cache' ]) env = os.environ - env['PATH'] = f'EXTRA_PATH:{PATH}' + env['PATH'] = f'{EXTRA_PATH}{os.pathsep}{PATH}' # Control BLAS/LAPACK threads env['OPENBLAS_NUM_THREADS'] = '1' From 6eebb376ede246f93dec2514a9c6d6d17721685e Mon Sep 17 00:00:00 2001 From: Arun Pa Date: Sat, 20 Apr 2024 16:02:26 +0530 Subject: [PATCH 233/980] add references to replaced by, replaces neps --- doc/neps/nep-0030-duck-array-protocol.rst | 2 +- doc/neps/nep-0031-uarray.rst | 2 +- doc/neps/nep-0037-array-module.rst | 2 +- doc/neps/nep-0047-array-api-standard.rst | 2 +- doc/neps/nep-0056-array-api-main-namespace.rst | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst index 358e280bd080..bb58eaf4fa24 100644 --- a/doc/neps/nep-0030-duck-array-protocol.rst +++ b/doc/neps/nep-0030-duck-array-protocol.rst @@ -7,7 +7,7 @@ NEP 30 — Duck typing for NumPy arrays - implementation :Author: Peter Andreas Entschev :Author: Stephan Hoyer :Status: Superseded -:Replaced-By: 56 +:Replaced-By: :ref:`NEP56` :Type: Standards Track :Created: 2019-07-31 :Updated: 2019-07-31 diff --git a/doc/neps/nep-0031-uarray.rst b/doc/neps/nep-0031-uarray.rst index 3a2354bfe3ff..cf06d1109c11 100644 --- a/doc/neps/nep-0031-uarray.rst +++ b/doc/neps/nep-0031-uarray.rst @@ -8,7 +8,7 @@ NEP 31 — Context-local and global overrides of the NumPy API :Author: Ralf Gommers :Author: Peter Bell :Status: Superseded -:Replaced-By: 56 +:Replaced-By: :ref:`NEP56` :Type: Standards Track :Created: 2019-08-22 :Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/message/Z6AA5CL47NHBNEPTFWYOTSUVSRDGHYPN/ diff --git a/doc/neps/nep-0037-array-module.rst 
b/doc/neps/nep-0037-array-module.rst index 248f1c79fd78..653141661421 100644 --- a/doc/neps/nep-0037-array-module.rst +++ b/doc/neps/nep-0037-array-module.rst @@ -8,7 +8,7 @@ NEP 37 — A dispatch protocol for NumPy-like modules :Author: Hameer Abbasi :Author: Sebastian Berg :Status: Superseded -:Replaced-By: 56 +:Replaced-By: :ref:`NEP56` :Type: Standards Track :Created: 2019-12-29 :Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/message/Z6AA5CL47NHBNEPTFWYOTSUVSRDGHYPN/ diff --git a/doc/neps/nep-0047-array-api-standard.rst b/doc/neps/nep-0047-array-api-standard.rst index 495d823f79bc..78191eabdbd3 100644 --- a/doc/neps/nep-0047-array-api-standard.rst +++ b/doc/neps/nep-0047-array-api-standard.rst @@ -8,7 +8,7 @@ NEP 47 — Adopting the array API standard :Author: Stephan Hoyer :Author: Aaron Meurer :Status: Superseded -:Replaced-By: 56 +:Replaced-By: :ref:`NEP56` :Type: Standards Track :Created: 2021-01-21 :Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/message/Z6AA5CL47NHBNEPTFWYOTSUVSRDGHYPN/ diff --git a/doc/neps/nep-0056-array-api-main-namespace.rst b/doc/neps/nep-0056-array-api-main-namespace.rst index 028370877466..5fb8ad250a81 100644 --- a/doc/neps/nep-0056-array-api-main-namespace.rst +++ b/doc/neps/nep-0056-array-api-main-namespace.rst @@ -8,7 +8,7 @@ NEP 56 — Array API standard support in NumPy's main namespace :Author: Mateusz Sokół :Author: Nathan Goldbaum :Status: Final -:Replaces: 30, 31, 37, 47 +:Replaces: :ref:`NEP30`, :ref:`NEP31`, :ref:`NEP37`, :ref:`NEP47` :Type: Standards Track :Created: 2023-12-19 :Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/message/Z6AA5CL47NHBNEPTFWYOTSUVSRDGHYPN/ From b680d4de5847d5e270ef33b9edc3a9df04806fd2 Mon Sep 17 00:00:00 2001 From: Arun Pa Date: Sat, 20 Apr 2024 16:03:34 +0530 Subject: [PATCH 234/980] use regex to get nep ids from tags --- doc/neps/tools/build_index.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/doc/neps/tools/build_index.py b/doc/neps/tools/build_index.py index 68212e110e8b..e8ca86e68c13 100644 --- a/doc/neps/tools/build_index.py +++ b/doc/neps/tools/build_index.py @@ -75,7 +75,7 @@ def nep_metadata(): f'NEP {nr} has been Superseded, but has no Replaced-By tag' ) - replaced_by = int(tags['Replaced-By']) + replaced_by = int(re.findall(r'\d+', tags['Replaced-By'])[0]) replacement_nep = neps[replaced_by] if not 'Replaces' in replacement_nep: @@ -105,13 +105,8 @@ def nep_metadata(): def parse_replaces_metadata(replacement_nep): """Handle :Replaces: as integer or list of integers""" - replaces = replacement_nep['Replaces'] - if ' ' in replaces: - # Replaces multiple NEPs, should be comma-separated ints - replaced_neps = [int(s) for s in replaces.split(', ')] - else: - replaced_neps = [int(replaces)] - + replaces = re.findall(r'\d+', replacement_nep['Replaces']) + replaced_neps = [int(s) for s in replaces] return replaced_neps From 0cbc44956204d238f58d129f659f7a8e093edcb5 Mon Sep 17 00:00:00 2001 From: ajay kumar janapareddi Date: Sat, 20 Apr 2024 03:39:40 -0700 Subject: [PATCH 235/980] documented behavior for converting month/year timedelta64 to days --- doc/source/reference/arrays.datetime.rst | 30 ++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 8a7b648281ba..9641ba321c84 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ 
-205,8 +205,12 @@ simple datetime calculations. There are two Timedelta units ('Y', years and 'M', months) which are treated specially, because how much time they represent changes depending on when they are used. While a timedelta day unit is equivalent to
-24 hours, there is no way to convert a month unit into days, because
-different months have different numbers of days.
+24 hours, month and year units cannot be converted directly into days
+without using 'unsafe' casting.
+
+The `numpy.ndarray.astype` method can be used for unsafe
+conversion of months/years to days. The conversion uses the
+average lengths computed from the 400-year leap-year cycle.
.. admonition:: Example @@ -219,6 +223,28 @@ different months have different numbers of days. Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: Cannot cast NumPy timedelta64 scalar from metadata [Y] to [D] according to the rule 'same_kind'
+
+ Use `astype` to convert the month unit to days with unsafe casting.
+ The 400-year leap-year cycle has 400*365 + 97 days and 400*12 months,
+ so each month has approximately 30.4369 days, rounded to an integer value of 30.
+
+ >>> np.timedelta64(1, 'M').astype(np.timedelta64(1, 'D')) # The default casting for astype is 'unsafe'.
+ numpy.timedelta64(30,'D')
+
+ Similarly, 12 years in the 400-year leap-year cycle is equivalent to
+ 4382.91 days, rounded to an integer value of 4382.
+
+ >>> np.timedelta64(12, 'Y').astype(np.timedelta64(1, 'D'))
+ numpy.timedelta64(4382,'D')
+
+ Safe casting cannot be used to convert the month unit to days,
+ as different months have different numbers of days.
+
+ >>> np.timedelta64(1, 'M').astype(np.timedelta64(1, 'D'), casting='safe')
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ TypeError: Cannot cast scalar from dtype('<m8[M]') to dtype('<m8[D]') according to the rule 'safe'
From: Tuhin Sharma Date: Sun, 21 Apr 2024 09:22:09 +0530 Subject: [PATCH 236/980] DOC: added one example with mixed dtype [skip azp][skip cirrus][skip actions]
--- numpy/lib/_function_base_impl.py | 9 +++++++++ 1 file changed, 9 insertions(+)
diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index ead4fe9523a2..02ae34267566 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5746,6 +5746,7 @@ def append(arr, values, axis=None): array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) Traceback (most recent call last): ... ValueError: all the input array dimensions except for the concatenation axis must match exactly, but along dimension 0, the array at index 0 has 2 dimension(s) and the array at index 1 has 1 dimension(s)
+ >>> a = np.array([1, 2], dtype=int)
+ >>> b = []
+ >>> np.append(a, b)
+ array([1., 2.])
+
+ Default dtype for empty lists is `float64` thus making the output of dtype
+ `float64` when appended with dtype `int64`
+
""" arr = asanyarray(arr) if axis is None:
From 04890824f0d1d2de70947ed8185b0760e882cac0 Mon Sep 17 00:00:00 2001 From: Tuhin Sharma Date: Sun, 21 Apr 2024 09:29:52 +0530 Subject: [PATCH 237/980] DOC: added Notes section to document mixed dtype effect on np.append [skip azp][skip cirrus][skip actions]
--- numpy/lib/_function_base_impl.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 02ae34267566..29c9097e2a0c 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5715,10 +5715,7 @@ def append(arr, values, axis=None): These values are appended to a copy of `arr`.
It must be of the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is not specified, `values` can be any shape and will be
- flattened before use. If dtype of `values` is different from the
- dtype of `arr` then their dtypes are compared to figure out the
- common dtype they can both be safely coerced to. For e.g. `int64`
- and `float64` can both go to `float64`.
+ flattened before use.
axis : int, optional The axis along which `values` are appended. If `axis` is not given, both `arr` and `values` are flattened before use. @@ -5735,6 +5732,13 @@ def append(arr, values, axis=None): insert : Insert elements into an array. delete : Delete elements from an array.
+ Notes
+ -----
+ If dtype of `values` is different from the dtype of `arr` then
+ their dtypes are compared to figure out the common dtype they can
+ both be safely coerced to. For example, `int64` and `float64` can both
+ go to `float64`. See the examples section below.
+
Examples -------- >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
From e02cb0263cd8603cc633f4ae80ea761647d22c80 Mon Sep 17 00:00:00 2001 From: Tuhin Sharma Date: Sun, 21 Apr 2024 09:33:45 +0530 Subject: [PATCH 238/980] DOC: enhanced example with c.dtype [skip azp][skip cirrus][skip actions]
--- numpy/lib/_function_base_impl.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 29c9097e2a0c..9a1ee7a64aed 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5760,8 +5760,11 @@ def append(arr, values, axis=None): >>> a = np.array([1, 2], dtype=int)
- >>> b = []
- >>> np.append(a, b)
+ >>> b = []
+ >>> c = np.append(a, b)
+ >>> c
array([1., 2.])
+ >>> c.dtype
+ dtype('float64')
Default dtype for empty lists is `float64` thus making the output of dtype `float64` when appended with dtype `int64`
From 12a48015c9160780a7fea9a7c69fe72be698607d Mon Sep 17 00:00:00 2001 From: Romain Geissler Date: Sun, 21 Apr 2024 21:12:33 +0000 Subject: [PATCH 239/980] BUG: Fix invalid constructor in string_fastsearch.h with C++ >= 20.

This is an error with -std=gnu++20 at least with gcc 11 to 13, and will be an explicit warning with gcc >= 14.
See https://godbolt.org/z/YdoTc7r8f In practice, it avoids these compilation errors: In file included from ../numpy/_core/src/umath/string_ufuncs.cpp:20: ../numpy/_core/src/umath/string_fastsearch.h:63:31: error: expected unqualified-id before ')' token 63 | CheckedIndexer<char_type>() | ^ ../numpy/_core/src/umath/string_fastsearch.h:69:40: error: expected ')' before '*' token 69 | CheckedIndexer<char_type>(char_type *buf, size_t len) | ~ ^~ |
--- numpy/_core/src/umath/string_fastsearch.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/numpy/_core/src/umath/string_fastsearch.h b/numpy/_core/src/umath/string_fastsearch.h index 33563b7007c2..61abdcb5ad19 100644 --- a/numpy/_core/src/umath/string_fastsearch.h +++ b/numpy/_core/src/umath/string_fastsearch.h @@ -60,13 +60,13 @@ struct CheckedIndexer { char_type *buffer; size_t length;
- CheckedIndexer<char_type>()
+ CheckedIndexer()
{ buffer = NULL; length = 0; }
- CheckedIndexer<char_type>(char_type *buf, size_t len)
+ CheckedIndexer(char_type *buf, size_t len)
{ buffer = buf; length = len;
From 853c42bd580170050073b06296abf9cf3677a0aa Mon Sep 17 00:00:00 2001 From: Tuhin Sharma Date: Mon, 22 Apr 2024 11:02:13 +0530 Subject: [PATCH 240/980] DOC: remove Notes section and refine Example [skip azp][skip cirrus][skip actions]
--- numpy/lib/_function_base_impl.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 9a1ee7a64aed..802ce1c66817 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5732,13 +5732,6 @@ def append(arr, values, axis=None): insert : Insert elements into an array. delete : Delete elements from an array.
- Notes
- -----
- If dtype of `values` is different from the dtype of `arr` then
- their dtypes are compared to figure out the common dtype they can
- both be safely coerced to. For example, `int64` and `float64` can both
- go to `float64`. See the examples section below.
-
Examples -------- >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) @@ -5759,14 +5752,13 @@ def append(arr, values, axis=None): dimension(s) >>> a = np.array([1, 2], dtype=int)
- >>> b = []
- >>> c = np.append(a, b)
+ >>> c = np.append(a, [])
>>> c array([1., 2.]) >>> c.dtype dtype('float64')
- Default dtype for empty lists is `float64` thus making the output of dtype
+ The default dtype for empty ndarrays is `float64`, thus making the output of dtype
`float64` when appended with dtype `int64`
From 46d72939a52ce327415c3d140fb5f640c647f4fa Mon Sep 17 00:00:00 2001 From: Arun Pa Date: Mon, 22 Apr 2024 22:36:07 +0530 Subject: [PATCH 241/980] add example
--- doc/neps/nep-0000.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/doc/neps/nep-0000.rst b/doc/neps/nep-0000.rst index 7b73b4741f27..cd6dfd4941e7 100644 --- a/doc/neps/nep-0000.rst +++ b/doc/neps/nep-0000.rst @@ -146,7 +146,8 @@ thread in the mailing list archives. NEPs can also be ``Superseded`` by a different NEP, rendering the original obsolete. The ``Replaced-By`` and ``Replaces`` headers
-should be added to the original and new NEPs respectively.
+containing references to the original and new NEPs (written like
+``:ref:`NEP#number```) should be added respectively.
Process NEPs may also have a status of ``Active`` if they are never meant to be completed, e.g. NEP 0 (this NEP).
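For reference, the regex introduced in build_index.py earlier in this series is what lets both header styles above resolve to NEP numbers. A quick sketch (illustrative only, not part of the patch):

    import re

    # A bare number and the new :ref:`NEP...` style both yield the id(s).
    for tag in ("56", ":ref:`NEP56`", ":ref:`NEP30`, :ref:`NEP31`"):
        print([int(s) for s in re.findall(r"\d+", tag)])
    # -> [56], [56], [30, 31]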
From 53993e88eaf8a6de7b3d21052e3584bfb1ed5f1c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 17:27:08 +0000 Subject: [PATCH 242/980] MAINT: Bump actions/upload-artifact from 4.3.2 to 4.3.3 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.2 to 4.3.3. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/1746f4ab65b179e0ea60a494b83293b640dd5bba...65462800fd760344b1a7b4382951275a0abb4808) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/cygwin.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index ebf3792cc153..ce43d807f8f0 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -62,7 +62,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4.3.2 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index a3b73bf0b209..9fc7e4757afe 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. 
- name: "Upload artifact" - uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4.3.2 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 691006a64157..fddf3510aa5e 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -154,7 +154,7 @@ jobs: CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4.3.2 + - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl @@ -235,7 +235,7 @@ jobs: python -mpip install twine twine check dist/* - - uses: actions/upload-artifact@1746f4ab65b179e0ea60a494b83293b640dd5bba # v4.3.2 + - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: sdist path: ./dist/* From 418f0eee41040f43d011ed8f74de677c8d15d9f0 Mon Sep 17 00:00:00 2001 From: thalassemia Date: Sun, 21 Apr 2024 17:59:13 -0700 Subject: [PATCH 243/980] TST: Skip Cython test for editable install [skip circle] --- numpy/random/tests/test_extending.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index a4a84de2ee7c..087741a07a3d 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -46,6 +46,10 @@ cython = None +@pytest.mark.skipif( + 'editable' in np.__path__[0], + reason='Editable install cannot find .pxd headers' +) @pytest.mark.skipif( sys.platform == "win32" and sys.maxsize < 2**32, reason="Failing in 32-bit Windows wheel build job, skip for now" From 1ffef9ff95e24230332e1cba9593959461931ca9 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 23 Apr 2024 09:56:06 +0200 Subject: [PATCH 244/980] TST: add `IS_EDITABLE` to numpy.testing Also make it work with meson-python 0.15.0 (that returns an empty list for `__path__`). 
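For context, a rough interactive check of how such a flag is derived (illustrative only, mirroring the `IS_EDITABLE` definition added in this commit):

    import numpy as np

    # An editable (meson-python) install either reports an empty __path__
    # or a path containing an "editable" marker.
    is_editable = not bool(np.__path__) or "editable" in np.__path__[0]
    print(is_editable)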
--- numpy/random/tests/test_extending.py | 4 ++-- numpy/testing/_private/utils.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index 087741a07a3d..791fbaba9850 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -10,7 +10,7 @@ import warnings import numpy as np -from numpy.testing import IS_WASM +from numpy.testing import IS_WASM, IS_EDITABLE try: @@ -47,7 +47,7 @@ @pytest.mark.skipif( - 'editable' in np.__path__[0], + IS_EDITABLE, reason='Editable install cannot find .pxd headers' ) @pytest.mark.skipif( diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 4570cbf01420..a913c1a69f88 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -39,7 +39,8 @@ 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', - '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE', 'NOGIL_BUILD' + '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE', 'NOGIL_BUILD', + 'IS_EDITABLE' ] @@ -54,6 +55,7 @@ class KnownFailureException(Exception): IS_WASM = platform.machine() in ["wasm32", "wasm64"] IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") +IS_EDITABLE = not bool(np.__path__) or 'editable' in np.__path__[0] HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 From e2665a53414ecea7774f3b43bb88eb027ce9b1cc Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 23 Apr 2024 10:05:03 +0200 Subject: [PATCH 245/980] TST: pytest.ini: don't pick up pythonapi-compat test in editable mode --- pytest.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytest.ini b/pytest.ini index 33bb8bd1e5b1..71542643e170 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,6 +1,6 @@ [pytest] addopts = -l -norecursedirs = doc tools numpy/linalg/lapack_lite numpy/_core/code_generators +norecursedirs = doc tools numpy/linalg/lapack_lite numpy/_core/code_generators numpy/_core/src/common/pythoncapi-compat doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS ALLOW_UNICODE ALLOW_BYTES junit_family=xunit2 From 208b343a53765ffeccbbbfc5164fb65d291e03c8 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 23 Apr 2024 10:28:04 +0200 Subject: [PATCH 246/980] TST: skip other tests that don't work under editable install The main reason is that tests with a compile step cannot work, because headers aren't found. The distutils test could be made to work, but that isn't worth the effort since it's deprecated code. 
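All of these skips use the same module-level idiom; a minimal sketch (illustrative only; it assumes a NumPy version exporting `IS_EDITABLE`, as added above):

    import pytest
    from numpy.testing import IS_EDITABLE, IS_WASM

    # Skipping at module level stops pytest from collecting any test in
    # this file, instead of marking each test individually.
    if IS_WASM or IS_EDITABLE:
        pytest.skip(
            "tests with a compile step are unsupported on this build/install",
            allow_module_level=True,
        )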
--- numpy/_core/tests/test_array_interface.py | 4 +++- numpy/_core/tests/test_cython.py | 9 ++++++++- numpy/_core/tests/test_limited_api.py | 9 ++++++++- numpy/_core/tests/test_mem_policy.py | 5 ++++- numpy/_pyinstaller/tests/__init__.py | 16 ++++++++++++++++ numpy/distutils/tests/test_misc_util.py | 8 +++++++- numpy/f2py/tests/__init__.py | 9 ++++++++- 7 files changed, 54 insertions(+), 6 deletions(-) diff --git a/numpy/_core/tests/test_array_interface.py b/numpy/_core/tests/test_array_interface.py index f8e0dfad64c5..f049eea55d8a 100644 --- a/numpy/_core/tests/test_array_interface.py +++ b/numpy/_core/tests/test_array_interface.py @@ -1,7 +1,7 @@ import sys import pytest import numpy as np -from numpy.testing import extbuild, IS_WASM +from numpy.testing import extbuild, IS_WASM, IS_EDITABLE @pytest.fixture @@ -14,6 +14,8 @@ def get_module(tmp_path): pytest.skip('link fails on cygwin') if IS_WASM: pytest.skip("Can't build module inside Wasm") + if IS_EDITABLE: + pytest.skip("Can't build module for editable install") prologue = ''' #include diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index e2bde4ded0eb..26a1fafa0066 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -7,7 +7,7 @@ import pytest import numpy as np -from numpy.testing import assert_array_equal, IS_WASM +from numpy.testing import assert_array_equal, IS_WASM, IS_EDITABLE # This import is copied from random.tests.test_extending try: @@ -27,6 +27,13 @@ pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) + + @pytest.fixture(scope='module') def install_temp(tmpdir_factory): # Based in part on test_cython from random.tests.test_extending diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index f88164b9db91..9b13208d81af 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -5,7 +5,7 @@ import sysconfig import pytest -from numpy.testing import IS_WASM, IS_PYPY, NOGIL_BUILD +from numpy.testing import IS_WASM, IS_PYPY, NOGIL_BUILD, IS_EDITABLE # This import is copied from random.tests.test_extending try: @@ -25,6 +25,13 @@ pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) + + @pytest.fixture(scope='module') def install_temp(tmpdir_factory): # Based in part on test_cython from random.tests.test_extending diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 9cbbedeca0f5..3c7f6bb34661 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -8,7 +8,7 @@ import pytest import numpy as np -from numpy.testing import extbuild, assert_warns, IS_WASM +from numpy.testing import extbuild, assert_warns, IS_WASM, IS_EDITABLE from numpy._core.multiarray import get_handler_name @@ -28,6 +28,9 @@ def get_module(tmp_path): pytest.skip('link fails on cygwin') if IS_WASM: pytest.skip("Can't build module inside Wasm") + if IS_EDITABLE: + pytest.skip("Can't build module for editable install") + functions = [ ("get_default_policy", "METH_NOARGS", """ Py_INCREF(PyDataMem_DefaultHandler); diff --git a/numpy/_pyinstaller/tests/__init__.py b/numpy/_pyinstaller/tests/__init__.py index e69de29bb2d1..f7c033bcf503 100644 --- 
a/numpy/_pyinstaller/tests/__init__.py +++ b/numpy/_pyinstaller/tests/__init__.py @@ -0,0 +1,16 @@ +from numpy.testing import IS_WASM, IS_EDITABLE +import pytest + + +if IS_WASM: + pytest.skip( + "WASM/Pyodide does not use or support Fortran", + allow_module_level=True + ) + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py index 605c80483b77..40e7606eeb76 100644 --- a/numpy/distutils/tests/test_misc_util.py +++ b/numpy/distutils/tests/test_misc_util.py @@ -1,10 +1,12 @@ from os.path import join, sep, dirname +import pytest + from numpy.distutils.misc_util import ( appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info ) from numpy.testing import ( - assert_, assert_equal + assert_, assert_equal, IS_EDITABLE ) ajoin = lambda *paths: join(*((sep,)+paths)) @@ -73,6 +75,10 @@ def test_get_shared_lib_extension(self): assert_(get_shared_lib_extension(is_python_ext=True)) +@pytest.mark.skipif( + IS_EDITABLE, + reason="`get_info` .ini lookup method incompatible with editable install" +) def test_installed_npymath_ini(): # Regression test for gh-7707. If npymath.ini wasn't installed, then this # will give an error. diff --git a/numpy/f2py/tests/__init__.py b/numpy/f2py/tests/__init__.py index b07a4e724282..5ecb68077b94 100644 --- a/numpy/f2py/tests/__init__.py +++ b/numpy/f2py/tests/__init__.py @@ -1,4 +1,4 @@ -from numpy.testing import IS_WASM +from numpy.testing import IS_WASM, IS_EDITABLE import pytest if IS_WASM: @@ -6,3 +6,10 @@ "WASM/Pyodide does not use or support Fortran", allow_module_level=True ) + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) From c1cf2dac9831df11f462f84f7edc2ee548376128 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Tue, 2 Apr 2024 14:53:52 -0700 Subject: [PATCH 247/980] Use the new static methods API from x86-simd-sort --- .../src/npysort/x86_simd_argsort.dispatch.cpp | 55 +++++------------- .../src/npysort/x86_simd_qsort.dispatch.cpp | 58 +++++-------------- .../npysort/x86_simd_qsort_16bit.dispatch.cpp | 21 +++---- 3 files changed, 33 insertions(+), 101 deletions(-) diff --git a/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp index 3083d6a8bf23..04716ebfe81c 100644 --- a/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp @@ -1,86 +1,57 @@ #include "x86_simd_qsort.hpp" #ifndef __CYGWIN__ -#if defined(NPY_HAVE_AVX512_SKX) -#include "x86-simd-sort/src/avx512-64bit-argsort.hpp" -#elif defined(NPY_HAVE_AVX2) -#include "x86-simd-sort/src/avx2-32bit-half.hpp" -#include "x86-simd-sort/src/avx2-32bit-qsort.hpp" -#include "x86-simd-sort/src/avx2-64bit-qsort.hpp" -#include "x86-simd-sort/src/xss-common-argsort.h" -#endif - -namespace { -template -void x86_argsort(T* arr, size_t* arg, npy_intp num) -{ -#if defined(NPY_HAVE_AVX512_SKX) - avx512_argsort(arr, arg, num, true); -#elif defined(NPY_HAVE_AVX2) - avx2_argsort(arr, arg, num, true); -#endif -} - -template -void x86_argselect(T* arr, size_t* arg, npy_intp kth, npy_intp num) -{ -#if defined(NPY_HAVE_AVX512_SKX) - avx512_argselect(arr, arg, kth, num, true); -#elif defined(NPY_HAVE_AVX2) - avx2_argselect(arr, arg, kth, num, true); -#endif -} -} // anonymous +#include "x86-simd-sort/src/x86simdsort-static-incl.h" namespace np { 
namespace qsort_simd { template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(int32_t *arr, npy_intp* arg, npy_intp num, npy_intp kth) { - x86_argselect(arr, reinterpret_cast(arg), kth, num); + x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num); } template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(uint32_t *arr, npy_intp* arg, npy_intp num, npy_intp kth) { - x86_argselect(arr, reinterpret_cast(arg), kth, num); + x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num); } template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(int64_t*arr, npy_intp* arg, npy_intp num, npy_intp kth) { - x86_argselect(arr, reinterpret_cast(arg), kth, num); + x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num); } template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(uint64_t*arr, npy_intp* arg, npy_intp num, npy_intp kth) { - x86_argselect(arr, reinterpret_cast(arg), kth, num); + x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num); } template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(float *arr, npy_intp* arg, npy_intp num, npy_intp kth) { - x86_argselect(arr, reinterpret_cast(arg), kth, num); + x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num, true); } template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(double *arr, npy_intp* arg, npy_intp num, npy_intp kth) { - x86_argselect(arr, reinterpret_cast(arg), kth, num); + x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num, true); } template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(int32_t *arr, npy_intp *arg, npy_intp size) { - x86_argsort(arr, reinterpret_cast(arg), size); + x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size); } template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(uint32_t *arr, npy_intp *arg, npy_intp size) { - x86_argsort(arr, reinterpret_cast(arg), size); + x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size); } template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(int64_t *arr, npy_intp *arg, npy_intp size) { - x86_argsort(arr, reinterpret_cast(arg), size); + x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size); } template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(uint64_t *arr, npy_intp *arg, npy_intp size) { - x86_argsort(arr, reinterpret_cast(arg), size); + x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size); } template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(float *arr, npy_intp *arg, npy_intp size) { - x86_argsort(arr, reinterpret_cast(arg), size); + x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size, true); } template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(double *arr, npy_intp *arg, npy_intp size) { - x86_argsort(arr, reinterpret_cast(arg), size); + x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size, true); } }} // namespace np::simd diff --git a/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp index ea4516408c56..856ca0153875 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp @@ -1,89 +1,57 @@ #include "x86_simd_qsort.hpp" #ifndef __CYGWIN__ -#if defined(NPY_HAVE_AVX512_SKX) - #include "x86-simd-sort/src/avx512-32bit-qsort.hpp" - #include "x86-simd-sort/src/avx512-64bit-qsort.hpp" - #include "x86-simd-sort/src/avx512-64bit-argsort.hpp" -#elif defined(NPY_HAVE_AVX2) - #include "x86-simd-sort/src/avx2-32bit-qsort.hpp" - #include "x86-simd-sort/src/avx2-64bit-qsort.hpp" -#endif - -namespace { -template -void x86_qsort(T* arr, npy_intp num) -{ -#if defined(NPY_HAVE_AVX512_SKX) - avx512_qsort(arr, 
num, true); -#elif defined(NPY_HAVE_AVX2) - avx2_qsort(arr, num, true); -#endif -} - -template -void x86_qselect(T* arr, npy_intp num, npy_intp kth) -{ -#if defined(NPY_HAVE_AVX512_SKX) - avx512_qselect(arr, kth, num, true); -#elif defined(NPY_HAVE_AVX2) - avx2_qselect(arr, kth, num, true); -#endif -} -} // anonymous +#include "x86-simd-sort/src/x86simdsort-static-incl.h" namespace np { namespace qsort_simd { -#if defined(NPY_HAVE_AVX512_SKX) || defined(NPY_HAVE_AVX2) template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int32_t *arr, npy_intp num, npy_intp kth) { - x86_qselect(arr, num, kth); + x86simdsortStatic::qselect(arr, kth, num); } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint32_t *arr, npy_intp num, npy_intp kth) { - x86_qselect(arr, num, kth); + x86simdsortStatic::qselect(arr, kth, num); } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int64_t*arr, npy_intp num, npy_intp kth) { - x86_qselect(arr, num, kth); + x86simdsortStatic::qselect(arr, kth, num); } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint64_t*arr, npy_intp num, npy_intp kth) { - x86_qselect(arr, num, kth); + x86simdsortStatic::qselect(arr, kth, num); } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(float *arr, npy_intp num, npy_intp kth) { - x86_qselect(arr, num, kth); + x86simdsortStatic::qselect(arr, kth, num, true); } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(double *arr, npy_intp num, npy_intp kth) { - x86_qselect(arr, num, kth); + x86simdsortStatic::qselect(arr, kth, num, true); } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int32_t *arr, npy_intp num) { - x86_qsort(arr, num); + x86simdsortStatic::qsort(arr, num); } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint32_t *arr, npy_intp num) { - x86_qsort(arr, num); + x86simdsortStatic::qsort(arr, num); } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int64_t *arr, npy_intp num) { - x86_qsort(arr, num); + x86simdsortStatic::qsort(arr, num); } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint64_t *arr, npy_intp num) { - x86_qsort(arr, num); + x86simdsortStatic::qsort(arr, num); } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(float *arr, npy_intp num) { - x86_qsort(arr, num); + x86simdsortStatic::qsort(arr, num, true); } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(double *arr, npy_intp num) { - x86_qsort(arr, num); + x86simdsortStatic::qsort(arr, num, true); } -#endif // NPY_HAVE_AVX512_SKX || NPY_HAVE_AVX2 - }} // namespace np::qsort_simd #endif // __CYGWIN__ diff --git a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp index 8222fc77cae3..1968890cae99 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp @@ -1,23 +1,17 @@ #include "x86_simd_qsort.hpp" #ifndef __CYGWIN__ -#if defined(NPY_HAVE_AVX512_SPR) - #include "x86-simd-sort/src/avx512fp16-16bit-qsort.hpp" - #include "x86-simd-sort/src/avx512-16bit-qsort.hpp" -#elif defined(NPY_HAVE_AVX512_ICL) - #include "x86-simd-sort/src/avx512-16bit-qsort.hpp" -#endif +#include "x86-simd-sort/src/x86simdsort-static-incl.h" namespace np { namespace qsort_simd { /* * QSelect dispatch functions: */ -#if defined(NPY_HAVE_AVX512_ICL) || defined(NPY_HAVE_AVX512_SPR) template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(Half *arr, npy_intp num, npy_intp kth) { #if defined(NPY_HAVE_AVX512_SPR) - avx512_qselect(reinterpret_cast<_Float16*>(arr), kth, num, true); + x86simdsortStatic::qselect(reinterpret_cast<_Float16*>(arr), kth, num, true); #else 
avx512_qselect_fp16(reinterpret_cast(arr), kth, num, true); #endif @@ -25,12 +19,12 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(Half *arr, npy_intp num, npy_int template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint16_t *arr, npy_intp num, npy_intp kth) { - avx512_qselect(arr, kth, num); + x86simdsortStatic::qselect(arr, kth, num); } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int16_t *arr, npy_intp num, npy_intp kth) { - avx512_qselect(arr, kth, num); + x86simdsortStatic::qselect(arr, kth, num); } /* @@ -39,20 +33,19 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int16_t *arr, npy_intp num, npy_ template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, npy_intp size) { #if defined(NPY_HAVE_AVX512_SPR) - avx512_qsort(reinterpret_cast<_Float16*>(arr), size, true); + x86simdsortStatic::qsort(reinterpret_cast<_Float16*>(arr), size, true); #else avx512_qsort_fp16(reinterpret_cast(arr), size, true); #endif } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t *arr, npy_intp size) { - avx512_qsort(arr, size); + x86simdsortStatic::qsort(arr, size); } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int16_t *arr, npy_intp size) { - avx512_qsort(arr, size); + x86simdsortStatic::qsort(arr, size); } -#endif // NPY_HAVE_AVX512_ICL || SPR }} // namespace np::qsort_simd From 1f2dab335ce4b4691495ae03121b9c05bdc1396f Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Tue, 2 Apr 2024 15:06:44 -0700 Subject: [PATCH 248/980] MAINT: use macro to reduce duplicate code --- .../src/npysort/highway_qsort.dispatch.cpp | 35 ++++------ .../src/npysort/x86_simd_argsort.dispatch.cpp | 64 +++++-------------- .../src/npysort/x86_simd_qsort.dispatch.cpp | 64 +++++-------------- 3 files changed, 44 insertions(+), 119 deletions(-) diff --git a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp index 1c06eb5755c7..1e2a6f59c5cc 100644 --- a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp @@ -2,31 +2,20 @@ #define VQSORT_ONLY_STATIC 1 #include "hwy/contrib/sort/vqsort-inl.h" +#define DISPATCH_VQSORT(TYPE) \ +template<> void NPY_CPU_DISPATCH_CURFX(QSort)(TYPE *arr, intptr_t size) \ +{ \ + hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); \ +} \ + namespace np { namespace highway { namespace qsort_simd { -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int32_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint32_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int64_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint64_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(float *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(double *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); + DISPATCH_VQSORT(int32_t) + DISPATCH_VQSORT(uint32_t) + DISPATCH_VQSORT(int64_t) + DISPATCH_VQSORT(uint64_t) + DISPATCH_VQSORT(double) + DISPATCH_VQSORT(float) } } } } // np::highway::qsort_simd diff --git a/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp 
b/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp index 04716ebfe81c..04bb03532719 100644 --- a/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp @@ -3,56 +3,24 @@ #include "x86-simd-sort/src/x86simdsort-static-incl.h" +#define DISPATCH_ARG_METHODS(TYPE) \ +template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(TYPE* arr, npy_intp* arg, npy_intp num, npy_intp kth) \ +{ \ + x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num, true); \ +} \ +template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(TYPE* arr, npy_intp *arg, npy_intp size) \ +{ \ + x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size, true); \ +} \ + namespace np { namespace qsort_simd { -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(int32_t *arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(uint32_t *arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(int64_t*arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(uint64_t*arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(float *arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num, true); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(double *arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num, true); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(int32_t *arr, npy_intp *arg, npy_intp size) -{ - x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(uint32_t *arr, npy_intp *arg, npy_intp size) -{ - x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(int64_t *arr, npy_intp *arg, npy_intp size) -{ - x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(uint64_t *arr, npy_intp *arg, npy_intp size) -{ - x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(float *arr, npy_intp *arg, npy_intp size) -{ - x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size, true); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(double *arr, npy_intp *arg, npy_intp size) -{ - x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size, true); -} + DISPATCH_ARG_METHODS(uint32_t) + DISPATCH_ARG_METHODS(int32_t) + DISPATCH_ARG_METHODS(float) + DISPATCH_ARG_METHODS(uint64_t) + DISPATCH_ARG_METHODS(int64_t) + DISPATCH_ARG_METHODS(double) }} // namespace np::simd diff --git a/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp index 856ca0153875..c4505f058857 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp @@ -3,55 +3,23 @@ #include "x86-simd-sort/src/x86simdsort-static-incl.h" +#define DISPATCH_SORT_METHODS(TYPE) \ +template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(TYPE *arr, npy_intp num, npy_intp kth) \ +{ \ + 
x86simdsortStatic::qselect(arr, kth, num, true); \ +} \ +template<> void NPY_CPU_DISPATCH_CURFX(QSort)(TYPE *arr, npy_intp num) \ +{ \ + x86simdsortStatic::qsort(arr, num, true); \ +} \ + namespace np { namespace qsort_simd { -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int32_t *arr, npy_intp num, npy_intp kth) -{ - x86simdsortStatic::qselect(arr, kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint32_t *arr, npy_intp num, npy_intp kth) -{ - x86simdsortStatic::qselect(arr, kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int64_t*arr, npy_intp num, npy_intp kth) -{ - x86simdsortStatic::qselect(arr, kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint64_t*arr, npy_intp num, npy_intp kth) -{ - x86simdsortStatic::qselect(arr, kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(float *arr, npy_intp num, npy_intp kth) -{ - x86simdsortStatic::qselect(arr, kth, num, true); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(double *arr, npy_intp num, npy_intp kth) -{ - x86simdsortStatic::qselect(arr, kth, num, true); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int32_t *arr, npy_intp num) -{ - x86simdsortStatic::qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint32_t *arr, npy_intp num) -{ - x86simdsortStatic::qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int64_t *arr, npy_intp num) -{ - x86simdsortStatic::qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint64_t *arr, npy_intp num) -{ - x86simdsortStatic::qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(float *arr, npy_intp num) -{ - x86simdsortStatic::qsort(arr, num, true); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(double *arr, npy_intp num) -{ - x86simdsortStatic::qsort(arr, num, true); -} + DISPATCH_SORT_METHODS(uint32_t) + DISPATCH_SORT_METHODS(int32_t) + DISPATCH_SORT_METHODS(float) + DISPATCH_SORT_METHODS(uint64_t) + DISPATCH_SORT_METHODS(int64_t) + DISPATCH_SORT_METHODS(double) }} // namespace np::qsort_simd #endif // __CYGWIN__ From f5285e165fdf6d2bb5a57fb04aed89e2c369f53c Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Tue, 23 Apr 2024 13:01:49 -0700 Subject: [PATCH 249/980] Update x86-simd-sort submodule --- numpy/_core/src/npysort/x86-simd-sort | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index 868696d22ad8..aad3db19def3 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit 868696d22ad84c5cd46bf9c2a4dac65e60a9213a +Subproject commit aad3db19def3273843d4390808d63c2b6ebd1dbf From 20c707a0ae82b97f855e388688e690863e78ddb8 Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Wed, 24 Apr 2024 01:14:31 -0600 Subject: [PATCH 250/980] DOC: Added small clarification note, based on discussion in issue #25778 This is my first PR. I am not sure what CI tags are needed for just documentation updates. I'll apply [skip ci] for now. This Closes #25778. 
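The recommendation added in the diff below amounts to seeding from the OS entropy pool; a minimal example (illustrative only, not part of the patch):

    import secrets

    import numpy as np

    # A 128-bit seed from the OS avoids the statistical pitfalls of small,
    # hand-picked seeds; logging the seed keeps the run reproducible.
    seed = secrets.randbits(128)
    rng = np.random.default_rng(seed)
    print(seed, rng.random())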
--- doc/source/reference/random/parallel.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/reference/random/parallel.rst b/doc/source/reference/random/parallel.rst index af2aac82f480..dbc10694720b 100644 --- a/doc/source/reference/random/parallel.rst +++ b/doc/source/reference/random/parallel.rst @@ -97,6 +97,10 @@ streams, about :math:`2^{20}`, then the probability that at least one pair of them are identical is about :math:`2^{-88}`, which is in solidly-ignorable territory ([2]_). +Note that while `~SeedSequence` attempts to solve many of the issues related to +user-provided small seeds, we still recommend using ``secrets.randbits(128)`` seeds +for community practice reasons. + .. [1] The algorithm is carefully designed to eliminate a number of possible ways to collide. For example, if one only does one level of spawning, it is guaranteed that all states will be unique. But it's easier to From 403a9bf633100d378a4c6ff67006cb579556879e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Tue, 16 Apr 2024 15:59:37 +0200 Subject: [PATCH 251/980] API: Add shape argument to numpy.reshape --- .../upcoming_changes/26292.new_feature.rst | 1 + numpy/__init__.pyi | 12 ++- numpy/_core/_add_newdocs.py | 2 +- numpy/_core/fromnumeric.py | 36 +++++--- numpy/_core/fromnumeric.pyi | 2 + numpy/_core/src/multiarray/methods.c | 10 ++- numpy/_core/src/multiarray/shape.c | 87 ++++++++++++------- numpy/_core/src/multiarray/shape.h | 6 ++ numpy/_core/tests/test_numeric.py | 50 +++++++++++ tools/ci/array-api-skips.txt | 2 +- 10 files changed, 157 insertions(+), 51 deletions(-) create mode 100644 doc/release/upcoming_changes/26292.new_feature.rst diff --git a/doc/release/upcoming_changes/26292.new_feature.rst b/doc/release/upcoming_changes/26292.new_feature.rst new file mode 100644 index 000000000000..fc2c33571d77 --- /dev/null +++ b/doc/release/upcoming_changes/26292.new_feature.rst @@ -0,0 +1 @@ +* `numpy.reshape` and `numpy.ndarray.reshape` now support ``shape`` and ``copy`` arguments. diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fb29a758dce5..1a52e1b85d10 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1713,11 +1713,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def reshape( - self, shape: _ShapeLike, /, *, order: _OrderACF = ... + self, + shape: _ShapeLike, + /, + *, + order: _OrderACF = ..., + copy: None | bool = ..., ) -> ndarray[Any, _DType_co]: ... @overload def reshape( - self, *shape: SupportsIndex, order: _OrderACF = ... + self, + *shape: SupportsIndex, + order: _OrderACF = ..., + copy: None | bool = ..., ) -> ndarray[Any, _DType_co]: ... @overload diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index bdd193400b05..e967a298fa84 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -3824,7 +3824,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('reshape', """ - a.reshape(shape, /, *, order='C') + a.reshape(shape, /, *, order='C', copy=None) Returns an array containing the same data with a new shape. 
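A minimal sketch of the semantics this patch gives the new ``copy`` keyword, assuming a build that includes the change (the error message is the one exercised by the tests added later in this patch)::

    import numpy as np

    a = np.arange(6)
    v = a.reshape((2, 3), copy=None)   # default: a view whenever possible
    c = a.reshape((2, 3), copy=True)   # always a copy
    assert np.shares_memory(a, v) and not np.shares_memory(a, c)

    # copy=False raises when a copy cannot be avoided
    f = np.array(np.arange(24).reshape(2, 3, 4), order="F")
    try:
        np.reshape(f, (12, 2), order="C", copy=False)
    except ValueError:
        pass  # "Unable to avoid creating a copy while reshaping."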
diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 54f9bf5877d6..141d83e8ee48 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -206,13 +206,13 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, newshape, order=None): +def _reshape_dispatcher(a, /, newshape=None, shape=None, *, + order=None, copy=None): return (a,) -# not deprecated --- copy if necessary, view otherwise @array_function_dispatch(_reshape_dispatcher) -def reshape(a, newshape, order='C'): +def reshape(a, /, newshape=None, shape=None, *, order='C', copy=None): """ Gives a new shape to an array without changing its data. @@ -221,13 +221,16 @@ def reshape(a, newshape, order='C'): a : array_like Array to be reshaped. newshape : int or tuple of ints + Replaced by ``shape`` argument. Retained for backward + compatibility. + shape : int or tuple of ints The new shape should be compatible with the original shape. If an integer, then the result will be a 1-D array of that length. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. order : {'C', 'F', 'A'}, optional - Read the elements of `a` using this index order, and place the - elements into the reshaped array using this index order. 'C' + Read the elements of ``a`` using this index order, and place the + elements into the reshaped array using this index order. 'C' means to read / write the elements using C-like index order, with the last axis index changing fastest, back to the first axis index changing slowest. 'F' means to read / write the @@ -236,8 +239,12 @@ def reshape(a, newshape, order='C'): the 'C' and 'F' options take no account of the memory layout of the underlying array, and only refer to the order of indexing. 'A' means to read / write the elements in Fortran-like index - order if `a` is Fortran *contiguous* in memory, C-like order + order if ``a`` is Fortran *contiguous* in memory, C-like order otherwise. + copy : bool, optional + If ``True``, then the array data is copied. If ``None``, a copy will + only be made if it's required by ``order``. For ``False`` it raises + a ``ValueError`` if a copy cannot be avoided. Default: ``None``. Returns ------- @@ -255,9 +262,9 @@ def reshape(a, newshape, order='C'): It is not always possible to change the shape of an array without copying the data. - The `order` keyword gives the index ordering both for *fetching* the values - from `a`, and then *placing* the values into the output array. - For example, let's say you have an array: + The ``order`` keyword gives the index ordering both for *fetching* + the values from ``a``, and then *placing* the values into the output + array. 
For example, let's say you have an array: >>> a = np.arange(6).reshape((3, 2)) >>> a @@ -296,7 +303,16 @@ def reshape(a, newshape, order='C'): [3, 4], [5, 6]]) """ - return _wrapfunc(a, 'reshape', newshape, order=order) + if newshape is None and shape is None: + raise TypeError( + "reshape() missing 1 required positional argument: 'shape'") + if newshape is not None and shape is not None: + raise ValueError( + "You cannot specify 'newshape' and 'shape' arguments " + "at the same time.") + if shape is not None: + newshape = shape + return _wrapfunc(a, 'reshape', newshape, order=order, copy=copy) def _choose_dispatcher(a, choices, out=None, mode=None): diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 440c0a046890..cde666f6f37d 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -94,12 +94,14 @@ def reshape( a: _ArrayLike[_SCT], newshape: _ShapeLike, order: _OrderACF = ..., + copy: None | bool = ..., ) -> NDArray[_SCT]: ... @overload def reshape( a: ArrayLike, newshape: _ShapeLike, order: _OrderACF = ..., + copy: None | bool = ..., ) -> NDArray[Any]: ... @overload diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index adc1e53e24ab..da9bd30c8b10 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -181,14 +181,16 @@ array_put(PyArrayObject *self, PyObject *args, PyObject *kwds) static PyObject * array_reshape(PyArrayObject *self, PyObject *args, PyObject *kwds) { - static char *keywords[] = {"order", NULL}; + static char *keywords[] = {"order", "copy", NULL}; PyArray_Dims newshape; PyObject *ret; NPY_ORDER order = NPY_CORDER; + NPY_COPYMODE copy = NPY_COPY_IF_NEEDED; Py_ssize_t n = PyTuple_Size(args); - if (!NpyArg_ParseKeywords(kwds, "|O&", keywords, - PyArray_OrderConverter, &order)) { + if (!NpyArg_ParseKeywords(kwds, "|$O&O&", keywords, + PyArray_OrderConverter, &order, + PyArray_CopyConverter, ©)) { return NULL; } @@ -210,7 +212,7 @@ array_reshape(PyArrayObject *self, PyObject *args, PyObject *kwds) goto fail; } } - ret = PyArray_Newshape(self, &newshape, order); + ret = _reshape_with_copy_arg(self, &newshape, order, copy); npy_free_cache_dim_obj(newshape); return ret; diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 6ca8ff85e218..044c3a43b3bb 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -201,6 +201,14 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, NPY_NO_EXPORT PyObject * PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, NPY_ORDER order) +{ + return _reshape_with_copy_arg(self, newdims, order, NPY_COPY_IF_NEEDED); +} + + +NPY_NO_EXPORT PyObject * +_reshape_with_copy_arg(PyArrayObject *array, PyArray_Dims *newdims, + NPY_ORDER order, NPY_COPYMODE copy) { npy_intp i; npy_intp *dimensions = newdims->ptr; @@ -212,7 +220,7 @@ PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, int flags; if (order == NPY_ANYORDER) { - order = PyArray_ISFORTRAN(self) ? NPY_FORTRANORDER : NPY_CORDER; + order = PyArray_ISFORTRAN(array) ? 
NPY_FORTRANORDER : NPY_CORDER; } else if (order == NPY_KEEPORDER) { PyErr_SetString(PyExc_ValueError, @@ -220,56 +228,70 @@ PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, return NULL; } /* Quick check to make sure anything actually needs to be done */ - if (ndim == PyArray_NDIM(self)) { + if (ndim == PyArray_NDIM(array) && copy != NPY_COPY_ALWAYS) { same = NPY_TRUE; i = 0; while (same && i < ndim) { - if (PyArray_DIM(self,i) != dimensions[i]) { + if (PyArray_DIM(array, i) != dimensions[i]) { same=NPY_FALSE; } i++; } if (same) { - return PyArray_View(self, NULL, NULL); + return PyArray_View(array, NULL, NULL); } } /* * fix any -1 dimensions and check new-dimensions against old size */ - if (_fix_unknown_dimension(newdims, self) < 0) { + if (_fix_unknown_dimension(newdims, array) < 0) { return NULL; } - /* - * sometimes we have to create a new copy of the array - * in order to get the right orientation and - * because we can't just reuse the buffer with the - * data in the order it is in. - */ - Py_INCREF(self); - if (((order == NPY_CORDER && !PyArray_IS_C_CONTIGUOUS(self)) || - (order == NPY_FORTRANORDER && !PyArray_IS_F_CONTIGUOUS(self)))) { - int success = 0; - success = _attempt_nocopy_reshape(self, ndim, dimensions, - newstrides, order); - if (success) { - /* no need to copy the array after all */ - strides = newstrides; + if (copy == NPY_COPY_ALWAYS) { + PyObject *newcopy = PyArray_NewCopy(array, order); + if (newcopy == NULL) { + return NULL; } - else { - PyObject *newcopy; - newcopy = PyArray_NewCopy(self, order); - Py_DECREF(self); - if (newcopy == NULL) { + array = (PyArrayObject *)newcopy; + } + else { + /* + * sometimes we have to create a new copy of the array + * in order to get the right orientation and + * because we can't just reuse the buffer with the + * data in the order it is in. + */ + Py_INCREF(array); + if (((order == NPY_CORDER && !PyArray_IS_C_CONTIGUOUS(array)) || + (order == NPY_FORTRANORDER && !PyArray_IS_F_CONTIGUOUS(array)))) { + int success = 0; + success = _attempt_nocopy_reshape(array, ndim, dimensions, + newstrides, order); + if (success) { + /* no need to copy the array after all */ + strides = newstrides; + } + else if (copy == NPY_COPY_NEVER) { + PyErr_SetString(PyExc_ValueError, + "Unable to avoid creating a copy while reshaping."); + Py_DECREF(array); return NULL; } - self = (PyArrayObject *)newcopy; + else { + PyObject *newcopy = PyArray_NewCopy(array, order); + Py_DECREF(array); + if (newcopy == NULL) { + return NULL; + } + array = (PyArrayObject *)newcopy; + } } } /* We always have to interpret the contiguous buffer correctly */ /* Make sure the flags argument is set. 
*/ - flags = PyArray_FLAGS(self); + flags = PyArray_FLAGS(array); if (ndim > 1) { if (order == NPY_FORTRANORDER) { flags &= ~NPY_ARRAY_C_CONTIGUOUS; @@ -281,18 +303,17 @@ PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, } } - Py_INCREF(PyArray_DESCR(self)); + Py_INCREF(PyArray_DESCR(array)); ret = (PyArrayObject *)PyArray_NewFromDescr_int( - Py_TYPE(self), PyArray_DESCR(self), - ndim, dimensions, strides, PyArray_DATA(self), - flags, (PyObject *)self, (PyObject *)self, + Py_TYPE(array), PyArray_DESCR(array), + ndim, dimensions, strides, PyArray_DATA(array), + flags, (PyObject *)array, (PyObject *)array, _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); - Py_DECREF(self); + Py_DECREF(array); return (PyObject *)ret; } - /* For backward compatibility -- Not recommended */ /*NUMPY_API diff --git a/numpy/_core/src/multiarray/shape.h b/numpy/_core/src/multiarray/shape.h index d1c1ce723459..a9b91feb0b4a 100644 --- a/numpy/_core/src/multiarray/shape.h +++ b/numpy/_core/src/multiarray/shape.h @@ -1,6 +1,8 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_SHAPE_H_ #define NUMPY_CORE_SRC_MULTIARRAY_SHAPE_H_ +#include "conversion_utils.h" + /* * Creates a sorted stride perm matching the KEEPORDER behavior * of the NpyIter object. Because this operates based on multiple @@ -27,4 +29,8 @@ PyArray_SqueezeSelected(PyArrayObject *self, npy_bool *axis_flags); NPY_NO_EXPORT PyObject * PyArray_MatrixTranspose(PyArrayObject *ap); +NPY_NO_EXPORT PyObject * +_reshape_with_copy_arg(PyArrayObject *array, PyArray_Dims *newdims, + NPY_ORDER order, NPY_COPYMODE copy); + #endif /* NUMPY_CORE_SRC_MULTIARRAY_SHAPE_H_ */ diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 6daa6bea5b9d..b0fcd6473a6e 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -165,6 +165,56 @@ def test_reshape(self): tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] assert_equal(np.reshape(arr, (2, 6)), tgt) + def test_reshape_shape_arg(self): + arr = np.arange(12) + shape = (3, 4) + expected = arr.reshape(shape) + + with pytest.raises( + ValueError, + match="You cannot specify 'newshape' and 'shape' " + "arguments at the same time." 
+ ): + np.reshape(arr, newshape=shape, shape=shape) + with pytest.raises( + TypeError, + match=r"reshape\(\) missing 1 required positional " + "argument: 'shape'" + ): + np.reshape(arr) + + assert_equal(np.reshape(arr, shape), expected) + assert_equal(np.reshape(arr, shape, order="C"), expected) + assert_equal(np.reshape(arr, newshape=shape), expected) + assert_equal(np.reshape(arr, shape=shape), expected) + assert_equal(np.reshape(arr, shape=shape, order="C"), expected) + + def test_reshape_copy_arg(self): + arr = np.arange(24).reshape(2, 3, 4) + arr_f_ord = np.array(arr, order="F") + shape = (12, 2) + + assert np.shares_memory(np.reshape(arr, shape), arr) + assert np.shares_memory(np.reshape(arr, shape, order="C"), arr) + assert np.shares_memory( + np.reshape(arr_f_ord, shape, order="F"), arr_f_ord) + assert np.shares_memory(np.reshape(arr, shape, copy=None), arr) + assert np.shares_memory(np.reshape(arr, shape, copy=False), arr) + assert np.shares_memory(arr.reshape(shape, copy=False), arr) + assert not np.shares_memory(np.reshape(arr, shape, copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="C", copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="F", copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="F", copy=None), arr) + + err_msg = "Unable to avoid creating a copy while reshaping." + with pytest.raises(ValueError, match=err_msg): + np.reshape(arr, shape, order="F", copy=False) + with pytest.raises(ValueError, match=err_msg): + np.reshape(arr_f_ord, shape, order="C", copy=False) + def test_round(self): arr = [1.56, 72.54, 6.35, 3.25] tgt = [1.6, 72.5, 6.4, 3.2] diff --git a/tools/ci/array-api-skips.txt b/tools/ci/array-api-skips.txt index 002f862241c0..b57462275051 100644 --- a/tools/ci/array-api-skips.txt +++ b/tools/ci/array-api-skips.txt @@ -7,7 +7,7 @@ array_api_tests/test_operators_and_elementwise_functions.py::test_ceil array_api_tests/test_operators_and_elementwise_functions.py::test_floor array_api_tests/test_operators_and_elementwise_functions.py::test_trunc -# 'newshape' should be named 'shape' +# 'shape' arg is present. 'newshape' is retained for backward compat. array_api_tests/test_signatures.py::test_func_signature[reshape] # missing 'descending' keyword arguments From fed445b9e6156d7d8aca35bd410b7c4a813624f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Thu, 18 Apr 2024 11:36:51 +0200 Subject: [PATCH 252/980] Apply review comments --- doc/source/user/absolute_beginners.rst | 2 +- numpy/_core/fromnumeric.py | 26 +++++++++++++++++--------- numpy/_core/tests/test_numeric.py | 6 ++++-- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index f294f384f7c1..fb5fcd9240df 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -425,7 +425,7 @@ this array to an array with three rows and two columns:: With ``np.reshape``, you can specify a few optional parameters:: - >>> np.reshape(a, newshape=(1, 6), order='C') + >>> np.reshape(a, shape=(1, 6), order='C') array([[0, 1, 2, 3, 4, 5]]) ``a`` is the array to be reshaped. 
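The migration path these review comments settle on can be sketched as follows (assuming this revision, where ``newshape=`` emits the ``DeprecationWarning`` added in ``fromnumeric.py`` below)::

    import warnings
    import numpy as np

    a = np.arange(6)
    np.reshape(a, (2, 3))         # positional: unchanged
    np.reshape(a, shape=(2, 3))   # new keyword spelling

    with warnings.catch_warnings():
        warnings.simplefilter("error")
        try:
            np.reshape(a, newshape=(2, 3))   # deprecated spelling
        except DeprecationWarning:
            pass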
diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 141d83e8ee48..7cb54dfad9c8 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -206,13 +206,13 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, /, newshape=None, shape=None, *, - order=None, copy=None): +def _reshape_dispatcher(a, /, shape=None, *, newshape=None, order=None, + copy=None): return (a,) @array_function_dispatch(_reshape_dispatcher) -def reshape(a, /, newshape=None, shape=None, *, order='C', copy=None): +def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): """ Gives a new shape to an array without changing its data. @@ -220,14 +220,14 @@ def reshape(a, /, newshape=None, shape=None, *, order='C', copy=None): ---------- a : array_like Array to be reshaped. - newshape : int or tuple of ints - Replaced by ``shape`` argument. Retained for backward - compatibility. shape : int or tuple of ints The new shape should be compatible with the original shape. If an integer, then the result will be a 1-D array of that length. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. + newshape : int or tuple of ints + Replaced by ``shape`` argument. Retained for backward + compatibility. order : {'C', 'F', 'A'}, optional Read the elements of ``a`` using this index order, and place the elements into the reshaped array using this index order. 'C' @@ -310,9 +310,17 @@ def reshape(a, /, newshape=None, shape=None, *, order='C', copy=None): raise ValueError( "You cannot specify 'newshape' and 'shape' arguments " "at the same time.") - if shape is not None: - newshape = shape - return _wrapfunc(a, 'reshape', newshape, order=order, copy=copy) + if newshape is not None: + # Deprecated in NumPy 2.1, 2024-04-18 + warnings.warn( + "`newshape` keyword argument is deprecated, " + "use `shape=...` or pass shape positionally instead. " + "(deprecated in NumPy 2.1)", + DeprecationWarning, + stacklevel=2, + ) + shape = newshape + return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) def _choose_dispatcher(a, choices, out=None, mode=None): diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index b0fcd6473a6e..d40451b00d30 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -175,7 +175,7 @@ def test_reshape_shape_arg(self): match="You cannot specify 'newshape' and 'shape' " "arguments at the same time." 
): - np.reshape(arr, newshape=shape, shape=shape) + np.reshape(arr, shape=shape, newshape=shape) with pytest.raises( TypeError, match=r"reshape\(\) missing 1 required positional " @@ -185,9 +185,11 @@ def test_reshape_shape_arg(self): assert_equal(np.reshape(arr, shape), expected) assert_equal(np.reshape(arr, shape, order="C"), expected) - assert_equal(np.reshape(arr, newshape=shape), expected) assert_equal(np.reshape(arr, shape=shape), expected) assert_equal(np.reshape(arr, shape=shape, order="C"), expected) + with pytest.warns(DeprecationWarning): + actual = np.reshape(arr, newshape=shape) + assert_equal(actual, expected) def test_reshape_copy_arg(self): arr = np.arange(24).reshape(2, 3, 4) From c59773b00e0149f25ad2cf83a81c64122265d6a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Thu, 18 Apr 2024 12:43:29 +0200 Subject: [PATCH 253/980] Apply review comments --- numpy/_core/fromnumeric.py | 5 ++++- numpy/_core/src/multiarray/shape.c | 4 ++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 7cb54dfad9c8..d033e7fd4f41 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -320,7 +320,10 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): stacklevel=2, ) shape = newshape - return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) + kwargs = {"order": order} + if copy is not None: + kwargs["copy"] = copy + return _wrapfunc(a, 'reshape', shape, **kwargs) def _choose_dispatcher(a, choices, out=None, mode=None): diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 044c3a43b3bb..e766a61ed12f 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -248,6 +248,10 @@ _reshape_with_copy_arg(PyArrayObject *array, PyArray_Dims *newdims, if (_fix_unknown_dimension(newdims, array) < 0) { return NULL; } + /* + * Memory order doesn't depend on a copy/no-copy context. + * 'order' argument is always honored. + */ if (copy == NPY_COPY_ALWAYS) { PyObject *newcopy = PyArray_NewCopy(array, order); if (newcopy == NULL) { From cd6c03615faafd04bac65906053291f7e707a37b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 24 Apr 2024 11:42:54 +0200 Subject: [PATCH 254/980] Fix deprecation message --- numpy/_core/fromnumeric.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index d033e7fd4f41..3394536e19c6 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -226,8 +226,9 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. newshape : int or tuple of ints - Replaced by ``shape`` argument. Retained for backward - compatibility. + .. deprecated:: 2.1 + Replaced by ``shape`` argument. Retained for backward + compatibility. order : {'C', 'F', 'A'}, optional Read the elements of ``a`` using this index order, and place the elements into the reshaped array using this index order. 
'C' From 8bd8d9be887f1df2d7c81714a65a5eff1459f114 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 24 Apr 2024 13:43:14 +0200 Subject: [PATCH 255/980] Apply review comments --- numpy/_core/fromnumeric.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 3394536e19c6..e8ac19e50637 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -307,11 +307,11 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): if newshape is None and shape is None: raise TypeError( "reshape() missing 1 required positional argument: 'shape'") - if newshape is not None and shape is not None: - raise ValueError( - "You cannot specify 'newshape' and 'shape' arguments " - "at the same time.") + if newshape is not None: + if shape is not None: + raise TypeError( + "You cannot specify 'newshape' and 'shape' arguments " + "at the same time.") # Deprecated in NumPy 2.1, 2024-04-18 warnings.warn( "`newshape` keyword argument is deprecated, " @@ -321,10 +321,9 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): stacklevel=2, ) shape = newshape - kwargs = {"order": order} if copy is not None: - kwargs["copy"] = copy - return _wrapfunc(a, 'reshape', shape, **kwargs) + return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) + return _wrapfunc(a, 'reshape', shape, order=order) def _choose_dispatcher(a, choices, out=None, mode=None): From 4a264d16640505401287e82e7b6a945414e132e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 24 Apr 2024 13:45:35 +0200 Subject: [PATCH 256/980] Fix test --- numpy/_core/tests/test_numeric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index d40451b00d30..db5e81e7cc9a 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -171,7 +171,7 @@ def test_reshape_shape_arg(self): expected = arr.reshape(shape) with pytest.raises( - ValueError, + TypeError, match="You cannot specify 'newshape' and 'shape' " "arguments at the same time." ): From d337663d5233dfaa45f28328024d0089849c6447 Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Wed, 24 Apr 2024 11:55:47 -0600 Subject: [PATCH 257/980] DOC: Empty commit to run tests to close #25778, [skip azp] [skip actions] [skip cirrus] From 458afab7e661db68f037afde03a1eec1f21879b6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Apr 2024 17:18:20 +0000 Subject: [PATCH 258/980] MAINT: Bump conda-incubator/setup-miniconda from 3.0.3 to 3.0.4 Bumps [conda-incubator/setup-miniconda](https://github.com/conda-incubator/setup-miniconda) from 3.0.3 to 3.0.4. - [Release notes](https://github.com/conda-incubator/setup-miniconda/releases) - [Changelog](https://github.com/conda-incubator/setup-miniconda/blob/main/CHANGELOG.md) - [Commits](https://github.com/conda-incubator/setup-miniconda/compare/030178870c779d9e5e1b4e563269f3aa69b04081...a4260408e20b96e80095f42ff7f1a15b27dd94ca) --- updated-dependencies: - dependency-name: conda-incubator/setup-miniconda dependency-type: direct:production update-type: version-update:semver-patch ...
Signed-off-by: dependabot[bot] --- .github/workflows/macos.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 7c759631c863..9e622f2221d4 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -53,7 +53,7 @@ jobs: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - name: Setup Mambaforge - uses: conda-incubator/setup-miniconda@030178870c779d9e5e1b4e563269f3aa69b04081 # v3.0.3 + uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4 with: python-version: ${{ matrix.python-version }} channels: conda-forge diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index fddf3510aa5e..39868abf6dff 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -240,7 +240,7 @@ jobs: name: sdist path: ./dist/* - - uses: conda-incubator/setup-miniconda@030178870c779d9e5e1b4e563269f3aa69b04081 # v3.0.3 + - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4 with: # for installation of anaconda-client, required for upload to # anaconda.org From 695b9fc568e842bfeecab1b11aa9eff5c0f4d009 Mon Sep 17 00:00:00 2001 From: Ben Woodruff Date: Thu, 25 Apr 2024 23:51:30 -0600 Subject: [PATCH 259/980] DOC: implement ngoldbaum's suggestions [skip ci] Co-authored-by: Nathan Goldbaum --- doc/source/reference/random/parallel.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/random/parallel.rst b/doc/source/reference/random/parallel.rst index dbc10694720b..70a2d058416c 100644 --- a/doc/source/reference/random/parallel.rst +++ b/doc/source/reference/random/parallel.rst @@ -98,8 +98,8 @@ them are identical is about :math:`2^{-88}`, which is in solidly-ignorable territory ([2]_). Note that while `~SeedSequence` attempts to solve many of the issues related to -user-provided small seeds, we still recommend using ``secrets.randbits(128)`` seeds -for community practice reasons. +user-provided small seeds, we still recommend using :py:func:`secrets.randbits` to generate +seeds with 128 bits of entropy to avoid the remaining biases introduced by human-chosen seeds. .. [1] The algorithm is carefully designed to eliminate a number of possible ways to collide. For example, if one only does one level of spawning, it From f62354560db59914ef6990547441c3216eb0390e Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Fri, 26 Apr 2024 09:23:40 -0600 Subject: [PATCH 260/980] moved note up in file and added internal link. [skip azp] [skip actions] [skip cirrus] --- doc/source/reference/random/index.rst | 2 ++ doc/source/reference/random/parallel.rst | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index 682d02c31cd2..a2f508c58bbf 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -56,6 +56,8 @@ pseudo-randomness was good for in the first place. or cryptographic purposes. See the :py:mod:`secrets` module from the standard library for such use cases. +.. _recommend-secrets-randbits: + Seeds should be large positive integers. `default_rng` can take positive integers of any size. We recommend using very large, unique numbers to ensure that your seed is different from anyone else's. 
This is good practice to ensure diff --git a/doc/source/reference/random/parallel.rst b/doc/source/reference/random/parallel.rst index 70a2d058416c..eeada129721d 100644 --- a/doc/source/reference/random/parallel.rst +++ b/doc/source/reference/random/parallel.rst @@ -45,6 +45,10 @@ These properties together mean that we can safely mix together the usual user-provided seed with simple incrementing counters to get `~BitGenerator` states that are (to very high probability) independent of each other. We can wrap this together into an API that is easy to use and difficult to misuse. +Note that while `~SeedSequence` attempts to solve many of the issues related to +user-provided small seeds, we still :ref:`recommend` +using :py:func:`secrets.randbits` to generate seeds with 128 bits of entropy to +avoid the remaining biases introduced by human-chosen seeds. .. code-block:: python @@ -97,10 +101,6 @@ streams, about :math:`2^{20}`, then the probability that at least one pair of them are identical is about :math:`2^{-88}`, which is in solidly-ignorable territory ([2]_). -Note that while `~SeedSequence` attempts to solve many of the issues related to -user-provided small seeds, we still recommend using :py:func:`secrets.randbits` to generate -seeds with 128 bits of entropy to avoid the remaining biases introduced by human-chosen seeds. - .. [1] The algorithm is carefully designed to eliminate a number of possible ways to collide. For example, if one only does one level of spawning, it is guaranteed that all states will be unique. But it's easier to From 51c0d52e61bc7556015a1081d57d7a2f8fa114d2 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Fri, 26 Apr 2024 09:36:38 -0700 Subject: [PATCH 261/980] BUG: remove extra braces --- numpy/_core/src/npysort/highway_qsort.dispatch.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp index 1e2a6f59c5cc..38adfc6de894 100644 --- a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp @@ -16,6 +16,5 @@ namespace np { namespace highway { namespace qsort_simd { DISPATCH_VQSORT(uint64_t) DISPATCH_VQSORT(double) DISPATCH_VQSORT(float) -} } } } // np::highway::qsort_simd From 2e42c4b523dfa1c2289d5d09a541bc9a8983a60f Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Fri, 26 Apr 2024 09:55:29 -0700 Subject: [PATCH 262/980] MAINT: Fix up arguments of avx512_*_fp16 functions --- numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp index 1968890cae99..2593b905b0ab 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp @@ -13,7 +13,7 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(Half *arr, npy_intp num, npy_int #if defined(NPY_HAVE_AVX512_SPR) x86simdsortStatic::qselect(reinterpret_cast<_Float16*>(arr), kth, num, true); #else - avx512_qselect_fp16(reinterpret_cast(arr), kth, num, true); + avx512_qselect_fp16(reinterpret_cast(arr), kth, num, true, false); #endif } @@ -35,7 +35,7 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, npy_intp size) #if defined(NPY_HAVE_AVX512_SPR) x86simdsortStatic::qsort(reinterpret_cast<_Float16*>(arr), size, true); #else - avx512_qsort_fp16(reinterpret_cast(arr), size, true); + 
avx512_qsort_fp16(reinterpret_cast(arr), size, true, false); #endif } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t *arr, npy_intp size) From c3fec332ea3a679b956d50ff428a9e027000c433 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Fri, 26 Apr 2024 10:09:46 -0700 Subject: [PATCH 263/980] CI: Use cpu_baseline of avx512_skx instead of avx512f --- .github/workflows/linux_simd.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index f2f50fbe4684..386bc7ba0c98 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -178,19 +178,19 @@ jobs: python -m pip install pytest pytest-xdist hypothesis typing_extensions - name: Build - run: spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512f -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' + run: spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' - name: Meson Log if: always() run: cat build/meson-logs/meson-log.txt - - name: SIMD tests (KNM) + - name: SIMD tests (SKX) run: | export NUMPY_SITE=$(realpath build-install/usr/lib/python*/site-packages/) export PYTHONPATH="$PYTHONPATH:$NUMPY_SITE" cd build-install && - sde -knm -- python -c "import numpy; numpy.show_config()" && - sde -knm -- python -m pytest $NUMPY_SITE/numpy/_core/tests/test_simd* + sde -skx -- python -c "import numpy; numpy.show_config()" && + sde -skx -- python -m pytest $NUMPY_SITE/numpy/_core/tests/test_simd* - name: linalg/ufunc/umath tests (TGL) run: | From 4045b72ece48e9e5b80f8b3b673a5131e225f330 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Fri, 26 Apr 2024 10:46:53 -0700 Subject: [PATCH 264/980] MAINT: manually include 16bit file on MSVC --- numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp index 2593b905b0ab..063e713c5256 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp @@ -2,6 +2,13 @@ #ifndef __CYGWIN__ #include "x86-simd-sort/src/x86simdsort-static-incl.h" +/* + * MSVC doesn't set the macro __AVX512VBMI2__ which is required for the 16-bit + * functions and therefore we need to manually include this file here + */ +#ifdef _MSC_VER +#include "x86-simd-sort/src/avx512-16bit-qsort.hpp" +#endif namespace np { namespace qsort_simd { From 197c915854ebbdf41cfb49070620ff926b0a338f Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 26 Apr 2024 13:05:54 -0600 Subject: [PATCH 265/980] BUG: ensure text padding ufuncs handle stringdtype nulls --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 12 +++++++++++- numpy/_core/strings.py | 8 ++++---- numpy/_core/tests/test_stringdtype.py | 4 ++++ 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 6153d8cd5a93..e28f4100cadc 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1625,12 +1625,19 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, Buffer inbuf((char *)s1.buf, s1.size); Buffer fill((char *)s2.buf, s2.size); + size_t num_codepoints = inbuf.num_codepoints(); + npy_intp 
width = (npy_intp)*(npy_int64*)in2; + + if (num_codepoints > (size_t)width) { + width = num_codepoints; + } + char *buf = NULL; npy_intp newsize; int overflowed = npy_mul_sizes_with_overflow( &(newsize), (npy_intp)num_bytes_for_utf8_character((unsigned char *)s2.buf), - (npy_intp)*(npy_int64*)in2 - inbuf.num_codepoints()); + width - num_codepoints); newsize += s1.size; if (overflowed) { @@ -1752,6 +1759,9 @@ zfill_strided_loop(PyArrayMethod_Context *context, Buffer inbuf((char *)is.buf, is.size); size_t in_codepoints = inbuf.num_codepoints(); size_t width = (size_t)*(npy_int64 *)in2; + if (in_codepoints > width) { + width = in_codepoints; + } // number of leading one-byte characters plus the size of the // original string size_t outsize = (width - in_codepoints) + is.size; diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 8707bed2ffbb..d30c4be3d62e 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -626,7 +626,6 @@ def center(a, width, fillchar=' '): """ a = np.asanyarray(a) - width = np.maximum(str_len(a), width) fillchar = np.asanyarray(fillchar, dtype=a.dtype) if np.any(str_len(fillchar) != 1): @@ -636,6 +635,7 @@ def center(a, width, fillchar=' '): if a.dtype.char == "T": return _center(a, width, fillchar) + width = np.maximum(str_len(a), width) out_dtype = f"{a.dtype.char}{width.max()}" shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) out = np.empty_like(a, shape=shape, dtype=out_dtype) @@ -682,7 +682,6 @@ def ljust(a, width, fillchar=' '): """ a = np.asanyarray(a) - width = np.maximum(str_len(a), width) fillchar = np.asanyarray(fillchar, dtype=a.dtype) if np.any(str_len(fillchar) != 1): @@ -692,6 +691,7 @@ def ljust(a, width, fillchar=' '): if a.dtype.char == "T": return _ljust(a, width, fillchar) + width = np.maximum(str_len(a), width) shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) out_dtype = f"{a.dtype.char}{width.max()}" out = np.empty_like(a, shape=shape, dtype=out_dtype) @@ -738,7 +738,6 @@ def rjust(a, width, fillchar=' '): """ a = np.asanyarray(a) - width = np.maximum(str_len(a), width) fillchar = np.asanyarray(fillchar, dtype=a.dtype) if np.any(str_len(fillchar) != 1): @@ -748,6 +747,7 @@ def rjust(a, width, fillchar=' '): if a.dtype.char == "T": return _rjust(a, width, fillchar) + width = np.maximum(str_len(a), width) shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) out_dtype = f"{a.dtype.char}{width.max()}" out = np.empty_like(a, shape=shape, dtype=out_dtype) @@ -784,11 +784,11 @@ def zfill(a, width): """ a = np.asanyarray(a) - width = np.maximum(str_len(a), width) if a.dtype.char == "T": return _zfill(a, width) + width = np.maximum(str_len(a), width) shape = np.broadcast_shapes(a.shape, width.shape) out_dtype = f"{a.dtype.char}{width.max()}" out = np.empty_like(a, shape=shape, dtype=out_dtype) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index b8176afc0c01..8d3c0a381e5b 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1210,11 +1210,15 @@ def test_unary(string_array, unicode_array, function_name): PASSES_THROUGH_NAN_NULLS = [ "add", + "center", + "ljust", "multiply", "replace", + "rjust", "strip", "lstrip", "rstrip", + "zfill", ] NULLS_ARE_FALSEY = [ From 63a4f61f1698012701c802fbcc55b22c84e40f32 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 26 Apr 2024 18:44:32 -0400 Subject: [PATCH 266/980] BUG: Fix rfft for even input length. 
Closes gh-26349 --- numpy/fft/_pocketfft_umath.cpp | 3 +++ numpy/fft/tests/test_pocketfft.py | 11 ++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index 7b42f8edc97b..bbab371dad83 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -189,6 +189,9 @@ rfft_impl(char **args, npy_intp const *dimensions, npy_intp const *steps, copy_input(ip, step_in, nin, &((T *)op_or_buff)[1], nout*2 - 1); plan->exec(&((T *)op_or_buff)[1], *(T *)fp, pocketfft::FORWARD); op_or_buff[0] = op_or_buff[0].imag(); // I0->R0, I0=0 + if (!(npts & 1)) { + op_or_buff[nout - 1].imag(0.0); + } if (buffered) { copy_output(op_or_buff, op, step_out, nout); } diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index 500d97282cde..8e8a19570674 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -183,7 +183,6 @@ def test_fft_bad_out(self): with pytest.raises(TypeError, match="Cannot cast"): np.fft.fft(x, out=np.zeros_like(x, dtype=float)) - @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward')) def test_ifft(self, norm): x = random(30) + 1j*random(30) @@ -258,6 +257,16 @@ def test_rfft(self): np.fft.rfft(x, n=n) / n, np.fft.rfft(x, n=n, norm="forward"), atol=1e-6) + def test_rfft_even(self): + x = np.arange(8) + y = np.fft.rfft(x, 4) + assert_allclose(y, [6.0, -2.0+2.0j, -2.0], rtol=1e-14) + + def test_rfft_odd(self): + x = np.array([1, 0, 2, 3, -3]) + y = np.fft.rfft(x) + assert_allclose(y, np.fft.fft(x)[:3], rtol=1e-14) + def test_irfft(self): x = random(30) assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6) From 9078d30f9ac9756cb812ce6e53f9a947ab8afb3f Mon Sep 17 00:00:00 2001 From: KIU Shueng Chuan Date: Mon, 15 Apr 2024 19:01:39 +0800 Subject: [PATCH 267/980] add benchmark for np.clip --- benchmarks/benchmarks/bench_clip.py | 35 +++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 benchmarks/benchmarks/bench_clip.py diff --git a/benchmarks/benchmarks/bench_clip.py b/benchmarks/benchmarks/bench_clip.py new file mode 100644 index 000000000000..edbeb745ad60 --- /dev/null +++ b/benchmarks/benchmarks/bench_clip.py @@ -0,0 +1,35 @@ +from .common import Benchmark + +import numpy as np + + +class ClipFloat(Benchmark): + param_names = ["dtype", "size"] + params = [ + [np.float32, np.float64, np.longdouble], + [100, 100_000] + ] + + def setup(self, dtype, size): + rng = np.random.default_rng() + self.array = rng.random(size=size).astype(dtype) + self.dataout = np.full_like(self.array, 0.5) + + def time_clip(self, dtype, size): + np.clip(self.array, 0.125, 0.875, self.dataout) + + +class ClipInteger(Benchmark): + param_names = ["dtype", "size"] + params = [ + [np.int32, np.int64], + [100, 100_000] + ] + + def setup(self, dtype, size): + rng = np.random.default_rng() + self.array = rng.integers(256, size=size, dtype=dtype) + self.dataout = np.full_like(self.array, 128) + + def time_clip(self, dtype, size): + np.clip(self.array, 32, 224, self.dataout) From 7906d11a7ef56d048045af48ae9439e38a826133 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sat, 27 Apr 2024 08:03:07 -0400 Subject: [PATCH 268/980] TST: Use fft() to compute the expected result of an rfft() test. 
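The even-length case fixed above can be checked directly; the expected values follow from computing ``np.fft.fft`` on the truncated input, which is exactly the comparison this test commit adopts (a minimal sketch mirroring the new ``test_rfft_even``)::

    import numpy as np

    x = np.arange(8.0)
    y = np.fft.rfft(x, n=4)   # operates on x[:4] = [0, 1, 2, 3]
    np.testing.assert_allclose(y, [6.0, -2.0 + 2.0j, -2.0], rtol=1e-14)
    assert y[-1].imag == 0.0  # the Nyquist term of an even-length rfft is purely real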
--- numpy/fft/tests/test_pocketfft.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index 8e8a19570674..f58ed0cecb39 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -259,8 +259,9 @@ def test_rfft(self): def test_rfft_even(self): x = np.arange(8) - y = np.fft.rfft(x, 4) - assert_allclose(y, [6.0, -2.0+2.0j, -2.0], rtol=1e-14) + n = 4 + y = np.fft.rfft(x, n) + assert_allclose(y, np.fft.fft(x[:n])[:n//2 + 1], rtol=1e-14) def test_rfft_odd(self): x = np.array([1, 0, 2, 3, -3]) From 5cec3fbb792016c726d79e315f3124996e141bd0 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sat, 27 Apr 2024 08:05:19 -0400 Subject: [PATCH 269/980] MAINT: Fix a comment to refer to copy_input instead of copy_data. --- numpy/fft/_pocketfft_umath.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index bbab371dad83..44c500ee6fe7 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -183,7 +183,7 @@ rfft_impl(char **args, npy_intp const *dimensions, npy_intp const *steps, * Pocketfft uses FFTpack order, R0,R1,I1,...Rn-1,In-1,Rn[,In] (last * for npts odd only). To make unpacking easy, we place the real data * offset by one in the buffer, so that we just have to move R0 and - * create I0=0. Note that copy_data will zero the In component for + * create I0=0. Note that copy_input will zero the In component for * even number of points. */ copy_input(ip, step_in, nin, &((T *)op_or_buff)[1], nout*2 - 1); From 394ffa41aadb6f12f6912a2865d94719f37aa0a9 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 27 Apr 2024 11:29:11 -0400 Subject: [PATCH 270/980] MAINT: Simplify bugfix for even rfft --- numpy/fft/_pocketfft_umath.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index 44c500ee6fe7..013db5f1d8d4 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -172,6 +172,7 @@ rfft_impl(char **args, npy_intp const *dimensions, npy_intp const *steps, auto plan = pocketfft::detail::get_plan>(npts); auto buffered = (step_out != sizeof(std::complex)); pocketfft::detail::arr> buff(buffered ? nout : 0); + auto nin_used = nin <= npts ? nin : npts; for (size_t i = 0; i < n_outer; i++, ip += si, fp += sf, op += so) { std::complex *op_or_buff = buffered ? buff.data() : (std::complex *)op; /* @@ -186,12 +187,9 @@ rfft_impl(char **args, npy_intp const *dimensions, npy_intp const *steps, * create I0=0. Note that copy_input will zero the In component for * even number of points. */ - copy_input(ip, step_in, nin, &((T *)op_or_buff)[1], nout*2 - 1); + copy_input(ip, step_in, nin_used, &((T *)op_or_buff)[1], nout*2 - 1); plan->exec(&((T *)op_or_buff)[1], *(T *)fp, pocketfft::FORWARD); op_or_buff[0] = op_or_buff[0].imag(); // I0->R0, I0=0 - if (!(npts & 1)) { - op_or_buff[nout - 1].imag(0.0); - } if (buffered) { copy_output(op_or_buff, op, step_out, nout); } From 43401452c6ff197368ddd47ce2f291a17e9dfc72 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Apr 2024 18:03:46 +0000 Subject: [PATCH 271/980] MAINT: Bump actions/dependency-review-action from 4.2.5 to 4.3.1 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.2.5 to 4.3.1. 
- [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/5bbc3ba658137598168acb2ab73b21c432dd411b...e58c696e52cac8e62d61cc21fda89565d71505d7) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index a64f75f2833f..02805a931621 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -17,4 +17,4 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: 'Dependency Review' - uses: actions/dependency-review-action@5bbc3ba658137598168acb2ab73b21c432dd411b # v4.2.5 + uses: actions/dependency-review-action@e58c696e52cac8e62d61cc21fda89565d71505d7 # v4.3.1 From 2fd11c00aa03e7373d20c039b53b483d35be0786 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 25 Apr 2024 11:49:52 -0600 Subject: [PATCH 272/980] BUG: disable the loop data cache in the nogil build --- numpy/_core/src/umath/legacy_array_method.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 681cbadadb07..9592df0e1366 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -33,37 +33,43 @@ typedef struct { /* Use a free list, since we should normally only need one at a time */ +#ifndef Py_GIL_DISABLED #define NPY_LOOP_DATA_CACHE_SIZE 5 static int loop_data_num_cached = 0; static legacy_array_method_auxdata *loop_data_cache[NPY_LOOP_DATA_CACHE_SIZE]; - +#else +#define NPY_LOOP_DATA_CACHE_SIZE 0 +#endif static void legacy_array_method_auxdata_free(NpyAuxData *data) { +#if NPY_LOOP_DATA_CACHE_SIZE > 0 if (loop_data_num_cached < NPY_LOOP_DATA_CACHE_SIZE) { loop_data_cache[loop_data_num_cached] = ( (legacy_array_method_auxdata *)data); loop_data_num_cached++; } - else { + else +#endif + { PyMem_Free(data); } } -#undef NPY_LOOP_DATA_CACHE_SIZE - - NpyAuxData * get_new_loop_data( PyUFuncGenericFunction loop, void *user_data, int pyerr_check) { legacy_array_method_auxdata *data; +#if NPY_LOOP_DATA_CACHE_SIZE > 0 if (NPY_LIKELY(loop_data_num_cached > 0)) { loop_data_num_cached--; data = loop_data_cache[loop_data_num_cached]; } - else { + else +#endif + { data = PyMem_Malloc(sizeof(legacy_array_method_auxdata)); if (data == NULL) { return NULL; @@ -77,6 +83,7 @@ get_new_loop_data( return (NpyAuxData *)data; } +#undef NPY_LOOP_DATA_CACHE_SIZE /* * This is a thin wrapper around the legacy loop signature. 
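The shape of the change above is a bounded free list whose capacity is compiled down to zero when the GIL is disabled. A toy Python model of that pattern (not NumPy code; the real logic is the C above, and the ``sys._is_gil_enabled`` probe is only an illustration)::

    import sys

    FREE_THREADED = not getattr(sys, "_is_gil_enabled", lambda: True)()
    CACHE_SIZE = 0 if FREE_THREADED else 5
    _cache = []

    def release(item):
        # mirrors legacy_array_method_auxdata_free: keep up to CACHE_SIZE items
        if len(_cache) < CACHE_SIZE:
            _cache.append(item)

    def acquire(factory):
        # mirrors get_new_loop_data: reuse a cached item when one is available
        return _cache.pop() if _cache else factory()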
From 62615248fd54ec507eb731ec6520e4e9572446e0 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 25 Apr 2024 11:54:49 -0600 Subject: [PATCH 273/980] TST: add multithreaded ufunc execution test --- numpy/_core/tests/test_multithreading.py | 27 +++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 8999a18a39ff..1a2534e78aaf 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -9,13 +9,30 @@ pytest.skip(allow_module_level=True, reason="no threading support in wasm") -def test_parallel_errstate_creation(): +def run_threaded(func, iters, pass_count=False): + with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: + if pass_count: + futures = [tpe.submit(func, i) for i in range(iters)] + else: + futures = [tpe.submit(func) for _ in range(iters)] + for f in futures: + f.result() + + +def test_parallel_randomstate_creation(): # if the coercion cache is enabled and not thread-safe, creating # RandomState instances simultaneously leads to a data race def func(seed): np.random.RandomState(seed) - with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: - futures = [tpe.submit(func, i) for i in range(500)] - for f in futures: - f.result() + run_threaded(func, 500, pass_count=True) + +def test_parallel_ufunc_execution(): + # if the loop data cache or dispatch cache are not thread-safe + # computing ufuncs simultaneously in multiple threads leads + # to a data race + def func(): + arr = np.random.random((25,)) + np.isnan(arr) + + run_threaded(func, 500) From cc0f257bbff46030fc8a09d03335b2a66c8df2f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 17:31:34 +0000 Subject: [PATCH 274/980] MAINT: Bump actions/dependency-review-action from 4.3.1 to 4.3.2 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.3.1 to 4.3.2. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/e58c696e52cac8e62d61cc21fda89565d71505d7...0c155c5e8556a497adf53f2c18edabf945ed8e70) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 02805a931621..334a89bf6ea7 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -17,4 +17,4 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: 'Dependency Review' - uses: actions/dependency-review-action@e58c696e52cac8e62d61cc21fda89565d71505d7 # v4.3.1 + uses: actions/dependency-review-action@0c155c5e8556a497adf53f2c18edabf945ed8e70 # v4.3.2 From 86e39a0827fc12972f195e785dd5069be5842b6d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 29 Apr 2024 15:36:33 -0600 Subject: [PATCH 275/980] MNT: add locking for PyArrayIdentityHash --- numpy/_core/src/common/npy_hashtable.c | 44 ++++++++++++++++++++++++-- numpy/_core/src/common/npy_hashtable.h | 3 ++ 2 files changed, 44 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.c index 14f6cca1b864..02fe5ca29751 100644 --- a/numpy/_core/src/common/npy_hashtable.c +++ b/numpy/_core/src/common/npy_hashtable.c @@ -29,6 +29,33 @@ #define _NpyHASH_XXROTATE(x) ((x << 13) | (x >> 19)) /* Rotate left 13 bits */ #endif +#ifdef Py_GIL_DISABLED +// TODO: replace with PyMutex when it is public +#define LOCK_TABLE(tb) \ + if (!PyThread_acquire_lock(tb->mutex, NOWAIT_LOCK)) { \ + PyThread_acquire_lock(tb->mutex, WAIT_LOCK); \ + } +#define UNLOCK_TABLE(tb) PyThread_release_lock(tb->mutex); +#define INITIALIZE_LOCK(tb) \ + tb->mutex = PyThread_allocate_lock(); \ + if (tb->mutex == NULL) { \ + PyErr_NoMemory(); \ + PyMem_Free(res); \ + return NULL; \ + } +#define FREE_LOCK(tb) \ + if (tb->mutex != NULL) { \ + PyThread_free_lock(tb->mutex); \ + } +#else +// the GIL serializes access to the table so no need +// for locking if it is enabled +#define LOCK_TABLE(tb) +#define UNLOCK_TABLE(tb) +#define INITIALIZE_LOCK(tb) +#define FREE_LOCK(tb) +#endif + /* * This hashing function is basically the Python tuple hash with the type * identity hash inlined. The tuple hash itself is a reduced version of xxHash. @@ -100,6 +127,8 @@ PyArrayIdentityHash_New(int key_len) res->size = 4; /* Start with a size of 4 */ res->nelem = 0; + INITIALIZE_LOCK(res); + res->buckets = PyMem_Calloc(4 * (key_len + 1), sizeof(PyObject *)); if (res->buckets == NULL) { PyErr_NoMemory(); @@ -114,6 +143,7 @@ NPY_NO_EXPORT void PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb) { PyMem_Free(tb->buckets); + FREE_LOCK(tb); PyMem_Free(tb); } @@ -160,8 +190,9 @@ _resize_if_necessary(PyArrayIdentityHash *tb) for (npy_intp i = 0; i < prev_size; i++) { PyObject **item = &old_table[i * (tb->key_len + 1)]; if (item[0] != NULL) { - tb->nelem -= 1; /* Decrement, setitem will increment again */ - PyArrayIdentityHash_SetItem(tb, item+1, item[0], 1); + PyObject **tb_item = find_item(tb, item + 1); + tb_item[0] = item[0]; + memcpy(tb_item+1, item+1, tb->key_len * sizeof(PyObject *)); } } PyMem_Free(old_table); @@ -188,14 +219,17 @@ NPY_NO_EXPORT int PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, PyObject *const *key, PyObject *value, int replace) { + LOCK_TABLE(tb); if (value != NULL && _resize_if_necessary(tb) < 0) { /* Shrink, only if a new value is added. 
*/ + UNLOCK_TABLE(tb); return -1; } PyObject **tb_item = find_item(tb, key); if (value != NULL) { if (tb_item[0] != NULL && !replace) { + UNLOCK_TABLE(tb); PyErr_SetString(PyExc_RuntimeError, "Identity cache already includes the item."); return -1; @@ -209,6 +243,7 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, memset(tb_item, 0, (tb->key_len + 1) * sizeof(PyObject *)); } + UNLOCK_TABLE(tb); return 0; } @@ -216,5 +251,8 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, NPY_NO_EXPORT PyObject * PyArrayIdentityHash_GetItem(PyArrayIdentityHash const *tb, PyObject *const *key) { - return find_item(tb, key)[0]; + LOCK_TABLE(tb); + PyObject *res = find_item(tb, key)[0]; + UNLOCK_TABLE(tb); + return res; } diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index a0bf81967d75..fdb241667164 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -13,6 +13,9 @@ typedef struct { PyObject **buckets; npy_intp size; /* current size */ npy_intp nelem; /* number of elements */ +#ifdef Py_GIL_DISABLED + PyThread_type_lock *mutex; +#endif } PyArrayIdentityHash; From d7ebf00065357b537a8ae2673f7d161de3837197 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 30 Apr 2024 13:31:27 -0600 Subject: [PATCH 276/980] TST: static types are now immortal in the default build too --- numpy/_core/tests/test_nditer.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 91c50e4ba408..517e21a92cf8 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -11,7 +11,6 @@ from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_raises, IS_WASM, HAS_REFCOUNT, suppress_warnings, break_cycles, - NOGIL_BUILD ) def iter_multi_index(i): @@ -68,8 +67,8 @@ def test_iter_refcount(): rc2_dt = sys.getrefcount(dt) it2 = it.copy() assert_(sys.getrefcount(a) > rc2_a) - if not NOGIL_BUILD: - # np.dtype('f4') is immortal in the nogil build + if sys.version_info < (3, 13): + # np.dtype('f4') is immortal after Python 3.13 assert_(sys.getrefcount(dt) > rc2_dt) it = None assert_equal(sys.getrefcount(a), rc2_a) From 4e6d2bf866ff26e7088c47b01a4dfbb5485c7c85 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 30 Apr 2024 14:29:29 -0600 Subject: [PATCH 277/980] ENH: add support for nan-like null strings in string replace (#26355) This fixes an issue similar to the one fixed by #26353. In particular, right now np.strings.replace calls the count ufunc to get the number of replacements. This is necessary for fixed-width strings, but it turns out to make it impossible to support null strings in replace. I went ahead and instead found the replacement counts inline in the ufunc loop. This lets me add support for nan-like null strings, which it turns out pandas needs. 
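With this change a NaN-like null passes through ``replace`` instead of raising. A minimal sketch, assuming a build that includes this commit (the expected output is shown as a comment)::

    import numpy as np

    dt = np.dtypes.StringDType(na_object=np.nan)
    arr = np.array(["hello world", np.nan], dtype=dt)
    np.strings.replace(arr, "world", "there")
    # array(['hello there', nan], dtype=StringDType(na_object=nan))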
--- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 95 ++++++++++++++------ numpy/_core/strings.py | 8 +- numpy/_core/tests/test_stringdtype.py | 2 +- 3 files changed, 72 insertions(+), 33 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index e28f4100cadc..052c4381a4b5 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1300,7 +1300,9 @@ string_replace_strided_loop( PyArray_StringDTypeObject *descr0 = (PyArray_StringDTypeObject *)context->descriptors[0]; + int has_null = descr0->na_object != NULL; int has_string_na = descr0->has_string_na; + int has_nan_na = descr0->has_nan_na; const npy_static_string *default_string = &descr0->default_string; @@ -1330,11 +1332,29 @@ string_replace_strided_loop( goto fail; } else if (i1_isnull || i2_isnull || i3_isnull) { - if (!has_string_na) { - npy_gil_error(PyExc_ValueError, - "Null values are not supported as replacement arguments " - "for replace"); - goto fail; + if (has_null && !has_string_na) { + if (i2_isnull || i3_isnull) { + npy_gil_error(PyExc_ValueError, + "Null values are not supported as search " + "patterns or replacement strings for " + "replace"); + goto fail; + } + else if (i1_isnull) { + if (has_nan_na) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in replace"); + goto fail; + } + goto next_step; + } + else { + npy_gil_error(PyExc_ValueError, + "Only string or NaN-like null strings can " + "be used as search strings for replace"); + } + } } else { if (i1_isnull) { @@ -1349,32 +1369,51 @@ string_replace_strided_loop( } } - // conservatively overallocate - // TODO check overflow - size_t max_size; - if (i2s.size == 0) { - // interleaving - max_size = i1s.size + (i1s.size + 1)*(i3s.size); - } - else { - // replace i2 with i3 - max_size = i1s.size * (i3s.size/i2s.size + 1); - } - char *new_buf = (char *)PyMem_RawCalloc(max_size, 1); - Buffer buf1((char *)i1s.buf, i1s.size); - Buffer buf2((char *)i2s.buf, i2s.size); - Buffer buf3((char *)i3s.buf, i3s.size); - Buffer outbuf(new_buf, max_size); + { + Buffer buf1((char *)i1s.buf, i1s.size); + Buffer buf2((char *)i2s.buf, i2s.size); - size_t new_buf_size = string_replace( - buf1, buf2, buf3, *(npy_int64 *)in4, outbuf); + npy_int64 in_count = *(npy_int64*)in4; + if (in_count == -1) { + in_count = NPY_MAX_INT64; + } - if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { - npy_gil_error(PyExc_MemoryError, "Failed to pack string in replace"); - goto fail; - } + npy_int64 found_count = string_count( + buf1, buf2, 0, NPY_MAX_INT64); + if (found_count < 0) { + goto fail; + } - PyMem_RawFree(new_buf); + npy_intp count = Py_MIN(in_count, found_count); + + Buffer buf3((char *)i3s.buf, i3s.size); + + // conservatively overallocate + // TODO check overflow + size_t max_size; + if (i2s.size == 0) { + // interleaving + max_size = i1s.size + (i1s.size + 1)*(i3s.size); + } + else { + // replace i2 with i3 + size_t change = i2s.size >= i3s.size ? 
0 : i3s.size - i2s.size; + max_size = i1s.size + count * change; + } + char *new_buf = (char *)PyMem_RawCalloc(max_size, 1); + Buffer outbuf(new_buf, max_size); + + size_t new_buf_size = string_replace( + buf1, buf2, buf3, count, outbuf); + + if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { + npy_gil_error(PyExc_MemoryError, "Failed to pack string in replace"); + goto fail; + } + + PyMem_RawFree(new_buf); + } + next_step: in1 += strides[0]; in2 += strides[1]; diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index d30c4be3d62e..83034705f525 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -1153,15 +1153,15 @@ def replace(a, old, new, count=-1): a_dt = arr.dtype old = np.asanyarray(old, dtype=getattr(old, 'dtype', a_dt)) new = np.asanyarray(new, dtype=getattr(new, 'dtype', a_dt)) + count = np.asanyarray(count) + + if arr.dtype.char == "T": + return _replace(arr, old, new, count) max_int64 = np.iinfo(np.int64).max counts = _count_ufunc(arr, old, 0, max_int64) - count = np.asanyarray(count) counts = np.where(count < 0, counts, np.minimum(counts, count)) - if arr.dtype.char == "T": - return _replace(arr, old, new, counts) - buffersizes = str_len(arr) + counts * (str_len(new) - str_len(old)) out_dtype = f"{arr.dtype.char}{buffersizes.max()}" out = np.empty_like(arr, shape=buffersizes.shape, dtype=out_dtype) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 8d3c0a381e5b..dd6ac36999e6 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1218,6 +1218,7 @@ def test_unary(string_array, unicode_array, function_name): "strip", "lstrip", "rstrip", + "replace" "zfill", ] @@ -1230,7 +1231,6 @@ def test_unary(string_array, unicode_array, function_name): "count", "find", "rfind", - "replace", ] SUPPORTS_NULLS = ( From 1715888ed657328045074c16944559a8fb8b5bf5 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Thu, 2 May 2024 08:42:34 +1000 Subject: [PATCH 278/980] DOC: fix np.unique release notes [skip cirrus] --- doc/source/release/2.0.0-notes.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst index 216c38bb1538..73aff5d1e268 100644 --- a/doc/source/release/2.0.0-notes.rst +++ b/doc/source/release/2.0.0-notes.rst @@ -1481,7 +1481,7 @@ the ``unique_inverse`` output is now shaped such that the input can be reconstru directly using ``np.take(unique, unique_inverse)`` when ``axis=None``, and ``np.take_along_axis(unique, unique_inverse, axis=axis)`` otherwise. 
-(`gh-25553 `__, +(`gh-25553 `__, `gh-25570 `__) ``any`` and ``all`` return booleans for object arrays From 9c8ed4ee83237ee979b40930f2540f5ac27be0c9 Mon Sep 17 00:00:00 2001 From: KIU Shueng Chuan Date: Thu, 2 May 2024 19:36:48 +0800 Subject: [PATCH 279/980] specialize float clip --- numpy/_core/src/umath/clip.cpp | 108 ++++++++++++++++++++++++--------- 1 file changed, 80 insertions(+), 28 deletions(-) diff --git a/numpy/_core/src/umath/clip.cpp b/numpy/_core/src/umath/clip.cpp index c30ab89b1595..e051692c6d48 100644 --- a/numpy/_core/src/umath/clip.cpp +++ b/numpy/_core/src/umath/clip.cpp @@ -1,6 +1,8 @@ /** * This module provides the inner loops for the clip ufunc */ +#include + #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -150,50 +152,100 @@ _NPY_CLIP(T x, T min, T max) return _NPY_MIN(_NPY_MAX((x), (min)), (max)); } -template -static void -_npy_clip_(T **args, npy_intp const *dimensions, npy_intp const *steps) -{ - npy_intp n = dimensions[0]; - if (steps[1] == 0 && steps[2] == 0) { - /* min and max are constant throughout the loop, the most common case - */ - /* NOTE: it may be possible to optimize these checks for nan */ - T min_val = *args[1]; - T max_val = *args[2]; +template +static inline void +_npy_clip_const_minmax_( + char *ip, npy_intp is, char *op, npy_intp os, npy_intp n, T min_val, T max_val, + std::false_type /* non-floating point */ +) +{ + /* contiguous, branch to let the compiler optimize */ + if (is == sizeof(T) && os == sizeof(T)) { + for (npy_intp i = 0; i < n; i++, ip += sizeof(T), op += sizeof(T)) { + *(T *)op = _NPY_CLIP(*(T *)ip, min_val, max_val); + } + } + else { + for (npy_intp i = 0; i < n; i++, ip += is, op += os) { + *(T *)op = _NPY_CLIP(*(T *)ip, min_val, max_val); + } + } +} - T *ip1 = args[0], *op1 = args[3]; - npy_intp is1 = steps[0] / sizeof(T), os1 = steps[3] / sizeof(T); +template +static inline void +_npy_clip_const_minmax_( + char *ip, npy_intp is, char *op, npy_intp os, npy_intp n, T min_val, T max_val, + std::true_type /* floating point */ +) +{ + if (!npy_isnan(min_val) && !npy_isnan(max_val)) { + /* + * The min/max_val are not NaN so the comparison below will + * propagate NaNs in the input without further NaN checks. + */ /* contiguous, branch to let the compiler optimize */ - if (is1 == 1 && os1 == 1) { - for (npy_intp i = 0; i < n; i++, ip1++, op1++) { - *op1 = _NPY_CLIP(*ip1, min_val, max_val); + if (is == sizeof(T) && os == sizeof(T)) { + for (npy_intp i = 0; i < n; i++, ip += sizeof(T), op += sizeof(T)) { + T x = *(T *)ip; + if (x < min_val) { + x = min_val; + } + if (x > max_val) { + x = max_val; + } + *(T *)op = x; } } else { - for (npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) { - *op1 = _NPY_CLIP(*ip1, min_val, max_val); + for (npy_intp i = 0; i < n; i++, ip += is, op += os) { + T x = *(T *)ip; + if (x < min_val) { + x = min_val; + } + if (x > max_val) { + x = max_val; + } + *(T *)op = x; } } } else { - T *ip1 = args[0], *ip2 = args[1], *ip3 = args[2], *op1 = args[3]; - npy_intp is1 = steps[0] / sizeof(T), is2 = steps[1] / sizeof(T), - is3 = steps[2] / sizeof(T), os1 = steps[3] / sizeof(T); - for (npy_intp i = 0; i < n; - i++, ip1 += is1, ip2 += is2, ip3 += is3, op1 += os1) - *op1 = _NPY_CLIP(*ip1, *ip2, *ip3); + /* min_val and/or max_val are nans */ + T x = npy_isnan(min_val) ? 
min_val : max_val; + for (npy_intp i = 0; i < n; i++, op += os) { + *(T *)op = x; + } } - npy_clear_floatstatus_barrier((char *)dimensions); } -template +template static void _npy_clip(char **args, npy_intp const *dimensions, npy_intp const *steps) { - using T = typename Tag::type; - return _npy_clip_((T **)args, dimensions, steps); + npy_intp n = dimensions[0]; + if (steps[1] == 0 && steps[2] == 0) { + /* min and max are constant throughout the loop, the most common case */ + T min_val = *(T *)args[1]; + T max_val = *(T *)args[2]; + + _npy_clip_const_minmax_( + args[0], steps[0], args[3], steps[3], n, min_val, max_val, + std::is_base_of{} + ); + } + else { + char *ip1 = args[0], *ip2 = args[1], *ip3 = args[2], *op1 = args[3]; + npy_intp is1 = steps[0], is2 = steps[1], + is3 = steps[2], os1 = steps[3]; + for (npy_intp i = 0; i < n; + i++, ip1 += is1, ip2 += is2, ip3 += is3, op1 += os1) + { + *(T *)op1 = _NPY_CLIP(*(T *)ip1, *(T *)ip2, *(T *)ip3); + } + } + npy_clear_floatstatus_barrier((char *)dimensions); } extern "C" { From d2c00c7ce9e2a05d8c7b1dd4f8ab5972a1dfee8e Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 2 May 2024 13:43:27 +0200 Subject: [PATCH 280/980] DOC: Make a note of can_cast not supporting int, float, complex --- doc/source/release/2.0.0-notes.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst index 216c38bb1538..4d935fea5338 100644 --- a/doc/source/release/2.0.0-notes.rst +++ b/doc/source/release/2.0.0-notes.rst @@ -1496,6 +1496,16 @@ to achieve the previous behavior. (`gh-25712 `__) +``np.can_cast`` cannot be called on Python int, float, or complex +----------------------------------------------------------------- +``np.can_cast`` cannot be called with Python int, float, or complex instances +anymore. This is because NEP 50 means that the result of ``can_cast`` must +not depend on the value passed in. +Unfortunately, for Python scalars whether a cast should be considered +``"same_kind"`` or ``"safe"`` may depend on the context and value so that +this is currently not implemented. +In some cases, this means you may have to add a specific path for: +``if type(obj) in (int, float, complex): ...``. **Content from release note snippets in doc/release/upcoming_changes:** From 2e02cb76fad5f91474c0ddfb7d740406ab0a4933 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 2 May 2024 13:43:49 +0200 Subject: [PATCH 281/980] BUG: Make sure that NumPy scalars are supported by can_cast The main issue here was the order of the checks, since float64 is a subclass of float the error path was taken even though it should not have been. This also avoids converting to an array (which is very slow) when possible. I opted to use `scalar.dtype` since that may be a bit easier for potential future user dtype. That may not be quite ideal (I would like to not force `np.generic` as a base-class for user scalars), but is probably pretty close and more complicated fixes are probably not good for backport. 
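A quick sketch of the intended behavior (illustrative, not part of the
diff below):

    import numpy as np

    # NumPy scalars are resolved via scalar.dtype under weak promotion
    np.can_cast(np.float32(0.0), "float64")   # True, same as np.can_cast(np.float32, "float64")
    np.can_cast(np.float64(0.0), "float32")   # False under the default 'safe' casting

    # plain Python scalars still raise under NEP 50:
    # np.can_cast(1.0, "float32")  -> TypeError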
--- numpy/_core/src/multiarray/descriptor.c | 3 +- numpy/_core/src/multiarray/multiarraymodule.c | 44 +++++++++++++++---- numpy/_core/src/multiarray/multiarraymodule.h | 1 + numpy/_core/tests/test_numeric.py | 11 +++++ 4 files changed, 49 insertions(+), 10 deletions(-) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 94f40fb42ca0..0617a7b8de44 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -20,6 +20,7 @@ #include "conversion_utils.h" /* for PyArray_TypestrConvert */ #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ #include "descriptor.h" +#include "multiarraymodule.h" #include "alloc.h" #include "assert.h" #include "npy_buffer.h" @@ -2701,7 +2702,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) Py_DECREF(ret); return NULL; } - obj = PyObject_GetAttrString(mod, "dtype"); + obj = PyObject_GetAttr(mod, npy_ma_str_dtype); Py_DECREF(mod); if (obj == NULL) { Py_DECREF(ret); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index de77f784d79c..39daa8db242e 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -3488,6 +3488,36 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), if (PyArray_Check(from_obj)) { ret = PyArray_CanCastArrayTo((PyArrayObject *)from_obj, d2, casting); } + else if (PyArray_IsScalar(from_obj, Generic)) { + /* + * TODO: `PyArray_IsScalar` should not be required for new dtypes. + * weak-promotion branch is in practice identical to dtype one. + */ + if (npy_promotion_state == NPY_USE_WEAK_PROMOTION) { + PyObject *descr = PyObject_GetAttr(from_obj, npy_ma_str_dtype); + if (descr == NULL) { + goto finish; + } + if (!PyArray_DescrCheck(descr)) { + Py_DECREF(descr); + PyErr_SetString(PyExc_TypeError, + "numpy_scalar.dtype did not return a dtype instance."); + goto finish; + } + ret = PyArray_CanCastTypeTo((PyArray_Descr *)descr, d2, casting); + Py_DECREF(descr); + } + else { + /* need to convert to object to consider old value-based logic */ + PyArrayObject *arr; + arr = (PyArrayObject *)PyArray_FROM_O(from_obj); + if (arr == NULL) { + goto finish; + } + ret = PyArray_CanCastArrayTo(arr, d2, casting); + Py_DECREF(arr); + } + } else if (PyArray_IsPythonNumber(from_obj)) { PyErr_SetString(PyExc_TypeError, "can_cast() does not support Python ints, floats, and " @@ -3496,15 +3526,6 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), "explicitly allow them again in the future."); goto finish; } - else if (PyArray_IsScalar(from_obj, Generic)) { - PyArrayObject *arr; - arr = (PyArrayObject *)PyArray_FROM_O(from_obj); - if (arr == NULL) { - goto finish; - } - ret = PyArray_CanCastArrayTo(arr, d2, casting); - Py_DECREF(arr); - } /* Otherwise use CanCastTypeTo */ else { if (!PyArray_DescrConverter2(from_obj, &d1) || d1 == NULL) { @@ -4772,6 +4793,7 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_convert = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_preserve = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_convert_if_no_array = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_cpu = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_err_msg_substr = NULL; static int @@ -4850,6 +4872,10 @@ intern_strings(void) if (npy_ma_str_cpu == NULL) { return -1; } + npy_ma_str_dtype = PyUnicode_InternFromString("dtype"); + if (npy_ma_str_dtype == NULL) { + return -1; + } 
npy_ma_str_array_err_msg_substr = PyUnicode_InternFromString( "__array__() got an unexpected keyword argument 'copy'"); if (npy_ma_str_array_err_msg_substr == NULL) { diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index ba03d367eeb8..52ca654804d0 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -19,6 +19,7 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_preserve; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert_if_no_array; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_cpu; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_err_msg_substr; #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index db5e81e7cc9a..4d560df6456e 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1492,6 +1492,17 @@ def test_can_cast_values(self): assert_(np.can_cast(fi.min, dt)) assert_(np.can_cast(fi.max, dt)) + @pytest.mark.parametrize("dtype", + list("?bhilqBHILQefdgFDG") + [rational]) + def test_can_cast_scalars(self, dtype): + # Basic test to ensure that scalars are supported in can-cast + # (does not check behavior exhaustively). + dtype = np.dtype(dtype) + scalar = dtype.type(0) + + assert np.can_cast(scalar, "int64") == np.can_cast(dtype, "int64") + assert np.can_cast(scalar, "float32", casting="unsafe") + # Custom exception class to test exception propagation in fromiter class NIterError(Exception): From f6cb2a9209cd0ce69f56f9aa0de820923509e862 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 2 May 2024 21:23:59 +0200 Subject: [PATCH 282/980] add benchmarks --- benchmarks/benchmarks/bench_shape_base.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/benchmarks/benchmarks/bench_shape_base.py b/benchmarks/benchmarks/bench_shape_base.py index 72c2a6132e4e..eb13ff969353 100644 --- a/benchmarks/benchmarks/bench_shape_base.py +++ b/benchmarks/benchmarks/bench_shape_base.py @@ -152,3 +152,19 @@ def time_scalar_kron(self): def time_mat_kron(self): np.kron(self.large_mat, self.large_mat) + +class AtLeast1D(Benchmark): + """Benchmarks for np.atleast_1d""" + + def setup(self): + self.x = np.array([1, 2, 3]) + self.zero_d = np.float64(1.) + + def time_atleast_1d(self): + np.atleast_1d(self.x, self.x, self.x) + + def time_atleast_1d_reshape(self): + np.atleast_1d(self.zero_d, self.zero_d, self.zero_d) + + def time_atleast_1d_single_argument(self): + np.atleast_1d(self.x) From c28d8ee629dbc37fa046b930b4f7bdbc5b2e3dd1 Mon Sep 17 00:00:00 2001 From: Arun Kannawadi Date: Thu, 2 May 2024 17:03:17 -0400 Subject: [PATCH 283/980] TYP: Hint that formats is optional Closes #26376. 
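For reference, the runtime already infers the field dtypes when ``formats``
is omitted, which is what the hint now reflects (illustrative sketch):

    import numpy as np

    rec = np.rec.fromrecords([(456, 'dbe', 1.2), (2, 'de', 1.3)],
                             names='col1,col2,col3')
    rec.col1   # array([456,   2])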
--- numpy/_core/records.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 6424f100f8c5..e7de3d10c521 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -174,7 +174,7 @@ def fromrecords( dtype: None = ..., shape: None | _ShapeLike = ..., *, - formats: DTypeLike, + formats: DTypeLike = ..., names: None | str | Sequence[str] = ..., titles: None | str | Sequence[str] = ..., aligned: bool = ..., From 4b23d55465f6f215dad4a59b3041dcde5207da14 Mon Sep 17 00:00:00 2001 From: Ben Woodruff Date: Thu, 2 May 2024 19:59:09 -0600 Subject: [PATCH 284/980] DOC: Update internal links for generator.rst and related This is a first in a sequence of future PRs to update internal links, as discussed in the triage meeting. In addition to removing double backticks on internal functions, I removed unneeded tilde ~ as well. This is a great example page to see why a tilde ~ is needed in the first paragraph of generator.rst. I included instructions for the POSSEE team on this topic, as its a simple way to help train them in the PR process. [skip azp] [skip actions] [skip cirrus] --- doc/source/reference/random/generator.rst | 8 ++++---- numpy/random/_generator.pyx | 18 +++++++++--------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/doc/source/reference/random/generator.rst b/doc/source/reference/random/generator.rst index c8662c56a788..eaa29feae57e 100644 --- a/doc/source/reference/random/generator.rst +++ b/doc/source/reference/random/generator.rst @@ -2,14 +2,14 @@ Random ``Generator`` ==================== -The `~Generator` provides access to +The `Generator` provides access to a wide range of distributions, and served as a replacement for :class:`~numpy.random.RandomState`. The main difference between -the two is that ``Generator`` relies on an additional BitGenerator to +the two is that `Generator` relies on an additional BitGenerator to manage state and generate the random bits, which are then transformed into random values from useful distributions. The default BitGenerator used by -``Generator`` is `~PCG64`. The BitGenerator -can be changed by passing an instantized BitGenerator to ``Generator``. +`Generator` is `PCG64`. The BitGenerator +can be changed by passing an instantized BitGenerator to `Generator`. .. autofunction:: default_rng diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 641d665a8eaa..82129afd0a27 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -146,7 +146,7 @@ cdef class Generator: Container for the BitGenerators. - ``Generator`` exposes a number of methods for generating random + `Generator` exposes a number of methods for generating random numbers drawn from a variety of probability distributions. In addition to the distribution-specific arguments, each method takes a keyword argument `size` that defaults to ``None``. If `size` is ``None``, then a single @@ -159,7 +159,7 @@ cdef class Generator: **No Compatibility Guarantee** - ``Generator`` does not provide a version compatibility guarantee. In + `Generator` does not provide a version compatibility guarantee. In particular, as better algorithms evolve the bit stream may change. Parameters @@ -171,8 +171,8 @@ cdef class Generator: ----- The Python stdlib module `random` contains pseudo-random number generator with a number of methods that are similar to the ones available in - ``Generator``. It uses Mersenne Twister, and this bit generator can - be accessed using ``MT19937``. 
``Generator``, besides being + `Generator`. It uses Mersenne Twister, and this bit generator can + be accessed using `MT19937`. `Generator`, besides being NumPy-aware, has the advantage that it provides a much larger number of probability distributions to choose from. @@ -5025,11 +5025,11 @@ def default_rng(seed=None): Examples -------- - ``default_rng`` is the recommended constructor for the random number class - ``Generator``. Here are several ways we can construct a random - number generator using ``default_rng`` and the ``Generator`` class. + `default_rng` is the recommended constructor for the random number class + `Generator`. Here are several ways we can construct a random + number generator using `default_rng` and the `Generator` class. - Here we use ``default_rng`` to generate a random float: + Here we use `default_rng` to generate a random float: >>> import numpy as np >>> rng = np.random.default_rng(12345) @@ -5041,7 +5041,7 @@ def default_rng(seed=None): >>> type(rfloat) - Here we use ``default_rng`` to generate 3 random integers between 0 + Here we use `default_rng` to generate 3 random integers between 0 (inclusive) and 10 (exclusive): >>> import numpy as np From dcd896f04879f33c353188c7ba0b6b5fadc15f8e Mon Sep 17 00:00:00 2001 From: ajayjanapareddi Date: Thu, 2 May 2024 23:51:34 -0700 Subject: [PATCH 285/980] removed examples as suggested --- doc/source/reference/arrays.datetime.rst | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 9641ba321c84..ea76425e0160 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -223,27 +223,6 @@ calculating the averaged values from the 400 year leap-year cycle. Traceback (most recent call last): File "", line 1, in TypeError: Cannot cast NumPy timedelta64 scalar from metadata [Y] to [D] according to the rule 'same_kind' - - Using the astype to convert month unit to days with unsafe casting. - 400 year leap-year cycle has 400*365 + 97 days and 400*12 months. - Each month has approximately 30.4369 days rounded to an integer value of 30. - - >>> np.timedelta64(1, 'M').astype(np.timedelta64(1, 'D')) # The default casting for astype is 'unsafe'. - numpy.timedelta64(30,'D') - - Similarly, 12 years in the 400-year leap-year cycle is equivalent to - 4382.91 rounded to an integer value of 4382. - - >>> np.timedelta64(12, 'Y').astype(np.timedelta64(1, 'D')) - numpy.timedelta64(4382,'D') - - Safe casting cannot be used for the conversion of month unit to days - as different months have different numbers of days. - - >>> np.timedelta64(1, 'M').astype(np.timedelta64(1, 'D'), casting='safe') - Traceback (most recent call last): - File "", line 1, in - TypeError: Cannot cast scalar from dtype(' Date: Fri, 3 May 2024 12:50:54 -0600 Subject: [PATCH 286/980] DOC: Updated remaining links in random folder This should clean up the rest of the internal links in the random folder of reference. Let me know if this is too many at once to review, and I'll batch them in smaller groups. 
[skip azp] [skip actions] [skip cirrus] --- doc/source/reference/c-api/coremath.rst | 4 +- doc/source/reference/distutils.rst | 2 +- doc/source/reference/random/compatibility.rst | 2 +- doc/source/reference/random/extending.rst | 6 +-- .../reference/random/multithreading.rst | 4 +- .../reference/random/new-or-different.rst | 20 ++++----- doc/source/reference/random/parallel.rst | 42 +++++++++---------- doc/source/reference/random/performance.rst | 2 +- .../reference/random/upgrading-pcg64.rst | 2 +- numpy/random/_mt19937.pyx | 8 ++-- numpy/random/_pcg64.pyx | 18 ++++---- numpy/random/_philox.pyx | 16 +++---- numpy/random/_sfc64.pyx | 16 +++---- 13 files changed, 71 insertions(+), 71 deletions(-) diff --git a/doc/source/reference/c-api/coremath.rst b/doc/source/reference/c-api/coremath.rst index ef91ab28e6aa..f8e0efb34d24 100644 --- a/doc/source/reference/c-api/coremath.rst +++ b/doc/source/reference/c-api/coremath.rst @@ -1,7 +1,7 @@ NumPy core math library ======================= -The numpy core math library ('npymath') is a first step in this direction. This +The numpy core math library (``npymath``) is a first step in this direction. This library contains most math-related C99 functionality, which can be used on platforms where C99 is not well supported. The core math functions have the same API as the C99 ones, except for the ``npy_*`` prefix. @@ -304,7 +304,7 @@ Linking against the core math library in an extension ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To use the core math library that NumPy ships as a static library in your own -Python extension, you need to add the npymath compile and link options to your +Python extension, you need to add the ``npymath`` compile and link options to your extension. The exact steps to take will depend on the build system you are using. The generic steps to take are: diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst index d4640e65456f..72b61e3a94db 100644 --- a/doc/source/reference/distutils.rst +++ b/doc/source/reference/distutils.rst @@ -14,7 +14,7 @@ Packaging (:mod:`numpy.distutils`) .. warning:: Note that ``setuptools`` does major releases often and those may contain - changes that break ``numpy.distutils``, which will *not* be updated anymore + changes that break :mod:`numpy.distutils`, which will *not* be updated anymore for new ``setuptools`` versions. It is therefore recommended to set an upper version bound in your build configuration for the last known version of ``setuptools`` that works with your build. diff --git a/doc/source/reference/random/compatibility.rst b/doc/source/reference/random/compatibility.rst index b45e195fbd71..455a2485ea4a 100644 --- a/doc/source/reference/random/compatibility.rst +++ b/doc/source/reference/random/compatibility.rst @@ -22,7 +22,7 @@ outside of NumPy's control that limit our ability to guarantee much more than this. For example, different CPUs implement floating point arithmetic differently, and this can cause differences in certain edge cases that cascade to the rest of the stream. `Generator.multivariate_normal`, for another -example, uses a matrix decomposition from ``numpy.linalg``. Even on the same +example, uses a matrix decomposition from `numpy.linalg`. 
Even on the same platform, a different build of ``numpy`` may use a different version of this matrix decomposition algorithm from the LAPACK that it links to, causing `Generator.multivariate_normal` to return completely different (but equally diff --git a/doc/source/reference/random/extending.rst b/doc/source/reference/random/extending.rst index 26407bb2a3fa..e04fabd0dd97 100644 --- a/doc/source/reference/random/extending.rst +++ b/doc/source/reference/random/extending.rst @@ -5,7 +5,7 @@ Extending ========= The BitGenerators have been designed to be extendable using standard tools for -high-performance Python -- numba and Cython. The `~Generator` object can also +high-performance Python -- numba and Cython. The `Generator` object can also be used with user-provided BitGenerators as long as these export a small set of required functions. @@ -81,7 +81,7 @@ directly from the ``_generator`` shared object, using the `BitGenerator.cffi` in New BitGenerators ----------------- -`~Generator` can be used with user-provided `~BitGenerator`\ s. The simplest +`Generator` can be used with user-provided `BitGenerator`\ s. The simplest way to write a new BitGenerator is to examine the pyx file of one of the existing BitGenerators. The key structure that must be provided is the ``capsule`` which contains a ``PyCapsule`` to a struct pointer of type @@ -102,7 +102,7 @@ used by the BitGenerators. The next three are function pointers which return the next 64- and 32-bit unsigned integers, the next random double and the next raw value. This final function is used for testing and so can be set to the next 64-bit unsigned integer function if not needed. Functions inside -``Generator`` use this structure as in +`Generator` use this structure as in .. code-block:: c diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst index 09a048561e25..99b7ec781b55 100644 --- a/doc/source/reference/random/multithreading.rst +++ b/doc/source/reference/random/multithreading.rst @@ -104,8 +104,8 @@ that does not use an existing array due to array creation overhead. Out[6]: 125 ms ± 309 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) -Note that if `threads` is not set by the user, it will be determined by -`multiprocessing.cpu_count()`. +Note that if ``threads`` is not set by the user, it will be determined by +``multiprocessing.cpu_count()``. .. code-block:: ipython diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst index 0fcd6f4c9dd3..db886237bcce 100644 --- a/doc/source/reference/random/new-or-different.rst +++ b/doc/source/reference/random/new-or-different.rst @@ -12,16 +12,16 @@ implementations. ================== ==================== ============= Feature Older Equivalent Notes ------------------ -------------------- ------------- -`~.Generator` `~.RandomState` ``Generator`` requires a stream +`Generator` `RandomState` `Generator` requires a stream source, called a `BitGenerator` A number of these are provided. - ``RandomState`` uses - the Mersenne Twister `~.MT19937` by + `RandomState` uses + the Mersenne Twister `MT19937` by default, but can also be instantiated with any BitGenerator. ------------------ -------------------- ------------- -``random`` ``random_sample``, Access the values in a BitGenerator, - ``rand`` convert them to ``float64`` in the +`random` `random_sample`, Access the values in a BitGenerator, + `rand` convert them to ``float64`` in the interval ``[0.0.,`` `` 1.0)``. 
In addition to the ``size`` kwarg, now supports ``dtype='d'`` or ``dtype='f'``, @@ -31,8 +31,8 @@ Feature Older Equivalent Notes Many other distributions are also supported. ------------------ -------------------- ------------- -``integers`` ``randint``, Use the ``endpoint`` kwarg to adjust - ``random_integers`` the inclusion or exclusion of the +`integers` `randint`, Use the ``endpoint`` kwarg to adjust + `random_integers` the inclusion or exclusion of the ``high`` interval endpoint ================== ==================== ============= @@ -40,7 +40,7 @@ Feature Older Equivalent Notes methods which are 2-10 times faster than NumPy's default implementation in `~.Generator.standard_normal`, `~.Generator.standard_exponential` or `~.Generator.standard_gamma`. Because of the change in algorithms, it is not - possible to reproduce the exact random values using ``Generator`` for these + possible to reproduce the exact random values using `Generator` for these distributions or any distribution method that relies on them. .. ipython:: python @@ -63,8 +63,8 @@ Feature Older Equivalent Notes * `~.Generator.integers` is now the canonical way to generate integer random numbers from a discrete uniform distribution. This replaces both - ``randint`` and the deprecated ``random_integers``. -* The ``rand`` and ``randn`` methods are only available through the legacy + `randint` and the deprecated `random_integers`. +* The `rand` and `randn` methods are only available through the legacy `~.RandomState`. * `Generator.random` is now the canonical way to generate floating-point random numbers, which replaces `RandomState.random_sample`, diff --git a/doc/source/reference/random/parallel.rst b/doc/source/reference/random/parallel.rst index eeada129721d..892ceb3d1698 100644 --- a/doc/source/reference/random/parallel.rst +++ b/doc/source/reference/random/parallel.rst @@ -13,39 +13,39 @@ or distributed). ------------------------ NumPy allows you to spawn new (with very high probability) independent -`~BitGenerator` and `~Generator` instances via their ``spawn()`` method. -This spawning is implemented by the `~SeedSequence` used for initializing +`BitGenerator` and `Generator` instances via their ``spawn()`` method. +This spawning is implemented by the `SeedSequence` used for initializing the bit generators random stream. -`~SeedSequence` `implements an algorithm`_ to process a user-provided seed, +`SeedSequence` `implements an algorithm`_ to process a user-provided seed, typically as an integer of some size, and to convert it into an initial state for -a `~BitGenerator`. It uses hashing techniques to ensure that low-quality seeds +a `BitGenerator`. It uses hashing techniques to ensure that low-quality seeds are turned into high quality initial states (at least, with very high probability). -For example, `MT19937` has a state consisting of 624 -`uint32` integers. A naive way to take a 32-bit integer seed would be to just set +For example, `MT19937` has a state consisting of 624 ``uint32`` +integers. A naive way to take a 32-bit integer seed would be to just set the last element of the state to the 32-bit seed and leave the rest 0s. This is a valid state for `MT19937`, but not a good one. The Mersenne Twister algorithm `suffers if there are too many 0s`_. Similarly, two adjacent 32-bit integer seeds (i.e. ``12345`` and ``12346``) would produce very similar streams. 
-`~SeedSequence` avoids these problems by using successions of integer hashes +`SeedSequence` avoids these problems by using successions of integer hashes with good `avalanche properties`_ to ensure that flipping any bit in the input has about a 50% chance of flipping any bit in the output. Two input seeds that are very close to each other will produce initial states that are very far from each other (with very high probability). It is also constructed in such a way that you can provide arbitrary-sized integers or lists of integers. -`~SeedSequence` will take all of the bits that you provide and mix them -together to produce however many bits the consuming `~BitGenerator` needs to +`SeedSequence` will take all of the bits that you provide and mix them +together to produce however many bits the consuming `BitGenerator` needs to initialize itself. These properties together mean that we can safely mix together the usual -user-provided seed with simple incrementing counters to get `~BitGenerator` +user-provided seed with simple incrementing counters to get `BitGenerator` states that are (to very high probability) independent of each other. We can wrap this together into an API that is easy to use and difficult to misuse. -Note that while `~SeedSequence` attempts to solve many of the issues related to +Note that while `SeedSequence` attempts to solve many of the issues related to user-provided small seeds, we still :ref:`recommend` using :py:func:`secrets.randbits` to generate seeds with 128 bits of entropy to avoid the remaining biases introduced by human-chosen seeds. @@ -62,7 +62,7 @@ avoid the remaining biases introduced by human-chosen seeds. .. end_block -For convenience the direct use of `~SeedSequence` is not necessary. +For convenience the direct use of `SeedSequence` is not necessary. The above ``streams`` can be spawned directly from a parent generator via `~Generator.spawn`: @@ -74,7 +74,7 @@ via `~Generator.spawn`: .. end_block Child objects can also spawn to make grandchildren, and so on. -Each child has a `~SeedSequence` with its position in the tree of spawned +Each child has a `SeedSequence` with its position in the tree of spawned child objects mixed in with the user-provided seed to generate independent (with very high probability) streams. @@ -92,7 +92,7 @@ Python has increasingly-flexible mechanisms for parallelization available, and this scheme fits in very well with that kind of use. Using this scheme, an upper bound on the probability of a collision can be -estimated if one knows the number of streams that you derive. `~SeedSequence` +estimated if one knows the number of streams that you derive. `SeedSequence` hashes its inputs, both the seed and the spawn-tree-path, down to a 128-bit pool by default. The probability that there is a collision in that pool, pessimistically-estimated ([1]_), will be about :math:`n^2*2^{-128}` where @@ -110,7 +110,7 @@ territory ([2]_). .. [2] In this calculation, we can mostly ignore the amount of numbers drawn from each stream. See :ref:`upgrading-pcg64` for the technical details about `PCG64`. The other PRNGs we provide have some extra protection built in - that avoids overlaps if the `~SeedSequence` pools differ in the + that avoids overlaps if the `SeedSequence` pools differ in the slightest bit. `PCG64DXSM` has :math:`2^{127}` separate cycles determined by the seed in addition to the position in the :math:`2^{128}` long period for each cycle, so one has to both get on or @@ -133,7 +133,7 @@ territory ([2]_). 
Sequence of integer seeds ------------------------- -As discussed in the previous section, `~SeedSequence` can not only take an +As discussed in the previous section, `SeedSequence` can not only take an integer seed, it can also take an arbitrary-length sequence of (non-negative) integers. If one exercises a little care, one can use this feature to design *ad hoc* schemes for getting safe parallel PRNG streams with similar safety @@ -164,7 +164,7 @@ integer in a list. This can be used to replace a number of unsafe strategies that have been used in the past which try to combine the root seed and the ID back into a single integer seed value. For example, it is common to see users add the worker ID to -the root seed, especially with the legacy `~RandomState` code. +the root seed, especially with the legacy `RandomState` code. .. code-block:: python @@ -253,13 +253,13 @@ are listed below. +-----------------+-------------------------+-------------------------+-------------------------+ | BitGenerator | Period | Jump Size | Bits per Draw | +=================+=========================+=========================+=========================+ -| MT19937 | :math:`2^{19937}-1` | :math:`2^{128}` | 32 | +| `MT19937` | :math:`2^{19937}-1` | :math:`2^{128}` | 32 | +-----------------+-------------------------+-------------------------+-------------------------+ -| PCG64 | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | +| `PCG64` | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | +-----------------+-------------------------+-------------------------+-------------------------+ -| PCG64DXSM | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | +| `PCG64DXSM` | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | +-----------------+-------------------------+-------------------------+-------------------------+ -| Philox | :math:`2^{256}` | :math:`2^{128}` | 64 | +| `Philox` | :math:`2^{256}` | :math:`2^{128}` | 64 | +-----------------+-------------------------+-------------------------+-------------------------+ .. [3] The jump size is :math:`(\phi-1)*2^{128}` where :math:`\phi` is the diff --git a/doc/source/reference/random/performance.rst b/doc/source/reference/random/performance.rst index 7fe383f24bdd..7043734f24c8 100644 --- a/doc/source/reference/random/performance.rst +++ b/doc/source/reference/random/performance.rst @@ -24,7 +24,7 @@ even on 32-bit processes, this is your choice. `MT19937` `fails some statistical tests`_ and is not especially fast compared to modern PRNGs. For these reasons, we mostly do not recommend -using it on its own, only through the legacy `~.RandomState` for +using it on its own, only through the legacy `RandomState` for reproducing old results. That said, it has a very long history as a default in many systems. diff --git a/doc/source/reference/random/upgrading-pcg64.rst b/doc/source/reference/random/upgrading-pcg64.rst index 79be8440ef5c..79432ac578f1 100644 --- a/doc/source/reference/random/upgrading-pcg64.rst +++ b/doc/source/reference/random/upgrading-pcg64.rst @@ -2,7 +2,7 @@ .. 
currentmodule:: numpy.random -Upgrading ``PCG64`` with ``PCG64DXSM`` +Upgrading `PCG64` with `PCG64DXSM` ====================================== Uses of the `PCG64` `BitGenerator` in a massively-parallel context have been diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx index 1ebf43faa117..826cb8441ef1 100644 --- a/numpy/random/_mt19937.pyx +++ b/numpy/random/_mt19937.pyx @@ -67,9 +67,9 @@ cdef class MT19937(BitGenerator): Notes ----- - ``MT19937`` provides a capsule containing function pointers that produce + `MT19937` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers [1]_. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. The Python stdlib module "random" also contains a Mersenne Twister @@ -77,7 +77,7 @@ cdef class MT19937(BitGenerator): **State and Seeding** - The ``MT19937`` state vector consists of a 624-element array of + The `MT19937` state vector consists of a 624-element array of 32-bit unsigned integers plus a single integer value between 0 and 624 that indexes the current position within the main array. @@ -111,7 +111,7 @@ cdef class MT19937(BitGenerator): **Compatibility Guarantee** - ``MT19937`` makes a guarantee that a fixed seed will always produce + `MT19937` makes a guarantee that a fixed seed will always produce the same random integer stream. References diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index 77e2090e72bf..250bf967bba2 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -73,9 +73,9 @@ cdef class PCG64(BitGenerator): The specific member of the PCG family that we use is PCG XSL RR 128/64 as described in the paper ([2]_). - ``PCG64`` provides a capsule containing function pointers that produce + `PCG64` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. Supports the method :meth:`advance` to advance the RNG an arbitrary number of @@ -84,7 +84,7 @@ cdef class PCG64(BitGenerator): **State and Seeding** - The ``PCG64`` state vector consists of 2 unsigned 128-bit values, + The `PCG64` state vector consists of 2 unsigned 128-bit values, which are represented externally as Python ints. One is the state of the PRNG, which is advanced by a linear congruential generator (LCG). The second is a fixed odd increment used in the LCG. @@ -104,7 +104,7 @@ cdef class PCG64(BitGenerator): **Compatibility Guarantee** - ``PCG64`` makes a guarantee that a fixed seed will always produce + `PCG64` makes a guarantee that a fixed seed will always produce the same random integer stream. References @@ -305,13 +305,13 @@ cdef class PCG64DXSM(BitGenerator): generator ([1]_, [2]_). PCG-64 DXSM has a period of :math:`2^{128}` and supports advancing an arbitrary number of steps as well as :math:`2^{127}` streams. The specific member of the PCG family that we use is PCG CM DXSM 128/64. It - differs from ``PCG64`` in that it uses the stronger DXSM output function, + differs from `PCG64` in that it uses the stronger DXSM output function, a 64-bit "cheap multiplier" in the LCG, and outputs from the state before advancing it rather than advance-then-output. 
- ``PCG64DXSM`` provides a capsule containing function pointers that produce + `PCG64DXSM` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. Supports the method :meth:`advance` to advance the RNG an arbitrary number of @@ -320,7 +320,7 @@ cdef class PCG64DXSM(BitGenerator): **State and Seeding** - The ``PCG64DXSM`` state vector consists of 2 unsigned 128-bit values, + The `PCG64DXSM` state vector consists of 2 unsigned 128-bit values, which are represented externally as Python ints. One is the state of the PRNG, which is advanced by a linear congruential generator (LCG). The second is a fixed odd increment used in the LCG. @@ -340,7 +340,7 @@ cdef class PCG64DXSM(BitGenerator): **Compatibility Guarantee** - ``PCG64DXSM`` makes a guarantee that a fixed seed will always produce + `PCG64DXSM` makes a guarantee that a fixed seed will always produce the same random integer stream. References diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index d90da6a9b657..7daefd69eb0c 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -93,14 +93,14 @@ cdef class Philox(BitGenerator): the sequence in increments of :math:`2^{128}`. These features allow multiple non-overlapping sequences to be generated. - ``Philox`` provides a capsule containing function pointers that produce + `Philox` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. **State and Seeding** - The ``Philox`` state vector consists of a 256-bit value encoded as + The `Philox` state vector consists of a 256-bit value encoded as a 4-element uint64 array and a 128-bit value encoded as a 2-element uint64 array. The former is a counter which is incremented by 1 for every 4 64-bit randoms produced. The second is a key which determined the sequence @@ -122,10 +122,10 @@ cdef class Philox(BitGenerator): >>> sg = SeedSequence(1234) >>> rg = [Generator(Philox(s)) for s in sg.spawn(10)] - ``Philox`` can be used in parallel applications by calling the ``jumped`` + `Philox` can be used in parallel applications by calling the :meth:`jumped` method to advances the state as-if :math:`2^{128}` random numbers have - been generated. Alternatively, ``advance`` can be used to advance the - counter for any positive step in [0, 2**256). When using ``jumped``, all + been generated. Alternatively, :meth:`advance` can be used to advance the + counter for any positive step in [0, 2**256). When using :meth:`jumped`, all generators should be chained to ensure that the segments come from the same sequence. @@ -136,7 +136,7 @@ cdef class Philox(BitGenerator): ... rg.append(Generator(bit_generator)) ... bit_generator = bit_generator.jumped() - Alternatively, ``Philox`` can be used in parallel applications by using + Alternatively, `Philox` can be used in parallel applications by using a sequence of distinct keys where each instance uses different key. 
>>> key = 2**96 + 2**33 + 2**17 + 2**9 @@ -144,7 +144,7 @@ cdef class Philox(BitGenerator): **Compatibility Guarantee** - ``Philox`` makes a guarantee that a fixed ``seed`` will always produce + `Philox` makes a guarantee that a fixed ``seed`` will always produce the same random integer stream. Examples diff --git a/numpy/random/_sfc64.pyx b/numpy/random/_sfc64.pyx index 81a4bc764026..12b48059cef2 100644 --- a/numpy/random/_sfc64.pyx +++ b/numpy/random/_sfc64.pyx @@ -50,30 +50,30 @@ cdef class SFC64(BitGenerator): Notes ----- - ``SFC64`` is a 256-bit implementation of Chris Doty-Humphrey's Small Fast - Chaotic PRNG ([1]_). ``SFC64`` has a few different cycles that one might be + `SFC64` is a 256-bit implementation of Chris Doty-Humphrey's Small Fast + Chaotic PRNG ([1]_). `SFC64` has a few different cycles that one might be on, depending on the seed; the expected period will be about - :math:`2^{255}` ([2]_). ``SFC64`` incorporates a 64-bit counter which means + :math:`2^{255}` ([2]_). `SFC64` incorporates a 64-bit counter which means that the absolute minimum cycle length is :math:`2^{64}` and that distinct seeds will not run into each other for at least :math:`2^{64}` iterations. - ``SFC64`` provides a capsule containing function pointers that produce + `SFC64` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. **State and Seeding** - The ``SFC64`` state vector consists of 4 unsigned 64-bit values. The last + The `SFC64` state vector consists of 4 unsigned 64-bit values. The last is a 64-bit counter that increments by 1 each iteration. The input seed is processed by `SeedSequence` to generate the first - 3 values, then the ``SFC64`` algorithm is iterated a small number of times + 3 values, then the `SFC64` algorithm is iterated a small number of times to mix. **Compatibility Guarantee** - ``SFC64`` makes a guarantee that a fixed seed will always produce the same + `SFC64` makes a guarantee that a fixed seed will always produce the same random integer stream. References From 4836024553d1a09b7c6e75f128a91a4cecd6f279 Mon Sep 17 00:00:00 2001 From: Ben Woodruff Date: Fri, 3 May 2024 14:04:51 -0600 Subject: [PATCH 287/980] Fixed links to Generator.random and Generator.integers in table. Then cleaned up table formatting. --- .../reference/random/new-or-different.rst | 54 ++++++++++--------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst index db886237bcce..86c4fc50a251 100644 --- a/doc/source/reference/random/new-or-different.rst +++ b/doc/source/reference/random/new-or-different.rst @@ -9,32 +9,34 @@ NumPy 1.17.0 introduced `Generator` as an improved replacement for the :ref:`legacy ` `RandomState`. Here is a quick comparison of the two implementations. -================== ==================== ============= -Feature Older Equivalent Notes ------------------- -------------------- ------------- -`Generator` `RandomState` `Generator` requires a stream - source, called a `BitGenerator` - A number of these are provided. - `RandomState` uses - the Mersenne Twister `MT19937` by - default, but can also be instantiated - with any BitGenerator. 
------------------- -------------------- ------------- -`random` `random_sample`, Access the values in a BitGenerator, - `rand` convert them to ``float64`` in the - interval ``[0.0.,`` `` 1.0)``. - In addition to the ``size`` kwarg, now - supports ``dtype='d'`` or ``dtype='f'``, - and an ``out`` kwarg to fill a user- - supplied array. - - Many other distributions are also - supported. ------------------- -------------------- ------------- -`integers` `randint`, Use the ``endpoint`` kwarg to adjust - `random_integers` the inclusion or exclusion of the - ``high`` interval endpoint -================== ==================== ============= +======================= ================== ============= +Feature Older Equivalent Notes +----------------------- ------------------ ------------- +`Generator` `RandomState` `Generator` requires a stream + source, called a `BitGenerator` + A number of these are provided. + `RandomState` uses the Mersenne + Twister `MT19937` by default, + but can also be instantiated + with any BitGenerator. +----------------------- ------------------ ------------- +`~.Generator.random` `random_sample`, Access the values in a + `rand` BitGenerator, convert them to + ``float64`` in the interval + ``[0.0.,`` `` 1.0)``. In addition + to the ``size`` kwarg, now + supports ``dtype='d'`` or + ``dtype='f'``, and an ``out`` + kwarg to fill a user-supplied + array. + + Many other distributions are also + supported. +----------------------- ------------------ ------------- +`~.Generator.integers` `randint`, Use the ``endpoint`` kwarg to + `random_integers` adjust the inclusion or exclusion + of the ``high`` interval endpoint. +======================= ================== ============= * The normal, exponential and gamma generators use 256-step Ziggurat methods which are 2-10 times faster than NumPy's default implementation in From 5e83981fe228f2ad4c453ca00e9e3c6f32131681 Mon Sep 17 00:00:00 2001 From: Ben Woodruff Date: Fri, 3 May 2024 14:09:31 -0600 Subject: [PATCH 288/980] fixed small typo --- numpy/random/_philox.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index 7daefd69eb0c..a046d9441fae 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -123,7 +123,7 @@ cdef class Philox(BitGenerator): >>> rg = [Generator(Philox(s)) for s in sg.spawn(10)] `Philox` can be used in parallel applications by calling the :meth:`jumped` - method to advances the state as-if :math:`2^{128}` random numbers have + method to advance the state as-if :math:`2^{128}` random numbers have been generated. Alternatively, :meth:`advance` can be used to advance the counter for any positive step in [0, 2**256). When using :meth:`jumped`, all generators should be chained to ensure that the segments come from the same From 2bd6dca9780693a8d76d23552181dd99364dada7 Mon Sep 17 00:00:00 2001 From: warren Date: Fri, 3 May 2024 16:18:15 -0400 Subject: [PATCH 289/980] BUG: Fix incorrect return type of item with length 0 from chararray.__getitem__ Closes gh-26375. 
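A short sketch of the fixed behavior (illustrative only):

    import numpy as np

    a = np.char.array([b'retro', b' ', b'encabulator'])
    a[1]   # now b'' (a length-0 bytes item); previously the unicode '' was returned

Length-0 items keep the array's item type instead of always degrading to
the unicode ''.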
--- numpy/_core/defchararray.py | 8 +------- numpy/_core/tests/test_defchararray.py | 10 ++++++++++ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 52a62791d382..b5a3aadfd54d 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -577,14 +577,8 @@ def __array_finalize__(self, obj): def __getitem__(self, obj): val = ndarray.__getitem__(self, obj) - if isinstance(val, character): - temp = val.rstrip() - if len(temp) == 0: - val = '' - else: - val = temp - + return val.rstrip() return val # IMPLEMENTATION NOTE: Most of the methods of this class are diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index a7716ab7baf0..6b688ab443a4 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -738,6 +738,16 @@ def test_slice(self): assert_(arr[0, 0] == b'abc') + @pytest.mark.parametrize('data', [['plate', ' ', 'shrimp'], + [b'retro', b' ', b'encabulator']]) + def test_getitem_length_zero_item(self, data): + # Regression test for gh-26375. + a = np.char.array(data) + # a.dtype.type() will be an empty string or bytes instance. + # The equality test will fail if a[1] has the wrong type + # or does not have length 0. + assert_equal(a[1], a.dtype.type()) + class TestMethodsEmptyArray: def setup_method(self): From eba0a9ec6f757626f95d05c18fc1f7cebbd6fd97 Mon Sep 17 00:00:00 2001 From: Ben Woodruff Date: Fri, 3 May 2024 14:36:06 -0600 Subject: [PATCH 290/980] Empty commit to include proper CI commands for doc updates [skip azp] [skip actions] [skip cirrus] From 785d863b557a62b7dd0182d5620e7b7244093c93 Mon Sep 17 00:00:00 2001 From: Arun Pa Date: Sun, 5 May 2024 18:11:07 +0530 Subject: [PATCH 291/980] fix doc example --- doc/source/user/basics.indexing.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/user/basics.indexing.rst b/doc/source/user/basics.indexing.rst index fffb0ecb8519..7481468fe6db 100644 --- a/doc/source/user/basics.indexing.rst +++ b/doc/source/user/basics.indexing.rst @@ -665,11 +665,11 @@ behave just like slicing). .. rubric:: Example -Suppose ``x.shape`` is (10, 20, 30) and ``ind`` is a (2, 3, 4)-shaped +Suppose ``x.shape`` is (10, 20, 30) and ``ind`` is a (2, 5, 2)-shaped indexing :class:`intp` array, then ``result = x[..., ind, :]`` has -shape (10, 2, 3, 4, 30) because the (20,)-shaped subspace has been -replaced with a (2, 3, 4)-shaped broadcasted indexing subspace. If -we let *i, j, k* loop over the (2, 3, 4)-shaped subspace then +shape (10, 2, 5, 2, 30) because the (20,)-shaped subspace has been +replaced with a (2, 5, 2)-shaped broadcasted indexing subspace. If +we let *i, j, k* loop over the (2, 5, 2)-shaped subspace then ``result[..., i, j, k, :] = x[..., ind[i, j, k], :]``. This example produces the same result as :meth:`x.take(ind, axis=-2) `. 
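A quick check of the corrected shapes in the example above (a sketch,
not part of the diff):

    import numpy as np

    x = np.zeros((10, 20, 30))
    ind = np.zeros((2, 5, 2), dtype=np.intp)
    x[..., ind, :].shape   # (10, 2, 5, 2, 30): the (20,)-shaped subspace is
                           # replaced by the (2, 5, 2) broadcast indexing subspace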
From 52ddb74649003c1947a8584241bce524d2cdbf7b Mon Sep 17 00:00:00 2001 From: Matt Haberland Date: Sun, 5 May 2024 11:21:38 -0700 Subject: [PATCH 292/980] DOC: quantile: correct/simplify documentation --- numpy/lib/_function_base_impl.py | 339 +++++++------------------------ numpy/lib/_nanfunctions_impl.py | 8 +- 2 files changed, 81 insertions(+), 266 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 802ce1c66817..f2441d16fbc2 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4050,144 +4050,9 @@ def percentile(a, Notes ----- - In general, the percentile at percentage level :math:`q` of a cumulative - distribution function :math:`F(y)=P(Y \\leq y)` with probability measure - :math:`P` is defined as any number :math:`x` that fulfills the - *coverage conditions* - - .. math:: P(Y < x) \\leq q/100 \\quad\\text{and} - \\quad P(Y \\leq x) \\geq q/100 - - with random variable :math:`Y\\sim P`. - Sample percentiles, the result of ``percentile``, provide nonparametric - estimation of the underlying population counterparts, represented by the - unknown :math:`F`, given a data vector ``a`` of length ``n``. - - One type of estimators arises when one considers :math:`F` as the empirical - distribution function of the data, i.e. - :math:`F(y) = \\frac{1}{n} \\sum_i 1_{a_i \\leq y}`. - Then, different methods correspond to different choices of :math:`x` that - fulfill the above inequalities. Methods that follow this approach are - ``inverted_cdf`` and ``averaged_inverted_cdf``. - - A more general way to define sample percentile estimators is as follows. - The empirical q-percentile of ``a`` is the ``n * q/100``-th value of the - way from the minimum to the maximum in a sorted copy of ``a``. The values - and distances of the two nearest neighbors as well as the `method` - parameter will determine the percentile if the normalized ranking does not - match the location of ``n * q/100`` exactly. This function is the same as - the median if ``q=50``, the same as the minimum if ``q=0`` and the same - as the maximum if ``q=100``. - - The optional `method` parameter specifies the method to use when the - desired percentile lies between two indexes ``i`` and ``j = i + 1``. - In that case, we first determine ``i + g``, a virtual index that lies - between ``i`` and ``j``, where ``i`` is the floor and ``g`` is the - fractional part of the index. The final result is, then, an interpolation - of ``a[i]`` and ``a[j]`` based on ``g``. During the computation of ``g``, - ``i`` and ``j`` are modified using correction constants ``alpha`` and - ``beta`` whose choices depend on the ``method`` used. Finally, note that - since Python uses 0-based indexing, the code subtracts another 1 from the - index internally. - - The following formula determines the virtual index ``i + g``, the location - of the percentile in the sorted sample: - - .. math:: - i + g = (q / 100) * ( n - alpha - beta + 1 ) + alpha - - The different methods then work as follows - - inverted_cdf: - method 1 of H&F [1]_. - This method gives discontinuous results: - - * if g > 0 ; then take j - * if g = 0 ; then take i - - averaged_inverted_cdf: - method 2 of H&F [1]_. - This method gives discontinuous results: - - * if g > 0 ; then take j - * if g = 0 ; then average between bounds - - closest_observation: - method 3 of H&F [1]_. 
- This method gives discontinuous results: - - * if g > 0 ; then take j - * if g = 0 and index is odd ; then take j - * if g = 0 and index is even ; then take i - - interpolated_inverted_cdf: - method 4 of H&F [1]_. - This method gives continuous results using: - - * alpha = 0 - * beta = 1 - - hazen: - method 5 of H&F [1]_. - This method gives continuous results using: - - * alpha = 1/2 - * beta = 1/2 - - weibull: - method 6 of H&F [1]_. - This method gives continuous results using: - - * alpha = 0 - * beta = 0 - - linear: - method 7 of H&F [1]_. - This method gives continuous results using: - - * alpha = 1 - * beta = 1 - - median_unbiased: - method 8 of H&F [1]_. - This method is probably the best method if the sample - distribution function is unknown (see reference). - This method gives continuous results using: - - * alpha = 1/3 - * beta = 1/3 - - normal_unbiased: - method 9 of H&F [1]_. - This method is probably the best method if the sample - distribution function is known to be normal. - This method gives continuous results using: - - * alpha = 3/8 - * beta = 3/8 - - lower: - NumPy method kept for backwards compatibility. - Takes ``i`` as the interpolation point. - - higher: - NumPy method kept for backwards compatibility. - Takes ``j`` as the interpolation point. - - nearest: - NumPy method kept for backwards compatibility. - Takes ``i`` or ``j``, whichever is nearest. - - midpoint: - NumPy method kept for backwards compatibility. - Uses ``(i + j) / 2``. - - For weighted percentiles, the above coverage conditions still hold. The - empirical cumulative distribution is simply replaced by its weighted - version, i.e. - :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. - Only ``method="inverted_cdf"`` supports weights. - + The behavior of `numpy.percentile` with percentage `q` is + that of `numpy.quantile` with argument ``q/100``. + For more information, please see `numpy.quantile`. Examples -------- @@ -4315,7 +4180,7 @@ def quantile(a, a : array_like of real numbers Input array or object that can be converted to an array. q : array_like of float - Probability or sequence of probabilities for the quantiles to compute. + Probability or sequence of probabilities of the quantiles to compute. Values must be between 0 and 1 inclusive. axis : {int, tuple of int, None}, optional Axis or axes along which the quantiles are computed. The default is @@ -4332,8 +4197,7 @@ def quantile(a, method : str, optional This parameter specifies the method to use for estimating the quantile. There are many different methods, some unique to NumPy. - See the notes for explanation. The options sorted by their R type - as summarized in the H&F paper [1]_ are: + The recommended options, numbered as they appear in [1]_, are: 1. 'inverted_cdf' 2. 'averaged_inverted_cdf' @@ -4345,14 +4209,17 @@ def quantile(a, 8. 'median_unbiased' 9. 'normal_unbiased' - The first three methods are discontinuous. NumPy further defines the - following discontinuous variations of the default 'linear' (7.) option: + The first three methods are discontinuous. For backward compatibility + with previous versions of NumPy, the following discontinuous variations + of the default 'linear' (7.) option are available: * 'lower' * 'higher', * 'midpoint' * 'nearest' + See Notes for details. + .. versionchanged:: 1.22.0 This argument was previously called "interpolation" and only offered the "linear" default and last four options. 
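For the record, the ``j``/``g`` interpolation formula this patch introduces in the Notes (next hunk) can be checked numerically against the default ``'linear'`` method; a short sketch, with variable names that are ours::

    import numpy as np

    y = np.sort(np.array([3.0, 1.0, 4.0, 1.0, 5.0]))
    q = 0.4
    n = y.size
    m = 1 - q                      # 'linear', method 7 of H&F
    virt = q*n + m - 1             # virtual index into the sorted sample
    j, g = int(virt // 1), virt % 1
    by_hand = (1 - g)*y[j] + g*y[j + 1]
    assert np.isclose(by_hand, np.quantile(y, q))
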
@@ -4400,7 +4267,64 @@ def quantile(a, Notes ----- - In general, the quantile at probability level :math:`q` of a cumulative + Given a sample `a` from an underlying distribution, `quantile` provides a + nonparametric estimate of the inverse cumulative distribution function. + + By default, this is done by interpolating between adjacent elements in + ``y``, a sorted copy of `a`:: + + (1-g)*y[j] + g*y[j+1] + + where the index ``j`` and coefficient ``g`` are the integral and + fractional components of ``q * (n-1)``, and ``n`` is the number of + elements in the sample. + + This is a special case of Equation 1 of H&F [1]_. More generally, + + - ``j = (q*n + m - 1) // 1``, and + - ``g = (q*n + m - 1) % 1``, + + where ``m`` may be defined according to several different conventions. + The preferred convention may be selected using the ``method`` parameter: + + =============================== =============== =============== + ``method`` number in H&F ``m`` + =============================== =============== =============== + ``interpolated_inverted_cdf`` 4 ``0`` + ``hazen`` 5 ``1/2`` + ``weibull`` 6 ``q`` + ``linear`` (default) 7 ``1 - q`` + ``median_unbiased`` 8 ``q/3 + 1/3`` + ``normal_unbiased`` 9 ``q/4 + 3/8`` + =============================== =============== =============== + + Note that indices ``j`` and ``j + 1`` are clipped to the range ``0`` to + ``n - 1`` when the results of the formula would be outside the allowed + range of non-negative indices. The ``- 1`` in the formulas for ``j`` and + ``g`` accounts for Python's 0-based indexing. + + The table above includes only the estimators from H&F that are continuous + functions of probability `q` (estimators 4-9). NumPy also provides the + three discontinuous estimators from H&F (estimators 1-3), where ``j`` is + defined as above and ``m`` and ``g`` are defined as follows. + + 1. ``inverted_cdf``: ``m = 0`` and ``g = int(q*n > 0)`` + 2. ``averaged_inverted_cdf``: ``m = 0`` and ``g = (1 + int(q*n > 0)) / 2`` + 3. ``closest_observation``: ``m = -1/2`` and + ``1 - int((g == 0) & (j%2 == 0))`` + + For backward compatibility with previous versions of NumPy, `quantile` + provides four additional discontinuous estimators. Like + ``method='linear'``, all have ``m = 1 - q`` so that ``j = q*(n-1) // 1``, + but ``g`` is defined as follows. + + - ``lower``: ``g = 0`` + - ``midpoint``: ``g = 0.5`` + - ``higher``: ``g = 1`` + - ``nearest``: ``g = (q*(n-1) % 1) > 0.5`` + + **Weighted quantiles:** + More formally, the quantile at probability level :math:`q` of a cumulative distribution function :math:`F(y)=P(Y \\leq y)` with probability measure :math:`P` is defined as any number :math:`x` that fulfills the *coverage conditions* @@ -4408,131 +4332,18 @@ def quantile(a, .. math:: P(Y < x) \\leq q \\quad\\text{and}\\quad P(Y \\leq x) \\geq q with random variable :math:`Y\\sim P`. - Sample quantiles, the result of ``quantile``, provide nonparametric + Sample quantiles, the result of `quantile`, provide nonparametric estimation of the underlying population counterparts, represented by the - unknown :math:`F`, given a data vector ``a`` of length ``n``. + unknown :math:`F`, given a data vector `a` of length ``n``. - One type of estimators arises when one considers :math:`F` as the empirical - distribution function of the data, i.e. + Some of the estimators above arise when one considers :math:`F` as the + empirical distribution function of the data, i.e. :math:`F(y) = \\frac{1}{n} \\sum_i 1_{a_i \\leq y}`. 
Then, different methods correspond to different choices of :math:`x` that - fulfill the above inequalities. Methods that follow this approach are - ``inverted_cdf`` and ``averaged_inverted_cdf``. - - A more general way to define sample quantile estimators is as follows. - The empirical q-quantile of ``a`` is the ``n * q``-th value of the - way from the minimum to the maximum in a sorted copy of ``a``. The values - and distances of the two nearest neighbors as well as the `method` - parameter will determine the quantile if the normalized ranking does not - match the location of ``n * q`` exactly. This function is the same as - the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the same - as the maximum if ``q=1.0``. - - The optional `method` parameter specifies the method to use when the - desired quantile lies between two indexes ``i`` and ``j = i + 1``. - In that case, we first determine ``i + g``, a virtual index that lies - between ``i`` and ``j``, where ``i`` is the floor and ``g`` is the - fractional part of the index. The final result is, then, an interpolation - of ``a[i]`` and ``a[j]`` based on ``g``. During the computation of ``g``, - ``i`` and ``j`` are modified using correction constants ``alpha`` and - ``beta`` whose choices depend on the ``method`` used. Finally, note that - since Python uses 0-based indexing, the code subtracts another 1 from the - index internally. - - The following formula determines the virtual index ``i + g``, the location - of the quantile in the sorted sample: - - .. math:: - i + g = q * ( n - alpha - beta + 1 ) + alpha - - The different methods then work as follows - - inverted_cdf: - method 1 of H&F [1]_. - This method gives discontinuous results: - - * if g > 0 ; then take j - * if g = 0 ; then take i - - averaged_inverted_cdf: - method 2 of H&F [1]_. - This method gives discontinuous results: - - * if g > 0 ; then take j - * if g = 0 ; then average between bounds - - closest_observation: - method 3 of H&F [1]_. - This method gives discontinuous results: + fulfill the above coverage conditions. Methods that follow this approach + are ``inverted_cdf`` and ``averaged_inverted_cdf``. - * if g > 0 ; then take j - * if g = 0 and index is odd ; then take j - * if g = 0 and index is even ; then take i - - interpolated_inverted_cdf: - method 4 of H&F [1]_. - This method gives continuous results using: - - * alpha = 0 - * beta = 1 - - hazen: - method 5 of H&F [1]_. - This method gives continuous results using: - - * alpha = 1/2 - * beta = 1/2 - - weibull: - method 6 of H&F [1]_. - This method gives continuous results using: - - * alpha = 0 - * beta = 0 - - linear: - method 7 of H&F [1]_. - This method gives continuous results using: - - * alpha = 1 - * beta = 1 - - median_unbiased: - method 8 of H&F [1]_. - This method is probably the best method if the sample - distribution function is unknown (see reference). - This method gives continuous results using: - - * alpha = 1/3 - * beta = 1/3 - - normal_unbiased: - method 9 of H&F [1]_. - This method is probably the best method if the sample - distribution function is known to be normal. - This method gives continuous results using: - - * alpha = 3/8 - * beta = 3/8 - - lower: - NumPy method kept for backwards compatibility. - Takes ``i`` as the interpolation point. - - higher: - NumPy method kept for backwards compatibility. - Takes ``j`` as the interpolation point. - - nearest: - NumPy method kept for backwards compatibility. - Takes ``i`` or ``j``, whichever is nearest. 
- - midpoint: - NumPy method kept for backwards compatibility. - Uses ``(i + j) / 2``. - - **Weighted quantiles:** - For weighted quantiles, the above coverage conditions still hold. The + For weighted quantiles, the coverage conditions still hold. The empirical cumulative distribution is simply replaced by its weighted version, i.e. :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index 54788a738c7e..baedb7d12498 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -1344,7 +1344,9 @@ def nanpercentile( Notes ----- - For more information please see `numpy.percentile` + The behavior of `numpy.nanpercentile` with percentage `q` is that of + `numpy.quantile` with argument ``q/100`` (ignoring nan values). + For more information, please see `numpy.quantile`. Examples -------- @@ -1532,7 +1534,9 @@ def nanquantile( Notes ----- - For more information please see `numpy.quantile` + The behavior of `numpy.nanquantile` is the same as that of + `numpy.quantile` (ignoring nan values). + For more information, please see `numpy.quantile`. Examples -------- From 70bff21fbb3dea4cb5129f46d0d155493669b8af Mon Sep 17 00:00:00 2001 From: Thomas Li <47963215+lithomas1@users.noreply.github.com> Date: Mon, 6 May 2024 14:23:48 -0400 Subject: [PATCH 293/980] DOC: Add replace to numpy.strings docs --- doc/source/reference/routines.strings.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/reference/routines.strings.rst b/doc/source/reference/routines.strings.rst index 635a01fa1254..c3eea03e1198 100644 --- a/doc/source/reference/routines.strings.rst +++ b/doc/source/reference/routines.strings.rst @@ -32,6 +32,7 @@ String operations add lstrip + replace rstrip strip From fe19909198a84c60ce80fb8a96ea59f68f80eb33 Mon Sep 17 00:00:00 2001 From: Thomas Li <47963215+lithomas1@users.noreply.github.com> Date: Mon, 6 May 2024 14:48:30 -0400 Subject: [PATCH 294/980] Update routines.strings.rst --- doc/source/reference/routines.strings.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/doc/source/reference/routines.strings.rst b/doc/source/reference/routines.strings.rst index c3eea03e1198..a8ea56a4b2be 100644 --- a/doc/source/reference/routines.strings.rst +++ b/doc/source/reference/routines.strings.rst @@ -31,10 +31,22 @@ String operations :toctree: generated/ add + ljust + lower lstrip + mod + multiply + partition replace + rjust + rpartition rstrip strip + swapcase + title + translate + upper + zfill Comparison ---------- @@ -61,11 +73,17 @@ String information count endswith find + index + isalnum isalpha isdecimal isdigit + islower isnumeric isspace + istitle + isupper rfind + rindex startswith str_len From ca58cde7de282a485ca12e5f2582a64fb2e52113 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Tue, 7 May 2024 07:57:02 +0200 Subject: [PATCH 295/980] ENH: Optimize np.power for integer type (#26045) In this PR we optimize np.power(x, n) for integer types and a scalar argument n. The current implementation is a generic binary loop for the arguments. In the case n is a scalar (stride 0) we can optimize the loop. 
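A pure-Python sketch of the square-and-multiply scheme the new C helper implements, peeling off the lowest exponent bit first exactly as the helper does (the function name is ours, for illustration)::

    import numpy as np

    def pow_by_squaring(base, exp):
        # Handle the lowest exponent bit up front, then square-and-multiply
        # over the remaining bits of exp >> 1, as in the C helper.
        out = base if (exp & 1) else 1
        exp >>= 1
        while exp > 0:
            base *= base
            if exp & 1:
                out *= base
            exp >>= 1
        return out

    for x in np.arange(6, dtype=np.int64):
        assert pow_by_squaring(int(x), 5) == int(np.power(x, 5))
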
--- benchmarks/benchmarks/bench_ufunc.py | 20 +++++++++++- numpy/_core/src/umath/loops.c.src | 49 ++++++++++++++++++++++------ 2 files changed, 58 insertions(+), 11 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index dcc5fcbd08c6..ca96d8c22775 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -573,8 +573,26 @@ def time_pow(self, dtype): def time_pow_2(self, dtype): np.power(self.a, 2.0) - def time_pow_half(self, dype): + def time_pow_half(self, dtype): np.power(self.a, 0.5) def time_atan2(self, dtype): np.arctan2(self.a, self.b) + +class BinaryBenchInteger(Benchmark): + params = [np.int32, np.int64] + param_names = ['dtype'] + + def setup(self, dtype): + N = 1000000 + self.a = np.random.randint(20, size=N).astype(dtype) + self.b = np.random.randint(4, size=N).astype(dtype) + + def time_pow(self, dtype): + np.power(self.a, self.b) + + def time_pow_two(self, dtype): + np.power(self.a, 2) + + def time_pow_five(self, dtype): + np.power(self.a, 5) diff --git a/numpy/_core/src/umath/loops.c.src b/numpy/_core/src/umath/loops.c.src index 811680b9c47c..5ac67fa3024b 100644 --- a/numpy/_core/src/umath/loops.c.src +++ b/numpy/_core/src/umath/loops.c.src @@ -471,13 +471,49 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 int } /**end repeat1**/ +static inline @type@ +_@TYPE@_squared_exponentiation_helper(@type@ base, @type@ exponent_two, int first_bit) { + // Helper method to calculate power using squared exponentiation + // The algorithm is partly unrolled. The second and third argument are the exponent//2 and the first bit of the exponent + @type@ out = first_bit ? base : 1; + while (exponent_two > 0) { + base *= base; + if (exponent_two & 1) { + out *= base; + } + exponent_two >>= 1; + } + return out; +} + NPY_NO_EXPORT void @TYPE@_power(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { + if (steps[1]==0) { + // stride for second argument is 0 + BINARY_DEFS + const @type@ in2 = *(@type@ *)ip2; + #if @SIGNED@ + if (in2 < 0) { + npy_gil_error(PyExc_ValueError, + "Integers to negative integer powers are not allowed."); + return; + } + #endif + + int first_bit = in2 & 1; + @type@ in2start = in2 >> 1; + + BINARY_LOOP_SLIDING { + @type@ in1 = *(@type@ *)ip1; + + *((@type@ *) op1) = _@TYPE@_squared_exponentiation_helper(in1, in2start, first_bit); + } + return; + } BINARY_LOOP { @type@ in1 = *(@type@ *)ip1; @type@ in2 = *(@type@ *)ip2; - @type@ out; #if @SIGNED@ if (in2 < 0) { @@ -495,16 +531,9 @@ NPY_NO_EXPORT void continue; } - out = in2 & 1 ? 
in1 : 1; + int first_bit = in2 & 1; in2 >>= 1; - while (in2 > 0) { - in1 *= in1; - if (in2 & 1) { - out *= in1; - } - in2 >>= 1; - } - *((@type@ *) op1) = out; + *((@type@ *) op1) = _@TYPE@_squared_exponentiation_helper(in1, in2, first_bit); } } /**end repeat**/ From 13dc08305b4785e5f0f37dff534fff45109a75da Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 7 May 2024 11:35:19 -0600 Subject: [PATCH 296/980] MNT: more gracefully handle spin adding arguments to functions we override --- .spin/cmds.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index fbd876be9132..d221ad18a1f4 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -95,7 +95,7 @@ def changelog(ctx, token, revision_range): ) @click.argument("meson_args", nargs=-1) @click.pass_context -def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose=False, quiet=False): +def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose=False, quiet=False, *args, **kwargs): """🔧 Build package with Meson/ninja and install MESON_ARGS are passed through e.g.: @@ -136,7 +136,7 @@ def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose= help="Number of parallel build jobs" ) @click.pass_context -def docs(ctx, sphinx_target, clean, first_build, jobs): +def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): """📖 Build Sphinx documentation By default, SPHINXOPTS="-W", raising errors on warnings. @@ -205,7 +205,7 @@ def docs(ctx, sphinx_target, clean, first_build, jobs): '--verbose', '-v', is_flag=True, default=False ) @click.pass_context -def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose): +def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): """🔧 Run tests PYTEST_ARGS are passed through directly to pytest, e.g.: @@ -486,7 +486,7 @@ def bench(ctx, tests, compare, verbose, quick, commits): }) @click.argument("python_args", metavar='', nargs=-1) @click.pass_context -def python(ctx, python_args): +def python(ctx, python_args, *args, **kwargs): """🐍 Launch Python shell with PYTHONPATH set OPTIONS are passed through directly to Python, e.g.: From d1a52e3ad9750bfa32540721a498f3cb4b372f88 Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Tue, 7 May 2024 13:27:12 -0600 Subject: [PATCH 297/980] combined [0.0, 1.0) into a single monospace, and added BitGenerator cross references throughout extending.rst. [skip azp] [skip actions] [skip cirrus] --- doc/source/reference/random/extending.rst | 26 +++++++++---------- .../reference/random/new-or-different.rst | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/doc/source/reference/random/extending.rst b/doc/source/reference/random/extending.rst index e04fabd0dd97..9c7dc86b2825 100644 --- a/doc/source/reference/random/extending.rst +++ b/doc/source/reference/random/extending.rst @@ -4,15 +4,15 @@ Extending ========= -The BitGenerators have been designed to be extendable using standard tools for -high-performance Python -- numba and Cython. The `Generator` object can also -be used with user-provided BitGenerators as long as these export a small set of -required functions. +The `BitGenerator`\ s have been designed to be extendable using standard tools +for high-performance Python -- numba and Cython. The `Generator` object can +also be used with user-provided `BitGenerator`\ s as long as these export a +small set of required functions. Numba ----- Numba can be used with either CTypes or CFFI. 
The current iteration of the -BitGenerators all export a small set of functions through both interfaces. +`BitGenerator`\ s all export a small set of functions through both interfaces. This example shows how numba can be used to produce gaussian samples using a pure Python implementation which is then compiled. The random numbers are @@ -32,7 +32,7 @@ the `Examples`_ section below. Cython ------ -Cython can be used to unpack the ``PyCapsule`` provided by a BitGenerator. +Cython can be used to unpack the ``PyCapsule`` provided by a `BitGenerator`. This example uses `PCG64` and the example from above. The usual caveats for writing high-performance code using Cython -- removing bounds checks and wrap around, providing array alignment information -- still apply. @@ -41,7 +41,7 @@ wrap around, providing array alignment information -- still apply. :language: cython :end-before: example 2 -The BitGenerator can also be directly accessed using the members of the ``bitgen_t`` +The `BitGenerator` can also be directly accessed using the members of the ``bitgen_t`` struct. .. literalinclude:: ../../../../numpy/random/_examples/cython/extending_distributions.pyx @@ -82,8 +82,8 @@ directly from the ``_generator`` shared object, using the `BitGenerator.cffi` in New BitGenerators ----------------- `Generator` can be used with user-provided `BitGenerator`\ s. The simplest -way to write a new BitGenerator is to examine the pyx file of one of the -existing BitGenerators. The key structure that must be provided is the +way to write a new `BitGenerator` is to examine the pyx file of one of the +existing `BitGenerator`\ s. The key structure that must be provided is the ``capsule`` which contains a ``PyCapsule`` to a struct pointer of type ``bitgen_t``, @@ -98,10 +98,10 @@ existing BitGenerators. The key structure that must be provided is the } bitgen_t; which provides 5 pointers. The first is an opaque pointer to the data structure -used by the BitGenerators. The next three are function pointers which return -the next 64- and 32-bit unsigned integers, the next random double and the next -raw value. This final function is used for testing and so can be set to -the next 64-bit unsigned integer function if not needed. Functions inside +used by the `BitGenerator`\ s. The next three are function pointers which +return the next 64- and 32-bit unsigned integers, the next random double and +the next raw value. This final function is used for testing and so can be set +to the next 64-bit unsigned integer function if not needed. Functions inside `Generator` use this structure as in .. code-block:: c diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst index 86c4fc50a251..44cf7aa11013 100644 --- a/doc/source/reference/random/new-or-different.rst +++ b/doc/source/reference/random/new-or-different.rst @@ -23,7 +23,7 @@ Feature Older Equivalent Notes `~.Generator.random` `random_sample`, Access the values in a `rand` BitGenerator, convert them to ``float64`` in the interval - ``[0.0.,`` `` 1.0)``. In addition + ``[0.0., 1.0)``. In addition to the ``size`` kwarg, now supports ``dtype='d'`` or ``dtype='f'``, and an ``out`` From 2e55856762d3a929445fa23b09153a0df6beb349 Mon Sep 17 00:00:00 2001 From: love-bees <33499899+love-bees@users.noreply.github.com> Date: Wed, 8 May 2024 13:19:00 +0200 Subject: [PATCH 298/980] Update INSTALL.rst Updated link to extensive info for building from source. 
Original link was 404 --- INSTALL.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/INSTALL.rst b/INSTALL.rst index e5f598f153d6..eea2e3c9d7de 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -44,7 +44,7 @@ Hypothesis__ https://hypothesis.readthedocs.io/en/latest/ .. note:: More extensive information on building NumPy is maintained at - https://numpy.org/devdocs/user/building.html#building-from-source + https://numpy.org/devdocs/building/#building-numpy-from-source Basic installation From 9ef44a49ec230836f99435b27596cd5a8809e8e8 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 1 May 2024 15:21:46 -0600 Subject: [PATCH 299/980] ENH: make the promotion state use thread-local storage --- numpy/_core/config.h.in | 2 + numpy/_core/include/numpy/npy_common.h | 16 +++++--- numpy/_core/meson.build | 4 +- numpy/_core/src/multiarray/arraytypes.c.src | 6 +-- numpy/_core/src/multiarray/convert_datatype.c | 40 +++++++++++++------ numpy/_core/src/multiarray/convert_datatype.h | 9 ++++- numpy/_core/src/umath/dispatching.c | 15 +++---- numpy/_core/src/umath/scalarmath.c.src | 8 ++-- numpy/_core/src/umath/ufunc_object.c | 8 ++-- numpy/_core/src/umath/ufunc_type_resolution.c | 12 ++++-- 10 files changed, 78 insertions(+), 42 deletions(-) diff --git a/numpy/_core/config.h.in b/numpy/_core/config.h.in index 7ef169c44427..e58609b7f073 100644 --- a/numpy/_core/config.h.in +++ b/numpy/_core/config.h.in @@ -8,6 +8,8 @@ #mesondefine HAVE_FSEEKO #mesondefine HAVE_FALLOCATE #mesondefine HAVE_STRTOLD_L +#mesondefine HAVE_THREAD_LOCAL +#mesondefine HAVE__THREAD_LOCAL #mesondefine HAVE__THREAD #mesondefine HAVE___DECLSPEC_THREAD_ diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index 9fb3f6b3f51f..557b589b66f5 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -113,14 +113,18 @@ #define NPY_NOINLINE static #endif -#ifdef HAVE___THREAD +#ifdef __cplusplus + #define NPY_TLS thread_local +#elif defined(HAVE_THREAD_LOCAL) + #define NPY_TLS thread_local +#elif defined(HAVE__THREAD_LOCAL) + #define NPY_TLS _Thread_local +#elif defined(HAVE___THREAD) #define NPY_TLS __thread +#elif defined(HAVE__DECLSPEC_THREAD_) + #define NPY_TLS __declspec(thread) #else - #ifdef HAVE___DECLSPEC_THREAD_ - #define NPY_TLS __declspec(thread) - #else - #define NPY_TLS - #endif + #define NPY_TLS #endif #ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index efdcd2162b6c..5a343340d315 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -244,6 +244,8 @@ endforeach # variable attributes tested via "int %s a" % attribute optional_variable_attributes = [ + ['thread_local', 'HAVE_THREAD_LOCAL'], + ['_Thread_local', 'HAVE__THREAD_LOCAL'], ['__thread', 'HAVE__THREAD'], ['__declspec(thread)', 'HAVE___DECLSPEC_THREAD_'] ] @@ -262,7 +264,7 @@ foreach optional_attr: optional_variable_attributes return 0; } ''' - if cc.compiles(code) + if cc.compiles(code, name: optional_attr[0]) cdata.set10(optional_attr[1], true) endif endforeach diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 49701bf8499a..39a2dad619e0 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -274,9 +274,9 @@ static int #endif ) { PyArray_Descr *descr = PyArray_DescrFromType(NPY_@TYPE@); - - if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION || ( - npy_promotion_state == 
NPY_USE_WEAK_PROMOTION_AND_WARN + int promotion_state = get_npy_promotion_state(); + if (promotion_state == NPY_USE_LEGACY_PROMOTION || ( + get_npy_promotion_state() == NPY_USE_WEAK_PROMOTION_AND_WARN && !npy_give_promotion_warnings())) { /* * This path will be taken both for the "promotion" case such as diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index f6f06ef4fb37..166ee6c2b555 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -51,11 +51,22 @@ NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[] = {0, 3, 5, 10, 10, 20, 20, 20, 20}; /* * Whether or not legacy value-based promotion/casting is used. */ -NPY_NO_EXPORT int npy_promotion_state = NPY_USE_LEGACY_PROMOTION; + NPY_NO_EXPORT PyObject *NO_NEP50_WARNING_CTX = NULL; NPY_NO_EXPORT PyObject *npy_DTypePromotionError = NULL; NPY_NO_EXPORT PyObject *npy_UFuncNoLoopError = NULL; +NPY_NO_EXPORT NPY_TLS int npy_promotion_state = NPY_USE_LEGACY_PROMOTION; + +NPY_NO_EXPORT int +get_npy_promotion_state() { + return npy_promotion_state; +} + +NPY_NO_EXPORT void +set_npy_promotion_state(int new_promotion_state) { + npy_promotion_state = new_promotion_state; +} static PyObject * PyArray_GetGenericToVoidCastingImpl(void); @@ -100,13 +111,14 @@ npy_give_promotion_warnings(void) NPY_NO_EXPORT PyObject * npy__get_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(arg)) { - if (npy_promotion_state == NPY_USE_WEAK_PROMOTION) { + int promotion_state = get_npy_promotion_state(); + if (promotion_state == NPY_USE_WEAK_PROMOTION) { return PyUnicode_FromString("weak"); } - else if (npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) { + else if (promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) { return PyUnicode_FromString("weak_and_warn"); } - else if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) { + else if (promotion_state == NPY_USE_LEGACY_PROMOTION) { return PyUnicode_FromString("legacy"); } PyErr_SetString(PyExc_SystemError, "invalid promotion state!"); @@ -123,14 +135,15 @@ npy__set_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *arg) "must be a string."); return NULL; } + int new_promotion_state; if (PyUnicode_CompareWithASCIIString(arg, "weak") == 0) { - npy_promotion_state = NPY_USE_WEAK_PROMOTION; + new_promotion_state = NPY_USE_WEAK_PROMOTION; } else if (PyUnicode_CompareWithASCIIString(arg, "weak_and_warn") == 0) { - npy_promotion_state = NPY_USE_WEAK_PROMOTION_AND_WARN; + new_promotion_state = NPY_USE_WEAK_PROMOTION_AND_WARN; } else if (PyUnicode_CompareWithASCIIString(arg, "legacy") == 0) { - npy_promotion_state = NPY_USE_LEGACY_PROMOTION; + new_promotion_state = NPY_USE_LEGACY_PROMOTION; } else { PyErr_Format(PyExc_TypeError, @@ -138,6 +151,7 @@ npy__set_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *arg) "'weak', 'legacy', or 'weak_and_warn' but got '%.100S'", arg); return NULL; } + set_npy_promotion_state(new_promotion_state); Py_RETURN_NONE; } @@ -930,7 +944,7 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to, to = NULL; } - if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) { + if (get_npy_promotion_state() == NPY_USE_LEGACY_PROMOTION) { /* * If it's a scalar, check the value. (This only currently matters for * numeric types and for `to == NULL` it can't be numeric.) 
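The Python-level entry points touched here remain ``np._get_promotion_state`` / ``np._set_promotion_state``; a minimal sketch of the round trip, which after this change is per-thread state::

    import numpy as np

    np._set_promotion_state("weak")
    assert np._get_promotion_state() == "weak"
    np._set_promotion_state("legacy")   # restore the default
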
@@ -1954,10 +1968,11 @@ PyArray_CheckLegacyResultType( npy_intp ndtypes, PyArray_Descr **dtypes) { PyArray_Descr *ret = NULL; - if (npy_promotion_state == NPY_USE_WEAK_PROMOTION) { + int promotion_state = get_npy_promotion_state(); + if (promotion_state == NPY_USE_WEAK_PROMOTION) { return 0; } - if (npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN + if (promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN && !npy_give_promotion_warnings()) { return 0; } @@ -2054,12 +2069,13 @@ PyArray_CheckLegacyResultType( Py_DECREF(ret); return 0; } - if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) { + + if (promotion_state == NPY_USE_LEGACY_PROMOTION) { Py_SETREF(*new_result, ret); return 0; } - assert(npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN); + assert(promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN); if (PyErr_WarnFormat(PyExc_UserWarning, 1, "result dtype changed due to the removal of value-based " "promotion from NumPy. Changed from %S to %S.", diff --git a/numpy/_core/src/multiarray/convert_datatype.h b/numpy/_core/src/multiarray/convert_datatype.h index d1493e6997bd..4eda59d236cc 100644 --- a/numpy/_core/src/multiarray/convert_datatype.h +++ b/numpy/_core/src/multiarray/convert_datatype.h @@ -12,7 +12,8 @@ extern NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[]; #define NPY_USE_LEGACY_PROMOTION 0 #define NPY_USE_WEAK_PROMOTION 1 #define NPY_USE_WEAK_PROMOTION_AND_WARN 2 -extern NPY_NO_EXPORT int npy_promotion_state; + +extern NPY_NO_EXPORT NPY_TLS int npy_promotion_state; extern NPY_NO_EXPORT PyObject *NO_NEP50_WARNING_CTX; extern NPY_NO_EXPORT PyObject *npy_DTypePromotionError; extern NPY_NO_EXPORT PyObject *npy_UFuncNoLoopError; @@ -137,6 +138,12 @@ simple_cast_resolve_descriptors( NPY_NO_EXPORT int PyArray_InitializeCasts(void); +NPY_NO_EXPORT int +get_npy_promotion_state(); + +NPY_NO_EXPORT void +set_npy_promotion_state(int new_promotion_state); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index 1cf915aee3b4..673d4fd68b5c 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -966,8 +966,10 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } } + int current_promotion_state = get_npy_promotion_state(); + if (force_legacy_promotion - && npy_promotion_state == NPY_USE_LEGACY_PROMOTION + && current_promotion_state == NPY_USE_LEGACY_PROMOTION && (ufunc->ntypes != 0 || ufunc->userloops != NULL)) { /* * We must use legacy promotion for value-based logic. 
Call the old @@ -982,11 +984,10 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } /* Pause warnings and always use "new" path */ - int old_promotion_state = npy_promotion_state; - npy_promotion_state = NPY_USE_WEAK_PROMOTION; + set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, ops, signature, op_dtypes, allow_legacy_promotion); - npy_promotion_state = old_promotion_state; + set_npy_promotion_state(current_promotion_state); if (info == NULL) { goto handle_error; @@ -996,7 +997,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); /* If necessary, check if the old result would have been different */ - if (NPY_UNLIKELY(npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) + if (NPY_UNLIKELY(current_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) && (force_legacy_promotion || promoting_pyscalars) && npy_give_promotion_warnings()) { PyArray_DTypeMeta *check_dtypes[NPY_MAXARGS]; @@ -1005,11 +1006,11 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, all_dtypes, i); } /* Before calling to the legacy promotion, pretend that is the state: */ - npy_promotion_state = NPY_USE_LEGACY_PROMOTION; + set_npy_promotion_state(NPY_USE_LEGACY_PROMOTION); int res = legacy_promote_using_legacy_type_resolver(ufunc, ops, signature, check_dtypes, NULL, NPY_TRUE); /* Reset the promotion state: */ - npy_promotion_state = NPY_USE_WEAK_PROMOTION_AND_WARN; + set_npy_promotion_state(NPY_USE_WEAK_PROMOTION_AND_WARN); if (res < 0) { goto handle_error; } diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index c04a8f248cd3..cc8f82aca11b 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -963,7 +963,7 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) *may_need_deferring = NPY_TRUE; } if (!IS_SAFE(NPY_DOUBLE, NPY_@TYPE@)) { - if (npy_promotion_state != NPY_USE_WEAK_PROMOTION) { + if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { /* Legacy promotion and weak-and-warn not handled here */ return PROMOTION_REQUIRED; } @@ -986,7 +986,7 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) * long -> (c)longdouble is safe, so `OTHER_IS_UNKNOWN_OBJECT` will * be returned below for huge integers. 
*/ - if (npy_promotion_state != NPY_USE_WEAK_PROMOTION) { + if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { /* Legacy promotion and weak-and-warn not handled here */ return PROMOTION_REQUIRED; } @@ -996,7 +996,7 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) long val = PyLong_AsLongAndOverflow(value, &overflow); if (overflow) { /* handle as if "unsafe" */ - if (npy_promotion_state != NPY_USE_WEAK_PROMOTION) { + if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { return OTHER_IS_UNKNOWN_OBJECT; } return CONVERT_PYSCALAR; @@ -1018,7 +1018,7 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) *may_need_deferring = NPY_TRUE; } if (!IS_SAFE(NPY_CDOUBLE, NPY_@TYPE@)) { - if (npy_promotion_state != NPY_USE_WEAK_PROMOTION) { + if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { /* Legacy promotion and weak-and-warn not handled here */ return PROMOTION_REQUIRED; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index c58d417773ba..efad2a7be2b4 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -670,7 +670,7 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, // TODO: Is this equivalent/better by removing the logic which enforces // that we always use weak promotion in the core? - if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) { + if (get_npy_promotion_state() == NPY_USE_LEGACY_PROMOTION) { continue; /* Skip use of special dtypes */ } @@ -6073,8 +6073,8 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, PyArray_Descr *operation_descrs[NPY_MAXARGS] = {NULL}; /* This entry-point to promotion lives in the NEP 50 future: */ - int original_promotion_state = npy_promotion_state; - npy_promotion_state = NPY_USE_WEAK_PROMOTION; + int original_promotion_state = get_npy_promotion_state(); + set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); npy_bool promoting_pyscalars = NPY_FALSE; npy_bool allow_legacy_promotion = NPY_TRUE; @@ -6261,7 +6261,7 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, Py_DECREF(capsule); finish: - npy_promotion_state = original_promotion_state; + set_npy_promotion_state(original_promotion_state); Py_XDECREF(result_dtype_tuple); for (int i = 0; i < ufunc->nargs; i++) { diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index abb8c5ac7e07..f6f231223f63 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -1965,10 +1965,12 @@ linear_search_type_resolver(PyUFuncObject *self, ufunc_name = ufunc_get_name_cstr(self); - assert(npy_promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); + int promotion_state = get_npy_promotion_state(); + + assert(promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); /* Always "use" with new promotion in case of Python int/float/complex */ int use_min_scalar; - if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) { + if (promotion_state == NPY_USE_LEGACY_PROMOTION) { use_min_scalar = should_use_min_scalar(nin, op, 0, NULL); } else { @@ -2167,10 +2169,12 @@ type_tuple_type_resolver(PyUFuncObject *self, ufunc_name = ufunc_get_name_cstr(self); - assert(npy_promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); + int promotion_state = get_npy_promotion_state(); + + assert(promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); /* Always "use" with new promotion in case of Python int/float/complex */ int use_min_scalar; - if 
(npy_promotion_state == NPY_USE_LEGACY_PROMOTION) { + if (promotion_state == NPY_USE_LEGACY_PROMOTION) { use_min_scalar = should_use_min_scalar(nin, op, 0, NULL); } else { From 688af9d08d24653f8f55d027345bc908d01e347b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 1 May 2024 15:49:09 -0600 Subject: [PATCH 300/980] TST: add test for promotion state thread safety --- numpy/_core/tests/test_nep50_promotions.py | 30 ++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index ce23be9c1dcb..ba7f0d5d1319 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -5,6 +5,8 @@ """ import operator +import threading +import warnings import numpy as np @@ -340,3 +342,31 @@ def test_oob_creation(sctype, create): assert create(sctype, iinfo.min) == iinfo.min assert create(sctype, iinfo.max) == iinfo.max + + +def test_thread_local_promotion_state(): + b = threading.Barrier(2) + def legacy_no_warn(): + np._set_promotion_state("legacy") + b.wait() + assert np._get_promotion_state() == "legacy" + # turn warnings into errors, this should not warn with + # legacy promotion state + with warnings.catch_warnings(): + warnings.simplefilter("error") + np.float16(1) + 131008 + + def weak_warn(): + np._set_promotion_state("weak") + b.wait() + assert np._get_promotion_state() == "weak" + with pytest.warns(RuntimeWarning): + np.float16(1) + 131008 + + task1 = threading.Thread(target=legacy_no_warn) + task2 = threading.Thread(target=weak_warn) + + task1.start() + task2.start() + task1.join() + task2.join() From 8d0a8ad6f15e7fcd656b0fa0c178aebe90791966 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 6 May 2024 15:12:54 -0600 Subject: [PATCH 301/980] MNT: appease linter --- numpy/_core/tests/test_nep50_promotions.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index ba7f0d5d1319..7631c665c76a 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -346,6 +346,7 @@ def test_oob_creation(sctype, create): def test_thread_local_promotion_state(): b = threading.Barrier(2) + def legacy_no_warn(): np._set_promotion_state("legacy") b.wait() From b8f31dc95043cacc451955b3c84e4b204a7ee4f8 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 6 May 2024 15:28:57 -0600 Subject: [PATCH 302/980] TST: skip test using threads on WASM --- numpy/_core/tests/test_nep50_promotions.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index 7631c665c76a..741359bc63cd 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -344,6 +344,7 @@ def test_oob_creation(sctype, create): assert create(sctype, iinfo.max) == iinfo.max +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") def test_thread_local_promotion_state(): b = threading.Barrier(2) From bdc63a0e39698d503e94cc2b7f8da603d5ae7e78 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 6 May 2024 16:02:53 -0600 Subject: [PATCH 303/980] BUG: fix typo in __declspec(thread) check --- numpy/_core/include/numpy/npy_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index 557b589b66f5..c6ef7a6ec669 100644 --- 
a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -121,7 +121,7 @@ #define NPY_TLS _Thread_local #elif defined(HAVE___THREAD) #define NPY_TLS __thread -#elif defined(HAVE__DECLSPEC_THREAD_) +#elif defined(HAVE___DECLSPEC_THREAD_) #define NPY_TLS __declspec(thread) #else #define NPY_TLS From 2e82f7506d708afe6c404d4c0d8457dffde98b88 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 7 May 2024 14:03:00 -0600 Subject: [PATCH 304/980] MNT: fix last remaining direct use of npy_promotion_state --- numpy/_core/src/multiarray/multiarraymodule.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 39daa8db242e..37b2f4860b1a 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -3493,7 +3493,7 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), * TODO: `PyArray_IsScalar` should not be required for new dtypes. * weak-promotion branch is in practice identical to dtype one. */ - if (npy_promotion_state == NPY_USE_WEAK_PROMOTION) { + if (get_npy_promotion_state() == NPY_USE_WEAK_PROMOTION) { PyObject *descr = PyObject_GetAttr(from_obj, npy_ma_str_dtype); if (descr == NULL) { goto finish; From 18a3404c018201ba1aee4b826aac070d54342f42 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 8 May 2024 12:11:47 -0600 Subject: [PATCH 305/980] MNT: apply code review comments --- numpy/_core/src/multiarray/arraytypes.c.src | 4 ++-- numpy/_core/src/multiarray/convert_datatype.c | 2 +- numpy/_core/src/multiarray/convert_datatype.h | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 39a2dad619e0..cafacd2c5ccb 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -276,8 +276,8 @@ static int PyArray_Descr *descr = PyArray_DescrFromType(NPY_@TYPE@); int promotion_state = get_npy_promotion_state(); if (promotion_state == NPY_USE_LEGACY_PROMOTION || ( - get_npy_promotion_state() == NPY_USE_WEAK_PROMOTION_AND_WARN - && !npy_give_promotion_warnings())) { + promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN + && !npy_give_promotion_warnings())) { /* * This path will be taken both for the "promotion" case such as * `uint8_arr + 123` as well as the assignment case. 
diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 166ee6c2b555..5711bce7bc08 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -56,7 +56,7 @@ NPY_NO_EXPORT PyObject *NO_NEP50_WARNING_CTX = NULL; NPY_NO_EXPORT PyObject *npy_DTypePromotionError = NULL; NPY_NO_EXPORT PyObject *npy_UFuncNoLoopError = NULL; -NPY_NO_EXPORT NPY_TLS int npy_promotion_state = NPY_USE_LEGACY_PROMOTION; +static NPY_TLS int npy_promotion_state = NPY_USE_LEGACY_PROMOTION; NPY_NO_EXPORT int get_npy_promotion_state() { diff --git a/numpy/_core/src/multiarray/convert_datatype.h b/numpy/_core/src/multiarray/convert_datatype.h index 4eda59d236cc..02f25ad0b383 100644 --- a/numpy/_core/src/multiarray/convert_datatype.h +++ b/numpy/_core/src/multiarray/convert_datatype.h @@ -13,7 +13,6 @@ extern NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[]; #define NPY_USE_WEAK_PROMOTION 1 #define NPY_USE_WEAK_PROMOTION_AND_WARN 2 -extern NPY_NO_EXPORT NPY_TLS int npy_promotion_state; extern NPY_NO_EXPORT PyObject *NO_NEP50_WARNING_CTX; extern NPY_NO_EXPORT PyObject *npy_DTypePromotionError; extern NPY_NO_EXPORT PyObject *npy_UFuncNoLoopError; From dcde9ffe68bc57c59fb0b53c0aa584f96c33cdb7 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 6 May 2024 14:07:07 -0600 Subject: [PATCH 306/980] BUG: support nan-like null strings in [l,r]strip --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 86 ++++++++++++++------ numpy/_core/tests/test_stringdtype.py | 41 ++++++++-- 2 files changed, 96 insertions(+), 31 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 052c4381a4b5..8550605cc732 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1046,6 +1046,7 @@ string_lrstrip_chars_strided_loop( PyArray_StringDTypeObject *s1descr = (PyArray_StringDTypeObject *)context->descriptors[0]; int has_null = s1descr->na_object != NULL; int has_string_na = s1descr->has_string_na; + int has_nan_na = s1descr->has_nan_na; const npy_static_string *default_string = &s1descr->default_string; npy_intp N = dimensions[0]; @@ -1072,28 +1073,46 @@ string_lrstrip_chars_strided_loop( s2 = *default_string; } } + else if (has_nan_na) { + if (s2_isnull) { + npy_gil_error(PyExc_ValueError, + "Cannot use a null string that is not a " + "string as the %s delimiter", ufunc_name); + } + if (s1_isnull) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in %s", + ufunc_name); + goto fail; + } + goto next_step; + } + } else { npy_gil_error(PyExc_ValueError, - "Cannot strip null values that are not strings"); + "Can only strip null values that are strings " + "or NaN-like values"); goto fail; } } + { + char *new_buf = (char *)PyMem_RawCalloc(s1.size, 1); + Buffer buf1((char *)s1.buf, s1.size); + Buffer buf2((char *)s2.buf, s2.size); + Buffer outbuf(new_buf, s1.size); + size_t new_buf_size = string_lrstrip_chars + (buf1, buf2, outbuf, striptype); + if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { + npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", + ufunc_name); + goto fail; + } - char *new_buf = (char *)PyMem_RawCalloc(s1.size, 1); - Buffer buf1((char *)s1.buf, s1.size); - Buffer buf2((char *)s2.buf, s2.size); - Buffer outbuf(new_buf, s1.size); - size_t new_buf_size = string_lrstrip_chars - (buf1, buf2, outbuf, striptype); - - if 
(NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { - npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", - ufunc_name); - goto fail; + PyMem_RawFree(new_buf); } - - PyMem_RawFree(new_buf); + next_step: in1 += strides[0]; in2 += strides[1]; @@ -1150,8 +1169,9 @@ string_lrstrip_whitespace_strided_loop( const char *ufunc_name = ((PyUFuncObject *)context->caller)->name; STRIPTYPE striptype = *(STRIPTYPE *)context->method->static_data; PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)context->descriptors[0]; - int has_string_na = descr->has_string_na; int has_null = descr->na_object != NULL; + int has_string_na = descr->has_string_na; + int has_nan_na = descr->has_nan_na; const npy_static_string *default_string = &descr->default_string; npy_string_allocator *allocators[2] = {}; @@ -1169,6 +1189,7 @@ string_lrstrip_whitespace_strided_loop( npy_static_string s = {0, NULL}; int s_isnull = NpyString_load(allocator, ps, &s); + if (s_isnull == -1) { npy_gil_error(PyExc_MemoryError, "Failed to load string in %s", ufunc_name); @@ -1181,26 +1202,39 @@ string_lrstrip_whitespace_strided_loop( if (has_string_na || !has_null) { s = *default_string; } + else if (has_nan_na) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in %s", + ufunc_name); + goto fail; + } + goto next_step; + } else { npy_gil_error(PyExc_ValueError, - "Cannot strip null values that are not strings"); + "Can only strip null values that are strings or " + "NaN-like values"); goto fail; } } + { + char *new_buf = (char *)PyMem_RawCalloc(s.size, 1); + Buffer buf((char *)s.buf, s.size); + Buffer outbuf(new_buf, s.size); + size_t new_buf_size = string_lrstrip_whitespace( + buf, outbuf, striptype); - char *new_buf = (char *)PyMem_RawCalloc(s.size, 1); - Buffer buf((char *)s.buf, s.size); - Buffer outbuf(new_buf, s.size); - size_t new_buf_size = string_lrstrip_whitespace( - buf, outbuf, striptype); + if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { + npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", + ufunc_name); + goto fail; + } - if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { - npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", - ufunc_name); - goto fail; + PyMem_RawFree(new_buf); } - PyMem_RawFree(new_buf); + next_step: in += strides[0]; out += strides[1]; diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index dd6ac36999e6..a24e643b1e64 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1080,7 +1080,13 @@ def unicode_array(): "capitalize", "expandtabs", "lower", - "splitlines" "swapcase" "title" "upper", + "lstrip", + "rstrip", + "splitlines", + "strip", + "swapcase", + "title", + "upper", ] BOOL_OUTPUT_FUNCTIONS = [ @@ -1107,7 +1113,10 @@ def unicode_array(): "istitle", "isupper", "lower", + "lstrip", + "rstrip", "splitlines", + "strip", "swapcase", "title", "upper", @@ -1129,10 +1138,20 @@ def unicode_array(): "upper", ] +ONLY_IN_NP_CHAR = [ + "join", + "split", + "rsplit", + "splitlines" +] + @pytest.mark.parametrize("function_name", UNARY_FUNCTIONS) def test_unary(string_array, unicode_array, function_name): - func = getattr(np.char, function_name) + if function_name in ONLY_IN_NP_CHAR: + func = getattr(np.char, function_name) + else: + func = getattr(np.strings, function_name) dtype = string_array.dtype sres = func(string_array) ures = func(unicode_array) @@ -1173,6 +1192,10 @@ 
def test_unary(string_array, unicode_array, function_name): with pytest.raises(ValueError): func(na_arr) return + if not (is_nan or is_str): + with pytest.raises(ValueError): + func(na_arr) + return res = func(na_arr) if is_nan and function_name in NAN_PRESERVING_FUNCTIONS: assert res[0] is dtype.na_object @@ -1197,13 +1220,17 @@ def test_unary(string_array, unicode_array, function_name): ("index", (None, "e")), ("join", ("-", None)), ("ljust", (None, 12)), + ("lstrip", (None, "A")), ("partition", (None, "A")), ("replace", (None, "A", "B")), ("rfind", (None, "A")), ("rindex", (None, "e")), ("rjust", (None, 12)), + ("rsplit", (None, "A")), + ("rstrip", (None, "A")), ("rpartition", (None, "A")), ("split", (None, "A")), + ("strip", (None, "A")), ("startswith", (None, "A")), ("zfill", (None, 12)), ] @@ -1260,10 +1287,13 @@ def call_func(func, args, array, sanitize=True): @pytest.mark.parametrize("function_name, args", BINARY_FUNCTIONS) def test_binary(string_array, unicode_array, function_name, args): - func = getattr(np.char, function_name) + if function_name in ONLY_IN_NP_CHAR: + func = getattr(np.char, function_name) + else: + func = getattr(np.strings, function_name) sres = call_func(func, args, string_array) ures = call_func(func, args, unicode_array, sanitize=False) - if sres.dtype == StringDType(): + if not isinstance(sres, tuple) and sres.dtype == StringDType(): ures = ures.astype(StringDType()) assert_array_equal(sres, ures) @@ -1462,7 +1492,8 @@ def test_setup(self): view = self.get_view(self.a) sizes = np.where(is_short, view['size_and_flags'] & 0xf, view['size']) - assert_array_equal(sizes, np.strings.str_len(self.a)) + assert_array_equal(sizes, np.strings + .str_len(self.a)) assert_array_equal(view['xsiz'][2:], np.void(b'\x00' * (self.sizeofstr // 4 - 1))) # Check that the medium string uses only 1 byte for its length From 44a90a5c1cc60747d7f31fd507a9836695d12c9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Fri, 19 Apr 2024 12:05:22 +0200 Subject: [PATCH 307/980] BUG: vecdot signature --- numpy/__init__.pyi | 5 ++-- numpy/_core/multiarray.py | 4 +-- numpy/_core/numeric.py | 6 ++-- numpy/_core/numeric.pyi | 33 ---------------------- numpy/_core/umath.py | 2 +- numpy/_typing/_ufunc.pyi | 8 ++---- numpy/typing/tests/data/reveal/numeric.pyi | 6 ---- numpy/typing/tests/data/reveal/ufuncs.pyi | 10 +++++++ tools/ci/array-api-skips.txt | 4 +-- 9 files changed, 23 insertions(+), 55 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 1a52e1b85d10..98b953444de5 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -364,7 +364,6 @@ from numpy._core.numeric import ( convolve as convolve, outer as outer, tensordot as tensordot, - vecdot as vecdot, roll as roll, rollaxis as rollaxis, moveaxis as moveaxis, @@ -3321,7 +3320,7 @@ logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]] logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] -matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None] +matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] @@ -3350,7 +3349,7 @@ tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], 
None] trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] -vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None] +vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] abs = absolute acos = arccos diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 27c2662c6a61..77e249a85828 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -35,8 +35,8 @@ 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat', 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'get_handler_name', 'get_handler_version', 'inner', 'interp', - 'interp_complex', 'is_busday', 'lexsort', 'matmul', 'may_share_memory', - 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters', + 'interp_complex', 'is_busday', 'lexsort', 'matmul', 'vecdot', + 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters', 'normalize_axis_index', 'packbits', 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function', 'set_legacy_print_mode', diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index d5116cee2756..82755a0eff46 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -18,7 +18,7 @@ fromstring, inner, lexsort, matmul, may_share_memory, min_scalar_type, ndarray, nditer, nested_iters, promote_types, putmask, result_type, shares_memory, vdot, where, zeros, normalize_axis_index, - _get_promotion_state, _set_promotion_state + _get_promotion_state, _set_promotion_state, vecdot ) from . import overrides @@ -52,8 +52,8 @@ 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity', 'allclose', 'putmask', 'flatnonzero', 'inf', 'nan', 'False_', 'True_', 'bitwise_not', - 'full', 'full_like', 'matmul', 'shares_memory', 'may_share_memory', - '_get_promotion_state', '_set_promotion_state'] + 'full', 'full_like', 'matmul', 'vecdot', 'shares_memory', + 'may_share_memory', '_get_promotion_state', '_set_promotion_state'] def _zeros_like_dispatcher( diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index a24c368cbd08..8871cf9d264a 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -497,39 +497,6 @@ def tensordot( axes: int | tuple[_ShapeLike, _ShapeLike] = ..., ) -> NDArray[object_]: ... -@overload -def vecdot( - x1: _ArrayLikeUnknown, x2: _ArrayLikeUnknown, axis: int = ... -) -> NDArray[Any]: ... -@overload -def vecdot( - x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, axis: int = ... -) -> NDArray[np.bool]: ... -@overload -def vecdot( - x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, axis: int = ... -) -> NDArray[unsignedinteger[Any]]: ... -@overload -def vecdot( - x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, axis: int = ... -) -> NDArray[signedinteger[Any]]: ... -@overload -def vecdot( - x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, axis: int = ... -) -> NDArray[floating[Any]]: ... -@overload -def vecdot( - x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, axis: int = ... -) -> NDArray[complexfloating[Any, Any]]: ... -@overload -def vecdot( - x1: _ArrayLikeTD64_co, x2: _ArrayLikeTD64_co, axis: int = ... -) -> NDArray[timedelta64]: ... -@overload -def vecdot( - x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co, axis: int = ... -) -> NDArray[object_]: ... 
- @overload def roll( a: _ArrayLike[_SCT], diff --git a/numpy/_core/umath.py b/numpy/_core/umath.py index ae5514de4c26..8e51cd1694af 100644 --- a/numpy/_core/umath.py +++ b/numpy/_core/umath.py @@ -37,4 +37,4 @@ 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', - 'subtract', 'tan', 'tanh', 'true_divide', 'trunc', 'vecdot'] + 'subtract', 'tan', 'tanh', 'true_divide', 'trunc'] diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index f693341b521c..b6e4db4b5e13 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -33,6 +33,7 @@ _4Tuple = tuple[_T, _T, _T, _T] _NTypes = TypeVar("_NTypes", bound=int) _IDType = TypeVar("_IDType", bound=Any) _NameType = TypeVar("_NameType", bound=str) +_Signature = TypeVar("_Signature", bound=str) class _SupportsArrayUFunc(Protocol): @@ -366,7 +367,7 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _4Tuple[None | str] = ..., ) -> _2Tuple[NDArray[Any]]: ... -class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property @@ -379,11 +380,8 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: def nout(self) -> Literal[1]: ... @property def nargs(self) -> Literal[3]: ... - - # NOTE: In practice the only gufunc in the main namespace is `matmul`, - # so we can use its signature here @property - def signature(self) -> Literal["(n?,k),(k,m?)->(n?,m?)"]: ... + def signature(self) -> _Signature: ... @property def reduce(self) -> None: ... 
 @property
diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi
index 8f21ce405b89..1f0a8b36fff8 100644
--- a/numpy/typing/tests/data/reveal/numeric.pyi
+++ b/numpy/typing/tests/data/reveal/numeric.pyi
@@ -91,12 +91,6 @@ assert_type(np.tensordot(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any
 assert_type(np.tensordot(AR_i8, AR_m), npt.NDArray[np.timedelta64])
 assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_])
-assert_type(np.vecdot(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]])
-assert_type(np.vecdot(AR_b, AR_b), npt.NDArray[np.bool])
-assert_type(np.vecdot(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]])
-assert_type(np.vecdot(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]])
-assert_type(np.vecdot(AR_i8, AR_f8), npt.NDArray[np.floating[Any]])
-
 assert_type(np.isscalar(i8), bool)
 assert_type(np.isscalar(AR_i8), bool)
 assert_type(np.isscalar(B), bool)
diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi
index 28e189411802..859c202c3766 100644
--- a/numpy/typing/tests/data/reveal/ufuncs.pyi
+++ b/numpy/typing/tests/data/reveal/ufuncs.pyi
@@ -76,6 +76,16 @@ assert_type(np.matmul.identity, None)
 assert_type(np.matmul(AR_f8, AR_f8), Any)
 assert_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)]), Any)
+assert_type(np.vecdot.__name__, Literal["vecdot"])
+assert_type(np.vecdot.ntypes, Literal[19])
+assert_type(np.vecdot.identity, None)
+assert_type(np.vecdot.nin, Literal[2])
+assert_type(np.vecdot.nout, Literal[1])
+assert_type(np.vecdot.nargs, Literal[3])
+assert_type(np.vecdot.signature, Literal["(n),(n)->()"])
+assert_type(np.vecdot.identity, None)
+assert_type(np.vecdot(AR_f8, AR_f8), Any)
+
 assert_type(np.bitwise_count.__name__, Literal['bitwise_count'])
 assert_type(np.bitwise_count.ntypes, Literal[11])
 assert_type(np.bitwise_count.identity, None)
diff --git a/tools/ci/array-api-skips.txt b/tools/ci/array-api-skips.txt
index b57462275051..74c4b49c5dfc 100644
--- a/tools/ci/array-api-skips.txt
+++ b/tools/ci/array-api-skips.txt
@@ -14,6 +14,6 @@ array_api_tests/test_signatures.py::test_func_signature[reshape]
 array_api_tests/test_signatures.py::test_func_signature[argsort]
 array_api_tests/test_signatures.py::test_func_signature[sort]
-# TODO: check why in CI `inspect.signature(np.vecdot)` returns (*arg, **kwarg)
-# instead of raising ValueError. mtsokol: couldn't reproduce locally
+# ufuncs signature on linux is always <Signature (*args, **kwargs)>
+# np.vecdot is the only ufunc with a keyword argument which causes a failure
 array_api_tests/test_signatures.py::test_func_signature[vecdot]

From 8966ad05c3d1509025225c934d9e75c28cea8309 Mon Sep 17 00:00:00 2001
From: Yuki K
Date: Thu, 22 Feb 2024 23:40:42 +0000
Subject: [PATCH 308/980] DOC: Fix some typos and incorrect markups

[skip cirrus] [skip azp] [skip actions]
---
 doc/source/dev/howto-docs.rst          | 2 +-
 doc/source/reference/c-api/array.rst   | 18 +++++++++---------
 doc/source/user/absolute_beginners.rst | 2 +-
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/doc/source/dev/howto-docs.rst b/doc/source/dev/howto-docs.rst
index 5f23d544145f..097456fad0b4 100644
--- a/doc/source/dev/howto-docs.rst
+++ b/doc/source/dev/howto-docs.rst
@@ -376,7 +376,7 @@ membergroups and members-only options:
   :outline:
   :no-link:
-Checkout the `doxygenclass documentation _`
+Checkout the `doxygenclass documentation `__
 for more details and to see it in action.
 ``doxygennamespace``
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 7d1296cba8fe..eded7e184e04 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -1802,14 +1802,14 @@ the functions that must be implemented for each slot.
    "default" value that may differ from the "identity" value normally
    used. For example:
-   - ``0.0`` is the default for ``sum([])``. But ``-0.0`` is the correct
-     identity otherwise as it preserves the sign for ``sum([-0.0])``.
-   - We use no identity for object, but return the default of ``0`` and
-     ``1`` for the empty ``sum([], dtype=object)`` and
-     ``prod([], dtype=object)``.
-     This allows ``np.sum(np.array(["a", "b"], dtype=object))`` to work.
-   - ``-inf`` or ``INT_MIN`` for ``max`` is an identity, but at least
-     ``INT_MIN`` not a good *default* when there are no items.
+   - ``0.0`` is the default for ``sum([])``. But ``-0.0`` is the correct
+     identity otherwise as it preserves the sign for ``sum([-0.0])``.
+   - We use no identity for object, but return the default of ``0`` and
+     ``1`` for the empty ``sum([], dtype=object)`` and
+     ``prod([], dtype=object)``.
+     This allows ``np.sum(np.array(["a", "b"], dtype=object))`` to work.
+   - ``-inf`` or ``INT_MIN`` for ``max`` is an identity, but at least
+     ``INT_MIN`` not a good *default* when there are no items.
 
    *initial* is a pointer to the data for the initial value, which should be
    filled in. Returns -1, 0, or 1 indicating error, no initial value, and the
@@ -3912,7 +3912,7 @@ are defined:
 * If neither is defined, the C-API is
   declared to ``static void **PyArray_API``, so it is only visible within the
-  compilation unit/file using ``#includes numpy/arrayobject.h``.
+  compilation unit/file using ``#include <numpy/arrayobject.h>``.
 * If only ``PY_ARRAY_UNIQUE_SYMBOL`` is defined (it could be empty)
   then the it is declared to a non-static ``void **`` allowing it to be
   used by other files which are linked.
diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst
index fb5fcd9240df..61468132879f 100644
--- a/doc/source/user/absolute_beginners.rst
+++ b/doc/source/user/absolute_beginners.rst
@@ -235,7 +235,7 @@ only one "data type". The data type is recorded in the ``dtype`` attribute.
   >>> a.dtype
   dtype('int64')  # "int" for integer, "64" for 64-bit
-ref:`Read more about array attributes here ` and learn about
+:ref:`Read more about array attributes here ` and learn about
 :ref:`array objects here `.
How to create a basic array From e438a8611f07bd2979ef853c03e7ac98f728d652 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 10 May 2024 11:16:05 -0600 Subject: [PATCH 309/980] MNT: apply review comments --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 2 +- numpy/_core/tests/test_stringdtype.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 8550605cc732..3135f9cbf9c0 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1107,6 +1107,7 @@ string_lrstrip_chars_strided_loop( if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", ufunc_name); + PyMem_RawFree(new_buf); goto fail; } @@ -1189,7 +1190,6 @@ string_lrstrip_whitespace_strided_loop( npy_static_string s = {0, NULL}; int s_isnull = NpyString_load(allocator, ps, &s); - if (s_isnull == -1) { npy_gil_error(PyExc_MemoryError, "Failed to load string in %s", ufunc_name); diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index a24e643b1e64..b5d91d402a10 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1492,8 +1492,7 @@ def test_setup(self): view = self.get_view(self.a) sizes = np.where(is_short, view['size_and_flags'] & 0xf, view['size']) - assert_array_equal(sizes, np.strings - .str_len(self.a)) + assert_array_equal(sizes, np.strings.str_len(self.a)) assert_array_equal(view['xsiz'][2:], np.void(b'\x00' * (self.sizeofstr // 4 - 1))) # Check that the medium string uses only 1 byte for its length From aa5a932f44f8a75b29b91e883d069d3509db8b8e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 May 2024 17:58:26 +0000 Subject: [PATCH 310/980] MAINT: Bump ossf/scorecard-action from 2.3.1 to 2.3.3 Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.3.1 to 2.3.3. - [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/0864cf19026789058feabb7e87baa5f140aac736...dc50aa9510b46c811795eb24b2f1ba02a914e534) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/scorecards.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 9fc7e4757afe..b5f851b64540 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -30,7 +30,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 + uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # v2.3.3 with: results_file: results.sarif results_format: sarif From bc15ac933d8f0f5e6aa800fb141887d6250cff81 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 10 May 2024 14:19:01 -0600 Subject: [PATCH 311/980] MNT: clean up references to array_owned==2 case in StringDType --- numpy/_core/src/multiarray/stringdtype/dtype.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 9ba48e26bac4..6e12084a9707 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -33,7 +33,6 @@ new_stringdtype_instance(PyObject *na_object, int coerce) char *default_string_buf = NULL; char *na_name_buf = NULL; - char array_owned = 0; npy_string_allocator *allocator = NpyString_new_allocator(PyMem_RawMalloc, PyMem_RawFree, PyMem_RawRealloc); @@ -138,7 +137,7 @@ new_stringdtype_instance(PyObject *na_object, int coerce) if (na_name_buf != NULL) { PyMem_RawFree(na_name_buf); } - if (allocator != NULL && array_owned != 2) { + if (allocator != NULL) { NpyString_free_allocator(allocator); } return NULL; @@ -660,7 +659,7 @@ stringdtype_dealloc(PyArray_StringDTypeObject *self) { Py_XDECREF(self->na_object); // this can be null if an error happens while initializing an instance - if (self->allocator != NULL && self->array_owned != 2) { + if (self->allocator != NULL) { NpyString_free_allocator(self->allocator); } PyMem_RawFree((char *)self->na_name.buf); From 63603b4b13496077fa96c15bd6bd3007831839a6 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 10 May 2024 14:18:08 -0600 Subject: [PATCH 312/980] DOC: add reference docs for NpyString C API [skip azp] [skip cirrus] [skip actions] --- doc/source/reference/c-api/index.rst | 1 + doc/source/reference/c-api/strings.rst | 268 +++++++++++++++++++++++++ 2 files changed, 269 insertions(+) create mode 100644 doc/source/reference/c-api/strings.rst diff --git a/doc/source/reference/c-api/index.rst b/doc/source/reference/c-api/index.rst index e7f86d3ff7a8..2a7a627fde3e 100644 --- a/doc/source/reference/c-api/index.rst +++ b/doc/source/reference/c-api/index.rst @@ -47,6 +47,7 @@ code. iterator ufunc generalized-ufuncs + strings coremath datetimes deprecations diff --git a/doc/source/reference/c-api/strings.rst b/doc/source/reference/c-api/strings.rst new file mode 100644 index 000000000000..43d280d14e09 --- /dev/null +++ b/doc/source/reference/c-api/strings.rst @@ -0,0 +1,268 @@ +NpyString API +============= + +.. sectionauthor:: Nathan Goldbaum + +.. versionadded:: 2.0 + +This API allows access to the UTF-8 string data stored in NumPy StringDType +arrays. See `NEP-55 `_ for +more in-depth details into the design of StringDType. + +Examples +-------- + +Loading a String +^^^^^^^^^^^^^^^^ + +Say we are writing a ufunc implementation for ``StringDType``. 
If we are given +``const char *buf`` pointer to the beginning of a ``StringDType`` array entry, and a +``PyArray_Descr *`` pointer to the array descriptor, one can +access the underlying string data like so: + +.. code-block:: C + + npy_string_allocator *allocator = NpyString_acquire_allocator( + (PyArray_StringDTypeObject *)descr); + + npy_static_string sdata = {0, NULL}; + npy_packed_static_string *packed_string = (npy_packed_static_string *)buf; + int is_null = 0; + + is_null = NpyString_load(allocator, packed_string, &sdata); + + if (is_null == -1) { + // failed to load string, set error + return -1; + } + else if (is_null) { + // handle missing string + // sdata->buf is NULL + // sdata->size is 0 + } + else { + // sdata->buf is a pointer to the beginning of a string + // sdata->size is the size of the string + } + NpyString_release_allocator(allocator); + +Packing a String +^^^^^^^^^^^^^^^^ + +This example shows how to pack a new string entry into an array: + +.. code-block:: C + + char *str = "Hello world"; + size_t size = 11; + npy_packed_static_string *packed_string = (npy_packed_static_string *)buf; + + npy_string_allocator *allocator = NpyString_acquire_allocator( + (PyArray_StringDTypeObject *)descr); + + // copy contents of str into packed_string + if (NpyString_pack(allocator, packed_string, str, size) == -1) { + // string packing failed, set error + return -1; + } + + // packed_string contains a copy of "Hello world" + + NpyString_release_allocator(allocator); + +Types +----- + +.. c:type:: npy_packed_static_string + + An opaque struct that represents "packed" encoded strings. Individual + entries in array buffers are instances of this struct. Direct access + to the data in the struct is undefined and future version of the library may + change the packed representation of strings. + +.. c:type:: npy_static_string + + An unpacked string allowing access to the UTF-8 string data. + + .. code-block:: c + + typedef struct npy_unpacked_static_string { + size_t size; + const char *buf; + } npy_static_string; + + .. c:member:: size_t size + + The size of the string, in bytes. + + .. c:member:: const char *buf + + The string buffer. Holds UTF-8-encoded bytes. Does not currently end in + a null string but we may decide to add null termination in the + future, so do not rely on the presence or absence of null-termination. + + Note that this is a ``const`` buffer. If you want to alter an + entry in an array, you should create a new string and pack it + into the array entry. + +.. c:type:: npy_string_allocator + + An opaque pointer to an object that handles string allocation. + Before using the allocator, you must acquire the allocator lock and release + the lock after you are done interacting with strings managed by the + allocator. + +.. c:type:: PyArray_StringDTypeObject + + The C struct backing instances of StringDType in Python. Attributes store + the settings the object was created with, an instance of + ``npy_string_allocator`` that manages string allocations for arrays + associated with the DType instance, and several attributes caching + information about the missing string object that is commonly needed in cast + and ufunc loop implementations. + + .. code-block:: c + + typedef struct { + PyArray_Descr base; + PyObject *na_object; + char coerce; + char has_nan_na; + char has_string_na; + char array_owned; + npy_static_string default_string; + npy_static_string na_name; + npy_string_allocator *allocator; + } PyArray_StringDTypeObject; + + .. 
c:member:: PyArray_Descr base + + The base object. Use this member to access fields common to all + descriptor objects. + + .. c:member:: PyObject *na_object + + A reference to the object representing the null value. If there is no + null value (the default) this will be NULL. + + .. c:member:: char coerce + + 1 if string coercion is enabled, 0 otherwise. + + .. c:member:: char has_nan_na + + 1 if the missing string object (if any) is NaN-like, 0 otherwise. + + .. c:member:: char has_string_na + + 1 if the missing string object (if any) is a string, 0 otherwise. + + .. c:member:: char array_owned + + 1 if an array owns the StringDType instance, 0 otherwise. + + .. c:member:: npy_static_string default_string + + The default string to use in operations. If the missing string object + is a string, this will contain the string data for the missing string. + + .. c:member:: npy_static_string na_name + + The name of the missing string object, if any. An empty string + otherwise. + + .. c:member:: npy_string_allocator allocator + + The allocator instance associated with the array that owns this + descriptor instance. The allocator should only be directly accessed + after acquiring the allocator_lock and the lock should be released + immediately after the allocator is no longer needed + + +Functions +--------- + +.. c:function:: npy_string_allocator *NpyString_acquire_allocator( \ + const PyArray_StringDTypeObject *descr) + + Acquire the mutex locking the allocator attached to + ``descr``. ``NpyString_release_allocator`` must be called on the allocator + returned by this function exactly once. Note that functions requiring the + GIL should not be called while the allocator mutex is held, as doing so may + cause deadlocks. + +.. c:function:: void NpyString_acquire_allocators( \ + size_t n_descriptors, PyArray_Descr *const descrs[], \ + npy_string_allocator *allocators[]) + + Simultaneously acquire the mutexes locking the allocators attached to + multiple descriptors. Writes a pointer to the associated allocator in the + allocators array for each StringDType descriptor in the array. If any of + the descriptors are not StringDType instances, write NULL to the allocators + array for that entry. + + ``n_descriptors`` is the number of descriptors in the descrs array that + should be examined. Any descriptor after ``n_descriptors`` elements is + ignored. A buffer overflow will happen if the ``descrs`` array does not + contain n_descriptors elements. + + If pointers to the same descriptor are passed multiple times, only acquires + the allocator mutex once but sets identical allocator pointers appropriately. + The allocator mutexes must be released after this function returns, see + ``NpyString_release_allocators``. + + Note that functions requiring the GIL should not be called while the + allocator mutex is held, as doing so may cause deadlocks. + +.. c:function:: void NpyString_release_allocator( \ + npy_string_allocator *allocator) + + Release the mutex locking an allocator. This must be called exactly once + after acquiring the allocator mutex and all operations requiring the + allocator are done. + + If you need to release multiple allocators, see + NpyString_release_allocators, which can correctly handle releasing the + allocator once when given several references to the same allocator. + +.. c:function:: void NpyString_release_allocators( \ + size_t length, npy_string_allocator *allocators[]) + + Release the mutexes locking N allocators. ``length`` is the length of the + allocators array. 
NULL entries are ignored. + + If pointers to the same allocator are passed multiple times, only releases + the allocator mutex once. + +.. c:function:: int NpyString_load(npy_string_allocator *allocator, \ + const npy_packed_static_string *packed_string, \ + npy_static_string *unpacked_string) + + Extract the packed contents of ``packed_string`` into ``unpacked_string``. + + The ``unpacked_string`` is a read-only view onto the ``packed_string`` data + and should not be used to modify the string data. If ``packed_string`` is + the null string, sets ``unpacked_string.buf`` to the NULL + pointer. Returns -1 if unpacking the string fails, returns 1 if + ``packed_string`` is the null string, and returns 0 otherwise. + + A useful pattern is to define a stack-allocated npy_static_string instance + initialized to ``{0, NULL}`` and pass a pointer to the stack-allocated + unpacked string to this function. This function can be used to + simultaneously unpack a string and determine if it is a null string. + +.. c:function:: int NpyString_pack_null( \ + npy_string_allocator *allocator, \ + npy_packed_static_string *packed_string) + + Pack the null string into ``packed_string``. Returns 0 on success and -1 on + failure. + +.. c:function:: int NpyString_pack( \ + npy_string_allocator *allocator, \ + npy_packed_static_string *packed_string, \ + const char *buf, \ + size_t size) + + Copy and pack the first ``size`` entries of the buffer pointed to by ``buf`` + into the ``packed_string``. Returns 0 on success and -1 on failure. From 300096d384046eee479b0c7a70f79e308da52bff Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sat, 11 May 2024 09:41:32 +1000 Subject: [PATCH 313/980] MAINT: updated instructions to get MachAr byte pattern (#26415) * MAINT: updated instructions to get MachAr byte pattern --- numpy/_core/getlimits.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index c582a79e5fb2..b01e47fade43 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -146,10 +146,12 @@ def _float_to_str(self, value): title = _title_fmt.format('half'))} # Key to identify the floating point type. Key is result of -# ftype('-0.1').newbyteorder('<').tobytes() # -# 20230201 - use (ftype(-1.0) / ftype(10.0)).newbyteorder('<').tobytes() -# instead because stold may have deficiencies on some platforms. +# ftype = np.longdouble # or float64, float32, etc. +# v = (ftype(-1.0) / ftype(10.0)) +# v.view(v.dtype.newbyteorder('<')).tobytes() +# +# Uses division to work around deficiencies in strtold on some platforms. # See: # https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure From 050cb1f1d88c1306fab5bab46d259f5f656043be Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sat, 11 May 2024 22:29:50 +0200 Subject: [PATCH 314/980] lint --- numpy/_core/tests/test_umath.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 4d73fce3b727..1ebd5633be6c 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1244,9 +1244,9 @@ def test_power_fast_paths(self): result = np.power(a, 2.) 
assert_array_max_ulp(result, expected.astype(dt), maxulp=1) - expected = np.sqrt(a) + expected = np.sqrt(a).astype(dt) result = np.power(a, 0.5) - assert_array_max_ulp(result[:-1], expected[:-1].astype(dt), maxulp=1) + assert_array_max_ulp(result[:-1], expected[:-1], maxulp=1) class TestFloat_power: From 3ff27fee75a39b5025a1a3a28ffbda9ea2e35ec2 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sat, 11 May 2024 22:51:10 +0200 Subject: [PATCH 315/980] fix tests --- numpy/_core/tests/test_umath.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 1ebd5633be6c..621579b4da2b 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1239,15 +1239,19 @@ def test_power_fast_paths(self): # gh-26055 for dt in [np.float32, np.float64]: a = np.array([0, 1.1, 2, 12e12, -10., np.inf, -np.inf], dt) - expected = np.array([0.0, 1.21, 4., 1.44e+26, 100, np.inf, np.inf]) result = np.power(a, 2.) assert_array_max_ulp(result, expected.astype(dt), maxulp=1) + a = np.array([0, 1.1, 2, 12e12, np.inf], dt) expected = np.sqrt(a).astype(dt) result = np.power(a, 0.5) - assert_array_max_ulp(result[:-1], expected[:-1], maxulp=1) + assert_array_max_ulp(result, expected, maxulp=1) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.power(-10, .5))) + assert_(w[0].category is RuntimeWarning) class TestFloat_power: def test_type_conversion(self): From 2eff77fb9cbc4a5abc642b9acaecd5a05137d2f9 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sat, 11 May 2024 22:59:03 +0200 Subject: [PATCH 316/980] apply branching pattern --- numpy/_core/src/umath/fast_loop_macros.h | 10 ---------- numpy/_core/src/umath/loops_umath_fp.dispatch.c.src | 4 ++-- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/numpy/_core/src/umath/fast_loop_macros.h b/numpy/_core/src/umath/fast_loop_macros.h index 366facb5a100..b8c1926b2f7e 100644 --- a/numpy/_core/src/umath/fast_loop_macros.h +++ b/numpy/_core/src/umath/fast_loop_macros.h @@ -90,16 +90,6 @@ abs_ptrdiff(char *a, char *b) BINARY_DEFS\ BINARY_LOOP_SLIDING -/** (ip1, ip2) -> (op1), for case ip2 has zero stride*/ -#define BINARY_DEFS_ZERO_STRIDE\ - char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\ - npy_intp is1 = steps[0], os1 = steps[2];\ - npy_intp n = dimensions[0];\ - npy_intp i;\ - -#define BINARY_LOOP_SLIDING_ZERO_STRIDE \ - for (i = 0; i < n; i++, ip1 += is1, op1 += os1) - /** (ip1, ip2) -> (op1, op2) */ #define BINARY_LOOP_TWO_OUT\ char *ip1 = args[0], *ip2 = args[1], *op1 = args[2], *op2 = args[3];\ diff --git a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src index 8b6b06c4199a..74af8edaa1f5 100644 --- a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src @@ -237,10 +237,10 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) { int stride_zero = steps[1]==0; if (stride_zero) { - BINARY_DEFS_ZERO_STRIDE + BINARY_DEFS const @type@ in2 = *(@type@ *)ip2; if (in2 == 2.0) { - BINARY_LOOP_SLIDING_ZERO_STRIDE { + BINARY_LOOP_SLIDING { const @type@ in1 = *(@type@ *)ip1; *(@type@ *)op1 = in1 * in1; } From 1a599d52fd98669436634d5c9f21e8bfe3704318 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sat, 11 May 2024 23:10:22 +0200 Subject: [PATCH 317/980] do not add special test for sqrt --- numpy/_core/tests/test_umath.py | 4 ---- 1 file changed, 4 
deletions(-) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 621579b4da2b..ad02b101022e 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1248,10 +1248,6 @@ def test_power_fast_paths(self): result = np.power(a, 0.5) assert_array_max_ulp(result, expected, maxulp=1) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_(np.isnan(np.power(-10, .5))) - assert_(w[0].category is RuntimeWarning) class TestFloat_power: def test_type_conversion(self): From 51bedd5b7592a316e55ccd01f6f5586e0cfadc7e Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sun, 12 May 2024 00:22:16 +0200 Subject: [PATCH 318/980] fix tests on all platforms --- numpy/_core/tests/test_umath.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index ad02b101022e..7a3d1078647a 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1243,7 +1243,7 @@ def test_power_fast_paths(self): result = np.power(a, 2.) assert_array_max_ulp(result, expected.astype(dt), maxulp=1) - a = np.array([0, 1.1, 2, 12e12, np.inf], dt) + a = np.array([0, 1.1, 2, 12e12], dt) expected = np.sqrt(a).astype(dt) result = np.power(a, 0.5) assert_array_max_ulp(result, expected, maxulp=1) From 36ab1b580693beec0c5be61f883b94f2907bc1d0 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 13 May 2024 16:51:21 +0200 Subject: [PATCH 319/980] TYP,CI: Bump mypy to 1.10.0 --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 8a6561b7891a..d03275d0bbce 100644 --- a/environment.yml +++ b/environment.yml @@ -26,7 +26,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.2.0 # needed for python < 3.10 - - mypy=1.7.1 + - mypy=1.10.0 # For building docs - sphinx>=4.5.0 - sphinx-design diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 7352f230bb3a..4e53f86d355c 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -15,7 +15,7 @@ cffi; python_version < '3.10' # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.7.1; platform_python_implementation != "PyPy" +mypy==1.10.0; platform_python_implementation != "PyPy" typing_extensions>=4.2.0 # for optional f2py encoding detection charset-normalizer From 53a65bbfc7014b24f04578bb2f15d0bf5e75a34e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 17:50:25 +0000 Subject: [PATCH 320/980] MAINT: Bump pypa/cibuildwheel from 2.17.0 to 2.18.0 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.17.0 to 2.18.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/8d945475ac4b1aac4ae08b2fd27db9917158b6ce...711a3d017d0729f3edde18545fee967f03d65f65) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 39868abf6dff..a821448cdb0c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -149,7 +149,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@8d945475ac4b1aac4ae08b2fd27db9917158b6ce # v2.17.0 + uses: pypa/cibuildwheel@711a3d017d0729f3edde18545fee967f03d65f65 # v2.18.0 env: CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From b5daae56483a934878db0b8e37b79e0343af76f1 Mon Sep 17 00:00:00 2001 From: Jean Lecordier <47030586+jlecordier@users.noreply.github.com> Date: Mon, 13 May 2024 20:14:08 +0200 Subject: [PATCH 321/980] TYP: npyio: loadtxt: usecols: add None type According to the previous type hint, None was not allowed, but since None is the default value, then None IS allowed --- numpy/lib/_npyio_impl.pyi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index d9b43578d798..f1dcbfd52d01 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -158,7 +158,7 @@ def loadtxt( delimiter: None | str = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., skiprows: int = ..., - usecols: int | Sequence[int] = ..., + usecols: int | Sequence[int] | None = ..., unpack: bool = ..., ndmin: L[0, 1, 2] = ..., encoding: None | str = ..., @@ -175,7 +175,7 @@ def loadtxt( delimiter: None | str = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., skiprows: int = ..., - usecols: int | Sequence[int] = ..., + usecols: int | Sequence[int] | None = ..., unpack: bool = ..., ndmin: L[0, 1, 2] = ..., encoding: None | str = ..., @@ -192,7 +192,7 @@ def loadtxt( delimiter: None | str = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., skiprows: int = ..., - usecols: int | Sequence[int] = ..., + usecols: int | Sequence[int] | None = ..., unpack: bool = ..., ndmin: L[0, 1, 2] = ..., encoding: None | str = ..., From 8dd384a2a2811e345dd9087363bb6327700ebf76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Mon, 13 May 2024 22:48:34 +0200 Subject: [PATCH 322/980] Add release note --- doc/release/upcoming_changes/26313.change.rst | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 doc/release/upcoming_changes/26313.change.rst diff --git a/doc/release/upcoming_changes/26313.change.rst b/doc/release/upcoming_changes/26313.change.rst new file mode 100644 index 000000000000..99c8b1d879f9 --- /dev/null +++ b/doc/release/upcoming_changes/26313.change.rst @@ -0,0 +1,2 @@ +* As `numpy.vecdot` is now a ufunc it has a less precise signature. + This is due to the limitations of ufunc's typing stub. 
From 0b6e452bb04d15cdef9120a67f434787d732eca5 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 13 May 2024 15:12:32 -0600 Subject: [PATCH 323/980] TST: skip test_frompyfunc_leaks in the free-threaded build --- numpy/lib/tests/test_function_base.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 3a517f5c93bf..b3cffa2703d5 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -20,8 +20,9 @@ from numpy.exceptions import AxisError from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_allclose, IS_PYPY, - assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT, IS_WASM + assert_array_almost_equal, assert_raises, assert_allclose, + assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT, + IS_WASM, NOGIL_BUILD ) import numpy.lib._function_base_impl as nfb from numpy.random import rand @@ -1921,6 +1922,9 @@ def unbound(*args): return 0 @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.skipif(NOGIL_BUILD, + reason=("Functions are immortalized if a thread is " + "launched, making this test flaky")) @pytest.mark.parametrize('name, incr', [ ('bound', A.iters), ('unbound', 0), From 7bef38435aa91cc47b30223ba82a06343c6b6073 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 13 May 2024 17:29:24 -0600 Subject: [PATCH 324/980] MAINT: Add some PR prefixes to the labeler. This adds "MNT:" as an alternate spelling of "MAINT" and synchronizes the documentation with the labeler. Because, why not, and Nathan uses it :) [skip azp] [skip cirrus] --- .github/pr-prefix-labeler.yml | 7 ++++--- doc/source/dev/development_workflow.rst | 4 +++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/pr-prefix-labeler.yml b/.github/pr-prefix-labeler.yml index 2b95f1f314f6..4905b502045d 100644 --- a/.github/pr-prefix-labeler.yml +++ b/.github/pr-prefix-labeler.yml @@ -1,15 +1,16 @@ "API": "30 - API" "BENCH": "28 - Benchmark" -"BUG": "00 - Bug" "BLD": "36 - Build" +"BUG": "00 - Bug" "DEP": "07 - Deprecation" "DEV": "16 - Development" "DOC": "04 - Documentation" "ENH": "01 - Enhancement" "MAINT": "03 - Maintenance" +"MNT": "03 - Maintenance" +"REL": "14 - Release" "REV": "34 - Reversion" "STY": "03 - Maintenance" "TST": "05 - Testing" -"REL": "14 - Release" -"WIP": "25 - WIP" "TYP": "static typing" +"WIP": "25 - WIP" diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst index d3ee762445e6..1af4521482e0 100644 --- a/doc/source/dev/development_workflow.rst +++ b/doc/source/dev/development_workflow.rst @@ -171,11 +171,13 @@ Standard acronyms to start the commit message with are:: DOC: documentation ENH: enhancement MAINT: maintenance commit (refactoring, typos, etc.) 
+ MNT: alias for MAINT + REL: related to releasing numpy REV: revert an earlier commit STY: style fix (whitespace, PEP8) TST: addition or modification of tests TYP: static typing - REL: related to releasing numpy + WIP: work in progress, do not merge Commands to skip continuous integration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From 1d29c7a349e930ce5bc71895a4836bd7e6bb8103 Mon Sep 17 00:00:00 2001 From: mattip Date: Wed, 15 May 2024 18:57:14 +0300 Subject: [PATCH 325/980] BUG: int32 and intc should both appear in sctypes --- numpy/_core/_type_aliases.py | 11 +++++++---- numpy/_core/tests/test_numerictypes.py | 8 ++++++++ 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/numpy/_core/_type_aliases.py b/numpy/_core/_type_aliases.py index 13f39a11cb9b..80a59e7b3f52 100644 --- a/numpy/_core/_type_aliases.py +++ b/numpy/_core/_type_aliases.py @@ -93,9 +93,10 @@ # Building `sctypes` #################### -sctypes = {"int": [], "uint": [], "float": [], "complex": [], "others": []} +sctypes = {"int": set(), "uint": set(), "float": set(), + "complex": set(), "others": set()} -for type_info in set(typeinfo.values()): +for type_info in typeinfo.values(): if type_info.kind in ["M", "m"]: # exclude timedelta and datetime continue @@ -108,9 +109,11 @@ ("others", ma.generic) ]: if issubclass(concrete_type, abstract_type): - sctypes[type_group].append(concrete_type) + sctypes[type_group].add(concrete_type) break # sort sctype groups by bitsize -for sctype_list in sctypes.values(): +for sctype_key in sctypes.keys(): + sctype_list = list(sctypes[sctype_key]) sctype_list.sort(key=lambda x: dtype(x).itemsize) + sctypes[sctype_key] = sctype_list diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index 1134e32025fb..ea7e4cdb9c0c 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -473,6 +473,14 @@ def test_isdtype_invalid_args(self): with assert_raises_regex(TypeError, r".*kind argument must.*"): np.isdtype(np.int64, "int64") + def test_sctypes_complete(self): + # issue 26439: int32/intc were masking eachother on 32-bit builds + assert np.int32 in sctypes['int'] + assert np.intc in sctypes['int'] + assert np.int64 in sctypes['int'] + assert np.uint32 in sctypes['uint'] + assert np.uintc in sctypes['uint'] + assert np.uint64 in sctypes['uint'] class TestSctypeDict: def test_longdouble(self): From 9a582e20916194f0863509b74d158e6e19da5e72 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 15 May 2024 10:53:42 -0600 Subject: [PATCH 326/980] TST: temporarily pin spin to work around issue in 0.9 release --- environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yml b/environment.yml index d03275d0bbce..9850418666ac 100644 --- a/environment.yml +++ b/environment.yml @@ -17,7 +17,7 @@ dependencies: - pkg-config - meson-python - pip - - spin + - spin=0.8 # Unpin when spin 0.9.1 is released - ccache # For testing - pytest From 1ab359112325be0d8eb025584151cbf23a8bb140 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 15 May 2024 12:28:13 -0600 Subject: [PATCH 327/980] TST: fix xfailed tests on pypy 7.3.16 --- numpy/_core/tests/test_strings.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 37cb0f15afa2..64cf42e05adb 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -18,6 +18,7 @@ MAX = np.iinfo(np.int64).max 
+IS_PYPY_LT_7_3_16 = IS_PYPY and sys.implementation.version < (7, 3, 16) @pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym): @@ -903,7 +904,7 @@ def test_replace_unicode(self, buf, old, new, count, res, dt): '\U00011066', '\U000104A0', pytest.param('\U0001F107', marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY, + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, reason="PYPY bug in Py_UNICODE_ISALNUM", strict=True)), ]) @@ -920,7 +921,7 @@ def test_isalnum_unicode(self, in_, dt): ('\U0001F46F', False), ('\u2177', True), pytest.param('\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY, + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, reason="PYPY bug in Py_UNICODE_ISLOWER", strict=True)), ('\U0001044E', True), @@ -938,7 +939,7 @@ def test_islower_unicode(self, in_, out, dt): ('\U0001F46F', False), ('\u2177', False), pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY, + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, reason="PYPY bug in Py_UNICODE_ISUPPER", strict=True)), ('\U0001044E', False), @@ -951,12 +952,12 @@ def test_isupper_unicode(self, in_, out, dt): ('\u1FFc', True), ('Greek \u1FFcitlecases ...', True), pytest.param('\U00010401\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY, + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, reason="PYPY bug in Py_UNICODE_ISISTITLE", strict=True)), ('\U00010427\U0001044E', True), pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY, + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, reason="PYPY bug in Py_UNICODE_ISISTITLE", strict=True)), ('\U0001044E', False), From 7c9a7cda12d31d97d67c8faffcc4a7c8954c3d52 Mon Sep 17 00:00:00 2001 From: Paul Juma Otieno <103896399+otieno-juma@users.noreply.github.com> Date: Wed, 15 May 2024 22:17:01 +0300 Subject: [PATCH 328/980] DOC: Adding links to polymonial table. (#26442) I added missing links to polymonial classes table. 
[skip azp] [skip actions] [skip cirrus] --- .../routines.polynomials.classes.rst | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/source/reference/routines.polynomials.classes.rst b/doc/source/reference/routines.polynomials.classes.rst index 9f7a26c05719..16cc65a35919 100644 --- a/doc/source/reference/routines.polynomials.classes.rst +++ b/doc/source/reference/routines.polynomials.classes.rst @@ -3,16 +3,16 @@ Using the convenience classes The convenience classes provided by the polynomial package are: -============ ================ -Name Provides -============ ================ -Polynomial Power series -Chebyshev Chebyshev series -Legendre Legendre series -Laguerre Laguerre series -Hermite Hermite series -HermiteE HermiteE series -============ ================ +================================================ ================ +Name Provides +================================================ ================ +:class:`~numpy.polynomial.polynomial.Polynomial` Power series +:class:`~numpy.polynomial.chebyshev.Chebyshev` Chebyshev series +:class:`~numpy.polynomial.legendre.Legendre` Legendre series +:class:`~numpy.polynomial.laguerre.Laguerre` Laguerre series +:class:`~numpy.polynomial.hermite.Hermite` Hermite series +:class:`~numpy.polynomial.hermite_e.HermiteE` HermiteE series +================================================ ================ The series in this context are finite sums of the corresponding polynomial basis functions multiplied by coefficients. For instance, a power series From f764f544564df3c37dd29865b47b95042a29100f Mon Sep 17 00:00:00 2001 From: Ogidi <82846833+ogidig5@users.noreply.github.com> Date: Wed, 15 May 2024 22:41:50 +0300 Subject: [PATCH 329/980] DOC: Remove outdated authentication instructions (#26444) This is meant to remove GitHub authentication instructions from the NumPy documentation. [skip azp] [skip actions] [skip cirrus] --- doc/source/dev/index.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index 6c9ebd577b2e..d2846f48b833 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -88,9 +88,6 @@ Here's the short summary, complete TOC links are below: git push origin linspace-speedups - * Enter your GitHub username and password (repeat contributors or advanced - users can remove this step by connecting to GitHub with SSH). - * Go to GitHub. The new branch will show up with a green Pull Request button. Make sure the title and message are clear, concise, and self- explanatory. Then click the button to submit it. 
From 68839055cbd83aded871336ff36dbd0d77bfff4e Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 15 May 2024 16:58:21 -0600 Subject: [PATCH 330/980] TST: attempt to fix intel SDE SIMD CI --- .github/workflows/linux_simd.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 386bc7ba0c98..23de4d3a548e 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -152,7 +152,7 @@ jobs: intel_sde_avx512: needs: [baseline_only] - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: @@ -206,7 +206,7 @@ jobs: intel_sde_spr: needs: [baseline_only] - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: From 59ccad2b42512b8a5f69ae6956e804892a8d4a6b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 15 May 2024 17:12:26 -0600 Subject: [PATCH 331/980] MNT: remove unncessary dependency installation steps --- .github/workflows/linux_simd.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 23de4d3a548e..a3e162cb135c 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -170,10 +170,6 @@ jobs: - name: Install dependencies run: | - sudo apt update - sudo apt install -y g++-13 - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 1 - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-13 1 python -m pip install -r requirements/build_requirements.txt python -m pip install pytest pytest-xdist hypothesis typing_extensions @@ -224,10 +220,6 @@ jobs: - name: Install dependencies run: | - sudo apt update - sudo apt install -y g++-13 - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 1 - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-13 1 python -m pip install -r requirements/build_requirements.txt python -m pip install pytest pytest-xdist hypothesis typing_extensions From 42e7987f383735cfae8f248a308c2633ff572613 Mon Sep 17 00:00:00 2001 From: Leo Singer Date: Wed, 15 May 2024 23:14:06 -0400 Subject: [PATCH 332/980] MAINT: fix typo --- doc/source/reference/c-api/array.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index eded7e184e04..7a2f0cbcda91 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -3371,13 +3371,13 @@ Data-type descriptors can also be used with the "O&" character in PyArg_ParseTuple processing. -.. c:function:: int Pyarray_DescrAlignConverter( \ +.. c:function:: int PyArray_DescrAlignConverter( \ PyObject* obj, PyArray_Descr** dtype) Like :c:func:`PyArray_DescrConverter` except it aligns C-struct-like objects on word-boundaries as the compiler would. -.. c:function:: int Pyarray_DescrAlignConverter2( \ +.. 
c:function:: int PyArray_DescrAlignConverter2( \ PyObject* obj, PyArray_Descr** dtype) Like :c:func:`PyArray_DescrConverter2` except it aligns C-struct-like From 98e8913d2ac49ef9203e2f651a3d54fb166d1bd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jyn=20Spring=20=E7=90=B4=E6=98=A5?= Date: Thu, 16 May 2024 11:47:33 +0200 Subject: [PATCH 333/980] BUG: Use Python pickle protocol version 4 for np.save (#26388) * BUG: Use default Python pickle protocol version rather than outdated protocol 3 * Update default pickle of np.save to 4 * Adds document for default pickle protocol 4 --- doc/release/upcoming_changes/26388.performance.rst | 3 +++ numpy/lib/_npyio_impl.py | 2 +- numpy/lib/format.py | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 doc/release/upcoming_changes/26388.performance.rst diff --git a/doc/release/upcoming_changes/26388.performance.rst b/doc/release/upcoming_changes/26388.performance.rst new file mode 100644 index 000000000000..885bc28c4a78 --- /dev/null +++ b/doc/release/upcoming_changes/26388.performance.rst @@ -0,0 +1,3 @@ + * `numpy.save` now uses pickle protocol version 4 for saving arrays with + object dtype, which allows for pickle objects larger than 4GB and improves + saving speed by about 5% for large arrays. \ No newline at end of file diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 8986b94fd500..8940ca4463c2 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -520,7 +520,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True): arbitrary code) and portability (pickled objects may not be loadable on different Python installations, for example if the stored objects require libraries that are not available, and not all pickled data is - compatible between Python 2 and Python 3). + compatible between different versions of Python). Default: True fix_imports : bool, optional Only useful in forcing objects in object arrays on Python 3 to be diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 87f35a7a4f60..8e14dfe4bcab 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -741,7 +741,7 @@ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): "when allow_pickle=False") if pickle_kwargs is None: pickle_kwargs = {} - pickle.dump(array, fp, protocol=3, **pickle_kwargs) + pickle.dump(array, fp, protocol=4, **pickle_kwargs) elif array.flags.f_contiguous and not array.flags.c_contiguous: if isfileobj(fp): array.T.tofile(fp) From 7f9feb76ed44b3b460ddae1df5753edf8254cf39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jyn=20Spring=20=E7=90=B4=E6=98=A5?= Date: Thu, 16 May 2024 12:22:46 +0200 Subject: [PATCH 334/980] Deprecate 'fix_imports' flag in numpy.save --- numpy/lib/_npyio_impl.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 8940ca4463c2..0b9382c904d7 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -501,7 +501,7 @@ def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): @array_function_dispatch(_save_dispatcher) -def save(file, arr, allow_pickle=True, fix_imports=True): +def save(file, arr, allow_pickle=True, fix_imports=None): """ Save an array to a binary file in NumPy ``.npy`` format. @@ -523,10 +523,12 @@ def save(file, arr, allow_pickle=True, fix_imports=True): compatible between different versions of Python). 
Default: True fix_imports : bool, optional - Only useful in forcing objects in object arrays on Python 3 to be - pickled in a Python 2 compatible way. If `fix_imports` is True, pickle - will try to map the new Python 3 names to the old module names used in - Python 2, so that the pickle data stream is readable with Python 2. + The `fix_imports` flag is deprecated and has no effect. + .. deprecated:: 2.0 + Historically, this flag was used to control compatibility support + for objects saved in Python 3 to be loadable in Python 2. This flag + is ignored after NumPy 1.17, and deprecated in NumPy 2.0. It will + be removed in a future release. See Also -------- @@ -561,6 +563,10 @@ def save(file, arr, allow_pickle=True, fix_imports=True): >>> print(a, b) # [1 2] [1 3] """ + if fix_imports is not None: + warnings.warn( + "The 'fix_imports' flag is deprecated and has no effect.", + DeprecationWarning, stacklevel=2) if hasattr(file, 'write'): file_ctx = contextlib.nullcontext(file) else: From ea3a8d552e877b498f6ae772929fd3c0a9ac7c37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jyn=20Spring=20=E7=90=B4=E6=98=A5?= Date: Thu, 16 May 2024 16:49:37 +0200 Subject: [PATCH 335/980] Add test cases for fix_imports deprecation --- numpy/_core/tests/test_deprecations.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index f0fc5645122a..a9c7eaba67ad 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -716,3 +716,19 @@ class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): @pytest.mark.parametrize("string", ["(2)i,", "(3)3S,", "f,(2)f"]) def test_parenthesized_repeat_count(self, string): self.assert_deprecated(np.dtype, args=(string,)) + + +class TestDeprecatedSaveFixImports(_DeprecationTestCase): + # Deprecated in Numpy 2.0, 2024-05 + message = "Passing fix_imports into numpy.save" + + def test_deprecated(self): + sample_args = ('a.npy', np.array(np.zeros((1024, 10)))) + self.assert_not_deprecated(np.save, args=sample_args) + self.assert_deprecated(np.save, args=sample_args, kwargs={'fix_imports': True}) + self.assert_deprecated(np.save, args=sample_args, kwargs={'fix_imports': False}) + for allow_pickle in [True, False]: + self.assert_not_deprecated(np.save, args=sample_args, kwargs={'allow_pickle': allow_pickle}) + self.assert_deprecated(np.save, args=sample_args, kwargs={'allow_pickle': allow_pickle, 'fix_imports': True}) + self.assert_deprecated(np.save, args=sample_args, kwargs={'allow_pickle': allow_pickle, 'fix_imports': False}) + \ No newline at end of file From 42e7d3f44832b6adad67e31ca6d25c1f7e42edc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jyn=20Spring=20=E7=90=B4=E6=98=A5?= Date: Thu, 16 May 2024 16:54:16 +0200 Subject: [PATCH 336/980] Fix lint --- numpy/_core/tests/test_deprecations.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index a9c7eaba67ad..fb2f07548ec3 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -725,10 +725,16 @@ class TestDeprecatedSaveFixImports(_DeprecationTestCase): def test_deprecated(self): sample_args = ('a.npy', np.array(np.zeros((1024, 10)))) self.assert_not_deprecated(np.save, args=sample_args) - self.assert_deprecated(np.save, args=sample_args, kwargs={'fix_imports': True}) - self.assert_deprecated(np.save, args=sample_args, 
kwargs={'fix_imports': False}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'fix_imports': True}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'fix_imports': False}) for allow_pickle in [True, False]: - self.assert_not_deprecated(np.save, args=sample_args, kwargs={'allow_pickle': allow_pickle}) - self.assert_deprecated(np.save, args=sample_args, kwargs={'allow_pickle': allow_pickle, 'fix_imports': True}) - self.assert_deprecated(np.save, args=sample_args, kwargs={'allow_pickle': allow_pickle, 'fix_imports': False}) - \ No newline at end of file + self.assert_not_deprecated(np.save, args=sample_args, + kwargs={'allow_pickle': allow_pickle}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'allow_pickle': allow_pickle, + 'fix_imports': True}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'allow_pickle': allow_pickle, + 'fix_imports': False}) From 9a57fb4e5beb0d4a9b48ff8565da696e8a1d7ea1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jyn=20Spring=20=E7=90=B4=E6=98=A5?= Date: Thu, 16 May 2024 17:03:01 +0200 Subject: [PATCH 337/980] Match the warning --- numpy/_core/tests/test_deprecations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index fb2f07548ec3..33086ac57ab1 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -720,7 +720,7 @@ def test_parenthesized_repeat_count(self, string): class TestDeprecatedSaveFixImports(_DeprecationTestCase): # Deprecated in Numpy 2.0, 2024-05 - message = "Passing fix_imports into numpy.save" + message = "The 'fix_imports' flag is deprecated and has no effect." def test_deprecated(self): sample_args = ('a.npy', np.array(np.zeros((1024, 10)))) From 7b79f51507115b73c78b7df485f59c68f25083dd Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 16 May 2024 09:05:34 -0600 Subject: [PATCH 338/980] MNT: set CC and CXX for intel SDE tests --- .github/workflows/linux_simd.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index a3e162cb135c..aa4fe75f14cf 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -174,7 +174,7 @@ jobs: python -m pip install pytest pytest-xdist hypothesis typing_extensions - name: Build - run: spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' + run: CC=gcc-13 CXX=g++-13 spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' - name: Meson Log if: always() @@ -224,7 +224,7 @@ jobs: python -m pip install pytest pytest-xdist hypothesis typing_extensions - name: Build - run: spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_spr + run: CC=gcc-13 CXX=g++-13 spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_spr - name: Meson Log if: always() From e617b2393a3116c53557084de1f9172c99d39b4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jyn=20Spring=20=E7=90=B4=E6=98=A5?= Date: Thu, 16 May 2024 17:12:29 +0200 Subject: [PATCH 339/980] Improve warning message & doc --- numpy/lib/_npyio_impl.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 0b9382c904d7..5e08b3141680 100644 --- a/numpy/lib/_npyio_impl.py +++ 
b/numpy/lib/_npyio_impl.py @@ -524,12 +524,9 @@ def save(file, arr, allow_pickle=True, fix_imports=None): Default: True fix_imports : bool, optional The `fix_imports` flag is deprecated and has no effect. - .. deprecated:: 2.0 - Historically, this flag was used to control compatibility support - for objects saved in Python 3 to be loadable in Python 2. This flag - is ignored after NumPy 1.17, and deprecated in NumPy 2.0. It will - be removed in a future release. - + .. deprecated:: 2.1 + This flag is ignored since NumPy 1.17 and was only needed to + support loading some files in Python 2 written in Python 3. See Also -------- savez : Save several arrays into a ``.npz`` archive @@ -564,8 +561,10 @@ def save(file, arr, allow_pickle=True, fix_imports=None): # [1 2] [1 3] """ if fix_imports is not None: + # Deprecated 2024-05-16, NumPy 2.1 warnings.warn( - "The 'fix_imports' flag is deprecated and has no effect.", + "The 'fix_imports' flag is deprecated and has no effect. " + "(Deprecated in NumPy 2.1)", DeprecationWarning, stacklevel=2) if hasattr(file, 'write'): file_ctx = contextlib.nullcontext(file) From 39644cfc546ca96163eb321e4c577150e1d9c83e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jyn=20Spring=20=E7=90=B4=E6=98=A5?= Date: Thu, 16 May 2024 17:31:44 +0200 Subject: [PATCH 340/980] Use temppath to generate test save target path --- numpy/_core/tests/test_deprecations.py | 29 +++++++++++++------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 33086ac57ab1..a725700bff40 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -14,7 +14,7 @@ import numpy as np from numpy.testing import ( assert_raises, assert_warns, assert_, assert_array_equal, SkipTest, - KnownFailureException, break_cycles, + KnownFailureException, break_cycles, temppath ) from numpy._core._multiarray_tests import fromstring_null_term_c_api @@ -723,18 +723,19 @@ class TestDeprecatedSaveFixImports(_DeprecationTestCase): message = "The 'fix_imports' flag is deprecated and has no effect." 
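As a sketch of the behavior this test class pins down (assuming a NumPy build with this series applied; the file name is illustrative), only explicit use of the keyword warns:

    import warnings
    import numpy as np

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        np.save("a.npy", np.zeros(3))                    # no warning
        np.save("a.npy", np.zeros(3), fix_imports=True)  # DeprecationWarning
    assert len(caught) == 1
    assert issubclass(caught[0].category, DeprecationWarning)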
     def test_deprecated(self):
-        sample_args = ('a.npy', np.array(np.zeros((1024, 10))))
-        self.assert_not_deprecated(np.save, args=sample_args)
-        self.assert_deprecated(np.save, args=sample_args,
-                               kwargs={'fix_imports': True})
-        self.assert_deprecated(np.save, args=sample_args,
-                               kwargs={'fix_imports': False})
-        for allow_pickle in [True, False]:
-            self.assert_not_deprecated(np.save, args=sample_args,
-                                       kwargs={'allow_pickle': allow_pickle})
+        with temppath(suffix='.npy') as path:
+            sample_args = (path, np.array(np.zeros((1024, 10))))
+            self.assert_not_deprecated(np.save, args=sample_args)
             self.assert_deprecated(np.save, args=sample_args,
-                                   kwargs={'allow_pickle': allow_pickle,
-                                           'fix_imports': True})
+                                   kwargs={'fix_imports': True})
             self.assert_deprecated(np.save, args=sample_args,
-                                   kwargs={'allow_pickle': allow_pickle,
-                                           'fix_imports': False})
+                                   kwargs={'fix_imports': False})
+            for allow_pickle in [True, False]:
+                self.assert_not_deprecated(np.save, args=sample_args,
+                                           kwargs={'allow_pickle': allow_pickle})
+                self.assert_deprecated(np.save, args=sample_args,
+                                       kwargs={'allow_pickle': allow_pickle,
+                                               'fix_imports': True})
+                self.assert_deprecated(np.save, args=sample_args,
+                                       kwargs={'allow_pickle': allow_pickle,
+                                               'fix_imports': False})

From d6df5658076042b5f2eb112af09c9de77a0c2941 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jyn=20Spring=20=E7=90=B4=E6=98=A5?=
Date: Thu, 16 May 2024 17:42:32 +0200
Subject: [PATCH 341/980] Update deprecation target version

---
 numpy/_core/tests/test_deprecations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py
index a725700bff40..a10c6138e10e 100644
--- a/numpy/_core/tests/test_deprecations.py
+++ b/numpy/_core/tests/test_deprecations.py
@@ -719,7 +719,7 @@ def test_parenthesized_repeat_count(self, string):

 class TestDeprecatedSaveFixImports(_DeprecationTestCase):
-    # Deprecated in Numpy 2.0, 2024-05
+    # Deprecated in Numpy 2.1, 2024-05
     message = "The 'fix_imports' flag is deprecated and has no effect."

From 49260bc49b1810f68624914b5a48df0582106d8e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jyn=20Spring=20=E7=90=B4=E6=98=A5?=
Date: Thu, 16 May 2024 18:13:02 +0200
Subject: [PATCH 342/980] Use np._NoValue for deprecated keyword default

---
 numpy/lib/_npyio_impl.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py
index 5e08b3141680..dae8f33a0d4b 100644
--- a/numpy/lib/_npyio_impl.py
+++ b/numpy/lib/_npyio_impl.py
@@ -501,7 +501,7 @@ def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):

 @array_function_dispatch(_save_dispatcher)
-def save(file, arr, allow_pickle=True, fix_imports=None):
+def save(file, arr, allow_pickle=True, fix_imports=np._NoValue):
     """
     Save an array to a binary file in NumPy ``.npy`` format.

@@ -560,7 +560,7 @@ def save(file, arr, allow_pickle=True, fix_imports=None):
     >>> print(a, b)
     # [1 2] [1 3]
     """
-    if fix_imports is not None:
+    if fix_imports is not np._NoValue:
         # Deprecated 2024-05-16, NumPy 2.1
         warnings.warn(
             "The 'fix_imports' flag is deprecated and has no effect. 
" From 731a10a5663178ff5e08b4f90dca8d93dbf4b28c Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 16 May 2024 12:15:59 -0600 Subject: [PATCH 343/980] BUG: fixes for three related stringdtype issues (#26436) * BUG: fix broken fancy indexing for stringdtype * BUG: fix incorrect casting for stringdtype in PyArray_Where * BUG: ensure casting to bool and nonzero treats null strings the same way * MNT: refactor so itemsizes are correct * MNT: refactor so trivial copy check aligns with casting setup --- numpy/_core/src/multiarray/mapping.c | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 80 ++++++++++++------- .../_core/src/multiarray/stringdtype/casts.c | 10 ++- .../_core/src/multiarray/stringdtype/dtype.c | 17 +++- numpy/_core/tests/test_stringdtype.py | 44 ++++++++-- 5 files changed, 116 insertions(+), 37 deletions(-) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index f17e4ffa65c1..1861241a040e 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1580,7 +1580,7 @@ array_subscript(PyArrayObject *self, PyObject *op) if (PyArray_GetDTypeTransferFunction(is_aligned, itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + PyArray_DESCR(self), PyArray_DESCR((PyArrayObject *)result), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto finish; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 37b2f4860b1a..4946465617bc 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -3258,7 +3258,8 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) return NULL; } - NPY_cast_info cast_info = {.func = NULL}; + NPY_cast_info x_cast_info = {.func = NULL}; + NPY_cast_info y_cast_info = {.func = NULL}; ax = (PyArrayObject*)PyArray_FROM_O(x); if (ax == NULL) { @@ -3282,13 +3283,33 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) NPY_ITER_READONLY | NPY_ITER_ALIGNED, NPY_ITER_READONLY | NPY_ITER_ALIGNED }; + common_dt = PyArray_ResultType(2, &op_in[2], 0, NULL); if (common_dt == NULL) { goto fail; } + npy_intp itemsize = common_dt->elsize; + + // If x and y don't have references, we ask the iterator to create buffers + // using the common data type of x and y and then do fast trivial copies + // in the loop below. + // Otherwise trivial copies aren't possible and we handle the cast item by item + // in the loop. 
+ PyArray_Descr *x_dt, *y_dt; + int trivial_copy_loop = !PyDataType_REFCHK(common_dt) && + ((itemsize == 16) || (itemsize == 8) || (itemsize == 4) || + (itemsize == 2) || (itemsize == 1)); + if (trivial_copy_loop) { + x_dt = common_dt; + y_dt = common_dt; + } + else { + x_dt = PyArray_DESCR(op_in[2]); + y_dt = PyArray_DESCR(op_in[3]); + } /* `PyArray_DescrFromType` cannot fail for simple builtin types: */ - PyArray_Descr * op_dt[4] = {common_dt, PyArray_DescrFromType(NPY_BOOL), - common_dt, common_dt}; + PyArray_Descr * op_dt[4] = {common_dt, PyArray_DescrFromType(NPY_BOOL), x_dt, y_dt}; + NpyIter * iter; NPY_BEGIN_THREADS_DEF; @@ -3302,26 +3323,27 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) /* Get the result from the iterator object array */ ret = (PyObject*)NpyIter_GetOperandArray(iter)[0]; - - npy_intp itemsize = common_dt->elsize; - - int has_ref = PyDataType_REFCHK(common_dt); + PyArray_Descr *ret_dt = PyArray_DESCR((PyArrayObject *)ret); NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; - npy_intp transfer_strides[2] = {itemsize, itemsize}; + npy_intp x_strides[2] = {x_dt->elsize, itemsize}; + npy_intp y_strides[2] = {y_dt->elsize, itemsize}; npy_intp one = 1; - if (has_ref || ((itemsize != 16) && (itemsize != 8) && (itemsize != 4) && - (itemsize != 2) && (itemsize != 1))) { + if (!trivial_copy_loop) { // The iterator has NPY_ITER_ALIGNED flag so no need to check alignment // of the input arrays. - // - // There's also no need to set up a cast for y, since the iterator - // ensures both casts are identical. if (PyArray_GetDTypeTransferFunction( - 1, itemsize, itemsize, common_dt, common_dt, 0, - &cast_info, &transfer_flags) != NPY_SUCCEED) { + 1, x_strides[0], x_strides[1], + PyArray_DESCR(op_in[2]), ret_dt, 0, + &x_cast_info, &transfer_flags) != NPY_SUCCEED) { + goto fail; + } + if (PyArray_GetDTypeTransferFunction( + 1, y_strides[0], y_strides[1], + PyArray_DESCR(op_in[3]), ret_dt, 0, + &y_cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } } @@ -3353,19 +3375,19 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) npy_intp ystride = strides[3]; /* constant sizes so compiler replaces memcpy */ - if (!has_ref && itemsize == 16) { + if (trivial_copy_loop && itemsize == 16) { INNER_WHERE_LOOP(16); } - else if (!has_ref && itemsize == 8) { + else if (trivial_copy_loop && itemsize == 8) { INNER_WHERE_LOOP(8); } - else if (!has_ref && itemsize == 4) { + else if (trivial_copy_loop && itemsize == 4) { INNER_WHERE_LOOP(4); } - else if (!has_ref && itemsize == 2) { + else if (trivial_copy_loop && itemsize == 2) { INNER_WHERE_LOOP(2); } - else if (!has_ref && itemsize == 1) { + else if (trivial_copy_loop && itemsize == 1) { INNER_WHERE_LOOP(1); } else { @@ -3374,18 +3396,18 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) if (*csrc) { char *args[2] = {xsrc, dst}; - if (cast_info.func( - &cast_info.context, args, &one, - transfer_strides, cast_info.auxdata) < 0) { + if (x_cast_info.func( + &x_cast_info.context, args, &one, + x_strides, x_cast_info.auxdata) < 0) { goto fail; } } else { char *args[2] = {ysrc, dst}; - if (cast_info.func( - &cast_info.context, args, &one, - transfer_strides, cast_info.auxdata) < 0) { + if (y_cast_info.func( + &y_cast_info.context, args, &one, + y_strides, y_cast_info.auxdata) < 0) { goto fail; } } @@ -3405,7 +3427,8 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) Py_DECREF(ax); Py_DECREF(ay); Py_DECREF(common_dt); - NPY_cast_info_xfree(&cast_info); + NPY_cast_info_xfree(&x_cast_info); + 
NPY_cast_info_xfree(&y_cast_info); if (NpyIter_Deallocate(iter) != NPY_SUCCEED) { Py_DECREF(ret); @@ -3419,7 +3442,8 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) Py_XDECREF(ax); Py_XDECREF(ay); Py_XDECREF(common_dt); - NPY_cast_info_xfree(&cast_info); + NPY_cast_info_xfree(&x_cast_info); + NPY_cast_info_xfree(&y_cast_info); return NULL; } diff --git a/numpy/_core/src/multiarray/stringdtype/casts.c b/numpy/_core/src/multiarray/stringdtype/casts.c index b896d09ace65..42c588199890 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.c +++ b/numpy/_core/src/multiarray/stringdtype/casts.c @@ -395,6 +395,7 @@ string_to_bool(PyArrayMethod_Context *context, char *const data[], npy_string_allocator *allocator = NpyString_acquire_allocator(descr); int has_null = descr->na_object != NULL; int has_string_na = descr->has_string_na; + int has_nan_na = descr->has_nan_na; const npy_static_string *default_string = &descr->default_string; npy_intp N = dimensions[0]; @@ -415,8 +416,13 @@ string_to_bool(PyArrayMethod_Context *context, char *const data[], } else if (is_null) { if (has_null && !has_string_na) { - // numpy treats NaN as truthy, following python - *out = NPY_TRUE; + if (has_nan_na) { + // numpy treats NaN as truthy, following python + *out = NPY_TRUE; + } + else { + *out = NPY_FALSE; + } } else { *out = (npy_bool)(default_string->size == 0); diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 6e12084a9707..bcaeaa5be5f8 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -415,8 +415,23 @@ stringdtype_getitem(PyArray_StringDTypeObject *descr, char **dataptr) // PyArray_NonzeroFunc // Unicode strings are nonzero if their length is nonzero. 
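The comment above gives the base rule; a sketch of how it is meant to combine with missing values once the change below lands (assumes a NaN-like na_object, mirroring the tests later in this patch):

    import numpy as np
    from numpy.dtypes import StringDType

    dt = StringDType(na_object=float("nan"))
    arr = np.array(["", "spam", dt.na_object], dtype=dt)
    print(arr.nonzero()[0])  # [1 2]: a NaN-like NA is truthy, like Python's nan
    print(arr.astype(bool))  # [False  True  True]: the bool cast agrees with nonzero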
npy_bool -nonzero(void *data, void *NPY_UNUSED(arr)) +nonzero(void *data, void *arr) { + PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)PyArray_DESCR(arr); + int has_null = descr->na_object != NULL; + int has_nan_na = descr->has_nan_na; + int has_string_na = descr->has_string_na; + if (has_null && NpyString_isnull((npy_packed_static_string *)data)) { + if (!has_string_na) { + if (has_nan_na) { + // numpy treats NaN as truthy, following python + return 1; + } + else { + return 0; + } + } + } return NpyString_size((npy_packed_static_string *)data) != 0; } diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index b5d91d402a10..9c0324c91f71 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -40,8 +40,7 @@ def na_object(request): return request.param -@pytest.fixture() -def dtype(na_object, coerce): +def get_dtype(na_object, coerce=True): # explicit is check for pd_NA because != with pd_NA returns pd_NA if na_object is pd_NA or na_object != "unset": return StringDType(na_object=na_object, coerce=coerce) @@ -49,6 +48,11 @@ def dtype(na_object, coerce): return StringDType(coerce=coerce) +@pytest.fixture() +def dtype(na_object, coerce): + return get_dtype(na_object, coerce) + + # second copy for cast tests to do a cartesian product over dtypes @pytest.fixture(params=[True, False]) def coerce2(request): @@ -456,11 +460,41 @@ def test_sort(strings, arr_sorted): ["", "a", "😸", "ááðfáíóåéë"], ], ) -def test_nonzero(strings): - arr = np.array(strings, dtype="T") - is_nonzero = np.array([i for i, item in enumerate(arr) if len(item) != 0]) +def test_nonzero(strings, na_object): + dtype = get_dtype(na_object) + arr = np.array(strings, dtype=dtype) + is_nonzero = np.array( + [i for i, item in enumerate(strings) if len(item) != 0]) assert_array_equal(arr.nonzero()[0], is_nonzero) + if na_object is not pd_NA and na_object == 'unset': + return + + strings_with_na = np.array(strings + [na_object], dtype=dtype) + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + + if is_nan: + assert strings_with_na.nonzero()[0][-1] == 4 + else: + assert strings_with_na.nonzero()[0][-1] == 3 + + # check that the casting to bool and nonzero give consistent results + assert_array_equal(strings_with_na[strings_with_na.nonzero()], + strings_with_na[strings_with_na.astype(bool)]) + + +def test_where(string_list, na_object): + dtype = get_dtype(na_object) + a = np.array(string_list, dtype=dtype) + b = a[::-1] + res = np.where([True, False, True, False, True, False], a, b) + assert_array_equal(res, [a[0], b[1], a[2], b[3], a[4], b[5]]) + + +def test_fancy_indexing(string_list): + sarr = np.array(string_list, dtype="T") + assert_array_equal(sarr, sarr[np.arange(sarr.shape[0])]) + def test_creation_functions(): assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""]) From edbcdfbc7d1b0e3759e3732dbf06174d1d0eab6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jyn=20Spring=20=E7=90=B4=E6=98=A5?= Date: Thu, 16 May 2024 20:23:10 +0200 Subject: [PATCH 344/980] Try fix doc generation --- numpy/lib/_npyio_impl.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index dae8f33a0d4b..0ae4ee2e9386 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -524,9 +524,11 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): Default: True fix_imports : bool, optional The `fix_imports` flag is deprecated and has no effect. + .. 
deprecated:: 2.1 This flag is ignored since NumPy 1.17 and was only needed to support loading some files in Python 2 written in Python 3. + See Also -------- savez : Save several arrays into a ``.npz`` archive From af74b207251406dabd0a1c03ccecd865bd75b6ec Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 16 May 2024 23:04:33 +0200 Subject: [PATCH 345/980] BLD: update vendored-meson to current Meson master (1.4.99) Closes gh-26223 Addresses https://github.com/numpy/meson/pull/13 --- vendored-meson/meson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendored-meson/meson b/vendored-meson/meson index 4e370ca8ab73..31161eef3fc8 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit 4e370ca8ab73c07f7b84abe8a4b937caace050a4 +Subproject commit 31161eef3fc8cf0bf834edc1dd29e490fc6d7713 From 5de51e70c20b107cd3c17c0a0c64256a4be1f8ef Mon Sep 17 00:00:00 2001 From: Justus Magin Date: Fri, 17 May 2024 14:54:04 +0200 Subject: [PATCH 346/980] ENH: improve the error raised by ``numpy.isdtype`` (#26456) * improve the error message raised by ``numpy.isdtype`` if the kind is a string --- numpy/_core/numerictypes.py | 36 +++++++++++++++++++------- numpy/_core/tests/test_numerictypes.py | 2 ++ 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index 602ba9a051dd..ac52cff49db2 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -360,7 +360,11 @@ def issubsctype(arg1, arg2): return issubclass(obj2sctype(arg1), obj2sctype(arg2)) -def _preprocess_dtype(dtype, err_msg): +class _PreprocessDTypeError(Exception): + pass + + +def _preprocess_dtype(dtype): """ Preprocess dtype argument by: 1. fetching type from a data type @@ -369,7 +373,7 @@ def _preprocess_dtype(dtype, err_msg): if isinstance(dtype, ma.dtype): dtype = dtype.type if isinstance(dtype, ndarray) or dtype not in allTypes.values(): - raise TypeError(f"{err_msg}, but it is a {type(dtype)}.") + raise _PreprocessDTypeError() return dtype @@ -414,9 +418,13 @@ def isdtype(dtype, kind): True """ - dtype = _preprocess_dtype( - dtype, err_msg="dtype argument must be a NumPy dtype" - ) + try: + dtype = _preprocess_dtype(dtype) + except _PreprocessDTypeError: + raise TypeError( + "dtype argument must be a NumPy dtype, " + f"but it is a {type(dtype)}." + ) from None input_kinds = kind if isinstance(kind, tuple) else (kind,) @@ -440,12 +448,20 @@ def isdtype(dtype, kind): sctypes["int"] + sctypes["uint"] + sctypes["float"] + sctypes["complex"] ) - else: - kind = _preprocess_dtype( - kind, - err_msg="kind argument must be comprised of " - "NumPy dtypes or strings only" + elif isinstance(kind, str): + raise ValueError( + "kind argument is a string, but" + f" {repr(kind)} is not a known kind name." ) + else: + try: + kind = _preprocess_dtype(kind) + except _PreprocessDTypeError: + raise TypeError( + "kind argument must be comprised of " + "NumPy dtypes or strings only, " + f"but is a {type(kind)}." 
+ ) from None processed_kinds.add(kind) return dtype in processed_kinds diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index ea7e4cdb9c0c..f09622e422a1 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -471,6 +471,8 @@ def test_isdtype_invalid_args(self): with assert_raises_regex(TypeError, r".*must be a NumPy dtype.*"): np.isdtype("int64", np.int64) with assert_raises_regex(TypeError, r".*kind argument must.*"): + np.isdtype(np.int64, 1) + with assert_raises_regex(ValueError, r".*not a known kind name.*"): np.isdtype(np.int64, "int64") def test_sctypes_complete(self): From 74805a3a8a073783460babad084a20d53a1bcd1f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 May 2024 17:06:50 +0000 Subject: [PATCH 347/980] MAINT: Bump github/codeql-action from 2.13.4 to 3.25.5 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.13.4 to 3.25.5. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/cdcdbb579706841c47f7063dda365e292e5cad7a...b7cec7526559c32f1616476ff32d17ba4c59b2d6) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 81eef46e30a4..41a56eb9aaa5 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@cdcdbb579706841c47f7063dda365e292e5cad7a # v2.13.4 + uses: github/codeql-action/init@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v3.25.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@cdcdbb579706841c47f7063dda365e292e5cad7a # v2.13.4 + uses: github/codeql-action/autobuild@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v3.25.5 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@cdcdbb579706841c47f7063dda365e292e5cad7a # v2.13.4 + uses: github/codeql-action/analyze@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v3.25.5 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index b5f851b64540..225cc0fdb790 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
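Aside from the CI bump here, the `isdtype` change earlier in this series separates two error cases; a sketch of what callers now see (assuming such a build):

    import numpy as np

    np.isdtype(np.int64, "integral")    # True: string kind names are accepted
    try:
        np.isdtype(np.int64, "int64")   # a dtype name is not a kind name
    except ValueError as exc:
        print(exc)                      # ... not a known kind name ...
    try:
        np.isdtype("int64", np.int64)   # dtype must be a NumPy dtype, not a str
    except TypeError as exc:
        print(exc)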
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@cdcdbb579706841c47f7063dda365e292e5cad7a # v2.1.27 + uses: github/codeql-action/upload-sarif@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v2.1.27 with: sarif_file: results.sarif From 1148cd63ed7daa4c2d00b22148d7ddeefe72e4ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jyn=20Spring=20=E7=90=B4=E6=98=A5?= Date: Fri, 17 May 2024 19:21:43 +0200 Subject: [PATCH 348/980] Add release note --- doc/release/upcoming_changes/26452.deprecation.rst | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 doc/release/upcoming_changes/26452.deprecation.rst diff --git a/doc/release/upcoming_changes/26452.deprecation.rst b/doc/release/upcoming_changes/26452.deprecation.rst new file mode 100644 index 000000000000..146b50af048c --- /dev/null +++ b/doc/release/upcoming_changes/26452.deprecation.rst @@ -0,0 +1,4 @@ + * The `fix_imports` keyword argument in `numpy.save` is deprecated. Since + NumPy 1.17, `numpy.save` uses a pickle protocol that no longer supports + Python 2, and ignored `fix_imports` keyword. This keyword is kept only + for backward compatibility. It is now deprecated. \ No newline at end of file From 0743123052896aacc874760ca977bccf3cc91704 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sat, 18 May 2024 14:48:20 +1000 Subject: [PATCH 349/980] BLD: cp313 [wheel build] --- .github/workflows/wheels.yml | 2 +- tools/wheels/cibw_before_build.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index a821448cdb0c..f585a1adc973 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -85,7 +85,7 @@ jobs: - [macos-14, macosx_arm64, accelerate] # always use accelerate - [windows-2019, win_amd64, ""] - [windows-2019, win32, ""] - python: ["cp310", "cp311", "cp312", "pp310"] + python: ["cp310", "cp311", "cp312", "pp310", "cp313"] exclude: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32, ""] diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 24a295005727..2c0e90efc7be 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -50,5 +50,5 @@ EOF fi if [[ $RUNNER_OS == "Windows" ]]; then # delvewheel is the equivalent of delocate/auditwheel for windows. - python -m pip install delvewheel + python -m pip install delvewheel wheel fi From 5d404db53b60359784415058194d1ec106a7729b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20R=C3=B6hling?= Date: Sat, 18 May 2024 19:56:42 +0200 Subject: [PATCH 350/980] Skip API documentation for numpy.distutils with Python 3.12 and later --- doc/source/conf.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/conf.py b/doc/source/conf.py index f57646c3a19e..8a4713e1c721 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -143,6 +143,10 @@ class PyTypeObject(ctypes.Structure): # for source files. exclude_dirs = [] +exclude_patterns = [] +if sys.version_info[:2] >= (3, 12): + exclude_patterns += ["reference/distutils.rst"] + # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False From 10c871108fc9c891934f4af373b3198c8b6bb882 Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Sat, 18 May 2024 22:43:03 -0600 Subject: [PATCH 351/980] DOC: Set default as `-j 1` for spin docs and move `-W` to SPHINXOPTS This commit tries to solve two problems. - The pydata_sphinx_theme extension warning can be avoided by setting the default `--jobs` to `1`. 
This matches `spin test`. - The -W option is currently hardcoded in ALLSPHINXOPTS and impossible to override. This commit moves `-W` to SPHINXOPTS which allows a local machine to remove -W if needed, as described in the documentation. This adds to the discussion in PR #26125. [skip actions] [skip azp] [skip cirrus] --- .circleci/config.yml | 2 +- .spin/cmds.py | 6 ++++-- doc/Makefile | 4 ++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9a6d1784c487..530631281c80 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -74,7 +74,7 @@ jobs: . venv/bin/activate cd doc # Don't use -q, show warning summary" - SPHINXOPTS="-n" make -e html + SPHINXOPTS="-W -n" make -e html if [[ $(find build/html -type f | wc -l) -lt 1000 ]]; then echo "doc build failed: build/html is empty" exit -1 diff --git a/.spin/cmds.py b/.spin/cmds.py index d221ad18a1f4..b78c0393e708 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -132,8 +132,10 @@ def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose= @click.option( '--jobs', '-j', metavar='N_JOBS', - default="auto", - help="Number of parallel build jobs" + # Avoids pydata_sphinx_theme extension warning from default="auto". + default="1", + help=("Number of parallel build jobs." + "Can be set to `auto` to use all cores.") ) @click.pass_context def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): diff --git a/doc/Makefile b/doc/Makefile index 2f04c7084ce9..eccd40b1adef 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -11,7 +11,7 @@ PYVER:=$(shell python3 -c 'from sys import version_info as v; print("{0}.{1}".fo PYTHON = python$(PYVER) # You can set these variables from the command line. -SPHINXOPTS ?= +SPHINXOPTS ?= -W SPHINXBUILD ?= LANG=C sphinx-build PAPER ?= DOXYGEN ?= doxygen @@ -25,7 +25,7 @@ FILES= # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -WT --keep-going -d build/doctrees $(PAPEROPT_$(PAPER)) \ +ALLSPHINXOPTS = -T --keep-going -d build/doctrees $(PAPEROPT_$(PAPER)) \ $(SPHINXOPTS) source .PHONY: help clean html web htmlhelp latex changes linkcheck \ From 6b67436abca782b134bf1f9e20ccc11479d057db Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 19 May 2024 12:21:48 +0200 Subject: [PATCH 352/980] TYP: fix type annotation for `newbyteorder` Closes gh-26473 [skip circle] [skip cirrus] [skip azp] --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 98b953444de5..d5a0dd796424 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -636,7 +636,7 @@ def show_config() -> None: ... 
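The widened `_ByteOrder` alias in this hunk is easiest to read from its call sites; a sketch of spellings that behave identically at runtime and now also pass type checking:

    import numpy as np

    dt = np.dtype("<i4")
    dt.newbyteorder(">")       # symbolic spelling
    dt.newbyteorder("big")     # named spelling, newly covered by the stubs
    dt.newbyteorder("native")  # resolves to the platform byte order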
 _NdArraySubClass = TypeVar("_NdArraySubClass", bound=NDArray[Any])
 _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic)
-_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"]
+_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I", "little", "big", "native"]

 @final
 class dtype(Generic[_DTypeScalar_co]):

From 70f05c8e0ced1f8862cf3547eba74166316762b5 Mon Sep 17 00:00:00 2001
From: KarthikKaiplody
Date: Sun, 19 May 2024 09:26:17 -0400
Subject: [PATCH 353/980] Improve documentation of numpy.ma.filled

---
 numpy/ma/core.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 89b4f07031d5..6b5066bc0ef6 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -608,7 +608,9 @@ def common_fill_value(a, b):

 def filled(a, fill_value=None):
     """
-    Return input as an array with masked data replaced by a fill value.
+    Return a copy of `a`, with masked values replaced by a fill value.
+    However, if there are no masked values to fill, `a` will be returned
+    instead as an ndarray.

     If `a` is not a `MaskedArray`, `a` itself is returned.
     If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to

From 1223a1206e267980fba504cce1121f333f655f90 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 20 May 2024 17:42:51 +0000
Subject: [PATCH 354/980] MAINT: Bump github/codeql-action from 3.25.5 to 3.25.6

Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.5 to 3.25.6.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/b7cec7526559c32f1616476ff32d17ba4c59b2d6...9fdb3e49720b44c48891d036bb502feb25684276)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]

---
 .github/workflows/codeql.yml     | 6 +++---
 .github/workflows/scorecards.yml | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 41a56eb9aaa5..8157fb818a16 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -45,7 +45,7 @@ jobs:

     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v3.25.5
+      uses: github/codeql-action/init@9fdb3e49720b44c48891d036bb502feb25684276 # v3.25.6
       with:
         languages: ${{ matrix.language }}
         # If you wish to specify custom queries, you can do so here or in a config file.
@@ -55,7 +55,7 @@ jobs:

     # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
     # If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild
-      uses: github/codeql-action/autobuild@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v3.25.5
+      uses: github/codeql-action/autobuild@9fdb3e49720b44c48891d036bb502feb25684276 # v3.25.6

     # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v3.25.5 + uses: github/codeql-action/analyze@9fdb3e49720b44c48891d036bb502feb25684276 # v3.25.6 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 225cc0fdb790..ab42bf1f48bd 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # v2.1.27 + uses: github/codeql-action/upload-sarif@9fdb3e49720b44c48891d036bb502feb25684276 # v2.1.27 with: sarif_file: results.sarif From 99cb3580f610bcb14a66ce29ec479b7da5f881f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 May 2024 17:42:55 +0000 Subject: [PATCH 355/980] MAINT: Bump pypa/cibuildwheel from 2.18.0 to 2.18.1 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.18.0 to 2.18.1. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/711a3d017d0729f3edde18545fee967f03d65f65...ba8be0d98853f5744f24e7f902c8adef7ae2e7f3) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f585a1adc973..175c9ec134d9 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -149,7 +149,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@711a3d017d0729f3edde18545fee967f03d65f65 # v2.18.0 + uses: pypa/cibuildwheel@ba8be0d98853f5744f24e7f902c8adef7ae2e7f3 # v2.18.1 env: CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From c11d7ef2af287c6a6043d079dcdddeaa57d85ae1 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Mon, 20 May 2024 16:20:06 -0300 Subject: [PATCH 356/980] DOC: add examples to get_printoptions --- numpy/_core/arrayprint.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index ec11beae3f58..d12746c7ce52 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -329,6 +329,18 @@ def get_printoptions(): -------- set_printoptions, printoptions + Examples + -------- + + >>> np.get_printoptions() + {'edgeitems': 3, 'threshold': 1000, ..., 'legacy': False} + + >>> np.get_printoptions()['linewidth'] + 75 + >>> np.set_printoptions(linewidth=100) + >>> np.get_printoptions()['linewidth'] + 100 + """ opts = _format_options.copy() opts['legacy'] = { From 2c193fa517eab36e04f353eea278a81d1295a4a3 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Mon, 20 May 2024 16:40:14 -0300 Subject: [PATCH 357/980] DOC: add example to get_include --- numpy/lib/_utils_impl.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/lib/_utils_impl.py b/numpy/lib/_utils_impl.py index 8a2c4b5c61e7..32aa7a3c17df 
100644 --- a/numpy/lib/_utils_impl.py +++ b/numpy/lib/_utils_impl.py @@ -99,6 +99,11 @@ def get_include(): $ pkg-config --cflags -I/path/to/site-packages/numpy/_core/include + Examples + -------- + >>> np.get_include() + '/home/guido/.local/lib/python3.12/site-packages/numpy/core/include' + """ import numpy if numpy.show_config is None: From e5905e635061aa96af50098aa51279ffa5a99627 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Mon, 20 May 2024 17:48:55 -0300 Subject: [PATCH 358/980] DOC: add # may vary to get_include example --- numpy/lib/_utils_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_utils_impl.py b/numpy/lib/_utils_impl.py index 32aa7a3c17df..0c5d08ee7d9c 100644 --- a/numpy/lib/_utils_impl.py +++ b/numpy/lib/_utils_impl.py @@ -102,7 +102,7 @@ def get_include(): Examples -------- >>> np.get_include() - '/home/guido/.local/lib/python3.12/site-packages/numpy/core/include' + '.../site-packages/numpy/core/include' # may vary """ import numpy From 27b52f63d3fe18d2371220f39807e3ecd3748262 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Tue, 21 May 2024 02:13:05 -0300 Subject: [PATCH 359/980] DOC: fix rng.random example in numpy-for-matlab-users (#26492) --- doc/source/user/numpy-for-matlab-users.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index 7c7fd0898490..d9b5c460944c 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -392,7 +392,7 @@ Linear algebra equivalents from numpy.random import default_rng rng = default_rng(42) - rng.random(3, 4) + rng.random((3, 4)) or older version: ``random.rand((3, 4))`` From 1801aa263821b32022e657859e9519be7874f874 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 16 May 2024 14:48:22 -0600 Subject: [PATCH 360/980] TST: add basic free-threaded CI testing [skip azp] [skip cirrus] [skip circle] --- .github/workflows/linux.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 3c59b639baa1..bb9b13e0bcae 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -287,3 +287,25 @@ jobs: rm -rf build-install ./vendored-meson/meson/meson.py install -C build --destdir ../build-install --tags=runtime,python-runtime,devel python tools/check_installed_files.py $(find ./build-install -path '*/site-packages/numpy') --no-tests + + free-threaded: + needs: [smoke_test] + runs-on: ubuntu-latest + if: github.event_name != 'push' + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + submodules: recursive + fetch-tags: true + # TODO: replace with setup-python when there is support + - uses: deadsnakes/action@6c8b9b82fe0b4344f4b98f2775fcc395df45e494 # v3.1.0 + with: + python-version: '3.13-dev' + nogil: true + # TODO: remove cython nightly install when cython does a release + - name: Install nightly Cython + run: | + pip install git+https://github.com/cython/cython + - uses: ./.github/meson_actions + env: + PYTHON_GIL: 0 From aad6c9cd1958309110f9892d10b28b1beea9c670 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20R=C3=B6hling?= Date: Tue, 21 May 2024 21:12:45 +0200 Subject: [PATCH 361/980] BLD: Make NumPy build reproducibly (#26474) * Write actual lapack dependency to __config__.py * Build .s assembly files as source files --- numpy/_core/meson.build | 3 +-- numpy/meson.build | 2 +- 2 files 
changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 5a343340d315..dbe76e0a3dea 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1206,8 +1206,7 @@ py.extension_module('_multiarray_umath', src_numpy_api[1], # __multiarray_api.h src_umath_doc_h, npy_math_internal_h, - ], - objects: svml_objects, + ] + svml_objects, c_args: c_args_common, cpp_args: cpp_args_common, include_directories: [ diff --git a/numpy/meson.build b/numpy/meson.build index 1d35e7dc4fec..1190c00e0042 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -366,7 +366,7 @@ conf_data.set('PYTHON_VERSION', py.language_version()) # `np.show_config()`; needs some special handling for the case BLAS was found # but CBLAS not (and hence BLAS was also disabled) dependency_map = { - 'LAPACK': lapack_dep, + 'LAPACK': lapack, } if have_blas dependency_map += {'BLAS': blas} From fb60521895b51e5fcf86bbf374cea233549c8caf Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 22 May 2024 14:24:38 +0200 Subject: [PATCH 362/980] ENH: Support dlpack version 1 ABI --- numpy/_core/src/common/dlpack/dlpack.h | 27 +- numpy/_core/src/common/npy_dlpack.h | 3 + numpy/_core/src/multiarray/dlpack.c | 432 +++++++++++++----- numpy/_core/src/multiarray/multiarraymodule.c | 5 + numpy/_core/src/multiarray/multiarraymodule.h | 1 + numpy/_core/tests/test_dlpack.py | 5 +- 6 files changed, 362 insertions(+), 111 deletions(-) diff --git a/numpy/_core/src/common/dlpack/dlpack.h b/numpy/_core/src/common/dlpack/dlpack.h index f0cbf61368c7..1b447b0389fe 100644 --- a/numpy/_core/src/common/dlpack/dlpack.h +++ b/numpy/_core/src/common/dlpack/dlpack.h @@ -1,5 +1,6 @@ // Taken from: -// https://github.com/dmlc/dlpack/blob/ca4d00ad3e2e0f410eeab3264d21b8a39397f362/include/dlpack/dlpack.h +// https://github.com/dmlc/dlpack/blob/bbd2f4d32427e548797929af08cfe2a9cbb3cf12/include/dlpack/dlpack.h +// but added typedef to DLManagedTensorVersioned /*! * Copyright (c) 2017 by Contributors * \file dlpack.h @@ -118,6 +119,8 @@ typedef enum { kDLWebGPU = 15, /*! \brief Qualcomm Hexagon DSP */ kDLHexagon = 16, + /*! \brief Microsoft MAIA devices */ + kDLMAIA = 17, } DLDeviceType; /*! @@ -197,7 +200,7 @@ typedef struct { * `byte_offset` field should be used to point to the beginning of the data. * * Note that as of Nov 2021, multiply libraries (CuPy, PyTorch, TensorFlow, - * TVM, perhaps others) do not adhere to this 256 byte alignment requirement + * TVM, perhaps others) do not adhere to this 256 byte aligment requirement * on CPU/CUDA/ROCm, and always use `byte_offset=0`. This must be fixed * (after which this note will be updated); at the moment it is recommended * to not rely on the data pointer being correctly aligned. @@ -215,6 +218,9 @@ typedef struct { * return size; * } * \endcode + * + * Note that if the tensor is of size zero, then the data pointer should be + * set to `NULL`. */ void* data; /*! \brief The device of the tensor */ @@ -259,7 +265,7 @@ typedef struct DLManagedTensor { * \brief Destructor - this should be called * to destruct the manager_ctx which backs the DLManagedTensor. It can be * NULL if there is no way for the caller to provide a reasonable destructor. - * The destructors deletes the argument self as well. + * The destructor deletes the argument self as well. */ void (*deleter)(struct DLManagedTensor * self); } DLManagedTensor; @@ -269,6 +275,14 @@ typedef struct DLManagedTensor { /*! \brief bit mask to indicate that the tensor is read only. 
*/ #define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL) +/*! + * \brief bit mask to indicate that the tensor is a copy made by the producer. + * + * If set, the tensor is considered solely owned throughout its lifetime by the + * consumer, until the producer-provided deleter is invoked. + */ +#define DLPACK_FLAG_BITMASK_IS_COPIED (1UL << 1UL) + /*! * \brief A versioned and managed C Tensor object, manage memory of DLTensor. * @@ -279,7 +293,7 @@ typedef struct DLManagedTensor { * * \note This is the current standard DLPack exchange data structure. */ -struct DLManagedTensorVersioned { +typedef struct DLManagedTensorVersioned { /*! * \brief The API and ABI version of the current managed Tensor */ @@ -296,7 +310,7 @@ struct DLManagedTensorVersioned { * * This should be called to destruct manager_ctx which holds the DLManagedTensorVersioned. * It can be NULL if there is no way for the caller to provide a reasonable - * destructor. The destructors deletes the argument self as well. + * destructor. The destructor deletes the argument self as well. */ void (*deleter)(struct DLManagedTensorVersioned *self); /*! @@ -308,11 +322,12 @@ struct DLManagedTensorVersioned { * stable, to ensure that deleter can be correctly called. * * \sa DLPACK_FLAG_BITMASK_READ_ONLY + * \sa DLPACK_FLAG_BITMASK_IS_COPIED */ uint64_t flags; /*! \brief DLTensor which is being memory managed */ DLTensor dl_tensor; -}; +} DLManagedTensorVersioned; #ifdef __cplusplus } // DLPACK_EXTERN_C diff --git a/numpy/_core/src/common/npy_dlpack.h b/numpy/_core/src/common/npy_dlpack.h index cb926a26271d..9bd4c3201f20 100644 --- a/numpy/_core/src/common/npy_dlpack.h +++ b/numpy/_core/src/common/npy_dlpack.h @@ -6,12 +6,15 @@ // Part of the Array API specification. #define NPY_DLPACK_CAPSULE_NAME "dltensor" +#define NPY_DLPACK_VERSIONED_CAPSULE_NAME "dltensor_versioned" #define NPY_DLPACK_USED_CAPSULE_NAME "used_dltensor" +#define NPY_DLPACK_VERSIONED_USED_CAPSULE_NAME "used_dltensor_versioned" // Used internally by NumPy to store a base object // as it has to release a reference to the original // capsule. #define NPY_DLPACK_INTERNAL_CAPSULE_NAME "numpy_dltensor" +#define NPY_DLPACK_VERSIONED_INTERNAL_CAPSULE_NAME "numpy_dltensor_versioned" PyObject * array_dlpack(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 5029e87e87b2..decd73d53a47 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -8,9 +8,14 @@ #include "numpy/arrayobject.h" #include "npy_argparse.h" #include "npy_dlpack.h" +#include "multiarraymodule.h" + +/* + * Deleter for a NumPy exported dlpack DLManagedTensor(Versioned). + */ static void -array_dlpack_deleter(DLManagedTensor *self) +array_dlpack_deleter(DLManagedTensorVersioned *self) { /* * Leak the pyobj if not initialized. 
This can happen if we are running @@ -32,16 +37,40 @@ array_dlpack_deleter(DLManagedTensor *self) PyGILState_Release(state); } -/* This is exactly as mandated by dlpack */ -static void dlpack_capsule_deleter(PyObject *self) { - if (PyCapsule_IsValid(self, NPY_DLPACK_USED_CAPSULE_NAME)) { +/* TODO: Basically same as above until dlpack v0 is removed: */ +static void +array_dlpack_deleter_unversioned(DLManagedTensor *self) +{ + if (!Py_IsInitialized()) { return; } - DLManagedTensor *managed = - (DLManagedTensor *)PyCapsule_GetPointer(self, NPY_DLPACK_CAPSULE_NAME); + PyGILState_STATE state = PyGILState_Ensure(); + + PyArrayObject *array = (PyArrayObject *)self->manager_ctx; + PyMem_Free(self); + Py_XDECREF(array); + + PyGILState_Release(state); +} + + +/* + * Deleter for a DLPack capsule wrapping a DLManagedTensor(Versioed). + * + * This is exactly as mandated by dlpack + */ +static void +dlpack_capsule_deleter(PyObject *self) { + if (PyCapsule_IsValid(self, NPY_DLPACK_VERSIONED_USED_CAPSULE_NAME)) { + return; + } + + DLManagedTensorVersioned *managed = + (DLManagedTensorVersioned *)PyCapsule_GetPointer( + self, NPY_DLPACK_VERSIONED_CAPSULE_NAME); if (managed == NULL) { - PyErr_WriteUnraisable(self); + PyErr_WriteUnraisable(NULL); return; } /* @@ -53,18 +82,41 @@ static void dlpack_capsule_deleter(PyObject *self) { } } -/* used internally, almost identical to dlpack_capsule_deleter() */ -static void array_dlpack_internal_capsule_deleter(PyObject *self) -{ - /* an exception may be in-flight, we must save it in case we create another one */ - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); +/* TODO: Basically same as above until dlpack v0 is removed: */ +static void +dlpack_capsule_deleter_unversioned(PyObject *self) { + if (PyCapsule_IsValid(self, NPY_DLPACK_USED_CAPSULE_NAME)) { + return; + } DLManagedTensor *managed = - (DLManagedTensor *)PyCapsule_GetPointer(self, NPY_DLPACK_INTERNAL_CAPSULE_NAME); + (DLManagedTensor *)PyCapsule_GetPointer(self, NPY_DLPACK_CAPSULE_NAME); if (managed == NULL) { - PyErr_WriteUnraisable(self); - goto done; + PyErr_WriteUnraisable(NULL); + return; + } + + if (managed->deleter) { + managed->deleter(managed); + } +} + + +/* + * Deleter for the capsule used as a `base` in `from_dlpack`. + * + * This is almost identical to the above used internally as the base for our array + * so that we can consume (rename) the original capsule. + */ +static void +array_dlpack_internal_capsule_deleter(PyObject *self) +{ + DLManagedTensorVersioned *managed = + (DLManagedTensorVersioned *)PyCapsule_GetPointer( + self, NPY_DLPACK_VERSIONED_CAPSULE_NAME); + if (managed == NULL) { + PyErr_WriteUnraisable(NULL); + return; } /* * the spec says the deleter can be NULL if there is no way for the caller @@ -75,9 +127,23 @@ static void array_dlpack_internal_capsule_deleter(PyObject *self) /* TODO: is the deleter allowed to set a python exception? 
*/ assert(!PyErr_Occurred()); } +} + +/* TODO: Basically same as above until dlpack v0 is removed: */ +static void +array_dlpack_internal_capsule_deleter_unversioned(PyObject *self) +{ + DLManagedTensor *managed = + (DLManagedTensor *)PyCapsule_GetPointer(self, NPY_DLPACK_CAPSULE_NAME); + if (managed == NULL) { + PyErr_WriteUnraisable(NULL); + return; + } -done: - PyErr_Restore(type, value, traceback); + if (managed->deleter) { + managed->deleter(managed); + assert(!PyErr_Occurred()); + } } @@ -99,41 +165,33 @@ array_get_dl_device(PyArrayObject *self) { // The outer if is due to the fact that NumPy arrays are on the CPU // by default (if not created from DLPack). if (PyCapsule_IsValid(base, NPY_DLPACK_INTERNAL_CAPSULE_NAME)) { - DLManagedTensor *managed = PyCapsule_GetPointer( + DLManagedTensor *managed = (DLManagedTensor *)PyCapsule_GetPointer( base, NPY_DLPACK_INTERNAL_CAPSULE_NAME); if (managed == NULL) { return ret; } return managed->dl_tensor.device; } + else if (PyCapsule_IsValid(base, NPY_DLPACK_VERSIONED_INTERNAL_CAPSULE_NAME)) { + DLManagedTensorVersioned *managed = (DLManagedTensorVersioned *)PyCapsule_GetPointer( + base, NPY_DLPACK_VERSIONED_INTERNAL_CAPSULE_NAME); + if (managed == NULL) { + return ret; + } + return managed->dl_tensor.device; + } return ret; } -PyObject * -array_dlpack(PyArrayObject *self, - PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +/* + * Fill the dl_tensor struct from the `self` array. + * This struct could be versioned, but as of now is not. + */ +static int +fill_dl_tensor_information( + DLTensor *dl_tensor, PyArrayObject *self) { - PyObject *stream = Py_None; - NPY_PREPARE_ARGPARSER; - if (npy_parse_arguments("__dlpack__", args, len_args, kwnames, - "$stream", NULL, &stream, NULL, NULL, NULL)) { - return NULL; - } - - if (stream != Py_None) { - PyErr_SetString(PyExc_RuntimeError, - "NumPy only supports stream=None."); - return NULL; - } - - if ( !(PyArray_FLAGS(self) & NPY_ARRAY_WRITEABLE)) { - PyErr_SetString(PyExc_BufferError, - "Cannot export readonly array since signalling readonly " - "is unsupported by DLPack."); - return NULL; - } - npy_intp itemsize = PyArray_ITEMSIZE(self); int ndim = PyArray_NDIM(self); npy_intp *strides = PyArray_STRIDES(self); @@ -145,7 +203,7 @@ array_dlpack(PyArrayObject *self, PyErr_SetString(PyExc_BufferError, "DLPack only supports strides which are a multiple of " "itemsize."); - return NULL; + return -1; } } } @@ -156,7 +214,7 @@ array_dlpack(PyArrayObject *self, if (PyDataType_ISBYTESWAPPED(dtype)) { PyErr_SetString(PyExc_BufferError, "DLPack only supports native byte order."); - return NULL; + return -1; } managed_dtype.bits = 8 * itemsize; @@ -178,7 +236,7 @@ array_dlpack(PyArrayObject *self, PyErr_SetString(PyExc_BufferError, "DLPack only supports IEEE floating point types " "without padding (longdouble typically is not IEEE)."); - return NULL; + return -1; } managed_dtype.code = kDLFloat; } @@ -189,7 +247,7 @@ array_dlpack(PyArrayObject *self, PyErr_SetString(PyExc_BufferError, "DLPack only supports IEEE floating point types " "without padding (longdouble typically is not IEEE)."); - return NULL; + return -1; } managed_dtype.code = kDLComplex; } @@ -197,25 +255,14 @@ array_dlpack(PyArrayObject *self, PyErr_SetString(PyExc_BufferError, "DLPack only supports signed/unsigned integers, float " "and complex dtypes."); - return NULL; + return -1; } DLDevice device = array_get_dl_device(self); if (PyErr_Occurred()) { - return NULL; + return -1; } - // ensure alignment - int offset = 
sizeof(DLManagedTensor) % sizeof(void *); - void *ptr = PyMem_Malloc(sizeof(DLManagedTensor) + offset + - (sizeof(int64_t) * ndim * 2)); - if (ptr == NULL) { - PyErr_NoMemory(); - return NULL; - } - - DLManagedTensor *managed = ptr; - /* * Note: the `dlpack.h` header suggests/standardizes that `data` must be * 256-byte aligned. We ignore this intentionally, because `__dlpack__` @@ -229,34 +276,90 @@ array_dlpack(PyArrayObject *self, * that NumPy MUST use `byte_offset` to adhere to the standard (as * specified in the header)! */ - managed->dl_tensor.data = PyArray_DATA(self); - managed->dl_tensor.byte_offset = 0; - managed->dl_tensor.device = device; - managed->dl_tensor.dtype = managed_dtype; + dl_tensor->data = PyArray_DATA(self); + dl_tensor->byte_offset = 0; + dl_tensor->device = device; + dl_tensor->dtype = managed_dtype; - int64_t *managed_shape_strides = (int64_t *)((char *)ptr + - sizeof(DLManagedTensor) + offset); - - int64_t *managed_shape = managed_shape_strides; - int64_t *managed_strides = managed_shape_strides + ndim; for (int i = 0; i < ndim; ++i) { - managed_shape[i] = shape[i]; + dl_tensor->shape[i] = shape[i]; // Strides in DLPack are items; in NumPy are bytes. - managed_strides[i] = strides[i] / itemsize; + dl_tensor->strides[i] = strides[i] / itemsize; } - managed->dl_tensor.ndim = ndim; - managed->dl_tensor.shape = managed_shape; - managed->dl_tensor.strides = NULL; - if (PyArray_SIZE(self) != 1 && !PyArray_IS_C_CONTIGUOUS(self)) { - managed->dl_tensor.strides = managed_strides; + dl_tensor->ndim = ndim; + if (PyArray_IS_C_CONTIGUOUS(self)) { + /* No need to pass strides, so just NULL it again */ + dl_tensor->strides = NULL; } - managed->dl_tensor.byte_offset = 0; - managed->manager_ctx = self; - managed->deleter = array_dlpack_deleter; + dl_tensor->byte_offset = 0; + + return 0; +} - PyObject *capsule = PyCapsule_New(managed, NPY_DLPACK_CAPSULE_NAME, - dlpack_capsule_deleter); + +static PyObject * +create_dlpack_capsule(PyArrayObject *self, int versioned) +{ + int ndim = PyArray_NDIM(self); + + /* + * We align shape and strides at the end but need to align them, offset + * gives the offset of the shape (and strides) including the struct size. + */ + size_t align = sizeof(int64_t); + size_t struct_size = ( + versioned ? 
sizeof(DLManagedTensorVersioned) : sizeof(DLManagedTensor)); + + size_t offset = (struct_size + align - 1) / align * align; + void *ptr = PyMem_Malloc(offset + (sizeof(int64_t) * ndim * 2)); + if (ptr == NULL) { + PyErr_NoMemory(); + return NULL; + } + + DLTensor *dl_tensor; + PyCapsule_Destructor capsule_deleter; + const char *capsule_name; + + if (versioned) { + DLManagedTensorVersioned *managed = (DLManagedTensorVersioned *)ptr; + capsule_name = NPY_DLPACK_VERSIONED_CAPSULE_NAME; + capsule_deleter = (PyCapsule_Destructor)dlpack_capsule_deleter; + managed->deleter = array_dlpack_deleter; + managed->manager_ctx = self; + + dl_tensor = &managed->dl_tensor; + + /* The versioned tensor has additional fields that we need to set */ + managed->version.major = 1; + managed->version.minor = 0; + + managed->flags = 0; + if (!PyArray_CHKFLAGS(self, NPY_ARRAY_WRITEABLE)) { + managed->flags |= DLPACK_FLAG_BITMASK_READ_ONLY; + } + } + else { + DLManagedTensor *managed = (DLManagedTensor *)ptr; + capsule_name = NPY_DLPACK_CAPSULE_NAME; + capsule_deleter = (PyCapsule_Destructor)dlpack_capsule_deleter_unversioned; + managed->deleter = array_dlpack_deleter_unversioned; + managed->manager_ctx = self; + + dl_tensor = &managed->dl_tensor; + } + + dl_tensor->shape = (int64_t *)((char *)ptr + offset); + /* Note that strides may be set to NULL later if C-contiguous */ + dl_tensor->strides = dl_tensor->shape + ndim; + + if (fill_dl_tensor_information(dl_tensor, self) < 0) { + PyMem_Free(ptr); + return NULL; + } + + PyObject *capsule = PyCapsule_New(ptr, capsule_name, capsule_deleter); if (capsule == NULL) { PyMem_Free(ptr); return NULL; @@ -264,9 +367,62 @@ array_dlpack(PyArrayObject *self, // the capsule holds a reference Py_INCREF(self); + return capsule; } + +PyObject * +array_dlpack(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + PyObject *stream = Py_None; + PyObject *max_version = Py_None; + long major_version = 0; + NPY_PREPARE_ARGPARSER; + if (npy_parse_arguments("__dlpack__", args, len_args, kwnames, + "$stream", NULL, &stream, + "$max_version", NULL, &max_version, + NULL, NULL, NULL)) { + return NULL; + } + + if (max_version != Py_None) { + if (!PyTuple_Check(max_version) || PyTuple_GET_SIZE(max_version) != 2) { + PyErr_SetString(PyExc_TypeError, + "max_version must be None or a tuple with two elements."); + return NULL; + } + major_version = PyLong_AsLong(PyTuple_GET_ITEM(max_version, 0)); + if (major_version == -1 && PyErr_Occurred()) { + return NULL; + } + } + + if (stream != Py_None) { + PyErr_SetString(PyExc_RuntimeError, + "NumPy only supports stream=None."); + return NULL; + } + + if (major_version < 1 && !(PyArray_FLAGS(self) & NPY_ARRAY_WRITEABLE)) { + PyErr_SetString(PyExc_BufferError, + "Cannot export readonly array since signalling readonly " + "is unsupported by DLPack (supported by newer DLPack version)."); + return NULL; + } + + /* + * TODO: The versioned and non-versioned structs of DLPack are very + * similar but not ABI compatible so that the function called here requires + * branching (templating didn't seem worthwhile). + * + * Version 0 support should be deprecated in NumPy 2.1 and the branches + * can then be removed again. 
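+ *
+ * Illustratively (a hypothetical interactive session, capsule addresses
+ * elided), the two flavours are requested from Python as:
+ *
+ *     >>> x = np.arange(5)
+ *     >>> x.__dlpack__(max_version=(1, 0))   # versioned struct
+ *     <capsule object "dltensor_versioned" at 0x...>
+ *     >>> x.__dlpack__()                     # legacy struct
+ *     <capsule object "dltensor" at 0x...>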
+ */ + return create_dlpack_capsule(self, major_version >= 1); +} + PyObject * array_dlpack_device(PyArrayObject *self, PyObject *NPY_UNUSED(args)) { @@ -279,22 +435,78 @@ array_dlpack_device(PyArrayObject *self, PyObject *NPY_UNUSED(args)) NPY_NO_EXPORT PyObject * from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { - PyObject *capsule = PyObject_CallMethod((PyObject *)obj->ob_type, - "__dlpack__", "O", obj); + static PyObject *kwnames = NULL; + static PyObject *max_version = NULL; + + if (kwnames == NULL) { + kwnames = Py_BuildValue("(s)", "max_version"); + if (kwnames == NULL) { + return NULL; + } + } + if (max_version == NULL) { + max_version = Py_BuildValue("(i,i)", 1, 0); + if (max_version == NULL) { + return NULL; + } + } + + PyObject *args[2] = {obj, max_version}; + Py_ssize_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET; + + PyObject *capsule = PyObject_VectorcallMethod( + npy_ma_str___dlpack__, args, nargsf, kwnames); if (capsule == NULL) { - return NULL; + /* + * TODO: This path should be deprecated in NumPy 2.1. Once deprecated + * the below code can be simplified w.r.t. to versioned/unversioned. + */ + if (PyErr_ExceptionMatches(PyExc_TypeError)) { + /* max_version may be unsupported, try without kwargs */ + PyErr_Clear(); + capsule = PyObject_VectorcallMethod( + npy_ma_str___dlpack__, args, nargsf, NULL); + } + if (capsule == NULL) { + return NULL; + } } - DLManagedTensor *managed = - (DLManagedTensor *)PyCapsule_GetPointer(capsule, - NPY_DLPACK_CAPSULE_NAME); + void *managed_ptr; + DLTensor dl_tensor; + int readonly; + int versioned = PyCapsule_IsValid(capsule, NPY_DLPACK_VERSIONED_CAPSULE_NAME); + if (versioned) { + managed_ptr = PyCapsule_GetPointer(capsule, NPY_DLPACK_VERSIONED_CAPSULE_NAME); + DLManagedTensorVersioned *managed = (DLManagedTensorVersioned *)managed_ptr; + if (managed == NULL) { + Py_DECREF(capsule); + return NULL; + } - if (managed == NULL) { - Py_DECREF(capsule); - return NULL; + if (managed->version.major > 1) { + PyErr_SetString(PyExc_BufferError, + "from_dlpack(): the exported DLPack major version is too " + "high to be imported by this version of NumPy."); + Py_DECREF(capsule); + return NULL; + } + + dl_tensor = managed->dl_tensor; + readonly = (managed->flags & DLPACK_FLAG_BITMASK_READ_ONLY) != 0; + } + else { + managed_ptr = PyCapsule_GetPointer(capsule, NPY_DLPACK_CAPSULE_NAME); + DLManagedTensor *managed = (DLManagedTensor *)managed_ptr; + if (managed == NULL) { + Py_DECREF(capsule); + return NULL; + } + dl_tensor = managed->dl_tensor; + readonly = 0; } - const int ndim = managed->dl_tensor.ndim; + const int ndim = dl_tensor.ndim; if (ndim > NPY_MAXDIMS) { PyErr_SetString(PyExc_RuntimeError, "maxdims of DLPack tensor is higher than the supported " @@ -303,7 +515,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { return NULL; } - DLDeviceType device_type = managed->dl_tensor.device.device_type; + DLDeviceType device_type = dl_tensor.device.device_type; if (device_type != kDLCPU && device_type != kDLCUDAHost && device_type != kDLROCMHost && @@ -314,7 +526,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { return NULL; } - if (managed->dl_tensor.dtype.lanes != 1) { + if (dl_tensor.dtype.lanes != 1) { PyErr_SetString(PyExc_RuntimeError, "Unsupported lanes in DLTensor dtype."); Py_DECREF(capsule); @@ -322,9 +534,9 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { } int typenum = -1; - const uint8_t bits = managed->dl_tensor.dtype.bits; + const uint8_t bits = dl_tensor.dtype.bits; const npy_intp itemsize = bits / 8; - 
switch (managed->dl_tensor.dtype.code) { + switch (dl_tensor.dtype.code) { case kDLBool: if (bits == 8) { typenum = NPY_BOOL; @@ -376,15 +588,14 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { npy_intp strides[NPY_MAXDIMS]; for (int i = 0; i < ndim; ++i) { - shape[i] = managed->dl_tensor.shape[i]; + shape[i] = dl_tensor.shape[i]; // DLPack has elements as stride units, NumPy has bytes. - if (managed->dl_tensor.strides != NULL) { - strides[i] = managed->dl_tensor.strides[i] * itemsize; + if (dl_tensor.strides != NULL) { + strides[i] = dl_tensor.strides[i] * itemsize; } } - char *data = (char *)managed->dl_tensor.data + - managed->dl_tensor.byte_offset; + char *data = (char *)dl_tensor.data + dl_tensor.byte_offset; PyArray_Descr *descr = PyArray_DescrFromType(typenum); if (descr == NULL) { @@ -393,15 +604,27 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { } PyObject *ret = PyArray_NewFromDescr(&PyArray_Type, descr, ndim, shape, - managed->dl_tensor.strides != NULL ? strides : NULL, data, 0, NULL); + dl_tensor.strides != NULL ? strides : NULL, data, 0, NULL); if (ret == NULL) { Py_DECREF(capsule); return NULL; } + if (readonly) { + PyArray_CLEARFLAGS((PyArrayObject *)ret, NPY_ARRAY_WRITEABLE); + } - PyObject *new_capsule = PyCapsule_New(managed, + PyObject *new_capsule; + if (versioned) { + new_capsule = PyCapsule_New(managed_ptr, + NPY_DLPACK_VERSIONED_INTERNAL_CAPSULE_NAME, + (PyCapsule_Destructor)array_dlpack_internal_capsule_deleter); + } + else { + new_capsule = PyCapsule_New(managed_ptr, NPY_DLPACK_INTERNAL_CAPSULE_NAME, - array_dlpack_internal_capsule_deleter); + (PyCapsule_Destructor)array_dlpack_internal_capsule_deleter_unversioned); + } + if (new_capsule == NULL) { Py_DECREF(capsule); Py_DECREF(ret); @@ -414,7 +637,10 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { return NULL; } - if (PyCapsule_SetName(capsule, NPY_DLPACK_USED_CAPSULE_NAME) < 0) { + const char *new_name = ( + versioned ? 
NPY_DLPACK_VERSIONED_USED_CAPSULE_NAME + : NPY_DLPACK_USED_CAPSULE_NAME); + if (PyCapsule_SetName(capsule, new_name) < 0) { Py_DECREF(capsule); Py_DECREF(ret); return NULL; diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 37b2f4860b1a..14502c585114 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4795,6 +4795,7 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_convert_if_no_array = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_cpu = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_err_msg_substr = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str___dlpack__ = NULL; static int intern_strings(void) @@ -4881,6 +4882,10 @@ intern_strings(void) if (npy_ma_str_array_err_msg_substr == NULL) { return -1; } + npy_ma_str___dlpack__ = PyUnicode_InternFromString("__dlpack__"); + if (npy_ma_str___dlpack__ == NULL) { + return -1; + } return 0; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 52ca654804d0..b3f15686dfe0 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -21,5 +21,6 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert_if_no_array; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_cpu; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_err_msg_substr; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str___dlpack__; #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index da648fd36afb..068115068ebc 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -7,9 +7,10 @@ class TestDLPack: @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") - def test_dunder_dlpack_refcount(self): + @pytest.mark.parametrize(max_version, [(0, 0), None, (1, 0), (100, 3)]) + def test_dunder_dlpack_refcount(self, max_version): x = np.arange(5) - y = x.__dlpack__() + y = x.__dlpack__(max_version=max_version) assert sys.getrefcount(x) == 3 del y assert sys.getrefcount(x) == 2 From 5324283d3d17327580d11e10c632179f3737dbc0 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 22 May 2024 14:56:26 +0200 Subject: [PATCH 363/980] Fixups and support dl_device and copy kwargs --- numpy/_core/src/common/npy_dlpack.h | 7 +- numpy/_core/src/multiarray/dlpack.c | 140 +++++++++++++++--- numpy/_core/src/multiarray/multiarraymodule.c | 2 +- numpy/_core/tests/test_dlpack.py | 69 +++++++-- 4 files changed, 179 insertions(+), 39 deletions(-) diff --git a/numpy/_core/src/common/npy_dlpack.h b/numpy/_core/src/common/npy_dlpack.h index 9bd4c3201f20..1dd3ae7f88e5 100644 --- a/numpy/_core/src/common/npy_dlpack.h +++ b/numpy/_core/src/common/npy_dlpack.h @@ -16,16 +16,17 @@ #define NPY_DLPACK_INTERNAL_CAPSULE_NAME "numpy_dltensor" #define NPY_DLPACK_VERSIONED_INTERNAL_CAPSULE_NAME "numpy_dltensor_versioned" -PyObject * +NPY_NO_EXPORT PyObject * array_dlpack(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames); -PyObject * +NPY_NO_EXPORT PyObject * array_dlpack_device(PyArrayObject *self, PyObject *NPY_UNUSED(args)); NPY_NO_EXPORT PyObject * -from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj); +from_dlpack(PyObject *NPY_UNUSED(self), + PyObject *const *args, Py_ssize_t len_args, 
PyObject *kwnames); #endif diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index decd73d53a47..69bcb507bfe2 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -9,12 +9,13 @@ #include "npy_argparse.h" #include "npy_dlpack.h" #include "multiarraymodule.h" +#include "conversion_utils.h" /* * Deleter for a NumPy exported dlpack DLManagedTensor(Versioned). */ -static void +NPY_NO_EXPORT static void array_dlpack_deleter(DLManagedTensorVersioned *self) { /* @@ -113,7 +114,7 @@ array_dlpack_internal_capsule_deleter(PyObject *self) { DLManagedTensorVersioned *managed = (DLManagedTensorVersioned *)PyCapsule_GetPointer( - self, NPY_DLPACK_VERSIONED_CAPSULE_NAME); + self, NPY_DLPACK_VERSIONED_INTERNAL_CAPSULE_NAME); if (managed == NULL) { PyErr_WriteUnraisable(NULL); return; @@ -134,7 +135,8 @@ static void array_dlpack_internal_capsule_deleter_unversioned(PyObject *self) { DLManagedTensor *managed = - (DLManagedTensor *)PyCapsule_GetPointer(self, NPY_DLPACK_CAPSULE_NAME); + (DLManagedTensor *)PyCapsule_GetPointer( + self, NPY_DLPACK_INTERNAL_CAPSULE_NAME); if (managed == NULL) { PyErr_WriteUnraisable(NULL); return; @@ -190,7 +192,7 @@ array_get_dl_device(PyArrayObject *self) { */ static int fill_dl_tensor_information( - DLTensor *dl_tensor, PyArrayObject *self) + DLTensor *dl_tensor, PyArrayObject *self, DLDevice *result_device) { npy_intp itemsize = PyArray_ITEMSIZE(self); int ndim = PyArray_NDIM(self); @@ -258,11 +260,6 @@ fill_dl_tensor_information( return -1; } - DLDevice device = array_get_dl_device(self); - if (PyErr_Occurred()) { - return -1; - } - /* * Note: the `dlpack.h` header suggests/standardizes that `data` must be * 256-byte aligned. We ignore this intentionally, because `__dlpack__` @@ -278,7 +275,7 @@ fill_dl_tensor_information( */ dl_tensor->data = PyArray_DATA(self); dl_tensor->byte_offset = 0; - dl_tensor->device = device; + dl_tensor->device = *result_device; dl_tensor->dtype = managed_dtype; for (int i = 0; i < ndim; ++i) { @@ -299,7 +296,8 @@ fill_dl_tensor_information( static PyObject * -create_dlpack_capsule(PyArrayObject *self, int versioned) +create_dlpack_capsule( + PyArrayObject *self, int versioned, DLDevice *result_device) { int ndim = PyArray_NDIM(self); @@ -354,7 +352,7 @@ create_dlpack_capsule(PyArrayObject *self, int versioned) /* Note that strides may be set to NULL later if C-contiguous */ dl_tensor->strides = dl_tensor->shape + ndim; - if (fill_dl_tensor_information(dl_tensor, self) < 0) { + if (fill_dl_tensor_information(dl_tensor, self, result_device) < 0) { PyMem_Free(ptr); return NULL; } @@ -372,17 +370,55 @@ create_dlpack_capsule(PyArrayObject *self, int versioned) } -PyObject * +static int +device_converter(PyObject *obj, DLDevice *result_device) +{ + int type, id; + if (obj == Py_None) { + return NPY_SUCCEED; + } + if (!PyTuple_Check(obj)) { + PyErr_SetString(PyExc_TypeError, "dl_device must be a tuple"); + return NPY_FAIL; + } + if (!PyArg_ParseTuple(obj, "ii", &type, &id)) { + return NPY_FAIL; + } + /* We can honor the request if matches the existing one or is CPU */ + if (type == result_device->device_type && id == result_device->device_id) { + return NPY_SUCCEED; + } + if (type == kDLCPU && id == 0) { + result_device->device_type = type; + result_device->device_id = id; + return NPY_SUCCEED; + } + + PyErr_SetString(PyExc_ValueError, "unsupported device requested"); + return NPY_FAIL; +} + + +NPY_NO_EXPORT PyObject * array_dlpack(PyArrayObject *self, PyObject *const *args, 
Py_ssize_t len_args, PyObject *kwnames) { PyObject *stream = Py_None; PyObject *max_version = Py_None; + NPY_COPYMODE copy_mode = NPY_COPY_IF_NEEDED; long major_version = 0; + /* We allow the user to request a result device in principle. */ + DLDevice result_device = array_get_dl_device(self); + if (PyErr_Occurred()) { + return NULL; + } + NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("__dlpack__", args, len_args, kwnames, "$stream", NULL, &stream, "$max_version", NULL, &max_version, + "$dl_device", &device_converter, &result_device, + "$copy", &PyArray_CopyConverter, ©_mode, NULL, NULL, NULL)) { return NULL; } @@ -405,10 +441,23 @@ array_dlpack(PyArrayObject *self, return NULL; } + /* If the user requested a copy be made, honor that here already */ + if (copy_mode == NPY_COPY_ALWAYS) { + /* TODO: It may be good to check ability to export dtype first. */ + self = (PyArrayObject *)PyArray_NewCopy(self, NPY_KEEPORDER); + if (self == NULL) { + return NULL; + } + } + else { + Py_INCREF(self); + } + if (major_version < 1 && !(PyArray_FLAGS(self) & NPY_ARRAY_WRITEABLE)) { PyErr_SetString(PyExc_BufferError, "Cannot export readonly array since signalling readonly " "is unsupported by DLPack (supported by newer DLPack version)."); + Py_DECREF(self); return NULL; } @@ -420,7 +469,11 @@ array_dlpack(PyArrayObject *self, * Version 0 support should be deprecated in NumPy 2.1 and the branches * can then be removed again. */ - return create_dlpack_capsule(self, major_version >= 1); + PyObject *res = create_dlpack_capsule( + self, major_version >= 1, &result_device); + Py_DECREF(self); + + return res; } PyObject * @@ -434,13 +487,33 @@ array_dlpack_device(PyArrayObject *self, PyObject *NPY_UNUSED(args)) } NPY_NO_EXPORT PyObject * -from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { - static PyObject *kwnames = NULL; +from_dlpack(PyObject *NPY_UNUSED(self), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + PyObject *obj, *copy = Py_None, *device = Py_None; + NPY_PREPARE_ARGPARSER; + if (npy_parse_arguments("from_dlpack", args, len_args, kwnames, + "obj", NULL, &obj, + "$copy", NULL, ©, + "$device", NULL, &device, + NULL, NULL, NULL) < 0) { + return NULL; + } + + /* Prepare the arguments to call objects __dlpack__() method */ + static PyObject *call_kwnames = NULL; + static PyObject *dl_cpu_device_tuple = NULL; static PyObject *max_version = NULL; - if (kwnames == NULL) { - kwnames = Py_BuildValue("(s)", "max_version"); - if (kwnames == NULL) { + if (call_kwnames == NULL) { + call_kwnames = Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); + if (call_kwnames == NULL) { + return NULL; + } + } + if (dl_cpu_device_tuple == NULL) { + dl_cpu_device_tuple = Py_BuildValue("(i,i)", 1, 0); + if (dl_cpu_device_tuple == NULL) { return NULL; } } @@ -451,21 +524,42 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { } } - PyObject *args[2] = {obj, max_version}; + /* + * Prepare arguments for the full call. We always forward copy and pass + * our max_version. `device` is always passed as `None`, but if the user + * provided a device, we will replace it with the "cpu": (1, 0). 
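+ *
+ * In effect, the vectorcall below is the hypothetical Python spelling
+ *
+ *     obj.__dlpack__(dl_device=None, copy=copy, max_version=(1, 0))
+ *
+ * with ``None`` swapped for the CPU tuple ``(1, 0)`` whenever the user
+ * requested a device explicitly.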
+ */ + PyObject *call_args[] = {obj, Py_None, copy, max_version}; Py_ssize_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET; + /* If device is passed it must be "cpu" and replace it with (1, 0) */ + if (device != Py_None) { + /* test that device is actually CPU */ + NPY_DEVICE device_request = NPY_DEVICE_CPU; + if (!PyArray_DeviceConverterOptional(device, &device_request)) { + return NULL; + } + assert(device_request == NPY_DEVICE_CPU); + call_args[1] = dl_cpu_device_tuple; + } + + PyObject *capsule = PyObject_VectorcallMethod( - npy_ma_str___dlpack__, args, nargsf, kwnames); + npy_ma_str___dlpack__, call_args, nargsf, call_kwnames); if (capsule == NULL) { /* * TODO: This path should be deprecated in NumPy 2.1. Once deprecated * the below code can be simplified w.r.t. to versioned/unversioned. + * + * We try without any arguments if both device and copy are None, + * since the exporter may not support older versions of the protocol. */ - if (PyErr_ExceptionMatches(PyExc_TypeError)) { + if (PyErr_ExceptionMatches(PyExc_TypeError) + && device == Py_None && copy == Py_None) { /* max_version may be unsupported, try without kwargs */ PyErr_Clear(); capsule = PyObject_VectorcallMethod( - npy_ma_str___dlpack__, args, nargsf, NULL); + npy_ma_str___dlpack__, call_args, nargsf, NULL); } if (capsule == NULL) { return NULL; diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 14502c585114..54cedc30435e 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4604,7 +4604,7 @@ static struct PyMethodDef array_module_methods[] = { METH_NOARGS, "Give a warning on reload and big warning in sub-interpreters."}, {"from_dlpack", (PyCFunction)from_dlpack, - METH_O, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index 068115068ebc..7ab664761152 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -5,9 +5,19 @@ from numpy.testing import assert_array_equal, IS_PYPY +def new_and_old_dlpack(): + yield np.arange(5) + # Support only old version: + class OldDLPack(np.ndarray): + def __dlpack__(self, stream=None): + return super().__dlpack__(stream=None) + + yield np.arange(5).view(OldDLPack) + + class TestDLPack: @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") - @pytest.mark.parametrize(max_version, [(0, 0), None, (1, 0), (100, 3)]) + @pytest.mark.parametrize("max_version", [(0, 0), None, (1, 0), (100, 3)]) def test_dunder_dlpack_refcount(self, max_version): x = np.arange(5) y = x.__dlpack__(max_version=max_version) @@ -22,6 +32,13 @@ def test_dunder_dlpack_stream(self): with pytest.raises(RuntimeError): x.__dlpack__(stream=1) + def test_dunder_dlpack_copy(self): + x = np.arange(5) + x.__dlpack__(stream=None) + + with pytest.raises(RuntimeError): + x.__dlpack__(stream=1) + def test_strides_not_multiple_of_itemsize(self): dt = np.dtype([('int', np.int32), ('char', np.int8)]) y = np.zeros((5,), dtype=dt) @@ -31,12 +48,13 @@ def test_strides_not_multiple_of_itemsize(self): np.from_dlpack(z) @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") - def test_from_dlpack_refcount(self): - x = np.arange(5) - y = np.from_dlpack(x) - assert sys.getrefcount(x) == 3 + @pytest.mark.parametrize("arr", new_and_old_dlpack()) + def test_from_dlpack_refcount(self, arr): + arr = arr.copy() + y = np.from_dlpack(arr) + assert sys.getrefcount(arr) 
== 3 del y - assert sys.getrefcount(x) == 2 + assert sys.getrefcount(arr) == 2 @pytest.mark.parametrize("dtype", [ np.bool, @@ -45,8 +63,9 @@ def test_from_dlpack_refcount(self): np.float16, np.float32, np.float64, np.complex64, np.complex128 ]) - def test_dtype_passthrough(self, dtype): - x = np.arange(5).astype(dtype) + @pytest.mark.parametrize("arr", new_and_old_dlpack()) + def test_dtype_passthrough(self, arr, dtype): + x = arr.astype(dtype) y = np.from_dlpack(x) assert y.dtype == x.dtype @@ -98,21 +117,27 @@ def test_dlpack_device(self): z = y[::2] assert z.__dlpack_device__() == (1, 0) - def dlpack_deleter_exception(self): + def dlpack_deleter_exception(self, max_version): x = np.arange(5) - _ = x.__dlpack__() + _ = x.__dlpack__(max_version=max_version) raise RuntimeError - def test_dlpack_destructor_exception(self): + @pytest.mark.parametrize("max_version", [None, (1, 0)]) + def test_dlpack_destructor_exception(self, max_version): with pytest.raises(RuntimeError): - self.dlpack_deleter_exception() + self.dlpack_deleter_exception(max_version=max_version) def test_readonly(self): x = np.arange(5) x.flags.writeable = False + # Raises without max_version with pytest.raises(BufferError): x.__dlpack__() + # But works fine if we try with version + y = np.from_dlpack(x) + assert not y.flags.writeable + def test_ndim0(self): x = np.array(1.0) y = np.from_dlpack(x) @@ -123,3 +148,23 @@ def test_size1dims_arrays(self): buffer=np.ones(1000, dtype=np.uint8), order='F') y = np.from_dlpack(x) assert_array_equal(x, y) + + def test_copy(self): + x = np.arange(5) + + y = np.from_dlpack(x) + assert np.may_share_memory(x, y) + y = np.from_dlpack(x, copy=True) + assert not np.may_share_memory(x, y) + + def test_device(self): + x = np.arange(5) + # requesting (1, 0), i.e. CPU device works in both calls: + x.__dlpack__(dl_device=(1, 0)) + np.from_dlpack(x, device="cpu") + np.from_dlpack(x, device=None) + + with pytest.raises(ValueError): + x.__dlpack__(dl_device=(10, 0)) + with pytest.raises(ValueError): + np.from_dlpack(x, device="gpu") From 902e9b1ffd7386eeb2d6c791a940b9dba5623282 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 22 May 2024 16:01:20 +0200 Subject: [PATCH 364/980] Small fixups for linter/compiler warnings --- numpy/_core/src/multiarray/dlpack.c | 4 ++-- numpy/_core/tests/test_dlpack.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 69bcb507bfe2..bdc5936919c3 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -15,7 +15,7 @@ /* * Deleter for a NumPy exported dlpack DLManagedTensor(Versioned). 
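 *
 * Sketch of the observable Python-level behaviour (assuming ``import sys``
 * and ``import numpy as np``; exact refcounts are CPython-specific):
 *
 *     >>> x = np.arange(5)
 *     >>> cap = x.__dlpack__(max_version=(1, 0))
 *     >>> sys.getrefcount(x)   # the capsule holds one extra reference
 *     3
 *     >>> del cap              # this deleter runs and drops it again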
*/ -NPY_NO_EXPORT static void +static void array_dlpack_deleter(DLManagedTensorVersioned *self) { /* @@ -476,7 +476,7 @@ array_dlpack(PyArrayObject *self, return res; } -PyObject * +NPY_NO_EXPORT PyObject * array_dlpack_device(PyArrayObject *self, PyObject *NPY_UNUSED(args)) { DLDevice device = array_get_dl_device(self); diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index 7ab664761152..ee59ea8aa1bc 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -7,8 +7,9 @@ def new_and_old_dlpack(): yield np.arange(5) - # Support only old version: + class OldDLPack(np.ndarray): + # Support only the "old" version def __dlpack__(self, stream=None): return super().__dlpack__(stream=None) From 3c02936df6c28f674ef83f1c918e3d554f825262 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 22 May 2024 11:53:01 -0600 Subject: [PATCH 365/980] TST: work around flaky test on free-threaded build --- numpy/_core/tests/test_nep50_promotions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index 741359bc63cd..e603254f6fec 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -362,7 +362,7 @@ def weak_warn(): np._set_promotion_state("weak") b.wait() assert np._get_promotion_state() == "weak" - with pytest.warns(RuntimeWarning): + with pytest.raises(RuntimeWarning): np.float16(1) + 131008 task1 = threading.Thread(target=legacy_no_warn) From 7b4aa8f90586cb6f2da852bd410af491fa0ab9f8 Mon Sep 17 00:00:00 2001 From: warren Date: Wed, 22 May 2024 14:31:45 -0400 Subject: [PATCH 366/980] DOC: Copy-edit numpy 2.0 migration guide. Fix a few typos and do a touch of copy-editing. [skip actions] [skip azp] [skip cirrus] --- doc/source/numpy_2_0_migration_guide.rst | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 8cd02628497c..78be8843cd40 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -19,7 +19,7 @@ Ruff plugin =========== Many of the changes covered in the 2.0 release notes and in this migration -guide can be automatically adapted to in downstream code with a dedicated +guide can be automatically adapted in downstream code with a dedicated `Ruff `__ rule, namely rule `NPY201 `__. @@ -43,9 +43,8 @@ NumPy 2.0 changes promotion (the result of combining dissimilar data types) as per :ref:`NEP 50 `. Please see the NEP for details on this change. It includes a table of example changes and a backwards compatibility section. -The largest backwards compatibility change of this is that it means that -the precision of scalars is now preserved consistently. -Two examples are: +The largest backwards compatibility change is that the precision of scalars +is now preserved consistently. Two examples are: * ``np.float32(3) + 3.`` now returns a float32 when it previously returned a float64. @@ -97,7 +96,7 @@ C-API Changes ============= Some definitions were removed or replaced due to being outdated or -unmaintainable. Some new API definition will evaluate differently at +unmaintainable. Some new API definitions will evaluate differently at runtime between NumPy 2.0 and NumPy 1.x. 
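A Python-level analogue of such a runtime switch (an illustrative sketch
only; the C helpers discussed next are the real mechanism) might look like::

    import numpy as np

    # NPY_DEFAULT_INT evaluates to NPY_INTP on NumPy 2.x but to NPY_LONG
    # on 1.x; code supporting both can branch on the runtime version.
    if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
        default_int = np.intp
    else:
        default_int = np.int_  # C ``long`` on 1.x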
Some are defined in ``numpy/_core/include/numpy/npy_2_compat.h`` (for example ``NPY_DEFAULT_INT``) which can be vendored in full or part @@ -116,10 +115,10 @@ The ``PyArray_Descr`` struct has been changed One of the most impactful C-API changes is that the ``PyArray_Descr`` struct is now more opaque to allow us to add additional flags and have itemsizes not limited by the size of ``int`` as well as allow improving -structured dtypes in the future and not burdon new dtypes with their fields. +structured dtypes in the future and not burden new dtypes with their fields. Code which only uses the type number and other initial fields is unaffected. -Most code will hopefull mainly access the ``->elsize`` field, when the +Most code will hopefully mainly access the ``->elsize`` field, when the dtype/descriptor itself is attached to an array (e.g. ``arr->descr->elsize``) this is best replaced with ``PyArray_ITEMSIZE(arr)``. @@ -127,7 +126,7 @@ Where not possible, new accessor functions are required: * ``PyDataType_ELSIZE`` and ``PyDataType_SET_ELSIZE`` (note that the result is now ``npy_intp`` and not ``int``). -* ``PyDataType_ALIGNENT`` +* ``PyDataType_ALIGNMENT`` * ``PyDataType_FIELDS``, ``PyDataType_NAMES``, ``PyDataType_SUBARRAY`` * ``PyDataType_C_METADATA`` @@ -146,7 +145,7 @@ or adding ``npy2_compat.h`` into your code base and explicitly include it when compiling with NumPy 1.x (as they are new API). Including the file has no effect on NumPy 2. -Please do not hesitate to open a NumPy issue, if you require assistence or +Please do not hesitate to open a NumPy issue, if you require assistance or the provided functions are not sufficient. **Custom User DTypes:** @@ -180,7 +179,7 @@ Functionality which previously did not require import includes: Increased maximum number of dimensions -------------------------------------- -The maximum number of dimensions (and arguments) was increased to 64, this +The maximum number of dimensions (and arguments) was increased to 64. This affects the ``NPY_MAXDIMS`` and ``NPY_MAXARGS`` macros. It may be good to review their use, and we generally encourage you to not use these macros (especially ``NPY_MAXARGS``), so that a future version of @@ -237,7 +236,7 @@ Please refer to `NEP 52 Date: Thu, 16 May 2024 22:20:42 +0200 Subject: [PATCH 367/980] DOC: update the NumPy Roadmap With NumPy 2.0 RCs available, that release is feature-complete and a lot on the roadmap is outdated. So time for a large update. [skip actions] [skip cirrus] [skip azp] --- doc/neps/roadmap.rst | 148 ++++++++++++++++++++++++++++--------------- 1 file changed, 98 insertions(+), 50 deletions(-) diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst index 6a9761b05230..4b73c3258f56 100644 --- a/doc/neps/roadmap.rst +++ b/doc/neps/roadmap.rst @@ -18,25 +18,19 @@ may include (among other things) interoperability protocols, better duck typing support and ndarray subclass handling. The key goal is: *make it easy for code written for NumPy to also work with -other NumPy-like projects.* This will enable GPU support via, e.g, CuPy or JAX, +other NumPy-like projects.* This will enable GPU support via, e.g, CuPy, JAX or PyTorch, distributed array support via Dask, and writing special-purpose arrays (either from scratch, or as a ``numpy.ndarray`` subclass) that work well with SciPy, -scikit-learn and other such packages. +scikit-learn and other such packages. A large step forward in this area was +made in NumPy 2.0, with adoption of and compliance with the array API standard +(v2022.12, see :ref:`NEP47`). 
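As a sketch of what this compliance enables (illustrative only, assuming
NumPy 2.0, where ``ndarray`` gained ``__array_namespace__``)::

    import numpy as np

    def softmax(x):
        # Runs on any array type implementing the array API standard,
        # including np.ndarray as of NumPy 2.0.
        xp = x.__array_namespace__()
        e = xp.exp(x - xp.max(x))
        return e / xp.sum(e)

    print(softmax(np.asarray([1.0, 2.0, 3.0])))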
Future work in this direction will include +support for newer versions of the array API standard, and adding features as +needed based on real-world experience and needs. -The ``__array_ufunc__`` and ``__array_function__`` protocols are stable, but -do not cover the whole API. New protocols for overriding other functionality -in NumPy are needed. Work in this area aims to bring to completion one or more -of the following proposals: - -- :ref:`NEP30` -- :ref:`NEP31` -- :ref:`NEP35` -- :ref:`NEP37` - -In addition we aim to provide ways to make it easier for other libraries to -implement a NumPy-compatible API. This may include defining consistent subsets -of the API, as discussed in `this section of NEP 37 -`__. +In addition, the ``__array_ufunc__`` and ``__array_function__`` protocols +fulfill a role here - they are stable and used by several downstream projects. +They do not cover the whole API, so use of the array API standard is preferred +for new code. Performance @@ -46,17 +40,25 @@ Improvements to NumPy's performance are important to many users. We have focused this effort on Universal SIMD (see :ref:`NEP38`) intrinsics which provide nice improvements across various hardware platforms via an abstraction layer. The infrastructure is in place, and we welcome follow-on PRs to add -SIMD support across all relevant NumPy functions. +SIMD support across relevant NumPy functionality. + +Transitioning from C to C++, both in the SIMD infrastructure and in NumPy +internals more widely, is in progress. We have also started to make use of +Google Highway (see :ref:`NEP54`), and that usage is likely to expand. Work +towards support for newer SIMD instruction sets, like SVE on arm64, is ongoing. Other performance improvement ideas include: - A better story around parallel execution. - Optimizations in individual functions. -- Reducing ufunc and ``__array_function__`` overhead. Furthermore we would like to improve the benchmarking system, in terms of coverage, -easy of use, and publication of the results (now -`here `__) as part of the docs or website. +easy of use, and publication of the results. Benchmarking PRs/branches compared +to the `main` branch is a primary purpose, and required for PRs that are +performance-focused (e.g., adding SIMD acceleration to a function). In +addition, we'd like a performance overview like the one we had `here +`__, set up in a way that is more +maintainable long-term. Documentation and website @@ -68,30 +70,30 @@ documentation on many topics are missing or outdated. See :ref:`NEP44` for planned improvements. Adding more tutorials is underway in the `numpy-tutorials repo `__. -Our website (https://numpy.org) was completely redesigned recently. We aim to -further improve it by adding translations, more case studies and other -high-level content, and more (see `this tracking issue `__). +We also intend to make all the example code in our documentation interactive - +work is underway to do so via ``jupyterlite-sphinx`` and Pyodide. + +Our website (https://numpy.org) is in good shape. Further work on expanding the +number of languages that the website is translated in is desirable. As are +improvements to the interactive notebook widget, through JupyterLite. Extensibility ------------- -We aim to make it much easier to extend NumPy. The primary topic here is to -improve the dtype system - see :ref:`NEP41` and related NEPs linked from it. -Concrete goals for the dtype system rewrite are: - -- Easier custom dtypes: +We aim to continue making it easier to extend NumPy. 
The primary topic here is to +improve the dtype system - see for example :ref:`NEP41` and related NEPs linked +from it. In NumPy 2.0, a new C API for user-defined dtypes was made public. We aim +to encourage its usage and improve this API further. - - Simplify and/or wrap the current C-API - - More consistent support for dtype metadata - - Support for writing a dtype in Python +Ideas for new dtypes that may be developed outside of the main NumPy repository +first, and that could potentially be upstreamed into NumPy later, include: -- Allow adding (a) new string dtype(s). This could be encoded strings with - fixed-width storage (e.g., ``utf8`` or ``latin1``), and/or a variable length - string dtype. The latter could share an implementation with ``dtype=object``, - but be explicitly type-checked. - One of these should probably be the default for text data. The current - string dtype support is neither efficient nor user friendly. +- A quad-precision (128-bit) dtype +- A ``bfloat16`` dtype +- A fixed-width string dtype which supports encodings (e.g., ``utf8`` or + ``latin1``) +- A unit dtype User experience @@ -99,38 +101,84 @@ User experience Type annotations ```````````````` -NumPy 1.20 adds type annotations for most NumPy functionality, so users can use -tools like `mypy`_ to type check their code and IDEs can improve their support +Type annotations for most NumPy functionality is complete (although some +submodules like ``numpy.ma`` are missing return types), so users can use tools +like `mypy`_ to type check their code and IDEs can improve their support for NumPy. Improving those type annotations, for example to support annotating -array shapes and dtypes, is ongoing. +array shapes (see `gh-16544 `__), +is ongoing. Platform support ```````````````` We aim to increase our support for different hardware architectures. This includes adding CI coverage when CI services are available, providing wheels on -PyPI for POWER8/9 (``ppc64le``), providing better build and install -documentation, and resolving build issues on other platforms like AIX. +PyPI for platforms that are in high enough demand (e.g., we added ``musllinux`` +ones for NumPy 2.0), and resolving build issues on platforms that we don't test +in CI (e.g., AIX). + +We intend to write a NEP covering the support levels we provide and what is +required for a platform to move to a higher tier of support, similar to +`PEP 11 `__. + +CPython 3.13 will be the first release to offer a free-threaded build (i.e., +a CPython build with the GIL disabled). Work is in progress to support this +well in NumPy. After that is stable and complete, there may be opportunities to +actually make use of the potential for performance improvements from +free-threaded CPython, or make it easier to do so for NumPy's users. + +Binary size reduction +````````````````````` +The number of downloads of NumPy from PyPI and other platforms continues to +increase - as of May 2024 we're at >200 million downloads/month from PyPI +alone). Reducing the size of an installed NumPy package has many benefits: +faster installs, lower disk space usage, smaller load on PyPI, less +environmental impact, easier to fit more packages on top of NumPy into an AWS +Lambda layer, lower latency for Pyodide users, and so on. We aim for +significant reductions, as well as making it easier for end users and packagers +to produce smaller custom builds (e.g., we added support for stripping tests +before 2.1.0). See `gh-25737 `__ +for details. 
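Picking up the type-annotation point above, a small sketch of what the
current stubs already support (dtype-level checking; shape typing is the
open part)::

    import numpy as np
    import numpy.typing as npt

    def normalize(x: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
        # mypy can verify the float64 dtype here; array *shapes* are not
        # yet expressible, which is what gh-16544 tracks.
        return x / np.linalg.norm(x)

    print(normalize(np.array([3.0, 4.0])))  # -> [0.6 0.8]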
+ + +NumPy 2.0 stabilization & downstream usage +------------------------------------------ + +We made a very large amount of changes (and improvements!) in NumPy 2.0. The +release process has taken a very long time, and part of the ecosystem is still +catching up. We may need to slow down for a while, and possibly help the rest +of the ecosystem with adapting to the ABI and API changes. + +We will need to assess the costs and benefits to NumPy itself, +downstream package authors, and end users. Based on that assessment, we will +draw lessons for how to approach any future major releases. + + +Security +-------- + +NumPy is quite secure - we get only a limited number of reports about potential +vulnerabilities, and most of those are incorrect. We have made strides with a +documented security policy, a private disclosure method, and maintaining an +OpenSSF scorecard (with a high score). However, we have not changed much in how +we approach supply chain security in quite a while. We aim to make improvements +here, for example achieving fully reproducible builds for all the build +artifacts we publish - and providing full provenance information for them. Maintenance ----------- -- ``MaskedArray`` needs to be improved, ideas include: +- ``numpy.ma`` is still in poor shape and under-maintained. It needs to be + improved, ideas include: - Rewrite masked arrays to not be a ndarray subclass -- maybe in a separate project? - MaskedArray as a duck-array type, and/or - dtypes that support missing values -- Fortran integration via ``numpy.f2py`` requires a number of improvements, see - `this tracking issue `__. -- A backend system for ``numpy.fft`` (so that e.g. ``fft-mkl`` doesn't need to monkeypatch numpy). - Write a strategy on how to deal with overlap between NumPy and SciPy for ``linalg``. -- Deprecate ``np.matrix`` (very slowly). +- Deprecate ``np.matrix`` (very slowly) - this is feasible once the switch-over + from sparse matrices to sparse arrays in SciPy is complete. - Add new indexing modes for "vectorized indexing" and "outer indexing" (see :ref:`NEP21`). - Make the polynomial API easier to use. -- Integrate an improved text file loader. -- Ufunc and gufunc improvements, see `gh-8892 `__ - and `gh-11492 `__. ..
_`mypy`: https://mypy.readthedocs.io From 809b7c536cfb9c716f5585ed7a1df600ff9399f2 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 22 May 2024 16:26:04 -0600 Subject: [PATCH 368/980] MAINT: mark temp elision address cache as thread local --- numpy/_core/src/multiarray/temp_elide.c | 18 +++++++++--------- numpy/_core/tests/test_multithreading.py | 18 ++++++++++++++++++ 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/multiarray/temp_elide.c b/numpy/_core/src/multiarray/temp_elide.c index 289040673571..662a2fa52b06 100644 --- a/numpy/_core/src/multiarray/temp_elide.c +++ b/numpy/_core/src/multiarray/temp_elide.c @@ -124,22 +124,22 @@ check_callers(int * cannot) * TODO some calls go over scalarmath in umath but we cannot get the base * address of it from multiarraymodule as it is not linked against it */ - static int init = 0; + NPY_TLS static int init = 0; /* * measured DSO object memory start and end, if an address is located * inside these bounds it is part of that library so we don't need to call * dladdr on it (assuming linear memory) */ - static void * pos_python_start; - static void * pos_python_end; - static void * pos_ma_start; - static void * pos_ma_end; + NPY_TLS static void * pos_python_start; + NPY_TLS static void * pos_python_end; + NPY_TLS static void * pos_ma_start; + NPY_TLS static void * pos_ma_end; /* known address storage to save dladdr calls */ - static void * py_addr[64]; - static void * pyeval_addr[64]; - static npy_intp n_py_addr = 0; - static npy_intp n_pyeval = 0; + NPY_TLS static void * py_addr[64]; + NPY_TLS static void * pyeval_addr[64]; + NPY_TLS static npy_intp n_py_addr = 0; + NPY_TLS static npy_intp n_pyeval = 0; void *buffer[NPY_MAX_STACKSIZE]; int i, nptrs; diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 1a2534e78aaf..af6f94a08d55 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -36,3 +36,21 @@ def func(): np.isnan(arr) run_threaded(func, 500) + +def test_temp_elision_thread_safety(): + amid = np.ones(50000) + bmid = np.ones(50000) + alarge = np.ones(1000000) + blarge = np.ones(1000000) + + def func(count): + if count % 4 == 0: + (amid * 2) + bmid + elif count % 4 == 1: + (amid + bmid) - 2 + elif count % 4 == 2: + (alarge * 2) + blarge + else: + (alarge + blarge) - 2 + + run_threaded(func, 100, pass_count=True) From bb0cd9b3ca7827451f9afc815f7055166e7e2b1b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 23 May 2024 17:48:39 +0000 Subject: [PATCH 369/980] MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 Bumps [mamba-org/setup-micromamba](https://github.com/mamba-org/setup-micromamba) from 1.8.1 to 1.9.0. - [Release notes](https://github.com/mamba-org/setup-micromamba/releases) - [Commits](https://github.com/mamba-org/setup-micromamba/compare/422500192359a097648154e8db4e39bdb6c6eed7...f8b8a1e23a26f60a44c853292711bacfd3eac822) --- updated-dependencies: - dependency-name: mamba-org/setup-micromamba dependency-type: direct:production update-type: version-update:semver-minor ... 
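For context on the elision machinery the new test above exercises, a
minimal illustration (whether elision actually fires depends on array
size, platform and build)::

    import numpy as np

    a = np.ones(1_000_000)
    b = np.ones(1_000_000)
    # `a * 2` creates a temporary whose only reference is the expression
    # itself; NumPy may reuse that buffer in place for the `+ b` step
    # instead of allocating a third array.
    c = (a * 2) + b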
Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 175c9ec134d9..50453bef6ee1 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -159,7 +159,7 @@ jobs: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - uses: mamba-org/setup-micromamba@422500192359a097648154e8db4e39bdb6c6eed7 + - uses: mamba-org/setup-micromamba@f8b8a1e23a26f60a44c853292711bacfd3eac822 with: # for installation of anaconda-client, required for upload to # anaconda.org From a93e7ce4d78e77ae094e0045c1a6cf3a4bc7f8f0 Mon Sep 17 00:00:00 2001 From: Christopher Titchen <109701765+christopher-titchen@users.noreply.github.com> Date: Thu, 23 May 2024 20:48:15 +0100 Subject: [PATCH 370/980] BENCH: :white_check_mark: Benchmark ma.cov and ma.corrcoef functions. --- benchmarks/benchmarks/bench_ma.py | 44 +++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/benchmarks/benchmarks/bench_ma.py b/benchmarks/benchmarks/bench_ma.py index 26c977c9748c..f17da1a9ebe1 100644 --- a/benchmarks/benchmarks/bench_ma.py +++ b/benchmarks/benchmarks/bench_ma.py @@ -265,3 +265,47 @@ def time_where(self, mtype, msize): fun(self.nmxs > 2, self.nmxs, self.nmys) elif msize == 'big': fun(self.nmxl > 2, self.nmxl, self.nmyl) + + +class Cov(Benchmark): + param_names = ["size"] + params = [["small", "large"]] + + def setup(self, size): + # Set the proportion of masked values. + prop_mask = 0.2 + # Set up a "small" array with 10 vars and 10 obs. + rng = np.random.default_rng() + data = rng.random((10, 10), dtype=np.float32) + self.small = np.ma.array(data, mask=(data <= prop_mask)) + # Set up a "large" array with 100 vars and 100 obs. + data = rng.random((100, 100), dtype=np.float32) + self.large = np.ma.array(data, mask=(data <= prop_mask)) + + def time_cov(self, size): + if size == "small": + np.ma.cov(self.small) + if size == "large": + np.ma.cov(self.large) + + +class Corrcoef(Benchmark): + param_names = ["size"] + params = [["small", "large"]] + + def setup(self, size): + # Set the proportion of masked values. + prop_mask = 0.2 + # Set up a "small" array with 10 vars and 10 obs. + rng = np.random.default_rng() + data = rng.random((10, 10), dtype=np.float32) + self.small = np.ma.array(data, mask=(data <= prop_mask)) + # Set up a "large" array with 100 vars and 100 obs. + data = rng.random((100, 100), dtype=np.float32) + self.large = np.ma.array(data, mask=(data <= prop_mask)) + + def time_corrcoef(self, size): + if size == "small": + np.ma.corrcoef(self.small) + if size == "large": + np.ma.corrcoef(self.large) From 978a7eb401d286f2782b2181cd9c97d78ac92a81 Mon Sep 17 00:00:00 2001 From: Christopher Titchen <109701765+christopher-titchen@users.noreply.github.com> Date: Thu, 23 May 2024 20:51:17 +0100 Subject: [PATCH 371/980] DOC: :memo: Document upcoming changes for change log. 
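As a quick check of the equivalence documented below (illustrative
session; the claim holds whenever no values are masked)::

    >>> import numpy as np
    >>> rng = np.random.default_rng(0)
    >>> a = np.ma.array(rng.random((3, 5)))   # no masked values
    >>> bool(np.ma.allclose(np.ma.corrcoef(a), np.corrcoef(a.data)))
    True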
--- doc/release/upcoming_changes/26285.change.rst | 13 +++++++++++++ doc/release/upcoming_changes/26285.performance.rst | 5 +++++ 2 files changed, 18 insertions(+) create mode 100644 doc/release/upcoming_changes/26285.change.rst create mode 100644 doc/release/upcoming_changes/26285.performance.rst diff --git a/doc/release/upcoming_changes/26285.change.rst b/doc/release/upcoming_changes/26285.change.rst new file mode 100644 index 000000000000..d652c58dc799 --- /dev/null +++ b/doc/release/upcoming_changes/26285.change.rst @@ -0,0 +1,13 @@ +``ma.corrcoef`` may return a slightly different result +------------------------------------------------------ +A pairwise observation approach is currently used in `ma.corrcoef` to +calculate the standard deviations for each pair of variables. This has been +changed as it is being used to normalise the covariance, estimated using +`ma.cov`, which does not consider the observations for each variable in a +pairwise manner, rendering it unnecessary. The normalisation has been +replaced by the more appropriate standard deviation for each variable, +which significantly reduces the wall time, but will return slightly different +estimates of the correlation coefficients in cases where the observations +between a pair of variables are not aligned. However, it will return the same +estimates in all other cases, including returning the same correlation matrix +as `corrcoef` when using a masked array with no masked values. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26285.performance.rst b/doc/release/upcoming_changes/26285.performance.rst new file mode 100644 index 000000000000..2271cd0fd7ac --- /dev/null +++ b/doc/release/upcoming_changes/26285.performance.rst @@ -0,0 +1,5 @@ +``ma.cov`` and ``ma.corrcoef`` are now significantly faster +----------------------------------------------------------- +The private function `ma.extras._covhelper` has been refactored along with +`ma.cov` and `ma.corrcoef`. They are now significantly faster, particularly +on large, masked arrays. \ No newline at end of file From d46645dda0288f09339052af9185f98ee8dd1913 Mon Sep 17 00:00:00 2001 From: Christopher Titchen <109701765+christopher-titchen@users.noreply.github.com> Date: Thu, 23 May 2024 21:33:55 +0100 Subject: [PATCH 372/980] MAINT: :rotating_light: Fix ref warnings. --- doc/release/upcoming_changes/26285.performance.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/release/upcoming_changes/26285.performance.rst b/doc/release/upcoming_changes/26285.performance.rst index 2271cd0fd7ac..79009f662a0f 100644 --- a/doc/release/upcoming_changes/26285.performance.rst +++ b/doc/release/upcoming_changes/26285.performance.rst @@ -1,5 +1,5 @@ ``ma.cov`` and ``ma.corrcoef`` are now significantly faster ----------------------------------------------------------- -The private function `ma.extras._covhelper` has been refactored along with -`ma.cov` and `ma.corrcoef`. They are now significantly faster, particularly -on large, masked arrays. \ No newline at end of file +The private function has been refactored along with `ma.cov` and +`ma.corrcoef`. They are now significantly faster, particularly on large, +masked arrays. 
\ No newline at end of file From 56fe9f4ff98f346e63526f48888a015e0ca91285 Mon Sep 17 00:00:00 2001 From: Stefan van der Walt Date: Thu, 23 May 2024 16:37:15 -0700 Subject: [PATCH 373/980] Avoid gcc compiler warning warning: template-id not allowed for constructor in C++20 --- numpy/_core/src/umath/string_buffer.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 4afb2432b5ce..c5fc8949f994 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -262,12 +262,12 @@ struct Buffer { char *buf; char *after; - inline Buffer() + inline Buffer() { buf = after = NULL; } - inline Buffer(char *buf_, npy_int64 elsize_) + inline Buffer(char *buf_, npy_int64 elsize_) { buf = buf_; after = buf_ + elsize_; From 620346716c20ca94fe91f2bc1768f79d47bfa80c Mon Sep 17 00:00:00 2001 From: serge-sans-paille Date: Fri, 24 May 2024 08:39:21 +0200 Subject: [PATCH 374/980] Fix GCC -Wmaybe-uninitialized warning GCC is unable to verify data-dependent initialization, so help him a bit and default-initialized some data structures Related to #26513 --- numpy/linalg/umath_linalg.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index 4b0bb30cb56f..219c76dd6e2f 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -1523,7 +1523,7 @@ eigh_wrapper(char JOBZ, UPLO, (fortran_int)dimensions[0], dispatch_scalar())) { LINEARIZE_DATA_t matrix_in_ld; - LINEARIZE_DATA_t eigenvectors_out_ld; + LINEARIZE_DATA_t eigenvectors_out_ld = {}; /* silence uninitialized warning */ LINEARIZE_DATA_t eigenvalues_out_ld; init_linearize_data(&matrix_in_ld, @@ -2465,8 +2465,8 @@ eig_wrapper(char JOBVL, (fortran_int)dimensions[0], dispatch_scalar())) { LINEARIZE_DATA_t a_in; LINEARIZE_DATA_t w_out; - LINEARIZE_DATA_t vl_out; - LINEARIZE_DATA_t vr_out; + LINEARIZE_DATA_t vl_out = {}; /* silence uninitialized warning */ + LINEARIZE_DATA_t vr_out = {}; /* silence uninitialized warning */ init_linearize_data(&a_in, geev_params.N, geev_params.N, From d8de7a39b6c86b32496780c21e3abab5ea3be9b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Fri, 24 May 2024 11:12:38 +0200 Subject: [PATCH 375/980] Add np.alltrue to migration guide [skip actions] [skip azp] [skip cirrus] --- doc/source/numpy_2_0_migration_guide.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 78be8843cd40..7de294bb8d86 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -246,6 +246,7 @@ removed member migration guideline add_docstring It's still available as ``np.lib.add_docstring``. add_newdoc It's still available as ``np.lib.add_newdoc``. add_newdoc_ufunc It's an internal function and doesn't have a replacement. +alltrue Use ``all`` instead. asfarray Use ``np.asarray`` with a float dtype instead. byte_bounds Now it's available under ``np.lib.array_utils.byte_bounds`` cast Use ``np.asarray(arr, dtype=dtype)`` instead. @@ -253,6 +254,7 @@ cfloat Use ``np.complex128`` instead. clongfloat Use ``np.clongdouble`` instead. compat There's no replacement, as Python 2 is no longer supported. complex\_ Use ``np.complex128`` instead. +cumproduct Use ``np.cumprod`` instead. DataSource It's still available as ``np.lib.npyio.DataSource``. 
deprecate Emit ``DeprecationWarning`` with ``warnings.warn`` directly, or use ``typing.deprecated``. @@ -286,6 +288,7 @@ longfloat Use ``np.longdouble`` instead. lookfor Search NumPy's documentation directly. obj2sctype Use ``np.dtype(obj).type`` instead. PINF Use ``np.inf`` instead. +product Use ``np.prod`` instead. PZERO Use ``0.0`` instead. recfromcsv Use ``np.genfromtxt`` with comma delimiter instead. recfromtxt Use ``np.genfromtxt`` instead. @@ -301,6 +304,7 @@ set_string_function Use ``np.set_printoptions`` instead with a formatter for custom printing of NumPy objects. singlecomplex Use ``np.complex64`` instead. string\_ Use ``np.bytes_`` instead. +sometrue Use ``any`` instead. source Use ``inspect.getsource`` instead. tracemalloc_domain It's now available from ``np.lib``. unicode\_ Use ``np.str_`` instead. From fe61f00d1b380a497e91c61898c0dd3dbda66c9e Mon Sep 17 00:00:00 2001 From: serge-sans-paille Date: Fri, 24 May 2024 11:07:57 +0200 Subject: [PATCH 376/980] Avoid by-pointer parameter passing for LINEARIZE_DATA_t initialization Use value semantics instead. A constructor would be a viable approach too, just trying to keep the changes minimal. --- numpy/linalg/umath_linalg.cpp | 144 ++++++++++++++-------------------- 1 file changed, 57 insertions(+), 87 deletions(-) diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index 219c76dd6e2f..086e1c00bf25 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -539,39 +539,33 @@ const f2c_doublecomplex numeric_limits::nan = {NPY_NAN, NPY_N * column_strides: the number of bytes between consecutive columns. * output_lead_dim: BLAS/LAPACK-side leading dimension, in elements */ -typedef struct linearize_data_struct +struct linearize_data { npy_intp rows; npy_intp columns; npy_intp row_strides; npy_intp column_strides; npy_intp output_lead_dim; -} LINEARIZE_DATA_t; +}; -static inline void -init_linearize_data_ex(LINEARIZE_DATA_t *lin_data, - npy_intp rows, +static inline +linearize_data init_linearize_data_ex(npy_intp rows, npy_intp columns, npy_intp row_strides, npy_intp column_strides, npy_intp output_lead_dim) { - lin_data->rows = rows; - lin_data->columns = columns; - lin_data->row_strides = row_strides; - lin_data->column_strides = column_strides; - lin_data->output_lead_dim = output_lead_dim; + return {rows, columns, row_strides, column_strides, output_lead_dim}; } -static inline void -init_linearize_data(LINEARIZE_DATA_t *lin_data, - npy_intp rows, +static inline +linearize_data init_linearize_data(npy_intp rows, npy_intp columns, npy_intp row_strides, npy_intp column_strides) { - init_linearize_data_ex( - lin_data, rows, columns, row_strides, column_strides, columns); + return init_linearize_data_ex( + rows, columns, row_strides, column_strides, columns); } #if _UMATH_LINALG_DEBUG @@ -601,7 +595,7 @@ dump_ufunc_object(PyUFuncObject* ufunc) } static inline void -dump_linearize_data(const char* name, const LINEARIZE_DATA_t* params) +dump_linearize_data(const char* name, const linearize_data* params) { TRACE_TXT("\n\t%s rows: %zd columns: %zd"\ "\n\t\trow_strides: %td column_strides: %td"\ @@ -843,7 +837,7 @@ template static inline void * linearize_matrix(typ *dst, typ *src, - const LINEARIZE_DATA_t* data) + const linearize_data* data) { using ftyp = fortran_type_t; if (dst) { @@ -888,7 +882,7 @@ template static inline void * delinearize_matrix(typ *dst, typ *src, - const LINEARIZE_DATA_t* data) + const linearize_data* data) { using ftyp = fortran_type_t; @@ -935,7 +929,7 @@ using ftyp = 
fortran_type_t<typ>; template <typename typ> static inline void -nan_matrix(typ *dst, const LINEARIZE_DATA_t* data) +nan_matrix(typ *dst, const linearize_data* data) { int i, j; for (i = 0; i < data->rows; i++) { @@ -951,7 +945,7 @@ nan_matrix(typ *dst, const LINEARIZE_DATA_t* data) template <typename typ> static inline void -zero_matrix(typ *dst, const LINEARIZE_DATA_t* data) +zero_matrix(typ *dst, const linearize_data* data) { int i, j; for (i = 0; i < data->rows; i++) { @@ -1166,9 +1160,8 @@ slogdet(char **args, tmp_buff = (char *)malloc(matrix_size + pivot_size); if (tmp_buff) { - LINEARIZE_DATA_t lin_data; /* swapped steps to get matrix in FORTRAN order */ - init_linearize_data(&lin_data, m, m, steps[1], steps[0]); + linearize_data lin_data = init_linearize_data(m, m, steps[1], steps[0]); BEGIN_OUTER_LOOP_3 linearize_matrix((typ*)tmp_buff, (typ*)args[0], &lin_data); slogdet_single_element(m, @@ -1218,11 +1211,11 @@ det(char **args, tmp_buff = (char *)malloc(matrix_size + pivot_size); if (tmp_buff) { - LINEARIZE_DATA_t lin_data; + /* swapped steps to get matrix in FORTRAN order */ + linearize_data lin_data = init_linearize_data(m, m, steps[1], steps[0]); + typ sign; basetyp logdet; - /* swapped steps to get matrix in FORTRAN order */ - init_linearize_data(&lin_data, m, m, steps[1], steps[0]); BEGIN_OUTER_LOOP_2 linearize_matrix((typ*)tmp_buff, (typ*)args[0], &lin_data); @@ -1522,20 +1515,11 @@ eigh_wrapper(char JOBZ, JOBZ, UPLO, (fortran_int)dimensions[0], dispatch_scalar<typ>())) { - LINEARIZE_DATA_t matrix_in_ld; - LINEARIZE_DATA_t eigenvectors_out_ld = {}; /* silence uninitialized warning */ - LINEARIZE_DATA_t eigenvalues_out_ld; - - init_linearize_data(&matrix_in_ld, - eigh_params.N, eigh_params.N, - steps[1], steps[0]); - init_linearize_data(&eigenvalues_out_ld, - 1, eigh_params.N, - 0, steps[2]); + linearize_data matrix_in_ld = init_linearize_data(eigh_params.N, eigh_params.N, steps[1], steps[0]); + linearize_data eigenvalues_out_ld = init_linearize_data(1, eigh_params.N, 0, steps[2]); + linearize_data eigenvectors_out_ld = {}; /* silence uninitialized warning */ if ('V' == eigh_params.JOBZ) { - init_linearize_data(&eigenvectors_out_ld, - eigh_params.N, eigh_params.N, - steps[4], steps[3]); + eigenvectors_out_ld = init_linearize_data(eigh_params.N, eigh_params.N, steps[4], steps[3]); } for (iter = 0; iter < outer_dim; ++iter) { @@ -1739,11 +1723,9 @@ using ftyp = fortran_type_t<typ>; n = (fortran_int)dimensions[0]; nrhs = (fortran_int)dimensions[1]; if (init_gesv(&params, n, nrhs)) { - LINEARIZE_DATA_t a_in, b_in, r_out; - - init_linearize_data(&a_in, n, n, steps[1], steps[0]); - init_linearize_data(&b_in, nrhs, n, steps[3], steps[2]); - init_linearize_data(&r_out, nrhs, n, steps[5], steps[4]); + linearize_data a_in = init_linearize_data(n, n, steps[1], steps[0]); + linearize_data b_in = init_linearize_data(nrhs, n, steps[3], steps[2]); + linearize_data r_out = init_linearize_data(nrhs, n, steps[5], steps[4]); BEGIN_OUTER_LOOP_3 int not_ok; @@ -1778,10 +1760,9 @@ using ftyp = fortran_type_t<typ>; n = (fortran_int)dimensions[0]; if (init_gesv(&params, n, 1)) { - LINEARIZE_DATA_t a_in, b_in, r_out; - init_linearize_data(&a_in, n, n, steps[1], steps[0]); - init_linearize_data(&b_in, 1, n, 1, steps[2]); - init_linearize_data(&r_out, 1, n, 1, steps[3]); + linearize_data a_in = init_linearize_data(n, n, steps[1], steps[0]); + linearize_data b_in = init_linearize_data(1, n, 1, steps[2]); + linearize_data r_out = init_linearize_data(1, n, 1, steps[3]); BEGIN_OUTER_LOOP_3 int not_ok; @@ -1815,9 +1796,8 @@ using ftyp = fortran_type_t<typ>; n = 
(fortran_int)dimensions[0]; if (init_gesv(&params, n, n)) { - LINEARIZE_DATA_t a_in, r_out; - init_linearize_data(&a_in, n, n, steps[1], steps[0]); - init_linearize_data(&r_out, n, n, steps[3], steps[2]); + linearize_data a_in = init_linearize_data(n, n, steps[1], steps[0]); + linearize_data r_out = init_linearize_data(n, n, steps[3], steps[2]); BEGIN_OUTER_LOOP_2 int not_ok; @@ -1976,9 +1956,8 @@ cholesky(char uplo, char **args, npy_intp const *dimensions, npy_intp const *ste n = (fortran_int)dimensions[0]; if (init_potrf(&params, uplo, n)) { - LINEARIZE_DATA_t a_in, r_out; - init_linearize_data(&a_in, n, n, steps[1], steps[0]); - init_linearize_data(&r_out, n, n, steps[3], steps[2]); + linearize_data a_in = init_linearize_data(n, n, steps[1], steps[0]); + linearize_data r_out = init_linearize_data(n, n, steps[3], steps[2]); BEGIN_OUTER_LOOP_2 int not_ok; linearize_matrix(params.A, (ftyp*)args[0], &a_in); @@ -2463,27 +2442,25 @@ eig_wrapper(char JOBVL, if (init_geev(&geev_params, JOBVL, JOBVR, (fortran_int)dimensions[0], dispatch_scalar<typ>())) { - LINEARIZE_DATA_t a_in; - LINEARIZE_DATA_t w_out; - LINEARIZE_DATA_t vl_out = {}; /* silence uninitialized warning */ - LINEARIZE_DATA_t vr_out = {}; /* silence uninitialized warning */ + linearize_data vl_out = {}; /* silence uninitialized warning */ + linearize_data vr_out = {}; /* silence uninitialized warning */ - init_linearize_data(&a_in, + linearize_data a_in = init_linearize_data( geev_params.N, geev_params.N, steps[1], steps[0]); steps += 2; - init_linearize_data(&w_out, + linearize_data w_out = init_linearize_data( 1, geev_params.N, 0, steps[0]); steps += 1; if ('V' == geev_params.JOBVL) { - init_linearize_data(&vl_out, + vl_out = init_linearize_data( geev_params.N, geev_params.N, steps[1], steps[0]); steps += 2; } if ('V' == geev_params.JOBVR) { - init_linearize_data(&vr_out, + vr_out = init_linearize_data( geev_params.N, geev_params.N, steps[1], steps[0]); } @@ -2951,13 +2928,13 @@ using basetyp = basetype_t<typ>; (fortran_int)dimensions[0], (fortran_int)dimensions[1], dispatch_scalar<typ>())) { - LINEARIZE_DATA_t a_in, u_out = {}, s_out = {}, v_out = {}; + linearize_data u_out = {}, s_out = {}, v_out = {}; fortran_int min_m_n = params.M < params.N ? 
params.M : params.N; - init_linearize_data(&a_in, params.N, params.M, steps[1], steps[0]); + linearize_data a_in = init_linearize_data(params.N, params.M, steps[1], steps[0]); if ('N' == params.JOBZ) { /* only the singular values are wanted */ - init_linearize_data(&s_out, 1, min_m_n, 0, steps[2]); + s_out = init_linearize_data(1, min_m_n, 0, steps[2]); } else { fortran_int u_columns, v_rows; if ('S' == params.JOBZ) { @@ -2967,13 +2944,13 @@ dispatch_scalar<typ>())) { u_columns = params.M; v_rows = params.N; } - init_linearize_data(&u_out, + u_out = init_linearize_data( u_columns, params.M, steps[3], steps[2]); - init_linearize_data(&s_out, + s_out = init_linearize_data( 1, min_m_n, 0, steps[4]); - init_linearize_data(&v_out, + v_out = init_linearize_data( params.N, v_rows, steps[6], steps[5]); } @@ -3294,10 +3271,9 @@ using ftyp = fortran_type_t<typ>; n = (fortran_int)dimensions[1]; if (init_geqrf(&params, m, n)) { - LINEARIZE_DATA_t a_in, tau_out; - init_linearize_data(&a_in, n, m, steps[1], steps[0]); - init_linearize_data(&tau_out, 1, fortran_int_min(m, n), 1, steps[2]); + linearize_data a_in = init_linearize_data(n, m, steps[1], steps[0]); + linearize_data tau_out = init_linearize_data(1, fortran_int_min(m, n), 1, steps[2]); BEGIN_OUTER_LOOP_2 int not_ok; @@ -3588,11 +3564,9 @@ using ftyp = fortran_type_t<typ>; n = (fortran_int)dimensions[1]; if (init_gqr(&params, m, n)) { - LINEARIZE_DATA_t a_in, tau_in, q_out; - - init_linearize_data(&a_in, n, m, steps[1], steps[0]); - init_linearize_data(&tau_in, 1, fortran_int_min(m, n), 1, steps[2]); - init_linearize_data(&q_out, fortran_int_min(m, n), m, steps[4], steps[3]); + linearize_data a_in = init_linearize_data(n, m, steps[1], steps[0]); + linearize_data tau_in = init_linearize_data(1, fortran_int_min(m, n), 1, steps[2]); + linearize_data q_out = init_linearize_data(fortran_int_min(m, n), m, steps[4], steps[3]); BEGIN_OUTER_LOOP_3 int not_ok; @@ -3644,11 +3618,9 @@ using ftyp = fortran_type_t<typ>; if (init_gqr_complete(&params, m, n)) { - LINEARIZE_DATA_t a_in, tau_in, q_out; - - init_linearize_data(&a_in, n, m, steps[1], steps[0]); - init_linearize_data(&tau_in, 1, fortran_int_min(m, n), 1, steps[2]); - init_linearize_data(&q_out, m, m, steps[4], steps[3]); + linearize_data a_in = init_linearize_data(n, m, steps[1], steps[0]); + linearize_data tau_in = init_linearize_data(1, fortran_int_min(m, n), 1, steps[2]); + linearize_data q_out = init_linearize_data(m, m, steps[4], steps[3]); BEGIN_OUTER_LOOP_3 int not_ok; @@ -4051,13 +4023,11 @@ using basetyp = basetype_t<typ>; excess = m - n; if (init_gelsd(&params, m, n, nrhs, dispatch_scalar<typ>{})) { - LINEARIZE_DATA_t a_in, b_in, x_out, s_out, r_out; - - init_linearize_data(&a_in, n, m, steps[1], steps[0]); - init_linearize_data_ex(&b_in, nrhs, m, steps[3], steps[2], fortran_int_max(n, m)); - init_linearize_data_ex(&x_out, nrhs, n, steps[5], steps[4], fortran_int_max(n, m)); - init_linearize_data(&r_out, 1, nrhs, 1, steps[6]); - init_linearize_data(&s_out, 1, fortran_int_min(n, m), 1, steps[7]); + linearize_data a_in = init_linearize_data(n, m, steps[1], steps[0]); + linearize_data b_in = init_linearize_data_ex(nrhs, m, steps[3], steps[2], fortran_int_max(n, m)); + linearize_data x_out = init_linearize_data_ex(nrhs, n, steps[5], steps[4], fortran_int_max(n, m)); + linearize_data r_out = init_linearize_data(1, nrhs, 1, steps[6]); + linearize_data s_out = init_linearize_data(1, fortran_int_min(n, m), 1, steps[7]); BEGIN_OUTER_LOOP_7 int not_ok; From 19ef0d1102654c3e4f4ee0d6b3bdffbce084558c Mon Sep 17 00:00:00 2001 From: Warren Weckesser 
Date: Fri, 24 May 2024 09:02:57 -0400 Subject: [PATCH 377/980] BUG: Fix handling of size=() in Generator.choice when a.ndim > 1. Closes gh-26518. --- numpy/random/_generator.pyx | 2 +- numpy/random/tests/test_generator_mt19937.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 82129afd0a27..0d134c823588 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -992,7 +992,7 @@ cdef class Generator: if a.ndim == 0: return idx - if not is_scalar and idx.ndim == 0: + if not is_scalar and idx.ndim == 0 and a.ndim == 1: # If size == () then the user requested a 0-d array as opposed to # a scalar object when size is None. However a[idx] is always a # scalar and not an array. So this makes sure the result is an diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index aca1ccde24af..0f3f9fcf9658 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -933,6 +933,11 @@ def test_choice_large_sample(self): res = hashlib.sha256(actual.view(np.int8)).hexdigest() assert_(choice_hash == res) + def test_choice_array_size_empty_tuple(self): + assert_array_equal(random.choice([[1, 2, 3]], size=()), [1, 2, 3]) + assert_array_equal(random.choice([[1]], size=()), [1]) + assert_array_equal(random.choice([[1]], size=(), axis=1), [1]) + def test_bytes(self): random = Generator(MT19937(self.seed)) actual = random.bytes(10) From 4532fdffe6714f0f1bb2c121c76ab12d94a2c999 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 24 May 2024 09:56:59 -0400 Subject: [PATCH 378/980] TST: Add another test of Generator.choice with size=(). Also add 'strict=True' to a couple of the existing equality assertions. --- numpy/random/tests/test_generator_mt19937.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 0f3f9fcf9658..514f9af2ce8c 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -934,9 +934,13 @@ def test_choice_large_sample(self): assert_(choice_hash == res) def test_choice_array_size_empty_tuple(self): + random = Generator(MT19937(self.seed)) + assert_array_equal(random.choice([1, 2, 3], size=()), np.array(1), + strict=True) assert_array_equal(random.choice([[1, 2, 3]], size=()), [1, 2, 3]) - assert_array_equal(random.choice([[1]], size=()), [1]) - assert_array_equal(random.choice([[1]], size=(), axis=1), [1]) + assert_array_equal(random.choice([[1]], size=()), [1], strict=True) + assert_array_equal(random.choice([[1]], size=(), axis=1), [1], + strict=True) def test_bytes(self): random = Generator(MT19937(self.seed)) From 5479fc5f63c5bfd1ab797a6d09c9f16f660f32f0 Mon Sep 17 00:00:00 2001 From: Ross Barnowski Date: Fri, 24 May 2024 10:37:13 -0700 Subject: [PATCH 379/980] DOC: Touchup wording and mv detail to extended summary [skip azp] [skip actions] [skip cirrus] --- numpy/ma/core.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 6b5066bc0ef6..453c63614d2e 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -608,11 +608,12 @@ def common_fill_value(a, b): def filled(a, fill_value=None): """ - Return a copy of `a`, with masked values replaced by a fill value. - However, if there are no masked values to fill, `a` will be returned - instead as an ndarray. 
+ Return input as an `~numpy.ndarray`, with masked values replaced by + `fill_value`. If `a` is not a `MaskedArray`, `a` itself is returned. + If `a` is a `MaskedArray` with no masked values, then ``a.data`` is + returned. If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to ``a.fill_value``. From e7da8d97700851cfc407e70ff7664516cae2d8cd Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 24 May 2024 11:58:13 -0600 Subject: [PATCH 380/980] BUG: fix incorrect error handling for dtype('a') deprecation --- numpy/_core/src/multiarray/conversion_utils.c | 18 ++++++++++++------ numpy/_core/src/multiarray/descriptor.c | 16 ++++++++-------- numpy/_core/tests/test_deprecations.py | 14 +++++++++----- 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index ac1fb3ae38dc..7e89859fc124 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -1341,13 +1341,19 @@ PyArray_TypestrConvert(int itemsize, int gentype) break; case NPY_DEPRECATED_STRINGLTR2: - DEPRECATE( - "Data type alias `a` was removed in NumPy 2.0. " - "Use `S` alias instead." - ); - newtype = NPY_STRING; + { + /* + * raise a deprecation warning, which might be an exception + * if warnings are errors, so leave newtype unset in that + * case + */ + int ret = DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " + "Use the 'S' alias instead."); + if (ret == 0) { + newtype = NPY_STRING; + } break; - + } case NPY_UNICODELTR: newtype = NPY_UNICODE; break; diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 0617a7b8de44..1564902be674 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -1828,10 +1828,10 @@ _convert_from_str(PyObject *obj, int align) break; case NPY_DEPRECATED_STRINGLTR2: - DEPRECATE( - "Data type alias `a` was removed in NumPy 2.0. " - "Use `S` alias instead." - ); + if (DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " + "Use the 'S' alias instead.") < 0) { + return NULL; + } check_num = NPY_STRING; break; @@ -1900,10 +1900,10 @@ _convert_from_str(PyObject *obj, int align) } if (strcmp(type, "a") == 0) { - DEPRECATE( - "Data type alias `a` was removed in NumPy 2.0. " - "Use `S` alias instead." - ); + if (DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. 
" + "Use the 'S' alias instead.") < 0) { + return NULL; + } } /* diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index a10c6138e10e..648a1d22ea99 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -670,18 +670,22 @@ def test_lib_functions_deprecation_call(self): class TestDeprecatedDTypeAliases(_DeprecationTestCase): - @staticmethod - def _check_for_warning(func): + def _check_for_warning(self, func): with warnings.catch_warnings(record=True) as caught_warnings: func() assert len(caught_warnings) == 1 w = caught_warnings[0] assert w.category is DeprecationWarning - assert "alias `a` was removed in NumPy 2.0" in str(w.message) + assert "alias 'a' was deprecated in NumPy 2.0" in str(w.message) def test_a_dtype_alias(self): - self._check_for_warning(lambda: np.dtype("a")) - self._check_for_warning(lambda: np.dtype("a10")) + for dtype in ["a", "a10"]: + f = lambda: np.dtype(dtype) + self._check_for_warning(f) + self.assert_deprecated(f) + f = lambda: np.array(["hello", "world"]).astype("a10") + self._check_for_warning(f) + self.assert_deprecated(f) class TestDeprecatedArrayWrap(_DeprecationTestCase): From 92e52863672a8674313afb5f365884647a43f3bb Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 24 May 2024 14:06:24 -0600 Subject: [PATCH 381/980] BUG: fix assert in PyArry_ConcatenateArrays with StringDType --- numpy/_core/src/multiarray/multiarraymodule.c | 1 - numpy/_core/tests/test_stringdtype.py | 7 +++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 4946465617bc..646b2611a712 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -467,7 +467,6 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, if (ret == NULL) { return NULL; } - assert(PyArray_DESCR(ret) == descr); } /* diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 9c0324c91f71..228b5e949cfd 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -504,6 +504,13 @@ def test_creation_functions(): assert np.empty(3, dtype="T")[0] == "" +def test_concatenate(string_list): + sarr = np.array(string_list, dtype="T") + sarr_cat = np.array(string_list + string_list, dtype="T") + + assert_array_equal(np.concatenate([sarr], axis=0), sarr) + + def test_create_with_copy_none(string_list): arr = np.array(string_list, dtype=StringDType()) # create another stringdtype array with an arena that has a different From 82232918903818f0d5ebbdb75cb96166baeda87e Mon Sep 17 00:00:00 2001 From: quotuva Date: Sat, 25 May 2024 17:02:35 +0530 Subject: [PATCH 382/980] BUG: Fix entry-point of Texinfo docs The info documentation was buildable after this change was applied to v1.26.5. 
[skip azp] [skip circle] [skip cirrus] --- doc/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 8a4713e1c721..83c58c2c3c2d 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -388,7 +388,7 @@ def setup(app): # ----------------------------------------------------------------------------- texinfo_documents = [ - ("contents", 'numpy', 'NumPy Documentation', _stdauthor, 'NumPy', + ("index", 'numpy', 'NumPy Documentation', _stdauthor, 'NumPy', "NumPy: array processing for numbers, strings, records, and objects.", 'Programming', 1), From 0206af4820cccd03d134b6005bbd957aeebfca21 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 26 May 2024 11:26:14 +0200 Subject: [PATCH 383/980] DOC: update for review comments [skip actions] [skip cirrus] [skip azp] --- doc/neps/roadmap.rst | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst index 4b73c3258f56..7ea322c51182 100644 --- a/doc/neps/roadmap.rst +++ b/doc/neps/roadmap.rst @@ -83,7 +83,8 @@ Extensibility We aim to continue making it easier to extend NumPy. The primary topic here is to improve the dtype system - see for example :ref:`NEP41` and related NEPs linked -from it. In NumPy 2.0, a new C API for user-defined dtypes was made public. We aim +from it. In NumPy 2.0, a `new C API for user-defined dtypes `__ +was made public. We aim to encourage its usage and improve this API further. Ideas for new dtypes that may be developed outside of the main NumPy repository @@ -145,11 +146,14 @@ NumPy 2.0 stabilization & downstream usage We made a very large amount of changes (and improvements!) in NumPy 2.0. The release process has taken a very long time, and part of the ecosystem is still -catching up. We may need to slow down for a while, and possible help the rest +catching up. We may need to slow down for a while, and possibly help the rest of the ecosystem with adapting to the ABI and API changes. We will need to assess the costs and benefits to NumPy itself, -downstream package authors, and end users. Based on that assessment +downstream package authors, and end users. Based on that assessment, we need to +come to a conclusion on whether it's realistic to do another ABI-breaking +release again in the future or not. This will also inform the future evolution +of our C API. Security @@ -175,7 +179,7 @@ Maintenance - dtypes that support missing values - Write a strategy on how to deal with overlap between NumPy and SciPy for ``linalg``. -- Deprecate ``np.matrix`` (very slowly) - this is feasible ones the switch-over +- Deprecate ``np.matrix`` (very slowly) - this is feasible once the switch-over from sparse matrices to sparse arrays in SciPy is complete. - Add new indexing modes for "vectorized indexing" and "outer indexing" (see :ref:`NEP21`). - Make the polynomial API easier to use. 
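A minimal Python-level sketch of the behaviour guarded by the dtype('a') error-handling fix above (commit e7da8d97, PATCH 380). This is an illustration, not part of any patch: it assumes a NumPy 2.0.x build in which the 'a' alias still emits a DeprecationWarning (later versions remove the alias entirely). Once warnings are promoted to errors, np.dtype("a10") must raise instead of silently returning a dtype, which is why the C code now checks the return value of DEPRECATE() before assigning newtype.

import warnings
import numpy as np

# Assumes a NumPy 2.0.x build where the 'a' alias still warns; with
# warnings promoted to errors, the DEPRECATE() call inside the C dtype
# parser returns -1 and the exception must propagate to the caller.
with warnings.catch_warnings():
    warnings.simplefilter("error", DeprecationWarning)
    try:
        np.dtype("a10")
    except DeprecationWarning as exc:
        print("np.dtype('a10') raised:", exc)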
From c654203a63ea5e93e1c8096cc303f0f870371187 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Sun, 26 May 2024 16:04:24 +0300 Subject: [PATCH 384/980] BUG: cast missing in PyPy-specific f2py code, pin spin in CI (#26534) * BUG: cast missing in PyPy-specific f2py code * CI: use build_requirements.txt --- .github/workflows/linux.yml | 2 +- .github/workflows/windows.yml | 2 +- numpy/f2py/cb_rules.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index bb9b13e0bcae..aef580c00e30 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -166,7 +166,7 @@ jobs: run: | sudo apt-get update sudo apt-get install libopenblas-dev ninja-build - pip install spin cython asv virtualenv packaging + pip install asv virtualenv packaging -r requirements/build_requirements.txt - name: Install NumPy run: | spin build -- -Dcpu-dispatch=none diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index f6e0f1b3a5ca..4d6c811b1409 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -37,7 +37,7 @@ jobs: - name: Install build dependencies from PyPI run: | - python -m pip install spin Cython + python -m pip install -r requirements/build_requirements.txt - name: Install pkg-config run: | diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py index 721e075b6c73..faf8dd401301 100644 --- a/numpy/f2py/cb_rules.py +++ b/numpy/f2py/cb_rules.py @@ -122,7 +122,7 @@ #setdims# #ifdef PYPY_VERSION #define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) - capi_arglist_list = PySequence_List(capi_arglist); + capi_arglist_list = PySequence_List((PyObject *)capi_arglist); if (capi_arglist_list == NULL) goto capi_fail; #else #define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) From 93916d6d1f1d9524265361a2719d53c84ac0f505 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Sun, 26 May 2024 14:26:51 -0300 Subject: [PATCH 385/980] DOC: improve np.gradient examples --- numpy/lib/_function_base_impl.py | 48 ++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index f2441d16fbc2..b508fd256e13 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1079,6 +1079,54 @@ def gradient(f, *varargs, axis=None, edge_order=1): array([[ 2., 2., -1.], [ 2., 2., -1.]]) + The `varargs` argument defines the spacing between sample points in the + input array. It can take two forms: + + 1. Receives scalars representing sample distance: + + >>> dx = 2 + >>> x = np.array([0., 2., 4., 6., 8.]) + >>> y = x ** 2 + >>> np.gradient(y, dx, edge_order=2) + array([ 0., 4., 8., 12., 16.]) # y = 2x + + 2. Receives arrays specifying coordinates: + + >>> np.gradient(y, x, edge_order=2) + array([ 0., 4., 8., 12., 16.]) # y = 2x + + It's possible to provide different data for spacing along each dimension. + The number of arguments must match the number of dimensions in the input + data. 
+ + >>> dx = 2 + >>> dy = 3 + >>> x = np.arange(0, 6, dx) + >>> y = np.arange(0, 9, dy) + >>> xs, ys = np.meshgrid(x, y) + >>> zs = xs + ys + zs + array([[ 0, 2, 4], + [ 3, 5, 7], + [ 6, 8, 10]]) + >>> np.gradient(zs, dy, dx) # Passing two scalars + (array([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]]), + array([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]])) + + Mixing scalars and arrays is also allowed: + + >>> np.gradient(zs, y, dx) # Passing one array and one scalar + (array([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]]), + array([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]])) + Notes ----- Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous From 00039295bc68e9cd2f947377162421173ab204a4 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Sun, 26 May 2024 14:33:26 -0300 Subject: [PATCH 386/980] DOC: update np.gradient examples --- numpy/lib/_function_base_impl.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index b508fd256e13..1d7a762aaee5 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1024,7 +1024,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): Examples -------- - >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) + >>> f = np.array([1, 2, 4, 7, 11, 16]) >>> np.gradient(f) array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(f, 2) @@ -1040,7 +1040,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): Or a non uniform one: - >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) + >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.]) >>> np.gradient(f, x) array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) @@ -1048,20 +1048,22 @@ def gradient(f, *varargs, axis=None, edge_order=1): axis. In this example the first array stands for the gradient in rows and the second one in columns direction: - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) - [array([[ 2., 2., -1.], - [ 2., 2., -1.]]), array([[1. , 2.5, 4. ], - [1. , 1. , 1. ]])] + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]])) + (array([[ 2., 2., -1.], + [ 2., 2., -1.]]), + array([[1. , 2.5, 4. ], + [1. , 1. , 1. ]])) In this example the spacing is also specified: uniform for axis=0 and non uniform for axis=1 >>> dx = 2. >>> y = [1., 1.5, 3.5] - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) - [array([[ 1. , 1. , -0.5], - [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ], - [2. , 1.7, 0.5]])] + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]), dx, y) + (array([[ 1. , 1. , -0.5], + [ 1. , 1. , -0.5]]), + array([[2. , 2. , 2. ], + [2. 
, 1.7, 0.5]])) It is possible to specify how boundaries are treated using `edge_order` From c7d3acd2f8f0ef687a4190a89bb236837adce536 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 26 May 2024 17:09:35 +0000 Subject: [PATCH 387/980] TST: Add minimal reproducer for gh-26148 Co-authored-by: xiangyi-wang Co-authored-by: RibomBalt Co-authored-by: gmloose --- numpy/f2py/tests/src/regression/f77comments.f | 13 +++++++++++++ numpy/f2py/tests/test_regression.py | 12 ++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 numpy/f2py/tests/src/regression/f77comments.f diff --git a/numpy/f2py/tests/src/regression/f77comments.f b/numpy/f2py/tests/src/regression/f77comments.f new file mode 100644 index 000000000000..2dd921d166ae --- /dev/null +++ b/numpy/f2py/tests/src/regression/f77comments.f @@ -0,0 +1,13 @@ + SUBROUTINE TESTSUB( + & INPUT1, INPUT2, !Input + & OUTPUT1, OUTPUT2) !Output + + IMPLICIT NONE + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + + OUTPUT1 = INPUT1 + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 + + RETURN + END SUBROUTINE TESTSUB diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index c0a8045d91b9..9755f9a1b426 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -76,3 +76,15 @@ def test_gh25344(self): exp = 7.0 res = self.module.add(3.0, 4.0) assert exp == res + +class TestF77Comments(util.F2PyTest): + # Check that comments are stripped from F77 continuation lines + sources = [util.getpath("tests", "src", "regression", "f77comments.f")] + + @pytest.mark.slow + def test_gh26148(self): + x1 = np.array(3, dtype=np.int32) + x2 = np.array(5, dtype=np.int32) + res=self.module.testsub(x1, x2) + assert(res[0] == 8) + assert(res[1] == 15) From 59d133f7a71caedced16800ac63e1a7f13a5486e Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 26 May 2024 17:17:26 +0000 Subject: [PATCH 388/980] MAINT: Fix spelling and formatting issue [f2py] --- numpy/f2py/_backends/_distutils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/_backends/_distutils.py b/numpy/f2py/_backends/_distutils.py index e9b22a3921a5..f2436f86a7e6 100644 --- a/numpy/f2py/_backends/_distutils.py +++ b/numpy/f2py/_backends/_distutils.py @@ -13,9 +13,9 @@ class DistutilsBackend(Backend): def __init__(sef, *args, **kwargs): warnings.warn( - "distutils has been deprecated since NumPy 1.26.x" + "\ndistutils has been deprecated since NumPy 1.26.x\n" "Use the Meson backend instead, or generate wrappers" - "without -c and use a custom build script", + " without -c and use a custom build script", VisibleDeprecationWarning, stacklevel=2, ) From 3e1b67a069add0c883a646fcb08b034d08d18659 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 26 May 2024 17:17:48 +0000 Subject: [PATCH 389/980] BUG: Fix comment handling for ! 
in F77 Closes gh-26148 --- numpy/f2py/crackfortran.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 8d3fc27608bd..84a82581fc14 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -425,11 +425,11 @@ def readfortrancode(ffile, dowithline=show, istop=1): if l[-1] not in "\n\r\f": break l = l[:-1] - if not strictf77: - (l, rl) = split_by_unquoted(l, '!') - l += ' ' - if rl[:5].lower() == '!f2py': # f2py directive - l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') + # Unconditionally remove comments + (l, rl) = split_by_unquoted(l, '!') + l += ' ' + if rl[:5].lower() == '!f2py': # f2py directive + l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') if l.strip() == '': # Skip empty line if sourcecodeform == 'free': # In free form, a statement continues in the next line From 0a3d0b1ad6505277daffde343796ab0a7cbae5ba Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 26 May 2024 17:24:31 +0000 Subject: [PATCH 390/980] MAINT: Cleanup now unnecessary strictf77 path --- numpy/f2py/crackfortran.py | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 84a82581fc14..2c6fa83889ca 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -466,25 +466,13 @@ def readfortrancode(ffile, dowithline=show, istop=1): finalline = '' origfinalline = '' else: - if not strictf77: - # F90 continuation - r = cont1.match(l) - if r: - l = r.group('line') # Continuation follows .. - if cont: - ll = ll + cont2.match(l).group('line') - finalline = '' - origfinalline = '' - else: - # clean up line beginning from possible digits. - l = ' ' + l[5:] - if localdolowercase: - finalline = ll.lower() - else: - finalline = ll - origfinalline = ll - ll = l - cont = (r is not None) + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' else: # clean up line beginning from possible digits. l = ' ' + l[5:] From 53e64c36f24208c5f7969c0d383433b5389182d5 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 26 May 2024 17:24:48 +0000 Subject: [PATCH 391/980] TST: Ensure simplified strictf77 path works --- numpy/f2py/tests/src/regression/f90continuation.f90 | 9 +++++++++ numpy/f2py/tests/test_regression.py | 12 ++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 numpy/f2py/tests/src/regression/f90continuation.f90 diff --git a/numpy/f2py/tests/src/regression/f90continuation.f90 b/numpy/f2py/tests/src/regression/f90continuation.f90 new file mode 100644 index 000000000000..879e716bbec6 --- /dev/null +++ b/numpy/f2py/tests/src/regression/f90continuation.f90 @@ -0,0 +1,9 @@ +SUBROUTINE TESTSUB(INPUT1, & ! Hello +! commenty +INPUT2, OUTPUT1, OUTPUT2) ! 
more comments + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + OUTPUT1 = INPUT1 + & + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 +END SUBROUTINE TESTSUB diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 9755f9a1b426..ebddcacbdf83 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -88,3 +88,15 @@ def test_gh26148(self): assert(res[0] == 8) assert(res[1] == 15) + +class TestF90Continuation(util.F2PyTest): + # Check that comments are stripped from F90 continuation lines + sources = [util.getpath("tests", "src", "regression", "f90continuation.f90")] + + @pytest.mark.slow + def test_gh26148b(self): + x1 = np.array(3, dtype=np.int32) + x2 = np.array(5, dtype=np.int32) + res=self.module.testsub(x1, x2) + assert(res[0] == 8) + assert(res[1] == 15) From 088857718dd8212a0b6dbe38fe75d4eb4d125f1c Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 26 May 2024 17:34:06 +0000 Subject: [PATCH 392/980] TST: Add one for gh-26466 Closes gh-26466 Since that is also related to comment parsing in F77 --- numpy/f2py/tests/src/regression/f77comments.f | 13 +++++++++++++ numpy/f2py/tests/test_regression.py | 8 ++++++++ 2 files changed, 21 insertions(+) diff --git a/numpy/f2py/tests/src/regression/f77comments.f b/numpy/f2py/tests/src/regression/f77comments.f index 2dd921d166ae..452a01a14439 100644 --- a/numpy/f2py/tests/src/regression/f77comments.f +++ b/numpy/f2py/tests/src/regression/f77comments.f @@ -11,3 +11,16 @@ SUBROUTINE TESTSUB( RETURN END SUBROUTINE TESTSUB + + SUBROUTINE TESTSUB2(OUTPUT) + IMPLICIT NONE + INTEGER, PARAMETER :: N = 10 ! Array dimension + REAL, INTENT(OUT) :: OUTPUT(N) + INTEGER :: I + + DO I = 1, N + OUTPUT(I) = I * 2.0 + END DO + + RETURN + END diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index ebddcacbdf83..7da62d6cb287 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -2,6 +2,7 @@ import pytest import numpy as np +import numpy.testing as npt from . import util @@ -89,6 +90,13 @@ def test_gh26148(self): assert(res[0] == 8) assert(res[1] == 15) + @pytest.mark.slow + def test_gh26466(self): + # Check that comments after PARAMETER statements are stripped + expected = np.arange(1, 11, dtype=np.float32)*2 + res=self.module.testsub2() + npt.assert_allclose(expected, res) + class TestF90Continuation(util.F2PyTest): # Check that comments are stripped from F90 continuation lines sources = [util.getpath("tests", "src", "regression", "f90continuation.f90")] From 0a3df247dd470aceb17a65d17470f119b082c8ae Mon Sep 17 00:00:00 2001 From: Andras Deak Date: Mon, 27 May 2024 00:26:12 +0200 Subject: [PATCH 393/980] MAINT: Remove redundant print from bug report issue template --- .github/ISSUE_TEMPLATE/bug-report.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 0367f937a74f..b237d52424ac 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -56,7 +56,7 @@ body: label: "Runtime Environment:" description: | 1. Install `threadpoolctl` (e.g. with `pip` or `conda`) - 2. Paste the output of `import numpy; print(numpy.show_runtime())`. + 2. Paste the output of `import numpy; numpy.show_runtime()`. Note: Only valid for NumPy 1.24 or newer. 
validations: From ce2817c77a44be98fe484c5672e105266346e583 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 27 May 2024 09:19:06 +0200 Subject: [PATCH 394/980] BUG: Fix typo in array-wrap code that led to a memory leak The typo led to the reference not being correctly XDECREF'd; there was another (almost never taken) path that was missing the decref (could use goto there also, but OK). Also adds a regression test for the code where it was noticed. --- numpy/_core/src/multiarray/arraywrap.c | 7 +++++-- numpy/_core/tests/test_umath.py | 13 ++++++++++++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/arraywrap.c b/numpy/_core/src/multiarray/arraywrap.c index d72fab0e4c98..8b37798f983b 100644 --- a/numpy/_core/src/multiarray/arraywrap.c +++ b/numpy/_core/src/multiarray/arraywrap.c @@ -159,7 +159,7 @@ npy_apply_wrap( } else { /* Replace passed wrap/wrap_type (borrowed refs) with new_wrap/type. */ - PyObject *new_wrap = PyArray_LookupSpecial_OnInstance( + new_wrap = PyArray_LookupSpecial_OnInstance( original_out, npy_ma_str_array_wrap); if (new_wrap != NULL) { wrap = new_wrap; @@ -177,11 +177,13 @@ npy_apply_wrap( */ if (!return_scalar && !force_wrap && (PyObject *)Py_TYPE(obj) == wrap_type) { + Py_XDECREF(new_wrap); Py_INCREF(obj); return obj; } if (wrap == Py_None) { + Py_XDECREF(new_wrap); Py_INCREF(obj); if (return_scalar) { /* @@ -239,8 +241,9 @@ npy_apply_wrap( wrap, arr, py_context, (return_scalar && PyArray_NDIM(arr) == 0) ? Py_True : Py_False, NULL); - if (res != NULL) + if (res != NULL) { goto finish; + } else if (!PyErr_ExceptionMatches(PyExc_TypeError)) { goto finish; } diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 7a3d1078647a..ca95dffd6fe3 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -18,7 +18,7 @@ assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings, _gen_alignment_data, assert_array_almost_equal_nulp, IS_WASM, IS_MUSL, - IS_PYPY + IS_PYPY, HAS_REFCOUNT ) from numpy.testing._private.utils import _glibc_older_than @@ -263,6 +263,17 @@ def __array_wrap__(self, arr, context=None, return_scalar=False): # Out argument must be tuple, since there are multiple outputs. r1, r2 = np.frexp(d, out=o1, subok=subok) + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_out_wrap_no_leak(self): + # Regression test for gh-26545 + class ArrSubclass(np.ndarray): + pass + + arr = np.arange(10).view(ArrSubclass) + + arr *= 1 + assert sys.getrefcount(arr) == 2 + class TestComparisons: import operator From 450db4ba3860e8f28f8a0b48952b9ffd6068801c Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 27 May 2024 14:21:12 +0200 Subject: [PATCH 395/980] BUG: Fix in1d fast-path range The code never worked when the first array was uint and the second one had negative values; NumPy 2 makes that slightly worse because the reverse is also true (the second one is uint). I fixed that by just using intp. The indexing operation has to cast anyway, so it seems unlikely that we have much of a downside in general. Casting there seems to make one bounds check just unnecessary, so I removed it. (Yes, I guess indexing could use a buffered iterator so if both ar1 and ar2 are huge and ar1[basic_mask] is so huge it barely fits the memory, that is a downside. 
I don't think I care, but if someone does it can be modified probably) --- numpy/lib/_arraysetops_impl.py | 24 +++++------------------- numpy/lib/tests/test_arraysetops.py | 7 +++---- 2 files changed, 8 insertions(+), 23 deletions(-) diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index c8e1fa888295..aaf6c2f02386 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -853,30 +853,16 @@ def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): if ar2.dtype == bool: ar2 = ar2.astype(np.uint8) - ar2_min = np.min(ar2) - ar2_max = np.max(ar2) + ar2_min = int(np.min(ar2)) + ar2_max = int(np.max(ar2)) - ar2_range = int(ar2_max) - int(ar2_min) + ar2_range = ar2_max - ar2_min # Constraints on whether we can actually use the table method: # 1. Assert memory usage is not too large below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size) # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max - # 3. Check overflows for (ar1 - ar2_min); dtype=ar1.dtype - if ar1.size > 0: - ar1_min = np.min(ar1) - ar1_max = np.max(ar1) - - # After masking, the range of ar1 is guaranteed to be - # within the range of ar2: - ar1_upper = min(int(ar1_max), int(ar2_max)) - ar1_lower = max(int(ar1_min), int(ar2_min)) - - range_safe_from_overflow &= all(( - ar1_upper - int(ar2_min) <= np.iinfo(ar1.dtype).max, - ar1_lower - int(ar2_min) >= np.iinfo(ar1.dtype).min - )) # Optimal performance is for approximately # log10(size) > (log10(range) - 2.27) / 0.927. @@ -906,8 +892,8 @@ def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) - outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - - ar2_min] + outgoing_array[basic_mask] = isin_helper_ar[ + np.subtract(ar1[basic_mask], ar2_min, dtype=np.intp)] return outgoing_array elif kind == 'table': # not range_safe_from_overflow diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index f537621482c0..9819589b137f 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -400,6 +400,7 @@ def test_isin_table_timedelta_fails(self): (np.uint16, np.uint8), (np.uint8, np.int16), (np.int16, np.uint8), + (np.uint64, np.int64), ] ) @pytest.mark.parametrize("kind", [None, "sort", "table"]) @@ -415,10 +416,8 @@ def test_isin_mixed_dtype(self, dtype1, dtype2, kind): expected = np.array([True, True, False, False]) - expect_failure = kind == "table" and any(( - dtype1 == np.int8 and dtype2 == np.int16, - dtype1 == np.int16 and dtype2 == np.int8 - )) + expect_failure = kind == "table" and ( + dtype1 == np.int16 and dtype2 == np.int8) if expect_failure: with pytest.raises(RuntimeError, match="exceed the maximum"): From 55da74ccac75e34e4bbac0b0b26915c311367567 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 23 May 2024 14:00:36 -0600 Subject: [PATCH 396/980] CI: enable free-threaded wheel builds [wheel build] [skip cirrus] [skip azp] [skip circle] --- .github/workflows/free-threaded-wheels.yml | 178 +++++++++++++++++++++ tools/wheels/cibw_before_build.sh | 8 + tools/wheels/cibw_test_command.sh | 13 ++ 3 files changed, 199 insertions(+) create mode 100644 .github/workflows/free-threaded-wheels.yml diff --git a/.github/workflows/free-threaded-wheels.yml b/.github/workflows/free-threaded-wheels.yml new file mode 100644 index 000000000000..54b6dd9fd3fb --- /dev/null +++ 
b/.github/workflows/free-threaded-wheels.yml @@ -0,0 +1,178 @@ +# Workflow to build and test wheels for the free-threaded Python build. +# +# This should be merged back into wheels.yml when free-threaded wheel +# builds can be uploaded to pypi along with the rest of numpy's release +# artifacts. +# +# To work on the wheel building infrastructure on a fork, comment out: +# +# if: github.repository == 'numpy/numpy' +# +# in the get_commit_message job. Be sure to include [wheel build] in your commit +# message to trigger the build. All files related to wheel building are located +# at tools/wheels/ +name: Free-Threaded Wheel Builder + +on: + schedule: + # ┌───────────── minute (0 - 59) + # │ ┌───────────── hour (0 - 23) + # │ │ ┌───────────── day of the month (1 - 31) + # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) + # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) + # │ │ │ │ │ + - cron: "42 2 * * SUN,WED" + pull_request: + branches: + - main + - maintenance/** + # we don't want to upload free-threaded wheels to pypi yet + # so we don't build on tags + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + get_commit_message: + name: Get commit message + runs-on: ubuntu-latest + # To enable this job and subsequent jobs on a fork, comment out: + if: github.repository == 'numpy/numpy' + outputs: + message: ${{ steps.commit_message.outputs.message }} + steps: + - name: Checkout numpy + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + # Gets the correct commit message for pull request + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Get commit message + id: commit_message + run: | + set -xe + COMMIT_MSG=$(git log --no-merges -1 --oneline) + echo "message=$COMMIT_MSG" >> $GITHUB_OUTPUT + echo github.ref ${{ github.ref }} + + build_wheels: + name: Build wheel ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} + needs: get_commit_message + if: >- + contains(needs.get_commit_message.outputs.message, '[wheel build]') || + github.event_name == 'schedule' || + github.event_name == 'workflow_dispatch' + runs-on: ${{ matrix.buildplat[0] }} + strategy: + # Ensure that a wheel builder finishes even if another fails + fail-fast: false + matrix: + # Github Actions doesn't support pairing matrix values together, let's improvise + # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 + buildplat: + - [ubuntu-20.04, manylinux_x86_64, ""] + - [ubuntu-20.04, musllinux_x86_64, ""] + # TODO: build numpy and set up Windows and MacOS + # cibuildwheel does not yet support Mac for free-threaded python + # windows is supported but numpy doesn't build on the image yet + python: ["cp313t"] + env: + IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} + IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} + steps: + - name: Checkout numpy + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + submodules: true + + - name: Setup MSVC (32-bit) + if: ${{ matrix.buildplat[1] == 'win32' }} + uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 + with: + architecture: 'x86' + + - name: pkg-config-for-win + run: | + choco install -y --no-progress --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + $CIBW = "${{ github.workspace 
}}/.openblas" + # pkgconfig needs a complete path, and not just "./openblas since the + # build is run in a tmp dir (?) + # It seems somewhere in the env passing, `\` is not + # passed through, so convert it to '/' + $CIBW = $CIBW.replace("\","/") + echo "CIBW_ENVIRONMENT_WINDOWS=PKG_CONFIG_PATH=$CIBW" >> $env:GITHUB_ENV + if: runner.os == 'windows' + + # Used to push the built wheels + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + with: + python-version: "3.x" + + - name: Setup macOS + if: matrix.buildplat[0] == 'macos-13' || matrix.buildplat[0] == 'macos-14' + run: | + if [[ ${{ matrix.buildplat[2] }} == 'accelerate' ]]; then + # macosx_arm64 and macosx_x86_64 with accelerate + # only target Sonoma onwards + CIBW="MACOSX_DEPLOYMENT_TARGET=14.0 INSTALL_OPENBLAS=false RUNNER_OS=macOS" + echo "CIBW_ENVIRONMENT_MACOS=$CIBW" >> "$GITHUB_ENV" + + # the macos-13 image that's used for building the x86_64 wheel can't test + # a wheel with deployment target >= 14 without further work + echo "CIBW_TEST_SKIP=*-macosx_x86_64" >> "$GITHUB_ENV" + else + # macosx_x86_64 with OpenBLAS + # if INSTALL_OPENBLAS isn't specified then scipy-openblas is automatically installed + CIBW="RUNNER_OS=macOS" + PKG_CONFIG_PATH="$PWD/.openblas" + DYLD="$DYLD_LIBRARY_PATH:/$PWD/.openblas/lib" + echo "CIBW_ENVIRONMENT_MACOS=$CIBW PKG_CONFIG_PATH=$PKG_CONFIG_PATH DYLD_LIBRARY_PATH=$DYLD" >> "$GITHUB_ENV" + fi + + - name: Build wheels + uses: pypa/cibuildwheel@ba8be0d98853f5744f24e7f902c8adef7ae2e7f3 # v2.18.1 + env: + CIBW_PRERELEASE_PYTHONS: True + CIBW_FREE_THREADED_SUPPORT: True + CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} + # TODO: remove along with installing build deps in + # cibw_before_build.sh when a released cython can build numpy + CIBW_BUILD_FRONTEND: "pip; args: --no-build-isolation" + + - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + with: + name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} + path: ./wheelhouse/*.whl + + - uses: mamba-org/setup-micromamba@422500192359a097648154e8db4e39bdb6c6eed7 + with: + # for installation of anaconda-client, required for upload to + # anaconda.org + # Note that this step is *after* specific pythons have been used to + # build and test the wheel + # for installation of anaconda-client, for upload to anaconda.org + # environment will be activated after creation, and in future bash steps + init-shell: bash + environment-name: upload-env + create-args: >- + anaconda-client + + - name: Upload wheels + if: success() + shell: bash -el {0} + # see https://github.com/marketplace/actions/setup-miniconda for why + # `-el {0}` is required. + env: + NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} + run: | + source tools/wheels/upload_wheels.sh + set_upload_vars + # trigger an upload to + # https://anaconda.org/scientific-python-nightly-wheels/numpy + # for cron jobs or "Run workflow" (restricted to main branch). + # The tokens were originally generated at anaconda.org + upload_wheels diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 2c0e90efc7be..2fc5fa144d26 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -52,3 +52,11 @@ if [[ $RUNNER_OS == "Windows" ]]; then # delvewheel is the equivalent of delocate/auditwheel for windows. 
python -m pip install delvewheel wheel fi + +# TODO: delete along with enabling build isolation by unsetting +# CIBW_BUILD_FRONTEND when numpy is buildable under free-threaded +# python with a released version of cython +FREE_THREADED_BUILD="$(python -c"import sysconfig; print(bool(sysconfig.get_config_var('Py_GIL_DISABLED')))")" +if [[ $FREE_THREADED_BUILD == "True" ]]; then + python -m pip install git+https://github.com/cython/cython meson-python ninja +fi diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 693a271efd41..6fcad20236ff 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -6,6 +6,7 @@ PROJECT_DIR="$1" python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" + if [[ $RUNNER_OS == "Windows" ]]; then # GH 20391 PY_DIR=$(python -c "import sys; print(sys.prefix)") @@ -26,6 +27,18 @@ fi # Set available memory value to avoid OOM problems on aarch64. # See gh-22418. export NPY_AVAILABLE_MEM="4 GB" + +FREE_THREADED_BUILD="$(python -c"import sysconfig; print(bool(sysconfig.get_config_var('Py_GIL_DISABLED')))")" +if [[ $FREE_THREADED_BUILD == "True" ]]; then + # TODO: delete when numpy is buildable under free-threaded python + # with a released version of cython + python -m pip install git+https://github.com/cython/cython + # TODO: delete when importing numpy no longer enables the GIL + # setting to zero ensures the GIL is disabled while running the + # tests under free-threaded python + export PYTHON_GIL=0 +fi + # Run full tests with -n=auto. This makes pytest-xdist distribute tests across # the available N CPU cores: 2 by default for Linux instances and 4 for macOS arm64 python -c "import sys; import numpy; sys.exit(not numpy.test(label='full', extra_argv=['-n=auto']))" From fcf488a27605db53dd3cd19431b36d15ea649df9 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Mon, 27 May 2024 22:57:32 -0300 Subject: [PATCH 397/980] DOC: fix gradient return value --- numpy/lib/_function_base_impl.py | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 1d7a762aaee5..07e72048f6ed 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1017,10 +1017,10 @@ def gradient(f, *varargs, axis=None, edge_order=1): Returns ------- - gradient : ndarray or list of ndarray - A list of ndarrays (or a single ndarray if there is only one dimension) - corresponding to the derivatives of f with respect to each dimension. - Each derivative has the same shape as f. + gradient : ndarray or tuple of ndarray + A tuple of ndarrays (or a single ndarray if there is only one + dimension) corresponding to the derivatives of f with respect + to each dimension. Each derivative has the same shape as f. 
Examples -------- >>> f = np.array([1, 2, 4, 7, 11, 16]) >>> np.gradient(f) array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(f, 2) @@ -1106,15 +1106,11 @@ def gradient(f, *varargs, axis=None, edge_order=1): >>> x = np.arange(0, 6, dx) >>> y = np.arange(0, 9, dy) >>> xs, ys = np.meshgrid(x, y) - >>> zs = xs + ys - zs - array([[ 0, 2, 4], - [ 3, 5, 7], - [ 6, 8, 10]]) + >>> zs = xs + 2 * ys >>> np.gradient(zs, dy, dx) # Passing two scalars - (array([[1., 1., 1.], - [1., 1., 1.], - [1., 1., 1.]]), + (array([[2., 2., 2.], + [2., 2., 2.], + [2., 2., 2.]]), array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]])) @@ -1122,9 +1118,9 @@ def gradient(f, *varargs, axis=None, edge_order=1): Mixing scalars and arrays is also allowed: >>> np.gradient(zs, y, dx) # Passing one array and one scalar - (array([[1., 1., 1.], - [1., 1., 1.], - [1., 1., 1.]]), + (array([[2., 2., 2.], + [2., 2., 2.], + [2., 2., 2.]]), array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]])) From 64570d1440d07e0a4079eb218a6e220e342b0479 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 28 May 2024 11:19:18 +0200 Subject: [PATCH 398/980] DOC: Fix typo in vendored code and add release fragment for dlpack v1 --- doc/release/upcoming_changes/26501.new_feature.rst | 2 ++ numpy/_core/src/common/dlpack/dlpack.h | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 doc/release/upcoming_changes/26501.new_feature.rst diff --git a/doc/release/upcoming_changes/26501.new_feature.rst b/doc/release/upcoming_changes/26501.new_feature.rst new file mode 100644 index 000000000000..c7465925295c --- /dev/null +++ b/doc/release/upcoming_changes/26501.new_feature.rst @@ -0,0 +1,2 @@ +* NumPy now supports DLPack v1; support for older versions will + be deprecated in the future. diff --git a/numpy/_core/src/common/dlpack/dlpack.h b/numpy/_core/src/common/dlpack/dlpack.h index 1b447b0389fe..e05e600304d9 100644 --- a/numpy/_core/src/common/dlpack/dlpack.h +++ b/numpy/_core/src/common/dlpack/dlpack.h @@ -200,7 +200,7 @@ typedef struct { * `byte_offset` field should be used to point to the beginning of the data. * * Note that as of Nov 2021, multiply libraries (CuPy, PyTorch, TensorFlow, - * TVM, perhaps others) do not adhere to this 256 byte aligment requirement + * TVM, perhaps others) do not adhere to this 256 byte alignment requirement * on CPU/CUDA/ROCm, and always use `byte_offset=0`. This must be fixed * (after which this note will be updated); at the moment it is recommended * to not rely on the data pointer being correctly aligned. From 122c808be69be8b4f60f4be221609a3a8222d1ec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 May 2024 17:32:36 +0000 Subject: [PATCH 399/980] MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 Bumps [mamba-org/setup-micromamba](https://github.com/mamba-org/setup-micromamba) from 1.8.1 to 1.9.0. - [Release notes](https://github.com/mamba-org/setup-micromamba/releases) - [Commits](https://github.com/mamba-org/setup-micromamba/compare/v1.8.1...f8b8a1e23a26f60a44c853292711bacfd3eac822) --- updated-dependencies: - dependency-name: mamba-org/setup-micromamba dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/free-threaded-wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/free-threaded-wheels.yml b/.github/workflows/free-threaded-wheels.yml index 54b6dd9fd3fb..edbe8dcc2387 100644 --- a/.github/workflows/free-threaded-wheels.yml +++ b/.github/workflows/free-threaded-wheels.yml @@ -148,7 +148,7 @@ jobs: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - uses: mamba-org/setup-micromamba@422500192359a097648154e8db4e39bdb6c6eed7 + - uses: mamba-org/setup-micromamba@f8b8a1e23a26f60a44c853292711bacfd3eac822 with: # for installation of anaconda-client, required for upload to # anaconda.org From 568606e9da1a2a80a2abc62b3905479c202fe647 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 28 May 2024 21:44:55 +0200 Subject: [PATCH 400/980] BUG: Fix leak in half repr --- numpy/_core/src/multiarray/scalartypes.c.src | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 17098af5d3a6..a5185cba60aa 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -1201,7 +1201,9 @@ halftype_@kind@(PyObject *self) if (string == NULL || npy_legacy_print_mode <= 125) { return string; } - return PyUnicode_FromFormat("np.float16(%S)", string); + PyObject *res = PyUnicode_FromFormat("np.float16(%S)", string); + Py_DECREF(string); + return res; #endif } From 426c0c330a47db6d0eb48607e11243638a828257 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 28 May 2024 21:45:12 +0200 Subject: [PATCH 401/980] BUG: Fix dtype leak in `np.empty` --- numpy/_core/src/multiarray/multiarraymodule.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 646b2611a712..e4f9a394be22 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1997,14 +1997,11 @@ array_empty(PyObject *NPY_UNUSED(ignored), ret = (PyArrayObject *)PyArray_Empty_int( shape.len, shape.ptr, dt_info.descr, dt_info.dtype, is_f_order); - npy_free_cache_dim_obj(shape); - return (PyObject *)ret; - fail: Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); npy_free_cache_dim_obj(shape); - return NULL; + return (PyObject *)ret; } static PyObject * From d80b83e434ad0e528839ebc734bbe3e16ad467a8 Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Wed, 29 May 2024 02:43:39 +0530 Subject: [PATCH 402/980] BLD: Bump to Pyodide 0.26.0 This commit bumps the version of Pyodide in the out-of-tree build for NumPy to 0.26.0, which comes with bumps to the Python version (3.12.1) and the Emscripten version (3.1.58). [skip azp] [skip circle] [skip cirrus] --- .github/workflows/emscripten.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 5406e8329129..ad2f08a9348b 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -45,13 +45,13 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' env: - PYODIDE_VERSION: 0.25.1 + PYODIDE_VERSION: 0.26.0 # PYTHON_VERSION and EMSCRIPTEN_VERSION are determined by PYODIDE_VERSION. 
# The appropriate versions can be found in the Pyodide repodata.json # "info" field, or in Makefile.envs: # https://github.com/pyodide/pyodide/blob/main/Makefile.envs#L2 - PYTHON_VERSION: 3.11.3 - EMSCRIPTEN_VERSION: 3.1.46 + PYTHON_VERSION: 3.12.1 + EMSCRIPTEN_VERSION: 3.1.58 NODE_VERSION: 18 steps: - name: Checkout NumPy @@ -75,7 +75,7 @@ jobs: actions-cache-folder: emsdk-cache - name: Install pyodide-build - run: pip install "pydantic<2" pyodide-build==${{ env.PYODIDE_VERSION }} + run: pip install pyodide-build==${{ env.PYODIDE_VERSION }} - name: Find installation for pyodide-build shell: python @@ -93,7 +93,11 @@ jobs: - name: Build NumPy for Pyodide run: | - pyodide build -Cbuild-dir=build -Csetup-args="--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross" -Csetup-args="-Dblas=none" -Csetup-args="-Dlapack=none" + pyodide build \ + -Cbuild-dir=build \ + -Csetup-args="--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross" \ + -Csetup-args="-Dblas=none" \ + -Csetup-args="-Dlapack=none" - name: Set up Node.js uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 From 881eea221411981e03cbb0910893558e6dd0cef9 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 29 May 2024 07:40:06 +0200 Subject: [PATCH 403/980] BUG: Move str/unicode `__new__` obj creation to avoid leaking it --- numpy/_core/src/multiarray/dtypemeta.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 1dd2eb96739c..062243aa1402 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -444,12 +444,6 @@ string_unicode_new(PyArray_DTypeMeta *self, PyObject *args, PyObject *kwargs) return NULL; } - PyArray_Descr *res = PyArray_DescrNewFromType(self->type_num); - - if (res == NULL) { - return NULL; - } - if (self->type_num == NPY_UNICODE) { // unicode strings are 4 bytes per character if (npy_mul_sizes_with_overflow(&size, size, 4)) { @@ -466,6 +460,12 @@ string_unicode_new(PyArray_DTypeMeta *self, PyObject *args, PyObject *kwargs) return NULL; } + PyArray_Descr *res = PyArray_DescrNewFromType(self->type_num); + + if (res == NULL) { + return NULL; + } + res->elsize = (int)size; return (PyObject *)res; } From 4ea21f3bef79e797323be950ef64ee1273c9a6b1 Mon Sep 17 00:00:00 2001 From: Jules <57632293+JuliaPoo@users.noreply.github.com> Date: Wed, 29 May 2024 14:32:45 +0800 Subject: [PATCH 404/980] BUG: fix setxor1d when input arrays aren't 1D (#26559) * BUG: fix setxor1d when input arrays aren't 1D See #14670. Previously, when `assume_unique=True`, `setxor1d` attempted to concatenate arrays with potentially incompatible dimensions. The choice to call `np.ravel` instead of enforcing that the input arrays are 1D stems from the consensus in the linked issue to avoid breaking existing code. * Simplified implementation according to comments * Shift new tests into separate functions --- numpy/lib/_arraysetops_impl.py | 4 ++-- numpy/lib/tests/test_arraysetops.py | 15 +++++++++++++++ numpy/ma/extras.py | 2 +- numpy/ma/tests/test_extras.py | 19 +++++++++++++++++++ 4 files changed, 37 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index aaf6c2f02386..b143466ac6f6 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -686,7 +686,7 @@ def setxor1d(ar1, ar2, assume_unique=False): Input arrays.
assume_unique : bool If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. + can speed up the calculation. Default is False. Returns ------- @@ -706,7 +706,7 @@ def setxor1d(ar1, ar2, assume_unique=False): ar1 = unique(ar1) ar2 = unique(ar2) - aux = np.concatenate((ar1, ar2)) + aux = np.concatenate((ar1, ar2), axis=None) if aux.size == 0: return aux diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 9819589b137f..33b43b57a381 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -107,6 +107,21 @@ def test_setxor1d(self): assert_array_equal([], setxor1d([], [])) + def test_setxor1d_unique(self): + a = np.array([1, 8, 2, 3]) + b = np.array([6, 5, 4, 8]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + a = np.array([[1], [8], [2], [3]]) + b = np.array([[6, 5], [4, 8]]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + def test_ediff1d(self): zero_elem = np.array([]) one_elem = np.array([1]) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 743f4bead446..4e8bc6f9ec09 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1395,7 +1395,7 @@ def setxor1d(ar1, ar2, assume_unique=False): ar1 = unique(ar1) ar2 = unique(ar2) - aux = ma.concatenate((ar1, ar2)) + aux = ma.concatenate((ar1, ar2), axis=None) if aux.size == 0: return aux aux.sort() diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index ad6bdf38f45c..3a8e5bca55df 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -1661,6 +1661,25 @@ def test_setxor1d(self): # assert_array_equal([], setxor1d([], [])) + def test_setxor1d_unique(self): + # Test setxor1d with assume_unique=True + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b, assume_unique=True) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b, assume_unique=True) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([[1], [8], [2], [3]]) + b = array([[6, 5], [4, 8]]) + test = setxor1d(a, b, assume_unique=True) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + def test_isin(self): # the tests for in1d cover most of isin's behavior # if in1d is removed, would need to change those tests to test From 8fdce22e18603dfa66e1b5affd03e68ba9cba1e8 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 29 May 2024 13:25:30 +0200 Subject: [PATCH 405/980] BUG: Should use default free for new-style dtypes --- numpy/_core/src/multiarray/descriptor.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 1564902be674..949b70e1ddf4 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2025,6 +2025,8 @@ arraydescr_dealloc(PyArray_Descr *self) { Py_XDECREF(self->typeobj); if (!PyDataType_ISLEGACY(self)) { + /* non legacy dtypes must not have fields, etc. 
*/ + Py_TYPE(self)->tp_free((PyObject *)self); return; } _PyArray_LegacyDescr *lself = (_PyArray_LegacyDescr *)self; From 546b02929006ab1f3e637c6977057ca132bc4319 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 29 May 2024 10:40:49 -0300 Subject: [PATCH 406/980] Update numpy/lib/_function_base_impl.py Co-authored-by: Nathan Goldbaum --- numpy/lib/_function_base_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 07e72048f6ed..3a2581757be8 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1081,7 +1081,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): array([[ 2., 2., -1.], [ 2., 2., -1.]]) - The `varargs` argument define the spacing between sample points in the + The `varargs` argument defines the spacing between sample points in the input array. It can take two forms: 1. Receives scalars representing sample distance: From 3e60d861f75bec95535db34837d1d594deb97229 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 29 May 2024 10:40:59 -0300 Subject: [PATCH 407/980] Update numpy/lib/_function_base_impl.py Co-authored-by: Nathan Goldbaum --- numpy/lib/_function_base_impl.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 3a2581757be8..c7a8f306bbcf 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1084,18 +1084,19 @@ def gradient(f, *varargs, axis=None, edge_order=1): The `varargs` argument defines the spacing between sample points in the input array. It can take two forms: - 1. Receives scalars representing sample distance: + 1. An array, specifying coordinates, which may be unevenly spaced: + - >>> dx = 2 >>> x = np.array([0., 2., 4., 6., 8.]) >>> y = x ** 2 - >>> np.gradient(y, dx, edge_order=2) - array([ 0., 4., 8., 12., 16.]) # y = 2x + >>> np.gradient(y, x, edge_order=2) + array([ 0., 4., 8., 12., 16.]) - 2. Receives arrays specifying coordinates: + 2. A scalar, representing the fixed sample distance: - >>> np.gradient(y, x, edge_order=2) - array([ 0., 4., 8., 12., 16.]) # y = 2x + >>> dx = 2 + >>> np.gradient(y, dx, edge_order=2) + array([ 0., 4., 8., 12., 16.]) It's possible to provide different data for spacing along each dimension. The number of arguments must match the number of dimensions in the input From 5421d2c67287ea2fe446fe9d306442370112dc61 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 29 May 2024 11:41:14 -0300 Subject: [PATCH 408/980] DOC: change gradient unevenly spaced example --- numpy/lib/_function_base_impl.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index c7a8f306bbcf..00486e6fa536 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -986,7 +986,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): ---------- f : array_like An N-dimensional array containing samples of a scalar function. - varargs : list of scalar or array, optional + varargs : list of scalar or array, optional. (see Examples below). Spacing between f values. Default unitary spacing for all dimensions. 
Spacing can be specified using: @@ -1077,7 +1077,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): The `axis` keyword can be used to specify a subset of axes of which the gradient is calculated - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]), axis=0) array([[ 2., 2., -1.], [ 2., 2., -1.]]) @@ -1087,14 +1087,16 @@ def gradient(f, *varargs, axis=None, edge_order=1): 1. An array, specifying coordinates, which may be unevenly spaced: - >>> x = np.array([0., 2., 4., 6., 8.]) + >>> x = np.array([0., 2., 3., 6., 8.]) >>> y = x ** 2 >>> np.gradient(y, x, edge_order=2) - array([ 0., 4., 8., 12., 16.]) + array([ 0., 4., 6., 12., 16.]) 2. A scalar, representing the fixed sample distance: >>> dx = 2 + >>> x = np.array([0., 2., 4., 6., 8.]) + >>> y = x ** 2 >>> np.gradient(y, dx, edge_order=2) array([ 0., 4., 8., 12., 16.]) From acf9e427f7c325d96d2490fecb4f16316ffcb7b9 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 29 May 2024 12:09:34 -0300 Subject: [PATCH 409/980] DOC: gradient remove blank line --- numpy/lib/_function_base_impl.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 00486e6fa536..87c450fb6b07 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1086,7 +1086,6 @@ def gradient(f, *varargs, axis=None, edge_order=1): 1. An array, specifying coordinates, which may be unevenly spaced: - >>> x = np.array([0., 2., 3., 6., 8.]) >>> y = x ** 2 >>> np.gradient(y, x, edge_order=2) From 75d316e246c4b40d41f56e84312d209e7b124b48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Wed, 29 May 2024 12:39:20 -0300 Subject: [PATCH 410/980] MAINT: Unpin pydata-sphinx-theme Also adds towncrier to docs dependencies for conda [skip azp][skip cirrus][skip actions] --- environment.yml | 3 ++- requirements/doc_requirements.txt | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 9850418666ac..7e347bccb6c9 100644 --- a/environment.yml +++ b/environment.yml @@ -35,8 +35,9 @@ dependencies: - scipy - pandas - matplotlib - - pydata-sphinx-theme=0.15.2 + - pydata-sphinx-theme>=0.15.2 - doxygen + - towncrier # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index afee84a51c4c..7dfb228c83f1 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -1,7 +1,7 @@ # doxygen required, use apt-get or dnf sphinx==7.2.6 numpydoc==1.4 -pydata-sphinx-theme==0.15.2 +pydata-sphinx-theme>=0.15.2 sphinx-design scipy matplotlib From c3f04b74df589ada8455ac9b80080a60b12081d8 Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Wed, 29 May 2024 12:25:48 -0600 Subject: [PATCH 411/980] DOC: Added web docs for missing ma routines This commit updates the routines.ma.rst file to enable web docs for several routines in the `ma` module. Similar to #23352 See #21351 for a script to locate similar items in any module. 
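A minimal sketch of that kind of check, assuming the repository root as the working directory (the helper name, the regex, and the rst path handling are illustrative assumptions, not the actual script from the issue):

    # Hypothetical helper: report public names in numpy.ma that never
    # appear in the routines.ma.rst listing.
    import re
    import numpy.ma

    def undocumented(mod, prefix, rst_path):
        with open(rst_path) as f:
            # Collect every dotted identifier mentioned in the .rst file.
            listed = set(re.findall(r"[\w.]+", f.read()))
        public = (n for n in dir(mod) if not n.startswith("_"))
        return sorted(n for n in public if f"{prefix}.{n}" not in listed)

    print(undocumented(numpy.ma, "ma", "doc/source/reference/routines.ma.rst"))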
[skip actions] [skip azp] [skip cirrus] --- doc/source/reference/routines.ma.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst index 607a15b91f1e..2b1b5dac1710 100644 --- a/doc/source/reference/routines.ma.rst +++ b/doc/source/reference/routines.ma.rst @@ -416,10 +416,25 @@ Miscellanea ma.allequal ma.allclose + ma.amax + ma.amin ma.apply_along_axis ma.apply_over_axes ma.arange ma.choose + ma.compress_nd + ma.convolve + ma.correlate ma.ediff1d + ma.flatten_mask + ma.flatten_structured_array + ma.fromflex ma.indices + ma.left_shift + ma.ndim + ma.put + ma.putmask + ma.right_shift + ma.round_ + ma.take ma.where From 17af9ea7f3348dc56c64d0228d07b22dbdc7b2fc Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 29 May 2024 13:56:16 -0600 Subject: [PATCH 412/980] Add array API inspection functions These are new functions defined by the array API 2023.12 standard to inspect details about the array API namespace. See https://data-apis.org/array-api/latest/API_specification/inspection.html for more details. --- numpy/__init__.py | 2 + numpy/lib/_info.py | 162 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 164 insertions(+) create mode 100644 numpy/lib/_info.py diff --git a/numpy/__init__.py b/numpy/__init__.py index e4696ba2108b..0e6a1bcfbe42 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -235,6 +235,8 @@ ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index, index_exp ) + from .lib._info import __array_namespace_info__ + from . import matrixlib as _mat from .matrixlib import ( asmatrix, bmat, matrix diff --git a/numpy/lib/_info.py b/numpy/lib/_info.py new file mode 100644 index 000000000000..945d145784d6 --- /dev/null +++ b/numpy/lib/_info.py @@ -0,0 +1,162 @@ +""" +Array API Inspection namespace + +This is the namespace for inspection functions as defined by the array API standard. +See https://data-apis.org/array-api/latest/API_specification/inspection.html +for more details. +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from numpy import (bool, int8, int16, int32, int64, uint8, uint16, uint32, + uint64, float32, float64, complex64, complex128) + +if TYPE_CHECKING: + from typing import Optional, Union, Tuple, List, ModuleType, TypedDict + from numpy.dtyping import DtypeLike + + Capabilities = TypedDict( + "Capabilities", {"boolean indexing": bool, "data-dependent shapes": bool} + ) + + DefaultDataTypes = TypedDict( + "DefaultDataTypes", + { + "real floating": DtypeLike, + "complex floating": DtypeLike, + "integral": DtypeLike, + "indexing": DtypeLike, + }, + ) + + DataTypes = TypedDict( + "DataTypes", + { + "bool": DtypeLike, + "float32": DtypeLike, + "float64": DtypeLike, + "complex64": DtypeLike, + "complex128": DtypeLike, + "int8": DtypeLike, + "int16": DtypeLike, + "int32": DtypeLike, + "int64": DtypeLike, + "uint8": DtypeLike, + "uint16": DtypeLike, + "uint32": DtypeLike, + "uint64": DtypeLike, + }, + total=False, + ) + + +def __array_namespace_info__() -> ModuleType: + import numpy.lib._info + return numpy.lib._info + +def capabilities() -> Capabilities: + return {"boolean indexing": True, + "data-dependent shapes": True, + } + +def default_device() -> str: + return 'cpu' + +def default_dtypes( + *, + device: Optional[str] = None, +) -> DefaultDataTypes: + if device not in ['cpu', None]: + raise ValueError(f'Device not understood. 
Only "cpu" is allowed, but received: {device}') + return { + "real floating": float64, + "complex floating": complex128, + "integral": int64, + "indexing": int64, + } + +def dtypes( + *, + device: Optional[str] = None, + kind: Optional[Union[str, Tuple[str, ...]]] = None, +) -> DataTypes: + if device not in ['cpu', None]: + raise ValueError(f'Device not understood. Only "cpu" is allowed, but received: {device}') + if kind is None: + return { + "bool": bool, + "int8": int8, + "int16": int16, + "int32": int32, + "int64": int64, + "uint8": uint8, + "uint16": uint16, + "uint32": uint32, + "uint64": uint64, + "float32": float32, + "float64": float64, + "complex64": complex64, + "complex128": complex128, + } + if kind == "bool": + return {"bool": bool} + if kind == "signed integer": + return { + "int8": int8, + "int16": int16, + "int32": int32, + "int64": int64, + } + if kind == "unsigned integer": + return { + "uint8": uint8, + "uint16": uint16, + "uint32": uint32, + "uint64": uint64, + } + if kind == "integral": + return { + "int8": int8, + "int16": int16, + "int32": int32, + "int64": int64, + "uint8": uint8, + "uint16": uint16, + "uint32": uint32, + "uint64": uint64, + } + if kind == "real floating": + return { + "float32": float32, + "float64": float64, + } + if kind == "complex floating": + return { + "complex64": complex64, + "complex128": complex128, + } + if kind == "numeric": + return { + "int8": int8, + "int16": int16, + "int32": int32, + "int64": int64, + "uint8": uint8, + "uint16": uint16, + "uint32": uint32, + "uint64": uint64, + "float32": float32, + "float64": float64, + "complex64": complex64, + "complex128": complex128, + } + if isinstance(kind, tuple): + res = {} + for k in kind: + res.update(dtypes(kind=k)) + return res + raise ValueError(f"unsupported kind: {kind!r}") + +def devices() -> List[str]: + return ['cpu'] From feba087e7fc90a5ee59438ee10e09bd9ba2f4288 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 29 May 2024 13:57:55 -0600 Subject: [PATCH 413/980] Add "max rank" to the capabilities() dictionary This is new in the unreleased 2024.12 standard, but I don't see an issue with including it now. --- numpy/lib/_info.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_info.py b/numpy/lib/_info.py index 945d145784d6..ca62a0de70ce 100644 --- a/numpy/lib/_info.py +++ b/numpy/lib/_info.py @@ -17,7 +17,11 @@ from numpy.dtyping import DtypeLike Capabilities = TypedDict( - "Capabilities", {"boolean indexing": bool, "data-dependent shapes": bool} + "Capabilities", { + "boolean indexing": bool, + "data-dependent shapes": bool, + "max rank": int | None, + }, ) DefaultDataTypes = TypedDict( @@ -58,6 +62,8 @@ def __array_namespace_info__() -> ModuleType: def capabilities() -> Capabilities: return {"boolean indexing": True, "data-dependent shapes": True, + # Note: 'max rank' is part of the draft 2024.12 standard + "max rank": 32, } def default_device() -> str: From 0298ecc0b1777081d4c6840f436a793de8f444ca Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 29 May 2024 13:58:39 -0600 Subject: [PATCH 414/980] Add documentation to the inspection API functions Unfortunately, I don't see an easy way to include these docstrings in Sphinx, other than __array_namespace_info__, because they are hidden behind a private namespace that is only accessible from np.__array_namespace_info__(). 
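The functions are still reachable interactively through the returned namespace, which is how the examples below exercise them; a minimal sketch, with outputs abbreviated in the comments:

    import numpy as np

    info = np.__array_namespace_info__()
    help(info.capabilities)              # shows the docstring added here
    print(info.default_device())         # 'cpu'
    print(info.default_dtypes())         # {'real floating': <class 'numpy.float64'>, ...}
    print(info.dtypes(kind="integral"))  # {'int8': ..., ..., 'uint64': ...}
    print(info.devices())                # ['cpu']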
--- numpy/lib/_info.py | 194 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 194 insertions(+) diff --git a/numpy/lib/_info.py b/numpy/lib/_info.py index ca62a0de70ce..0c2f1a71958a 100644 --- a/numpy/lib/_info.py +++ b/numpy/lib/_info.py @@ -56,10 +56,75 @@ def __array_namespace_info__() -> ModuleType: + """ + Get the array API inspection namespace for NumPy. + + The array API inspection namespace defines the following functions: + + - capabilities() + - default_device() + - default_dtypes() + - dtypes() + - devices() + + See + https://data-apis.org/array-api/latest/API_specification/inspection.html + for more details. + + Returns + ------- + info : ModuleType + The array API inspection namespace for NumPy. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_dtypes() + {'real floating': numpy.float64, + 'complex floating': numpy.complex128, + 'integral': numpy.int64, + 'indexing': numpy.int64} + """ import numpy.lib._info return numpy.lib._info def capabilities() -> Capabilities: + """ + Return a dictionary of array API library capabilities. + + The resulting dictionary has the following keys: + + - **"boolean indexing"**: boolean indicating whether an array library + supports boolean indexing. Always ``True`` for NumPy. + + - **"data-dependent shapes"**: boolean indicating whether an array library + supports data-dependent output shapes. Always ``True`` for NumPy. + + - **"max rank"**: The maximum number of supported dimensions for arrays. + Always ``32`` for NumPy. + + See + https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html + for more details. + + See Also + -------- + default_device, default_dtypes, dtypes, devices + + Returns + ------- + capabilities : Capabilities + A dictionary of array API library capabilities. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.capabilities() + {'boolean indexing': True, + 'data-dependent shapes': True, + 'max rank': 32} + + """ return {"boolean indexing": True, "data-dependent shapes": True, # Note: 'max rank' is part of the draft 2024.12 standard @@ -67,12 +132,69 @@ def capabilities() -> Capabilities: } def default_device() -> str: + """ + The default device used for new NumPy arrays. + + For NumPy, this always returns ``'cpu'``. + + See Also + -------- + capabilities, default_dtypes, dtypes, devices + + Returns + ------- + device : str + The default device used for new NumPy arrays. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_device() + 'cpu' + + """ return 'cpu' def default_dtypes( *, device: Optional[str] = None, ) -> DefaultDataTypes: + """ + The default data types used for new NumPy arrays. + + For NumPy, this always returns the following dictionary: + + - **"real floating"**: ``numpy.float64`` + - **"complex floating"**: ``numpy.complex128`` + - **"integral"**: ``numpy.int64`` + - **"indexing"**: ``numpy.int64`` + + Parameters + ---------- + device : str, optional + The device to get the default data types for. For NumPy, only + ``'cpu'`` is allowed. + + Returns + ------- + dtypes : DefaultDataTypes + A dictionary describing the default data types used for new NumPy + arrays. 
+ + See Also -------- capabilities, default_device, dtypes, devices + Examples -------- + >>> info = np.__array_namespace_info__() + >>> info.default_dtypes() + {'real floating': numpy.float64, + 'complex floating': numpy.complex128, + 'integral': numpy.int64, + 'indexing': numpy.int64} + + """ if device not in ['cpu', None]: raise ValueError(f'Device not understood. Only "cpu" is allowed, but received: {device}') return { @@ -87,6 +209,57 @@ def dtypes( device: Optional[str] = None, kind: Optional[Union[str, Tuple[str, ...]]] = None, ) -> DataTypes: + """ + The array API data types supported by NumPy. + + Note that this function only returns data types that are defined by the + array API. + + Parameters + ---------- + device : str, optional + The device to get the data types for. For NumPy, only ``'cpu'`` is + allowed. + kind : str or tuple of str, optional + The kind of data types to return. If ``None``, all data types are + returned. If a string, only data types of that kind are returned. If a + tuple, a dictionary containing the union of the given kinds is + returned. The following kinds are supported: + + - ``'bool'``: boolean data types (e.g., ``bool``). + - ``'signed integer'``: signed integer data types (e.g., ``int8``, + ``int16``, ``int32``, ``int64``). + - ``'unsigned integer'``: unsigned integer data types (e.g., + ``uint8``, ``uint16``, ``uint32``, ``uint64``). + - ``'integral'``: integer data types. Shorthand for ``('signed + integer', 'unsigned integer')``. + - ``'real floating'``: real-valued floating-point data types + (e.g., ``float32``, ``float64``). + - ``'complex floating'``: complex floating-point data types (e.g., + ``complex64``, ``complex128``). + - ``'numeric'``: numeric data types. Shorthand for ``('integral', + 'real floating', 'complex floating')``. + + Returns + ------- + dtypes : DataTypes + A dictionary mapping the names of data types to the corresponding + NumPy data types. + + See Also + -------- + capabilities, default_device, default_dtypes, devices + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.dtypes(kind='signed integer') + {'int8': numpy.int8, + 'int16': numpy.int16, + 'int32': numpy.int32, + 'int64': numpy.int64} + + """ if device not in ['cpu', None]: raise ValueError(f'Device not understood. Only "cpu" is allowed, but received: {device}') if kind is None: @@ -165,4 +338,25 @@ def dtypes( raise ValueError(f"unsupported kind: {kind!r}") def devices() -> List[str]: + """ + The devices supported by NumPy. + + For NumPy, this always returns ``['cpu']``. + + Returns + ------- + devices : list of str + The devices supported by NumPy. + + See Also + -------- + capabilities, default_device, default_dtypes, dtypes + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.devices() + ['cpu'] + + """ return ['cpu'] From b134742ad293f2aedef277e379795341b6d028a6 Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Wed, 29 May 2024 14:07:14 -0600 Subject: [PATCH 415/980] DOC: Added web docs for missing strings routines This commit updates routines.strings.rst to enable web docs for several routines in the `strings` module. See #21351 for a script to automatically locate similar issues.
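A quick smoke test that the newly listed routines resolve at runtime (the printed reprs in the comments are illustrative; exact quoting and dtype widths depend on the inputs):

    import numpy as np

    a = np.array(["a\tb", "hello world"])
    print(np.strings.expandtabs(a, 4))  # ['a   b' 'hello world']
    print(np.strings.capitalize(a))     # ['A\tb' 'Hello world']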
[skip actions] [skip azp] [skip cirrus] --- doc/source/reference/routines.strings.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/source/reference/routines.strings.rst b/doc/source/reference/routines.strings.rst index a8ea56a4b2be..f0af9475d10f 100644 --- a/doc/source/reference/routines.strings.rst +++ b/doc/source/reference/routines.strings.rst @@ -31,6 +31,11 @@ String operations :toctree: generated/ add + center + capitalize + decode + encode + expandtabs ljust lower lstrip From 4d84b0242ec7bbf6b6261eb99c0751e3cd17e71a Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 29 May 2024 14:20:21 -0600 Subject: [PATCH 416/980] Add basic tests for the array API inspection functions --- numpy/lib/tests/test_info.py | 96 ++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 numpy/lib/tests/test_info.py diff --git a/numpy/lib/tests/test_info.py b/numpy/lib/tests/test_info.py new file mode 100644 index 000000000000..85473aef9af9 --- /dev/null +++ b/numpy/lib/tests/test_info.py @@ -0,0 +1,96 @@ +import numpy as np +import pytest + +info = np.__array_namespace_info__() + +def test_capabilities(): + caps = info.capabilities() + assert caps["boolean indexing"] == True + assert caps["data-dependent shapes"] == True + assert caps["max rank"] == 32 + +def test_default_device(): + assert info.default_device() == 'cpu' + +def test_default_dtypes(): + dtypes = info.default_dtypes() + assert dtypes["real floating"] == np.float64 + assert dtypes["complex floating"] == np.complex128 + assert dtypes["integral"] == np.int64 + assert dtypes["indexing"] == np.int64 + + with pytest.raises(ValueError, match='Device not understood'): + info.default_dtypes(device='gpu') + +def test_dtypes_all(): + dtypes = info.dtypes() + assert dtypes == { + "bool": np.bool_, + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + "float32": np.float32, + "float64": np.float64, + "complex64": np.complex64, + "complex128": np.complex128, + } + +@pytest.mark.parametrize("kind,expected", [ + ("bool", {"bool": np.bool_}), + ("signed integer", { + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64 + }), + ("unsigned integer", { + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + }), + ("integral", { + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + }), + ("real floating", {"float32": np.float32, "float64": np.float64}), + ("complex floating", {"complex64": np.complex64, "complex128": np.complex128}), +]) +def test_dtypes_kind(kind, expected): + assert info.dtypes(kind=kind) == expected + +def test_dtypes_tuple(): + dtypes = info.dtypes(kind=("bool", "integral")) + assert dtypes == { + "bool": np.bool_, + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + } + +def test_dtypes_invalid_kind(): + with pytest.raises(ValueError, match="unsupported kind"): + info.dtypes(kind="invalid") + +def test_dtypes_invalid_device(): + with pytest.raises(ValueError, match='Device not understood'): + info.dtypes(device='gpu') + +def test_devices(): + assert info.devices() == ['cpu'] From 81839c7e417c7e2b21c922566746957225aff0fe Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 
29 May 2024 14:24:50 -0600 Subject: [PATCH 417/980] Fix capabilities()['max rank'] to be 64 --- numpy/lib/_info.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_info.py b/numpy/lib/_info.py index 0c2f1a71958a..9b68be39f0f7 100644 --- a/numpy/lib/_info.py +++ b/numpy/lib/_info.py @@ -101,7 +101,7 @@ def capabilities() -> Capabilities: supports data-dependent output shapes. Always ``True`` for NumPy. - **"max rank"**: The maximum number of supported dimensions for arrays. - Always ``32`` for NumPy. + Always ``64`` for NumPy. See https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html @@ -122,13 +122,13 @@ def capabilities() -> Capabilities: >>> info.capabilities() {'boolean indexing': True, 'data-dependent shapes': True, - 'max rank': 32} + 'max rank': 64} """ return {"boolean indexing": True, "data-dependent shapes": True, # Note: 'max rank' is part of the draft 2024.12 standard - "max rank": 32, + "max rank": 64, } def default_device() -> str: From 56d1231b62dbe82d0e28c422126c6bbea5e4fc11 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 29 May 2024 14:25:13 -0600 Subject: [PATCH 418/980] Add some consistency checks to the info namespace tests --- numpy/lib/tests/test_info.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/numpy/lib/tests/test_info.py b/numpy/lib/tests/test_info.py index 85473aef9af9..f87a653cae02 100644 --- a/numpy/lib/tests/test_info.py +++ b/numpy/lib/tests/test_info.py @@ -7,17 +7,20 @@ def test_capabilities(): caps = info.capabilities() assert caps["boolean indexing"] == True assert caps["data-dependent shapes"] == True - assert caps["max rank"] == 32 + assert caps["max rank"] == 64 + np.zeros((1,)*64) + with pytest.raises(ValueError): + np.zeros((1,)*65) def test_default_device(): - assert info.default_device() == 'cpu' + assert info.default_device() == 'cpu' == np.asarray(0).device def test_default_dtypes(): dtypes = info.default_dtypes() - assert dtypes["real floating"] == np.float64 - assert dtypes["complex floating"] == np.complex128 - assert dtypes["integral"] == np.int64 - assert dtypes["indexing"] == np.int64 + assert dtypes["real floating"] == np.float64 == np.asarray(0.0).dtype + assert dtypes["complex floating"] == np.complex128 == np.asarray(0.0j).dtype + assert dtypes["integral"] == np.int64 == np.asarray(0).dtype + assert dtypes["indexing"] == np.int64 == np.argmax(np.zeros(10)).dtype with pytest.raises(ValueError, match='Device not understood'): info.default_dtypes(device='gpu') From abb380036e38bb177ed2bca69ac4df883e5e15e7 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 29 May 2024 16:23:34 -0600 Subject: [PATCH 419/980] Remove the unreleased "max rank" from the capabilities() dictionary It's better to not include unreleased standard things in case they change. --- numpy/lib/_info.py | 12 ++++-------- numpy/lib/tests/test_info.py | 11 +++++++---- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/numpy/lib/_info.py b/numpy/lib/_info.py index 9b68be39f0f7..93041c4b6709 100644 --- a/numpy/lib/_info.py +++ b/numpy/lib/_info.py @@ -20,7 +20,7 @@ "Capabilities", { "boolean indexing": bool, "data-dependent shapes": bool, - "max rank": int | None, + # "max rank": int | None, }, ) @@ -100,9 +100,6 @@ def capabilities() -> Capabilities: - **"data-dependent shapes"**: boolean indicating whether an array library supports data-dependent output shapes. Always ``True`` for NumPy. 
- - **"max rank"**: The maximum number of supported dimensions for arrays. - Always ``64`` for NumPy. - See https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html for more details. @@ -121,14 +118,13 @@ def capabilities() -> Capabilities: >>> info = np.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, - 'data-dependent shapes': True, - 'max rank': 64} + 'data-dependent shapes': True} """ return {"boolean indexing": True, "data-dependent shapes": True, - # Note: 'max rank' is part of the draft 2024.12 standard - "max rank": 64, + # 'max rank' will be part of the 2024.12 standard + # "max rank": 64, } def default_device() -> str: diff --git a/numpy/lib/tests/test_info.py b/numpy/lib/tests/test_info.py index f87a653cae02..8bfa58f88ead 100644 --- a/numpy/lib/tests/test_info.py +++ b/numpy/lib/tests/test_info.py @@ -7,10 +7,13 @@ def test_capabilities(): caps = info.capabilities() assert caps["boolean indexing"] == True assert caps["data-dependent shapes"] == True - assert caps["max rank"] == 64 - np.zeros((1,)*64) - with pytest.raises(ValueError): - np.zeros((1,)*65) + + # This will be added in the 2024.12 release of the array API standard. + + # assert caps["max rank"] == 64 + # np.zeros((1,)*64) + # with pytest.raises(ValueError): + # np.zeros((1,)*65) def test_default_device(): assert info.default_device() == 'cpu' == np.asarray(0).device From 44ff515be7234ad42db45cc7ef7ddf55242088c4 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 29 May 2024 16:34:24 -0600 Subject: [PATCH 420/980] Reformat info files with black --- numpy/lib/_info.py | 54 ++++++++++++++++++------ numpy/lib/tests/test_info.py | 82 +++++++++++++++++++++--------------- 2 files changed, 89 insertions(+), 47 deletions(-) diff --git a/numpy/lib/_info.py b/numpy/lib/_info.py index 93041c4b6709..370fc9287804 100644 --- a/numpy/lib/_info.py +++ b/numpy/lib/_info.py @@ -5,19 +5,34 @@ See https://data-apis.org/array-api/latest/API_specification/inspection.html for more details. """ + from __future__ import annotations from typing import TYPE_CHECKING -from numpy import (bool, int8, int16, int32, int64, uint8, uint16, uint32, - uint64, float32, float64, complex64, complex128) +from numpy import ( + bool, + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, + complex64, + complex128, +) if TYPE_CHECKING: from typing import Optional, Union, Tuple, List, ModuleType, TypedDict from numpy.dtyping import DtypeLike Capabilities = TypedDict( - "Capabilities", { + "Capabilities", + { "boolean indexing": bool, "data-dependent shapes": bool, # "max rank": int | None, @@ -86,8 +101,10 @@ def __array_namespace_info__() -> ModuleType: 'indexing': numpy.int64} """ import numpy.lib._info + return numpy.lib._info + def capabilities() -> Capabilities: """ Return a dictionary of array API library capabilities. 
@@ -121,11 +138,13 @@ def capabilities() -> Capabilities: 'data-dependent shapes': True} """ - return {"boolean indexing": True, - "data-dependent shapes": True, - # 'max rank' will be part of the 2024.12 standard - # "max rank": 64, - } + return { + "boolean indexing": True, + "data-dependent shapes": True, + # 'max rank' will be part of the 2024.12 standard + # "max rank": 64, + } + def default_device() -> str: """ @@ -149,7 +168,8 @@ def default_device() -> str: 'cpu' """ - return 'cpu' + return "cpu" + def default_dtypes( *, @@ -191,8 +211,10 @@ def default_dtypes( 'indexing': numpy.int64} """ - if device not in ['cpu', None]: - raise ValueError(f'Device not understood. Only "cpu" is allowed, but received: {device}') + if device not in ["cpu", None]: + raise ValueError( + f'Device not understood. Only "cpu" is allowed, but received: {device}' + ) return { "real floating": float64, "complex floating": complex128, @@ -200,6 +222,7 @@ def default_dtypes( "indexing": int64, } + def dtypes( *, device: Optional[str] = None, @@ -256,8 +279,10 @@ def dtypes( 'int64': numpy.int64} """ - if device not in ['cpu', None]: - raise ValueError(f'Device not understood. Only "cpu" is allowed, but received: {device}') + if device not in ["cpu", None]: + raise ValueError( + f'Device not understood. Only "cpu" is allowed, but received: {device}' + ) if kind is None: return { "bool": bool, @@ -333,6 +358,7 @@ def dtypes( return res raise ValueError(f"unsupported kind: {kind!r}") + def devices() -> List[str]: """ The devices supported by NumPy. @@ -355,4 +381,4 @@ def devices() -> List[str]: ['cpu'] """ - return ['cpu'] + return ["cpu"] diff --git a/numpy/lib/tests/test_info.py b/numpy/lib/tests/test_info.py index 8bfa58f88ead..1308a35230cc 100644 --- a/numpy/lib/tests/test_info.py +++ b/numpy/lib/tests/test_info.py @@ -3,6 +3,7 @@ info = np.__array_namespace_info__() + def test_capabilities(): caps = info.capabilities() assert caps["boolean indexing"] == True @@ -15,8 +16,10 @@ def test_capabilities(): # with pytest.raises(ValueError): # np.zeros((1,)*65) + def test_default_device(): - assert info.default_device() == 'cpu' == np.asarray(0).device + assert info.default_device() == "cpu" == np.asarray(0).device + def test_default_dtypes(): dtypes = info.default_dtypes() @@ -25,8 +28,9 @@ def test_default_dtypes(): assert dtypes["integral"] == np.int64 == np.asarray(0).dtype assert dtypes["indexing"] == np.int64 == np.argmax(np.zeros(10)).dtype - with pytest.raises(ValueError, match='Device not understood'): - info.default_dtypes(device='gpu') + with pytest.raises(ValueError, match="Device not understood"): + info.default_dtypes(device="gpu") + def test_dtypes_all(): dtypes = info.dtypes() @@ -46,36 +50,45 @@ def test_dtypes_all(): "complex128": np.complex128, } -@pytest.mark.parametrize("kind,expected", [ - ("bool", {"bool": np.bool_}), - ("signed integer", { - "int8": np.int8, - "int16": np.int16, - "int32": np.int32, - "int64": np.int64 - }), - ("unsigned integer", { - "uint8": np.uint8, - "uint16": np.uint16, - "uint32": np.uint32, - "uint64": np.uint64, - }), - ("integral", { - "int8": np.int8, - "int16": np.int16, - "int32": np.int32, - "int64": np.int64, - "uint8": np.uint8, - "uint16": np.uint16, - "uint32": np.uint32, - "uint64": np.uint64, - }), - ("real floating", {"float32": np.float32, "float64": np.float64}), - ("complex floating", {"complex64": np.complex64, "complex128": np.complex128}), -]) + +@pytest.mark.parametrize( + "kind,expected", + [ + ("bool", {"bool": np.bool_}), + ( + "signed 
integer", + {"int8": np.int8, "int16": np.int16, "int32": np.int32, "int64": np.int64}, + ), + ( + "unsigned integer", + { + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + }, + ), + ( + "integral", + { + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + }, + ), + ("real floating", {"float32": np.float32, "float64": np.float64}), + ("complex floating", {"complex64": np.complex64, "complex128": np.complex128}), + ], +) def test_dtypes_kind(kind, expected): assert info.dtypes(kind=kind) == expected + def test_dtypes_tuple(): dtypes = info.dtypes(kind=("bool", "integral")) assert dtypes == { @@ -90,13 +103,16 @@ def test_dtypes_tuple(): "uint64": np.uint64, } + def test_dtypes_invalid_kind(): with pytest.raises(ValueError, match="unsupported kind"): info.dtypes(kind="invalid") + def test_dtypes_invalid_device(): - with pytest.raises(ValueError, match='Device not understood'): - info.dtypes(device='gpu') + with pytest.raises(ValueError, match="Device not understood"): + info.dtypes(device="gpu") + def test_devices(): - assert info.devices() == ['cpu'] + assert info.devices() == ["cpu"] From 8e75175ae115f60ad28a50eb20337cd71e79ea0d Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 30 May 2024 10:04:05 +0200 Subject: [PATCH 421/980] BUG: Ensure traverse info copy works also if in-place (used by nditer) Also removes a comment that was always fishy: we should have used this function, and we did, except for a funny little in-place bug! --- numpy/_core/src/multiarray/dtype_traversal.c | 10 +--------- numpy/_core/src/multiarray/dtype_traversal.h | 8 ++++++-- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/numpy/_core/src/multiarray/dtype_traversal.c b/numpy/_core/src/multiarray/dtype_traversal.c index 3e20c8c85c1a..91b1889b7d1f 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.c +++ b/numpy/_core/src/multiarray/dtype_traversal.c @@ -398,13 +398,6 @@ subarray_traverse_data_free(NpyAuxData *data) } -/* - * We seem to be neither using nor exposing this right now, so leave it NULL. - * (The implementation below should be functional.) - */ -#define subarray_traverse_data_clone NULL - -#ifndef subarray_traverse_data_clone /* traverse data copy function */ static NpyAuxData * subarray_traverse_data_clone(NpyAuxData *data) @@ -426,7 +419,6 @@ subarray_traverse_data_clone(NpyAuxData *data) return (NpyAuxData *)newdata; } -#endif static int @@ -469,7 +461,7 @@ get_subarray_traverse_func( auxdata->count = size; auxdata->base.free = &subarray_traverse_data_free; - auxdata->base.clone = subarray_traverse_data_clone; + auxdata->base.clone = &subarray_traverse_data_clone; if (get_traverse_func( traverse_context, dtype, aligned, diff --git a/numpy/_core/src/multiarray/dtype_traversal.h b/numpy/_core/src/multiarray/dtype_traversal.h index 7a06328cb2e0..5e915ba4d40e 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.h +++ b/numpy/_core/src/multiarray/dtype_traversal.h @@ -69,18 +69,22 @@ static inline int NPY_traverse_info_copy( NPY_traverse_info *traverse_info, NPY_traverse_info *original) { - traverse_info->func = NULL; + /* Note that original may be identical to traverse_info! 
*/ if (original->func == NULL) { /* Allow copying also of unused clear info */ + traverse_info->func = NULL; return 0; } - traverse_info->auxdata = NULL; if (original->auxdata != NULL) { traverse_info->auxdata = NPY_AUXDATA_CLONE(original->auxdata); if (traverse_info->auxdata == NULL) { + traverse_info->func = NULL; return -1; } } + else { + traverse_info->auxdata = NULL; + } Py_INCREF(original->descr); traverse_info->descr = original->descr; traverse_info->func = original->func; From e2363798ddc049da031f7b37b60c299f129a89f9 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 30 May 2024 11:45:31 +0200 Subject: [PATCH 422/980] BUG: Fix cleanup in size 0 put error Also clean up code a bit, it's confusing to have ifs that can only take one path. --- numpy/_core/src/multiarray/item_selection.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 656688bda2fc..c45b3694a035 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -397,7 +397,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, if ((ni > 0) && (PyArray_Size((PyObject *)self) == 0)) { PyErr_SetString(PyExc_IndexError, "cannot replace elements of an empty array"); - return NULL; + goto fail; } Py_INCREF(PyArray_DESCR(self)); values = (PyArrayObject *)PyArray_FromAny(values0, PyArray_DESCR(self), 0, 0, @@ -419,9 +419,8 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, Py_INCREF(PyArray_DESCR(self)); obj = (PyArrayObject *)PyArray_FromArray(self, PyArray_DESCR(self), flags); - if (obj != self) { - copied = 1; - } + copied = 1; + assert(self != obj); self = obj; } max_item = PyArray_SIZE(self); From e86cca6939abc0b81f7770081614715079db359b Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 30 May 2024 12:45:24 +0200 Subject: [PATCH 423/980] TST: Fix copy __dlpack__ test and also test copy=False --- numpy/_core/tests/test_dlpack.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index ee59ea8aa1bc..d9205912124e 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -34,11 +34,16 @@ def test_dunder_dlpack_stream(self): x.__dlpack__(stream=1) def test_dunder_dlpack_copy(self): + # Checks the argument parsing of __dlpack__ explicitly. + # Honoring the flag is tested in the from_dlpack round-tripping test. x = np.arange(5) - x.__dlpack__(stream=None) + x.__dlpack__(copy=True) + x.__dlpack__(copy=None) + x.__dlpack__(copy=False) - with pytest.raises(RuntimeError): - x.__dlpack__(stream=1) + with pytest.raises(ValueError): + # NOTE: The copy converter should be stricter, but not just here. 
+ x.__dlpack__(copy=np.array([1, 2, 3])) def test_strides_not_multiple_of_itemsize(self): dt = np.dtype([('int', np.int32), ('char', np.int8)]) @@ -155,6 +160,8 @@ def test_copy(self): y = np.from_dlpack(x) assert np.may_share_memory(x, y) + y = np.from_dlpack(x, copy=False) + assert np.may_share_memory(x, y) y = np.from_dlpack(x, copy=True) assert not np.may_share_memory(x, y) From fde3a3763b8eb25371605000981a0decc06de567 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 30 May 2024 13:07:04 +0200 Subject: [PATCH 424/980] BUG: Should indicate copied if we can --- numpy/_core/src/multiarray/dlpack.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index bdc5936919c3..05935f608a29 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -297,7 +297,7 @@ fill_dl_tensor_information( static PyObject * create_dlpack_capsule( - PyArrayObject *self, int versioned, DLDevice *result_device) + PyArrayObject *self, int versioned, DLDevice *result_device, int copied) { int ndim = PyArray_NDIM(self); @@ -337,6 +337,9 @@ create_dlpack_capsule( if (!PyArray_CHKFLAGS(self, NPY_ARRAY_WRITEABLE)) { managed->flags |= DLPACK_FLAG_BITMASK_READ_ONLY; } + if (copied) { + managed->flags |= DLPACK_FLAG_BITMASK_IS_COPIED; + } } else { DLManagedTensor *managed = (DLManagedTensor *)ptr; @@ -470,7 +473,8 @@ array_dlpack(PyArrayObject *self, * can then be removed again. */ PyObject *res = create_dlpack_capsule( - self, major_version >= 1, &result_device); + self, major_version >= 1, &result_device, + copy_mode == NPY_COPY_ALWAYS); Py_DECREF(self); return res; From 98e86d52ed79eb8810960bdcfac11271fc6c5434 Mon Sep 17 00:00:00 2001 From: Jules <57632293+JuliaPoo@users.noreply.github.com> Date: Thu, 30 May 2024 20:27:28 +0800 Subject: [PATCH 425/980] DOC: update ufunc tutorials to use setuptools (#26566) * DOC: update ufunc tutorials to use setuptools See #22027. Currently ufunc tutorials use the deprecated distutils, which is removed in Python 3.12. In addition, I've updated the sample output and fixed a mistake in the last example. --- doc/source/user/c-info.ufunc-tutorial.rst | 105 ++++++++++------------ 1 file changed, 48 insertions(+), 57 deletions(-) diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 38baa28c7307..6b1aca65ed00 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -182,21 +182,16 @@ site-packages directory. $python setup.py install will install the module in your site-packages file. - See the distutils section of - 'Extending and Embedding the Python Interpreter' - at docs.python.org for more information. + See the setuptools section 'Building Extension Modules' + at setuptools.pypa.io for more information. ''' + from setuptools import setup, Extension + import numpy as np - from distutils.core import setup, Extension + module1 = Extension('spam', sources=['spammodule.c']) - module1 = Extension('spam', sources=['spammodule.c'], - include_dirs=['/usr/local/lib']) - - setup(name = 'spam', - version='1.0', - description='This is my spam package', - ext_modules = [module1]) + setup(name='spam', version='1.0', ext_modules=[module1]) Once the spam module is imported into python, you can call logit @@ -355,8 +350,8 @@ using ``python setup.py build_ext --inplace``.
''' setup.py file for single_type_logit.c Note that since this is a numpy extension - we use numpy.distutils instead of - distutils from the python standard library. + we add an include_dirs=[get_include()] so that the + extension is built with numpy's C/C++ header files. Calling $python setup.py build_ext --inplace @@ -373,33 +368,26 @@ using ``python setup.py build_ext --inplace``. $python setup.py install will install the module in your site-packages file. - See the distutils section of - 'Extending and Embedding the Python Interpreter' - at docs.python.org and the documentation - on numpy.distutils for more information. + See the setuptools section 'Building Extension Modules' + at setuptools.pypa.io for more information. ''' + from setuptools import setup, Extension + from numpy import get_include - def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('npufunc_directory', - parent_package, - top_path) - config.add_extension('npufunc', ['single_type_logit.c']) + npufunc = Extension('npufunc', + sources=['single_type_logit.c'], + include_dirs=[get_include()]) - return config + setup(name='npufunc', version='1.0', ext_modules=[npufunc]) - if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) After the above has been installed, it can be imported and used as follows. >>> import numpy as np >>> import npufunc >>> npufunc.logit(0.5) -0.0 +np.float64(0.0) >>> a = np.linspace(0,1,5) >>> npufunc.logit(a) array([ -inf, -1.09861229, 0. , 1.09861229, inf]) @@ -607,8 +595,10 @@ or installed to site-packages via ``python setup.py install``. ''' setup.py file for multi_type_logit.c Note that since this is a numpy extension - we use numpy.distutils instead of - distutils from the python standard library. + we add an include_dirs=[get_include()] so that the + extension is built with numpy's C/C++ header files. + Furthermore, we also have to include the npymath + lib for half-float d-type. Calling $python setup.py build_ext --inplace @@ -625,38 +615,31 @@ or installed to site-packages via ``python setup.py install``. $python setup.py install will install the module in your site-packages file. - See the distutils section of - 'Extending and Embedding the Python Interpreter' - at docs.python.org and the documentation - on numpy.distutils for more information. + See the setuptools section 'Building Extension Modules' + at setuptools.pypa.io for more information. ''' + from setuptools import setup, Extension + from numpy import get_include + from os import path - def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration, get_info - - #Necessary for the half-float d-type. - info = get_info('npymath') + path_to_npymath = path.join(get_include(), '..', 'lib') + npufunc = Extension('npufunc', + sources=['multi_type_logit.c'], + include_dirs=[get_include()], + # Necessary for the half-float d-type. + library_dirs=[path_to_npymath], + libraries=["npymath"]) - config = Configuration('npufunc_directory', - parent_package, - top_path) - config.add_extension('npufunc', - ['multi_type_logit.c'], - extra_info=info) + setup(name='npufunc', version='1.0', ext_modules=[npufunc]) - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) After the above has been installed, it can be imported and used as follows. 
>>> import numpy as np >>> import npufunc >>> npufunc.logit(0.5) -0.0 +np.float64(0.0) >>> a = np.linspace(0,1,5) >>> npufunc.logit(a) array([ -inf, -1.09861229, 0. , 1.09861229, inf]) @@ -678,13 +661,17 @@ the line .. code-block:: python - config.add_extension('npufunc', ['single_type_logit.c']) + npufunc = Extension('npufunc', + sources=['single_type_logit.c'], + include_dirs=[get_include()]) is replaced with .. code-block:: python - config.add_extension('npufunc', ['multi_arg_logit.c']) + npufunc = Extension('npufunc', + sources=['multi_arg_logit.c'], + include_dirs=[get_include()]) The C file is given below. The ufunc generated takes two arguments ``A`` and ``B``. It returns a tuple whose first element is ``A * B`` and whose second @@ -809,13 +796,17 @@ the line .. code-block:: python - config.add_extension('npufunc', ['single_type_logit.c']) + npufunc = Extension('npufunc', + sources=['single_type_logit.c'], + include_dirs=[get_include()]) is replaced with .. code-block:: python - config.add_extension('npufunc', ['add_triplet.c']) + npufunc = Extension('npufunc', + sources=['add_triplet.c'], + include_dirs=[get_include()]) The C file is given below. @@ -892,7 +883,7 @@ The C file is given below. NULL }; - PyMODINIT_FUNC PyInit_struct_ufunc_test(void) + PyMODINIT_FUNC PyInit_npufunc(void) { PyObject *m, *add_triplet, *d; PyObject *dtype_dict; From 5d712a8ff35303b2fde708f9ff006a4b8e0ec6c4 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Wed, 29 May 2024 17:34:21 -0600 Subject: [PATCH 426/980] ENH: Add unstack() unstack() is a new function from the 2023.12 version of the array API, which serves as the inverse of stack(), that is, unstack(x, axis=axis) returns a tuple of arrays along axis that when stacked along axis would recreate x. The implementation is in pure Python, since it is just a straightforward iteration along an index, but if it is preferable it can be moved to an implementation in C. I haven't yet added any tests. --- numpy/__init__.py | 4 +- numpy/_core/shape_base.py | 82 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 80 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index e4696ba2108b..94962783542b 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -165,8 +165,8 @@ str_, subtract, sum, swapaxes, take, tan, tanh, tensordot, timedelta64, trace, transpose, true_divide, trunc, typecodes, ubyte, ufunc, uint, uint16, uint32, uint64, uint8, uintc, uintp, ulong, - ulonglong, unsignedinteger, ushort, var, vdot, vecdot, void, vstack, - where, zeros, zeros_like + ulonglong, unsignedinteger, unstack, ushort, var, vdot, vecdot, void, + vstack, where, zeros, zeros_like ) # NOTE: It's still under discussion whether these aliases diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index 07f185ed0c10..da5a9d897261 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -1,5 +1,5 @@ __all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', - 'stack', 'vstack'] + 'stack', 'unstack', 'vstack'] import functools import itertools @@ -11,7 +11,6 @@ from .multiarray import array, asanyarray, normalize_axis_index from . import fromnumeric as _from_nx - array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -261,6 +260,7 @@ def vstack(tup, *, dtype=None, casting="same_kind"): dstack : Stack arrays in sequence depth wise (along third axis). column_stack : Stack 1-D arrays as columns into a 2-D array. 
vsplit : Split an array into multiple sub-arrays vertically (row-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. Examples -------- @@ -331,8 +331,9 @@ def hstack(tup, *, dtype=None, casting="same_kind"): vstack : Stack arrays in sequence vertically (row wise). dstack : Stack arrays in sequence depth wise (along third axis). column_stack : Stack 1-D arrays as columns into a 2-D array. - hsplit : Split an array into multiple sub-arrays + hsplit : Split an array into multiple sub-arrays horizontally (column-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. Examples -------- @@ -414,6 +415,7 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): concatenate : Join a sequence of arrays along an existing axis. block : Assemble an nd-array from nested lists of blocks. split : Split array into a list of multiple sub-arrays of equal size. + unstack : Split an array into a tuple of sub-arrays along an axis. Examples -------- @@ -455,6 +457,77 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): return _nx.concatenate(expanded_arrays, axis=axis, out=out, dtype=dtype, casting=casting) +def _unstack_dispatcher(x, *, axis=None): + return (x,) + +@array_function_dispatch(_unstack_dispatcher) +def unstack(x, /, *, axis=0): + """ + Splits an array into a sequence of arrays along the given axis. + + The ``axis`` parameter specifies the axis along which the array will be + split. of the new axis in the dimensions of the result. For example, if + ``axis=0`` it will be the first dimension and if ``axis=-1`` it will be + the last dimension. + + The result is a tuple of arrays split along ``axis``. ``unstack`` serves + as the reverse operation of :py:func:`stack`, i.e., ``stack(unstack(x, + axis=axis), axis=axis) == x``. + + .. versionadded:: 2.1.0 + + Parameters + ---------- + x : ndarray + The array to be unstacked. + + Returns + ------- + unstacked : tuple of ndarrays + The unstacked arrays. + + See Also + -------- + stack : Join a sequence of arrays along a new axis. + concatenate : Join a sequence of arrays along an existing axis. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. + block : Assemble an nd-array from nested lists of blocks. + split : Split array into a list of multiple sub-arrays of equal size. + + Examples + -------- + >>> arr = np.arange(24).reshape((2, 3, 4)) + >>> np.unstack(arr) + (array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]), + array([[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]])) + >>> np.unstack(arr, axis=1) + (array([[ 0, 1, 2, 3], + [12, 13, 14, 15]]), + array([[ 4, 5, 6, 7], + [16, 17, 18, 19]]), + array([[ 8, 9, 10, 11], + [20, 21, 22, 23]])) + >>> arr2 = np.stack(np.unstack(arr, axis=1), axis=1) + >>> arr2.shape + (2, 3, 4) + >>> np.all(arr == arr2) + np.True_ + + """ + x = asanyarray(x) + + axis = normalize_axis_index(axis, x.ndim) + slices = (slice(None),) * axis + return tuple(x[slices + (i, ...)] for i in range(x.shape[axis])) # Internal functions to eliminate the overhead of repeated dispatch in one of # the two possible paths inside np.block. 
@@ -709,7 +782,7 @@ def block(arrays): second-last dimension (-2), and so on until the outermost list is reached. Blocks can be of any dimension, but will not be broadcasted using - the normal rules. Instead, leading axes of size 1 are inserted, + the normal rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim`` the same for all blocks. This is primarily useful for working with scalars, and means that code like ``np.block([v, 1])`` is valid, where ``v.ndim == 1``. @@ -755,6 +828,7 @@ def block(arrays): dstack : Stack arrays in sequence depth wise (along third axis). column_stack : Stack 1-D arrays as columns into a 2-D array. vsplit : Split an array into multiple sub-arrays vertically (row-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. Notes ----- From c79a5fb895aba1311a82493649c0daf43be4ad1c Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Thu, 30 May 2024 15:40:55 -0300 Subject: [PATCH 427/980] DOC: move gradient see examples text --- numpy/lib/_function_base_impl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 87c450fb6b07..a6a7241e3649 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -986,7 +986,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): ---------- f : array_like An N-dimensional array containing samples of a scalar function. - varargs : list of scalar or array, optional. (see Examples below). + varargs : list of scalar or array, optional Spacing between f values. Default unitary spacing for all dimensions. Spacing can be specified using: @@ -999,7 +999,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): 4. Any combination of N scalars/arrays with the meaning of 2. and 3. If `axis` is given, the number of varargs must equal the number of axes. - Default: 1. + Default: 1. (see Examples below). edge_order : {1, 2}, optional Gradient is calculated using N-th order accurate differences From 29ec7b02f303f8d13195174cfd6884c30775e516 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 30 May 2024 14:47:21 -0600 Subject: [PATCH 428/980] Change __array_namespace_info__ to a class This makes the functions on the namespace easier to access for things like Sphinx. --- numpy/lib/_info.py | 519 ++++++++++++++++++++++----------------------- 1 file changed, 258 insertions(+), 261 deletions(-) diff --git a/numpy/lib/_info.py b/numpy/lib/_info.py index 370fc9287804..e2c0237a91f0 100644 --- a/numpy/lib/_info.py +++ b/numpy/lib/_info.py @@ -70,7 +70,7 @@ ) -def __array_namespace_info__() -> ModuleType: +class __array_namespace_info__: """ Get the array API inspection namespace for NumPy. @@ -100,285 +100,282 @@ def __array_namespace_info__() -> ModuleType: 'integral': numpy.int64, 'indexing': numpy.int64} """ - import numpy.lib._info - return numpy.lib._info + def capabilities(self) -> Capabilities: + """ + Return a dictionary of array API library capabilities. + The resulting dictionary has the following keys: -def capabilities() -> Capabilities: - """ - Return a dictionary of array API library capabilities. - - The resulting dictionary has the following keys: + - **"boolean indexing"**: boolean indicating whether an array library + supports boolean indexing. Always ``True`` for NumPy. - - **"boolean indexing"**: boolean indicating whether an array library - supports boolean indexing. Always ``True`` for NumPy. 
+ - **"data-dependent shapes"**: boolean indicating whether an array library + supports data-dependent output shapes. Always ``True`` for NumPy. - - **"data-dependent shapes"**: boolean indicating whether an array library - supports data-dependent output shapes. Always ``True`` for NumPy. + See + https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html + for more details. - See - https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html - for more details. + See Also + -------- + __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices - See Also - -------- - default_device, default_dtypes, dtypes, devices + Returns + ------- + capabilities : Capabilities + A dictionary of array API library capabilities. - Returns - ------- - capabilities : Capabilities - A dictionary of array API library capabilities. - - Examples - -------- - >>> info = np.__array_namespace_info__() - >>> info.capabilities() - {'boolean indexing': True, - 'data-dependent shapes': True} + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.capabilities() + {'boolean indexing': True, + 'data-dependent shapes': True} - """ - return { - "boolean indexing": True, - "data-dependent shapes": True, - # 'max rank' will be part of the 2024.12 standard - # "max rank": 64, - } - - -def default_device() -> str: - """ - The default device used for new NumPy arrays. - - For NumPy, this always returns ``'cpu'``. + """ + return { + "boolean indexing": True, + "data-dependent shapes": True, + # 'max rank' will be part of the 2024.12 standard + # "max rank": 64, + } - See Also - -------- - capabilities, default_dtypes, dtypes, devices - Returns - ------- - device : str + def default_device(self) -> str: + """ The default device used for new NumPy arrays. - Examples - -------- - >>> info = np.__array_namespace_info__() - >>> info.default_device() - 'cpu' - - """ - return "cpu" - - -def default_dtypes( - *, - device: Optional[str] = None, -) -> DefaultDataTypes: - """ - The default data types used for new NumPy arrays. - - For NumPy, this always returns the following dictionary: - - - **"real floating"**: ``numpy.float64`` - - **"complex floating"**: ``numpy.complex128`` - - **"integral"**: ``numpy.int64`` - - **"indexing"**: ``numpy.int64`` - - Parameters - ---------- - device : str, optional - The device to get the default data types for. For NumPy, only - ``'cpu'`` is allowed. - - Returns - ------- - dtypes : DefaultDataTypes - A dictionary describing the default data types used for new NumPy - arrays. - - See Also - -------- - capabilities, default_device, dtypes, devices - - Examples - -------- - >>> info = np.__array_namespace_info__() - >>> info.default_dtypes() - {'real floating': numpy.float64, - 'complex floating': numpy.complex128, - 'integral': numpy.int64, - 'indexing': numpy.int64} - - """ - if device not in ["cpu", None]: - raise ValueError( - f'Device not understood. Only "cpu" is allowed, but received: {device}' - ) - return { - "real floating": float64, - "complex floating": complex128, - "integral": int64, - "indexing": int64, - } - - -def dtypes( - *, - device: Optional[str] = None, - kind: Optional[Union[str, Tuple[str, ...]]] = None, -) -> DataTypes: - """ - The array API data types supported by NumPy. - - Note that this function only returns data types that are defined by the - array API. 
- - Parameters - ---------- - device : str, optional - The device to get the data types for. For NumPy, only ``'cpu'`` is - allowed. - kind : str or tuple of str, optional - The kind of data types to return. If ``None``, all data types are - returned. If a string, only data types of that kind are returned. If a - tuple, a dictionary containing the union of the given kinds is - returned. The following kinds are supported: - - - ``'bool'``: boolean data types (e.g., ``bool``). - - ``'signed integer'``: signed integer data types (e.g., ``int8``, - ``int16``, ``int32``, ``int64``). - - ``'unsigned integer'``: unsigned integer data types (e.g., - ``uint8``, ``uint16``, ``uint32``, ``uint64``). - - ``'integral'``: integer data types. Shorthand for ``('signed - integer', 'unsigned integer')``. - - ``'real floating'``: real-valued floating-point data types - (e.g., ``float32``, ``float64``). - - ``'complex floating'``: complex floating-point data types (e.g., - ``complex64``, ``complex128``). - - ``'numeric'``: numeric data types. Shorthand for ``('integral', - 'real floating', 'complex floating')``. - - Returns - ------- - dtypes : DataTypes - A dictionary mapping the names of data types to the corresponding - NumPy data types. - - See Also - -------- - capabilities, default_device, default_dtypes, devices - - Examples - -------- - >>> info = np.__array_namespace_info__() - >>> info.dtypes(kind='signed integer') - {'int8': numpy.int8, - 'int16': numpy.int16, - 'int32': numpy.int32, - 'int64': numpy.int64} - - """ - if device not in ["cpu", None]: - raise ValueError( - f'Device not understood. Only "cpu" is allowed, but received: {device}' - ) - if kind is None: - return { - "bool": bool, - "int8": int8, - "int16": int16, - "int32": int32, - "int64": int64, - "uint8": uint8, - "uint16": uint16, - "uint32": uint32, - "uint64": uint64, - "float32": float32, - "float64": float64, - "complex64": complex64, - "complex128": complex128, - } - if kind == "bool": - return {"bool": bool} - if kind == "signed integer": + For NumPy, this always returns ``'cpu'``. + + See Also + -------- + __array_namespace_info__.capabilities, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices + + Returns + ------- + device : str + The default device used for new NumPy arrays. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_device() + 'cpu' + + """ + return "cpu" + + def default_dtypes( + self, + *, + device: Optional[str] = None, + ) -> DefaultDataTypes: + """ + The default data types used for new NumPy arrays. + + For NumPy, this always returns the following dictionary: + + - **"real floating"**: ``numpy.float64`` + - **"complex floating"**: ``numpy.complex128`` + - **"integral"**: ``numpy.int64`` + - **"indexing"**: ``numpy.int64`` + + Parameters + ---------- + device : str, optional + The device to get the default data types for. For NumPy, only + ``'cpu'`` is allowed. + + Returns + ------- + dtypes : DefaultDataTypes + A dictionary describing the default data types used for new NumPy + arrays. 
+ + See Also + -------- + __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.dtypes, __array_namespace_info__.devices + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_dtypes() + {'real floating': numpy.float64, + 'complex floating': numpy.complex128, + 'integral': numpy.int64, + 'indexing': numpy.int64} + + """ + if device not in ["cpu", None]: + raise ValueError( + f'Device not understood. Only "cpu" is allowed, but received: {device}' + ) return { - "int8": int8, - "int16": int16, - "int32": int32, - "int64": int64, + "real floating": float64, + "complex floating": complex128, + "integral": int64, + "indexing": int64, } - if kind == "unsigned integer": - return { - "uint8": uint8, - "uint16": uint16, - "uint32": uint32, - "uint64": uint64, - } - if kind == "integral": - return { - "int8": int8, - "int16": int16, - "int32": int32, - "int64": int64, - "uint8": uint8, - "uint16": uint16, - "uint32": uint32, - "uint64": uint64, - } - if kind == "real floating": - return { - "float32": float32, - "float64": float64, - } - if kind == "complex floating": - return { - "complex64": complex64, - "complex128": complex128, - } - if kind == "numeric": - return { - "int8": int8, - "int16": int16, - "int32": int32, - "int64": int64, - "uint8": uint8, - "uint16": uint16, - "uint32": uint32, - "uint64": uint64, - "float32": float32, - "float64": float64, - "complex64": complex64, - "complex128": complex128, - } - if isinstance(kind, tuple): - res = {} - for k in kind: - res.update(dtypes(kind=k)) - return res - raise ValueError(f"unsupported kind: {kind!r}") -def devices() -> List[str]: - """ - The devices supported by NumPy. + def dtypes( + self, + *, + device: Optional[str] = None, + kind: Optional[Union[str, Tuple[str, ...]]] = None, + ) -> DataTypes: + """ + The array API data types supported by NumPy. + + Note that this function only returns data types that are defined by the + array API. + + Parameters + ---------- + device : str, optional + The device to get the data types for. For NumPy, only ``'cpu'`` is + allowed. + kind : str or tuple of str, optional + The kind of data types to return. If ``None``, all data types are + returned. If a string, only data types of that kind are returned. If a + tuple, a dictionary containing the union of the given kinds is + returned. The following kinds are supported: + + - ``'bool'``: boolean data types (e.g., ``bool``). + - ``'signed integer'``: signed integer data types (e.g., ``int8``, + ``int16``, ``int32``, ``int64``). + - ``'unsigned integer'``: unsigned integer data types (e.g., + ``uint8``, ``uint16``, ``uint32``, ``uint64``). + - ``'integral'``: integer data types. Shorthand for ``('signed + integer', 'unsigned integer')``. + - ``'real floating'``: real-valued floating-point data types + (e.g., ``float32``, ``float64``). + - ``'complex floating'``: complex floating-point data types (e.g., + ``complex64``, ``complex128``). + - ``'numeric'``: numeric data types. Shorthand for ``('integral', + 'real floating', 'complex floating')``. + + Returns + ------- + dtypes : DataTypes + A dictionary mapping the names of data types to the corresponding + NumPy data types. 
+ + See Also + -------- + __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.devices + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.dtypes(kind='signed integer') + {'int8': numpy.int8, + 'int16': numpy.int16, + 'int32': numpy.int32, + 'int64': numpy.int64} + + """ + if device not in ["cpu", None]: + raise ValueError( + f'Device not understood. Only "cpu" is allowed, but received: {device}' + ) + if kind is None: + return { + "bool": bool, + "int8": int8, + "int16": int16, + "int32": int32, + "int64": int64, + "uint8": uint8, + "uint16": uint16, + "uint32": uint32, + "uint64": uint64, + "float32": float32, + "float64": float64, + "complex64": complex64, + "complex128": complex128, + } + if kind == "bool": + return {"bool": bool} + if kind == "signed integer": + return { + "int8": int8, + "int16": int16, + "int32": int32, + "int64": int64, + } + if kind == "unsigned integer": + return { + "uint8": uint8, + "uint16": uint16, + "uint32": uint32, + "uint64": uint64, + } + if kind == "integral": + return { + "int8": int8, + "int16": int16, + "int32": int32, + "int64": int64, + "uint8": uint8, + "uint16": uint16, + "uint32": uint32, + "uint64": uint64, + } + if kind == "real floating": + return { + "float32": float32, + "float64": float64, + } + if kind == "complex floating": + return { + "complex64": complex64, + "complex128": complex128, + } + if kind == "numeric": + return { + "int8": int8, + "int16": int16, + "int32": int32, + "int64": int64, + "uint8": uint8, + "uint16": uint16, + "uint32": uint32, + "uint64": uint64, + "float32": float32, + "float64": float64, + "complex64": complex64, + "complex128": complex128, + } + if isinstance(kind, tuple): + res = {} + for k in kind: + res.update(self.dtypes(kind=k)) + return res + raise ValueError(f"unsupported kind: {kind!r}") + + + def devices(self) -> List[str]: + """ + The devices supported by NumPy. - For NumPy, this always returns ``['cpu']``. + For NumPy, this always returns ``['cpu']``. - Returns - ------- - devices : list of str - The devices supported by NumPy. + Returns + ------- + devices : list of str + The devices supported by NumPy. 
- See Also - -------- - capabilities, default_device, default_dtypes, dtypes + See Also + -------- + __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes - Examples - -------- - >>> info = np.__array_namespace_info__() - >>> info.devices() - ['cpu'] + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.devices() + ['cpu'] - """ - return ["cpu"] + """ + return ["cpu"] From d6b2831ef4382945a284d5c70232f5c8ca3a12c1 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 30 May 2024 14:51:46 -0600 Subject: [PATCH 429/980] Rename numpy/lib/_info.py to numpy/_array_api_info.py --- numpy/__init__.py | 3 ++- numpy/{lib/_info.py => _array_api_info.py} | 0 .../tests/test_info.py => _core/tests/test_array_api_info.py} | 0 numpy/meson.build | 1 + 4 files changed, 3 insertions(+), 1 deletion(-) rename numpy/{lib/_info.py => _array_api_info.py} (100%) rename numpy/{lib/tests/test_info.py => _core/tests/test_array_api_info.py} (100%) diff --git a/numpy/__init__.py b/numpy/__init__.py index 0e6a1bcfbe42..21318aee7da9 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -235,7 +235,6 @@ ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index, index_exp ) - from .lib._info import __array_namespace_info__ from . import matrixlib as _mat from .matrixlib import ( @@ -293,6 +292,8 @@ __array_api_version__ = "2022.12" + from ._array_api_info import __array_namespace_info__ + # now that numpy core module is imported, can initialize limits _core.getlimits._register_known_types() diff --git a/numpy/lib/_info.py b/numpy/_array_api_info.py similarity index 100% rename from numpy/lib/_info.py rename to numpy/_array_api_info.py diff --git a/numpy/lib/tests/test_info.py b/numpy/_core/tests/test_array_api_info.py similarity index 100% rename from numpy/lib/tests/test_info.py rename to numpy/_core/tests/test_array_api_info.py diff --git a/numpy/meson.build b/numpy/meson.build index 1190c00e0042..63c6b0e6d5aa 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -226,6 +226,7 @@ python_sources = [ '__init__.pxd', '__init__.py', '__init__.pyi', + '_array_api_info.py', '_configtool.py', '_distributor_init.py', '_globals.py', From fed25f16557c059d255961c510b780d1c7898bfd Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 30 May 2024 14:57:28 -0600 Subject: [PATCH 430/980] Change the default integral and indexing dtypes from int64 to intp --- numpy/_array_api_info.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index e2c0237a91f0..5abe3482d34d 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -12,6 +12,7 @@ from numpy import ( bool, + intp, int8, int16, int32, @@ -178,8 +179,8 @@ def default_dtypes( - **"real floating"**: ``numpy.float64`` - **"complex floating"**: ``numpy.complex128`` - - **"integral"**: ``numpy.int64`` - - **"indexing"**: ``numpy.int64`` + - **"integral"**: ``numpy.intp`` + - **"indexing"**: ``numpy.intp`` Parameters ---------- @@ -214,8 +215,8 @@ def default_dtypes( return { "real floating": float64, "complex floating": complex128, - "integral": int64, - "indexing": int64, + "integral": intp, + "indexing": intp, } From 960d8d7caabba64117178cefcc1a4140f96d8e4d Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 30 May 2024 15:06:30 -0600 Subject: [PATCH 431/980] Update default_dtypes() test to be more indirect --- numpy/_core/tests/test_array_api_info.py | 54 
++++++++---------------- 1 file changed, 17 insertions(+), 37 deletions(-) diff --git a/numpy/_core/tests/test_array_api_info.py b/numpy/_core/tests/test_array_api_info.py index 1308a35230cc..0adc193d47e7 100644 --- a/numpy/_core/tests/test_array_api_info.py +++ b/numpy/_core/tests/test_array_api_info.py @@ -51,43 +51,23 @@ def test_dtypes_all(): } -@pytest.mark.parametrize( - "kind,expected", - [ - ("bool", {"bool": np.bool_}), - ( - "signed integer", - {"int8": np.int8, "int16": np.int16, "int32": np.int32, "int64": np.int64}, - ), - ( - "unsigned integer", - { - "uint8": np.uint8, - "uint16": np.uint16, - "uint32": np.uint32, - "uint64": np.uint64, - }, - ), - ( - "integral", - { - "int8": np.int8, - "int16": np.int16, - "int32": np.int32, - "int64": np.int64, - "uint8": np.uint8, - "uint16": np.uint16, - "uint32": np.uint32, - "uint64": np.uint64, - }, - ), - ("real floating", {"float32": np.float32, "float64": np.float64}), - ("complex floating", {"complex64": np.complex64, "complex128": np.complex128}), - ], -) -def test_dtypes_kind(kind, expected): - assert info.dtypes(kind=kind) == expected - +dtype_categories = { + "bool": {'bool': np.bool_}, + "signed integer": {'int8': np.int8, 'int16': np.int16, 'int32': np.int32, 'int64': np.int64}, + "unsigned integer": {'uint8': np.uint8, 'uint16': np.uint16, 'uint32': np.uint32, 'uint64': np.uint64}, + "integral": ('signed integer', 'unsigned integer'), + "real floating": {'float32': np.float32, 'float64': np.float64}, + "complex floating": {'complex64': np.complex64, 'complex128': np.complex128}, + "numeric": ('integral', 'real floating', 'complex floating'), +} + +@pytest.mark.parametrize("kind", dtype_categories) +def test_dtypes_kind(kind): + expected = dtype_categories[kind] + if isinstance(expected, tuple): + assert info.dtypes(kind=kind) == info.dtypes(kind=expected) + else: + assert info.dtypes(kind=kind) == expected def test_dtypes_tuple(): dtypes = info.dtypes(kind=("bool", "integral")) From fd946aae2b72a998274ddc33744db457b146a49e Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Thu, 30 May 2024 15:10:36 -0600 Subject: [PATCH 432/980] Change the array API inspection functions to return dtype instances --- numpy/_array_api_info.py | 103 ++++++++++++++++++++------------------- 1 file changed, 52 insertions(+), 51 deletions(-) diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 5abe3482d34d..56ac8bc93bb4 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -10,7 +10,8 @@ from typing import TYPE_CHECKING -from numpy import ( +from numpy._core import ( + dtype, bool, intp, int8, @@ -28,7 +29,7 @@ ) if TYPE_CHECKING: - from typing import Optional, Union, Tuple, List, ModuleType, TypedDict + from typing import Optional, Union, Tuple, List, TypedDict from numpy.dtyping import DtypeLike Capabilities = TypedDict( @@ -213,10 +214,10 @@ def default_dtypes( f'Device not understood. 
Only "cpu" is allowed, but received: {device}' ) return { - "real floating": float64, - "complex floating": complex128, - "integral": intp, - "indexing": intp, + "real floating": dtype(float64), + "complex floating": dtype(complex128), + "integral": dtype(intp), + "indexing": dtype(intp), } @@ -283,71 +284,71 @@ def dtypes( ) if kind is None: return { - "bool": bool, - "int8": int8, - "int16": int16, - "int32": int32, - "int64": int64, - "uint8": uint8, - "uint16": uint16, - "uint32": uint32, - "uint64": uint64, - "float32": float32, - "float64": float64, - "complex64": complex64, - "complex128": complex128, + "bool": dtype(bool), + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + "float32": dtype(float32), + "float64": dtype(float64), + "complex64": dtype(complex64), + "complex128": dtype(complex128), } if kind == "bool": return {"bool": bool} if kind == "signed integer": return { - "int8": int8, - "int16": int16, - "int32": int32, - "int64": int64, + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), } if kind == "unsigned integer": return { - "uint8": uint8, - "uint16": uint16, - "uint32": uint32, - "uint64": uint64, + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), } if kind == "integral": return { - "int8": int8, - "int16": int16, - "int32": int32, - "int64": int64, - "uint8": uint8, - "uint16": uint16, - "uint32": uint32, - "uint64": uint64, + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), } if kind == "real floating": return { - "float32": float32, - "float64": float64, + "float32": dtype(float32), + "float64": dtype(float64), } if kind == "complex floating": return { - "complex64": complex64, - "complex128": complex128, + "complex64": dtype(complex64), + "complex128": dtype(complex128), } if kind == "numeric": return { - "int8": int8, - "int16": int16, - "int32": int32, - "int64": int64, - "uint8": uint8, - "uint16": uint16, - "uint32": uint32, - "uint64": uint64, - "float32": float32, - "float64": float64, - "complex64": complex64, - "complex128": complex128, + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + "float32": dtype(float32), + "float64": dtype(float64), + "complex64": dtype(complex64), + "complex128": dtype(complex128), } if isinstance(kind, tuple): res = {} From 00171681b26f0c0e8c87e061b7a88ad5c5f9fed6 Mon Sep 17 00:00:00 2001 From: Jules Date: Fri, 31 May 2024 12:48:49 +0800 Subject: [PATCH 433/980] ENH: Add copy and device keyword to np.asanyarray to match np.asarray See #26196 --- numpy/_core/_add_newdocs.py | 14 ++++++++++++++ numpy/_core/multiarray.pyi | 10 ++++++++++ numpy/_core/src/multiarray/multiarraymodule.c | 6 +++++- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index e967a298fa84..87a316e0c458 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1030,6 +1030,20 @@ 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise 'K' (keep) preserve input order Defaults to 'C'. 
+ device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + copy : bool, optional + If ``True``, then the object is copied. If ``None`` then the object is + copied only if needed, i.e. if ``__array__`` returns a copy, if obj + is a nested sequence, or if a copy is needed to satisfy any of + the other requirements (``dtype``, ``order``, etc.). + For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. + Default: ``None``. + + .. versionadded:: 2.0.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 74cc86e64e79..1e284be13f0a 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -525,6 +525,8 @@ def asanyarray( dtype: None = ..., order: _OrderKACF = ..., *, + device: None | L["cpu"] = ..., + copy: None | bool = ..., like: None | _SupportsArrayFunc = ..., ) -> _ArrayType: ... @overload @@ -533,6 +535,8 @@ def asanyarray( dtype: None = ..., order: _OrderKACF = ..., *, + device: None | L["cpu"] = ..., + copy: None | bool = ..., like: None | _SupportsArrayFunc = ..., ) -> NDArray[_SCT]: ... @overload @@ -541,6 +545,8 @@ def asanyarray( dtype: None = ..., order: _OrderKACF = ..., *, + device: None | L["cpu"] = ..., + copy: None | bool = ..., like: None | _SupportsArrayFunc = ..., ) -> NDArray[Any]: ... @overload @@ -549,6 +555,8 @@ def asanyarray( dtype: _DTypeLike[_SCT], order: _OrderKACF = ..., *, + device: None | L["cpu"] = ..., + copy: None | bool = ..., like: None | _SupportsArrayFunc = ..., ) -> NDArray[_SCT]: ... @overload @@ -557,6 +565,8 @@ def asanyarray( dtype: DTypeLike, order: _OrderKACF = ..., *, + device: None | L["cpu"] = ..., + copy: None | bool = ..., like: None | _SupportsArrayFunc = ..., ) -> NDArray[Any]: ... 
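A doctest-style sketch of the behaviour the stubs above describe, assuming
NumPy 2.1+ with this patch applied, where ``asanyarray`` mirrors ``asarray``;
illustration only:

    >>> import numpy as np
    >>> a = np.arange(3)
    >>> np.asanyarray(a, copy=None) is a    # no copy needed, pass through
    True
    >>> np.asanyarray(a, copy=True) is a    # always copy
    False
    >>> np.asanyarray(a, device="cpu") is a    # only "cpu" is accepted
    True
    >>> try:
    ...     np.asanyarray(a, dtype=np.float64, copy=False)  # cast needs a copy
    ... except ValueError:
    ...     print("copy required")
    copy required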
diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index e4f9a394be22..0b5cbd6ea2db 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1781,8 +1781,10 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *op; + NPY_COPYMODE copy = NPY_COPY_IF_NEEDED; npy_dtype_info dt_info = {NULL, NULL}; NPY_ORDER order = NPY_KEEPORDER; + NPY_DEVICE device = NPY_DEVICE_CPU; PyObject *like = Py_None; NPY_PREPARE_ARGPARSER; @@ -1791,6 +1793,8 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), "a", NULL, &op, "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, "|order", &PyArray_OrderConverter, &order, + "$device", &PyArray_DeviceConverterOptional, &device, + "$copy", &PyArray_CopyConverter, ©, "$like", NULL, &like, NULL, NULL, NULL) < 0) { Py_XDECREF(dt_info.descr); @@ -1812,7 +1816,7 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, NPY_COPY_IF_NEEDED, order, NPY_TRUE, 0); + op, dt_info.descr, dt_info.dtype, copy, order, NPY_TRUE, 0); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; From 6fe7bb3fd14780d91b9b66f5244f5389c7f571ad Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 31 May 2024 13:03:47 +0200 Subject: [PATCH 434/980] BUG: Wrapping arraymethod doesn't increase cache num (practically unused) --- numpy/_core/src/umath/wrapping_array_method.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index 756f09507954..9b3970561f3f 100644 --- a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -96,6 +96,7 @@ wrapping_auxdata_free(wrapping_auxdata *wrapping_auxdata) if (wrapping_auxdata_freenum < WRAPPING_AUXDATA_FREELIST_SIZE) { wrapping_auxdata_freelist[wrapping_auxdata_freenum] = wrapping_auxdata; + wrapping_auxdata_freenum++; } else { PyMem_Free(wrapping_auxdata); From cf39be180570c9a3f2f972502710f58c2f7574f4 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 31 May 2024 08:59:41 -0600 Subject: [PATCH 435/980] BUG: fix memory leak in string to float casts --- numpy/_core/src/multiarray/stringdtype/casts.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/src/multiarray/stringdtype/casts.c b/numpy/_core/src/multiarray/stringdtype/casts.c index 42c588199890..44ae6c92d128 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.c +++ b/numpy/_core/src/multiarray/stringdtype/casts.c @@ -894,6 +894,7 @@ string_to_pyfloat(char *in, int has_null, goto fail; \ } \ double dval = PyFloat_AS_DOUBLE(pyfloat_value); \ + Py_DECREF(pyfloat_value); \ npy_##typename fval = (double_to_float)(dval); \ \ if (NPY_UNLIKELY(isinf_name(fval) && !(npy_isinf(dval)))) { \ From 76b13e3bf1df8f10e6f0924bd476c9471dda8188 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 31 May 2024 17:06:54 +0000 Subject: [PATCH 436/980] MAINT: Bump github/codeql-action from 3.25.6 to 3.25.7 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.6 to 3.25.7. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/9fdb3e49720b44c48891d036bb502feb25684276...f079b8493333aace61c81488f8bd40919487bd9f) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 8157fb818a16..98db6ab4fb33 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@9fdb3e49720b44c48891d036bb502feb25684276 # v3.25.6 + uses: github/codeql-action/init@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@9fdb3e49720b44c48891d036bb502feb25684276 # v3.25.6 + uses: github/codeql-action/autobuild@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@9fdb3e49720b44c48891d036bb502feb25684276 # v3.25.6 + uses: github/codeql-action/analyze@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index ab42bf1f48bd..78d5d7431c65 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@9fdb3e49720b44c48891d036bb502feb25684276 # v2.1.27 + uses: github/codeql-action/upload-sarif@f079b8493333aace61c81488f8bd40919487bd9f # v2.1.27 with: sarif_file: results.sarif From edb449d59608db23fe65af7d8905dc23ed3e59f6 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 31 May 2024 09:17:35 -0600 Subject: [PATCH 437/980] BUG: fix memory leaks found by valgrind --- numpy/_core/src/multiarray/arraytypes.c.src | 5 +++-- numpy/_core/src/multiarray/descriptor.c | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index cafacd2c5ccb..1ecfc6d94cd7 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -4222,6 +4222,7 @@ NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType(int type) { PyArray_Descr *ret = NULL; + npy_bool is_stringdtype = (type == NPY_VSTRING || type == NPY_VSTRINGLTR); if (type < 0) { /* @@ -4233,7 +4234,7 @@ PyArray_DescrFromType(int type) */ ret = NULL; } - else if (type == NPY_VSTRING || type == NPY_VSTRINGLTR) { + else if (is_stringdtype) { ret = (PyArray_Descr *)new_stringdtype_instance(NULL, 1); } // builtin legacy dtypes @@ -4280,7 +4281,7 @@ PyArray_DescrFromType(int type) PyErr_SetString(PyExc_ValueError, "Invalid data-type for array"); } - else { + else if (!is_stringdtype) { Py_INCREF(ret); } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 1564902be674..cb031cc43c58 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2025,6 +2025,7 @@ arraydescr_dealloc(PyArray_Descr *self) { Py_XDECREF(self->typeobj); if (!PyDataType_ISLEGACY(self)) { + Py_TYPE(self)->tp_free((PyObject *)self); return; } _PyArray_LegacyDescr *lself = (_PyArray_LegacyDescr *)self; From d2700907b8683a2eef2e4fb9dfd26f7e72a0ffd7 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 31 May 2024 15:54:08 -0600 Subject: [PATCH 438/980] Add inspection functions to Sphinx --- doc/source/reference/array_api.rst | 15 +++++++++++++ numpy/_array_api_info.py | 36 +++++++++++++++--------------- 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/doc/source/reference/array_api.rst b/doc/source/reference/array_api.rst index 08bae3fec918..6f130efc8ca8 100644 --- a/doc/source/reference/array_api.rst +++ b/doc/source/reference/array_api.rst @@ -62,3 +62,18 @@ an entry point. instead, as do function signatures; the only known incompatibility that may remain is that the standard forbids unsafe casts for in-place operators while NumPy supports those. + +Inspection +========== + +NumPy implements the `array API inspection utilities +`__. +These functions can be accessed via the ``__array_namespace_info__()`` +function, which returns a namespace containing the inspection utilities. + +.. currentmodule:: numpy + +.. autosummary:: + :toctree: generated + + __array_namespace_info__ diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 56ac8bc93bb4..5694b0b5f78f 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -113,7 +113,7 @@ def capabilities(self) -> Capabilities: supports boolean indexing. Always ``True`` for NumPy. - **"data-dependent shapes"**: boolean indicating whether an array library - supports data-dependent output shapes. Always ``True`` for NumPy. + supports data-dependent output shapes. Always ``True`` for NumPy. 
See https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html @@ -240,23 +240,23 @@ def dtypes( allowed. kind : str or tuple of str, optional The kind of data types to return. If ``None``, all data types are - returned. If a string, only data types of that kind are returned. If a - tuple, a dictionary containing the union of the given kinds is - returned. The following kinds are supported: - - - ``'bool'``: boolean data types (e.g., ``bool``). - - ``'signed integer'``: signed integer data types (e.g., ``int8``, - ``int16``, ``int32``, ``int64``). - - ``'unsigned integer'``: unsigned integer data types (e.g., - ``uint8``, ``uint16``, ``uint32``, ``uint64``). - - ``'integral'``: integer data types. Shorthand for ``('signed - integer', 'unsigned integer')``. - - ``'real floating'``: real-valued floating-point data types - (e.g., ``float32``, ``float64``). - - ``'complex floating'``: complex floating-point data types (e.g., - ``complex64``, ``complex128``). - - ``'numeric'``: numeric data types. Shorthand for ``('integral', - 'real floating', 'complex floating')``. + returned. If a string, only data types of that kind are returned. + If a tuple, a dictionary containing the union of the given kinds + is returned. The following kinds are supported: + + - ``'bool'``: boolean data types (i.e., ``bool``). + - ``'signed integer'``: signed integer data types (i.e., ``int8``, + ``int16``, ``int32``, ``int64``). + - ``'unsigned integer'``: unsigned integer data types (i.e., + ``uint8``, ``uint16``, ``uint32``, ``uint64``). + - ``'integral'``: integer data types. Shorthand for ``('signed + integer', 'unsigned integer')``. + - ``'real floating'``: real-valued floating-point data types + (i.e., ``float32``, ``float64``). + - ``'complex floating'``: complex floating-point data types (i.e., + ``complex64``, ``complex128``). + - ``'numeric'``: numeric data types. Shorthand for ``('integral', + 'real floating', 'complex floating')``. Returns ------- From 046c8d9bfaf98885c63d14bada4247688d05b255 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 31 May 2024 15:58:26 -0600 Subject: [PATCH 439/980] Reformat files with black --- numpy/_array_api_info.py | 3 --- numpy/_core/tests/test_array_api_info.py | 26 +++++++++++++++++------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 5694b0b5f78f..90703c537ed4 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -143,7 +143,6 @@ def capabilities(self) -> Capabilities: # "max rank": 64, } - def default_device(self) -> str: """ The default device used for new NumPy arrays. @@ -220,7 +219,6 @@ def default_dtypes( "indexing": dtype(intp), } - def dtypes( self, *, @@ -357,7 +355,6 @@ def dtypes( return res raise ValueError(f"unsupported kind: {kind!r}") - def devices(self) -> List[str]: """ The devices supported by NumPy. 
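A small doctest-style sketch of the inspection namespace as these patches
leave it (assumes NumPy 2.1+; the values follow the docstrings above and are
illustrative only):

    >>> import numpy as np
    >>> info = np.__array_namespace_info__()
    >>> info.devices()
    ['cpu']
    >>> info.capabilities()["data-dependent shapes"]
    True
    >>> sorted(info.dtypes(kind="real floating"))   # keyed by dtype name
    ['float32', 'float64']
    >>> info.default_dtypes()["indexing"] == np.dtype(np.intp)
    True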
diff --git a/numpy/_core/tests/test_array_api_info.py b/numpy/_core/tests/test_array_api_info.py index 0adc193d47e7..0b8a77b0b660 100644 --- a/numpy/_core/tests/test_array_api_info.py +++ b/numpy/_core/tests/test_array_api_info.py @@ -52,15 +52,26 @@ def test_dtypes_all(): dtype_categories = { - "bool": {'bool': np.bool_}, - "signed integer": {'int8': np.int8, 'int16': np.int16, 'int32': np.int32, 'int64': np.int64}, - "unsigned integer": {'uint8': np.uint8, 'uint16': np.uint16, 'uint32': np.uint32, 'uint64': np.uint64}, - "integral": ('signed integer', 'unsigned integer'), - "real floating": {'float32': np.float32, 'float64': np.float64}, - "complex floating": {'complex64': np.complex64, 'complex128': np.complex128}, - "numeric": ('integral', 'real floating', 'complex floating'), + "bool": {"bool": np.bool_}, + "signed integer": { + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + }, + "unsigned integer": { + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + }, + "integral": ("signed integer", "unsigned integer"), + "real floating": {"float32": np.float32, "float64": np.float64}, + "complex floating": {"complex64": np.complex64, "complex128": np.complex128}, + "numeric": ("integral", "real floating", "complex floating"), } + @pytest.mark.parametrize("kind", dtype_categories) def test_dtypes_kind(kind): expected = dtype_categories[kind] @@ -69,6 +80,7 @@ def test_dtypes_kind(kind): else: assert info.dtypes(kind=kind) == expected + def test_dtypes_tuple(): dtypes = info.dtypes(kind=("bool", "integral")) assert dtypes == { From 611113c0d90f8a31560965526ed831a177b773cf Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 31 May 2024 16:04:26 -0600 Subject: [PATCH 440/980] Set __module__ = 'numpy' on __array_namespace_info__ --- numpy/_array_api_info.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 90703c537ed4..00c6b3fa8572 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -103,6 +103,8 @@ class __array_namespace_info__: 'indexing': numpy.int64} """ + __module__ = 'numpy' + def capabilities(self) -> Capabilities: """ Return a dictionary of array API library capabilities. From 7e08ec482859792f9b16e165d367413db61b3d49 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Fri, 31 May 2024 16:24:18 -0600 Subject: [PATCH 441/980] Fix linter errors --- numpy/_array_api_info.py | 49 +++++++++++++++++------- numpy/_core/tests/test_array_api_info.py | 6 ++- 2 files changed, 39 insertions(+), 16 deletions(-) diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 00c6b3fa8572..423898d6da9b 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -1,9 +1,11 @@ """ Array API Inspection namespace -This is the namespace for inspection functions as defined by the array API standard. -See https://data-apis.org/array-api/latest/API_specification/inspection.html -for more details. +This is the namespace for inspection functions as defined by the array API +standard. See +https://data-apis.org/array-api/latest/API_specification/inspection.html for +more details. + """ from __future__ import annotations @@ -101,6 +103,7 @@ class __array_namespace_info__: 'complex floating': numpy.complex128, 'integral': numpy.int64, 'indexing': numpy.int64} + """ __module__ = 'numpy' @@ -114,8 +117,9 @@ def capabilities(self) -> Capabilities: - **"boolean indexing"**: boolean indicating whether an array library supports boolean indexing. 
Always ``True`` for NumPy. - - **"data-dependent shapes"**: boolean indicating whether an array library - supports data-dependent output shapes. Always ``True`` for NumPy. + - **"data-dependent shapes"**: boolean indicating whether an array + library supports data-dependent output shapes. Always ``True`` for + NumPy. See https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html @@ -123,7 +127,10 @@ def capabilities(self) -> Capabilities: See Also -------- - __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices Returns ------- @@ -153,7 +160,10 @@ def default_device(self) -> str: See Also -------- - __array_namespace_info__.capabilities, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices + __array_namespace_info__.capabilities, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices Returns ------- @@ -198,7 +208,10 @@ def default_dtypes( See Also -------- - __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.dtypes, __array_namespace_info__.devices + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices Examples -------- @@ -212,7 +225,8 @@ def default_dtypes( """ if device not in ["cpu", None]: raise ValueError( - f'Device not understood. Only "cpu" is allowed, but received: {device}' + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' ) return { "real floating": dtype(float64), @@ -230,8 +244,8 @@ def dtypes( """ The array API data types supported by NumPy. - Note that this function only returns data types that are defined by the - array API. + Note that this function only returns data types that are defined by + the array API. Parameters ---------- @@ -266,7 +280,10 @@ def dtypes( See Also -------- - __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.devices + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.devices Examples -------- @@ -280,7 +297,8 @@ def dtypes( """ if device not in ["cpu", None]: raise ValueError( - f'Device not understood. Only "cpu" is allowed, but received: {device}' + 'Device not understood. 
Only "cpu" is allowed, but received:' + f' {device}' ) if kind is None: return { @@ -370,7 +388,10 @@ def devices(self) -> List[str]: See Also -------- - __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes Examples -------- diff --git a/numpy/_core/tests/test_array_api_info.py b/numpy/_core/tests/test_array_api_info.py index 0b8a77b0b660..c4415f6d38ca 100644 --- a/numpy/_core/tests/test_array_api_info.py +++ b/numpy/_core/tests/test_array_api_info.py @@ -24,7 +24,8 @@ def test_default_device(): def test_default_dtypes(): dtypes = info.default_dtypes() assert dtypes["real floating"] == np.float64 == np.asarray(0.0).dtype - assert dtypes["complex floating"] == np.complex128 == np.asarray(0.0j).dtype + assert dtypes["complex floating"] == np.complex128 == \ + np.asarray(0.0j).dtype assert dtypes["integral"] == np.int64 == np.asarray(0).dtype assert dtypes["indexing"] == np.int64 == np.argmax(np.zeros(10)).dtype @@ -67,7 +68,8 @@ def test_dtypes_all(): }, "integral": ("signed integer", "unsigned integer"), "real floating": {"float32": np.float32, "float64": np.float64}, - "complex floating": {"complex64": np.complex64, "complex128": np.complex128}, + "complex floating": {"complex64": np.complex64, "complex128": + np.complex128}, "numeric": ("integral", "real floating", "complex floating"), } From 8bc583793482707537503be5e8a8f79831251754 Mon Sep 17 00:00:00 2001 From: partev Date: Sat, 1 Jun 2024 23:56:30 -0400 Subject: [PATCH 442/980] DOC: Update constants.rst: fix URL redirect fix URL redirect --- doc/source/reference/constants.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/constants.rst b/doc/source/reference/constants.rst index 9db0da787712..2e2795a8b29f 100644 --- a/doc/source/reference/constants.rst +++ b/doc/source/reference/constants.rst @@ -28,7 +28,7 @@ NumPy includes several constants: .. rubric:: References - https://en.wikipedia.org/wiki/Euler-Mascheroni_constant + https://en.wikipedia.org/wiki/Euler%27s_constant .. 
data:: inf From a03e0ef7dbcd94ced4221be62a7757cf20b24cc4 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sun, 2 Jun 2024 09:21:18 +0200 Subject: [PATCH 443/980] ENH: Improve performance of np.broadcast_arrays and np.broadcast_shapes (#26160) * ENH: Improve performance of np.broadcast_arrays * modify tests * lint * whitespace * lint * improve performance of broadcast_shapes --- numpy/lib/_stride_tricks_impl.py | 13 ++++++------- numpy/lib/tests/test_stride_tricks.py | 20 +++++++++++--------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index 0cfbbcfe9c81..c64ab8b94d4c 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -478,7 +478,7 @@ def broadcast_shapes(*args): >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) (5, 6, 7) """ - arrays = [np.empty(x, dtype=[]) for x in args] + arrays = [np.empty(x, dtype=bool) for x in args] return _broadcast_shape(*arrays) @@ -546,13 +546,12 @@ def broadcast_arrays(*args, subok=False): # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], # order='C').itviews - args = tuple(np.array(_m, copy=None, subok=subok) for _m in args) + args = [np.array(_m, copy=None, subok=subok) for _m in args] shape = _broadcast_shape(*args) - if all(array.shape == shape for array in args): - # Common case where nothing needs to be broadcasted. - return args + result = [array if array.shape == shape + else _broadcast_to(array, shape, subok=subok, readonly=False) + for array in args] + return tuple(result) - return tuple(_broadcast_to(array, shape, subok=subok, readonly=False) - for array in args) diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index 543a2d6c5d4b..3cbebbdd552e 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -341,7 +341,7 @@ def test_broadcast_shapes_raises(): [(2, 3), (2,)], [(3,), (3,), (4,)], [(1, 3, 4), (2, 3, 3)], - [(1, 2), (3,1), (3,2), (10, 5)], + [(1, 2), (3, 1), (3, 2), (10, 5)], [2, (2, 3)], ] for input_shapes in data: @@ -578,11 +578,12 @@ def test_writeable(): # but the result of broadcast_arrays needs to be writeable, to # preserve backwards compatibility - for is_broadcast, results in [(False, broadcast_arrays(original,)), - (True, broadcast_arrays(0, original))]: - for result in results: + test_cases = [((False,), broadcast_arrays(original,)), + ((True, False), broadcast_arrays(0, original))] + for is_broadcast, results in test_cases: + for array_is_broadcast, result in zip(is_broadcast, results): # This will change to False in a future version - if is_broadcast: + if array_is_broadcast: with assert_warns(FutureWarning): assert_equal(result.flags.writeable, True) with assert_warns(DeprecationWarning): @@ -623,11 +624,12 @@ def test_writeable_memoryview(): # See gh-13929. original = np.array([1, 2, 3]) - for is_broadcast, results in [(False, broadcast_arrays(original,)), - (True, broadcast_arrays(0, original))]: - for result in results: + test_cases = [((False, ), broadcast_arrays(original,)), + ((True, False), broadcast_arrays(0, original))] + for is_broadcast, results in test_cases: + for array_is_broadcast, result in zip(is_broadcast, results): # This will change to False in a future version - if is_broadcast: + if array_is_broadcast: # memoryview(result, writable=True) will give warning but cannot # be tested using the python API. 
assert memoryview(result).readonly From e41b9c14e17bdb2088f1955ca2aa7f10ace895d7 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Sun, 2 Jun 2024 06:18:03 -0300 Subject: [PATCH 444/980] BUG: ``PyDataMem_SetHandler`` check capsule name (#26529) Closes #26137. Tests capsule name and sets PyErr if not valid: if (!PyCapsule_IsValid(handler, MEM_HANDLER_CAPSULE_NAME)) { PyErr_SetString(PyExc_ValueError, "Capsule must be named 'mem_handler'") return NULL; } --- numpy/_core/src/multiarray/alloc.c | 22 ++++++++++++++----- numpy/_core/src/multiarray/alloc.h | 1 + numpy/_core/src/multiarray/multiarraymodule.c | 3 ++- numpy/_core/tests/__init__.py | 0 numpy/_core/tests/test_mem_policy.py | 14 ++++++++++++ 5 files changed, 33 insertions(+), 7 deletions(-) delete mode 100644 numpy/_core/tests/__init__.py diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 2f3c82bc5909..df64a13a26e8 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -354,7 +354,8 @@ NPY_NO_EXPORT void * PyDataMem_UserNEW(size_t size, PyObject *mem_handler) { void *result; - PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer( + mem_handler, MEM_HANDLER_CAPSULE_NAME); if (handler == NULL) { return NULL; } @@ -368,7 +369,8 @@ NPY_NO_EXPORT void * PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler) { void *result; - PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer( + mem_handler, MEM_HANDLER_CAPSULE_NAME); if (handler == NULL) { return NULL; } @@ -381,7 +383,8 @@ PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler) NPY_NO_EXPORT void PyDataMem_UserFREE(void *ptr, size_t size, PyObject *mem_handler) { - PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer( + mem_handler, MEM_HANDLER_CAPSULE_NAME); if (handler == NULL) { WARN_NO_RETURN(PyExc_RuntimeWarning, "Could not get pointer to 'mem_handler' from PyCapsule"); @@ -395,7 +398,8 @@ NPY_NO_EXPORT void * PyDataMem_UserRENEW(void *ptr, size_t size, PyObject *mem_handler) { void *result; - PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer( + mem_handler, MEM_HANDLER_CAPSULE_NAME); if (handler == NULL) { return NULL; } @@ -427,6 +431,10 @@ PyDataMem_SetHandler(PyObject *handler) if (handler == NULL) { handler = PyDataMem_DefaultHandler; } + if (!PyCapsule_IsValid(handler, MEM_HANDLER_CAPSULE_NAME)) { + PyErr_SetString(PyExc_ValueError, "Capsule must be named 'mem_handler'"); + return NULL; + } token = PyContextVar_Set(current_handler, handler); if (token == NULL) { Py_DECREF(old_handler); @@ -477,7 +485,8 @@ get_handler_name(PyObject *NPY_UNUSED(self), PyObject *args) return NULL; } } - handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + handler = (PyDataMem_Handler *) PyCapsule_GetPointer( + mem_handler, MEM_HANDLER_CAPSULE_NAME); if (handler == NULL) { Py_DECREF(mem_handler); return NULL; @@ -514,7 +523,8 @@ get_handler_version(PyObject *NPY_UNUSED(self), PyObject *args) return NULL; } } - handler = (PyDataMem_Handler *) 
PyCapsule_GetPointer(mem_handler, "mem_handler"); + handler = (PyDataMem_Handler *) PyCapsule_GetPointer( + mem_handler, MEM_HANDLER_CAPSULE_NAME); if (handler == NULL) { Py_DECREF(mem_handler); return NULL; diff --git a/numpy/_core/src/multiarray/alloc.h b/numpy/_core/src/multiarray/alloc.h index 186eb54870ab..aed2095fe73c 100644 --- a/numpy/_core/src/multiarray/alloc.h +++ b/numpy/_core/src/multiarray/alloc.h @@ -6,6 +6,7 @@ #include "numpy/ndarraytypes.h" #define NPY_TRACE_DOMAIN 389047 +#define MEM_HANDLER_CAPSULE_NAME "mem_handler" NPY_NO_EXPORT PyObject * _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index e4f9a394be22..76b56ba9a0c9 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -5230,7 +5230,8 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* * Initialize the default PyDataMem_Handler capsule singleton. */ - PyDataMem_DefaultHandler = PyCapsule_New(&default_handler, "mem_handler", NULL); + PyDataMem_DefaultHandler = PyCapsule_New( + &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); if (PyDataMem_DefaultHandler == NULL) { goto err; } diff --git a/numpy/_core/tests/__init__.py b/numpy/_core/tests/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 3c7f6bb34661..9540d17d03cb 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -46,6 +46,16 @@ def get_module(tmp_path): Py_DECREF(secret_data); return old; """), + ("set_wrong_capsule_name_data_policy", "METH_NOARGS", """ + PyObject *wrong_name_capsule = + PyCapsule_New(&secret_data_handler, "not_mem_handler", NULL); + if (wrong_name_capsule == NULL) { + return NULL; + } + PyObject *old = PyDataMem_SetHandler(wrong_name_capsule); + Py_DECREF(wrong_name_capsule); + return old; + """), ("set_old_policy", "METH_O", """ PyObject *old; if (args != NULL && PyCapsule_CheckExact(args)) { @@ -252,6 +262,10 @@ def test_set_policy(get_module): get_module.set_old_policy(orig_policy) assert get_handler_name() == orig_policy_name + with pytest.raises(ValueError, + match="Capsule must be named 'mem_handler'"): + get_module.set_wrong_capsule_name_data_policy() + @pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_default_policy_singleton(get_module): From 7a647eabe666bd008003a8af75a26e8d36cd74b9 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sun, 2 Jun 2024 14:08:08 -0400 Subject: [PATCH 445/980] ENH: use size-zero dtype for broadcast-shapes This makes the speed independent of the actual shapes (as it used to be before gh-26160), but still fast. --- numpy/lib/_stride_tricks_impl.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index c64ab8b94d4c..0d437ea1e416 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -137,7 +137,7 @@ def sliding_window_view(x, window_shape, axis=None, *, Also known as rolling or moving window, the window slides across all dimensions of the array and extracts subsets of the array at all window positions. - + .. 
versionadded:: 1.20.0 Parameters @@ -439,6 +439,9 @@ def _broadcast_shape(*args): return b.shape +_size0_dtype = np.dtype([]) + + @set_module('numpy') def broadcast_shapes(*args): """ @@ -478,7 +481,7 @@ def broadcast_shapes(*args): >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) (5, 6, 7) """ - arrays = [np.empty(x, dtype=bool) for x in args] + arrays = [np.empty(x, dtype=_size0_dtype) for x in args] return _broadcast_shape(*arrays) @@ -554,4 +557,3 @@ def broadcast_arrays(*args, subok=False): else _broadcast_to(array, shape, subok=subok, readonly=False) for array in args] return tuple(result) - From 14120b426d66a575ffafbf2af7ff71065cc2fc2c Mon Sep 17 00:00:00 2001 From: Jules Date: Mon, 3 Jun 2024 12:24:45 +0800 Subject: [PATCH 446/980] Add release note --- doc/release/upcoming_changes/26580.new_feature.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 doc/release/upcoming_changes/26580.new_feature.rst diff --git a/doc/release/upcoming_changes/26580.new_feature.rst b/doc/release/upcoming_changes/26580.new_feature.rst new file mode 100644 index 000000000000..a837b51834b8 --- /dev/null +++ b/doc/release/upcoming_changes/26580.new_feature.rst @@ -0,0 +1 @@ +* `numpy.asanyarray` now support ``copy`` and ``device`` arguments, matching `numpy.asarray`. From 870936df435ac99913e3afab2664642884a629a1 Mon Sep 17 00:00:00 2001 From: Jules Date: Mon, 3 Jun 2024 17:27:23 +0800 Subject: [PATCH 447/980] TST: Re-enable int8/uint8 einsum tests It seems #24732 fixed itself, see the following CI runs: - github.com/JuliaPoo/numpy/actions/runs/9344912430/job/25716839862 - github.com/JuliaPoo/numpy/actions/runs/9344767573/job/25716459859 --- numpy/_core/tests/test_einsum.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 55f2546185e7..bc5927122786 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -621,23 +621,9 @@ def check_einsum_sums(self, dtype, do_opt=False): [2.]) # contig_stride0_outstride0_two def test_einsum_sums_int8(self): - if ( - (sys.platform == 'darwin' and platform.machine() == 'x86_64') - or - USING_CLANG_CL - ): - pytest.xfail('Fails on macOS x86-64 and when using clang-cl ' - 'with Meson, see gh-23838') self.check_einsum_sums('i1') def test_einsum_sums_uint8(self): - if ( - (sys.platform == 'darwin' and platform.machine() == 'x86_64') - or - USING_CLANG_CL - ): - pytest.xfail('Fails on macOS x86-64 and when using clang-cl ' - 'with Meson, see gh-23838') self.check_einsum_sums('u1') def test_einsum_sums_int16(self): From e13bb57c5ce788db96c9ed256a26b828d4b96eb6 Mon Sep 17 00:00:00 2001 From: Jules Date: Mon, 3 Jun 2024 18:01:59 +0800 Subject: [PATCH 448/980] Change versionadded to 2.1.0 --- numpy/_core/_add_newdocs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 87a316e0c458..e7efec88d4dc 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1034,7 +1034,7 @@ The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. - .. versionadded:: 2.0.0 + .. versionadded:: 2.1.0 copy : bool, optional If ``True``, then the object is copied. If ``None`` then the object is copied only if needed, i.e. if ``__array__`` returns a copy, if obj @@ -1043,7 +1043,7 @@ For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. Default: ``None``. - .. 
versionadded:: 2.0.0 + .. versionadded:: 2.1.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 From 923d2ad49d787de015996219adc612e614d8f59d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Mon, 3 Jun 2024 17:08:15 +0200 Subject: [PATCH 449/980] BUG: Disallow string inputs for copy keyword in np.array and np.asarray --- numpy/_core/src/multiarray/conversion_utils.c | 6 ++++++ numpy/_core/tests/test_multiarray.py | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 7e89859fc124..9eba190323ea 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -249,6 +249,12 @@ PyArray_CopyConverter(PyObject *obj, NPY_COPYMODE *copymode) { return NPY_FAIL; } } + else if(PyUnicode_Check(obj)) { + PyErr_SetString(PyExc_ValueError, + "strings are not allowed for 'copy' keyword. " + "Use True/False/None instead."); + return NPY_FAIL; + } else { npy_bool bool_copymode; if (!PyArray_BoolConverter(obj, &bool_copymode)) { diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 99cb9453c6ae..63d8f4919678 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -503,6 +503,14 @@ def test_array_copy_true(self): assert_array_equal(e, [[1, 3, 7], [1, 2, 3]]) assert_array_equal(d, [[1, 5, 3], [1,2,3]]) + def test_array_copy_str(self): + with pytest.raises( + ValueError, + match="strings are not allowed for 'copy' keyword. " + "Use True/False/None instead." + ): + np.array([1, 2, 3], copy="always") + def test_array_cont(self): d = np.ones(10)[::2] assert_(np.ascontiguousarray(d).flags.c_contiguous) From 3fba65946ff0adbbc122e3285726604d9a5c4c8b Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Mon, 3 Jun 2024 20:21:19 +0300 Subject: [PATCH 450/980] TST: run smoke-docs if available --- numpy/conftest.py | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/numpy/conftest.py b/numpy/conftest.py index a6c329790e16..3f1cc66ed580 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -3,6 +3,8 @@ """ import os import tempfile +from contextlib import contextmanager +import warnings import hypothesis import pytest @@ -10,6 +12,12 @@ from numpy._core._multiarray_tests import get_fpu_mode +try: + from scipy_doctest.conftest import dt_config + HAVE_SCPDT = True +except ModuleNotFoundError: + HAVE_SCPDT = False + _old_fpu_mode = None _collect_results = {} @@ -136,3 +144,39 @@ def weak_promotion(request): yield request.param numpy._set_promotion_state(state) + + +if HAVE_SCPDT: + + @contextmanager + def warnings_errors_and_rng(test=None): + """Filter out the wall of DeprecationWarnings. 
+        """
+        msgs = ["The numpy.linalg.linalg",
+                "The numpy.fft.helper",
+                "dep_util",
+                "pkg_resources",
+                "numpy.core.umath",
+                "msvccompiler",
+                "Deprecated call",
+                "numpy.core",
+                "`np.compat`",
+                "Importing from numpy.matlib"
+        ]
+
+        msg = "|".join(msgs)
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings('ignore', category=DeprecationWarning, message=msg)
+            yield
+
+    # find and check doctests under this context manager
+    dt_config.user_context_mgr = warnings_errors_and_rng
+
+    # numpy specific tweaks from refguide-check
+    dt_config.rndm_markers.add('#uninitialized')
+    dt_config.rndm_markers.add('# uninitialized')
+
+    import doctest
+    dt_config.optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
+

From 72e7dc76b59ee9c94336f0ba4b45a8f4350c8fe6 Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Mon, 3 Jun 2024 21:10:38 +0300
Subject: [PATCH 451/980] DOC: fix several doctest errors

---
 numpy/_core/_add_newdocs.py         |  8 ++++----
 numpy/_core/_add_newdocs_scalars.py |  6 +++---
 numpy/_core/multiarray.py           | 18 +++++++++---------
 numpy/_typing/_add_docstring.py     |  2 +-
 numpy/conftest.py                   |  3 +++
 numpy/ma/__init__.py                |  4 ++--
 numpy/polynomial/__init__.py        |  2 ++
 7 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py
index e967a298fa84..4e358aeb0fa5 100644
--- a/numpy/_core/_add_newdocs.py
+++ b/numpy/_core/_add_newdocs.py
@@ -6596,7 +6596,7 @@ as a timedelta
 
     >>> np.datetime64('2010', np.datetime_data(dt_25s))
-    numpy.datetime64('2010-01-01T00:00:00','25s')
+    np.datetime64('2010-01-01T00:00:00','25s')
 
     """)
 
@@ -6937,19 +6937,19 @@ def refer_to_array_attribute(attr, method=True):
     array(["hello", "world"], dtype=StringDType())
 
     >>> arr = np.array(["hello", None, "world"],
-                       dtype=StringDType(na_object=None))
+    ...                dtype=StringDType(na_object=None))
 
     >>> arr
     array(["hello", None, "world"], dtype=StringDType(na_object=None))
     >>> arr[1] is None
     True
 
     >>> arr = np.array(["hello", np.nan, "world"],
-                       dtype=StringDType(na_object=np.nan))
+    ...                dtype=StringDType(na_object=np.nan))
     >>> np.isnan(arr)
     array([False, True, False])
 
     >>> np.array([1.2, object(), "hello world"],
-                 dtype=StringDType(coerce=True))
+    ...          dtype=StringDType(coerce=True))
     ValueError: StringDType only allows string data when string coercion
     is disabled.
 
diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py
index 2ad1d22ee8f1..b363aa9a64a1 100644
--- a/numpy/_core/_add_newdocs_scalars.py
+++ b/numpy/_core/_add_newdocs_scalars.py
@@ -301,11 +301,11 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
     offset of +0000.
 
     >>> np.datetime64(10, 'Y')
-    numpy.datetime64('1980')
+    np.datetime64('1980')
     >>> np.datetime64('1980', 'Y')
-    numpy.datetime64('1980')
+    np.datetime64('1980')
     >>> np.datetime64(10, 'D')
-    numpy.datetime64('1970-01-11')
+    np.datetime64('1970-01-11')
 
     See :ref:`arrays.datetime` for more information.
""") diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 77e249a85828..5840888eac3d 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -151,7 +151,7 @@ def empty_like( [ 0, 0, -1073741821]]) >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) >>> np.empty_like(a) - array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninit + array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) """ @@ -1580,27 +1580,27 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, -------- >>> # First business day in October 2011 (not accounting for holidays) ... np.busday_offset('2011-10', 0, roll='forward') - numpy.datetime64('2011-10-03') + np.datetime64('2011-10-03') >>> # Last business day in February 2012 (not accounting for holidays) ... np.busday_offset('2012-03', -1, roll='forward') - numpy.datetime64('2012-02-29') + np.datetime64('2012-02-29') >>> # Third Wednesday in January 2011 ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') - numpy.datetime64('2011-01-19') + np.datetime64('2011-01-19') >>> # 2012 Mother's Day in Canada and the U.S. ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') - numpy.datetime64('2012-05-13') + np.datetime64('2012-05-13') >>> # First business day on or after a date ... np.busday_offset('2011-03-20', 0, roll='forward') - numpy.datetime64('2011-03-21') + np.datetime64('2011-03-21') >>> np.busday_offset('2011-03-22', 0, roll='forward') - numpy.datetime64('2011-03-22') + np.datetime64('2011-03-22') >>> # First business day after a date ... np.busday_offset('2011-03-20', 1, roll='backward') - numpy.datetime64('2011-03-21') + np.datetime64('2011-03-21') >>> np.busday_offset('2011-03-22', 1, roll='backward') - numpy.datetime64('2011-03-23') + np.datetime64('2011-03-23') """ return (dates, offsets, weekmask, holidays, out) diff --git a/numpy/_typing/_add_docstring.py b/numpy/_typing/_add_docstring.py index 722d713a7076..758d1a5be5ea 100644 --- a/numpy/_typing/_add_docstring.py +++ b/numpy/_typing/_add_docstring.py @@ -136,7 +136,7 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[typing.Any, numpy.dtype[+ScalarType]] + numpy.ndarray[typing.Any, numpy.dtype[+_ScalarType_co]] >>> print(npt.NDArray[np.float64]) numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]] diff --git a/numpy/conftest.py b/numpy/conftest.py index 3f1cc66ed580..0c5092ddc4cb 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -180,3 +180,6 @@ def warnings_errors_and_rng(test=None): import doctest dt_config.optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS + # recognize the StringDType repr + dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType + diff --git a/numpy/ma/__init__.py b/numpy/ma/__init__.py index 870cc4ef2daa..03e9fcd075cc 100644 --- a/numpy/ma/__init__.py +++ b/numpy/ma/__init__.py @@ -22,8 +22,8 @@ >>> m = np.ma.masked_array(x, np.isnan(x)) >>> m -masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --], - mask = [False False False True False False False True], +masked_array(data=[2.0, 1.0, 3.0, --, 5.0, 2.0, 3.0, --], + mask=[False, False, False, True, False, False, False, True], fill_value=1e+20) Here, we construct a masked array that suppress all ``NaN`` values. 
We diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py index 2a31e52f2aa4..b22ade5e28a8 100644 --- a/numpy/polynomial/__init__.py +++ b/numpy/polynomial/__init__.py @@ -41,6 +41,8 @@ `~chebyshev.Chebyshev.fit` class method:: >>> from numpy.polynomial import Chebyshev + >>> xdata = [1, 2, 3, 4] + >>> ydata = [1, 4, 9, 16] >>> c = Chebyshev.fit(xdata, ydata, deg=1) is preferred over the `chebyshev.chebfit` function from the From 501fecf0a1dfc54983947daed4b5c285389e193e Mon Sep 17 00:00:00 2001 From: Ben Woodruff Date: Mon, 3 Jun 2024 11:06:17 -0600 Subject: [PATCH 452/980] DOC: Update randn() to use rng.standard_normal() Updates all references to `randn` to use `standard_normal` from Generator. I left the matlib versions alone. In a few spots I added `seed=123`. I left the `# may vary` tag on those examples. This tag could be removed with the guaranteed same output now. I had to reformat a few chunks of longer code and output. [skip actions] [skip azp] [skip cirrus] --- numpy/_core/shape_base.py | 3 ++- numpy/lib/_function_base_impl.py | 3 ++- numpy/lib/_histograms_impl.py | 3 ++- numpy/lib/_twodim_base_impl.py | 6 ++++-- numpy/linalg/_linalg.py | 24 +++++++++++++++++------- numpy/polynomial/hermite.py | 5 +++-- numpy/polynomial/hermite_e.py | 6 +++--- numpy/polynomial/laguerre.py | 5 +++-- numpy/polynomial/polynomial.py | 26 +++++++++++++++----------- 9 files changed, 51 insertions(+), 30 deletions(-) diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index 07f185ed0c10..ece77538f4c9 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -417,7 +417,8 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): Examples -------- - >>> arrays = [np.random.randn(3, 4) for _ in range(10)] + >>> rng = np.random.default_rng() + >>> arrays = [rng.standard_normal(size=(3,4)) for _ in range(10)] >>> np.stack(arrays, axis=0).shape (10, 3, 4) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index a6a7241e3649..040e2491fa24 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -323,7 +323,8 @@ def flip(m, axis=None): [7, 6]], [[1, 0], [3, 2]]]) - >>> A = np.random.randn(3,4,5) + >>> rng = np.random.default_rng() + >>> A = rng.standard_normal(size=(3,4,5)) >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) True """ diff --git a/numpy/lib/_histograms_impl.py b/numpy/lib/_histograms_impl.py index 1439f49592fa..321c21868312 100644 --- a/numpy/lib/_histograms_impl.py +++ b/numpy/lib/_histograms_impl.py @@ -972,7 +972,8 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None): Examples -------- - >>> r = np.random.randn(100,3) + >>> rng = np.random.default_rng() + >>> r = rng.standard_normal(size=(100,3)) >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) >>> H.shape, edges[0].size, edges[1].size, edges[2].size ((5, 8, 4), 6, 9, 5) diff --git a/numpy/lib/_twodim_base_impl.py b/numpy/lib/_twodim_base_impl.py index dd6372429687..de4f373b4d6f 100644 --- a/numpy/lib/_twodim_base_impl.py +++ b/numpy/lib/_twodim_base_impl.py @@ -89,7 +89,8 @@ def fliplr(m): [0., 2., 0.], [3., 0., 0.]]) - >>> A = np.random.randn(2,3,5) + >>> rng = np.random.default_rng() + >>> A = rng.standard_normal(size=(2,3,5)) >>> np.all(np.fliplr(A) == A[:,::-1,...]) True @@ -142,7 +143,8 @@ def flipud(m): [0., 2., 0.], [1., 0., 0.]]) - >>> A = np.random.randn(2,3,5) + >>> rng = np.random.default_rng() + >>> A = rng.standard_normal(size=(2,3,5)) >>> np.all(np.flipud(A) == A[::-1,...]) True diff 
--git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 63aafaffce1a..a8c9332d4763 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -274,7 +274,8 @@ def tensorsolve(a, b, axes=None): -------- >>> a = np.eye(2*3*4) >>> a.shape = (2*3, 4, 2, 3, 4) - >>> b = np.random.randn(2*3, 4) + >>> rng = np.random.default_rng() + >>> b = rng.standard_normal(size=(2*3, 4)) >>> x = np.linalg.tensorsolve(a, b) >>> x.shape (2, 3, 4) @@ -456,7 +457,8 @@ def tensorinv(a, ind=2): >>> ainv = np.linalg.tensorinv(a, ind=2) >>> ainv.shape (8, 3, 4, 6) - >>> b = np.random.randn(4, 6) + >>> rng = np.random.default_rng() + >>> b = rng.standard_normal(size=(4, 6)) >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) True @@ -465,7 +467,8 @@ def tensorinv(a, ind=2): >>> ainv = np.linalg.tensorinv(a, ind=1) >>> ainv.shape (8, 3, 24) - >>> b = np.random.randn(24) + >>> rng = np.random.default_rng() + >>> b = rng.standard_normal(24) >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) True @@ -978,7 +981,8 @@ def qr(a, mode='reduced'): Examples -------- - >>> a = np.random.randn(9, 6) + >>> rng = np.random.default_rng() + >>> a = rng.standard_normal(size=(9, 6)) >>> Q, R = np.linalg.qr(a) >>> np.allclose(a, np.dot(Q, R)) # a does equal QR True @@ -1703,8 +1707,13 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Examples -------- - >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) - >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3) + >>> rng = np.random.default_rng() + >>> a_re = rng.standard_normal(size=(9, 6)) + >>> a_im = rng.standard_normal(size=(9, 6)) + >>> a = a_re + 1j*a_im + >>> b_re = rng.standard_normal(size=(2, 7, 8, 3)) + >>> b_im = rng.standard_normal(size=(2, 7, 8, 3)) + >>> b = b_re + 1j*b_im Reconstruction based on full SVD, 2D case: @@ -2181,7 +2190,8 @@ def pinv(a, rcond=None, hermitian=False, *, rtol=_NoValue): The following example checks that ``a * a+ * a == a`` and ``a+ * a * a+ == a+``: - >>> a = np.random.randn(9, 6) + >>> rng = np.random.default_rng() + >>> a = rng.standard_normal(size=(9, 6)) >>> B = np.linalg.pinv(a) >>> np.allclose(a, np.dot(a, np.dot(B, a))) True diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 4671f93244bd..b843942cabcf 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -1460,10 +1460,11 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): -------- >>> from numpy.polynomial.hermite import hermfit, hermval >>> x = np.linspace(-10, 10) - >>> err = np.random.randn(len(x))/10 + >>> rng = np.random.default_rng(seed=123) + >>> err = rng.standard_normal(size=(len(x)))/10 >>> y = hermval(x, [1, 2, 3]) + err >>> hermfit(x, y, 2) - array([1.0218, 1.9986, 2.9999]) # may vary + array([1.02294967, 2.00016403, 2.99994614]) # may vary """ return pu._fit(hermvander, x, y, deg, rcond, full, w) diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index f50b9d2449f3..14006663d561 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -1387,11 +1387,11 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): -------- >>> from numpy.polynomial.hermite_e import hermefit, hermeval >>> x = np.linspace(-10, 10) - >>> np.random.seed(123) - >>> err = np.random.randn(len(x))/10 + >>> rng = np.random.default_rng(seed=123) + >>> err = rng.standard_normal(size=(len(x)))/10 >>> y = hermeval(x, [1, 2, 3]) + err >>> hermefit(x, y, 2) - array([ 1.01690445, 1.99951418, 2.99948696]) # may vary + 
array([1.02284196, 2.00032805, 2.99978457]) # may vary """ return pu._fit(hermevander, x, y, deg, rcond, full, w) diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 11e2ac7229ca..7c746cd5a204 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -1441,10 +1441,11 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): -------- >>> from numpy.polynomial.laguerre import lagfit, lagval >>> x = np.linspace(0, 10) - >>> err = np.random.randn(len(x))/10 + >>> rng = np.random.default_rng(seed=123) + >>> err = rng.standard_normal(size=(len(x)))/10 >>> y = lagval(x, [1, 2, 3]) + err >>> lagfit(x, y, 2) - array([ 0.96971004, 2.00193749, 3.00288744]) # may vary + array([1.00578369, 1.99417356, 2.99827656]) # may vary """ return pu._fit(lagvander, x, y, deg, rcond, full, w) diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 2241c49235a4..32042b28c74f 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1441,27 +1441,31 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): Examples -------- - >>> np.random.seed(123) >>> from numpy.polynomial import polynomial as P >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] - >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + Gaussian noise + >>> rng = np.random.default_rng(seed=123) + >>> err = rng.standard_normal(size=len(x)) + >>> y = x**3 - x + err # x^3 - x + Gaussian noise >>> c, stats = P.polyfit(x,y,3,full=True) - >>> np.random.seed(123) - >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1 - array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) # may vary + >>> c # c[0], c[1] approx. -1, c[2] should be approx. 0, c[3] approx. 1 + array([ 0.23111996, -1.02785049, -0.2241444 , 1.08405657]) # may vary >>> stats # note the large SSR, explaining the rather poor results - [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, # may vary - 0.28853036]), 1.1324274851176597e-014] + [array([48.312088]), # may vary + 4, # may vary + array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), # may vary + 1.1324274851176597e-14] # may vary Same thing without the added noise >>> y = x**3 - x >>> c, stats = P.polyfit(x,y,3,full=True) - >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 - array([-6.36925336e-18, -1.00000000e+00, -4.08053781e-16, 1.00000000e+00]) + >>> c # c[0], c[1] ~= -1, c[2] should be "very close to 0", c[3] ~= 1 + array([-6.73496154e-17, -1.00000000e+00, 0.00000000e+00, 1.00000000e+00]) >>> stats # note the minuscule SSR - [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, # may vary - 0.50443316, 0.28853036]), 1.1324274851176597e-014] + [array([8.79579319e-31]), # may vary + 4, # may vary + array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), # may vary + 1.1324274851176597e-14] # may vary """ return pu._fit(polyvander, x, y, deg, rcond, full, w) From 83af26d68b87f17bd0d38d717004ecf859f6d974 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 3 Jun 2024 13:31:53 -0600 Subject: [PATCH 453/980] Fix test for default indexing and integer dtypes --- numpy/_core/tests/test_array_api_info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_array_api_info.py b/numpy/_core/tests/test_array_api_info.py index c4415f6d38ca..154b3837325d 100644 --- a/numpy/_core/tests/test_array_api_info.py +++ b/numpy/_core/tests/test_array_api_info.py @@ -26,8 +26,8 @@ def test_default_dtypes(): assert dtypes["real 
floating"] == np.float64 == np.asarray(0.0).dtype assert dtypes["complex floating"] == np.complex128 == \ np.asarray(0.0j).dtype - assert dtypes["integral"] == np.int64 == np.asarray(0).dtype - assert dtypes["indexing"] == np.int64 == np.argmax(np.zeros(10)).dtype + assert dtypes["integral"] == np.intp == np.asarray(0).dtype + assert dtypes["indexing"] == np.intp == np.argmax(np.zeros(10)).dtype with pytest.raises(ValueError, match="Device not understood"): info.default_dtypes(device="gpu") From 71ab27f9f663b165b3c5a667b694e07a727049be Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 3 Jun 2024 13:33:21 -0600 Subject: [PATCH 454/980] Fix docs build errors --- numpy/_array_api_info.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 423898d6da9b..a6bb615aba2f 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -134,7 +134,7 @@ def capabilities(self) -> Capabilities: Returns ------- - capabilities : Capabilities + capabilities : dict A dictionary of array API library capabilities. Examples @@ -202,7 +202,7 @@ def default_dtypes( Returns ------- - dtypes : DefaultDataTypes + dtypes : dict A dictionary describing the default data types used for new NumPy arrays. @@ -274,7 +274,7 @@ def dtypes( Returns ------- - dtypes : DataTypes + dtypes : dict A dictionary mapping the names of data types to the corresponding NumPy data types. From a500831e255a91c08e21ba065991fd9ca7a4609a Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 3 Jun 2024 13:49:41 -0600 Subject: [PATCH 455/980] Cleanup the unstack see-also list --- numpy/_core/shape_base.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index da5a9d897261..e9a3cdfc86b3 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -490,12 +490,6 @@ def unstack(x, /, *, axis=0): -------- stack : Join a sequence of arrays along a new axis. concatenate : Join a sequence of arrays along an existing axis. - vstack : Stack arrays in sequence vertically (row wise). - hstack : Stack arrays in sequence horizontally (column wise). - dstack : Stack arrays in sequence depth wise (along third axis). - column_stack : Stack 1-D arrays as columns into a 2-D array. - vsplit : Split an array into multiple sub-arrays vertically (row-wise). - unstack : Split an array into a tuple of sub-arrays along an axis. block : Assemble an nd-array from nested lists of blocks. split : Split array into a list of multiple sub-arrays of equal size. From 221299d0a740f234269e6c410020215acf8b8042 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 3 Jun 2024 13:49:52 -0600 Subject: [PATCH 456/980] Simplify the implementation of unstack() --- numpy/_core/shape_base.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index e9a3cdfc86b3..a3e6699b97f1 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -517,11 +517,7 @@ def unstack(x, /, *, axis=0): np.True_ """ - x = asanyarray(x) - - axis = normalize_axis_index(axis, x.ndim) - slices = (slice(None),) * axis - return tuple(x[slices + (i, ...)] for i in range(x.shape[axis])) + return tuple(_nx.moveaxis(x, axis, 0)) # Internal functions to eliminate the overhead of repeated dispatch in one of # the two possible paths inside np.block. 
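PATCH 456 collapses ``unstack`` into a single expression. This works because
tuple() iteration over an ndarray walks the first axis, so moving the requested
axis to the front is all the bookkeeping the function needs. A minimal
standalone sketch of the same idea (the helper name ``unstack_sketch`` is ours,
not part of NumPy):

    import numpy as np

    def unstack_sketch(x, axis=0):
        # Once `axis` is at the front, tuple() iteration yields one
        # sub-array per index along that axis.
        return tuple(np.moveaxis(x, axis, 0))

    a = np.arange(24).reshape(2, 3, 4)
    parts = unstack_sketch(a, axis=1)
    assert len(parts) == 3 and parts[0].shape == (2, 4)
    # Round trip: np.stack is the inverse operation.
    assert np.array_equal(np.stack(parts, axis=1), a)

Because np.moveaxis only rearranges strides, the returned elements are views
into the original array, not copies.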
From ac73b1c9b483ba123651e4bbbc04788e9354eaf7 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 3 Jun 2024 13:52:45 -0600 Subject: [PATCH 457/980] Add a notes section to the unstack docstring --- numpy/_core/shape_base.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index a3e6699b97f1..e5cec7c00df9 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -468,11 +468,7 @@ def unstack(x, /, *, axis=0): The ``axis`` parameter specifies the axis along which the array will be split. of the new axis in the dimensions of the result. For example, if ``axis=0`` it will be the first dimension and if ``axis=-1`` it will be - the last dimension. - - The result is a tuple of arrays split along ``axis``. ``unstack`` serves - as the reverse operation of :py:func:`stack`, i.e., ``stack(unstack(x, - axis=axis), axis=axis) == x``. + the last dimension. The result is a tuple of arrays split along ``axis``. .. versionadded:: 2.1.0 @@ -493,6 +489,14 @@ def unstack(x, /, *, axis=0): block : Assemble an nd-array from nested lists of blocks. split : Split array into a list of multiple sub-arrays of equal size. + Notes + ----- + ``unstack`` serves as the reverse operation of :py:func:`stack`, i.e., + ``stack(unstack(x, axis=axis), axis=axis) == x``. + + This function is equivalent to tuple(np.moveaxis(x, axis, 0)), since + iterating on an array iterates along the first axis. + Examples -------- >>> arr = np.arange(24).reshape((2, 3, 4)) From dc56f27cb9915479709619c7743c37a7e34379f6 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 3 Jun 2024 13:59:18 -0600 Subject: [PATCH 458/980] Add a test for unstack() --- numpy/_core/tests/test_shape_base.py | 32 ++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 5b9de37f5f60..4b4bde19cd81 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -490,6 +490,38 @@ def test_stack(): stack((a, b), dtype=np.int64, axis=1, casting="safe") +def test_unstack(): + a = np.arange(24).reshape((2, 3, 4)) + + for stacks in [np.unstack(a), + np.unstack(a, axis=0), + np.unstack(a, axis=-3)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 2 + assert_array_equal(stacks[0], a[0]) + assert_array_equal(stacks[1], a[1]) + + for stacks in [np.unstack(a, axis=1), + np.unstack(a, axis=-2)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 3 + assert_array_equal(stacks[0], a[:, 0]) + assert_array_equal(stacks[1], a[:, 1]) + assert_array_equal(stacks[2], a[:, 2]) + + for stacks in [np.unstack(a, axis=2), + np.unstack(a, axis=-1)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 4 + assert_array_equal(stacks[0], a[:, :, 0]) + assert_array_equal(stacks[1], a[:, :, 1]) + assert_array_equal(stacks[2], a[:, :, 2]) + assert_array_equal(stacks[3], a[:, :, 3]) + + assert_raises(ValueError, np.unstack, a, axis=3) + assert_raises(ValueError, np.unstack, a, axis=-4) + + @pytest.mark.parametrize("axis", [0]) @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"]) @pytest.mark.parametrize("casting", From cb899767a4c9b0fadda10c853fbc5836d2d36927 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Mon, 3 Jun 2024 14:10:55 -0600 Subject: [PATCH 459/980] Add unstack to Sphinx and cleanup the docstring --- .../reference/routines.array-manipulation.rst | 1 + numpy/_core/shape_base.py | 13 +++++++------ 2 files 
changed, 8 insertions(+), 6 deletions(-) diff --git a/doc/source/reference/routines.array-manipulation.rst b/doc/source/reference/routines.array-manipulation.rst index be2b1120e080..619458de8224 100644 --- a/doc/source/reference/routines.array-manipulation.rst +++ b/doc/source/reference/routines.array-manipulation.rst @@ -88,6 +88,7 @@ Splitting arrays dsplit hsplit vsplit + unstack Tiling arrays ============= diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index e5cec7c00df9..bf0933d973e7 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -463,12 +463,13 @@ def _unstack_dispatcher(x, *, axis=None): @array_function_dispatch(_unstack_dispatcher) def unstack(x, /, *, axis=0): """ - Splits an array into a sequence of arrays along the given axis. + Split an array into a sequence of arrays along the given axis. - The ``axis`` parameter specifies the axis along which the array will be - split. of the new axis in the dimensions of the result. For example, if - ``axis=0`` it will be the first dimension and if ``axis=-1`` it will be - the last dimension. The result is a tuple of arrays split along ``axis``. + The ``axis`` parameter specifies the dimension along which the array will + be split. For example, if ``axis=0`` (the default) it will be the first + dimension and if ``axis=-1`` it will be the last dimension. + + The result is a tuple of arrays split along ``axis``. .. versionadded:: 2.1.0 @@ -494,7 +495,7 @@ def unstack(x, /, *, axis=0): ``unstack`` serves as the reverse operation of :py:func:`stack`, i.e., ``stack(unstack(x, axis=axis), axis=axis) == x``. - This function is equivalent to tuple(np.moveaxis(x, axis, 0)), since + This function is equivalent to ``tuple(np.moveaxis(x, axis, 0))``, since iterating on an array iterates along the first axis. Examples From 4ecffd70d6bf67f5e9533b258c0a17aa3c373e85 Mon Sep 17 00:00:00 2001 From: Tsvika Shapira Date: Tue, 4 Jun 2024 01:11:12 +0300 Subject: [PATCH 460/980] DOC: fix typos in numpy v2.0 documentation (#26605) * typo "Data Data Types" * fix typo "unit8" * remove "=" in headings to fit new size --- doc/neps/nep-0050-scalar-promotion.rst | 2 +- doc/source/user/basics.types.rst | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/neps/nep-0050-scalar-promotion.rst b/doc/neps/nep-0050-scalar-promotion.rst index ec330ef03300..fc161ef9629f 100644 --- a/doc/neps/nep-0050-scalar-promotion.rst +++ b/doc/neps/nep-0050-scalar-promotion.rst @@ -191,7 +191,7 @@ arrays that are not 0-D, such as ``array([2])``. * - ``array([1], uint8) + int64(1)`` or ``array([1], uint8) + array(1, int64)`` - - ``array([2], unit8)`` + - ``array([2], uint8)`` - ``array([2], int64)`` [T2]_ * - ``array([1.], float32) + float64(1.)`` or diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index 3dd947002c20..afecdf0d77f1 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -142,8 +142,8 @@ Advanced types, not listed above, are explored in section .. _canonical-python-and-c-types: -Relationship Between NumPy Data Types and C Data Data Types -=========================================================== +Relationship Between NumPy Data Types and C Data Types +====================================================== NumPy provides both bit sized type names and names based on the names of C types. 
Since the definition of C types are platform dependent, this means the explicitly From 75cc0b22d94f147b42048c341a3dedbbc94ee583 Mon Sep 17 00:00:00 2001 From: Jules Date: Tue, 4 Jun 2024 11:32:02 +0800 Subject: [PATCH 461/980] TST: Add device keyword tests --- numpy/_core/tests/test_multiarray.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 99cb9453c6ae..105cc08b422b 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10233,10 +10233,22 @@ class TestDevice: """ Test arr.device attribute and arr.to_device() method. """ - def test_device(self): - arr = np.arange(5) - + @pytest.mark.parametrize("func, arg", [ + (np.arange, 5), + (np.empty_like, []), + (np.zeros, 5), + (np.empty, (5,5)), + (np.asarray, []), + (np.asanyarray, []), + ]) + def test_device(self, func, arg): + arr = func(arg) assert arr.device == "cpu" + arr = func(arg, device=None) + assert arr.device == "cpu" + arr = func(arg, device="cpu") + assert arr.device == "cpu" + with assert_raises_regex( AttributeError, r"attribute 'device' of '(numpy.|)ndarray' objects is " From ee98c984f7e96537b3a15f17939df6627cc58485 Mon Sep 17 00:00:00 2001 From: Jules Date: Tue, 4 Jun 2024 11:36:27 +0800 Subject: [PATCH 462/980] TST: Add negative test for device keyword --- numpy/_core/tests/test_multiarray.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 105cc08b422b..8df84a5a82ff 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10249,6 +10249,13 @@ def test_device(self, func, arg): arr = func(arg, device="cpu") assert arr.device == "cpu" + with assert_raises_regex( + ValueError, + r"Device not understood. 
Only \"cpu\" is allowed, " + r"but received: nonsense" + ): + func(arg, device="nonsense") + with assert_raises_regex( AttributeError, r"attribute 'device' of '(numpy.|)ndarray' objects is " From 19a68c78e51f9f24ec586d43ebdfbf6ab240a067 Mon Sep 17 00:00:00 2001 From: Jules Date: Tue, 4 Jun 2024 11:39:07 +0800 Subject: [PATCH 463/980] fix lint error --- numpy/_core/tests/test_multiarray.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 8df84a5a82ff..5deff07ef1f3 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10237,7 +10237,7 @@ class TestDevice: (np.arange, 5), (np.empty_like, []), (np.zeros, 5), - (np.empty, (5,5)), + (np.empty, (5, 5)), (np.asarray, []), (np.asanyarray, []), ]) From a51e61fcf2cd3fb4a8f69dcb7142c1ee6fb00b84 Mon Sep 17 00:00:00 2001 From: Ebigide Jude <147154874+jud-sdev@users.noreply.github.com> Date: Tue, 4 Jun 2024 05:45:34 +0000 Subject: [PATCH 464/980] updated insert function with improved note section --- numpy/lib/_function_base_impl.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index a6a7241e3649..e31de58b9e68 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5416,9 +5416,16 @@ def insert(arr, obj, values, axis=None): Notes ----- - Note that for higher dimensional inserts ``obj=0`` behaves very different + Note that for higher dimensional inserts ``obj=0`` behaves very differently from ``obj=[0]`` just like ``arr[:,0,:] = values`` is different from - ``arr[:,[0],:] = values``. + ``arr[:,[0],:] = values``. This difference arises due to the broadcasting + rules and axis manipulation in NumPy: + + - Using `obj=0` specifies a single position, leading to direct insertion along the specified axis. + - Using `obj=[0]` treats the index as a list, which results in different broadcasting behavior and can affect how values are inserted. + + For a more detailed explanation of broadcasting and indexing, refer to the + :ref:`NumPy documentation on indexing ` and :ref:`broadcasting `. Examples -------- From ded3ec3f57c8710a193f670c9363f8f51b872f8f Mon Sep 17 00:00:00 2001 From: Ebigide Jude <147154874+jud-sdev@users.noreply.github.com> Date: Tue, 4 Jun 2024 06:38:33 +0000 Subject: [PATCH 465/980] Corrected the line length error --- numpy/lib/_function_base_impl.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index e31de58b9e68..66b745d6e51e 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5416,16 +5416,20 @@ def insert(arr, obj, values, axis=None): Notes ----- - Note that for higher dimensional inserts ``obj=0`` behaves very differently - from ``obj=[0]`` just like ``arr[:,0,:] = values`` is different from - ``arr[:,[0],:] = values``. This difference arises due to the broadcasting - rules and axis manipulation in NumPy: + Note that for higher dimensional inserts ``obj=0`` behaves very + differently from ``obj=[0]`` just like ``arr[:,0,:] = values`` is + different from ``arr[:,[0],:] = values``. + This difference arises due to the broadcasting rules and + axis manipulation in NumPy: - - Using `obj=0` specifies a single position, leading to direct insertion along the specified axis. 
- - Using `obj=[0]` treats the index as a list, which results in different broadcasting behavior and can affect how values are inserted. - - For a more detailed explanation of broadcasting and indexing, refer to the - :ref:`NumPy documentation on indexing ` and :ref:`broadcasting `. + - Using `obj=0` specifies a single position, leading to direct + insertion along the specified axis. + - Using `obj=[0]` treats the index as a list, which results in different + broadcasting behavior and can affect how values are inserted. + + For a more detailed explanation of broadcasting and indexing, + refer to the :ref:`NumPy documentation on indexing ` and + :ref:`broadcasting `. Examples -------- From 952f35ca9907d2513de1d170cbac61ad04af7f6b Mon Sep 17 00:00:00 2001 From: Ebigide Jude <147154874+jud-sdev@users.noreply.github.com> Date: Tue, 4 Jun 2024 07:30:56 +0000 Subject: [PATCH 466/980] Built docs and removed bullets from the notes --- numpy/lib/_function_base_impl.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 66b745d6e51e..29a4c13b3c66 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5417,14 +5417,14 @@ def insert(arr, obj, values, axis=None): Notes ----- Note that for higher dimensional inserts ``obj=0`` behaves very - differently from ``obj=[0]`` just like ``arr[:,0,:] = values`` is - different from ``arr[:,[0],:] = values``. + differently from ``obj=[0]`` just like ``arr[:,0,:] = values`` + is different from ``arr[:,[0],:] = values``. This difference arises due to the broadcasting rules and axis manipulation in NumPy: - - Using `obj=0` specifies a single position, leading to direct + Using `obj=0` specifies a single position, leading to direct insertion along the specified axis. - - Using `obj=[0]` treats the index as a list, which results in different + Using `obj=[0]` treats the index as a list, which results in different broadcasting behavior and can affect how values are inserted. 
For a more detailed explanation of broadcasting and indexing, From 0dd77b405b8e61c80127740a917f605809eee581 Mon Sep 17 00:00:00 2001 From: Jules Date: Tue, 4 Jun 2024 18:26:58 +0800 Subject: [PATCH 467/980] BUG: np.take handle 64-bit indices on 32-bit platforms See #25607 --- numpy/_core/src/multiarray/item_selection.c | 21 ++++++++++---- numpy/_core/tests/test_numeric.py | 32 +++++++++++++++++++++ 2 files changed, 48 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index c45b3694a035..db72c339f122 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -231,21 +231,32 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, PyArrayObject *out, NPY_CLIPMODE clipmode) { PyArray_Descr *dtype; - PyArrayObject *obj = NULL, *self, *indices; + PyArrayObject *obj = NULL, *self, *indices, *tmp; npy_intp nd, i, n, m, max_item, chunk, itemsize, nelem; npy_intp shape[NPY_MAXDIMS]; npy_bool needs_refcounting; - indices = NULL; + indices = tmp = NULL; self = (PyArrayObject *)PyArray_CheckAxis(self0, &axis, NPY_ARRAY_CARRAY_RO); if (self == NULL) { return NULL; } - indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, - NPY_INTP, - 0, 0); + tmp = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, + NPY_INT64, + 0, 0); + if (tmp == NULL) { + goto fail; + } + + if (NPY_SIZEOF_INTP != 8) { + indices = (PyArrayObject *)PyArray_Cast(tmp, NPY_INTP); + Py_DECREF(tmp); + } else { + indices = tmp; + } + if (indices == NULL) { goto fail; } diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 4d560df6456e..d20dea0d0b0a 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -324,6 +324,38 @@ def test_take(self): out = np.take(a, indices) assert_equal(out, tgt) + # take 32 64 + x32 = np.array([1, 2, 3, 4, 5], dtype=np.int32) + x64 = np.array([0, 2, 2, 3], dtype=np.int64) + tgt = np.array([1, 3, 3, 4], dtype=np.int32) + out = np.take(x32, x64) + assert_equal(out, tgt) + assert_equal(out.dtype, tgt.dtype) + + # take 64 32 + x64 = np.array([1, 2, 3, 4, 5], dtype=np.int64) + x32 = np.array([0, 2, 2, 3], dtype=np.int32) + tgt = np.array([1, 3, 3, 4], dtype=np.int64) + out = np.take(x64, x32) + assert_equal(out, tgt) + assert_equal(out.dtype, tgt.dtype) + + # take 32 32 + x32_0 = np.array([1, 2, 3, 4, 5], dtype=np.int32) + x32_1 = np.array([0, 2, 2, 3], dtype=np.int32) + tgt = np.array([1, 3, 3, 4], dtype=np.int32) + out = np.take(x32_0, x32_1) + assert_equal(out, tgt) + assert_equal(out.dtype, tgt.dtype) + + # take 64 64 + x64_0 = np.array([1, 2, 3, 4, 5], dtype=np.int64) + x64_1 = np.array([0, 2, 2, 3], dtype=np.int64) + tgt = np.array([1, 3, 3, 4], dtype=np.int64) + out = np.take(x64_0, x64_1) + assert_equal(out, tgt) + assert_equal(out.dtype, tgt.dtype) + def test_trace(self): c = [[1, 2], [3, 4], [5, 6]] assert_equal(np.trace(c), 5) From 8ec8dc2809626e02a08e7bc46224c57b3d7d83cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Tue, 4 Jun 2024 15:10:56 +0200 Subject: [PATCH 468/980] Create typing stub --- numpy/__init__.py | 2 +- numpy/__init__.pyi | 2 + numpy/_array_api_info.py | 67 ++----------------- numpy/_array_api_info.pyi | 62 +++++++++++++++++ numpy/meson.build | 1 + .../tests/data/reveal/array_api_info.pyi | 18 +++++ 6 files changed, 89 insertions(+), 63 deletions(-) create mode 100644 numpy/_array_api_info.pyi create mode 100644 
numpy/typing/tests/data/reveal/array_api_info.pyi diff --git a/numpy/__init__.py b/numpy/__init__.py index 21318aee7da9..0d0e09ceb716 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -315,7 +315,7 @@ set(lib._polynomial_impl.__all__) | set(lib._npyio_impl.__all__) | set(lib._index_tricks_impl.__all__) | - {"emath", "show_config", "__version__"} + {"emath", "show_config", "__version__", "__array_namespace_info__"} ) # Filter out Cython harmless warnings diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d5a0dd796424..6a6d133e335d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -170,6 +170,8 @@ from numpy._typing._extended_precision import ( complex512 as complex512, ) +from numpy._array_api_info import __array_namespace_info__ as __array_namespace_info__ + from collections.abc import ( Callable, Iterable, diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index a6bb615aba2f..b040a514a642 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -7,11 +7,6 @@ more details. """ - -from __future__ import annotations - -from typing import TYPE_CHECKING - from numpy._core import ( dtype, bool, @@ -30,49 +25,6 @@ complex128, ) -if TYPE_CHECKING: - from typing import Optional, Union, Tuple, List, TypedDict - from numpy.dtyping import DtypeLike - - Capabilities = TypedDict( - "Capabilities", - { - "boolean indexing": bool, - "data-dependent shapes": bool, - # "max rank": int | None, - }, - ) - - DefaultDataTypes = TypedDict( - "DefaultDataTypes", - { - "real floating": DtypeLike, - "complex floating": DtypeLike, - "integral": DtypeLike, - "indexing": DtypeLike, - }, - ) - - DataTypes = TypedDict( - "DataTypes", - { - "bool": DtypeLike, - "float32": DtypeLike, - "float64": DtypeLike, - "complex64": DtypeLike, - "complex128": DtypeLike, - "int8": DtypeLike, - "int16": DtypeLike, - "int32": DtypeLike, - "int64": DtypeLike, - "uint8": DtypeLike, - "uint16": DtypeLike, - "uint32": DtypeLike, - "uint64": DtypeLike, - }, - total=False, - ) - class __array_namespace_info__: """ @@ -108,7 +60,7 @@ class __array_namespace_info__: __module__ = 'numpy' - def capabilities(self) -> Capabilities: + def capabilities(self): """ Return a dictionary of array API library capabilities. @@ -152,7 +104,7 @@ def capabilities(self) -> Capabilities: # "max rank": 64, } - def default_device(self) -> str: + def default_device(self): """ The default device used for new NumPy arrays. @@ -179,11 +131,7 @@ def default_device(self) -> str: """ return "cpu" - def default_dtypes( - self, - *, - device: Optional[str] = None, - ) -> DefaultDataTypes: + def default_dtypes(self, *, device): """ The default data types used for new NumPy arrays. @@ -235,12 +183,7 @@ def default_dtypes( "indexing": dtype(intp), } - def dtypes( - self, - *, - device: Optional[str] = None, - kind: Optional[Union[str, Tuple[str, ...]]] = None, - ) -> DataTypes: + def dtypes(self, *, device, kind=None): """ The array API data types supported by NumPy. @@ -375,7 +318,7 @@ def dtypes( return res raise ValueError(f"unsupported kind: {kind!r}") - def devices(self) -> List[str]: + def devices(self): """ The devices supported by NumPy. 
diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi new file mode 100644 index 000000000000..f86aeb63fd2b --- /dev/null +++ b/numpy/_array_api_info.pyi @@ -0,0 +1,62 @@ +from typing import TypedDict, Optional, Union, Tuple, List +from numpy._typing import DtypeLike + +Capabilities = TypedDict( + "Capabilities", + { + "boolean indexing": bool, + "data-dependent shapes": bool, + }, +) + +DefaultDataTypes = TypedDict( + "DefaultDataTypes", + { + "real floating": DtypeLike, + "complex floating": DtypeLike, + "integral": DtypeLike, + "indexing": DtypeLike, + }, +) + +DataTypes = TypedDict( + "DataTypes", + { + "bool": DtypeLike, + "float32": DtypeLike, + "float64": DtypeLike, + "complex64": DtypeLike, + "complex128": DtypeLike, + "int8": DtypeLike, + "int16": DtypeLike, + "int32": DtypeLike, + "int64": DtypeLike, + "uint8": DtypeLike, + "uint16": DtypeLike, + "uint32": DtypeLike, + "uint64": DtypeLike, + }, + total=False, +) + +class __array_namespace_info__: + __module__: str + + def capabilities(self) -> Capabilities: ... + + def default_device(self) -> str: ... + + def default_dtypes( + self, + *, + device: Optional[str] = None, + ) -> DefaultDataTypes: ... + + def dtypes( + self, + *, + device: Optional[str] = None, + kind: Optional[Union[str, Tuple[str, ...]]] = None, + ) -> DataTypes: ... + + def devices(self) -> List[str]: ... diff --git a/numpy/meson.build b/numpy/meson.build index 63c6b0e6d5aa..7e9ec5244cc9 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -227,6 +227,7 @@ python_sources = [ '__init__.py', '__init__.pyi', '_array_api_info.py', + '_array_api_info.pyi', '_configtool.py', '_distributor_init.py', '_globals.py', diff --git a/numpy/typing/tests/data/reveal/array_api_info.pyi b/numpy/typing/tests/data/reveal/array_api_info.pyi new file mode 100644 index 000000000000..056547681366 --- /dev/null +++ b/numpy/typing/tests/data/reveal/array_api_info.pyi @@ -0,0 +1,18 @@ +import sys +from typing import List + +import numpy as np + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + +array_namespace_info = np.__array_namespace_info__() + +assert_type(array_namespace_info.__module__, str) +assert_type(array_namespace_info.capabilities(), np._array_api_info.Capabilities) +assert_type(array_namespace_info.default_device(), str) +assert_type(array_namespace_info.default_dtypes(), np._array_api_info.DefaultDataTypes) +assert_type(array_namespace_info.dtypes(), np._array_api_info.DataTypes) +assert_type(array_namespace_info.devices(), List[str]) From b76051047bd49b0a6f0471a4531ae00f53eaa3f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Tue, 4 Jun 2024 15:31:43 +0200 Subject: [PATCH 469/980] Add missing default values --- numpy/_array_api_info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index b040a514a642..0167a2fe7985 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -131,7 +131,7 @@ def default_device(self): """ return "cpu" - def default_dtypes(self, *, device): + def default_dtypes(self, *, device=None): """ The default data types used for new NumPy arrays. @@ -183,7 +183,7 @@ def default_dtypes(self, *, device): "indexing": dtype(intp), } - def dtypes(self, *, device, kind=None): + def dtypes(self, *, device=None, kind=None): """ The array API data types supported by NumPy. 
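Patches 453 and 468-469 together pin down both the runtime behavior and the
static types of the inspection namespace. A short usage sketch of the API as
described above (the asserts assume a 64-bit build with these patches applied;
they mirror the checks in test_array_api_info.py rather than adding new
guarantees):

    import numpy as np

    info = np.__array_namespace_info__()
    assert info.default_device() == "cpu"
    assert "cpu" in info.devices()
    assert info.capabilities()["boolean indexing"] is True

    # Per PATCH 453, the default integral and indexing dtypes are np.intp,
    # which equals np.int64 only on 64-bit platforms.
    defaults = info.default_dtypes()
    assert defaults["real floating"] == np.float64
    assert defaults["integral"] == np.intp
    assert defaults["indexing"] == np.intp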
From de897afd8038fe5ebf75306399497aad3421e9dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Tue, 4 Jun 2024 17:05:34 +0200 Subject: [PATCH 470/980] MNT: Remove set_string_function --- doc/source/reference/c-api/array.rst | 15 ---- .../reference/c-api/types-and-structures.rst | 11 ++- numpy/_core/_add_newdocs.py | 9 --- numpy/_core/arrayprint.py | 75 ------------------- numpy/_core/src/multiarray/multiarraymodule.c | 28 ------- numpy/_core/src/multiarray/strfuncs.c | 31 +------- numpy/_core/tests/test_multiarray.py | 6 +- numpy/_core/tests/test_numeric.py | 19 ----- 8 files changed, 9 insertions(+), 185 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 7a2f0cbcda91..167bdb7d49ac 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -4077,21 +4077,6 @@ extension with the lowest :c:data:`NPY_FEATURE_VERSION` as possible. :c:data:`NPY_FEATURE_VERSION` changes whenever the API changes (e.g. a function is added). A changed value does not always require a recompile. -Internal Flexibility -~~~~~~~~~~~~~~~~~~~~ - -.. c:function:: void PyArray_SetStringFunction(PyObject* op, int repr) - - This function allows you to alter the tp_str and tp_repr methods - of the array object to any Python function. Thus you can alter - what happens for all arrays when str(arr) or repr(arr) is called - from Python. The function to be called is passed in as *op*. If - *repr* is non-zero, then this function will be called in response - to repr(arr), otherwise the function will be called in response to - str(arr). No check on whether or not *op* is callable is - performed. The callable passed in to *op* should expect an array - argument and should return a string to be printed. - Memory management ~~~~~~~~~~~~~~~~~ diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index b0f274f38a74..8d57153d8803 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -215,12 +215,11 @@ The :c:data:`PyArray_Type` can also be sub-typed. .. tip:: - The ``tp_as_number`` methods use a generic approach to call whatever - function has been registered for handling the operation. When the - ``_multiarray_umath module`` is imported, it sets the numeric operations - for all arrays to the corresponding ufuncs. This choice can be changed with - :c:func:`PyUFunc_ReplaceLoopBySignature` The ``tp_str`` and ``tp_repr`` - methods can also be altered using :c:func:`PyArray_SetStringFunction`. + The :c:member:`tp_as_number ` methods use + a generic approach to call whatever function has been registered for + handling the operation. When the ``_multiarray_umath`` module is imported, + it sets the numeric operations for all arrays to the corresponding ufuncs. + This choice can be changed with :c:func:`PyUFunc_ReplaceLoopBySignature`. PyGenericArrType_Type --------------------- diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index e967a298fa84..1499a9682d62 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1830,15 +1830,6 @@ """) - -add_newdoc('numpy._core.multiarray', 'set_string_function', - """ - set_string_function(f, repr=1) - - Internal method to set a function to be used when pretty printing arrays. 
- - """) - add_newdoc('numpy._core.multiarray', 'promote_types', """ promote_types(type1, type2) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index d12746c7ce52..0ab4890d8f35 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -1727,78 +1727,3 @@ def array_str(a, max_line_width=None, precision=None, suppress_small=None): array2string=_array2string_impl) _default_array_repr = functools.partial(_array_repr_implementation, array2string=_array2string_impl) - - -def set_string_function(f, repr=True): - """ - Set a Python function to be used when pretty printing arrays. - - .. deprecated:: 2.0 - Use `np.set_printoptions` instead with a formatter for custom - printing of NumPy objects. - - Parameters - ---------- - f : function or None - Function to be used to pretty print arrays. The function should expect - a single array argument and return a string of the representation of - the array. If None, the function is reset to the default NumPy function - to print arrays. - repr : bool, optional - If True (default), the function for pretty printing (``__repr__``) - is set, if False the function that returns the default string - representation (``__str__``) is set. - - See Also - -------- - set_printoptions, get_printoptions - - Examples - -------- - >>> from numpy._core.arrayprint import set_string_function - >>> def pprint(arr): - ... return 'HA! - What are you going to do now?' - ... - >>> set_string_function(pprint) - >>> a = np.arange(10) - >>> a - HA! - What are you going to do now? - >>> _ = a - >>> # [0 1 2 3 4 5 6 7 8 9] - - We can reset the function to the default: - - >>> set_string_function(None) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - `repr` affects either pretty printing or normal string representation. - Note that ``__repr__`` is still affected by setting ``__str__`` - because the width of each array element in the returned string becomes - equal to the length of the result of ``__str__()``. - - >>> x = np.arange(4) - >>> set_string_function(lambda x:'random', repr=False) - >>> x.__str__() - 'random' - >>> x.__repr__() - 'array([0, 1, 2, 3])' - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`set_string_function` is deprecated. Use `np.set_printoptions` " - "with a formatter for custom printing NumPy objects. 
" - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - if f is None: - if repr: - return multiarray.set_string_function(_default_array_repr, 1) - else: - return multiarray.set_string_function(_default_array_str, 0) - else: - return multiarray.set_string_function(f, repr) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 76b56ba9a0c9..2e3cce7bc645 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -3172,31 +3172,6 @@ array__reconstruct(PyObject *NPY_UNUSED(dummy), PyObject *args) return NULL; } -static PyObject * -array_set_string_function(PyObject *NPY_UNUSED(self), PyObject *args, - PyObject *kwds) -{ - PyObject *op = NULL; - int repr = 1; - static char *kwlist[] = {"f", "repr", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi:set_string_function", kwlist, &op, &repr)) { - return NULL; - } - /* reset the array_repr function to built-in */ - if (op == Py_None) { - op = NULL; - } - if (op != NULL && !PyCallable_Check(op)) { - PyErr_SetString(PyExc_TypeError, - "Argument must be callable."); - return NULL; - } - PyArray_SetStringFunction(op, repr); - Py_RETURN_NONE; -} - - static PyObject * array_set_datetimeparse_function(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds)) @@ -4413,9 +4388,6 @@ static struct PyMethodDef array_module_methods[] = { {"_reconstruct", (PyCFunction)array__reconstruct, METH_VARARGS, NULL}, - {"set_string_function", - (PyCFunction)array_set_string_function, - METH_VARARGS|METH_KEYWORDS, NULL}, {"set_datetimeparse_function", (PyCFunction)array_set_datetimeparse_function, METH_VARARGS|METH_KEYWORDS, NULL}, diff --git a/numpy/_core/src/multiarray/strfuncs.c b/numpy/_core/src/multiarray/strfuncs.c index 8b9966373466..596a32e64aaf 100644 --- a/numpy/_core/src/multiarray/strfuncs.c +++ b/numpy/_core/src/multiarray/strfuncs.c @@ -9,10 +9,6 @@ #include "npy_import.h" #include "strfuncs.h" -static PyObject *PyArray_StrFunction = NULL; -static PyObject *PyArray_ReprFunction = NULL; - - static void npy_PyErr_SetStringChained(PyObject *type, const char *message) { @@ -30,22 +26,7 @@ npy_PyErr_SetStringChained(PyObject *type, const char *message) NPY_NO_EXPORT void PyArray_SetStringFunction(PyObject *op, int repr) { - if (repr) { - /* Dispose of previous callback */ - Py_XDECREF(PyArray_ReprFunction); - /* Add a reference to new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_ReprFunction = op; - } - else { - /* Dispose of previous callback */ - Py_XDECREF(PyArray_StrFunction); - /* Add a reference to new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_StrFunction = op; - } + PyErr_SetString(PyExc_ValueError, "PyArray_SetStringFunction was removed"); } @@ -53,11 +34,6 @@ NPY_NO_EXPORT PyObject * array_repr(PyArrayObject *self) { static PyObject *repr = NULL; - - if (PyArray_ReprFunction != NULL) { - return PyObject_CallFunctionObjArgs(PyArray_ReprFunction, self, NULL); - } - /* * We need to do a delayed import here as initialization on module load * leads to circular import problems. @@ -76,11 +52,6 @@ NPY_NO_EXPORT PyObject * array_str(PyArrayObject *self) { static PyObject *str = NULL; - - if (PyArray_StrFunction != NULL) { - return PyObject_CallFunctionObjArgs(PyArray_StrFunction, self, NULL); - } - /* * We need to do a delayed import here as initialization on module load leads * to circular import problems. 
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 99cb9453c6ae..7086c109d2ac 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -586,16 +586,16 @@ def assign(v): ) def test_unicode_assignment(self): # gh-5049 - from numpy._core.arrayprint import set_string_function + from numpy._core.arrayprint import set_printoptions @contextmanager def inject_str(s): """ replace ndarray.__str__ temporarily """ - set_string_function(lambda x: s, repr=False) + set_printoptions(formatter={"all": lambda x: s}) try: yield finally: - set_string_function(None, repr=False) + set_printoptions() a1d = np.array(['test']) a0d = np.array('done') diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 4d560df6456e..d2b551b12015 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -9,7 +9,6 @@ import numpy as np from numpy._core import umath, sctypes from numpy._core.numerictypes import obj2sctype -from numpy._core.arrayprint import set_string_function from numpy.exceptions import AxisError from numpy.random import rand, randint, randn from numpy.testing import ( @@ -3582,24 +3581,6 @@ def test_list(self): assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) -@pytest.mark.filterwarnings( - "ignore:.*set_string_function.*:DeprecationWarning" -) -class TestStringFunction: - - def test_set_string_function(self): - a = np.array([1]) - set_string_function(lambda x: "FOO", repr=True) - assert_equal(repr(a), "FOO") - set_string_function(None, repr=True) - assert_equal(repr(a), "array([1])") - - set_string_function(lambda x: "FOO", repr=False) - assert_equal(str(a), "FOO") - set_string_function(None, repr=False) - assert_equal(str(a), "[1]") - - class TestRoll: def test_roll1d(self): x = np.arange(10) From 24d80d0c00aed5f1b01c415f5e8b9fdff0e436e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Jun 2024 17:48:31 +0000 Subject: [PATCH 471/980] MAINT: Bump github/codeql-action from 3.25.7 to 3.25.8 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.7 to 3.25.8. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/f079b8493333aace61c81488f8bd40919487bd9f...2e230e8fe0ad3a14a340ad0815ddb96d599d2aff) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 98db6ab4fb33..deeb3e08e300 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 + uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 + uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 + uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 78d5d7431c65..5bd8f6cd0fce 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@f079b8493333aace61c81488f8bd40919487bd9f # v2.1.27 + uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v2.1.27 with: sarif_file: results.sarif From 8b28d4f5452c7d398c8904d83577f13bb3d76ac0 Mon Sep 17 00:00:00 2001 From: warren Date: Fri, 18 Aug 2023 09:40:27 -0400 Subject: [PATCH 472/980] TST: add some tests of np.log for complex input. These are values where some platforms might lose precision. [skip circle] --- numpy/_core/tests/test_umath.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index ca95dffd6fe3..df8ec07dc3f5 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1404,6 +1404,36 @@ def test_log_strides(self): assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2) assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2) + # Reference values were computed with mpmath, with mp.dps = 200. + @pytest.mark.parametrize( + 'z, wref', + [(1 + 1e-12j, 5e-25 + 1e-12j), + (1.000000000000001 + 3e-08j, + 1.5602230246251546e-15 + 2.999999999999996e-08j), + (0.9999995000000417 + 0.0009999998333333417j, + 7.831475869017683e-18 + 0.001j), + (0.9999999999999996 + 2.999999999999999e-08j, + 5.9107901499372034e-18 + 3e-08j), + (0.99995000042 - 0.009999833j, + -7.015159763822903e-15 - 0.009999999665816696j)], + ) + def test_log_precision_float64(self, z, wref): + w = np.log(z) + assert_allclose(w, wref, rtol=1e-15) + + # Reference values were computed with mpmath, with mp.dps = 200. + @pytest.mark.parametrize( + 'z, wref', + [(np.complex64(1.0 + 3e-6j), np.complex64(4.5e-12+3e-06j)), + (np.complex64(1.0 - 2e-5j), np.complex64(1.9999999e-10 - 2e-5j)), + (np.complex64(0.9999999 + 1e-06j), + np.complex64(-1.192088e-07+1.0000001e-06j))], + ) + def test_log_precision_float32(self, z, wref): + w = np.log(z) + assert_allclose(w, wref, rtol=1e-6) + + class TestExp: def test_exp_values(self): x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] From d93e5eb80b32f4432ff86c27123367c2b3e75226 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Tue, 4 Jun 2024 19:32:50 -0400 Subject: [PATCH 473/980] BLD: blocklist complex log function clog on musl. 
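For context, the failure mode being avoided can be spot-checked against one of
the mpmath-derived reference values from the tests added above (a sketch: the
value and tolerance are copied from the float64 test, and the strict rtol only
holds on platforms that take NumPy's fallback path instead of the C library's
clog):

    >>> import numpy as np
    >>> from numpy.testing import assert_allclose
    >>> w = np.log(1 + 1e-12j)  # a naive clog computes log(hypot(1, 1e-12)) == 0.0
    >>> assert_allclose(w, 5e-25 + 1e-12j, rtol=1e-15)  # passes silently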
[skip circle] --- numpy/_core/src/common/npy_config.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/_core/src/common/npy_config.h b/numpy/_core/src/common/npy_config.h index e590366888aa..459449cb58c1 100644 --- a/numpy/_core/src/common/npy_config.h +++ b/numpy/_core/src/common/npy_config.h @@ -182,6 +182,16 @@ #undef HAVE_CACOSHF #undef HAVE_CACOSHL +/* + * musl's clog is low precision for some inputs. As of MUSL 1.2.5, + * the first comment in clog.c is "// FIXME". + * See https://github.com/numpy/numpy/pull/24416#issuecomment-1678208628 + * and https://github.com/numpy/numpy/pull/24448 + */ +#undef HAVE_CLOG +#undef HAVE_CLOGF +#undef HAVE_CLOGL + #endif /* defined(__GLIBC) */ #endif /* defined(HAVE_FEATURES_H) */ From 9cdefc33813ade7b1b803a774e66b8ed563805cf Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Tue, 4 Jun 2024 21:56:58 -0400 Subject: [PATCH 474/980] BLD: blocklist complex log functions clog* on cygwin. [skip circle] --- numpy/_core/src/common/npy_config.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/numpy/_core/src/common/npy_config.h b/numpy/_core/src/common/npy_config.h index 459449cb58c1..82641a85509e 100644 --- a/numpy/_core/src/common/npy_config.h +++ b/numpy/_core/src/common/npy_config.h @@ -126,6 +126,14 @@ #undef HAVE_CPOWL #undef HAVE_CEXPL +/* + * cygwin uses newlib, which has naive implementations of the + * complex log functions. + */ +#undef HAVE_CLOG +#undef HAVE_CLOGF +#undef HAVE_CLOGL + #include #if CYGWIN_VERSION_DLL_MAJOR < 3003 // rather than blocklist cabsl, hypotl, modfl, sqrtl, error out From ba241ef6ae262b5921f647bad4eb4c5b013033fd Mon Sep 17 00:00:00 2001 From: Jules <57632293+JuliaPoo@users.noreply.github.com> Date: Wed, 5 Jun 2024 10:58:50 +0800 Subject: [PATCH 475/980] Update numpy/_core/tests/test_numeric.py Co-authored-by: Pieter Eendebak --- numpy/_core/tests/test_numeric.py | 38 +++++++------------------------ 1 file changed, 8 insertions(+), 30 deletions(-) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index d20dea0d0b0a..2e83680a6225 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -324,37 +324,15 @@ def test_take(self): out = np.take(a, indices) assert_equal(out, tgt) - # take 32 64 - x32 = np.array([1, 2, 3, 4, 5], dtype=np.int32) - x64 = np.array([0, 2, 2, 3], dtype=np.int64) - tgt = np.array([1, 3, 3, 4], dtype=np.int32) - out = np.take(x32, x64) - assert_equal(out, tgt) - assert_equal(out.dtype, tgt.dtype) - - # take 64 32 - x64 = np.array([1, 2, 3, 4, 5], dtype=np.int64) - x32 = np.array([0, 2, 2, 3], dtype=np.int32) - tgt = np.array([1, 3, 3, 4], dtype=np.int64) - out = np.take(x64, x32) - assert_equal(out, tgt) - assert_equal(out.dtype, tgt.dtype) + pairs = [(np.int32, np.int32), (np.int32, np.int64), (np.int64, np.int32), (np.int64, np.int64)] + for array_type, indices_type in pairs: + x = np.array([1, 2, 3, 4, 5], dtype=array_type) + ind = np.array([0, 2, 2, 3], dtype=indices_type ) + tgt = np.array([1, 3, 3, 4], dtype=array_type) + out = np.take(x, ind) + assert_equal(out, tgt) + assert_equal(out.dtype, tgt.dtype) - # take 32 32 - x32_0 = np.array([1, 2, 3, 4, 5], dtype=np.int32) - x32_1 = np.array([0, 2, 2, 3], dtype=np.int32) - tgt = np.array([1, 3, 3, 4], dtype=np.int32) - out = np.take(x32_0, x32_1) - assert_equal(out, tgt) - assert_equal(out.dtype, tgt.dtype) - - # take 64 64 - x64_0 = np.array([1, 2, 3, 4, 5], dtype=np.int64) - x64_1 = np.array([0, 2, 2, 3], dtype=np.int64) - tgt = np.array([1, 3, 3, 4], 
dtype=np.int64) - out = np.take(x64_0, x64_1) - assert_equal(out, tgt) - assert_equal(out.dtype, tgt.dtype) def test_trace(self): c = [[1, 2], [3, 4], [5, 6]] From 540cdb34af1963346de87292f9f0f35cba3e2e77 Mon Sep 17 00:00:00 2001 From: Jules Date: Wed, 5 Jun 2024 11:50:36 +0800 Subject: [PATCH 476/980] Change if statement to a macro --- numpy/_core/src/multiarray/item_selection.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index db72c339f122..bc953054b0b6 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -231,18 +231,21 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, PyArrayObject *out, NPY_CLIPMODE clipmode) { PyArray_Descr *dtype; - PyArrayObject *obj = NULL, *self, *indices, *tmp; + PyArrayObject *obj = NULL, *self, *indices; npy_intp nd, i, n, m, max_item, chunk, itemsize, nelem; npy_intp shape[NPY_MAXDIMS]; npy_bool needs_refcounting; - indices = tmp = NULL; + indices = NULL; self = (PyArrayObject *)PyArray_CheckAxis(self0, &axis, NPY_ARRAY_CARRAY_RO); if (self == NULL) { return NULL; } + +#if NPY_SIZEOF_INTP == NPY_SIZEOF_INT + PyArrayObject *tmp; tmp = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, NPY_INT64, 0, 0); @@ -250,12 +253,13 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, goto fail; } - if (NPY_SIZEOF_INTP != 8) { - indices = (PyArrayObject *)PyArray_Cast(tmp, NPY_INTP); - Py_DECREF(tmp); - } else { - indices = tmp; - } + indices = (PyArrayObject *)PyArray_Cast(tmp, NPY_INTP); + Py_DECREF(tmp); +#else + indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, + NPY_INT64, + 0, 0); +#endif if (indices == NULL) { goto fail; From eacef413246d0bd5c39ba43d2da1294e3edac8d1 Mon Sep 17 00:00:00 2001 From: Jules Date: Wed, 5 Jun 2024 12:24:52 +0800 Subject: [PATCH 477/980] Fix lint errors --- numpy/_core/tests/test_numeric.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 2e83680a6225..72f5b74107cb 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -324,15 +324,17 @@ def test_take(self): out = np.take(a, indices) assert_equal(out, tgt) - pairs = [(np.int32, np.int32), (np.int32, np.int64), (np.int64, np.int32), (np.int64, np.int64)] + pairs = [ + (np.int32, np.int32), (np.int32, np.int64), + (np.int64, np.int32), (np.int64, np.int64) + ] for array_type, indices_type in pairs: - x = np.array([1, 2, 3, 4, 5], dtype=array_type) - ind = np.array([0, 2, 2, 3], dtype=indices_type ) - tgt = np.array([1, 3, 3, 4], dtype=array_type) - out = np.take(x, ind) - assert_equal(out, tgt) - assert_equal(out.dtype, tgt.dtype) - + x = np.array([1, 2, 3, 4, 5], dtype=array_type) + ind = np.array([0, 2, 2, 3], dtype=indices_type) + tgt = np.array([1, 3, 3, 4], dtype=array_type) + out = np.take(x, ind) + assert_equal(out, tgt) + assert_equal(out.dtype, tgt.dtype) def test_trace(self): c = [[1, 2], [3, 4], [5, 6]] From 30dfe94351aca7702ae0fe3cede4f670b838d4b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 5 Jun 2024 09:45:08 +0200 Subject: [PATCH 478/980] Add release note. 
Remove unused cython imports --- doc/release/upcoming_changes/26611.expired.rst | 2 ++ numpy/__init__.cython-30.pxd | 1 - numpy/__init__.pxd | 1 - numpy/_core/code_generators/numpy_api.py | 2 ++ 4 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 doc/release/upcoming_changes/26611.expired.rst diff --git a/doc/release/upcoming_changes/26611.expired.rst b/doc/release/upcoming_changes/26611.expired.rst new file mode 100644 index 000000000000..1df220d2b2a7 --- /dev/null +++ b/doc/release/upcoming_changes/26611.expired.rst @@ -0,0 +1,2 @@ +* ``set_string_function`` internal function was removed and ``PyArray_SetStringFunction`` + was stubbed out. diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 0270f0ee988f..2151a18b1e80 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -608,7 +608,6 @@ cdef extern from "numpy/arrayobject.h": # more than is probably needed until it can be checked further. int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... - void PyArray_SetStringFunction (object, int) dtype PyArray_DescrFromType (int) object PyArray_TypeObjectFromType (int) char * PyArray_Zero (ndarray) diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index aebb71fffa9c..8e7583bcb97d 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -523,7 +523,6 @@ cdef extern from "numpy/arrayobject.h": # more than is probably needed until it can be checked further. int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... - void PyArray_SetStringFunction (object, int) dtype PyArray_DescrFromType (int) object PyArray_TypeObjectFromType (int) char * PyArray_Zero (ndarray) diff --git a/numpy/_core/code_generators/numpy_api.py b/numpy/_core/code_generators/numpy_api.py index 30e2222e557e..ffdd70b6fe00 100644 --- a/numpy/_core/code_generators/numpy_api.py +++ b/numpy/_core/code_generators/numpy_api.py @@ -116,6 +116,8 @@ def get_annotations(): # Unused slot 41, was `PyArray_GetNumericOps`, 'PyArray_INCREF': (42,), 'PyArray_XDECREF': (43,), + # `PyArray_SetStringFunction` was stubbed out + # and should be removed in the future. 'PyArray_SetStringFunction': (44,), 'PyArray_DescrFromType': (45,), 'PyArray_TypeObjectFromType': (46,), From 731fd5f7332b71d7c40615cf03e92a729e771f29 Mon Sep 17 00:00:00 2001 From: Jules Date: Wed, 5 Jun 2024 15:51:13 +0800 Subject: [PATCH 479/980] [wheel build] Clean-up USING_CLANG_CL from test file --- numpy/_core/tests/test_einsum.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index bc5927122786..0a97693f73b0 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -10,12 +10,6 @@ assert_raises, suppress_warnings, assert_raises_regex, assert_allclose ) -try: - COMPILERS = np.show_config(mode="dicts")["Compilers"] - USING_CLANG_CL = COMPILERS["c"]["name"] == "clang-cl" -except TypeError: - USING_CLANG_CL = False - # Setup for optimize einsum chars = 'abcdefghij' sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) From 224cb40f217c078104611fee44fab5fe7561e871 Mon Sep 17 00:00:00 2001 From: Jules Date: Wed, 5 Jun 2024 16:36:53 +0800 Subject: [PATCH 480/980] TST: [wheel build] re-enable test_shift_all_bits on clang-cl This is a follow up of #26602 and #24162 where previously the test fails on Clang-cl. 
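The guarantee the test exercises is that shifting by the full width of the
type (or more) is well defined in NumPy, unlike in C where it is undefined
behavior. A rough sketch of the scalar cases the test asserts ('l' is one of
the type codes that previously xfailed; exact reprs may vary by platform):

    >>> import numpy as np
    >>> dt = np.dtype('l')
    >>> nbits = dt.itemsize * 8                  # shift amount == type width
    >>> int(np.left_shift(dt.type(5), nbits))    # all bits shifted out
    0
    >>> int(np.right_shift(dt.type(-5), nbits))  # sign bit fills
    -1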
I've tried a few CI runs on my fork and it seems the test passes now.
---
 numpy/_core/tests/test_scalarmath.py | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py
index 057f84d17633..3517023cb5f0 100644
--- a/numpy/_core/tests/test_scalarmath.py
+++ b/numpy/_core/tests/test_scalarmath.py
@@ -18,12 +18,6 @@
     assert_warns, _SUPPORTS_SVE,
     )

-try:
-    COMPILERS = np.show_config(mode="dicts")["Compilers"]
-    USING_CLANG_CL = COMPILERS["c"]["name"] == "clang-cl"
-except TypeError:
-    USING_CLANG_CL = False
-
 types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
          np.int_, np.uint, np.longlong, np.ulonglong,
          np.single, np.double, np.longdouble, np.csingle,
@@ -805,12 +799,6 @@ class TestBitShifts:
                              [operator.rshift, operator.lshift], ids=['>>', '<<'])
     def test_shift_all_bits(self, type_code, op):
         """Shifts where the shift amount is the width of the type or wider """
-        if (
-            USING_CLANG_CL and
-            type_code in ("l", "L") and
-            op is operator.lshift
-        ):
-            pytest.xfail("Failing on clang-cl builds")
         # gh-2449
         dt = np.dtype(type_code)
         nbits = dt.itemsize * 8
From 302a297dbe3cd74c52380fb28cf925501c0ba3e2 Mon Sep 17 00:00:00 2001
From: Jules
Date: Wed, 5 Jun 2024 18:10:51 +0800
Subject: [PATCH 481/980] implement NPY_ARRAY_SAME_KIND_CASTING and use in
 np.take

---
 numpy/_core/src/multiarray/arrayobject.h    |  8 ++++++++
 numpy/_core/src/multiarray/ctors.c          |  5 +++++
 numpy/_core/src/multiarray/item_selection.c | 22 +++++----------------
 3 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/numpy/_core/src/multiarray/arrayobject.h b/numpy/_core/src/multiarray/arrayobject.h
index 476b87a9d7e1..03e59c41ca92 100644
--- a/numpy/_core/src/multiarray/arrayobject.h
+++ b/numpy/_core/src/multiarray/arrayobject.h
@@ -55,6 +55,14 @@
 static const int NPY_ARRAY_WAS_PYTHON_COMPLEX = (1 << 28);
 static const int NPY_ARRAY_WAS_INT_AND_REPLACED = (1 << 27);
 static const int NPY_ARRAY_WAS_PYTHON_LITERAL = (1 << 30 | 1 << 29 | 1 << 28);

+/*
+ * This flag allows same kind casting, similar to NPY_ARRAY_FORCECAST.
+ *
+ * An array never has this flag set; it is only used as a parameter
+ * flag to the various FromAny functions.
+ */ +static const int NPY_ARRAY_SAME_KIND_CASTING = (1 << 26); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index efd89aad6521..25319f2f6bf5 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -7,6 +7,7 @@ #include #include "numpy/arrayobject.h" +#include "arrayobject.h" #include "numpy/arrayscalars.h" #include "numpy/npy_math.h" @@ -1908,6 +1909,10 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) newtype->elsize = oldtype->elsize; } + if (flags & NPY_ARRAY_SAME_KIND_CASTING) { + casting = NPY_SAME_KIND_CASTING; + } + /* If the casting if forced, use the 'unsafe' casting rule */ if (flags & NPY_ARRAY_FORCECAST) { casting = NPY_UNSAFE_CASTING; diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index bc953054b0b6..4d1115e57138 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -244,23 +244,11 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, return NULL; } -#if NPY_SIZEOF_INTP == NPY_SIZEOF_INT - PyArrayObject *tmp; - tmp = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, - NPY_INT64, - 0, 0); - if (tmp == NULL) { - goto fail; - } - - indices = (PyArrayObject *)PyArray_Cast(tmp, NPY_INTP); - Py_DECREF(tmp); -#else - indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, - NPY_INT64, - 0, 0); -#endif - + indices = (PyArrayObject *)PyArray_FromAny(indices0, + PyArray_DescrFromType(NPY_INTP), + 0, 0, + NPY_ARRAY_SAME_KIND_CASTING, + NULL); if (indices == NULL) { goto fail; } From f307a5dcb86af1b429e5d738b95660ac32018f84 Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Wed, 5 Jun 2024 06:43:41 -0600 Subject: [PATCH 482/980] DOC: Update randn() to use rng.normal() Updates all references to `randn` to use `normal` from Generator. I left the matlib versions alone. In a few spots I added `seed=123`. I removed the `# may vary` tag from all examples. I reformatted some longer outputs to use Jupyter style output. 
[skip actions] [skip azp] [skip cirrus] --- numpy/_core/shape_base.py | 2 +- numpy/lib/_function_base_impl.py | 2 +- numpy/lib/_histograms_impl.py | 2 +- numpy/lib/_twodim_base_impl.py | 4 ++-- numpy/linalg/_linalg.py | 19 ++++++++----------- numpy/polynomial/hermite.py | 4 ++-- numpy/polynomial/hermite_e.py | 4 ++-- numpy/polynomial/laguerre.py | 4 ++-- numpy/polynomial/polynomial.py | 20 ++++++++++---------- 9 files changed, 29 insertions(+), 32 deletions(-) diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index ece77538f4c9..8ecd5f61903c 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -418,7 +418,7 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): Examples -------- >>> rng = np.random.default_rng() - >>> arrays = [rng.standard_normal(size=(3,4)) for _ in range(10)] + >>> arrays = [rng.normal(size=(3,4)) for _ in range(10)] >>> np.stack(arrays, axis=0).shape (10, 3, 4) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 040e2491fa24..b9ac78af869f 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -324,7 +324,7 @@ def flip(m, axis=None): [[1, 0], [3, 2]]]) >>> rng = np.random.default_rng() - >>> A = rng.standard_normal(size=(3,4,5)) + >>> A = rng.normal(size=(3,4,5)) >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) True """ diff --git a/numpy/lib/_histograms_impl.py b/numpy/lib/_histograms_impl.py index 321c21868312..80eeffb6a03c 100644 --- a/numpy/lib/_histograms_impl.py +++ b/numpy/lib/_histograms_impl.py @@ -973,7 +973,7 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None): Examples -------- >>> rng = np.random.default_rng() - >>> r = rng.standard_normal(size=(100,3)) + >>> r = rng.normal(size=(100,3)) >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) >>> H.shape, edges[0].size, edges[1].size, edges[2].size ((5, 8, 4), 6, 9, 5) diff --git a/numpy/lib/_twodim_base_impl.py b/numpy/lib/_twodim_base_impl.py index de4f373b4d6f..8eb6eccfcfbd 100644 --- a/numpy/lib/_twodim_base_impl.py +++ b/numpy/lib/_twodim_base_impl.py @@ -90,7 +90,7 @@ def fliplr(m): [3., 0., 0.]]) >>> rng = np.random.default_rng() - >>> A = rng.standard_normal(size=(2,3,5)) + >>> A = rng.normal(size=(2,3,5)) >>> np.all(np.fliplr(A) == A[:,::-1,...]) True @@ -144,7 +144,7 @@ def flipud(m): [1., 0., 0.]]) >>> rng = np.random.default_rng() - >>> A = rng.standard_normal(size=(2,3,5)) + >>> A = rng.normal(size=(2,3,5)) >>> np.all(np.flipud(A) == A[::-1,...]) True diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index a8c9332d4763..6e3b18fef94d 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -275,7 +275,7 @@ def tensorsolve(a, b, axes=None): >>> a = np.eye(2*3*4) >>> a.shape = (2*3, 4, 2, 3, 4) >>> rng = np.random.default_rng() - >>> b = rng.standard_normal(size=(2*3, 4)) + >>> b = rng.normal(size=(2*3, 4)) >>> x = np.linalg.tensorsolve(a, b) >>> x.shape (2, 3, 4) @@ -458,7 +458,7 @@ def tensorinv(a, ind=2): >>> ainv.shape (8, 3, 4, 6) >>> rng = np.random.default_rng() - >>> b = rng.standard_normal(size=(4, 6)) + >>> b = rng.normal(size=(4, 6)) >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) True @@ -468,7 +468,7 @@ def tensorinv(a, ind=2): >>> ainv.shape (8, 3, 24) >>> rng = np.random.default_rng() - >>> b = rng.standard_normal(24) + >>> b = rng.normal(size=24) >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) True @@ -982,7 +982,7 @@ def qr(a, mode='reduced'): Examples -------- >>> rng = 
np.random.default_rng() - >>> a = rng.standard_normal(size=(9, 6)) + >>> a = rng.normal(size=(9, 6)) >>> Q, R = np.linalg.qr(a) >>> np.allclose(a, np.dot(Q, R)) # a does equal QR True @@ -1708,12 +1708,9 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Examples -------- >>> rng = np.random.default_rng() - >>> a_re = rng.standard_normal(size=(9, 6)) - >>> a_im = rng.standard_normal(size=(9, 6)) - >>> a = a_re + 1j*a_im - >>> b_re = rng.standard_normal(size=(2, 7, 8, 3)) - >>> b_im = rng.standard_normal(size=(2, 7, 8, 3)) - >>> b = b_re + 1j*b_im + >>> a = rng.normal(size=(9, 6)) + 1j*rng.normal(size=(9, 6)) + >>> b = rng.normal(size=(2, 7, 8, 3)) + 1j*rng.normal(size=(2, 7, 8, 3)) + Reconstruction based on full SVD, 2D case: @@ -2191,7 +2188,7 @@ def pinv(a, rcond=None, hermitian=False, *, rtol=_NoValue): ``a+ * a * a+ == a+``: >>> rng = np.random.default_rng() - >>> a = rng.standard_normal(size=(9, 6)) + >>> a = rng.normal(size=(9, 6)) >>> B = np.linalg.pinv(a) >>> np.allclose(a, np.dot(a, np.dot(B, a))) True diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index b843942cabcf..ade85f46e8f9 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -1461,10 +1461,10 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): >>> from numpy.polynomial.hermite import hermfit, hermval >>> x = np.linspace(-10, 10) >>> rng = np.random.default_rng(seed=123) - >>> err = rng.standard_normal(size=(len(x)))/10 + >>> err = rng.normal(scale=1./10,size=len(x)) >>> y = hermval(x, [1, 2, 3]) + err >>> hermfit(x, y, 2) - array([1.02294967, 2.00016403, 2.99994614]) # may vary + array([1.02294967, 2.00016403, 2.99994614]) """ return pu._fit(hermvander, x, y, deg, rcond, full, w) diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 14006663d561..6e80e0d4ae0b 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -1388,10 +1388,10 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): >>> from numpy.polynomial.hermite_e import hermefit, hermeval >>> x = np.linspace(-10, 10) >>> rng = np.random.default_rng(seed=123) - >>> err = rng.standard_normal(size=(len(x)))/10 + >>> err = rng.normal(scale=1./10,size=len(x)) >>> y = hermeval(x, [1, 2, 3]) + err >>> hermefit(x, y, 2) - array([1.02284196, 2.00032805, 2.99978457]) # may vary + array([1.02284196, 2.00032805, 2.99978457]) """ return pu._fit(hermevander, x, y, deg, rcond, full, w) diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 7c746cd5a204..bb1d80a3917a 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -1442,10 +1442,10 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): >>> from numpy.polynomial.laguerre import lagfit, lagval >>> x = np.linspace(0, 10) >>> rng = np.random.default_rng(seed=123) - >>> err = rng.standard_normal(size=(len(x)))/10 + >>> err = rng.normal(scale=1./10,size=len(x)) >>> y = lagval(x, [1, 2, 3]) + err >>> lagfit(x, y, 2) - array([1.00578369, 1.99417356, 2.99827656]) # may vary + array([1.00578369, 1.99417356, 2.99827656]) """ return pu._fit(lagvander, x, y, deg, rcond, full, w) diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 32042b28c74f..a47912f2dd95 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1444,16 +1444,16 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): >>> from numpy.polynomial import polynomial as P >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 
1] >>> rng = np.random.default_rng(seed=123) - >>> err = rng.standard_normal(size=len(x)) + >>> err = rng.normal(size=len(x)) >>> y = x**3 - x + err # x^3 - x + Gaussian noise >>> c, stats = P.polyfit(x,y,3,full=True) >>> c # c[0], c[1] approx. -1, c[2] should be approx. 0, c[3] approx. 1 - array([ 0.23111996, -1.02785049, -0.2241444 , 1.08405657]) # may vary + array([ 0.23111996, -1.02785049, -0.2241444 , 1.08405657]) >>> stats # note the large SSR, explaining the rather poor results - [array([48.312088]), # may vary - 4, # may vary - array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), # may vary - 1.1324274851176597e-14] # may vary + [array([48.312088]), + 4, + array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), + 1.1324274851176597e-14] Same thing without the added noise @@ -1462,10 +1462,10 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): >>> c # c[0], c[1] ~= -1, c[2] should be "very close to 0", c[3] ~= 1 array([-6.73496154e-17, -1.00000000e+00, 0.00000000e+00, 1.00000000e+00]) >>> stats # note the minuscule SSR - [array([8.79579319e-31]), # may vary - 4, # may vary - array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), # may vary - 1.1324274851176597e-14] # may vary + [array([8.79579319e-31]), + 4, + array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), + 1.1324274851176597e-14] """ return pu._fit(polyvander, x, y, deg, rcond, full, w) From e7e56da9ccbde2f3332465dc961640cd2d22dcf1 Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Wed, 5 Jun 2024 08:32:18 -0600 Subject: [PATCH 483/980] DOC: Update randn() to use rng.normal() Updates all references to `randn` to use `normal` from Generator. I left the matlib versions alone. I remove all references to `seed=123` and left `# may vary`. Longer outputs were reformatted to use Jupyter style output. 
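For a fixed seed the swap is exact, since `Generator.normal` draws standard
normals by default (a sketch; this assumes `normal(loc=0, scale=1)` consumes
the bit stream exactly like `standard_normal`, which holds for the current
Generator implementation):

    >>> import numpy as np
    >>> a = np.random.default_rng(12345).standard_normal(size=3)
    >>> b = np.random.default_rng(12345).normal(size=3)
    >>> np.array_equal(a, b)
    True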
[skip actions] [skip azp] [skip cirrus] --- numpy/polynomial/hermite.py | 4 ++-- numpy/polynomial/hermite_e.py | 4 ++-- numpy/polynomial/laguerre.py | 4 ++-- numpy/polynomial/polynomial.py | 6 +++--- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index ade85f46e8f9..ce678df5774c 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -1460,11 +1460,11 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): -------- >>> from numpy.polynomial.hermite import hermfit, hermval >>> x = np.linspace(-10, 10) - >>> rng = np.random.default_rng(seed=123) + >>> rng = np.random.default_rng() >>> err = rng.normal(scale=1./10,size=len(x)) >>> y = hermval(x, [1, 2, 3]) + err >>> hermfit(x, y, 2) - array([1.02294967, 2.00016403, 2.99994614]) + array([1.02294967, 2.00016403, 2.99994614]) # may vary """ return pu._fit(hermvander, x, y, deg, rcond, full, w) diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 6e80e0d4ae0b..02027eafbbf6 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -1387,11 +1387,11 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): -------- >>> from numpy.polynomial.hermite_e import hermefit, hermeval >>> x = np.linspace(-10, 10) - >>> rng = np.random.default_rng(seed=123) + >>> rng = np.random.default_rng() >>> err = rng.normal(scale=1./10,size=len(x)) >>> y = hermeval(x, [1, 2, 3]) + err >>> hermefit(x, y, 2) - array([1.02284196, 2.00032805, 2.99978457]) + array([1.02284196, 2.00032805, 2.99978457]) # may vary """ return pu._fit(hermevander, x, y, deg, rcond, full, w) diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index bb1d80a3917a..f5fa0fbd53ac 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -1441,11 +1441,11 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): -------- >>> from numpy.polynomial.laguerre import lagfit, lagval >>> x = np.linspace(0, 10) - >>> rng = np.random.default_rng(seed=123) + >>> rng = np.random.default_rng() >>> err = rng.normal(scale=1./10,size=len(x)) >>> y = lagval(x, [1, 2, 3]) + err >>> lagfit(x, y, 2) - array([1.00578369, 1.99417356, 2.99827656]) + array([1.00578369, 1.99417356, 2.99827656]) # may vary """ return pu._fit(lagvander, x, y, deg, rcond, full, w) diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index a47912f2dd95..7b78005fa396 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1443,14 +1443,14 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): -------- >>> from numpy.polynomial import polynomial as P >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] - >>> rng = np.random.default_rng(seed=123) + >>> rng = np.random.default_rng() >>> err = rng.normal(size=len(x)) >>> y = x**3 - x + err # x^3 - x + Gaussian noise >>> c, stats = P.polyfit(x,y,3,full=True) >>> c # c[0], c[1] approx. -1, c[2] should be approx. 0, c[3] approx. 
1 - array([ 0.23111996, -1.02785049, -0.2241444 , 1.08405657]) + array([ 0.23111996, -1.02785049, -0.2241444 , 1.08405657]) # may vary >>> stats # note the large SSR, explaining the rather poor results - [array([48.312088]), + [array([48.312088]), # may vary 4, array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), 1.1324274851176597e-14] From 25a93a3e1d2f593362a48239196e38f5240a1572 Mon Sep 17 00:00:00 2001 From: Ebigide Jude Date: Wed, 5 Jun 2024 18:54:26 -0500 Subject: [PATCH 484/980] DOC: Updated notes and examples for np.insert This closes issue #25895. Continues PR #26114. I added a note with a link. I changed the examples to use non-repeating numbers, to make the output more obvious. I added an example to show the difference between `obj=1` and `obj=[1]`. [skip actions] [skip azp] [skip cirrus] --- numpy/lib/_function_base_impl.py | 73 +++++++++++++++----------------- 1 file changed, 34 insertions(+), 39 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 29a4c13b3c66..5cc20ac50ebe 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5416,56 +5416,51 @@ def insert(arr, obj, values, axis=None): Notes ----- - Note that for higher dimensional inserts ``obj=0`` behaves very - differently from ``obj=[0]`` just like ``arr[:,0,:] = values`` - is different from ``arr[:,[0],:] = values``. - This difference arises due to the broadcasting rules and - axis manipulation in NumPy: - - Using `obj=0` specifies a single position, leading to direct - insertion along the specified axis. - Using `obj=[0]` treats the index as a list, which results in different - broadcasting behavior and can affect how values are inserted. - - For a more detailed explanation of broadcasting and indexing, - refer to the :ref:`NumPy documentation on indexing ` and - :ref:`broadcasting `. + Note that for higher dimensional inserts ``obj=0`` behaves very different + from ``obj=[0]`` just like ``arr[:,0,:] = values`` is different from + ``arr[:,[0],:] = values``. This is because of the difference between basic + and advanced :ref:`indexing `. Examples -------- - >>> a = np.array([[1, 1], [2, 2], [3, 3]]) + >>> a = np.arange(6).reshape(3, 2) >>> a - array([[1, 1], - [2, 2], - [3, 3]]) - >>> np.insert(a, 1, 5) - array([1, 5, 1, ..., 2, 3, 3]) - >>> np.insert(a, 1, 5, axis=1) - array([[1, 5, 1], - [2, 5, 2], - [3, 5, 3]]) - - Difference between sequence and scalars: - - >>> np.insert(a, [1], [[1],[2],[3]], axis=1) - array([[1, 1, 1], - [2, 2, 2], - [3, 3, 3]]) - >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), - ... np.insert(a, [1], [[1],[2],[3]], axis=1)) + array([[0, 1], + [2, 3], + [4, 5]]) + >>> np.insert(a, 1, 6) + array([0, 6, 1, 2, 3, 4, 5]) + >>> np.insert(a, 1, 6, axis=1) + array([[0, 6, 1], + [2, 6, 3], + [4, 6, 5]]) + + Difference between sequence and scalars, + showing how ``obj=[1]`` behaves different from ``obj=1``: + + >>> np.insert(a, [1], [[7],[8],[9]], axis=1) + array([[0, 7, 1], + [2, 8, 3], + [4, 9, 5]]) + >>> np.insert(a, 1, [[7],[8],[9]], axis=1) + array([[0, 7, 8, 9, 1], + [2, 7, 8, 9, 3], + [4, 7, 8, 9, 5]]) + >>> np.array_equal(np.insert(a, 1, [7, 8, 9], axis=1), + ... 
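The shapes alone already show the difference the new examples document
(values mirror the docstring below; `a` is the 3x2 array from those
examples):

    >>> import numpy as np
    >>> a = np.arange(6).reshape(3, 2)
    >>> np.insert(a, [1], [[7], [8], [9]], axis=1).shape  # one new column
    (3, 3)
    >>> np.insert(a, 1, [[7], [8], [9]], axis=1).shape    # three new columns
    (3, 5)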
np.insert(a, [1], [[7],[8],[9]], axis=1)) True >>> b = a.flatten() >>> b - array([1, 1, 2, 2, 3, 3]) - >>> np.insert(b, [2, 2], [5, 6]) - array([1, 1, 5, ..., 2, 3, 3]) + array([0, 1, 2, 3, 4, 5]) + >>> np.insert(b, [2, 2], [6, 7]) + array([0, 1, 6, 7, 2, 3, 4, 5]) - >>> np.insert(b, slice(2, 4), [5, 6]) - array([1, 1, 5, ..., 2, 3, 3]) + >>> np.insert(b, slice(2, 4), [7, 8]) + array([0, 1, 7, 2, 8, 3, 4, 5]) >>> np.insert(b, [2, 2], [7.13, False]) # type casting - array([1, 1, 7, ..., 2, 3, 3]) + array([0, 1, 7, 0, 2, 3, 4, 5]) >>> x = np.arange(8).reshape(2, 4) >>> idx = (1, 3) From aeebbbe27034ac7dfd5694829267b53bd4703335 Mon Sep 17 00:00:00 2001 From: Jules <57632293+JuliaPoo@users.noreply.github.com> Date: Thu, 6 Jun 2024 09:13:59 +0800 Subject: [PATCH 485/980] Update doc/release/upcoming_changes/26580.new_feature.rst Co-authored-by: Matti Picus --- doc/release/upcoming_changes/26580.new_feature.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/26580.new_feature.rst b/doc/release/upcoming_changes/26580.new_feature.rst index a837b51834b8..c625e9b9d8a2 100644 --- a/doc/release/upcoming_changes/26580.new_feature.rst +++ b/doc/release/upcoming_changes/26580.new_feature.rst @@ -1 +1 @@ -* `numpy.asanyarray` now support ``copy`` and ``device`` arguments, matching `numpy.asarray`. +* `numpy.asanyarray` now supports ``copy`` and ``device`` arguments, matching `numpy.asarray`. From 71eb077b70896876ade4e4c4116eaab8a26c0c0b Mon Sep 17 00:00:00 2001 From: Jules <57632293+JuliaPoo@users.noreply.github.com> Date: Thu, 6 Jun 2024 09:14:57 +0800 Subject: [PATCH 486/980] Update numpy/_core/_add_newdocs.py Co-authored-by: Matti Picus --- numpy/_core/_add_newdocs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index e7efec88d4dc..e878ccef0954 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1035,6 +1035,7 @@ For Array-API interoperability only, so must be ``"cpu"`` if passed. .. versionadded:: 2.1.0 + copy : bool, optional If ``True``, then the object is copied. If ``None`` then the object is copied only if needed, i.e. if ``__array__`` returns a copy, if obj From a9b773da875893083ae9a77fea79ec1e20bf8728 Mon Sep 17 00:00:00 2001 From: Jules <57632293+JuliaPoo@users.noreply.github.com> Date: Thu, 6 Jun 2024 09:15:05 +0800 Subject: [PATCH 487/980] Update numpy/_core/_add_newdocs.py Co-authored-by: Matti Picus --- numpy/_core/_add_newdocs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index e878ccef0954..177462384e81 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1045,6 +1045,7 @@ Default: ``None``. .. versionadded:: 2.1.0 + ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 From fe7cf281e5617fd8ac8ce96dcc7f82ea2badb935 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 5 Jun 2024 22:20:25 -0300 Subject: [PATCH 488/980] DOC: add getbufsize example --- numpy/_core/_ufunc_config.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index 1dee8a84a23d..33f942a2656f 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -184,6 +184,11 @@ def getbufsize(): getbufsize : int Size of ufunc buffer in bytes. 
+ Examples + -------- + >>> np.getbufsize() + 8192 + """ return _get_extobj_dict()["bufsize"] From fb98231d38fadc4faf0404b00e29ada974c9845c Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 5 Jun 2024 22:26:23 -0300 Subject: [PATCH 489/980] DOC: add setbufsize example --- numpy/_core/_ufunc_config.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index 1dee8a84a23d..d7d9e51b4c7e 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -167,6 +167,18 @@ def setbufsize(size): size : int Size of buffer. + Returns + ------- + bufsize : int + Previous size of ufunc buffer in bytes. + + Examples + -------- + >>> np.setbufsize(4096) + 8192 + >>> np.getbufsize() + 4096 + """ old = _get_extobj_dict()["bufsize"] extobj = _make_extobj(bufsize=size) From cc55d184c4339afd2dcf2de90dd40a661643eb2f Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 5 Jun 2024 22:40:17 -0300 Subject: [PATCH 490/980] DOC: add `matrix_transpose` example --- numpy/_core/fromnumeric.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index e8ac19e50637..dee671432f72 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -722,6 +722,19 @@ def matrix_transpose(x, /): -------- transpose : Generic transpose method. + Examples + -------- + >>> np.matrix_transpose([[1, 2], [3, 4]]) + array([[1, 3], + [2, 4]]) + + >>> np.matrix_transpose([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) + array([[[1, 3], + [2, 4]], + + [[5, 7], + [6, 8]]]) + """ x = asanyarray(x) if x.ndim < 2: From cfb0971f666de1aaaaeecf6e428ef0a132b7af1a Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 5 Jun 2024 22:54:22 -0300 Subject: [PATCH 491/980] DOC: add `unique_all` example --- numpy/lib/_arraysetops_impl.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index b143466ac6f6..92df5f1b01a1 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -433,6 +433,15 @@ def unique_all(x): -------- unique : Find the unique elements of an array. + Examples + -------- + >>> np.unique_all([1, 1, 2]) + UniqueAllResult( + values=array([1, 2]), + indices=array([0, 2]), + inverse_indices=array([0, 0, 1]), + counts=array([2, 1])) + """ result = unique( x, From 8ed87251f490512aa6ff022a7f3be6429dbb3b9a Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 5 Jun 2024 22:58:07 -0300 Subject: [PATCH 492/980] DOC: add `unique_counts` example --- numpy/lib/_arraysetops_impl.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index b143466ac6f6..4bdeb5a9c27c 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -476,6 +476,11 @@ def unique_counts(x): -------- unique : Find the unique elements of an array. 
+ Examples + -------- + >>> np.unique_counts([1, 1, 2]) + UniqueCountsResult(values=array([1, 2]), counts=array([2, 1])) + """ result = unique( x, From c3d970ca5ba4b8c9674b38b138126e4368637a40 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 5 Jun 2024 23:00:53 -0300 Subject: [PATCH 493/980] DOC: add `unique_inverse` example --- numpy/lib/_arraysetops_impl.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index b143466ac6f6..0b9fb81f9559 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -520,6 +520,11 @@ def unique_inverse(x): -------- unique : Find the unique elements of an array. + Examples + -------- + >>> np.unique_inverse([1, 1, 2]) + UniqueInverseResult(values=array([1, 2]), inverse_indices=array([0, 0, 1])) + """ result = unique( x, From 556994ddf72b6daae42e46c21e66cd06bbd74690 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 5 Jun 2024 23:04:53 -0300 Subject: [PATCH 494/980] DOC: add `unique_values` example --- numpy/lib/_arraysetops_impl.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index b143466ac6f6..75d9bb7b8f3c 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -560,6 +560,11 @@ def unique_values(x): -------- unique : Find the unique elements of an array. + Examples + -------- + >>> np.unique_values([1, 1, 2]) + array([1, 2]) + """ return unique( x, From 62b628ce350a05021252eb10f08552166779e352 Mon Sep 17 00:00:00 2001 From: Jules Date: Thu, 6 Jun 2024 12:34:35 +0800 Subject: [PATCH 495/980] Add NPY_ARRAY_DEFAULT flag --- numpy/_core/src/multiarray/item_selection.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 4d1115e57138..b4943851938d 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -245,10 +245,10 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, } indices = (PyArrayObject *)PyArray_FromAny(indices0, - PyArray_DescrFromType(NPY_INTP), - 0, 0, - NPY_ARRAY_SAME_KIND_CASTING, - NULL); + PyArray_DescrFromType(NPY_INTP), + 0, 0, + NPY_ARRAY_SAME_KIND_CASTING | NPY_ARRAY_DEFAULT, + NULL); if (indices == NULL) { goto fail; } From 30e4f695d1b25dad82354c19af36a8b51664c7a1 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 6 Jun 2024 11:19:26 +0300 Subject: [PATCH 496/980] TST: skip conftest.py in test_warnings --- numpy/tests/test_warnings.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py index df90fcef8c59..9304c1346cbf 100644 --- a/numpy/tests/test_warnings.py +++ b/numpy/tests/test_warnings.py @@ -67,6 +67,8 @@ def test_warning_calls(): continue if path == base / "random" / "__init__.py": continue + if path == base / "conftest.py": + continue # use tokenize to auto-detect encoding on systems where no # default encoding is defined (e.g. 
LANG='C') with tokenize.open(str(path)) as file: From 22f5282b1b3af88d1326017f49d190d658d2e939 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 6 Jun 2024 12:04:36 +0300 Subject: [PATCH 497/980] MAINT: fix up several doctests --- numpy/_core/_add_newdocs.py | 2 +- numpy/conftest.py | 5 +++++ numpy/doc/ufuncs.py | 2 +- numpy/exceptions.py | 8 ++++++-- 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 4e358aeb0fa5..3f97b0290a94 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -6939,7 +6939,7 @@ def refer_to_array_attribute(attr, method=True): >>> arr = np.array(["hello", None, "world"], ... dtype=StringDType(na_object=None)) >>> arr - array(["hello", None, "world", dtype=StringDType(na_object=None)) + array(["hello", None, "world"], dtype=StringDType(na_object=None)) >>> arr[1] is None True diff --git a/numpy/conftest.py b/numpy/conftest.py index 0c5092ddc4cb..152f5f256e2e 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -183,3 +183,8 @@ def warnings_errors_and_rng(test=None): # recognize the StringDType repr dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType + # temporary skips + dt_config.skiplist = set([ + 'numpy.random.Generator.f', # tries to import from scipy.stats + ]) + diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py index c99e9abc99a5..f400981157de 100644 --- a/numpy/doc/ufuncs.py +++ b/numpy/doc/ufuncs.py @@ -113,7 +113,7 @@ output argument is used, the ufunc still returns a reference to the result. >>> x = np.arange(2) - >>> np.add(np.arange(2),np.arange(2.),x) + >>> np.add(np.arange(2, dtype=float), np.arange(2, dtype=float), x, casting='unsafe') array([0, 2]) >>> x array([0, 2]) diff --git a/numpy/exceptions.py b/numpy/exceptions.py index b7df57c69fbd..54088d315363 100644 --- a/numpy/exceptions.py +++ b/numpy/exceptions.py @@ -222,7 +222,9 @@ class DTypePromotionError(TypeError): Datetimes and complex numbers are incompatible classes and cannot be promoted: - >>> np.result_type(np.dtype("M8[s]"), np.complex128) + >>> np.result_type(np.dtype("M8[s]"), np.complex128) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... DTypePromotionError: The DType could not be promoted by . This means that no common DType exists for the given inputs. For example they cannot be stored in a @@ -235,7 +237,9 @@ class DTypePromotionError(TypeError): >>> dtype1 = np.dtype([("field1", np.float64), ("field2", np.int64)]) >>> dtype2 = np.dtype([("field1", np.float64)]) - >>> np.promote_types(dtype1, dtype2) + >>> np.promote_types(dtype1, dtype2) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... DTypePromotionError: field names `('field1', 'field2')` and `('field1',)` mismatch. From b6a275a5ae2c3e14b28a13ace11ad64ecd8aa3fd Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 6 Jun 2024 12:56:29 +0300 Subject: [PATCH 498/980] MAINT: fix minor hiccups in tutorials, add doc/user/conftest.py The conftest is needed to get the smoke-docs configuration from the main conftest. 
To run doctesting, use (this is scipy's `smoke-tutorial`) $ pytest --doctest-glob=*rst ~/repos/numpy/doc/source/user/ -v --- doc/source/user/basics.dispatch.rst | 2 +- doc/source/user/basics.subclassing.rst | 20 ++++++++++---------- doc/source/user/basics.types.rst | 2 +- doc/source/user/conftest.py | 4 ++++ numpy/conftest.py | 9 +++++++++ 5 files changed, 25 insertions(+), 12 deletions(-) create mode 100644 doc/source/user/conftest.py diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index daea7474aa1a..1505c9285ea8 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -300,7 +300,7 @@ implement the ``__array_ufunc__`` and ``__array_function__`` protocols in the To check if a Numpy function can be overridden via ``__array_ufunc__``, you can use :func:`~numpy.testing.overrides.allows_array_ufunc_override`: ->>> from np.testing.overrides import allows_array_ufunc_override +>>> from numpy.testing.overrides import allows_array_ufunc_override >>> allows_array_ufunc_override(np.add) True diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index 83be116b7e7f..e0baba938f16 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -158,21 +158,21 @@ __new__ documentation For example, consider the following Python code: >>> class C: ->>> def __new__(cls, *args): ->>> print('Cls in __new__:', cls) ->>> print('Args in __new__:', args) ->>> # The `object` type __new__ method takes a single argument. ->>> return object.__new__(cls) ->>> def __init__(self, *args): ->>> print('type(self) in __init__:', type(self)) ->>> print('Args in __init__:', args) +... def __new__(cls, *args): +... print('Cls in __new__:', cls) +... print('Args in __new__:', args) +... # The `object` type __new__ method takes a single argument. +... return object.__new__(cls) +... def __init__(self, *args): +... print('type(self) in __init__:', type(self)) +... print('Args in __init__:', args) meaning that we get: >>> c = C('hello') -Cls in __new__: +Cls in __new__: Args in __new__: ('hello',) -type(self) in __init__: +type(self) in __init__: Args in __init__: ('hello',) When we call ``C('hello')``, the ``__new__`` method gets its own class diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index 3dd947002c20..57a840840d99 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -56,7 +56,7 @@ dtype objects also contain information about the type, such as its bit-width and its byte-order. 
The data type can also be used indirectly to query properties of the type, such as whether it is an integer:: - >>> d = np.dtype(int64) + >>> d = np.dtype(np.int64) >>> d dtype('int64') diff --git a/doc/source/user/conftest.py b/doc/source/user/conftest.py new file mode 100644 index 000000000000..54f9d6d3158c --- /dev/null +++ b/doc/source/user/conftest.py @@ -0,0 +1,4 @@ +# doctesting configuration from the main conftest +from numpy.conftest import dt_config # noqa: F401 + +#breakpoint() diff --git a/numpy/conftest.py b/numpy/conftest.py index 152f5f256e2e..b36282f387c7 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -188,3 +188,12 @@ def warnings_errors_and_rng(test=None): 'numpy.random.Generator.f', # tries to import from scipy.stats ]) + # xfail problematic tutorials + dt_config.pytest_extra_xfail = { + 'how-to-verify-bug.rst' : '', + 'c-info.ufunc-tutorial.rst': '', + 'basics.interoperability.rst': 'needs pandas', + 'basics.dispatch.rst': 'errors out in /testing/overrides.py', + 'basics.subclassing.rst': '.. testcode:: admonitions not understood' + } + From 1afbd5e2f5dced4bbe18f8c0d64fa5ed90434792 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 6 Jun 2024 13:02:30 +0300 Subject: [PATCH 499/980] appease the linter --- numpy/_core/multiarray.py | 2 +- numpy/conftest.py | 11 ++++++----- numpy/doc/ufuncs.py | 3 ++- numpy/exceptions.py | 2 +- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 5840888eac3d..75ab59851abf 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -154,7 +154,7 @@ def empty_like( array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) - """ + """ # NOQA return (prototype,) diff --git a/numpy/conftest.py b/numpy/conftest.py index b36282f387c7..1e3d20ab3c94 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -161,13 +161,14 @@ def warnings_errors_and_rng(test=None): "Deprecated call", "numpy.core", "`np.compat`", - "Importing from numpy.matlib" - ] + "Importing from numpy.matlib"] msg = "|".join(msgs) with warnings.catch_warnings(): - warnings.filterwarnings('ignore', category=DeprecationWarning, message=msg) + warnings.filterwarnings( + 'ignore', category=DeprecationWarning, message=msg + ) yield # find and check doctests under this context manager @@ -190,10 +191,10 @@ def warnings_errors_and_rng(test=None): # xfail problematic tutorials dt_config.pytest_extra_xfail = { - 'how-to-verify-bug.rst' : '', + 'how-to-verify-bug.rst': '', 'c-info.ufunc-tutorial.rst': '', 'basics.interoperability.rst': 'needs pandas', 'basics.dispatch.rst': 'errors out in /testing/overrides.py', 'basics.subclassing.rst': '.. testcode:: admonitions not understood' - } + } diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py index f400981157de..7324168e1dc8 100644 --- a/numpy/doc/ufuncs.py +++ b/numpy/doc/ufuncs.py @@ -113,7 +113,8 @@ output argument is used, the ufunc still returns a reference to the result. >>> x = np.arange(2) - >>> np.add(np.arange(2, dtype=float), np.arange(2, dtype=float), x, casting='unsafe') + >>> np.add(np.arange(2, dtype=float), np.arange(2, dtype=float), x, + ... 
casting='unsafe') array([0, 2]) >>> x array([0, 2]) diff --git a/numpy/exceptions.py b/numpy/exceptions.py index 54088d315363..6cacdbcc5227 100644 --- a/numpy/exceptions.py +++ b/numpy/exceptions.py @@ -243,5 +243,5 @@ class DTypePromotionError(TypeError): DTypePromotionError: field names `('field1', 'field2')` and `('field1',)` mismatch. - """ + """ # NOQA pass From 3eca7a65403d3e6dc9363d822e0c57173f63aa0c Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Thu, 6 Jun 2024 09:34:21 -0300 Subject: [PATCH 500/980] DOC: fix +NORMALIZE_WHITESPACE for `unique_all` example [skip actions][skip azp][skip cirrus] --- numpy/lib/_arraysetops_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index 92df5f1b01a1..d28c5fe55be4 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -435,7 +435,7 @@ def unique_all(x): Examples -------- - >>> np.unique_all([1, 1, 2]) + >>> np.unique_all([1, 1, 2]) # doctest: +NORMALIZE_WHITESPACE UniqueAllResult( values=array([1, 2]), indices=array([0, 2]), From b22ca2b00c5abf0ea8af941e0d216d68d94d6415 Mon Sep 17 00:00:00 2001 From: Matthew Thompson Date: Thu, 6 Jun 2024 08:58:54 -0400 Subject: [PATCH 501/980] BUG: Replace dots with underscores in meson backend --- numpy/f2py/_backends/_meson.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py index d4b650857e74..c396733f2f44 100644 --- a/numpy/f2py/_backends/_meson.py +++ b/numpy/f2py/_backends/_meson.py @@ -92,13 +92,13 @@ def libraries_substitution(self) -> None: self.substitutions["lib_declarations"] = "\n".join( [ - f"{lib} = declare_dependency(link_args : ['-l{lib}'])" + f"{lib.replace('.','_')} = declare_dependency(link_args : ['-l{lib}'])" for lib in self.libraries ] ) self.substitutions["lib_list"] = f"\n{self.indent}".join( - [f"{self.indent}{lib}," for lib in self.libraries] + [f"{self.indent}{lib.replace('.','_')}," for lib in self.libraries] ) self.substitutions["lib_dir_list"] = f"\n{self.indent}".join( [f"{self.indent}lib_dir_{i}," for i in range(len(self.library_dirs))] From 6502a4c71a6e93a0bae27c306e1ade70e32828c0 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Thu, 6 Jun 2024 10:03:05 -0300 Subject: [PATCH 502/980] DOC: fix `matrix_transpose` doctest [skip actions][skip azp][skip cirrus] --- numpy/_core/fromnumeric.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index dee671432f72..02bc025b5e40 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -731,7 +731,6 @@ def matrix_transpose(x, /): >>> np.matrix_transpose([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) array([[[1, 3], [2, 4]], - [[5, 7], [6, 8]]]) From ee4b7ea7e563ef3761e9ccd2b364fbfea08b14f9 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Thu, 6 Jun 2024 10:30:16 -0300 Subject: [PATCH 503/980] DOC: add setbufsize example errstate context manager [skip actions][skip azp][skip cirrus] --- numpy/_core/_ufunc_config.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index d7d9e51b4c7e..c9c7f9d08988 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -174,10 +174,16 @@ def setbufsize(size): Examples -------- - >>> np.setbufsize(4096) + When exiting a `numpy.errstate` context manager the bufsize is restored: + + >>> with np.errstate(): + ... 
np.setbufsize(4096) + ... print(np.getbufsize()) + ... 8192 4096 + >>> np.getbufsize() + 8192 """ old = _get_extobj_dict()["bufsize"] From ab770f69c1f512dfa07e8e5f44d9c477b1735d62 Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Thu, 6 Jun 2024 09:51:43 -0600 Subject: [PATCH 504/980] DOC: Update randn() to use rng.normal() Updates all references to `randn` to use `normal` from Generator. I left the matlib versions alone. I removed all references to `seed=123` and left `# may vary`. Longer outputs were reformatted to use Jupyter style output. [skip actions] [skip azp] [skip cirrus] --- numpy/polynomial/hermite.py | 2 +- numpy/polynomial/hermite_e.py | 2 +- numpy/polynomial/laguerre.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index ce678df5774c..58d18cb0d88c 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -1461,7 +1461,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): >>> from numpy.polynomial.hermite import hermfit, hermval >>> x = np.linspace(-10, 10) >>> rng = np.random.default_rng() - >>> err = rng.normal(scale=1./10,size=len(x)) + >>> err = rng.normal(scale=1./10, size=len(x)) >>> y = hermval(x, [1, 2, 3]) + err >>> hermfit(x, y, 2) array([1.02294967, 2.00016403, 2.99994614]) # may vary diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 02027eafbbf6..0aaf2a78c768 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -1388,7 +1388,7 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): >>> from numpy.polynomial.hermite_e import hermefit, hermeval >>> x = np.linspace(-10, 10) >>> rng = np.random.default_rng() - >>> err = rng.normal(scale=1./10,size=len(x)) + >>> err = rng.normal(scale=1./10, size=len(x)) >>> y = hermeval(x, [1, 2, 3]) + err >>> hermefit(x, y, 2) array([1.02284196, 2.00032805, 2.99978457]) # may vary diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index f5fa0fbd53ac..b0de7d9bce35 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -1442,7 +1442,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): >>> from numpy.polynomial.laguerre import lagfit, lagval >>> x = np.linspace(0, 10) >>> rng = np.random.default_rng() - >>> err = rng.normal(scale=1./10,size=len(x)) + >>> err = rng.normal(scale=1./10, size=len(x)) >>> y = lagval(x, [1, 2, 3]) + err >>> lagfit(x, y, 2) array([1.00578369, 1.99417356, 2.99827656]) # may vary From 10ca3c0d1d69b53f528f6c4d8be45b4d38c94ed0 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Thu, 6 Jun 2024 13:11:31 -0300 Subject: [PATCH 505/980] DOC: fix `unique_all` extra whitespace in doctest [skip actions][skip azp][skip cirrus] --- numpy/lib/_arraysetops_impl.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index d28c5fe55be4..a150d2584244 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -435,12 +435,11 @@ def unique_all(x): Examples -------- - >>> np.unique_all([1, 1, 2]) # doctest: +NORMALIZE_WHITESPACE - UniqueAllResult( - values=array([1, 2]), - indices=array([0, 2]), - inverse_indices=array([0, 0, 1]), - counts=array([2, 1])) + >>> np.unique_all([1, 1, 2]) + UniqueAllResult(values=array([1, 2]), + indices=array([0, 2]), + inverse_indices=array([0, 0, 1]), + counts=array([2, 1])) """ result = unique( From 1d27ad32683318f7eac89f59972d650821cf9628 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 17:19:36 +0000 Subject: [PATCH 506/980] MAINT: Bump actions/dependency-review-action from 4.3.2 to 4.3.3 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.3.2 to 4.3.3. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/0c155c5e8556a497adf53f2c18edabf945ed8e70...72eb03d02c7872a771aacd928f3123ac62ad6d3a) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 334a89bf6ea7..95a99d6dcf9b 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -17,4 +17,4 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: 'Dependency Review' - uses: actions/dependency-review-action@0c155c5e8556a497adf53f2c18edabf945ed8e70 # v4.3.2 + uses: actions/dependency-review-action@72eb03d02c7872a771aacd928f3123ac62ad6d3a # v4.3.3 From 7483120ba4b80016ddb4ecaccd00741c347efa77 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 6 Jun 2024 21:02:22 +0300 Subject: [PATCH 507/980] ENH: add spin smoke-docs command --- .spin/cmds.py | 104 +++++++++++++++++++++++++++++++++++++++++++++++++ pyproject.toml | 1 + 2 files changed, 105 insertions(+) diff --git a/.spin/cmds.py b/.spin/cmds.py index b78c0393e708..4d70eb8cf73b 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -258,6 +258,110 @@ def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): ctx.forward(meson.test) +@click.command() +@click.argument("pytest_args", nargs=-1) +@click.option( + "-m", + "markexpr", + metavar='MARKEXPR', + default="not slow", + help="Run tests with the given markers" ) +@click.option( + "-j", + "n_jobs", + metavar='N_JOBS', + default="1", + help=("Number of parallel jobs for testing. " + "Can be set to `auto` to use all cores.") ) +@click.option( + "--tests", "-t", + metavar='TESTS', + help=(""" Which tests to run. Can be a module, function, class, or method: \b numpy.random numpy.random.tests.test_generator_mt19937 numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric::test_edge_cases \b """) ) +@click.option( + '--verbose', '-v', is_flag=True, default=False ) +@click.pass_context +def smoke_docs(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): + """🔧 Run tests + + PYTEST_ARGS are passed through directly to pytest, e.g.: + + spin test -- --pdb + + To run tests on a directory or file: + + \b + spin test numpy/linalg + spin test numpy/linalg/tests/test_linalg.py + + To report the durations of the N slowest tests: + + spin test -- --durations=N + + To run tests that match a given pattern: + + \b + spin test -- -k "geometric" + spin test -- -k "geometric and not rgeometric" + + By default, spin will run `-m 'not slow'`. To run the full test suite, use + `spin -m full` + + For more, see `pytest --help`.
+ """ # noqa: E501 + if (not pytest_args) and (not tests): + pytest_args = ('numpy',) + + if '-m' not in pytest_args: + if markexpr != "full": + pytest_args = ('-m', markexpr) + pytest_args + + if (n_jobs != "1") and ('-n' not in pytest_args): + pytest_args = ('-n', str(n_jobs)) + pytest_args + + if tests and not ('--pyargs' in pytest_args): + pytest_args = ('--pyargs', tests) + pytest_args + + if verbose: + pytest_args = ('-v',) + pytest_args + + + doctest_args = ( + # ignores are for things fail doctest collection (optionals etc) + '--ignore=numpy/distutils', + '--ignore=numpy/_core/cversions.py', + '--ignore=numpy/_pyinstaller', + '--ignore=numpy/random/_examples', + '--ignore=numpy/compat', + '--ignore=numpy/f2py/_backends/_distutils.py', + # turn doctesting on: + '--doctest-modules', + '--doctest-collect=api' + ) + + pytest_args = pytest_args + doctest_args + + ctx.params['pytest_args'] = pytest_args + + for extra_param in ('markexpr', 'n_jobs', 'tests', 'verbose'): + del ctx.params[extra_param] + + ctx.forward(meson.test) + + + # From scipy: benchmarks/benchmarks/common.py def _set_mem_rlimit(max_mem=None): """ diff --git a/pyproject.toml b/pyproject.toml index b4df3c36d71f..93b6ce6b6b75 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -211,5 +211,6 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:docs", ".spin/cmds.py:changelog", ".spin/cmds.py:notes", + ".spin/cmds.py:smoke_docs" ] "Metrics" = [".spin/cmds.py:bench"] From 5ce234e922cc4830ef039bbc5be21420332b88a7 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 6 Jun 2024 13:43:38 -0600 Subject: [PATCH 508/980] BUG: fix incorrect randomized parameterization in bench_linalg --- benchmarks/benchmarks/bench_linalg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index 307735723707..f3eb819c1803 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -72,7 +72,7 @@ def time_tensordot_a_b_axes_1_0_0_1(self): class Linalg(Benchmark): - params = set(TYPES1) - set(['float16']) + params = sorted(list(set(TYPES1) - set(['float16']))) param_names = ['dtype'] def setup(self, typename): From 7cce1e5ae2013aa91b784cd492670e1e1fdef83e Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 6 Jun 2024 15:27:25 -0600 Subject: [PATCH 509/980] MNT: use reproducible RNG sequences in benchmarks --- benchmarks/benchmarks/bench_clip.py | 8 ++++---- benchmarks/benchmarks/bench_function_base.py | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/benchmarks/benchmarks/bench_clip.py b/benchmarks/benchmarks/bench_clip.py index edbeb745ad60..a602d2fd5ea9 100644 --- a/benchmarks/benchmarks/bench_clip.py +++ b/benchmarks/benchmarks/bench_clip.py @@ -11,8 +11,8 @@ class ClipFloat(Benchmark): ] def setup(self, dtype, size): - rng = np.random.default_rng() - self.array = rng.random(size=size).astype(dtype) + rnd = np.random.RandomState(12345) + self.array = rnd.random(size=size).astype(dtype) self.dataout = np.full_like(self.array, 0.5) def time_clip(self, dtype, size): @@ -27,8 +27,8 @@ class ClipInteger(Benchmark): ] def setup(self, dtype, size): - rng = np.random.default_rng() - self.array = rng.integers(256, size=size, dtype=dtype) + rnd = np.random.RandomState(12345) + self.array = rnd.randint(256, size=size, dtype=dtype) self.dataout = np.full_like(self.array, 128) def time_clip(self, dtype, size): diff --git a/benchmarks/benchmarks/bench_function_base.py 
b/benchmarks/benchmarks/bench_function_base.py index d4b08a3a0e65..606a057f3cac 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -159,7 +159,8 @@ def random(size, dtype): Returns a randomly-shuffled array. """ arr = np.arange(size, dtype=dtype) - np.random.shuffle(arr) + rnd = np.random.RandomState(12345) + rnd.shuffle(arr) return arr @staticmethod From 1390ea3c0d1e1e7fc9bb03f43e1375a7e31a56f0 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 6 Jun 2024 17:01:22 -0600 Subject: [PATCH 510/980] Update benchmarks/benchmarks/bench_clip.py Co-authored-by: Robert Kern --- benchmarks/benchmarks/bench_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_clip.py b/benchmarks/benchmarks/bench_clip.py index a602d2fd5ea9..59e367ed7a17 100644 --- a/benchmarks/benchmarks/bench_clip.py +++ b/benchmarks/benchmarks/bench_clip.py @@ -11,7 +11,7 @@ class ClipFloat(Benchmark): ] def setup(self, dtype, size): - rnd = np.random.RandomState(12345) + rnd = np.random.RandomState(994584855) self.array = rnd.random(size=size).astype(dtype) self.dataout = np.full_like(self.array, 0.5) From 63c0006dd2622cf1e0ad34ba26af83db56aa751a Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 6 Jun 2024 17:01:32 -0600 Subject: [PATCH 511/980] Update benchmarks/benchmarks/bench_clip.py Co-authored-by: Robert Kern --- benchmarks/benchmarks/bench_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_clip.py b/benchmarks/benchmarks/bench_clip.py index 59e367ed7a17..ce0511da82a4 100644 --- a/benchmarks/benchmarks/bench_clip.py +++ b/benchmarks/benchmarks/bench_clip.py @@ -27,7 +27,7 @@ class ClipInteger(Benchmark): ] def setup(self, dtype, size): - rnd = np.random.RandomState(12345) + rnd = np.random.RandomState(1301109903) self.array = rnd.randint(256, size=size, dtype=dtype) self.dataout = np.full_like(self.array, 128) From 72026ec000355e67c09359fa1f0854b8cbe47bb4 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 6 Jun 2024 17:01:52 -0600 Subject: [PATCH 512/980] Update benchmarks/benchmarks/bench_function_base.py Co-authored-by: Robert Kern --- benchmarks/benchmarks/bench_function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index 606a057f3cac..761c56a1691a 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -159,7 +159,7 @@ def random(size, dtype): Returns a randomly-shuffled array. 
""" arr = np.arange(size, dtype=dtype) - rnd = np.random.RandomState(12345) + rnd = np.random.RandomState(1792364059) rnd.shuffle(arr) return arr From 5e0dded4b8142fa06fec9fae7d7c3c3fbe1407b7 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 6 Jun 2024 16:58:24 -0600 Subject: [PATCH 513/980] MNT: seed more RNGs --- benchmarks/benchmarks/bench_creation.py | 3 +- benchmarks/benchmarks/bench_function_base.py | 72 +++----------------- benchmarks/benchmarks/bench_lib.py | 4 +- benchmarks/benchmarks/bench_random.py | 5 +- benchmarks/benchmarks/common.py | 13 +--- 5 files changed, 21 insertions(+), 76 deletions(-) diff --git a/benchmarks/benchmarks/bench_creation.py b/benchmarks/benchmarks/bench_creation.py index 76d871e2d411..8c06c2125940 100644 --- a/benchmarks/benchmarks/bench_creation.py +++ b/benchmarks/benchmarks/bench_creation.py @@ -13,7 +13,8 @@ class MeshGrid(Benchmark): timeout = 10 def setup(self, size, ndims, ind, ndtype): - self.grid_dims = [(np.random.ranf(size)).astype(ndtype) for + rnd = np.random.RandomState(1864768776) + self.grid_dims = [(rnd.random_sample(size)).astype(ndtype) for x in range(ndims)] def time_meshgrid(self, size, ndims, ind, ndtype): diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index 761c56a1691a..657db7d2cac7 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -154,18 +154,19 @@ class SortGenerator: @staticmethod @memoize - def random(size, dtype): + def random(size, dtype, rnd): """ Returns a randomly-shuffled array. """ arr = np.arange(size, dtype=dtype) rnd = np.random.RandomState(1792364059) + np.random.shuffle(arr) rnd.shuffle(arr) return arr @staticmethod @memoize - def ordered(size, dtype): + def ordered(size, dtype, rnd): """ Returns an ordered array. """ @@ -173,7 +174,7 @@ def ordered(size, dtype): @staticmethod @memoize - def reversed(size, dtype): + def reversed(size, dtype, rnd): """ Returns an array that's in descending order. """ @@ -188,7 +189,7 @@ def reversed(size, dtype): @staticmethod @memoize - def uniform(size, dtype): + def uniform(size, dtype, rnd): """ Returns an array that has the same value everywhere. """ @@ -196,20 +197,7 @@ def uniform(size, dtype): @staticmethod @memoize - def swapped_pair(size, dtype, swap_frac): - """ - Returns an ordered array, but one that has ``swap_frac * size`` - pairs swapped. - """ - a = np.arange(size, dtype=dtype) - for _ in range(int(size * swap_frac)): - x, y = np.random.randint(0, size, 2) - a[x], a[y] = a[y], a[x] - return a - - @staticmethod - @memoize - def sorted_block(size, dtype, block_size): + def sorted_block(size, dtype, block_size, rnd): """ Returns an array with blocks that are all sorted. """ @@ -222,35 +210,6 @@ def sorted_block(size, dtype, block_size): b.extend(a[i::block_num]) return np.array(b) - @classmethod - @memoize - def random_unsorted_area(cls, size, dtype, frac, area_size=None): - """ - This type of array has random unsorted areas such that they - compose the fraction ``frac`` of the original array. - """ - if area_size is None: - area_size = cls.AREA_SIZE - - area_num = int(size * frac / area_size) - a = np.arange(size, dtype=dtype) - for _ in range(area_num): - start = np.random.randint(size-area_size) - end = start + area_size - np.random.shuffle(a[start:end]) - return a - - @classmethod - @memoize - def random_bubble(cls, size, dtype, bubble_num, bubble_size=None): - """ - This type of array has ``bubble_num`` random unsorted areas. 
- """ - if bubble_size is None: - bubble_size = cls.BUBBLE_SIZE - frac = bubble_size * bubble_num / size - - return cls.random_unsorted_area(size, dtype, frac, bubble_size) class Sort(Benchmark): """ @@ -271,15 +230,6 @@ class Sort(Benchmark): ('sorted_block', 10), ('sorted_block', 100), ('sorted_block', 1000), - # ('swapped_pair', 0.01), - # ('swapped_pair', 0.1), - # ('swapped_pair', 0.5), - # ('random_unsorted_area', 0.5), - # ('random_unsorted_area', 0.1), - # ('random_unsorted_area', 0.01), - # ('random_bubble', 1), - # ('random_bubble', 5), - # ('random_bubble', 10), ], ] param_names = ['kind', 'dtype', 'array_type'] @@ -288,9 +238,9 @@ class Sort(Benchmark): ARRAY_SIZE = 10000 def setup(self, kind, dtype, array_type): - np.random.seed(1234) + rnd = np.random.RandomState(507582308) array_class = array_type[0] - self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:]) + self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) def time_sort(self, kind, dtype, array_type): # Using np.sort(...) instead of arr.sort(...) because it makes a copy. @@ -322,10 +272,10 @@ class Partition(Benchmark): ARRAY_SIZE = 100000 def setup(self, dtype, array_type, k): - np.random.seed(1234) + rnd = np.random.seed(2136297818) array_class = array_type[0] - self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, - dtype, *array_type[1:]) + self.arr = getattr(SortGenerator, array_class)( + self.ARRAY_SIZE, dtype, *array_type[1:], rnd) def time_partition(self, dtype, array_type, k): temp = np.partition(self.arr, k) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index f792116a6b9c..dc8815ffe95b 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -66,10 +66,10 @@ class Nan(Benchmark): ] def setup(self, array_size, percent_nans): - np.random.seed(123) + rnd = np.random.RandomState(1819780348) # produce a randomly shuffled array with the # approximate desired percentage np.nan content - base_array = np.random.uniform(size=array_size) + base_array = rnd.uniform(size=array_size) base_array[base_array < percent_nans / 100.] = np.nan self.arr = base_array diff --git a/benchmarks/benchmarks/bench_random.py b/benchmarks/benchmarks/bench_random.py index 9482eb04de97..d987426694e9 100644 --- a/benchmarks/benchmarks/bench_random.py +++ b/benchmarks/benchmarks/bench_random.py @@ -147,10 +147,11 @@ class Bounded(Benchmark): ]] def setup(self, bitgen, args): + seed = 707250673 if bitgen == 'numpy': - self.rg = np.random.RandomState() + self.rg = np.random.RandomState(seed) else: - self.rg = Generator(getattr(np.random, bitgen)()) + self.rg = Generator(getattr(np.random, bitgen)(seed)) self.rg.random() def time_bounded(self, bitgen, args): diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py index d4c1540ff203..5cbc2f38f31d 100644 --- a/benchmarks/benchmarks/common.py +++ b/benchmarks/benchmarks/common.py @@ -41,7 +41,7 @@ @lru_cache(typed=True) def get_values(): - rnd = np.random.RandomState(1) + rnd = np.random.RandomState(1804169117) values = np.tile(rnd.uniform(0, 100, size=nx*ny//10), 10) return values @@ -60,7 +60,7 @@ def get_square(dtype): @lru_cache(typed=True) def get_squares(): - return {t: get_square(t) for t in TYPES1} + return {t: get_square(t) for t in sorted(TYPES1)} @lru_cache(typed=True) @@ -72,14 +72,7 @@ def get_square_(dtype): @lru_cache(typed=True) def get_squares_(): # smaller squares - return {t: get_square_(t) for t in TYPES1} - - -@lru_cache(typed=True) -def get_vectors(): - # vectors - vectors = {t: s[0] for t, s in get_squares().items()} - return vectors + return {t: get_square_(t) for t in sorted(TYPES1)} @lru_cache(typed=True) From f5d8ada096f57ec24f328d67618cfc35b5c1549b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Fri, 7 Jun 2024 12:27:40 +0200 Subject: [PATCH 514/980] Update 2.0 migration guide [skip actions] [skip azp] [skip cirrus] --- doc/source/numpy_2_0_migration_guide.rst | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 7de294bb8d86..4377ee368e63 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -23,7 +23,7 @@ guide can be automatically adapted in downstream code with a dedicated `Ruff `__ rule, namely rule `NPY201 `__. -You should install ``ruff>=0.2.0`` and add the ``NPY201`` rule to your +You should install ``ruff>=0.4.8`` and add the ``NPY201`` rule to your ``pyproject.toml``:: [tool.ruff.lint] select = ["NPY201"] @@ -149,8 +149,8 @@ Please do not hesitate to open a NumPy issue, if you require assistance or the provided functions are not sufficient. **Custom User DTypes:** -Existing user dtypes must now use ``PyArray_DescrProto`` to define their -dtype and slightly modify the code. See note in `PyArray_RegisterDataType`. +Existing user dtypes must now use :c:type:`PyArray_DescrProto` to define +their dtype and slightly modify the code. See note in :c:func:`PyArray_RegisterDataType`. Functionality moved to headers requiring ``import_array()`` ----------------------------------------------------------- @@ -202,13 +202,13 @@ native C99 types. While the memory layout of those types remains identical to the types used in NumPy 1.x, the API is slightly different, since direct field access (like ``c.real`` or ``c.imag``) is no longer possible.
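Note that at the Python level nothing changes here: the `.real` and `.imag` attributes behave the same under 1.x and 2.x, and only C code that reached into the struct fields directly is affected. A quick sanity check, using only public NumPy API:

    >>> import numpy as np
    >>> z = np.complex128(3.0 - 4.0j)
    >>> print(z.real, z.imag)
    3.0 -4.0
    >>> print(np.conj(z))
    (3+4j)
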
-It is recommended to use the functions `npy_creal` and `npy_cimag` (and the -corresponding float and long double variants) to retrieve +It is recommended to use the functions ``npy_creal`` and ``npy_cimag`` +(and the corresponding float and long double variants) to retrieve the real or imaginary part of a complex number, as these will work with both -NumPy 1.x and with NumPy 2.x. New functions `npy_csetreal` and `npy_csetimag`, -along with compatibility macros `NPY_CSETREAL` and `NPY_CSETIMAG` (and the -corresponding float and long double variants), have been -added for setting the real or imaginary part. +NumPy 1.x and with NumPy 2.x. New functions ``npy_csetreal`` and +``npy_csetimag``, along with compatibility macros ``NPY_CSETREAL`` and +``NPY_CSETIMAG`` (and the corresponding float and long double variants), +have been added for setting the real or imaginary part. The underlying type remains a struct under C++ (all of the above still remains valid). @@ -382,7 +382,6 @@ expired member migration guideline newbyteorder Use ``arr.view(arr.dtype.newbyteorder(order))`` instead. ptp Use ``np.ptp(arr, ...)`` instead. setitem Use ``arr[index] = value`` instead. -... ... ====================== ======================================================== From d10b362266ef8011dbbaa34a4638feee1ef3e6e1 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 31 May 2024 16:53:10 -0600 Subject: [PATCH 515/980] MNT: catch invalid fixed-width dtype sizes --- numpy/_core/src/multiarray/descriptor.c | 25 +++++++++++++++++++------ numpy/_core/src/umath/string_buffer.h | 2 +- numpy/_core/tests/test_dtype.py | 16 ++++++++++++++++ numpy/_core/tests/test_strings.py | 1 + 4 files changed, 37 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 1564902be674..4000ac7c273d 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -6,6 +6,8 @@ #include #include +#include + #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" #include "numpy/npy_math.h" @@ -1807,19 +1809,27 @@ _convert_from_str(PyObject *obj, int align) /* Python byte string characters are unsigned */ check_num = (unsigned char) type[0]; } - /* A kind + size like 'f8' */ + /* Possibly a kind + size like 'f8' but also could be 'bool' */ else { char *typeend = NULL; int kind; - /* Parse the integer, make sure it's the rest of the string */ - elsize = (int)strtol(type + 1, &typeend, 10); - /* Make sure size is not negative */ - if (elsize < 0) { + /* Attempt to parse the integer, make sure it's the rest of the string */ + errno = 0; + long result = strtol(type + 1, &typeend, 10); + npy_bool some_parsing_happened = !(type == typeend); + npy_bool entire_string_consumed = *typeend == '\0'; + npy_bool parsing_succeeded = + (errno == 0) && some_parsing_happened && entire_string_consumed; + // make sure it doesn't overflow or go negative + if (result > INT_MAX || result < 0) { goto fail; } - if (typeend - type == len) { + elsize = (int)result; + + + if (parsing_succeeded && typeend - type == len) { kind = type[0]; switch (kind) { @@ -1865,6 +1875,9 @@ _convert_from_str(PyObject *obj, int align) } } } + else if (parsing_succeeded) { + goto fail; + } } if (PyErr_Occurred()) { diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index c5fc8949f994..77f230cf9ad5 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -1462,7 +1462,7 @@ 
string_expandtabs_length(Buffer buf, npy_int64 tabsize) line_pos = 0; } } - if (new_len == PY_SSIZE_T_MAX || new_len < 0) { + if (new_len > INT_MAX || new_len < 0) { npy_gil_error(PyExc_OverflowError, "new string is too long"); return -1; } diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index eb4f915ee452..73e02a84e2e8 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -236,6 +236,22 @@ def test_create_invalid_string_errors(self): with pytest.raises(ValueError): type(np.dtype("U"))(-1) + # OverflowError on 32 bit + with pytest.raises((TypeError, OverflowError)): + # see gh-26556 + type(np.dtype("S"))(2**61) + + with pytest.raises(TypeError): + np.dtype("S1234hello") + + def test_leading_zero_parsing(self): + dt1 = np.dtype('S010') + dt2 = np.dtype('S10') + + assert dt1 == dt2 + assert repr(dt1) == "dtype('S10')" + assert dt1.itemsize == 10 + class TestRecord: def test_equivalent_record(self): diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 64cf42e05adb..f12b743f4daa 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -716,6 +716,7 @@ def test_expandtabs(self, buf, tabsize, res, dt): def test_expandtabs_raises_overflow(self, dt): with pytest.raises(OverflowError, match="new string is too long"): np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize) + np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), 2**61) FILL_ERROR = "The fill character must be exactly one character long" From 8005610c816e6197a46c65e7217efbb6f459faf8 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 8 Jun 2024 17:50:53 +0300 Subject: [PATCH 516/980] MAINT: smoke-docs: filter out all deprecation and runtime warnings --- numpy/conftest.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/numpy/conftest.py b/numpy/conftest.py index 1e3d20ab3c94..9fe2f073f4f8 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -161,14 +161,27 @@ def warnings_errors_and_rng(test=None): "Deprecated call", "numpy.core", "`np.compat`", - "Importing from numpy.matlib"] - + "Importing from numpy.matlib", + "This function is deprecated.", # random_integers + "Data type alias 'a'", # numpy.rec.fromfile + "Arrays of 2-dimensional vectors", # matlib.cross + "`in1d` is deprecated", + ] msg = "|".join(msgs) + msgs_r = [ + "invalid value encountered", + "divide by zero encountered" + ] + msg_r = "|".join(msgs_r) + with warnings.catch_warnings(): warnings.filterwarnings( 'ignore', category=DeprecationWarning, message=msg ) + warnings.filterwarnings( + 'ignore', category=RuntimeWarning, message=msg_r + ) yield # find and check doctests under this context manager @@ -187,6 +200,8 @@ def warnings_errors_and_rng(test=None): # temporary skips dt_config.skiplist = set([ 'numpy.random.Generator.f', # tries to import from scipy.stats + 'numpy.savez', # unclosed file + 'numpy.matlib.savez', ]) # xfail problematic tutorials From bc606908c1acc0e264c5116b83f3f50356878c44 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 8 Jun 2024 17:57:32 +0300 Subject: [PATCH 517/980] CI: run spin smoke-docs on CircleCI --- .circleci/config.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 530631281c80..6cac961801bd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -103,6 +103,9 @@ jobs: # `np.polynomial.set_default_printstyle`) python tools/refguide_check.py --rst python tools/refguide_check.py 
--doctests + # + pip install scipy-doctest spin + spin smoke-docs -v - persist_to_workspace: root: ~/repo From f94af65e1d6b51258efe2b0ef042f5c2f0dab5f7 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 8 Jun 2024 18:32:30 +0300 Subject: [PATCH 518/980] appease the linter --- numpy/conftest.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/conftest.py b/numpy/conftest.py index 9fe2f073f4f8..7782de6c8e0c 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -165,8 +165,7 @@ def warnings_errors_and_rng(test=None): "This function is deprecated.", # random_integers "Data type alias 'a'", # numpy.rec.fromfile "Arrays of 2-dimensional vectors", # matlib.cross - "`in1d` is deprecated", - ] + "`in1d` is deprecated", ] msg = "|".join(msgs) msgs_r = [ From 4d72e882147ddde14863209be25456f9fa19abe7 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 8 Jun 2024 18:32:43 +0300 Subject: [PATCH 519/980] CI: move smoke-docs run to GHA --- .circleci/config.yml | 3 --- .github/workflows/linux.yml | 5 +++++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6cac961801bd..530631281c80 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -103,9 +103,6 @@ jobs: # `np.polynomial.set_default_printstyle`) python tools/refguide_check.py --rst python tools/refguide_check.py --doctests - # - pip install scipy-doctest spin - spin smoke-docs -v - persist_to_workspace: root: ~/repo diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index aef580c00e30..31b34eaa0b89 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -180,6 +180,11 @@ jobs: shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' run: | spin bench --quick + - name: Run smoke-docs + shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' + run: | + pip install scipy-doctest + spin smoke-docs sdist: needs: [smoke_test] From d439c6f1160e6783e1a4ddf6282305fc2729e5f3 Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Sat, 8 Jun 2024 16:22:18 -0600 Subject: [PATCH 520/980] DOC: Added clean_dirs to spin docs to remove generated folders Using `spin docs --clean` currently only removes `doc/build`. This adds the other folders that are captured with `make --clean`. When numpy updates spin from 0.8 to >0.10, this can be moved to pyproject.toml with supported functionality. [skip actions] [skip azp] [skip cirrus] --- .spin/cmds.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.spin/cmds.py b/.spin/cmds.py index b78c0393e708..3dcccae6e496 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -161,6 +161,21 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): """ meson.docs.ignore_unknown_options = True + # See https://github.com/scientific-python/spin/pull/199 + # Can be changed when spin updates to 0.11, and moved to pyproject.toml + if clean: + clean_dirs = [ + './doc/build/', + './doc/source/reference/generated', + './doc/source/reference/random/bit_generators/generated', + './doc/source/reference/random/generated', + ] + + for target_dir in clean_dirs: + if os.path.isdir(target_dir): + print(f"Removing {target_dir!r}") + shutil.rmtree(target_dir) + # Run towncrier without staging anything for commit. This is the way to get # release notes snippets included in a local doc build. 
cmd = ['towncrier', 'build', '--version', '2.x.y', '--keep', '--draft'] From 5160da13a2e1fa9bc9f9d178af7bb6c57795a48a Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Sat, 8 Jun 2024 17:16:09 -0600 Subject: [PATCH 521/980] DOC: Enable web docs for numpy.trapezoid and add back links This PR adds back the links that were removed from `trapz` in PR #24445, finishing the migration to `trapezoid` in PR #25738. Currently, no web docs appear for `trapezoid`. --- doc/source/reference/routines.math.rst | 1 + numpy/_core/fromnumeric.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/doc/source/reference/routines.math.rst b/doc/source/reference/routines.math.rst index 2c77b2cc1488..59310f0a714f 100644 --- a/doc/source/reference/routines.math.rst +++ b/doc/source/reference/routines.math.rst @@ -71,6 +71,7 @@ Sums, products, differences ediff1d gradient cross + trapezoid Exponents and logarithms ------------------------ diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 02bc025b5e40..57602293ad80 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -2355,6 +2355,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, ndarray.sum : Equivalent method. add: ``numpy.add.reduce`` equivalent function. cumsum : Cumulative sum of array elements. + trapezoid : Integration of array values using composite trapezoidal rule. mean, average @@ -2681,6 +2682,7 @@ def cumsum(a, axis=None, dtype=None, out=None): See Also -------- sum : Sum array elements. + trapezoid : Integration of array values using composite trapezoidal rule. diff : Calculate the n-th discrete difference along given axis. Notes From b8a5e7a685c00002ffc5d3f31cf2c8ad77fe08e3 Mon Sep 17 00:00:00 2001 From: bmwoodruff Date: Sat, 8 Jun 2024 17:24:15 -0600 Subject: [PATCH 522/980] DOC: Enable web docs for numpy.trapezoid and add back links This PR adds back the links that were removed from `trapz` in PR #24445, finishing the migration to `trapezoid` in PR #25738. Currently, no web docs appear for `trapezoid`. [skip actions] [skip azp] [skip cirrus] From cecfc535f253c1d6986200dc7900c888e7705e21 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 8 Jun 2024 18:55:11 +0300 Subject: [PATCH 523/980] CI: smoke-docs: install test/doctest deps (MPL, hypothesis) --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 31b34eaa0b89..f23ae35a1df4 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -183,7 +183,7 @@ jobs: - name: Run smoke-docs shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' run: | - pip install scipy-doctest + pip install scipy-doctest hypothesis matplotlib spin smoke-docs sdist: From a8b06d920a0349c517a400a6b1e6bf0581afd0af Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sun, 9 Jun 2024 17:04:02 +0300 Subject: [PATCH 524/980] MAINT: clean up `spin smoke-docs` Follow what pytest --doctest-modules allows: - directories - -k patterns `pytest --doctest-modules path/to/py/file` I'd expect to work, but apparently it fails to collect anything --- and this is not related to scipy-doctests. 
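The behaviour the message above describes reduces to a couple of tuple manipulations before handing off to pytest — a rough sketch, assuming `pytest_args` and `n_jobs` as in the command's signature (the flag spellings mirror the diff below; `--doctest-collect=api` is a scipy-doctest option):

    # rough sketch of the simplified forwarding logic
    pytest_args = pytest_args or ('numpy',)            # default target
    if n_jobs != "1" and '-n' not in pytest_args:
        pytest_args = ('-n', str(n_jobs)) + pytest_args
    pytest_args += ('--doctest-modules', '--doctest-collect=api')
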
--- .spin/cmds.py | 64 ++++++++++++++++----------------------------- 1 file changed, 20 insertions(+), 44 deletions(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 4d70eb8cf73b..35de6845e2ca 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -260,13 +260,6 @@ def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): @click.command() @click.argument("pytest_args", nargs=-1) -@click.option( - "-m", - "markexpr", - metavar='MARKEXPR', - default="not slow", - help="Run tests with the given markers" -) @click.option( "-j", "n_jobs", @@ -275,65 +268,48 @@ def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): help=("Number of parallel jobs for testing. " "Can be set to `auto` to use all cores.") ) -@click.option( - "--tests", "-t", - metavar='TESTS', - help=(""" -Which tests to run. Can be a module, function, class, or method: - - \b - numpy.random - numpy.random.tests.test_generator_mt19937 - numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric - numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric::test_edge_cases - \b -""") -) @click.option( '--verbose', '-v', is_flag=True, default=False ) @click.pass_context -def smoke_docs(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): - """🔧 Run tests +def smoke_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): + """🔧 Run doctests of objects in the public API. PYTEST_ARGS are passed through directly to pytest, e.g.: - spin test -- --pdb + spin smoke-docs -- --pdb - To run tests on a directory or file: + To run tests on a directory: \b - spin test numpy/linalg - spin test numpy/linalg/tests/test_linalg.py + spin smoke-docs numpy/linalg - To report the durations of the N slowest tests: + To report the durations of the N slowest doctests: - spin test -- --durations=N + spin smoke-docs -- --durations=N - To run tests that match a given pattern: + To run doctests that match a given pattern: \b - spin test -- -k "geometric" - spin test -- -k "geometric and not rgeometric" + spin smoke-docs -- -k "slogdet" + spin smoke-docs numpy/linalg -- -k "det and not slogdet" - By default, spin will run `-m 'not slow'`. To run the full test suite, use - `spin -m full` + \b + Note: + ----- + + \b + - This command only runs doctests and skips everything under tests/ + - This command only doctests public objects: those which are accessible + from the top-level `__init__.py` file. - For more, see `pytest --help`. """ # noqa: E501 - if (not pytest_args) and (not tests): + if (not pytest_args): pytest_args = ('numpy',) - if '-m' not in pytest_args: - if markexpr != "full": - pytest_args = ('-m', markexpr) + pytest_args - if (n_jobs != "1") and ('-n' not in pytest_args): pytest_args = ('-n', str(n_jobs)) + pytest_args - if tests and not ('--pyargs' in pytest_args): - pytest_args = ('--pyargs', tests) + pytest_args - if verbose: pytest_args = ('-v',) + pytest_args From f2e9f9f972af026d9633a16929ef96cc2fe6e402 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sun, 9 Jun 2024 17:28:39 +0300 Subject: [PATCH 525/980] DOC: random: do not skip import scipy if it is later used --- numpy/conftest.py | 1 - numpy/random/_generator.pyx | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/conftest.py b/numpy/conftest.py index 7782de6c8e0c..eedfdfa4ebaa 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -198,7 +198,6 @@ def warnings_errors_and_rng(test=None): # temporary skips dt_config.skiplist = set([ - 'numpy.random.Generator.f', # tries to import from scipy.stats 'numpy.savez', # unclosed file 'numpy.matlib.savez', ]) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 0d134c823588..df3dc27778e9 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -1548,7 +1548,7 @@ cdef class Generator: and ``m = 20`` is: >>> import matplotlib.pyplot as plt - >>> from scipy import stats # doctest: +SKIP + >>> from scipy import stats >>> dfnum, dfden, size = 20, 20, 10000 >>> s = rng.f(dfnum=dfnum, dfden=dfden, size=size) >>> bins, density, _ = plt.hist(s, 30, density=True) From 7831f129a31d06bd55de54234d4f89af8bc9ae1d Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sun, 9 Jun 2024 17:29:01 +0300 Subject: [PATCH 526/980] CI: install doctest dependencies --- .github/workflows/linux.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index f23ae35a1df4..4b5878efccbf 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -183,8 +183,8 @@ jobs: - name: Run smoke-docs shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' run: | - pip install scipy-doctest hypothesis matplotlib - spin smoke-docs + pip install scipy-doctest hypothesis matplotlib scipy pytz + spin smoke-docs -v sdist: needs: [smoke_test] From 80feb0f49d7ebe14184bdc4401304968b548ebc4 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sun, 9 Jun 2024 17:44:20 +0300 Subject: [PATCH 527/980] TST: doctests: drop dtype(...) into the check namespace --- numpy/conftest.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/numpy/conftest.py b/numpy/conftest.py index eedfdfa4ebaa..1167e8d8892a 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -196,6 +196,9 @@ def warnings_errors_and_rng(test=None): # recognize the StringDType repr dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType + # __array_namespace_info__ needs np.float64 == dtype('float64') + dt_config.check_namespace['dtype'] = numpy.dtype + # temporary skips dt_config.skiplist = set([ 'numpy.savez', # unclosed file 'numpy.matlib.savez', ]) From 1721a0806964b7f2cf9490f4c0624f1264aaab48 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sun, 9 Jun 2024 17:58:45 +0300 Subject: [PATCH 528/980] TST: smoke-docs: skip __array_namespace_info__ --- numpy/conftest.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/conftest.py b/numpy/conftest.py index 1167e8d8892a..01c9aed08cad 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -196,13 +196,12 @@ def warnings_errors_and_rng(test=None): # recognize the StringDType repr dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType - # __array_namespace_info__ needs np.float64 == dtype('float64') - dt_config.check_namespace['dtype'] = numpy.dtype - # temporary skips dt_config.skiplist = set([ 'numpy.savez', # unclosed file 'numpy.matlib.savez', + 'numpy.__array_namespace_info__', + 'numpy.matlib.__array_namespace_info__', ]) From a48ce39da8b34f29a0aed46f418c4976640fd014 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 9 Jun 2024 19:21:57 +0000 Subject: [PATCH 529/980] TST: Add one for gh-26623 On creating incorrect meson.build files Co-authored-by: mathomp4 --- numpy/f2py/tests/test_regression.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 7da62d6cb287..5d967ba73353 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -108,3 +108,14 @@ def test_gh26148b(self): res=self.module.testsub(x1, x2) assert(res[0] == 8) assert(res[1] == 15) + +def test_gh26623(): + # Including libraries with . should not generate an incorrect meson.build + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f90continuation.f90")], + ["-lfoo.bar"], + module_name="Blah", + ) + except RuntimeError as rerr: + assert "lparen got assign" not in str(rerr) From 0c1630ebf6407ce0f82be763a818c741ee842d43 Mon Sep 17 00:00:00 2001 From: LEAP <56480632+gangula-karthik@users.noreply.github.com> Date: Mon, 10 Jun 2024 16:13:24 +0800 Subject: [PATCH 530/980] DOC: Update docstring for invert function (#26646) * DOC: Clarify docstring for invert function --- numpy/_core/code_generators/ufunc_docstrings.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index a3e1965151f1..c7bf82fb2a19 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -1589,12 +1589,13 @@ def add_newdoc(place, name, doc): the integers in the input arrays. This ufunc implements the C/Python operator ``~``. - For signed integer inputs, the two's complement is returned. In a - two's-complement system negative numbers are represented by the two's complement of the absolute value.
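Both the removed and the added wording below describe the same observable behaviour of ``~``, which can be checked directly (an illustrative aside, public NumPy API only):

    >>> import numpy as np
    >>> np.binary_repr(np.invert(np.uint8(13)), width=8)
    '11110010'
    >>> int(np.invert(np.int8(13)))   # equals -(13 + 1)
    -14
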
This is the most common method of - representing signed integers on computers [1]_. A N-bit - two's-complement system can represent every integer in the range - :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. + For signed integer inputs, the bit-wise NOT of the absolute value is + returned. In a two's-complement system, this operation effectively flips + all the bits, resulting in a representation that corresponds to the + negative of the input plus one. This is the most common method of + representing signed integers on computers [1]_. A N-bit two's-complement + system can represent every integer in the range :math:`-2^{N-1}` to + :math:`+2^{N-1}-1`. Parameters ---------- @@ -1646,8 +1647,8 @@ def add_newdoc(place, name, doc): >>> np.binary_repr(x, width=16) '1111111111110010' - When using signed integer types the result is the two's complement of - the result for the unsigned type: + When using signed integer types, the result is the bit-wise NOT of + the unsigned type, interpreted as a signed integer: >>> np.invert(np.array([13], dtype=np.int8)) array([-14], dtype=int8) From cd69c4dc35dc3c4ad5cccc1eebc95ea9ba1088cd Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Mon, 10 Jun 2024 12:39:52 +0300 Subject: [PATCH 531/980] MAINT: smoke-docs: move ignores to conftest --- .spin/cmds.py | 10 +--------- numpy/conftest.py | 10 ++++++++++ 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 35de6845e2ca..b76f988363bc 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -313,16 +313,8 @@ def smoke_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): if verbose: pytest_args = ('-v',) + pytest_args - + # turn doctesting on: doctest_args = ( - # ignores are for things fail doctest collection (optionals etc) - '--ignore=numpy/distutils', - '--ignore=numpy/_core/cversions.py', - '--ignore=numpy/_pyinstaller', - '--ignore=numpy/random/_examples', - '--ignore=numpy/compat', - '--ignore=numpy/f2py/_backends/_distutils.py', - # turn doctesting on: '--doctest-modules', '--doctest-collect=api' ) diff --git a/numpy/conftest.py b/numpy/conftest.py index 01c9aed08cad..0a5aa3172888 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -213,3 +213,13 @@ def warnings_errors_and_rng(test=None): 'basics.subclassing.rst': '.. 
testcode:: admonitions not understood' } + # ignores are for things fail doctest collection (optionals etc) + dt_config.pytest_extra_ignore = [ + 'numpy/distutils', + 'numpy/_core/cversions.py', + 'numpy/_pyinstaller', + 'numpy/random/_examples', + 'numpy/compat', + 'numpy/f2py/_backends/_distutils.py', + ] + From 7f9d121bf2b4bb45a4b18aca83352dadd19f6158 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Mon, 10 Jun 2024 15:27:24 +0300 Subject: [PATCH 532/980] MAINT: add scipy-doctest to requirements/test_requirements.txt --- requirements/test_requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 4e53f86d355c..856ecf115ef1 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -5,6 +5,7 @@ wheel==0.38.1 setuptools hypothesis==6.81.1 pytest==7.4.0 +scipy-doctest pytz==2023.3.post1 pytest-cov==4.1.0 meson From f784a59dc6340b39447bc98238c35cafda243008 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jun 2024 17:15:15 +0000 Subject: [PATCH 533/980] MAINT: Bump pypa/cibuildwheel from 2.18.1 to 2.19.0 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.18.1 to 2.19.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/ba8be0d98853f5744f24e7f902c8adef7ae2e7f3...a8d190a111314a07eb5116036c4b3fb26a4e3162) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/free-threaded-wheels.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/free-threaded-wheels.yml b/.github/workflows/free-threaded-wheels.yml index edbe8dcc2387..fa804cab6127 100644 --- a/.github/workflows/free-threaded-wheels.yml +++ b/.github/workflows/free-threaded-wheels.yml @@ -134,7 +134,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@ba8be0d98853f5744f24e7f902c8adef7ae2e7f3 # v2.18.1 + uses: pypa/cibuildwheel@a8d190a111314a07eb5116036c4b3fb26a4e3162 # v2.19.0 env: CIBW_PRERELEASE_PYTHONS: True CIBW_FREE_THREADED_SUPPORT: True diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 50453bef6ee1..2215d7fd2685 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -149,7 +149,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@ba8be0d98853f5744f24e7f902c8adef7ae2e7f3 # v2.18.1 + uses: pypa/cibuildwheel@a8d190a111314a07eb5116036c4b3fb26a4e3162 # v2.19.0 env: CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 92412e93e291392b507d0d49914a2bc1866d308d Mon Sep 17 00:00:00 2001 From: Jules <57632293+JuliaPoo@users.noreply.github.com> Date: Tue, 11 Jun 2024 02:44:10 +0800 Subject: [PATCH 534/980] CI: modified CI job to test editable install (#26655) Closes gh-26337. As suggested by Ralf the `full` job in `linux.yml` was modified to run an editable install. 
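A quick way to confirm that such a run really exercises the editable install rather than a previously built wheel — a hypothetical spot check, not part of the CI job itself:

    import numpy
    import pathlib
    # for an editable (in-place) install this resolves into the source
    # checkout rather than into site-packages:
    print(pathlib.Path(numpy.__file__).resolve().parent)
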
--- .github/workflows/linux.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index aef580c00e30..f7c8fb4727af 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -114,7 +114,7 @@ jobs: pytest --pyargs numpy -m "not slow" full: - # Build a wheel, install it, then run the full test suite with code coverage + # Install as editable, then run the full test suite with code coverage needs: [smoke_test] runs-on: ubuntu-22.04 steps: @@ -138,16 +138,14 @@ jobs: mkdir -p ./.openblas python -c"import scipy_openblas32 as ob32; print(ob32.get_pkg_config())" > ./.openblas/scipy-openblas.pc - - name: Build a wheel + - name: Install as editable env: PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas run: | - python -m build --wheel --no-isolation --skip-dependency-check - pip install dist/numpy*.whl + pip install -e . --no-build-isolation - name: Run full test suite run: | - cd tools - pytest --pyargs numpy --cov-report=html:build/coverage + pytest numpy --cov-report=html:build/coverage # TODO: gcov benchmark: From 8767033dea9a82e5af38ec3e13d8947ea6184700 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Tue, 11 Jun 2024 07:36:47 +0300 Subject: [PATCH 535/980] rerun the CI From dc232ed750f44db62940ee1794da18f2a3e13613 Mon Sep 17 00:00:00 2001 From: Arun Date: Tue, 11 Jun 2024 16:55:47 +0530 Subject: [PATCH 536/980] ENH: Better error message for axis=None in `np.put_along_axis` and `np.take_along_axis` (#26597) * ENH: Better error message for axis=None --- numpy/lib/_shape_base_impl.py | 6 ++++++ numpy/lib/tests/test_shape_base.py | 20 ++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index 68453095db7e..b2e98ab8866a 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -162,6 +162,9 @@ def take_along_axis(arr, indices, axis): """ # normalize inputs if axis is None: + if indices.ndim != 1: + raise ValueError( + 'when axis=None, `indices` must have a single dimension.') arr = arr.flat arr_shape = (len(arr),) # flatiter has no .shape axis = 0 @@ -252,6 +255,9 @@ def put_along_axis(arr, indices, values, axis): """ # normalize inputs if axis is None: + if indices.ndim != 1: + raise ValueError( + 'when axis=None, `indices` must have a single dimension.') arr = arr.flat axis = 0 arr_shape = (len(arr),) # flatiter has no .shape diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py index 609b77720c86..2b03fdae39b5 100644 --- a/numpy/lib/tests/test_shape_base.py +++ b/numpy/lib/tests/test_shape_base.py @@ -63,6 +63,8 @@ def test_invalid(self): assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1) # invalid axis assert_raises(AxisError, take_along_axis, a, ai, axis=10) + # invalid indices + assert_raises(ValueError, take_along_axis, a, ai, axis=None) def test_empty(self): """ Test everything is ok with empty results, even with inserted dims """ @@ -104,6 +106,24 @@ def test_broadcast(self): put_along_axis(a, ai, 20, axis=1) assert_equal(take_along_axis(a, ai, axis=1), 20) + def test_invalid(self): + """ Test invalid inputs """ + a_base = np.array([[10, 30, 20], [60, 40, 50]]) + indices = np.array([[0], [1]]) + values = np.array([[2], [1]]) + + # sanity check + a = a_base.copy() + put_along_axis(a, indices, values, axis=0) + assert np.all(a == [[2, 2, 2], [1, 1, 1]]) + + # invalid indices + a = a_base.copy() + with assert_raises(ValueError) as 
exc: + put_along_axis(a, indices, values, axis=None) + assert "single dimension" in str(exc.exception) + + class TestApplyAlongAxis: def test_simple(self): From 87966372ace1049e4773aa1d4dc938baff67e0c5 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Tue, 11 Jun 2024 09:18:35 -0300 Subject: [PATCH 537/980] DOC: add CI and NEP commit acronyms [skip actions][skip azp][skip cirrus] --- doc/source/dev/development_workflow.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst index 1af4521482e0..a0a247c10957 100644 --- a/doc/source/dev/development_workflow.rst +++ b/doc/source/dev/development_workflow.rst @@ -166,12 +166,14 @@ Standard acronyms to start the commit message with are:: BENCH: changes to the benchmark suite BLD: change related to building numpy BUG: bug fix + CI: continuous integration DEP: deprecate something, or remove a deprecated object DEV: development tool or utility DOC: documentation ENH: enhancement MAINT: maintenance commit (refactoring, typos, etc.) MNT: alias for MAINT + NEP: NumPy enhancement proposals REL: related to releasing numpy REV: revert an earlier commit STY: style fix (whitespace, PEP8) From aaf762bbed32851257156d8d70ec0081fed29b0f Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 11 Jun 2024 13:50:37 -0600 Subject: [PATCH 538/980] MNT: build and upload mac free-threaded nightlies [wheel build] (#26664) [skip azp][skip circle][skip cirrus] --- .github/workflows/free-threaded-wheels.yml | 9 ++++++--- tools/wheels/cibw_before_build.sh | 3 ++- tools/wheels/cibw_test_command.sh | 3 ++- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/.github/workflows/free-threaded-wheels.yml b/.github/workflows/free-threaded-wheels.yml index fa804cab6127..ec6004c5baea 100644 --- a/.github/workflows/free-threaded-wheels.yml +++ b/.github/workflows/free-threaded-wheels.yml @@ -76,9 +76,12 @@ jobs: buildplat: - [ubuntu-20.04, manylinux_x86_64, ""] - [ubuntu-20.04, musllinux_x86_64, ""] - # TODO: build numpy and set up Windows and MacOS - # cibuildwheel does not yet support Mac for free-threaded python - # windows is supported but numpy doesn't build on the image yet + - [macos-13, macosx_x86_64, openblas] + # targeting macos >= 14. 
Could probably build on macos-14, but it would be a cross-compile + - [macos-13, macosx_x86_64, accelerate] + - [macos-14, macosx_arm64, accelerate] # always use accelerate + # TODO: set up Windows wheels; windows is supported on cibuildwheel but + # numpy doesn't build on the image yet python: ["cp313t"] env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 2fc5fa144d26..e2f464d32a2a 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -58,5 +58,6 @@ fi # python with a released version of cython FREE_THREADED_BUILD="$(python -c"import sysconfig; print(bool(sysconfig.get_config_var('Py_GIL_DISABLED')))")" if [[ $FREE_THREADED_BUILD == "True" ]]; then - python -m pip install git+https://github.com/cython/cython meson-python ninja + python -m pip install meson-python ninja + python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython fi diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 6fcad20236ff..e0962d4b36e5 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -32,7 +32,8 @@ FREE_THREADED_BUILD="$(python -c"import sysconfig; print(bool(sysconfig.get_conf if [[ $FREE_THREADED_BUILD == "True" ]]; then # TODO: delete when numpy is buildable under free-threaded python # with a released version of cython - python -m pip install git+https://github.com/cython/cython + python -m pip uninstall -y cython + python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython # TODO: delete when importing numpy no longer enables the GIL # setting to zero ensures the GIL is disabled while running the # tests under free-threaded python From addfc7afb5dc62962d42d90fba605883cf9ede5e Mon Sep 17 00:00:00 2001 From: mattip Date: Wed, 12 Jun 2024 22:11:25 +0300 Subject: [PATCH 539/980] change smoke-docs -> check-docs --- .github/workflows/linux.yml | 4 ++-- .spin/cmds.py | 12 ++++++------ pyproject.toml | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 4b5878efccbf..fd6b96fb54dc 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -180,11 +180,11 @@ jobs: shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' run: | spin bench --quick - - name: Run smoke-docs + - name: Check docstests shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' run: | pip install scipy-doctest hypothesis matplotlib scipy pytz - spin smoke-docs -v + spin check-docs -v sdist: needs: [smoke_test] diff --git a/.spin/cmds.py b/.spin/cmds.py index b76f988363bc..57dfe3142e2a 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -272,27 +272,27 @@ def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): '--verbose', '-v', is_flag=True, default=False ) @click.pass_context -def smoke_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): +def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): """🔧 Run doctests of objects in the public API.
PYTEST_ARGS are passed through directly to pytest, e.g.: - spin smoke-docs -- --pdb + spin check-docs -- --pdb To run tests on a directory: \b - spin smoke-docs numpy/linalg + spin check-docs numpy/linalg To report the durations of the N slowest doctests: - spin smoke-docs -- --durations=N + spin check-docs -- --durations=N To run doctests that match a given pattern: \b - spin smoke-docs -- -k "slogdet" - spin smoke-docs numpy/linalg -- -k "det and not slogdet" + spin check-docs -- -k "slogdet" + spin check-docs numpy/linalg -- -k "det and not slogdet" \b Note: diff --git a/pyproject.toml b/pyproject.toml index 93b6ce6b6b75..f42e5a1bfdef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -211,6 +211,6 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:docs", ".spin/cmds.py:changelog", ".spin/cmds.py:notes", - ".spin/cmds.py:smoke_docs" + ".spin/cmds.py:check_docs" ] "Metrics" = [".spin/cmds.py:bench"] From 1854f269b154aef615e90db61da339e062150ec9 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 12 Jun 2024 21:38:11 +0200 Subject: [PATCH 540/980] CI: upgrade FreeBSD Cirrus job from FreeBSD 13.2 to 14.0 (#26672) [skip actions] [skip azp] [skip circle] --- tools/ci/cirrus_arm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml index 3b48089dcc08..46fed5bbf0c4 100644 --- a/tools/ci/cirrus_arm.yml +++ b/tools/ci/cirrus_arm.yml @@ -67,7 +67,7 @@ freebsd_test_task: use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' compute_engine_instance: image_project: freebsd-org-cloud-dev - image: family/freebsd-13-2 + image: family/freebsd-14-0 platform: freebsd cpu: 1 memory: 4G From dc46fc779dcdb951710b388d18b291661413b8b8 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 12 Jun 2024 16:37:08 -0600 Subject: [PATCH 541/980] MNT: mark evil_global_disable_warn_O4O8_flag as thread-local --- numpy/_core/src/multiarray/conversion_utils.c | 2 +- numpy/_core/src/multiarray/conversion_utils.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 9eba190323ea..c30b31a633cc 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -1203,7 +1203,7 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) * that it is in an unpickle context instead of a normal context without * evil global state like we create here. */ -NPY_NO_EXPORT int evil_global_disable_warn_O4O8_flag = 0; +NPY_NO_EXPORT NPY_TLS int evil_global_disable_warn_O4O8_flag = 0; /* * Convert a gentype (that is actually a generic kind character) and diff --git a/numpy/_core/src/multiarray/conversion_utils.h b/numpy/_core/src/multiarray/conversion_utils.h index f138c3b98529..bff1db0c069d 100644 --- a/numpy/_core/src/multiarray/conversion_utils.h +++ b/numpy/_core/src/multiarray/conversion_utils.h @@ -113,7 +113,7 @@ PyArray_DeviceConverterOptional(PyObject *object, NPY_DEVICE *device); * that it is in an unpickle context instead of a normal context without * evil global state like we create here. */ -extern NPY_NO_EXPORT int evil_global_disable_warn_O4O8_flag; +extern NPY_NO_EXPORT NPY_TLS int evil_global_disable_warn_O4O8_flag; /* * Convert function which replaces np._NoValue with NULL. From 3688fb0f77eac73b27ea1431c7f86c948d2818f6 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 12 Jun 2024 16:24:08 -0600 Subject: [PATCH 542/980] CI: Use default llvm on Windows. 
`choco install llvm -y --version=16.0.6` causes an error, as the default version has increased to 18.1.6. --- .github/workflows/windows.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 4d6c811b1409..bf10ff006649 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -48,7 +48,10 @@ jobs: - name: Install Clang-cl if: matrix.compiler == 'Clang-cl' run: | - choco install llvm -y --version=16.0.6 + # llvm is preinstalled, but leave + # this here in case we need to pin the + # version at some point. + #choco install llvm -y - name: Install NumPy (MSVC) if: matrix.compiler == 'MSVC' From 0ae123a0e1795b9309e60c70a15a4abb19216351 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Jun 2024 23:57:08 +0000 Subject: [PATCH 543/980] MAINT: Bump github/codeql-action from 3.25.8 to 3.25.9 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.8 to 3.25.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/2e230e8fe0ad3a14a340ad0815ddb96d599d2aff...530d4feaa9c62aaab2d250371e2061eb7a172363) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index deeb3e08e300..bfffdb21b4e4 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 + uses: github/codeql-action/init@530d4feaa9c62aaab2d250371e2061eb7a172363 # v3.25.9 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 + uses: github/codeql-action/autobuild@530d4feaa9c62aaab2d250371e2061eb7a172363 # v3.25.9 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 + uses: github/codeql-action/analyze@530d4feaa9c62aaab2d250371e2061eb7a172363 # v3.25.9 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 5bd8f6cd0fce..ee55e95365c6 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v2.1.27 + uses: github/codeql-action/upload-sarif@530d4feaa9c62aaab2d250371e2061eb7a172363 # v2.1.27 with: sarif_file: results.sarif From 2ef7dc370ed9521686d5c2570782d7558f96f90f Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 12 Jun 2024 21:35:35 -0300 Subject: [PATCH 544/980] DOC: add `linalg.cross` example [skip actions][skip azp][skip cirrus] The example is an adaptation of `np.cross` examples. --- numpy/linalg/_linalg.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 6e3b18fef94d..9ac97e23be59 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -3170,6 +3170,31 @@ def cross(x1, x2, /, *, axis=-1): -------- numpy.cross + Examples + -------- + Vector cross-product. + + >>> x = np.array([1, 2, 3]) + >>> y = np.array([4, 5, 6]) + >>> np.linalg.cross(x, y) + array([-3, 6, -3]) + + Multiple vector cross-products. Note that the direction of the cross + product vector is defined by the *right-hand rule*. + + >>> x = np.array([[1,2,3], [4,5,6]]) + >>> y = np.array([[4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[-3, 6, -3], + [ 3, -6, 3]]) + + >>> x = np.array([[1, 2], [3, 4], [5, 6]]) + >>> y = np.array([[4, 5], [6, 1], [2, 3]]) + >>> np.cross(x, y, axis=0) + array([[-24, 6], + [ 18, 24], + [-6, -18]]) + """ if x1.shape[axis] != 3 or x2.shape[axis] != 3: raise ValueError( From c431b41900368a7e4d6cd12c5feb82c602b3ad20 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 12 Jun 2024 22:04:03 -0300 Subject: [PATCH 545/980] DOC: add `linalg.diagonal` example [skip actions][skip azp][skip cirrus] The example is an adaptation of `np.diagonal` examples and adds a section to the `offset` argument. --- numpy/linalg/_linalg.py | 56 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 54 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 9ac97e23be59..b33a941047bb 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -3073,6 +3073,58 @@ def diagonal(x, /, *, offset=0): -------- numpy.diagonal + Examples + -------- + >>> a = np.arange(8).reshape(2,2,2); a + array([[0, 1], + [2, 3]]) + >>> np.linalg.diagonal(a) + array([0, 3]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2,2,2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.linalg.diagonal(a) + array([[0, 3], + [4, 7]]) + + Diagonals adjacent to the main diagonal can be obtained by using the + `offset` argument: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.diagonal(a, offset=1) # First superdiagonal + array([1, 5]) + >>> np.linalg.diagonal(a, offset=2) # Second superdiagonal + array([2]) + >>> np.linalg.diagonal(a, offset=-1) # First subdiagonal + array([3, 7]) + >>> np.linalg.diagonal(a, offset=-2) # Second subdiagonal + array([6]) + + The anti-diagonal can be obtained by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.diagonal(np.fliplr(a)) # Horizontal flip + array([2, 4, 6]) + >>> np.linalg.diagonal(np.flipud(a)) # Vertical flip + array([6, 4, 2]) + + Note that the order in which the diagonal is retrieved varies depending + on the flip function. 
+ """ return _core_diagonal(x, offset, axis1=-2, axis2=-1) @@ -3184,13 +3236,13 @@ def cross(x1, x2, /, *, axis=-1): >>> x = np.array([[1,2,3], [4,5,6]]) >>> y = np.array([[4,5,6], [1,2,3]]) - >>> np.cross(x, y) + >>> np.linalg.cross(x, y) array([[-3, 6, -3], [ 3, -6, 3]]) >>> x = np.array([[1, 2], [3, 4], [5, 6]]) >>> y = np.array([[4, 5], [6, 1], [2, 3]]) - >>> np.cross(x, y, axis=0) + >>> np.linalg.cross(x, y, axis=0) array([[-24, 6], [ 18, 24], [-6, -18]]) From 97154177db896b4d5c6d0d29da9b2541e34e1e33 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 12 Jun 2024 22:17:38 -0300 Subject: [PATCH 546/980] DOC: add `linalg.matmul` example [skip actions][skip azp][skip cirrus] The example is an adaptation of `np.matmul` examples. --- numpy/linalg/_linalg.py | 47 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index b33a941047bb..69cc0e169702 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -3297,6 +3297,53 @@ def matmul(x1, x2, /): -------- numpy.matmul + Examples + -------- + For 2-D arrays it is the matrix product: + + >>> a = np.array([[1, 0], + ... [0, 1]]) + >>> b = np.array([[4, 1], + ... [2, 2]]) + >>> np.linalg.matmul(a, b) + array([[4, 1], + [2, 2]]) + + For 2-D mixed with 1-D, the result is the usual. + + >>> a = np.array([[1, 0], + ... [0, 1]]) + >>> b = np.array([1, 2]) + >>> np.linalg.matmul(a, b) + array([1, 2]) + >>> np.linalg.matmul(b, a) + array([1, 2]) + + + Broadcasting is conventional for stacks of arrays + + >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4)) + >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2)) + >>> np.linalg.matmul(a,b).shape + (2, 2, 2) + >>> np.linalg.matmul(a, b)[0, 1, 1] + 98 + >>> sum(a[0, 1, :] * b[0 , :, 1]) + 98 + + Vector, vector returns the scalar inner product, but neither argument + is complex-conjugated: + + >>> np.linalg.matmul([2j, 3j], [2j, 3j]) + (-13+0j) + + Scalar multiplication raises an error. + + >>> np.linalg.matmul([1,2], 3) + Traceback (most recent call last): + ... + ValueError: matmul: Input operand 1 does not have enough dimensions ... + """ return _core_matmul(x1, x2) From 6a770e04f9104ca0bf73484c054725bdd832869b Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 12 Jun 2024 22:31:33 -0300 Subject: [PATCH 547/980] DOC: add `linalg.matrix_norm` example [skip actions][skip azp][skip cirrus] The example is an adaptation of `np.linalg.norm` examples. 
--- numpy/linalg/_linalg.py | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 69cc0e169702..2c83a5570db7 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -3075,7 +3075,7 @@ def diagonal(x, /, *, offset=0): Examples -------- - >>> a = np.arange(8).reshape(2,2,2); a + >>> a = np.arange(4).reshape(2, 2); a array([[0, 1], [2, 3]]) >>> np.linalg.diagonal(a) @@ -3083,7 +3083,7 @@ def diagonal(x, /, *, offset=0): A 3-D example: - >>> a = np.arange(8).reshape(2,2,2); a + >>> a = np.arange(8).reshape(2, 2, 2); a array([[[0, 1], [2, 3]], [[4, 5], @@ -3403,6 +3403,39 @@ def matrix_norm(x, /, *, keepdims=False, ord="fro"): -------- numpy.linalg.norm : Generic norm function + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, ..., 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.norm(b) + 7.745966692414834 + >>> LA.matrix_norm(b, ord='fro') + 7.745966692414834 + >>> LA.matrix_norm(b, ord=np.inf) + 9.0 + >>> LA.matrix_norm(b, -np.inf) + 2.0 + + >>> LA.matrix_norm(b, ord=1) + 7.0 + >>> LA.matrix_norm(b, ord=-1) + 6.0 + >>> LA.matrix_norm(b, ord=2) + 7.3484692283495345 + + >>> LA.matrix_norm(a, -2) + 0.0 + >>> LA.matrix_norm(b, -2) + 1.8570331885190563e-016 # may vary + """ x = asanyarray(x) return norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord) From 90f952ad7efd32622231fd8db2ec80664b292cbb Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 12 Jun 2024 19:41:48 -0600 Subject: [PATCH 548/980] MAINT: Add comment lost in previous PR. The line appears to have been lost when merging main into the PR. [skip ci] --- numpy/_core/src/multiarray/descriptor.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index f7524473c633..548ecd9d8df1 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2038,6 +2038,7 @@ arraydescr_dealloc(PyArray_Descr *self) { Py_XDECREF(self->typeobj); if (!PyDataType_ISLEGACY(self)) { + /* non legacy dtypes must not have fields, etc. */ Py_TYPE(self)->tp_free((PyObject *)self); return; } From 2ca826539b8fd9854a23b3077972e7d9938df316 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Wed, 12 Jun 2024 22:45:19 -0300 Subject: [PATCH 549/980] DOC: add `linalg.outer` example [skip actions][skip azp][skip cirrus] The example is an adaptation of `np.outer` examples. 
--- numpy/linalg/_linalg.py | 43 +++++++++++++++++++++++++++++++++++------ 1 file changed, 37 insertions(+), 6 deletions(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 2c83a5570db7..a440d70a4590 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -876,6 +876,40 @@ def outer(x1, x2, /): -------- outer + Examples + -------- + Make a (*very* coarse) grid for computing a Mandelbrot set: + + >>> rl = np.linalg.outer(np.ones((5,)), np.linspace(-2, 2, 5)) + >>> rl + array([[-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.]]) + >>> im = np.linalg.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) + >>> im + array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], + [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], + [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], + [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) + >>> grid = rl + im + >>> grid + array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], + [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], + [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], + [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], + [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) + + An example using a "vector" of letters: + + >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> np.linalg.outer(x, [1, 2, 3]) + array([['a', 'aa', 'aaa'], + ['b', 'bb', 'bbb'], + ['c', 'cc', 'ccc']], dtype=object) + """ x1 = asanyarray(x1) x2 = asanyarray(x2) @@ -3415,13 +3449,13 @@ def matrix_norm(x, /, *, keepdims=False, ord="fro"): [-1, 0, 1], [ 2, 3, 4]]) - >>> LA.norm(b) + >>> LA.matrix_norm(b) 7.745966692414834 >>> LA.matrix_norm(b, ord='fro') 7.745966692414834 >>> LA.matrix_norm(b, ord=np.inf) 9.0 - >>> LA.matrix_norm(b, -np.inf) + >>> LA.matrix_norm(b, ord=-np.inf) 2.0 >>> LA.matrix_norm(b, ord=1) @@ -3430,10 +3464,7 @@ def matrix_norm(x, /, *, keepdims=False, ord="fro"): 6.0 >>> LA.matrix_norm(b, ord=2) 7.3484692283495345 - - >>> LA.matrix_norm(a, -2) - 0.0 - >>> LA.matrix_norm(b, -2) + >>> LA.matrix_norm(b, ord=-2) 1.8570331885190563e-016 # may vary """ From 686548fe3bb17271ae37b9d07b7918c56da09411 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 13 Jun 2024 09:43:58 +0300 Subject: [PATCH 550/980] DOC: mention spin check-docs in the devguide --- doc/TESTS.rst | 18 ++++++++++++++++++ doc/source/dev/development_environment.rst | 9 +++++++++ 2 files changed, 27 insertions(+) diff --git a/doc/TESTS.rst b/doc/TESTS.rst index 195935ccf380..ee8a8b4b07e1 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -74,6 +74,24 @@ Testing a subset of NumPy:: For detailed info on testing, see :ref:`testing-builds` + +Running doctests +---------------- + +NumPy documentation contains code examples, "doctests". To check that the examples +are correct, install the ``scipy-doctest`` package:: + + $ pip install scipy-doctest + +and run one of:: + + $ spin check-docs -v + $ spin check-docs numpy/linalg + $ spin check-docs -- -k 'det and not slogdet' + +Note that the doctests are not run when you use ``spin test``. 
+ + Other methods of running tests ------------------------------ diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index b1cc7d96ffe2..45992b17d123 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -114,6 +114,15 @@ argument to pytest:: $ spin test -v -t numpy/_core/tests/test_multiarray.py -- -k "MatMul and not vector" +To run "doctests" -- to check that the code examples in the documentation are correct -- +use the `check-docs` spin command. It relies on the `scipy-doctest` package, which +provides several additional features on top of the standard library ``doctest`` +package. Install ``scipy-doctest`` and run one of:: + + $ spin check-docs -v + $ spin check-docs numpy/linalg + $ spin check-docs -v -- -k 'det and not slogdet' + .. note:: Remember that all tests of NumPy should pass before committing your changes. From c52251738b5e6d1a420f712c881e2566de88aaaa Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 13 Jun 2024 09:56:20 +0300 Subject: [PATCH 551/980] DOC: doctests: fix/skip small glitches in the user tutorials --- doc/source/user/how-to-partition.rst | 4 ++-- numpy/conftest.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/source/user/how-to-partition.rst b/doc/source/user/how-to-partition.rst index e90b39e9440c..74c37c1caa5f 100644 --- a/doc/source/user/how-to-partition.rst +++ b/doc/source/user/how-to-partition.rst @@ -244,10 +244,10 @@ fully-dimensional result array. :: >>> np.ogrid[0:4, 0:6] - [array([[0], + (array([[0], [1], [2], - [3]]), array([[0, 1, 2, 3, 4, 5]])] + [3]]), array([[0, 1, 2, 3, 4, 5]])) All three methods described here can be used to evaluate function values on a grid. diff --git a/numpy/conftest.py b/numpy/conftest.py index 0a5aa3172888..ad331dbb3bed 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -210,7 +210,8 @@ def warnings_errors_and_rng(test=None): 'c-info.ufunc-tutorial.rst': '', 'basics.interoperability.rst': 'needs pandas', 'basics.dispatch.rst': 'errors out in /testing/overrides.py', - 'basics.subclassing.rst': '.. testcode:: admonitions not understood' + 'basics.subclassing.rst': '.. testcode:: admonitions not understood', + 'misc.rst': 'manipulates warnings', } # ignores are for things that fail doctest collection (optionals etc) From 7e08ca09cd03c686fd7a62c286b561c846bcfe20 Mon Sep 17 00:00:00 2001 From: Mateusz Sokół Date: Thu, 13 Jun 2024 10:50:53 +0200 Subject: [PATCH 552/980] Add `override_repr` option to np.printoptions --- numpy/_core/arrayprint.py | 21 ++++++++++++++++----- numpy/_core/arrayprint.pyi | 3 ++- numpy/_core/tests/test_arrayprint.py | 11 +++++++++++ 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 0ab4890d8f35..b4cab5655b63 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -59,11 +59,14 @@ 'formatter': None, # Internally stored as an int to simplify comparisons; converted from/to # str/False on the way in/out. - 'legacy': sys.maxsize} + 'legacy': sys.maxsize, + 'override_repr': None, +} def _make_options_dict(precision=None, threshold=None, edgeitems=None, linewidth=None, suppress=None, nanstr=None, infstr=None, - sign=None, formatter=None, floatmode=None, legacy=None): + sign=None, formatter=None, floatmode=None, legacy=None, + override_repr=None): """ Make a dictionary out of the non-None arguments, plus conversion of *legacy* and sanity checks.
@@ -119,7 +122,7 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, def set_printoptions(precision=None, threshold=None, edgeitems=None, linewidth=None, suppress=None, nanstr=None, infstr=None, formatter=None, sign=None, floatmode=None, - *, legacy=None): + *, legacy=None, override_repr=None): """ Set printing options. @@ -224,6 +227,9 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, .. versionadded:: 1.14.0 .. versionchanged:: 1.22.0 + override_repr: callable, optional + If set, the passed function will be used for generating arrays' repr. + Other options will be ignored. See Also -------- @@ -285,9 +291,10 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, """ opt = _make_options_dict(precision, threshold, edgeitems, linewidth, suppress, nanstr, infstr, sign, formatter, - floatmode, legacy) - # formatter is always reset + floatmode, legacy, override_repr) + # formatter and override_repr are always reset opt['formatter'] = formatter + opt['override_repr'] = override_repr _format_options.update(opt) # set the C variable for legacy mode @@ -1552,6 +1559,10 @@ def _array_repr_implementation( arr, max_line_width=None, precision=None, suppress_small=None, array2string=array2string): """Internal version of array_repr() that allows overriding array2string.""" + override_repr = _format_options["override_repr"] + if override_repr is not None: + return override_repr(arr) + if max_line_width is None: max_line_width = _format_options['linewidth'] diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 50f10ec694f0..44d77083cd63 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -63,7 +63,8 @@ def set_printoptions( sign: Literal[None, "-", "+", " "] = ..., floatmode: None | _FloatMode = ..., *, - legacy: Literal[None, False, "1.13", "1.21"] = ... + legacy: Literal[None, False, "1.13", "1.21"] = ..., + override_repr: None | Callable[[NDArray[Any]], str] = ..., ) -> None: ... def get_printoptions() -> _FormatOptions: ... def array2string( diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index d9caced3c1bc..1b40a9392c84 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -667,6 +667,17 @@ def test_formatter_reset(self): np.set_printoptions(formatter={'float_kind':None}) assert_equal(repr(x), "array([0., 1., 2.])") + def test_override_repr(self): + x = np.arange(3) + np.set_printoptions(override_repr=lambda x: "FOO") + assert_equal(repr(x), "FOO") + np.set_printoptions(override_repr=None) + assert_equal(repr(x), "array([0, 1, 2])") + + with np.printoptions(override_repr=lambda x: "BAR"): + assert_equal(repr(x), "BAR") + assert_equal(repr(x), "array([0, 1, 2])") + def test_0d_arrays(self): assert_equal(str(np.array('café', '<U4')), 'café') […] Date: Thu, 13 Jun 2024 12:05:48 +0300 Subject: [PATCH 553/980] DEV: add spin check-tutorials command --- .spin/cmds.py | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++ pyproject.toml | 3 ++- 2 files changed, 71 insertions(+), 1 deletion(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index f88fa814a2af..1dd527bf41ef 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -344,6 +344,75 @@ def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): +@click.command() +@click.argument("pytest_args", nargs=-1) +@click.option( + "-j", + "n_jobs", + metavar='N_JOBS', + default="1", + help=("Number of parallel jobs for testing. 
" + "Can be set to `auto` to use all cores.") +) +@click.option( + '--verbose', '-v', is_flag=True, default=False +) +@click.pass_context +def check_tutorials(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): + """🔧 Run doctests of user-facing rst tutorials. + + To test all tutorials in the numpy/doc/source/user/ directory, use + + spin check-tutorials + + To run tests on a specific RST file: + + \b + spin check-tutorials numpy/doc/source/user/absolute-beginners.rst + + \b + Note: + ----- + + \b + - This command only runs doctests and skips everything under tests/ + - This command only doctests public objects: those which are accessible + from the top-level `__init__.py` file. + + """ # noqa: E501 + # handle all of + # - `spin check-tutorials` (pytest_args == ()) + # - `spin check-tutorials path/to/rst`, and + # - `spin check-tutorials path/to/rst -- --durations=3` + if (not pytest_args) or all(arg.startswith('-') for arg in pytest_args): + pytest_args = ('numpy/doc/source/user',) + pytest_args + + # make all paths relative to the numpy source folder + pytest_args = tuple( + str(curdir / '..' / '..' / arg) if not arg.startswith('-') else arg + for arg in pytest_args + ) + + if (n_jobs != "1") and ('-n' not in pytest_args): + pytest_args = ('-n', str(n_jobs)) + pytest_args + + if verbose: + pytest_args = ('-v',) + pytest_args + + # turn doctesting on: + doctest_args = ( + '--doctest-glob=*rst', + ) + + pytest_args = pytest_args + doctest_args + + ctx.params['pytest_args'] = pytest_args + + for extra_param in ('n_jobs', 'verbose'): + del ctx.params[extra_param] + + ctx.forward(meson.test) + # From scipy: benchmarks/benchmarks/common.py def _set_mem_rlimit(max_mem=None): diff --git a/pyproject.toml b/pyproject.toml index f42e5a1bfdef..ad4673949a10 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -211,6 +211,7 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:docs", ".spin/cmds.py:changelog", ".spin/cmds.py:notes", - ".spin/cmds.py:check_docs" + ".spin/cmds.py:check_docs", + ".spin/cmds.py:check_tutorials", ] "Metrics" = [".spin/cmds.py:bench"] From b858df51c77b20a00d40fa22bbbdd9caee2fc857 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 13 Jun 2024 12:06:36 +0300 Subject: [PATCH 554/980] CI: run spin check-tutorials in CI --- .github/workflows/linux.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 1ceeb61514bb..b3a0731d27e7 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -183,6 +183,7 @@ jobs: run: | pip install scipy-doctest hypothesis matplotlib scipy pytz spin check-docs -v + spin check-tutorials -v sdist: needs: [smoke_test] From c4c5bf01e5df26c71a7d539cabc254beb8d15402 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 13 Jun 2024 12:17:09 +0300 Subject: [PATCH 555/980] rm doctesting --- tools/refguide_check.py | 30 +----------------------------- 1 file changed, 1 insertion(+), 29 deletions(-) diff --git a/tools/refguide_check.py b/tools/refguide_check.py index 6e63ffccf7cc..63105adb0b30 100644 --- a/tools/refguide_check.py +++ b/tools/refguide_check.py @@ -1132,15 +1132,7 @@ def main(argv): parser = ArgumentParser(usage=__doc__.lstrip()) parser.add_argument("module_names", metavar="SUBMODULES", default=[], nargs='*', help="Submodules to check (default: all public)") - parser.add_argument("--doctests", action="store_true", - help="Run also doctests on ") parser.add_argument("-v", "--verbose", action="count", default=0) - parser.add_argument("--doctest-warnings", 
action="store_true", - help="Enforce warning checking for doctests") - parser.add_argument("--rst", nargs='?', const='doc', default=None, - help=("Run also examples from *rst files " - "discovered walking the directory(s) specified, " - "defaults to 'doc'")) args = parser.parse_args(argv) modules = [] @@ -1149,8 +1141,6 @@ def main(argv): if not args.module_names: args.module_names = list(PUBLIC_SUBMODULES) + [BASE_MODULE] - os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true' - module_names = list(args.module_names) for name in module_names: if name in OTHER_MODULE_DOCS: @@ -1164,9 +1154,6 @@ def main(argv): errormsgs = [] - if args.doctests or args.rst: - init_matplotlib() - for submodule_name in module_names: prefix = BASE_MODULE + '.' if not ( @@ -1186,7 +1173,7 @@ def main(argv): if submodule_name in args.module_names: modules.append(module) - if args.doctests or not args.rst: + if modules: print("Running checks for %d modules:" % (len(modules),)) for module in modules: if dots: @@ -1201,9 +1188,6 @@ def main(argv): module.__name__) mod_results += check_rest(module, set(names).difference(deprecated), dots=dots) - if args.doctests: - mod_results += check_doctests(module, (args.verbose >= 2), dots=dots, - doctest_warnings=args.doctest_warnings) for v in mod_results: assert isinstance(v, tuple), v @@ -1214,18 +1198,6 @@ def main(argv): sys.stderr.write('\n') sys.stderr.flush() - if args.rst: - base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..') - rst_path = os.path.relpath(os.path.join(base_dir, args.rst)) - if os.path.exists(rst_path): - print('\nChecking files in %s:' % rst_path) - check_documentation(rst_path, results, args, dots) - else: - sys.stderr.write(f'\ninvalid --rst argument "{args.rst}"') - errormsgs.append('invalid directory argument to --rst') - if dots: - sys.stderr.write("\n") - sys.stderr.flush() # Report results for module, mod_results in results: From 46462383509d2d083650264b7ef84320136d8e08 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 13 Jun 2024 12:25:55 +0300 Subject: [PATCH 556/980] DEV: rip doctesting from refguide-check --- .circleci/config.yml | 8 +- tools/refguide_check.py | 572 ---------------------------------------- 2 files changed, 2 insertions(+), 578 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 530631281c80..eb267dffd7fb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -95,14 +95,10 @@ jobs: # destination: neps - run: - name: run doctests on documentation + name: run refguide-check command: | . 
venv/bin/activate - # Note: keep these two checks separate, because they seem to - # influence each other through changing global state (e.g., via - # `np.polynomial.set_default_printstyle`) - python tools/refguide_check.py --rst - python tools/refguide_check.py --doctests + python tools/refguide_check.py -v - persist_to_workspace: root: ~/repo diff --git a/tools/refguide_check.py b/tools/refguide_check.py index 63105adb0b30..8de816715bdb 100644 --- a/tools/refguide_check.py +++ b/tools/refguide_check.py @@ -27,7 +27,6 @@ """ import copy -import doctest import inspect import io import os @@ -39,7 +38,6 @@ import docutils.core from argparse import ArgumentParser from contextlib import contextmanager, redirect_stderr -from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL from docutils.parsers.rst import directives @@ -49,8 +47,6 @@ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext')) from numpydoc.docscrape_sphinx import get_doc_object -SKIPBLOCK = doctest.register_optionflag('SKIPBLOCK') - # Enable specific Sphinx directives from sphinx.directives.other import SeeAlso, Only directives.register_directive('seealso', SeeAlso) @@ -97,52 +93,6 @@ 'io.arff': 'io', } -# these names are known to fail doctesting and we like to keep it that way -# e.g. sometimes pseudocode is acceptable etc -# -# Optionally, a subset of methods can be skipped by setting dict-values -# to a container of method-names -DOCTEST_SKIPDICT = { - # cases where NumPy docstrings import things from SciPy: - 'numpy.lib.vectorize': None, - 'numpy.random.standard_gamma': None, - 'numpy.random.gamma': None, - 'numpy.random.vonmises': None, - 'numpy.random.power': None, - 'numpy.random.zipf': None, - # cases where NumPy docstrings import things from other 3'rd party libs: - 'numpy._core.from_dlpack': None, - # remote / local file IO with DataSource is problematic in doctest: - 'numpy.lib.npyio.DataSource': None, - 'numpy.lib.Repository': None, -} - -# Skip non-numpy RST files, historical release notes -# Any single-directory exact match will skip the directory and all subdirs. -# Any exact match (like 'doc/release') will scan subdirs but skip files in -# the matched directory. -# Any filename will skip that file -RST_SKIPLIST = [ - 'scipy-sphinx-theme', - 'sphinxext', - 'neps', - 'changelog', - 'doc/release', - 'doc/source/release', - 'doc/release/upcoming_changes', - 'c-info.ufunc-tutorial.rst', - 'c-info.python-as-glue.rst', - 'f2py.getting-started.rst', - 'f2py-examples.rst', - 'arrays.nditer.cython.rst', - 'how-to-verify-bug.rst', - # See PR 17222, these should be fixed - 'basics.dispatch.rst', - 'basics.subclassing.rst', - 'basics.interoperability.rst', - 'misc.rst', - 'TESTS.rst' -] # these names are not required to be present in ALL despite being in # autosummary:: listing @@ -161,14 +111,6 @@ # priority -- focus on just getting docstrings executed / correct r'numpy\.*', ] -# deprecated windows in scipy.signal namespace -for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman', - 'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop', - 'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning', - 'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'): - REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' 
+ name) - -HAVE_MATPLOTLIB = False def short_path(path, cwd=None): @@ -609,520 +551,6 @@ def check_rest(module, names, dots=True): return results -### Doctest helpers #### - -# the namespace to run examples in -DEFAULT_NAMESPACE = {'np': np} - -# the namespace to do checks in -CHECK_NAMESPACE = { - 'np': np, - 'numpy': np, - 'assert_allclose': np.testing.assert_allclose, - 'assert_equal': np.testing.assert_equal, - # recognize numpy repr's - 'array': np.array, - 'matrix': np.matrix, - 'int64': np.int64, - 'uint64': np.uint64, - 'int8': np.int8, - 'int32': np.int32, - 'float32': np.float32, - 'float64': np.float64, - 'dtype': np.dtype, - 'nan': np.nan, - 'inf': np.inf, - 'StringIO': io.StringIO, -} - - -class DTRunner(doctest.DocTestRunner): - """ - The doctest runner - """ - DIVIDER = "\n" - - def __init__(self, item_name, checker=None, verbose=None, optionflags=0): - self._item_name = item_name - doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose, - optionflags=optionflags) - - def _report_item_name(self, out, new_line=False): - if self._item_name is not None: - if new_line: - out("\n") - self._item_name = None - - def report_start(self, out, test, example): - self._checker._source = example.source - return doctest.DocTestRunner.report_start(self, out, test, example) - - def report_success(self, out, test, example, got): - if self._verbose: - self._report_item_name(out, new_line=True) - return doctest.DocTestRunner.report_success(self, out, test, example, got) - - def report_unexpected_exception(self, out, test, example, exc_info): - self._report_item_name(out) - return doctest.DocTestRunner.report_unexpected_exception( - self, out, test, example, exc_info) - - def report_failure(self, out, test, example, got): - self._report_item_name(out) - return doctest.DocTestRunner.report_failure(self, out, test, - example, got) - -class Checker(doctest.OutputChecker): - """ - Check the docstrings - """ - obj_pattern = re.compile('at 0x[0-9a-fA-F]+>') - vanilla = doctest.OutputChecker() - rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary", - "# uninitialized", "#uninitialized", "# uninit"} - stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(', - 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(', - '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim', - '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(', - '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='} - - def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2): - self.parse_namedtuples = parse_namedtuples - self.atol, self.rtol = atol, rtol - if ns is None: - self.ns = CHECK_NAMESPACE - else: - self.ns = ns - - def check_output(self, want, got, optionflags): - # cut it short if they are equal - if want == got: - return True - - # skip stopwords in source - if any(word in self._source for word in self.stopwords): - return True - - # skip random stuff - if any(word in want for word in self.rndm_markers): - return True - - # skip function/object addresses - if self.obj_pattern.search(got): - return True - - # ignore comments (e.g. signal.freqresp) - if want.lstrip().startswith("#"): - return True - - # try the standard doctest - try: - if self.vanilla.check_output(want, got, optionflags): - return True - except Exception: - pass - - # OK then, convert strings to objects - try: - a_want = eval(want, dict(self.ns)) - a_got = eval(got, dict(self.ns)) - except Exception: - # Maybe we're printing a numpy array? 
This produces invalid python - # code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between - # values. So, reinsert commas and retry. - # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and - # (2) n-dim arrays with n > 1 - s_want = want.strip() - s_got = got.strip() - cond = (s_want.startswith("[") and s_want.endswith("]") and - s_got.startswith("[") and s_got.endswith("]")) - if cond: - s_want = ", ".join(s_want[1:-1].split()) - s_got = ", ".join(s_got[1:-1].split()) - return self.check_output(s_want, s_got, optionflags) - - if not self.parse_namedtuples: - return False - # suppose that "want" is a tuple, and "got" is smth like - # MoodResult(statistic=10, pvalue=0.1). - # Then convert the latter to the tuple (10, 0.1), - # and then compare the tuples. - try: - num = len(a_want) - regex = (r'[\w\d_]+\(' + - ', '.join([r'[\w\d_]+=(.+)']*num) + - r'\)') - grp = re.findall(regex, got.replace('\n', ' ')) - if len(grp) > 1: # no more than one for now - return False - # fold it back to a tuple - got_again = '(' + ', '.join(grp[0]) + ')' - return self.check_output(want, got_again, optionflags) - except Exception: - return False - - # ... and defer to numpy - try: - return self._do_check(a_want, a_got) - except Exception: - # heterog tuple, eg (1, np.array([1., 2.])) - try: - return all(self._do_check(w, g) for w, g in zip(a_want, a_got)) - except (TypeError, ValueError): - return False - - def _do_check(self, want, got): - # This should be done exactly as written to correctly handle all of - # numpy-comparable objects, strings, and heterogeneous tuples - try: - if want == got: - return True - except Exception: - pass - return np.allclose(want, got, atol=self.atol, rtol=self.rtol) - - -def _run_doctests(tests, full_name, verbose, doctest_warnings): - """ - Run modified doctests for the set of `tests`. - - Parameters - ---------- - tests : list - - full_name : str - - verbose : bool - doctest_warnings : bool - - Returns - ------- - tuple(bool, list) - Tuple of (success, output) - """ - flags = NORMALIZE_WHITESPACE | ELLIPSIS - runner = DTRunner(full_name, checker=Checker(), optionflags=flags, - verbose=verbose) - - output = io.StringIO(newline='') - success = True - - # Redirect stderr to the stdout or output - tmp_stderr = sys.stdout if doctest_warnings else output - - @contextmanager - def temp_cwd(): - cwd = os.getcwd() - tmpdir = tempfile.mkdtemp() - try: - os.chdir(tmpdir) - yield tmpdir - finally: - os.chdir(cwd) - shutil.rmtree(tmpdir) - - # Run tests, trying to restore global state afterward - cwd = os.getcwd() - with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \ - redirect_stderr(tmp_stderr): - # try to ensure random seed is NOT reproducible - np.random.seed(None) - - ns = {} - for t in tests: - # We broke the tests up into chunks to try to avoid PSEUDOCODE - # This has the unfortunate side effect of restarting the global - # namespace for each test chunk, so variables will be "lost" after - # a chunk. Chain the globals to avoid this - t.globs.update(ns) - t.filename = short_path(t.filename, cwd) - # Process our options - if any([SKIPBLOCK in ex.options for ex in t.examples]): - continue - fails, successes = runner.run(t, out=output.write, clear_globs=False) - if fails > 0: - success = False - ns = t.globs - - output.seek(0) - return success, output.read() - - -def check_doctests(module, verbose, ns=None, - dots=True, doctest_warnings=False): - """ - Check code in docstrings of the module's public symbols. 
- - Parameters - ---------- - module : ModuleType - Name of module - verbose : bool - Should the result be verbose - ns : dict - Name space of module - dots : bool - - doctest_warnings : bool - - Returns - ------- - results : list - List of [(item_name, success_flag, output), ...] - """ - if ns is None: - ns = dict(DEFAULT_NAMESPACE) - - # Loop over non-deprecated items - results = [] - - for name in get_all_dict(module)[0]: - full_name = module.__name__ + '.' + name - - if full_name in DOCTEST_SKIPDICT: - skip_methods = DOCTEST_SKIPDICT[full_name] - if skip_methods is None: - continue - else: - skip_methods = None - - try: - obj = getattr(module, name) - except AttributeError: - import traceback - results.append((full_name, False, - "Missing item!\n" + - traceback.format_exc())) - continue - - finder = doctest.DocTestFinder() - try: - tests = finder.find(obj, name, globs=dict(ns)) - except Exception: - import traceback - results.append((full_name, False, - "Failed to get doctests!\n" + - traceback.format_exc())) - continue - - if skip_methods is not None: - tests = [i for i in tests if - i.name.partition(".")[2] not in skip_methods] - - success, output = _run_doctests(tests, full_name, verbose, - doctest_warnings) - - if dots: - output_dot('.' if success else 'F') - - results.append((full_name, success, output)) - - if HAVE_MATPLOTLIB: - import matplotlib.pyplot as plt - plt.close('all') - - return results - - -def check_doctests_testfile(fname, verbose, ns=None, - dots=True, doctest_warnings=False): - """ - Check code in a text file. - - Mimic `check_doctests` above, differing mostly in test discovery. - (which is borrowed from stdlib's doctest.testfile here, - https://github.com/python-git/python/blob/master/Lib/doctest.py) - - Parameters - ---------- - fname : str - File name - verbose : bool - - ns : dict - Name space - - dots : bool - - doctest_warnings : bool - - Returns - ------- - list - List of [(item_name, success_flag, output), ...] - - Notes - ----- - - refguide can be signalled to skip testing code by adding - ``#doctest: +SKIP`` to the end of the line. If the output varies or is - random, add ``# may vary`` or ``# random`` to the comment. for example - - >>> plt.plot(...) # doctest: +SKIP - >>> random.randint(0,10) - 5 # random - - We also try to weed out pseudocode: - * We maintain a list of exceptions which signal pseudocode, - * We split the text file into "blocks" of code separated by empty lines - and/or intervening text. - * If a block contains a marker, the whole block is then assumed to be - pseudocode. It is then not being doctested. - - The rationale is that typically, the text looks like this: - - blah - - >>> from numpy import some_module # pseudocode! - >>> func = some_module.some_function - >>> func(42) # still pseudocode - 146 - - blah - - >>> 2 + 3 # real code, doctest it - 5 - - """ - if ns is None: - ns = CHECK_NAMESPACE - results = [] - - _, short_name = os.path.split(fname) - if short_name in DOCTEST_SKIPDICT: - return results - - full_name = fname - with open(fname, encoding='utf-8') as f: - text = f.read() - - PSEUDOCODE = set(['some_function', 'some_module', 'import example', - 'ctypes.CDLL', # likely need compiling, skip it - 'integrate.nquad(func,' # ctypes integrate tutotial - ]) - - # split the text into "blocks" and try to detect and omit pseudocode blocks. 
- parser = doctest.DocTestParser() - good_parts = [] - base_line_no = 0 - for part in text.split('\n\n'): - try: - tests = parser.get_doctest(part, ns, fname, fname, base_line_no) - except ValueError as e: - if e.args[0].startswith('line '): - # fix line number since `parser.get_doctest` does not increment - # the reported line number by base_line_no in the error message - parts = e.args[0].split() - parts[1] = str(int(parts[1]) + base_line_no) - e.args = (' '.join(parts),) + e.args[1:] - raise - if any(word in ex.source for word in PSEUDOCODE - for ex in tests.examples): - # omit it - pass - else: - # `part` looks like a good code, let's doctest it - good_parts.append((part, base_line_no)) - base_line_no += part.count('\n') + 2 - - # Reassemble the good bits and doctest them: - tests = [] - for good_text, line_no in good_parts: - tests.append(parser.get_doctest(good_text, ns, fname, fname, line_no)) - success, output = _run_doctests(tests, full_name, verbose, - doctest_warnings) - - if dots: - output_dot('.' if success else 'F') - - results.append((full_name, success, output)) - - if HAVE_MATPLOTLIB: - import matplotlib.pyplot as plt - plt.close('all') - - return results - - -def iter_included_files(base_path, verbose=0, suffixes=('.rst',)): - """ - Generator function to walk `base_path` and its subdirectories, skipping - files or directories in RST_SKIPLIST, and yield each file with a suffix in - `suffixes` - - Parameters - ---------- - base_path : str - Base path of the directory to be processed - verbose : int - - suffixes : tuple - - Yields - ------ - path - Path of the directory and its sub directories - """ - if os.path.exists(base_path) and os.path.isfile(base_path): - yield base_path - for dir_name, subdirs, files in os.walk(base_path, topdown=True): - if dir_name in RST_SKIPLIST: - if verbose > 0: - sys.stderr.write('skipping files in %s' % dir_name) - files = [] - for p in RST_SKIPLIST: - if p in subdirs: - if verbose > 0: - sys.stderr.write('skipping %s and subdirs' % p) - subdirs.remove(p) - for f in files: - if (os.path.splitext(f)[1] in suffixes and - f not in RST_SKIPLIST): - yield os.path.join(dir_name, f) - - -def check_documentation(base_path, results, args, dots): - """ - Check examples in any *.rst located inside `base_path`. - Add the output to `results`. - - See Also - -------- - check_doctests_testfile - """ - for filename in iter_included_files(base_path, args.verbose): - if dots: - sys.stderr.write(filename + ' ') - sys.stderr.flush() - - tut_results = check_doctests_testfile( - filename, - (args.verbose >= 2), dots=dots, - doctest_warnings=args.doctest_warnings) - - # stub out a "module" which is needed when reporting the result - def scratch(): - pass - scratch.__name__ = filename - results.append((scratch, tut_results)) - if dots: - sys.stderr.write('\n') - sys.stderr.flush() - - -def init_matplotlib(): - """ - Check feasibility of matplotlib initialization. 
- """ - global HAVE_MATPLOTLIB - - try: - import matplotlib - matplotlib.use('Agg') - HAVE_MATPLOTLIB = True - except ImportError: - HAVE_MATPLOTLIB = False - def main(argv): """ From 552157c57f18d00ad42d6ef35e4f372ce892f4b7 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 13 Jun 2024 12:53:50 +0300 Subject: [PATCH 557/980] CI: install pandas for absolute_beginners.rst --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index b3a0731d27e7..bdf693985334 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -181,7 +181,7 @@ jobs: - name: Check docstests shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' run: | - pip install scipy-doctest hypothesis matplotlib scipy pytz + pip install scipy-doctest hypothesis matplotlib scipy pytz pandas spin check-docs -v spin check-tutorials -v From 2ad1c9d4bbf5b16f237428ba71a2b8a82cb813f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Thu, 13 Jun 2024 11:58:11 +0200 Subject: [PATCH 558/980] Fix docstring --- numpy/_core/arrayprint.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index b4cab5655b63..7e8835044d3d 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -340,7 +340,7 @@ def get_printoptions(): -------- >>> np.get_printoptions() - {'edgeitems': 3, 'threshold': 1000, ..., 'legacy': False} + {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None} >>> np.get_printoptions()['linewidth'] 75 From 42a677f91718f5ccc9e685e3eb34a8e6fe47fb99 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Thu, 13 Jun 2024 11:32:54 -0300 Subject: [PATCH 559/980] DOC: add `linalg.svdvals` example [skip actions][skip azp][skip cirrus] Completely new example --- numpy/linalg/_linalg.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index a440d70a4590..8a89c9b7e9d2 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -1886,6 +1886,22 @@ def svdvals(x, /): -------- scipy.linalg.svdvals : Compute singular values of a matrix. + Examples + -------- + + >>> np.linalg.svdvals([[1, 2, 3, 4, 5], + [1, 4, 9, 16, 25], + [1, 8, 27, 64, 125]]) + array([146.68862757, 5.57510612, 0.60393245]) + + Determine the rank of a matrix using singular values: + + >>> s = np.linalg.svdvals([[1, 2, 3], + [2, 4, 6], + [-1, 1, -1]]); s + array([8.38434191e+00, 1.64402274e+00, 2.31534378e-16]) + >>> np.count_nonzero(s > 1e-10) # Matrix of rank 2 + 2 """ return svd(x, compute_uv=False, hermitian=False) From 46f1bc0debadad7c51affd6e5b52e26af8d28692 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Thu, 13 Jun 2024 12:12:04 -0300 Subject: [PATCH 560/980] DOC: add `linalg.trace` example [skip actions][skip azp][skip cirrus] The example is an adaptation of `np.trace` examples and adds a section to the `offset` argument. --- numpy/linalg/_linalg.py | 41 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 8a89c9b7e9d2..165583c60161 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -1890,18 +1890,19 @@ def svdvals(x, /): -------- >>> np.linalg.svdvals([[1, 2, 3, 4, 5], - [1, 4, 9, 16, 25], - [1, 8, 27, 64, 125]]) + ... [1, 4, 9, 16, 25], + ... 
[1, 8, 27, 64, 125]]) array([146.68862757, 5.57510612, 0.60393245]) Determine the rank of a matrix using singular values: >>> s = np.linalg.svdvals([[1, 2, 3], - [2, 4, 6], - [-1, 1, -1]]); s + ... [2, 4, 6], + ... [-1, 1, -1]]); s array([8.38434191e+00, 1.64402274e+00, 2.31534378e-16]) >>> np.count_nonzero(s > 1e-10) # Matrix of rank 2 2 + """ return svd(x, compute_uv=False, hermitian=False) @@ -3228,6 +3229,38 @@ def trace(x, /, *, offset=0, dtype=None): -------- numpy.trace + Examples + -------- + >>> np.linalg.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2, 2, 2)) + >>> np.linalg.trace(a) + array([6, 8]) + + Trace is computed with the last two axes as the 2-d sub-arrays. + This behavior differs from :py:func:`numpy.trace` which uses the first two + axes by default. + + >>> a = np.arange(24).reshape((3, 2, 2, 2)) + >>> np.linalg.trace(a).shape + (3, 2) + + Traces adjacent to the main diagonal can be obtained by using the + `offset` argument: + + >>> a = np.arange(9).reshape((3, 3)); a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.trace(a, offset=1) # First superdiagonal + 12 + >>> np.linalg.trace(a, offset=2) # Second superdiagonal + 2 + >>> np.linalg.trace(a, offset=-1) # First subdiagonal + 10 + >>> np.linalg.trace(a, offset=-2) # Second subdiagonal + 6 + """ return _core_trace(x, offset, axis1=-2, axis2=-1, dtype=dtype) From 974ed52bfe769877937e040e67b2ab62c685b294 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Thu, 13 Jun 2024 13:47:07 -0300 Subject: [PATCH 561/980] DOC: add `linalg.vecdot` example [skip actions][skip azp][skip cirrus] The example is an adaptation of `np.vecdot` examples. --- numpy/linalg/_linalg.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 165583c60161..988a163cfc11 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -3253,7 +3253,7 @@ def trace(x, /, *, offset=0, dtype=None): [3, 4, 5], [6, 7, 8]]) >>> np.linalg.trace(a, offset=1) # First superdiagonal - 12 + 6 >>> np.linalg.trace(a, offset=2) # Second superdiagonal 2 >>> np.linalg.trace(a, offset=-1) # First subdiagonal @@ -3634,5 +3634,14 @@ def vecdot(x1, x2, /, *, axis=-1): -------- numpy.vecdot + Examples + -------- + Get the projected size along a given normal for an array of vectors. + + >>> v = np.array([[0., 5., 0.], [0., 0., 10.], [0., 6., 8.]]) + >>> n = np.array([0., 0.6, 0.8]) + >>> np.linalg.vecdot(v, n) + array([ 3., 8., 10.]) + """ return _core_vecdot(x1, x2, axis=axis) From 54ef78da237319c1b555c9e8e2175d007031dc7e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Jun 2024 17:34:31 +0000 Subject: [PATCH 562/980] MAINT: Bump pypa/cibuildwheel from 2.19.0 to 2.19.1 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.19.0 to 2.19.1. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/a8d190a111314a07eb5116036c4b3fb26a4e3162...932529cab190fafca8c735a551657247fa8f8eaf) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/free-threaded-wheels.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/free-threaded-wheels.yml b/.github/workflows/free-threaded-wheels.yml index ec6004c5baea..231805c50eae 100644 --- a/.github/workflows/free-threaded-wheels.yml +++ b/.github/workflows/free-threaded-wheels.yml @@ -137,7 +137,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@a8d190a111314a07eb5116036c4b3fb26a4e3162 # v2.19.0 + uses: pypa/cibuildwheel@932529cab190fafca8c735a551657247fa8f8eaf # v2.19.1 env: CIBW_PRERELEASE_PYTHONS: True CIBW_FREE_THREADED_SUPPORT: True diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 2215d7fd2685..e1267e8b9fc3 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -149,7 +149,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@a8d190a111314a07eb5116036c4b3fb26a4e3162 # v2.19.0 + uses: pypa/cibuildwheel@932529cab190fafca8c735a551657247fa8f8eaf # v2.19.1 env: CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 4efd680001c19a97d22fd7dd3856288860f9d803 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Jun 2024 17:34:42 +0000 Subject: [PATCH 563/980] MAINT: Bump github/codeql-action from 3.25.9 to 3.25.10 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.9 to 3.25.10. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/530d4feaa9c62aaab2d250371e2061eb7a172363...23acc5c183826b7a8a97bce3cecc52db901f8251) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index bfffdb21b4e4..b48108f8cbe7 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@530d4feaa9c62aaab2d250371e2061eb7a172363 # v3.25.9 + uses: github/codeql-action/init@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@530d4feaa9c62aaab2d250371e2061eb7a172363 # v3.25.9 + uses: github/codeql-action/autobuild@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@530d4feaa9c62aaab2d250371e2061eb7a172363 # v3.25.9 + uses: github/codeql-action/analyze@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index ee55e95365c6..1a7b50a75841 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@530d4feaa9c62aaab2d250371e2061eb7a172363 # v2.1.27 + uses: github/codeql-action/upload-sarif@23acc5c183826b7a8a97bce3cecc52db901f8251 # v2.1.27 with: sarif_file: results.sarif From b01740a4d49ec52e5edc42ba07ff56f49405dae7 Mon Sep 17 00:00:00 2001 From: Luiz Eduardo Amaral Date: Thu, 13 Jun 2024 14:06:14 -0300 Subject: [PATCH 564/980] DOC: add `linalg.vector_norm` example [skip actions][skip azp][skip cirrus] The example is an adaptation of `np.vector_norm` examples. --- numpy/linalg/_linalg.py | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 988a163cfc11..cf5f9b5c0a81 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -3235,7 +3235,7 @@ def trace(x, /, *, offset=0, dtype=None): 3.0 >>> a = np.arange(8).reshape((2, 2, 2)) >>> np.linalg.trace(a) - array([6, 8]) + array([3, 11]) Trace is computed with the last two axes as the 2-d sub-arrays. This behavior differs from :py:func:`numpy.trace` which uses the first two @@ -3555,6 +3555,34 @@ def vector_norm(x, /, *, axis=None, keepdims=False, ord=2): -------- numpy.linalg.norm : Generic norm function + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) + 1 + >>> a + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> b = a.reshape((3, 3)) + >>> b + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + >>> LA.vector_norm(b) + 16.881943016134134 + >>> LA.vector_norm(b, ord=np.inf) + 9.0 + >>> LA.vector_norm(b, ord=-np.inf) + 1.0 + + >>> LA.vector_norm(b, ord=1) + 45.0 + >>> LA.vector_norm(b, ord=-1) + 0.3534857623790153 + >>> LA.vector_norm(b, ord=2) + 16.881943016134134 + >>> LA.vector_norm(b, ord=-2) + 0.8058837395885292 + """ x = asanyarray(x) shape = list(x.shape) From df9cd071586f349055a6e9b04a12608f0b63d610 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Fri, 14 Jun 2024 14:42:28 +0300 Subject: [PATCH 565/980] BUILD: check for scipy-doctest, remove it from requirements --- .spin/cmds.py | 4 ++++ requirements/test_requirements.txt | 1 - 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index f88fa814a2af..a2e8bd0ac656 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -319,6 +319,10 @@ def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): from the top-level `__init__.py` file. 
""" # noqa: E501 + try: + import scipy_doctest + except ModuleNotFoundError as e: + raise ModuleNotFoundError("scipy-doctest not installed") from e if (not pytest_args): pytest_args = ('numpy',) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 856ecf115ef1..4e53f86d355c 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -5,7 +5,6 @@ wheel==0.38.1 setuptools hypothesis==6.81.1 pytest==7.4.0 -scipy-doctest pytz==2023.3.post1 pytest-cov==4.1.0 meson From b52814fbb3d92ff89d442ea15d396a227eef52c5 Mon Sep 17 00:00:00 2001 From: Ben Woodruff Date: Fri, 14 Jun 2024 06:16:16 -0600 Subject: [PATCH 566/980] BUG: Adds asanyarray to start of linalg.cross (#26667) Currently linalg.cross fails when given two 3D lists. This adds `asanyarray` at the start of the code, mimicing the other Array API compatible additions. This was discussed in PR #26640, with a bug fix requested. --- numpy/linalg/_linalg.py | 3 +++ numpy/linalg/tests/test_linalg.py | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 6e3b18fef94d..689cdf52ed0b 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -3171,6 +3171,9 @@ def cross(x1, x2, /, *, axis=-1): numpy.cross """ + x1 = asanyarray(x1) + x2 = asanyarray(x2) + if x1.shape[axis] != 3 or x2.shape[axis] != 3: raise ValueError( "Both input arrays must be (arrays of) 3-dimensional vectors, " diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 02e94354399d..969934a36698 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -2307,6 +2307,14 @@ def test_cross(): assert_equal(actual, expected) + # We test that lists are converted to arrays. + u = [1, 2, 3] + v = [4, 5, 6] + actual = np.linalg.cross(u, v) + expected = array([-3, 6, -3]) + + assert_equal(actual, expected) + with assert_raises_regex( ValueError, r"input arrays must be \(arrays of\) 3-dimensional vectors" From 4424ee49176a72556234537cf9f7ab1356b10e74 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Fri, 14 Jun 2024 16:16:35 +0300 Subject: [PATCH 567/980] MNT: comment on the purpose of the import [skip azp][skip cirrus] --- .spin/cmds.py | 1 + 1 file changed, 1 insertion(+) diff --git a/.spin/cmds.py b/.spin/cmds.py index a2e8bd0ac656..d98908666a33 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -320,6 +320,7 @@ def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): """ # noqa: E501 try: + # prevent obscure error later import scipy_doctest except ModuleNotFoundError as e: raise ModuleNotFoundError("scipy-doctest not installed") from e From 9fb721821655ddc78a27bfd05be050f988960948 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 14 Jun 2024 14:41:17 +0200 Subject: [PATCH 568/980] DOC: document workaround for deprecation of dim-2 inputs to `cross` This addresses a part of gh-26620. The one-liner was verified and is likely to be used in a future `np.linalg.cross2d` function implementation, in gh-26640. [skip actions] [skip azp] [skip cirrus] --- numpy/_core/numeric.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 82755a0eff46..6c3d880a8656 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -1565,6 +1565,12 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): Supports full broadcasting of the inputs. + Dimension-2 input arrays were deprecated in 2.0.0. 
If you do need this + functionality, you can use:: + + def cross2d(x, y): + return x[..., 0] * y[..., 1] - x[..., 1] * y[..., 0] + Examples -------- Vector cross-product. From 81763d58391e67e2961bae5274c302938f4076a3 Mon Sep 17 00:00:00 2001 From: Christian Lorentzen Date: Fri, 14 Jun 2024 16:37:36 +0200 Subject: [PATCH 569/980] BUG: weighted nanpercentile, nanquantile and multi-dim q (#26582) * TST add test_nan_value_with_weight * FIX weighted nanquantile * CLN appease linter * Update numpy/lib/tests/test_nanfunctions.py Co-authored-by: Olivier Grisel * Simplify code, expand old and add new ndim test with weights * BUG: Expand test and fix multi-dimensional q in the normal path... --------- Co-authored-by: Sebastian Berg Co-authored-by: Olivier Grisel Co-authored-by: Sebastian Berg --- numpy/lib/_nanfunctions_impl.py | 67 ++++++++++++++++++++++------ numpy/lib/tests/test_nanfunctions.py | 62 +++++++++++++++++++++++-- 2 files changed, 112 insertions(+), 17 deletions(-) diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index baedb7d12498..d1fa5c0747f5 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -141,7 +141,7 @@ def _copyto(a, val, mask): return a -def _remove_nan_1d(arr1d, overwrite_input=False): +def _remove_nan_1d(arr1d, second_arr1d=None, overwrite_input=False): """ Equivalent to arr1d[~arr1d.isnan()], but in a different order @@ -151,6 +151,8 @@ def _remove_nan_1d(arr1d, overwrite_input=False): ---------- arr1d : ndarray Array to remove nans from + second_arr1d : ndarray or None + A second array which will have the same positions removed as arr1d. overwrite_input : bool True if `arr1d` can be modified in place @@ -158,6 +160,8 @@ def _remove_nan_1d(arr1d, overwrite_input=False): ------- res : ndarray Array with nan elements removed + second_res : ndarray or None + Second array with nan element positions of first array removed. overwrite_input : bool True if `res` can be modified in place, given the constraint on the input @@ -172,9 +176,12 @@ def _remove_nan_1d(arr1d, overwrite_input=False): if s.size == arr1d.size: warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=6) - return arr1d[:0], True + if second_arr1d is None: + return arr1d[:0], None, True + else: + return arr1d[:0], second_arr1d[:0], True elif s.size == 0: - return arr1d, overwrite_input + return arr1d, second_arr1d, overwrite_input else: if not overwrite_input: arr1d = arr1d.copy() @@ -183,7 +190,15 @@ def _remove_nan_1d(arr1d, overwrite_input=False): # fill nans in beginning of array with non-nans of end arr1d[s[:enonan.size]] = enonan - return arr1d[:-s.size], True + if second_arr1d is None: + return arr1d[:-s.size], None, True + else: + if not overwrite_input: + second_arr1d = second_arr1d.copy() + enonan = second_arr1d[-s.size:][~c[-s.size:]] + second_arr1d[s[:enonan.size]] = enonan + + return arr1d[:-s.size], second_arr1d[:-s.size], True def _divide_by_count(a, b, out=None): @@ -1061,7 +1076,7 @@ def _nanmedian1d(arr1d, overwrite_input=False): Private function for rank 1 arrays. Compute the median ignoring NaNs. 
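The paired NaN-removal that ``_remove_nan_1d`` gains above can be sketched in
a simplified form (a boolean-mask version, not the in-place compaction the
real helper performs)::

    import numpy as np

    def remove_nan_pair(values, second):
        # Drop NaN positions from ``values`` and the same positions
        # from the companion array (e.g. weights).
        keep = ~np.isnan(values)
        return values[keep], second[keep]

    v, w = remove_nan_pair(np.array([1.0, np.nan, 2.0, 3.0]),
                           np.array([1.0, 99.0, 1.0, 1.0]))
    # v -> [1. 2. 3.], w -> [1. 1. 1.]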
See nanmedian for parameter usage """ - arr1d_parsed, overwrite_input = _remove_nan_1d( + arr1d_parsed, _, overwrite_input = _remove_nan_1d( arr1d, overwrite_input=overwrite_input, ) @@ -1650,13 +1665,36 @@ def _nanquantile_ureduce_func( wgt = None if weights is None else weights.ravel() result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt) else: - result = np.apply_along_axis(_nanquantile_1d, axis, a, q, - overwrite_input, method, weights) - # apply_along_axis fills in collapsed axis with results. - # Move that axis to the beginning to match percentile's - # convention. - if q.ndim != 0: - result = np.moveaxis(result, axis, 0) + # Note that this code could try to fill in `out` right away + if weights is None: + result = np.apply_along_axis(_nanquantile_1d, axis, a, q, + overwrite_input, method, weights) + # apply_along_axis fills in collapsed axis with results. + # Move those axes to the beginning to match percentile's + # convention. + if q.ndim != 0: + from_ax = [axis + i for i in range(q.ndim)] + result = np.moveaxis(result, from_ax, list(range(q.ndim))) + else: + # We need to apply along axis over 2 arrays, a and weights. + # move operation axes to end for simplicity: + a = np.moveaxis(a, axis, -1) + if weights is not None: + weights = np.moveaxis(weights, axis, -1) + if out is not None: + result = out + else: + # weights are limited to `inverted_cdf` so the result dtype + # is known to be identical to that of `a` here: + result = np.empty_like(a, shape=q.shape + a.shape[:-1]) + + for ii in np.ndindex(a.shape[:-1]): + result[(...,) + ii] = _nanquantile_1d( + a[ii], q, weights=weights[ii], + overwrite_input=overwrite_input, method=method, + ) + # This path dealt with `out` already... + return result if out is not None: out[...] = result @@ -1670,8 +1708,9 @@ def _nanquantile_1d( Private function for rank 1 arrays. Compute quantile ignoring NaNs. See nanpercentile for parameter usage """ - arr1d, overwrite_input = _remove_nan_1d(arr1d, - overwrite_input=overwrite_input) + # TODO: What to do when arr1d = [1, np.nan] and weights = [0, 1]? 
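The axis bookkeeping above (moving the collapsed ``q`` axes to the front)
produces the usual percentile result convention; a sketch of the public
behavior this fixes, assuming a build with the patch applied::

    import numpy as np

    a = np.random.rand(3, 4, 5)
    q = np.array([[20., 50.], [70., 90.]])  # q.ndim == 2
    res = np.nanpercentile(a, q, axis=1)
    print(res.shape)  # -> (2, 2, 3, 5): q's shape leads the result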
+ arr1d, weights, overwrite_input = _remove_nan_1d(arr1d, + second_arr1d=weights, overwrite_input=overwrite_input) if arr1d.size == 0: # convert to scalar return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()] diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index da3ee0f2a3dc..d196b133005f 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -1144,7 +1144,8 @@ def test_complex(self): assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) @pytest.mark.parametrize("weighted", [False, True]) - def test_result_values(self, weighted): + @pytest.mark.parametrize("use_out", [False, True]) + def test_result_values(self, weighted, use_out): if weighted: percentile = partial(np.percentile, method="inverted_cdf") nanpercentile = partial(np.nanpercentile, method="inverted_cdf") @@ -1160,13 +1161,16 @@ def gen_weights(d): return None tgt = [percentile(d, 28, weights=gen_weights(d)) for d in _rdat] - res = nanpercentile(_ndat, 28, axis=1, weights=gen_weights(_ndat)) + out = np.empty_like(tgt) if use_out else None + res = nanpercentile(_ndat, 28, axis=1, + weights=gen_weights(_ndat), out=out) assert_almost_equal(res, tgt) # Transpose the array to fit the output convention of numpy.percentile tgt = np.transpose([percentile(d, (28, 98), weights=gen_weights(d)) for d in _rdat]) + out = np.empty_like(tgt) if use_out else None res = nanpercentile(_ndat, (28, 98), axis=1, - weights=gen_weights(_ndat)) + weights=gen_weights(_ndat), out=out) assert_almost_equal(res, tgt) @pytest.mark.parametrize("axis", [None, 0, 1]) @@ -1242,6 +1246,58 @@ def test_multiple_percentiles(self): np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6) ) + @pytest.mark.parametrize("nan_weight", [0, 1, 2, 3, 1e200]) + def test_nan_value_with_weight(self, nan_weight): + x = [1, np.nan, 2, 3] + result = np.float64(2.0) + q_unweighted = np.nanpercentile(x, 50, method="inverted_cdf") + assert_equal(q_unweighted, result) + + # The weight value at the nan position should not matter. + w = [1.0, nan_weight, 1.0, 1.0] + q_weighted = np.nanpercentile(x, 50, weights=w, method="inverted_cdf") + assert_equal(q_weighted, result) + + @pytest.mark.parametrize("axis", [0, 1, 2]) + def test_nan_value_with_weight_ndim(self, axis): + # Create a multi-dimensional array to test + np.random.seed(1) + x_no_nan = np.random.random(size=(100, 99, 2)) + # Set some places to NaN (not particularly smart) so there is always + # some non-Nan. + x = x_no_nan.copy() + x[np.arange(99), np.arange(99), 0] = np.nan + + p = np.array([[20., 50., 30], [70, 33, 80]]) + + # We just use ones as weights, but replace it with 0 or 1e200 at the + # NaN positions below. + weights = np.ones_like(x) + + # For comparison use weighted normal percentile with nan weights at + # 0 (and no NaNs); not sure this is strictly identical but should be + # sufficiently so (if a percentile lies exactly on a 0 value). 
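The invariant these tests encode can be exercised directly (assuming a build
containing this fix; values taken from ``test_nan_value_with_weight``)::

    import numpy as np

    x = [1, np.nan, 2, 3]
    # The weight attached to the NaN position must not affect the result:
    for w_nan in (0, 1, 1e200):
        w = [1.0, w_nan, 1.0, 1.0]
        q = np.nanpercentile(x, 50, weights=w, method="inverted_cdf")
        assert q == 2.0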
+ weights[np.isnan(x)] = 0 + p_expected = np.percentile( + x_no_nan, p, axis=axis, weights=weights, method="inverted_cdf") + + p_unweighted = np.nanpercentile( + x, p, axis=axis, method="inverted_cdf") + # The normal and unweighted versions should be identical: + assert_equal(p_unweighted, p_expected) + + weights[np.isnan(x)] = 1e200 # huge value, shouldn't matter + p_weighted = np.nanpercentile( + x, p, axis=axis, weights=weights, method="inverted_cdf") + assert_equal(p_weighted, p_expected) + # Also check with out passed: + out = np.empty_like(p_weighted) + res = np.nanpercentile( + x, p, axis=axis, weights=weights, out=out, method="inverted_cdf") + + assert res is out + assert_equal(out, p_expected) + class TestNanFunctions_Quantile: # most of this is already tested by TestPercentile From 6f428f2e0cbd2b809ce9ba76a531e01d6c983b9c Mon Sep 17 00:00:00 2001 From: EricXie <161030123+EngineerEricXie@users.noreply.github.com> Date: Sat, 15 Jun 2024 01:55:49 +0800 Subject: [PATCH 570/980] BUG: Fix bug in numpy.pad() (#25963) * BUG: Fix bug in numpy.pad() and add test cases See #25926 for the bug Fix linter bug (E501 line too long) * Increase readability and clarity --- numpy/lib/_arraypad_impl.py | 18 +++++++++++++--- numpy/lib/tests/test_arraypad.py | 36 ++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_arraypad_impl.py b/numpy/lib/_arraypad_impl.py index 7ec52167f1c0..af6c4da4c3b7 100644 --- a/numpy/lib/_arraypad_impl.py +++ b/numpy/lib/_arraypad_impl.py @@ -293,7 +293,8 @@ def _get_stats(padded, axis, width_pair, length_pair, stat_func): return left_stat, right_stat -def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): +def _set_reflect_both(padded, axis, width_pair, method, + original_period, include_edge=False): """ Pad `axis` of `arr` with reflection. @@ -308,6 +309,8 @@ def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): dimension. method : str Controls method of reflection; options are 'even' or 'odd'. + original_period : int + Original length of data on `axis` of `arr`. include_edge : bool If true, edge value is included in reflection, otherwise the edge value forms the symmetric axis to the reflection. @@ -320,11 +323,20 @@ def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): """ left_pad, right_pad = width_pair old_length = padded.shape[axis] - right_pad - left_pad - + if include_edge: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = old_length // original_period * original_period # Edge is included, we need to offset the pad amount by 1 edge_offset = 1 else: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = ((old_length - 1) // (original_period - 1) + * (original_period - 1) + 1) edge_offset = 0 # Edge is not included, no need to offset pad amount old_length -= 1 # but must be omitted from the chunk @@ -865,7 +877,7 @@ def pad(array, pad_width, mode='constant', **kwargs): # the length of the original values in the current dimension. 
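The effect of threading ``array.shape[axis]`` through as ``original_period``
shows up on wide pads; a sketch using the case from the new
``test_check_04`` below (assuming the fix is applied)::

    import numpy as np

    # With the reflection period clamped to the original data length,
    # a wide 'reflect' pad repeats the full [1, 2, 3] pattern:
    out = np.pad([1, 2, 3], [1, 10], 'reflect')
    # -> [2 1 2 3 2 1 2 3 2 1 2 3 2 1]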
             left_index, right_index = _set_reflect_both(
                 roi, axis, (left_index, right_index),
-                method, include_edge
+                method, array.shape[axis], include_edge
             )

         elif mode == "wrap":
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 8723f4d9ba73..ef3319e901a0 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -867,6 +867,42 @@ def test_check_03(self):
         a = np.pad([1, 2, 3], 4, 'reflect')
         b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
         assert_array_equal(a, b)
+
+    def test_check_04(self):
+        a = np.pad([1, 2, 3], [1, 10], 'reflect')
+        b = np.array([2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1])
+        assert_array_equal(a, b)
+
+    def test_check_05(self):
+        a = np.pad([1, 2, 3, 4], [45, 10], 'reflect')
+        b = np.array(
+            [4, 3, 2, 1, 2, 3, 4, 3, 2, 1,
+             2, 3, 4, 3, 2, 1, 2, 3, 4, 3,
+             2, 1, 2, 3, 4, 3, 2, 1, 2, 3,
+             4, 3, 2, 1, 2, 3, 4, 3, 2, 1,
+             2, 3, 4, 3, 2, 1, 2, 3, 4, 3,
+             2, 1, 2, 3, 4, 3, 2, 1, 2])
+        assert_array_equal(a, b)
+
+    def test_check_06(self):
+        a = np.pad([1, 2, 3, 4], [15, 2], 'symmetric')
+        b = np.array(
+            [2, 3, 4, 4, 3, 2, 1, 1, 2, 3,
+             4, 4, 3, 2, 1, 1, 2, 3, 4, 4,
+             3]
+        )
+        assert_array_equal(a, b)
+
+    def test_check_07(self):
+        a = np.pad([1, 2, 3, 4, 5, 6], [45, 3], 'symmetric')
+        b = np.array(
+            [4, 5, 6, 6, 5, 4, 3, 2, 1, 1,
+             2, 3, 4, 5, 6, 6, 5, 4, 3, 2,
+             1, 1, 2, 3, 4, 5, 6, 6, 5, 4,
+             3, 2, 1, 1, 2, 3, 4, 5, 6, 6,
+             5, 4, 3, 2, 1, 1, 2, 3, 4, 5,
+             6, 6, 5, 4])
+        assert_array_equal(a, b)


 class TestEmptyArray:

From 3e78d4f911d88e99a07b68636576ea3072d9ed94 Mon Sep 17 00:00:00 2001
From: bmwoodruff
Date: Sat, 15 Jun 2024 17:46:17 -0500
Subject: [PATCH 571/980] DOC: Added missing See Also sections

This adds some missing "See also" sections to a few functions in the
polynomial module. This gives a consistent look for the add, sub, mulx,
mul, div, pow versions of all 6 types.

[skip actions] [skip azp] [skip cirrus]
---
 numpy/polynomial/chebyshev.py | 4 ++++
 numpy/polynomial/hermite_e.py | 4 ++++
 numpy/polynomial/legendre.py  | 2 +-
 3 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index 20ee10c9980d..e7ac1404d343 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -670,6 +670,10 @@ def chebmulx(c):
     out : ndarray
         Array representing the result of the multiplication.

+    See Also
+    --------
+    chebadd, chebsub, chebmul, chebdiv, chebpow
+
     Notes
     -----

diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index 0aaf2a78c768..e7fe1233cd14 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -408,6 +408,10 @@ def hermemulx(c):
     out : ndarray
         Array representing the result of the multiplication.
+ See Also + -------- + hermeadd, hermesub, hermemul, hermediv, hermepow + Notes ----- The multiplication uses the recursion relationship for Hermite diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index cfbf1486d486..ded9e7821891 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -426,7 +426,7 @@ def legmulx(c): See Also -------- - legadd, legmul, legdiv, legpow + legadd, legsub, legmul, legdiv, legpow Notes ----- From 03c86db672147dea12bc935e0130fb3e84746666 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 15 Jun 2024 23:36:55 +0000 Subject: [PATCH 572/980] MAINT,BUG: Correctly skip distutils options --- numpy/f2py/f2py2e.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index f5fab23ab867..c63111090a9f 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -635,10 +635,13 @@ def run_compile(): r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in flib_flags] - _reg4 = re.compile( - r'--((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') - fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in fc_flags] + reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') + reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') + fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] + distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] + if not (MESON_ONLY_VER or backend_key == 'meson'): + fc_flags.extend(distutils_flags) + sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] del_list = [] for s in flib_flags: From 08a80a5d4be4fe94b7285906e1748950981dd699 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 16 Jun 2024 00:14:54 +0000 Subject: [PATCH 573/980] BUG: Use fortran args from f2py in meson --- numpy/f2py/_backends/_meson.py | 31 ++++++++++++++++++++++- numpy/f2py/_backends/meson.build.template | 1 + 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py index c396733f2f44..17b0ff008ca0 100644 --- a/numpy/f2py/_backends/_meson.py +++ b/numpy/f2py/_backends/_meson.py @@ -28,7 +28,7 @@ def __init__( include_dirs: list[Path], object_files: list[Path], linker_args: list[str], - c_args: list[str], + fortran_args: list[str], build_type: str, python_exe: str, ): @@ -46,12 +46,18 @@ def __init__( self.include_dirs = [] self.substitutions = {} self.objects = object_files + # Convert args to '' wrapped variant for meson + self.fortran_args = [ + f"'{x}'" if not (x.startswith("'") and x.endswith("'")) else x + for x in fortran_args + ] self.pipeline = [ self.initialize_template, self.sources_substitution, self.deps_substitution, self.include_substitution, self.libraries_substitution, + self.fortran_args_substitution, ] self.build_type = build_type self.python_exe = python_exe @@ -109,6 +115,14 @@ def include_substitution(self) -> None: [f"{self.indent}'''{inc}'''," for inc in self.include_dirs] ) + def fortran_args_substitution(self) -> None: + if self.fortran_args: + self.substitutions["fortran_args"] = ( + f"{self.indent}fortran_args: [{', '.join([arg for arg in self.fortran_args])}]," + ) + else: + self.substitutions["fortran_args"] = "" + def generate_meson_build(self): for node in self.pipeline: 
node() @@ -126,6 +140,7 @@ def __init__(self, *args, **kwargs): self.build_type = ( "debug" if any("debug" in flag for flag in self.fc_flags) else "release" ) + self.fc_flags = _get_flags(self.fc_flags) def _move_exec_to_root(self, build_dir: Path): walk_dir = Path(build_dir) / self.meson_build_dir @@ -203,3 +218,17 @@ def _prepare_sources(mname, sources, bdir): if not Path(source).suffix == ".pyf" ] return extended_sources + + +def _get_flags(fc_flags): + flag_values = [] + flag_pattern = re.compile(r"--f(77|90)flags=(.*)") + + for flag in fc_flags: + match_result = flag_pattern.match(flag) + if match_result: + values = match_result.group(2).strip().split() + flag_values.extend(values) + # Hacky way to preserve order of flags + unique_flags = list(dict.fromkeys(flag_values)) + return unique_flags diff --git a/numpy/f2py/_backends/meson.build.template b/numpy/f2py/_backends/meson.build.template index 092b1112c262..fdcc1b17ce21 100644 --- a/numpy/f2py/_backends/meson.build.template +++ b/numpy/f2py/_backends/meson.build.template @@ -51,4 +51,5 @@ ${dep_list} ${lib_list} ${lib_dir_list} ], +${fortran_args} install : true) From d982cfa68d2fc8ffb510fc721401c1713929e587 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 16 Jun 2024 03:44:17 +0000 Subject: [PATCH 574/980] TST: Add one for passing arguments to f2py Co-authored-by: warrickball --- .../f2py/tests/src/regression/f77fixedform.f95 | 5 +++++ numpy/f2py/tests/test_regression.py | 18 ++++++++++++++++++ numpy/f2py/tests/util.py | 2 +- 3 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 numpy/f2py/tests/src/regression/f77fixedform.f95 diff --git a/numpy/f2py/tests/src/regression/f77fixedform.f95 b/numpy/f2py/tests/src/regression/f77fixedform.f95 new file mode 100644 index 000000000000..e47a13f7e851 --- /dev/null +++ b/numpy/f2py/tests/src/regression/f77fixedform.f95 @@ -0,0 +1,5 @@ +C This is an invalid file, but it does compile with -ffixed-form + subroutine mwe( + & x) + real x + end subroutine mwe diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 5d967ba73353..e584dc12fc1e 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -109,6 +109,7 @@ def test_gh26148b(self): assert(res[0] == 8) assert(res[1] == 15) +@pytest.mark.slow def test_gh26623(): # Including libraries with . 
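The quoting step in ``MesonTemplate.__init__`` above can be tried standalone
(the flag values here are made-up examples)::

    flags = ["-ffixed-form", "'-O2'"]
    # Wrap each flag in single quotes for meson.build unless it is
    # already quoted, mirroring the comprehension above:
    quoted = [f"'{x}'" if not (x.startswith("'") and x.endswith("'")) else x
              for x in flags]
    print(quoted)  # -> ["'-ffixed-form'", "'-O2'"]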
should not generate an incorrect meson.build try: @@ -119,3 +120,20 @@ def test_gh26623(): ) except RuntimeError as rerr: assert "lparen got assign" not in str(rerr) + + +@pytest.mark.slow +def test_gh25784(): + # Compile dubious file using passed flags + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f77fixedform.f95")], + options=[ + # Meson will collect and dedup these to pass to fortran_args: + "--f77flags='-ffixed-form'", + "--f90flags='-ffixed-form'", + ], + module_name="Blah", + ) + except ImportError as rerr: + assert "unknown_subroutine_" in str(rerr) diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index faedd4cc1597..c2258791e6d9 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -121,7 +121,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): dst_sources.append(dst) base, ext = os.path.splitext(dst) - if ext in (".f90", ".f", ".c", ".pyf"): + if ext in (".f90", ".f95", ".f", ".c", ".pyf"): f2py_sources.append(dst) assert f2py_sources From d5ce926375c882af0b0dba8979a1d2a3b724a2d1 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 16 Jun 2024 04:06:16 +0000 Subject: [PATCH 575/980] MAINT: Be more robust wrt f2py flags --- numpy/f2py/_backends/_meson.py | 2 +- numpy/f2py/f2py2e.py | 1 + numpy/f2py/tests/test_regression.py | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py index 17b0ff008ca0..b438ed223433 100644 --- a/numpy/f2py/_backends/_meson.py +++ b/numpy/f2py/_backends/_meson.py @@ -223,11 +223,11 @@ def _prepare_sources(mname, sources, bdir): def _get_flags(fc_flags): flag_values = [] flag_pattern = re.compile(r"--f(77|90)flags=(.*)") - for flag in fc_flags: match_result = flag_pattern.match(flag) if match_result: values = match_result.group(2).strip().split() + values = [val.strip("'\"") for val in values] flag_values.extend(values) # Hacky way to preserve order of flags unique_flags = list(dict.fromkeys(flag_values)) diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index c63111090a9f..c6eac78b71f4 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -635,6 +635,7 @@ def run_compile(): r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in flib_flags] + # TODO: Once distutils is dropped completely, i.e. 
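A self-contained sketch of the flag extraction implemented by ``_get_flags``
above (renamed here to keep it standalone; the input mimics the options used
in ``test_gh25784``)::

    import re

    def get_flags(fc_flags):
        # Collect values from --f77flags=/--f90flags= options, strip
        # surrounding quotes, and dedupe while preserving order.
        flag_values = []
        pattern = re.compile(r"--f(77|90)flags=(.*)")
        for flag in fc_flags:
            m = pattern.match(flag)
            if m:
                values = m.group(2).strip().split()
                flag_values.extend(v.strip("'\"") for v in values)
        return list(dict.fromkeys(flag_values))

    print(get_flags(["--f77flags='-ffixed-form -O2'",
                     '--f90flags="-ffixed-form -Og"']))
    # -> ['-ffixed-form', '-O2', '-Og']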
min_ver >= 3.12, unify into --fflags reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index e584dc12fc1e..58d6037e61ef 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -130,8 +130,8 @@ def test_gh25784(): [util.getpath("tests", "src", "regression", "f77fixedform.f95")], options=[ # Meson will collect and dedup these to pass to fortran_args: - "--f77flags='-ffixed-form'", - "--f90flags='-ffixed-form'", + "--f77flags='-ffixed-form -O2'", + "--f90flags=\"-ffixed-form -Og\"", ], module_name="Blah", ) From 2a5f2785ed7d4359a841d86e2354b260d4f88a73 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 16 Jun 2024 10:49:14 +0200 Subject: [PATCH 576/980] DOC: address all comments, add sections for limited C API and header-only support [skip actions] [skip azp] [skip cirrus] --- doc/neps/roadmap.rst | 46 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst index 7ea322c51182..12614673b60d 100644 --- a/doc/neps/roadmap.rst +++ b/doc/neps/roadmap.rst @@ -49,7 +49,8 @@ towards support for newer SIMD instruction sets, like SVE on arm64, is ongoing. Other performance improvement ideas include: -- A better story around parallel execution. +- A better story around parallel execution (related is support for free-threaded + CPython, see further down). - Optimizations in individual functions. Furthermore we would like to improve the benchmarking system, in terms of coverage, @@ -84,8 +85,8 @@ Extensibility We aim to continue making it easier to extend NumPy. The primary topic here is to improve the dtype system - see for example :ref:`NEP41` and related NEPs linked from it. In NumPy 2.0, a `new C API for user-defined dtypes `__ -was made public. We aim -to encourage its usage and improve this API further. +was made public. We aim to encourage its usage and improve this API further, +including support for writing a dtype in Python. Ideas for new dtypes that may be developed outside of the main NumPy repository first, and that could potentially be upstreamed into NumPy later, include: @@ -121,6 +122,8 @@ We intend to write a NEP covering the support levels we provide and what is required for a platform to move to a higher tier of support, similar to `PEP 11 `__. +Support for free-threaded CPython +````````````````````````````````` CPython 3.13 will be the first release to offer a free-threaded build (i.e., a CPython build with the GIL disabled). Work is in progress to support this well in NumPy. After that is stable and complete, there may be opportunities to @@ -131,14 +134,37 @@ Binary size reduction ````````````````````` The number of downloads of NumPy from PyPI and other platforms continues to increase - as of May 2024 we're at >200 million downloads/month from PyPI -alone). Reducing the size of an installed NumPy package has many benefits: +alone. Reducing the size of an installed NumPy package has many benefits: faster installs, lower disk space usage, smaller load on PyPI, less -environmental impact, easier to fit more packages on top of NumPy into an AWS -Lambda layer, lower latency for Pyodide users, and so on. 
We aim for -significant reductions, as well as making it easier for end users and packagers -to produce smaller custom builds (e.g., we added support for stripping tests -before 2.1.0). See `gh-25737 `__ -for details. +environmental impact, easier to fit more packages on top of NumPy in +resource-constrained environments and platforms like AWS Lambda, lower latency +for Pyodide users, and so on. We aim for significant reductions, as well as +making it easier for end users and packagers to produce smaller custom builds +(e.g., we added support for stripping tests before 2.1.0). See +`gh-25737 `__ for details. + +Support use of CPython's limited C API +`````````````````````````````````````` +Use of the CPython limited C API, allowing producing ``abi3`` wheels that use +the stable ABI and are hence independent of CPython feature releases, has +benefits for both downstream packages that use NumPy's C API and for NumPy +itself. In NumPy 2.0, work was done to enable using the limited C API with +the Cython support in NumPy (see `gh-25531 Date: Sun, 16 Jun 2024 09:40:06 +0000 Subject: [PATCH 577/980] TST: Skip an f2py module test on Windows --- numpy/f2py/tests/test_regression.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 58d6037e61ef..e11ed1a0efa3 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -1,5 +1,6 @@ import os import pytest +import platform import numpy as np import numpy.testing as npt @@ -123,6 +124,7 @@ def test_gh26623(): @pytest.mark.slow +@pytest.mark.skipif(platform.system() not in ['Linux', 'Darwin'], reason='Unsupported on this platform for now') def test_gh25784(): # Compile dubious file using passed flags try: From 4b6111b3f0bd0ac7ad2312d5abfc790ed6dff949 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 16 Jun 2024 16:17:05 -0600 Subject: [PATCH 578/980] MAINT: Update main after 2.0.0 release. --- .mailmap | 70 +- doc/changelog/2.0.0-changelog.rst | 1304 ++++++++++++++++++++++++++++ doc/source/release/2.0.0-notes.rst | 256 +++--- 3 files changed, 1504 insertions(+), 126 deletions(-) create mode 100644 doc/changelog/2.0.0-changelog.rst diff --git a/.mailmap b/.mailmap index 2d910fe98fea..a3b1a3a5856f 100644 --- a/.mailmap +++ b/.mailmap @@ -7,8 +7,8 @@ # # This file is up-to-date if the command git log --format="%aN <%aE>" | sort -u # gives no duplicates. 
- @8bitmp3 <19637339+8bitmp3@users.noreply.github.com> +@Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> @DWesl <22566757+DWesl@users.noreply.github.com> @Endolith @GalaxySnail @@ -18,33 +18,49 @@ @Lisa <34400837+lyzlisa@users.noreply.github.com> @Patrick <39380924+xamm@users.noreply.github.com> @Scian <65375075+hoony6134@users.noreply.github.com> +@Searchingdays @amagicmuffin <2014wcheng@gmail.com> @code-review-doctor +@cook-1229 <70235336+cook-1229@users.noreply.github.com> @dg3192 <113710955+dg3192@users.noreply.github.com> +@ellaella12 +@ellaella12 <120079323+ellaella12@users.noreply.github.com> @h-vetinari @h6197627 <44726212+h6197627@users.noreply.github.com> @jbCodeHub @juztamau5 @legoffant <58195095+legoffant@users.noreply.github.com> +@liang3zy22 <35164941+liang3zy22@users.noreply.github.com> @luzpaz @luzpaz +@matoro +@mcp292 +@mgunyho <20118130+mgunyho@users.noreply.github.com> +@msavinash <73682349+msavinash@users.noreply.github.com> +@mykykh <49101849+mykykh@users.noreply.github.com> @partev @pkubaj @pmvz +@pojaghi <36278217+pojaghi@users.noreply.github.com> @pratiklp00 @sfolje0 @spacescientist +@stefan6419846 +@stefan6419846 <96178532+stefan6419846@users.noreply.github.com> @tajbinjohn @tautaus +@undermyumbrella1 @xoviat <49173759+xoviat@users.noreply.github.com> @xoviat <49173759+xoviat@users.noreply.github.com> @yan-wyb @yetanothercheer Aaron Baecker +Adrin Jalali Arun Kota Arun Kota Arun Kota Aarthi Agurusa Adarsh Singh ADARSH SINGH +Aditi Saluja <136131452+salujaditi14@users.noreply.github.com> Andrei Batomunkuev Ajay DS Ajay DS @@ -89,6 +105,8 @@ Andrea Bianchi Andrea Bianchi andrea-bia Ankit Dwivedi Ankit Dwivedi +Ankur Singh +Ankur Singh <98346896+ankur0904@users.noreply.github.com> Amir Sarabadani Anas Khan Anatoly Techtonik @@ -126,6 +144,7 @@ Bhargav V <12525622+brpy@users.noreply.github.com> Bas van Beek <43369155+BvB93@users.noreply.github.com> Behzad Nouri Ben Nathanson +Ben Woodruff Benjamin Root Benjamin Root weathergod Bernardt Duvenhage @@ -173,6 +192,7 @@ Chun-Wei Chen Chunlin Fang Chunlin Fang <834352945@qq.com> Chunlin Fang +Cobalt Yang Colin Snyder <8csnyder@gmail.com> <47012605+colinsnyder@users.noreply.github.com> Constanza Fierro Dahyun Kim @@ -205,24 +225,30 @@ Derek Homeier Derek Homeier Derrick Williams Devin Shanahan +Daval Parmar <53395856+DhavalParmar61@users.noreply.github.com> Digya Acharya Dima Pasechnik Dima Pasechnik Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Ding Liu Ding Liu +D.J. Ramones +D.J. Ramones <50655786+djramones@users.noreply.github.com> Dmitriy Shalyga Dmitry Belov Dustan Levenstein <43019642+dustanlevenstein@users.noreply.github.com> Dylan Cutler Ed Schofield Egor Zindy +Élie Goudout +Élie Goudout <114467748+eliegoudout@users.noreply.github.com> Elliott M. Forney Erik M. Bray Erik M. Bray Erik M. Bray Eric Fode Eric Fode Eric Quintero +Eric Xie <161030123+EngineerEricXie@users.noreply.github.com> Ernest N. Mamikonyan Eskild Eriksen Eskild Eriksen <42120229+iameskild@users.noreply.github.com> @@ -277,13 +303,18 @@ Gerhard Hobler Giannis Zapantis Guillaume Peillex Jack J. Woehr +Jacob M. 
Casey Jaime Fernandez Jaime Fernandez Jaime Fernandez Jake Close +Jake VanderPlas +Jake VanderPlas +Jake VanderPlas Jakob Jakobson Jakob Jakobson <43045863+jakobjakobson13@users.noreply.github.com> James Bourbeau +James Oliver <46758370+jamesoliverh@users.noreply.github.com> James Webber Jamie Macey Jan Schlüter @@ -351,6 +382,8 @@ Khaled Ben Abdallah Okuda Kiko Correoso kikocorreoso Kiko Correoso kikocorreoso Konrad Kapp +Kristoffer Pedersen +Kristoffer Pedersen Kriti Singh Kmol Yuan Kumud Lakara <55556183+kumudlakara@users.noreply.github.com> @@ -366,14 +399,18 @@ Licht Takeuchi Lorenzo Mammana Lillian Zha Lillian Zha +Linus Sommer +Linus Sommer <95619282+linus-md@users.noreply.github.com> Lu Yun Chi <32014765+LuYunChi@users.noreply.github.com> Luis Pedro Coelho +Lucas Colley Luke Zoltan Kelley Madhulika Jain Chambers <53166646+madhulikajc@users.noreply.github.com> Magdalena Proszewska Magdalena Proszewska <38814059+mproszewska@users.noreply.github.com> Malik Idrees Hasan Khan <77000356+MalikIdreesHasanKhan@users.noreply.github.com>C Manoj Kumar +Marcel Loose Marcin Podhajski <36967358+m-podhajski@users.noreply.github.com> Margret Pax Margret Pax <13646646+paxcodes@users.noreply.github.com> @@ -386,6 +423,8 @@ Mark Wiebe Mark Wiebe Mars Lee Mars Lee <46167686+MarsBarLee@users.noreply.github.com> +Marten van Kerkwijk +Marten van Kerkwijk Martin Goodson Martin Reinecke Martin Teichmann @@ -395,18 +434,24 @@ Matheus Vieira Portela Matheus Santana Patriarca Mathieu Lamarre Matías Ríos +Matt Hancock Matt Ord Matt Ord <55235095+Matt-Ord@users.noreply.github.com> -Matt Hancock +Matt Thompson +Matthias Bussonnier Martino Sorbaro Márton Gunyhó Mattheus Ueckermann Matthew Barber Matthew Harrigan Matthias Bussonnier +Matthias Schaufelberger +Matthias Schaufelberger <45293673+maisevector@users.noreply.github.com> Matthieu Darbois Matti Picus Matti Picus mattip +Maya Anderson +Maya Anderson <63074550+andersonm-ibm@users.noreply.github.com> Maximilian Konrad Melissa Weber Mendonça Melissa Weber Mendonça @@ -430,7 +475,9 @@ Miles Cranmer Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> +Mohaned Qunaibit Muhammad Kasim +Muhammed Muhsin Mukulika Pahari Mukulika Pahari <60316606+Mukulikaa@users.noreply.github.com> Munira Alduraibi @@ -445,24 +492,30 @@ Nicolas Scheffer Nicolas Scheffer nickdg Nicholas McKibben Nick Minkyu Lee fivemok <9394929+fivemok@users.noreply.github.com> +Nyakku Shigure Norwid Behrnd Norwid Behrnd -Oliver Eberle Oleksiy Kononenko Oleksiy Kononenko <35204136+oleksiyskononenko@users.noreply.github.com> +Oliver Eberle +Olivier Barthelemy +Olivier Mattelaer Omar Ali Omid Rajaei Omid Rajaei <89868505+rajaeinet@users.noreply.github.com> Ondřej Čertík Óscar Villellas Guillén +Pablo Losada +Pablo Losada <48804010+TheHawz@users.noreply.github.com> Panos Mavrogiorgos Pantelis Antonoudiou Pantelis Antonoudiou Pat Miller patmiller Paul Ivanov Paul Ivanov -Paul YS Lee Paul Paul Jacobson +Paul Reece +Paul YS Lee Paul Pey Lian Lim Pey Lian Lim <2090236+pllim@users.noreply.github.com> Pearu Peterson @@ -488,6 +541,7 @@ Rakesh Vasudevan Ralf Gommers Ralf Gommers rgommers Rehas Sachdeva +Richard Howe <45905457+rmhowe425@users.noreply.github.com> Ritta Narita Riya Sharma Robert Kern @@ -514,6 +568,7 @@ Sam Radhakrishnan = <=> # committed without an email address Samesh Lakhotia Samesh Lakhotia <43701530+sameshl@users.noreply.github.com> Sami Salonen +Samuel Albanie Sanchez Gonzalez Alvaro Sanya Sinha <83265366+ssanya942@users.noreply.github.com> 
Saransh Chopra @@ -521,6 +576,8 @@ Saullo Giovani Saurabh Mehta Sayantika Banik Schrijvers Luc +Sean Cheah +Sean Cheah <67928790+thalassemia@users.noreply.github.com> Sebastian Berg Sebastian Schleehauf Serge Guelton @@ -594,9 +651,12 @@ William Spotz Wim Glenn Wojtek Ruszczewski Wojciech Rzadkowski <33913808+wrzadkow@users.noreply.github.com> +Xiangyi Wang Yamada Fuyuka Yang Hau Yang Hau +Yash Pethe +Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra Yashasvi Misra <54177363+yashasvimisra2798@users.noreply.github.com> Yogesh Raisinghani <46864533+raisinghanii@users.noreply.github.com> @@ -607,6 +667,7 @@ Yuji Kanagawa Yuki K Yury Kirienko Zac Hatfield-Dodds +Zach Brugh <111941670+zachbrugh@users.noreply.github.com> Zé Vinícius Zhang Na Zixu Zhao @@ -614,4 +675,5 @@ Ziyan Zhou Zieji Pohz Zieji Pohz <8103276+zjpoh@users.noreply.github.com> Zolboo Erdenebaatar +Zolisa Bleki Zolisa Bleki <44142765+zoj613@users.noreply.github.com> diff --git a/doc/changelog/2.0.0-changelog.rst b/doc/changelog/2.0.0-changelog.rst new file mode 100644 index 000000000000..78e250f508d9 --- /dev/null +++ b/doc/changelog/2.0.0-changelog.rst @@ -0,0 +1,1304 @@ + +Contributors +============ + +A total of 212 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* @Algorithmist-Girl + +* @DWesl +* @Illviljan +* @Searchingdays +* @ellaella12 + +* @liang3zy22 + +* @matoro + +* @mcp292 + +* @mgunyho + +* @msavinash + +* @mykykh + +* @pojaghi + +* @pratiklp00 + +* @stefan6419846 + +* @undermyumbrella1 + +* Aaron Meurer +* Aditi Saluja + +* Adrin Jalali + +* Agriya Khetarpal + +* Albert Steppi + +* Alex Cabrera + +* Alexander Grund +* Andrea Bianchi + +* Andreas Florath + +* Andrew Ardill + +* Andrew Ho + +* Andrew Nelson +* Andrey Rybakov + +* Ankur Singh + +* Anton Prosekin + +* Antony Lee +* Arun Kannawadi + +* Bas van Beek +* Ben Woodruff + +* Bharat Raghunathan +* Bhavya Alekhya + +* Brandon Smith + +* Brian Walshe + +* Brigitta Sipőcz +* Brock Mendel +* Carl Meyer + +* Charles Bousseau + +* Charles Harris +* Chris Sidebottom +* Christian Lorentzen +* Christian Veenhuis +* Christoph Reiter +* Christopher Sidebottom +* Clément Robert +* Cédric Hannotier +* Cobalt Yang + +* Gonçalo Bárias + +* D.J. Ramones + +* DanShatford + +* Daniel Li + +* Daniel Vanzo +* Daval Parmar +* Developer-Ecosystem-Engineering +* Dhruv Rawat + +* Dimitri Papadopoulos Orfanos +* Edward E +* Edward Yang + +* Eisuke Kawashima + +* Eliah Kagan + +* Élie Goudout + +* Elliott Sales de Andrade +* Emil Olszewski + +* Emily Hunt + +* Éric Piel + +* Eric Wieser +* Eric Xie + +* Even Rouault + +* Evgeni Burovski +* Filipe Laíns + +* Francisco Sousa + +* Ganesh Kathiresan +* Gonçalo Bárias + +* Gonzalo Tornaría + +* Hans Meine +* Heberto Mayorquin + +* Heinz-Alexander Fuetterer + +* Hood Chatham +* Hugo van Kemenade +* Ivan A. Melnikov + +* Jacob M. 
Casey + +* Jake Lishman + +* Jake VanderPlas +* James Oliver + +* Jan Wassenberg + +* Janukan Sivajeyan + +* Johann Rohwer + +* Johannes Kaisinger + +* John Muradeli + +* Joris Van den Bossche +* Justus Magin +* Jyn Spring 琴春 +* Kai Striega +* Kevin Sheppard +* Kevin Wu + +* Khawaja Junaid + +* Kit Lee + +* Kristian Minchev + +* Kristoffer Pedersen + +* Kuan-Wei Chiu + +* Lane Votapka + +* Larry Bradley +* Leo Singer +* Liang Yan + +* Linus Sommer + +* Logan Thomas +* Lucas Colley + +* Luiz Eduardo Amaral + +* Lukas Geiger +* Lysandros Nikolaou + +* Maanas Arora + +* Maharshi Basu + +* Mahder Gebremedhin + +* Marcel Bargull + +* Marcel Loose + +* Mark Mentovai + +* Mark Ryan + +* Marten van Kerkwijk +* Mateusz Sokół +* Matt Haberland +* Matt Thompson + +* Matthew Barber +* Matthew Thompson + +* Matthias Bussonnier +* Matthias Koeppe +* Matthias Schaufelberger + +* Matti Picus +* Maxwell Aladago +* Maya Anderson + +* Melissa Weber Mendonça +* Meng Xiangzhuo + +* Michael Kiffer +* Miki Watanabe (渡邉 美希) +* Milan Curcic + +* Miles Cranmer +* Miro Hrončok + +* Mohamed E. BRIKI + +* Mohaned Qunaibit + +* Mohit Kumar + +* Muhammed Muhsin + +* Mukulika Pahari +* Munira Alduraibi + +* Namami Shanker +* Nathan Goldbaum +* Nyakku Shigure + +* Ola x Nilsson + +* Olivier Mattelaer + +* Olivier Grisel +* Omid Rajaei +* Pablo Losada + +* Pamphile Roy +* Paul Reece + +* Pedro Kaj Kjellerup Nacht + +* Peiyuan Liu + +* Peter Hawkins +* Pierre +* Pieter Eendebak +* Quentin Barthélemy + +* Raghuveer Devulapalli +* Ralf Gommers +* Randy Eckenrode + +* Raquel Braunschweig + +* Richard Howe + +* Robert Kern +* Rohit Goswami +* Romain Geissler + +* Ronald van Elburg + +* Ross Barnowski +* Sam James + +* Sam Van Kooten + +* Samuel Albanie + +* Sarah Wang + +* Sarah Zwiep + +* Sarah-Yifei-Wang + +* Sarthak Dawar + +* Sayantika Banik +* Sayed Adel +* Sean Cheah + +* Sebastian Berg +* Serge Guelton +* Shalini Roy + +* Shen Zhou +* Shubhal Gupta + +* Stefan van der Walt +* Stefano Rivera + +* Takumasa N. + +* Taras Tsugrii +* Thomas A Caswell +* Thomas Grainger + +* Thomas Li +* Tim Hoffmann +* Tim Paine + +* Timo Röhling + +* Trey Woodlief + +* Tyler Reddy +* Victor Tang + +* Vladimir Fokow + +* Warren Weckesser +* Warrick Ball + +* Will Ayd +* William Andrea + +* William Ayd + +* Xiangyi Wang + +* Yash Pethe + +* Yuki K +* Zach Brugh + +* Zach Rottman + +* Zolisa Bleki + +Pull requests merged +==================== + +A total of 1078 pull requests were merged for this release. + +* `#15457 `__: BUG: Adds support for array parameter declaration in fortran... +* `#21199 `__: ENH: expose datetime.c functions to cython +* `#21429 `__: ENH: Added ``bitwise_count`` UFuncs +* `#21760 `__: MAINT: Make output of Polynomial representations consistent +* `#21975 `__: ENH: Add binding for random pyx files +* `#22449 `__: ENH: Update scalar representations as per NEP 51 +* `#22657 `__: BUG: Fix common block handling in f2py +* `#23096 `__: BLD, SIMD: The meson CPU dispatcher implementation +* `#23282 `__: BUG: Fix data stmt handling for complex values in f2py +* `#23347 `__: DOC: changed formula in random.Generator.pareto doc #22701 +* `#23351 `__: ENH: Use AVX512-FP16 SVML content for float16 umath functions +* `#23508 `__: DOC: Update scalar types in ``Py{TYPE}ArrType_Type`` +* `#23537 `__: NEP: add NEP on a Python API cleanup for NumPy 2.0 +* `#23611 `__: DOC: Make input/output type consistent and add more examples... 
+* `#23729 `__: ENH: allow int sequences as shape arguments in numpy.memmap +* `#23762 `__: API: Add .mT attribute for arrays +* `#23764 `__: CI,TYP: Bump mypy to 1.4.1 +* `#23780 `__: BUG: Create complex scalars from real and imaginary parts +* `#23785 `__: DOC: tweak NEP 50 examples +* `#23787 `__: DOC: Add brief note about custom converters to genfromtext. +* `#23789 `__: ENH: add copy parameter for api.reshape function +* `#23795 `__: Use tuple instead of string for (LOWER|UPPER)_TABLEs. +* `#23804 `__: REL: Prepare main for NumPy 2.0.0 development +* `#23809 `__: MAINT: removing the deprecated submodule +* `#23810 `__: MAINT: Bump github/codeql-action from 2.3.3 to 2.3.4 +* `#23813 `__: DOC: Clean up errstate handling in our tests +* `#23814 `__: DOC: switching to use the plot directive +* `#23817 `__: MAINT: Bump github/codeql-action from 2.3.4 to 2.3.5 +* `#23819 `__: BUG: Doctest doesn't have a SHOW_WARNINGS directive. +* `#23822 `__: DOC: Added ``pathlib.Path`` where applicable +* `#23825 `__: BLD: use cython3 for one CI run +* `#23826 `__: MAINT: io.open → open +* `#23828 `__: MAINT: fix typos found by codespell +* `#23830 `__: API: deprecate compat and selected lib utils +* `#23831 `__: DOC: use float64 instead of float128 in docstring +* `#23832 `__: REL: Prepare for the NumPy 1.25.0rc1 release +* `#23834 `__: MAINT: IOError → OSError +* `#23835 `__: MAINT: Update versioneer: 0.26 → 0.28 +* `#23836 `__: DOC: update distutils migration guide +* `#23838 `__: BLD: switch to meson-python as the default build backend +* `#23840 `__: REL: Prepare for the NumPy 1.25.0rc1 release +* `#23841 `__: MAINT: Bump pypa/cibuildwheel from 2.12.3 to 2.13.0 +* `#23843 `__: MAINT: Update download-wheels +* `#23845 `__: MAINT: Do not call PyArray_Item_XDECREF in PyArray_Pack +* `#23846 `__: TST: Add tests for np.argsort +* `#23847 `__: MAINT: const correctness for the generalized ufunc C API +* `#23850 `__: MAINT: Bump actions/dependency-review-action from 3.0.4 to 3.0.6 +* `#23851 `__: CI: Update cirrus nightly wheel upload token +* `#23852 `__: CI: Change "weekly" to "nightly" in cirrus +* `#23854 `__: DOC:removed examples which refers to a non existent function +* `#23855 `__: BUG: make use of locals() in a comprehension fully compatible... +* `#23856 `__: CI: bump nightly upload frequency to twice a week +* `#23857 `__: BUG: fix cron syntax +* `#23859 `__: DOC: Note that f2py isn't consiered safe +* `#23861 `__: MAINT: Remove all "NumPy 2" as that should be main now +* `#23865 `__: MAINT: Bump github/codeql-action from 2.3.5 to 2.3.6 +* `#23868 `__: DOC: Fix ``NPY_OUT_ARRAY`` to ``NPY_ARRAY_OUT_ARRAY`` in how-to-extend... +* `#23871 `__: NEP: Fix NEP 53 file format and minor formatting issue +* `#23878 `__: TST: Add tests for np.argsort +* `#23881 `__: ENH: Add array API standard v2022.12 support to numpy.array_api +* `#23887 `__: TYP,DOC: Annotate and document the ``metadata`` parameter of... +* `#23897 `__: DOC: Fix transpose() description with a correct reference to... +* `#23898 `__: API: Change string to bool conversions to be consistent with... +* `#23902 `__: MAINT: Use ``--allow-downgrade`` option for rtools. 
+* `#23906 `__: MAINT: Use vectorcall for call forwarding in methods +* `#23907 `__: MAINT: Bump github/codeql-action from 2.3.6 to 2.13.4 +* `#23908 `__: MAINT: Bump actions/checkout from 3.5.2 to 3.5.3 +* `#23911 `__: BUG: Allow np.info on non-hashable objects with a dtype +* `#23912 `__: API: Switch to NEP 50 behavior by default +* `#23913 `__: ENH: let zeros, empty, and empty_like accept dtype classes +* `#23914 `__: DOC: Fix reference ``ComplexWarning`` in release note +* `#23915 `__: DOC: Update development_environment doc. +* `#23916 `__: ABI: Bump C-ABI to 2 but accept older NumPy if compiled against... +* `#23917 `__: ENH: Speed up boolean indexing of flatiters +* `#23918 `__: DOC: Fix references to ``AxisError`` in docstrings +* `#23919 `__: API: Remove interrupt handling and ``noprefix.h`` +* `#23920 `__: DOC: fix DOI on badge +* `#23921 `__: DEP: Expire the PyDataMem_SetEventHook deprecation and remove... +* `#23922 `__: API: Remove ``seterrobj``/``geterrobj``/``extobj=`` and related C-API... +* `#23923 `__: BUG:Fix for call to 'vec_st' is ambiguous +* `#23924 `__: MAINT: Bump pypa/cibuildwheel from 2.13.0 to 2.13.1 +* `#23925 `__: MAINT: Disable SIMD version of float64 sin and cos +* `#23927 `__: DOC: Fix references to ``r_`` in ``mr_class`` docstring +* `#23935 `__: MAINT: Update to latest x86-simd-sort +* `#23936 `__: ENH,API: Make the errstate/extobj a contextvar +* `#23941 `__: BUG: Fix NpyIter cleanup in einsum error path +* `#23942 `__: BUG: Fixup for win64 fwrite issue +* `#23943 `__: DOC: Update required C++ version in building.rst (and copy-edit). +* `#23944 `__: DOC: const correctness in PyUFunc_FromFuncAndData... functions +* `#23950 `__: MAINT: Upgrade install-rtools version +* `#23952 `__: Replace a divider with a colon for _monotonicity +* `#23953 `__: BUG: Fix AVX2 intrinsic npyv_store2_till_s64 on MSVC > 19.29 +* `#23960 `__: DOC: adding release note for 23809 +* `#23961 `__: BLD: update pypy in CI to latest version +* `#23962 `__: TEST: change subprocess call to capture stderr too +* `#23964 `__: MAINT: Remove references to removed functions +* `#23965 `__: MAINT: Simplify codespaces conda environment activation +* `#23967 `__: DOC: Fix references to ``trimseq`` in docstrings +* `#23969 `__: MAINT: Update main after 1.25.0 release. +* `#23971 `__: BUG: Fix private procedures in ``f2py`` modules +* `#23977 `__: MAINT: pipes.quote → shlex.quote +* `#23979 `__: MAINT: Fix typos found by codespell +* `#23980 `__: MAINT: use ``yield from`` where applicable +* `#23982 `__: BLD: Port long double identification to C for meson +* `#23983 `__: BLD: change file extension for installed static libraries back... +* `#23984 `__: BLD: improve handling of CBLAS, add ``-Duse-ilp64`` build option +* `#23985 `__: Revert "TST: disable longdouble string/print tests on Linux aarch64" +* `#23990 `__: DOC: Fix np.vectorize Doc +* `#23991 `__: CI: BLD: build wheels and fix test suite for Python 3.12 +* `#23995 `__: MAINT: Do not use ``--side-by-side`` choco option +* `#23997 `__: MAINT: make naming of C aliases for dtype classes consistent +* `#23998 `__: DEP: Expire ``set_numeric_ops`` and the corresponding C functions... 
+* `#24004 `__: BUG: Fix reduction ``return NULL`` to be ``goto fail`` +* `#24006 `__: ENH: Use high accuracy SVML for double precision umath functions +* `#24009 `__: DOC: Update __array__ description +* `#24011 `__: API: Remove ``old_defines.h`` (part of NumPy 1.7 deprecated C-API) +* `#24012 `__: MAINT: Remove hardcoded f2py numeric/numarray compatibility switch +* `#24014 `__: BUG: Make errstate decorator compatible with threading +* `#24017 `__: MAINT: Further cleanups for errstate +* `#24018 `__: ENH: Use Highway's VQSort on AArch64 +* `#24020 `__: Fix typo in random sampling documentation +* `#24021 `__: BUG: Fix error message for nanargmin/max of empty sequence +* `#24025 `__: TST: improve test for Cholesky decomposition +* `#24026 `__: DOC: Add note for installing ``asv`` library to run benchmark tests +* `#24027 `__: DOC: Fix reference to ``__array_struct__`` in ``arrays.interface.rst`` +* `#24029 `__: DOC: Add link to NEPs in top navbar +* `#24030 `__: BUG: Avoid undefined behavior in array.astype() +* `#24031 `__: BUG: Ensure ``__array_ufunc__`` works without any kwargs passed +* `#24046 `__: DOC: Fix reference to python module ``string`` in ``routines.char.rst`` +* `#24047 `__: DOC: Fix reference to ``array()`` in release note +* `#24049 `__: MAINT: Update main after 1.24.4 release. +* `#24051 `__: MAINT: Pin urllib3 to avoid anaconda-client bug. +* `#24052 `__: MAINT: Bump ossf/scorecard-action from 2.1.3 to 2.2.0 +* `#24053 `__: ENH: Adopt new macOS Accelerate BLAS/LAPACK Interfaces, including... +* `#24054 `__: BUG: Multiply or divides using SIMD without a full vector can... +* `#24058 `__: DOC: Remove references to ``PyArray_SetNumericOps`` and ``PyArray_GetNumericOps`` in release note +* `#24059 `__: MAINT: Remove ability to enter errstate twice (sequentially) +* `#24060 `__: BLD: use ``-ftrapping-math`` with Clang on macOS in Meson build +* `#24061 `__: DOC: PR adds casting option's description to Glossary and ``numpy.concatenate``. +* `#24068 `__: DOC: Add NpzFile class documentation. +* `#24071 `__: MAINT: Overwrite previous wheels when uploading to anaconda. +* `#24073 `__: API: expose PyUFunc_GiveFloatingpointErrors in the dtype API +* `#24075 `__: DOC: Add missing indentation in ``ma.mT`` docstring +* `#24076 `__: DOC: Fix incorrect reST markups in ``numpy.void`` docstring +* `#24077 `__: DOC: Fix documentation for ``ndarray.mT`` +* `#24082 `__: MAINT: testing for IS_MUSL closes #24074 +* `#24083 `__: ENH: Add ``spin`` command ``gdb``; customize ``docs`` and ``test`` +* `#24085 `__: ENH: Replace npy complex structs with native complex types +* `#24087 `__: NEP: Mark NEP 51 as accepted +* `#24090 `__: MAINT: print error from verify_c_api_version.py failing +* `#24092 `__: TST: Pin pydantic<2 in Pyodide workflow +* `#24094 `__: ENH: Added compiler ``args`` and ``link_args`` +* `#24097 `__: DOC: Add reference to dtype parameter in NDArray +* `#24098 `__: ENH: raise early exception if 0d array is used in np.cross +* `#24100 `__: DOC: Clarify correlate function definition +* `#24101 `__: BUG: Fix empty structured array dtype alignment +* `#24102 `__: DOC: fix rst formatting in datetime C API docs +* `#24103 `__: BUG: Only replace dtype temporarily if dimensions changed +* `#24105 `__: DOC: Correctly use savez_compressed in examples for that function. 
+* `#24107 `__: ENH: Add ``spin benchmark`` command +* `#24112 `__: DOC: Fix warnings and errors caused by reference/c-api/datetimes +* `#24113 `__: DOC: Fix the reference in the docstring of numpy.meshgrid +* `#24123 `__: BUG: ``spin gdb``: launch Python directly so that breakpoint... +* `#24124 `__: MAINT: Bump actions/setup-node from 3.6.0 to 3.7.0 +* `#24125 `__: MAINT: import numpy as ``np`` in ``spin ipython`` +* `#24126 `__: ENH: add mean keyword to std and var +* `#24130 `__: DOC: Fix warning for PyArray_MapIterNew. +* `#24133 `__: DOC: Update python as glue doc. +* `#24135 `__: DOC: Fix string types in ``arrays.dtypes.rst`` +* `#24138 `__: DOC: add NEP 54 on SIMD - moving to C++ and adopting Highway... +* `#24142 `__: ENH: Allow NEP 42 dtypes to use np.save and np.load +* `#24143 `__: Corrected a grammatical error in doc/source/user/absolute_beginners.rst +* `#24144 `__: API: Remove several niche objects for numpy 2.0 python API cleanup +* `#24149 `__: MAINT: Update main after 1.25.1 release. +* `#24150 `__: BUG: properly handle negative indexes in ufunc_at fast path +* `#24152 `__: DOC: Fix reference warning for recarray. +* `#24153 `__: BLD, TST: refactor test to use meson not setup.py, improve spin... +* `#24154 `__: API: deprecate undocumented functions +* `#24158 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#24159 `__: MAINT: Bump pypa/cibuildwheel from 2.13.1 to 2.14.0 +* `#24160 `__: MAINT: Update cibuildwheel to 2.14.0 +* `#24161 `__: BUG: histogram small range robust +* `#24162 `__: ENH: Improve clang-cl compliance +* `#24163 `__: MAINT: update pytest, hypothesis, pytest-cov, and pytz in test_requirements.txt +* `#24172 `__: DOC: Add note that NEP 29 is superseded by SPEC 0 +* `#24173 `__: MAINT: Bump actions/setup-python from 4.6.1 to 4.7.0 +* `#24176 `__: MAINT: do not use copyswap in flatiter internals +* `#24178 `__: BUG: PyObject_IsTrue and PyObject_Not error handling in setflags +* `#24187 `__: BUG: Fix the signature for np.array_api.take +* `#24188 `__: BUG: fix choose refcount leak +* `#24191 `__: BUG: array2string does not add signs for positive integers. Fixes... +* `#24193 `__: DEP: Remove datetime64 deprecation warning when constructing... +* `#24196 `__: MAINT: Remove versioneer +* `#24199 `__: BLD: update OpenBLAS to an intermediate commit +* `#24201 `__: ENH: Vectorize np.partition and np.argpartition using AVX-512 +* `#24202 `__: MAINT: Bump pypa/cibuildwheel from 2.14.0 to 2.14.1 +* `#24204 `__: BUG: random: Fix check for both uniform variates being 0 in random_beta() +* `#24205 `__: MAINT: Fix new or residual typos found by codespell +* `#24206 `__: TST: convert remaining setup.py tests to meson instead +* `#24208 `__: CI: Add a sanitizer CI job +* `#24211 `__: BUG: Fix reference count leak in str(scalar). +* `#24212 `__: BUG: fix invalid function pointer conversion error +* `#24214 `__: ENH: Create helper for conversion to arrays +* `#24219 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#24220 `__: BUG: random: Fix generation of nan by dirichlet. +* `#24222 `__: BUG: Fix cblas detection for the wheel builds +* `#24223 `__: BUG: Fix undefined behavior in complex pow(). +* `#24224 `__: API: Make 64bit default integer on 64bit windows +* `#24225 `__: DOC: Fix doc build warning for random. 
+* `#24227 `__: DOC: Update year in doc/source/conf.py to 2023
+* `#24228 `__: DOC: fix some double includes in f2py.getting-started.rst
+* `#24231 `__: API: expose NPY_DTYPE macro in the dtype API
+* `#24235 `__: BLD: only install the ``f2py`` command, not ``f2py3`` or ``f2py3.X``
+* `#24236 `__: BLD: update requirements to use cython>3.0
+* `#24237 `__: BUG: Added missing PyObject_IsTrue error check (return -1) #24177
+* `#24238 `__: BLD/CI: re-enable ILP64 usage and PyPy job in Azure
+* `#24240 `__: BUG: Fix C types in scalartypes
+* `#24248 `__: BUG: Factor out slow ``getenv`` call used for memory policy warning
+* `#24249 `__: TST: enable test that checks for ``numpy.array_api`` entry point
+* `#24250 `__: CI: Test NumPy against OpenBLAS weekly builds
+* `#24254 `__: ENH: add weighted quantile for inverted_cdf
+* `#24256 `__: DEV: Use ``exec_lines`` and not profile dir for ``spin ipython``
+* `#24257 `__: BUG: Add size check for threaded array assignment
+* `#24258 `__: DEP: Remove PyArray complex macros and move PyArray_MIN/MAX
+* `#24262 `__: DOC: Fix links to random.Generator methods in quickstart
+* `#24263 `__: BUG: Fix use of renamed variable.
+* `#24267 `__: BUG: random: Fix generation of nan by beta.
+* `#24268 `__: CI: Enable running intel_spr_sde_test with Intel SDE
+* `#24270 `__: BUG: Move legacy check for void printing
+* `#24271 `__: API: Remove legacy-inner-loop-selector
+* `#24272 `__: BUG: do not modify the input to ufunc_at
+* `#24273 `__: TYP: Trim down the ``_NestedSequence.__getitem__`` signature
+* `#24276 `__: DOC: Remove ``np.source`` and ``np.lookfor``
+* `#24277 `__: DOC: inconsistency between doc and code
+* `#24278 `__: DOC: fix a couple typos and rst formatting errors in NEP 0053
+* `#24279 `__: CI/BLD: fail by default if no BLAS/LAPACK, add 32-bit Python...
+* `#24281 `__: BUG: Further fixes to indexing loop and added tests
+* `#24285 `__: CI: correct URL in cirrus.star
+* `#24286 `__: CI: only build cirrus wheels when requested
+* `#24287 `__: DOC: Fix some incorrectly formatted documents
+* `#24289 `__: DOC: update code comment about ``NPY_USE_BLAS_ILP64`` environment...
+* `#24291 `__: CI: improve test suite runtime via pytest parallelism and disabling...
+* `#24298 `__: DOC: update stride reference doc.
+* `#24299 `__: BUG: Fix assumed length f2py regression
+* `#24303 `__: CI: apt update before apt install on cirrus
+* `#24304 `__: MAINT: Update main after 1.25.2 release.
+* `#24307 `__: CI: Cannot run ``intel_spr_sde_test`` on Intel SDE
+* `#24311 `__: BLD: update openblas to newer version
+* `#24312 `__: DEP: Finalize ``fastCopyAndTranspose`` and other old C-funcs/members...
+* `#24315 `__: DOC: Fix some links in documents
+* `#24316 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 1...
+* `#24320 `__: DOC: Remove promoting twitter in heading
+* `#24321 `__: DEP: Remove deprecated numpy.who
+* `#24331 `__: DOC: Fix reference warning for buffer.
+* `#24332 `__: DOC: Refactor description of ``PyArray_FromAny/PyArray_CheckFromAny``
+* `#24346 `__: DOC: use nightly dependencies [skip actions] [azp skip] [skip...
+* `#24347 `__: DOC: Update to release upcoming change document
+* `#24349 `__: BUG: polynomial: Handle non-array inputs in polynomial class...
+* `#24354 `__: TST: fix distutils tests for deprecations in recent setuptools...
+* `#24357 `__: API: Cleaning numpy/__init__.py and main namespace - Part 2 [NEP...
+* `#24358 `__: BUG: flexible inheritance segfault +* `#24360 `__: BENCH: fix small array det benchmark +* `#24362 `__: DOC: Add release notes for complex types changes in 2.x +* `#24364 `__: BUG: Remove #undef complex from npy_common.h +* `#24369 `__: ENH: assert_array_less should report max violations instead of... +* `#24370 `__: BLD: Clean up build for complex +* `#24371 `__: MAINT: Fix codespaces setup.sh script +* `#24372 `__: MAINT: Bump pypa/cibuildwheel from 2.14.1 to 2.15.0 +* `#24373 `__: MAINT: Bump actions/dependency-review-action from 3.0.6 to 3.0.7 +* `#24374 `__: MAINT: Update cibuildwheel for cirrus builds +* `#24376 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 3... +* `#24379 `__: ENH: Vendor meson for multi-target build support +* `#24380 `__: DOC: Remove extra indents in documents +* `#24383 `__: DOC: Fix reference warning for ABCPolyBase. +* `#24393 `__: DOC: Add missing sphinx reference roles +* `#24396 `__: BLD: vendor meson-python to make the Windows builds with SIMD... +* `#24400 `__: TST: revert xfail in ``test_umath.py`` +* `#24402 `__: DOC: Fix reference warning for routines.polynomials.rst. +* `#24407 `__: DOC: add warning to ``allclose``, revise "Notes" in ``isclose`` +* `#24412 `__: [BUG] Return value of use_hugepage in hugepage_setup +* `#24413 `__: BUG: cleanup warnings [skip azp][skip circle][skip travis][skip... +* `#24414 `__: BLD: allow specifying the long double format to avoid the runtime... +* `#24415 `__: MAINT: Bump actions/setup-node from 3.7.0 to 3.8.0 +* `#24419 `__: CI/BUG: add Python 3.12 CI job and fix ``numpy.distutils`` AttributeError +* `#24420 `__: ENH: Introduce tracer for enabled CPU targets on each optimized... +* `#24421 `__: DOC: Remove mixed capitalization +* `#24422 `__: MAINT: Remove unused variable ``i`` +* `#24423 `__: MAINT: Bump actions/dependency-review-action from 3.0.7 to 3.0.8 +* `#24425 `__: CI: only run cirrus on commit to PR [skip actions] +* `#24427 `__: MAINT: revert adding ``distutils`` and ``array_api`` to ``np.__all__`` +* `#24434 `__: DOC: Fix reference warning for types-and-structures.rst. +* `#24435 `__: CI: cirrus run linux_aarch64 first +* `#24437 `__: MAINT: Bump actions/setup-node from 3.8.0 to 3.8.1 +* `#24439 `__: MAINT: Pin upper version of sphinx. +* `#24442 `__: DOC: Fix reference warning in Arrayterator and recfunctions. +* `#24445 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 4... +* `#24452 `__: ENH: Add prefix to _ALIGN Macro +* `#24457 `__: MAINT: Upgrade to spin 0.5 +* `#24461 `__: MAINT: Refactor partial load workaround for Clang +* `#24463 `__: MAINT: Fix broken link in runtests.py +* `#24468 `__: BUG: Fix meson build failure due to unchanged inplace auto-generated... +* `#24469 `__: DEP: Replace deprecation warning for non-integral arguments in... +* `#24471 `__: DOC: Fix some incorrect markups +* `#24473 `__: MAINT: Improve docstring and performance of trimseq +* `#24476 `__: MAINT: Move ``RankWarning`` to exceptions module +* `#24477 `__: MAINT: Remove deprecated functions [NEP 52] +* `#24479 `__: CI: Implements Cross-Compile Builds for armhf, ppc64le, and s390x +* `#24481 `__: DOC: Rm np.who from autosummary. +* `#24483 `__: NEP: add NEP 55 for a variable width string dtype +* `#24484 `__: BUG: fix NPY_cast_info error handling in choose +* `#24485 `__: DOC: Fix some broken links +* `#24486 `__: BUG: ``asv dev`` has been removed, use ``asv run`` instead. +* `#24487 `__: DOC: Fix reference warning in some rst and code files. 
+* `#24488 `__: MAINT: Stop testing on ppc64le. +* `#24493 `__: CI: GitHub Actions CI job restructuring +* `#24494 `__: API: Remove deprecated ``msort`` function +* `#24498 `__: MAINT: Re-write 16-bit qsort dispatch +* `#24504 `__: DOC: Remove extra indents in docstrings +* `#24505 `__: DOC: Fix mentions in ``isin`` docs +* `#24510 `__: DOC: Add missing changelogs for NEP 52 PRs +* `#24511 `__: BUG: Use a default assignment for git_hash [skip ci] +* `#24513 `__: API: Update ``lib.histograms`` namespace +* `#24515 `__: BUG: fix issue with git-version script, needs a shebang to run +* `#24516 `__: DOC: unpin sphinx +* `#24517 `__: MAINT: Harmonize fortranobject, drop C99 style for loop +* `#24518 `__: MAINT: Add expiration notes for NumPy 2.0 removals +* `#24519 `__: MAINT: remove ``setup.py`` and other files for distutils builds +* `#24520 `__: CI: remove obsolete jobs, and move macOS and conda Azure jobs... +* `#24523 `__: CI: switch the Cygwin job to Meson +* `#24527 `__: TYP: add kind argument to numpy.isin type specification +* `#24528 `__: MAINT: Bump actions/checkout from 3.5.3 to 3.6.0 +* `#24532 `__: ENH: ``meson`` backend for ``f2py`` +* `#24535 `__: CI: remove spurious wheel build action runs +* `#24536 `__: API: Update ``lib.nanfunctions`` namespace +* `#24537 `__: API: Update ``lib.type_check`` namespace +* `#24538 `__: API: Update ``lib.function_base`` namespace +* `#24539 `__: CI: fix CircleCI job for move to Meson +* `#24540 `__: API: Add ``lib.array_utils`` namespace +* `#24543 `__: DOC: re-pin sphinx<7.2 +* `#24547 `__: DOC: Cleanup removed objects +* `#24549 `__: DOC: fix typos in percentile documentation +* `#24551 `__: Update .mailmap 2 +* `#24555 `__: BUG, ENH: Fix ``iso_c_binding`` type maps and fix ``bind(c)``... +* `#24556 `__: BUG: fix comparisons between masked and unmasked structured arrays +* `#24559 `__: BUG: ensure nomask in comparison result is not broadcast +* `#24560 `__: CI/BENCH: move more jobs to Meson and fix all broken benchmarks +* `#24562 `__: DOC: Fix typos +* `#24564 `__: API: Readd ``add_docstring`` and ``add_newdoc`` to ``np.lib`` +* `#24566 `__: API: Update ``lib.shape_base`` namespace +* `#24567 `__: API: Update ``arraypad``,``arraysetops``, ``ufunclike`` and ``utils``... +* `#24570 `__: CI: Exclude import libraries from list of DLLs on Cygwin. +* `#24571 `__: MAINT: Add tests for Polynomial with fractions.Fraction coefficients +* `#24573 `__: DOC: Update building docs to use Meson +* `#24577 `__: API: Update ``lib.twodim_base`` namespace +* `#24578 `__: API: Update ``lib.polynomial`` and ``lib.npyio`` namespaces +* `#24579 `__: DOC: fix ``import mat`` warning. +* `#24580 `__: API: Update ``lib.stride_tricks`` namespace +* `#24581 `__: API: Update ``lib.index_tricks`` namespace +* `#24582 `__: DOC: fix typos in ndarray.setflags doc +* `#24584 `__: BLD: fix ``_umath_linalg`` dependencies +* `#24587 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 5... 
+* `#24589 `__: NEP: fix typos and formatting in NEP 55 +* `#24596 `__: BUG: Fix hash of user-defined dtype +* `#24598 `__: DOC: fix two misspellings in documentation +* `#24599 `__: DOC: unpin sphinx to pick up 7.2.5 +* `#24600 `__: DOC: wrong name in docs +* `#24601 `__: BLD: meson-cpu: fix SIMD support on platforms with no features +* `#24605 `__: DOC: fix isreal docstring (complex -> imaginary) +* `#24607 `__: DOC: Fix import find_common_type warning[skip actions][skip cirrus][s… +* `#24610 `__: MAINT: Avoid creating an intermediate array in np.quantile +* `#24611 `__: TYP: Add the missing ``casting`` keyword to ``np.clip`` +* `#24612 `__: DOC: Replace "cube cube-root" with "cube root" in cbrt docstring +* `#24618 `__: DOC: Fix markups for code blocks +* `#24620 `__: DOC: Update NEP 52 file +* `#24623 `__: TYP: Explicitly declare ``dtype`` and ``generic`` as hashable +* `#24625 `__: CI: Switch SIMD tests to meson +* `#24626 `__: DOC: add release notes link to PyPI. +* `#24628 `__: TYP: Allow ``binary_repr`` to accept any object implementing... +* `#24631 `__: DOC: Clarify usage of --include-paths as an f2py CLI argument +* `#24634 `__: API: Rename ``numpy/core`` to ``numpy/_core`` [NEP 52] +* `#24635 `__: ENH: Refactor the typing "reveal" tests using ``typing.assert_type`` +* `#24636 `__: MAINT: Bump actions/checkout from 3.6.0 to 4.0.0 +* `#24643 `__: TYP, MAINT: General type annotation maintenance +* `#24644 `__: MAINT: remove the ``oldnumeric.h`` header +* `#24657 `__: Add read-only token to linux_qemu.yml +* `#24658 `__: BUG, ENH: Access ``PyArrayMultiIterObject`` fields using macros. +* `#24663 `__: ENH: optimisation of array_equal +* `#24664 `__: BLD: fix bug in random.mtrand extension, don't link libnpyrandom +* `#24666 `__: MAINT: Bump actions/upload-artifact from 3.1.2 to 3.1.3 +* `#24667 `__: DOC: TEST.rst: add example with ``pytest.mark.parametrize`` +* `#24671 `__: BLD: build wheels for 32-bit Python on Windows, using MSVC +* `#24672 `__: MAINT: Bump actions/dependency-review-action from 3.0.8 to 3.1.0 +* `#24674 `__: DOC: Remove extra indents in documents +* `#24677 `__: DOC: improve the docstring's examples for np.searchsorted +* `#24679 `__: MAINT: Refactor of ``numpy/core/_type_aliases.py`` +* `#24680 `__: ENH: add parameter ``strict`` to ``assert_allclose`` +* `#24681 `__: BUG: Fix weak promotion with some mixed float/int dtypes +* `#24682 `__: API: Remove ``ptp``, ``itemset`` and ``newbyteorder`` from ``np.ndarray``... +* `#24690 `__: DOC: Fix reference warning in some rst files +* `#24691 `__: ENH: Add the Array Iterator API to Cython +* `#24693 `__: DOC: NumPy 2.0 migration guide +* `#24695 `__: CI: enable use of Cirrus CI compute credits by collaborators +* `#24696 `__: DOC: Updated the f2py docs to remove a note on ``-fimplicit-none`` +* `#24697 `__: API: Readd ``sctypeDict`` to the main namespace +* `#24698 `__: BLD: fix issue with compiler selection during cross compilation +* `#24702 `__: DOC: Fix typos +* `#24705 `__: TYP: Add annotations for the py3.12 buffer protocol +* `#24710 `__: BUG: Fix np.quantile([0, 1], 0, method='weibull') +* `#24711 `__: BUG: Fix np.quantile([Fraction(2,1)], 0.5) +* `#24714 `__: DOC: Update asarray docstring to use shares_memory +* `#24715 `__: DOC: Fix trailing backticks characters. +* `#24716 `__: CI: do apt update before apt install +* `#24717 `__: MAINT: remove relaxed strides debug build setting +* `#24721 `__: DOC: Doc fixes and updates. +* `#24725 `__: MAINT: Update main after 1.26.0 release. 
+* `#24733 `__: BLD, BUG: Fix build failure for host flags e.g. ``-march=native``... +* `#24735 `__: MAINT: Update RELEASE_WALKTHROUGH +* `#24740 `__: MAINT: Bump pypa/cibuildwheel from 2.15.0 to 2.16.0 +* `#24741 `__: MAINT: Remove cibuildwheel pin in cirrus_wheels +* `#24745 `__: ENH: Change default values in polynomial package +* `#24752 `__: DOC: Fix reference warning in some rst files +* `#24753 `__: BLD: add libquadmath to licences and other tweaks +* `#24758 `__: ENH: fix printing structured dtypes with a non-legacy dtype member +* `#24762 `__: BUG: Fix order of Windows OS detection macros. +* `#24766 `__: DOC: add a note on the ``.c.src`` format to the distutils migration... +* `#24770 `__: ENH: add parameter ``strict`` to ``assert_equal`` +* `#24772 `__: MAINT: align test_dispatcher s390x targets with _umath_tests_mtargets +* `#24775 `__: ENH: add parameter ``strict`` to ``assert_array_less`` +* `#24777 `__: BUG: ``numpy.array_api``: fix ``linalg.cholesky`` upper decomp... +* `#24778 `__: BUG: Fix DATA statements for f2py +* `#24780 `__: DOC: Replace http:// by https:// +* `#24781 `__: MAINT, DOC: fix typos found by codespell +* `#24787 `__: DOC: Closes issue #24730, 'sigma' to 'signum' in piecewise example +* `#24791 `__: BUG: Fix f2py to enable use of string optional inout argument +* `#24792 `__: TYP,DOC: Document the ``np.number`` parameter type as invariant +* `#24793 `__: MAINT: fix licence path win +* `#24795 `__: MAINT : fix spelling mistake for "imaginary" param in _read closes... +* `#24798 `__: MAINT: Bump actions/checkout from 4.0.0 to 4.1.0 +* `#24799 `__: MAINT: Bump maxim-lobanov/setup-xcode from 1.5.1 to 1.6.0 +* `#24802 `__: BLD: updated vendored-meson/meson for mips64 fix +* `#24805 `__: DOC: Fix reference warning in some rst files +* `#24806 `__: BUG: Fix build on ppc64 when the baseline set to Power9 or higher +* `#24807 `__: API: Remove zero names from dtype aliases +* `#24811 `__: DOC: explain why we avoid string.ascii_letters +* `#24812 `__: MAINT: Bump pypa/cibuildwheel from 2.16.0 to 2.16.1 +* `#24816 `__: MAINT: Upgrade to spin 0.7 +* `#24817 `__: DOC: Fix markups for emphasis +* `#24818 `__: API: deprecate size-2 inputs for ``np.cross`` [Array API] +* `#24820 `__: MAINT: remove ``wheel`` as a build dependency +* `#24825 `__: DOC: Fix docstring of matrix class +* `#24828 `__: BUG, SIMD: use scalar cmul on bad Apple clang x86_64 +* `#24834 `__: DOC: Update debugging section +* `#24835 `__: ENH: Add ufunc for np.char.isalpha +* `#24839 `__: BLD: use scipy-openblas wheel +* `#24845 `__: MAINT: Bump actions/setup-python from 4.7.0 to 4.7.1 +* `#24847 `__: DOC: Fix reference warning in some rst files +* `#24848 `__: DOC: TESTS.rst: suggest np.testing assertion function strict=True +* `#24854 `__: MAINT: Remove 'a' dtype alias +* `#24858 `__: ENH: Extend np.add ufunc to work with unicode and byte dtypes +* `#24860 `__: MAINT: Bump pypa/cibuildwheel from 2.16.1 to 2.16.2 +* `#24864 `__: MAINT: Xfail test failing on PyPy. +* `#24866 `__: API: Add ``NumpyUnpickler`` +* `#24867 `__: DOC: Update types table +* `#24868 `__: ENH: Add find/rfind ufuncs for unicode and byte dtypes +* `#24869 `__: BUG: Fix ma.convolve if propagate_mask=False +* `#24875 `__: DOC: testing.assert_array_equal: distinguish from assert_equal +* `#24876 `__: BLD: fix math func feature checks, fix FreeBSD build, add CI... +* `#24877 `__: ENH: testing: argument ``err_msg`` of assertion functions can be... 
+* `#24878 `__: ENH: isclose/allclose: support array_like ``atol``/``rtol`` +* `#24880 `__: BUG: Fix memory leak in timsort's buffer resizing +* `#24883 `__: BLD: fix "Failed to guess install tag" in meson-log.txt, add... +* `#24884 `__: DOC: replace 'a' dtype with 'S' in format_parser docs +* `#24886 `__: DOC: Fix eigenvector typo in linalg.py docs +* `#24887 `__: API: Add ``diagonal`` and ``trace`` to ``numpy.linalg`` [Array API] +* `#24888 `__: API: Make ``intp`` ``ssize_t`` and introduce characters nN +* `#24891 `__: MAINT: Bump ossf/scorecard-action from 2.2.0 to 2.3.0 +* `#24893 `__: ENH: meson: implement BLAS/LAPACK auto-detection and many CI... +* `#24896 `__: API: Add missing deprecation and release note files +* `#24901 `__: MAINT: Bump actions/setup-python from 4.7.0 to 4.7.1 +* `#24904 `__: BUG: loongarch doesn't use REAL(10) +* `#24910 `__: BENCH: Fix benchmark bug leading to failures +* `#24913 `__: DOC: fix typos +* `#24915 `__: API: Allow comparisons with and between any python integers +* `#24920 `__: MAINT: Reenable PyPy wheel builds. +* `#24922 `__: API: Add ``np.long`` and ``np.ulong`` +* `#24923 `__: ENH: Add Cython enumeration for NPY_FR_GENERIC +* `#24925 `__: DOC: Fix parameter markups in ``c-api/ufunc.rst`` +* `#24927 `__: DOC: how-to-io.rst: document solution for NumPy JSON serialization +* `#24930 `__: MAINT: Update main after 1.26.1 release. +* `#24931 `__: ENH: testing: consistent names for actual and desired results +* `#24935 `__: DOC: Update lexsort docstring for axis kwargs +* `#24938 `__: DOC: Add warning about ill-conditioning to linalg.inv docstring +* `#24939 `__: DOC: Add legacy directive to mark outdated objects +* `#24940 `__: API: Add ``svdvals`` to ``numpy.linalg`` [Array API] +* `#24941 `__: MAINT: Bump actions/checkout from 4.1.0 to 4.1.1 +* `#24943 `__: MAINT: don't warn for symbols needed by import_array() +* `#24945 `__: MAINT: Make ``numpy.fft.helper`` private +* `#24946 `__: MAINT: Make ``numpy.linalg.linalg`` private +* `#24947 `__: ENH: Add startswith & endswith ufuncs for unicode and bytes dtypes +* `#24949 `__: API: Enforce ABI version and print info when compiled against... +* `#24950 `__: TEST: Add test for checking functions' one location rule +* `#24951 `__: ENH: Add isdigit/isspace/isdecimal/isnumeric ufuncs for string... +* `#24953 `__: DOC: Indicate shape param of ndarray.reshape is position-only +* `#24958 `__: MAINT: Remove unhelpful error replacements from ``import_array()`` +* `#24959 `__: MAINT: Python API cleanup nitpicks +* `#24967 `__: BLD: use classic linker on macOS, the new one in XCode 15 has... +* `#24968 `__: BLD: mingw-w64 build fixes +* `#24969 `__: MAINT: fix a few issues with CPython main/3.13.0a1 +* `#24970 `__: BLD: Use the correct Python interpreter when running tempita.py +* `#24975 `__: DOC: correct Logo SVG files rendered in dark by Figma +* `#24978 `__: MAINT: testing: rename parameters x/y to actual/desired +* `#24979 `__: BLD: clean up incorrect-but-hardcoded define for ``strtold_l``... +* `#24980 `__: BLD: remove ``NPY_USE_BLAS_ILP64`` environment variable [wheel... +* `#24981 `__: DOC: revisions to "absolute beginners" tutorial +* `#24983 `__: ENH: Added a ``lint`` spin command +* `#24984 `__: DOC: fix reference in user/basics.rec.html#record-arrays +* `#24985 `__: MAINT: Disable warnings for items imported by pybind11 +* `#24986 `__: ENH: Added ``changelog`` spin command +* `#24988 `__: ENH: DType API slot for descriptor finalization before array... 
+* `#24990 `__: MAINT: Bump ossf/scorecard-action from 2.3.0 to 2.3.1 +* `#24991 `__: DOC: add note to default_rng about requiring non-negative seed +* `#24993 `__: BLD: musllinux_aarch64 [wheel build] +* `#24995 `__: DOC: update vectorize docstring for proper rendering of decorator... +* `#24996 `__: DOC: Clarify a point in basic indexing user guide +* `#24997 `__: DOC: Use ``spin`` to generate changelog +* `#25001 `__: DOC: Visually divide main license and bundled licenses in wheels +* `#25005 `__: MAINT: remove LGTM.com configuration file +* `#25006 `__: DOC: update ndarray.item docstring +* `#25008 `__: BLD: unvendor meson-python +* `#25010 `__: MAINT: test-refactor of ``numpy/_core/numeric.py`` +* `#25016 `__: DOC: standardize capitalization of headings +* `#25017 `__: ENH: Added ``notes`` command for spin +* `#25019 `__: Update .mailmap +* `#25022 `__: TYP: add None to ``__getitem__`` in ``numpy.array_api`` +* `#25029 `__: DOC: "What is NumPy?" section of the documentation +* `#25030 `__: DOC: Include ``np.long`` in ``arrays.scalars.rst`` +* `#25032 `__: MAINT: Add missing ``noexcept`` to shuffle helpers +* `#25037 `__: MAINT: Unpin urllib3 for anaconda-client install +* `#25039 `__: MAINT: Adjust typing for readded ``np.long`` +* `#25040 `__: BLD: make macOS version check for Accelerate NEWLAPACK more robust +* `#25042 `__: BUG: ensure passing ``np.dtype`` to itself doesn't crash +* `#25045 `__: ENH: Vectorize np.sort and np.partition with AVX2 +* `#25050 `__: TST: Ensure test is not run on 32bit platforms +* `#25051 `__: MAINT: Make bitfield integers unsigned +* `#25054 `__: API: Introduce ``np.isdtype`` function [Array API] +* `#25055 `__: BLD: improve detection of Netlib libblas/libcblas/liblapack +* `#25056 `__: DOC: Small fixes for NEP 52 +* `#25057 `__: MAINT: Add ``npy_2_compat.h`` which is designed to work also if... +* `#25059 `__: MAINT: ``np.long`` typing nitpick +* `#25060 `__: DOC: standardize capitalization of NEP headings +* `#25062 `__: ENH: Change add/isalpha ufuncs to use buffer class & general... +* `#25063 `__: BLD: change default of the ``allow-noblas`` option to true +* `#25064 `__: DOC: Fix description of auto bin_width +* `#25067 `__: DOC: add missing word to internals.rst +* `#25068 `__: TST: skip flaky test in test_histogram +* `#25072 `__: MAINT: default to C11 rather than C99, fix most build warnings... +* `#25073 `__: BLD,BUG: quadmath required where available [f2py] +* `#25078 `__: BUG: alpha doesn't use REAL(10) +* `#25079 `__: API: Introduce ``np.astype`` [Array API] +* `#25080 `__: API: Add and redefine ``numpy.bool`` [Array API] +* `#25081 `__: DOC: Provide migration notes for scalar inspection functions +* `#25082 `__: MAINT: Bump actions/dependency-review-action from 3.1.0 to 3.1.1 +* `#25085 `__: BLD: limit scipy-openblas32 wheel to 0.3.23.293.2 +* `#25086 `__: API: Add Array API aliases (math, bitwise, linalg, misc) [Array... +* `#25088 `__: API: Add Array API setops [Array API] +* `#25089 `__: BUG, BLD: Fixed VSX4 feature check +* `#25090 `__: BUG: Make n a long int for np.random.multinomial +* `#25091 `__: MAINT: Bump actions/dependency-review-action from 3.1.1 to 3.1.2 +* `#25092 `__: BLD: Fix features.h detection and blocklist complex trig funcs... 
+* `#25094 `__: BUG: Avoid intp conversion regression in Cython 3 +* `#25099 `__: DOC: Fix license identifier for OpenBLAS +* `#25101 `__: API: Add ``outer`` to ``numpy.linalg`` [Array API] +* `#25102 `__: MAINT: Print towncrier output file location +* `#25104 `__: ENH: Add str_len & count ufuncs for unicode and bytes dtypes +* `#25105 `__: API: Remove ``__array_prepare__`` +* `#25111 `__: TST: Use ``meson`` for testing ``f2py`` +* `#25123 `__: MAINT,BUG: Never import distutils above 3.12 [f2py] +* `#25124 `__: DOC: ``f2py`` CLI documentation enhancements +* `#25127 `__: DOC: angle: update documentation of convention when magnitude... +* `#25129 `__: BUG: Fix FP overflow error in division when the divisor is scalar +* `#25131 `__: MAINT: Update main after 1.26.2 release. +* `#25133 `__: DOC: std/var: improve documentation of ``ddof`` +* `#25136 `__: BUG: Fix -fsanitize=alignment issue in numpy/_core/src/multiarray/arraytypes.c.src +* `#25138 `__: API: Remove The MapIter API from public +* `#25139 `__: MAINT: Bump actions/dependency-review-action from 3.1.2 to 3.1.3 +* `#25140 `__: DOC: clarify boolean index error message +* `#25141 `__: TST: Explicitly pass NumPy path to cython during tests (also... +* `#25144 `__: DOC: Fix typo in NumPy 2.0 migration guide +* `#25145 `__: API: Add ``cross`` to ``numpy.linalg`` [Array API] +* `#25146 `__: BUG: fix issues with ``newaxis`` and ``linalg.solve`` in ``numpy.array_api`` +* `#25149 `__: API: bump MAXDIMS/MAXARGS to 64 introduce NPY_AXIS_RAVEL +* `#25151 `__: BLD, CI: revert pinning scipy-openblas +* `#25152 `__: ENH: Add strip/lstrip/rstrip ufuncs for unicode and bytes +* `#25154 `__: MAINT: Cleanup mapiter struct a bit +* `#25155 `__: API: Add ``matrix_norm``, ``vector_norm``, ``vecdot`` and ``matrix_transpose`` [Array API] +* `#25156 `__: API: Remove PyArray_REFCNT and NPY_REFCOUNT +* `#25157 `__: DOC: ``np.sort`` doc fix contiguous axis +* `#25158 `__: API: Make ``encoding=None`` the default in loadtxt +* `#25160 `__: BUG: Fix moving compiled executable to root with f2py -c on Windows +* `#25161 `__: API: Remove ``PyArray_GetCastFunc`` and any guarantee that ``->castfuncs``... +* `#25162 `__: NEP: Update NEP 55 +* `#25165 `__: DOC: mention submodule init in source install instructions +* `#25167 `__: MAINT: Add ``array-api-tests`` CI stage, add ``ndarray.__array_namespace__`` +* `#25168 `__: API: Introduce ``copy`` argument for ``np.asarray`` [Array API] +* `#25169 `__: API: Introduce ``correction`` argument for ``np.var`` and ``np.std``... +* `#25171 `__: ENH: Add replace ufunc for bytes and unicode dtypes +* `#25176 `__: DOC: replace integer overflow example +* `#25181 `__: BUG: Disallow shadowed modulenames +* `#25184 `__: MAINT,DOC: Fix inline licenses ``f2py`` +* `#25185 `__: MAINT: Fix sneaky typo [f2py] +* `#25186 `__: BUG: Handle ``common`` blocks with ``kind`` specifications from modules +* `#25193 `__: MAINT: Kill all instances of f2py.compile +* `#25194 `__: DOC: try to be nicer about f2py.compile +* `#25195 `__: BUG: Fix single to half-precision conversion on PPC64/VSX3 +* `#25196 `__: DOC: ``f2py`` rewrite with ``meson`` details +* `#25198 `__: MAINT: Replace deprecated ctypes.ARRAY(item_type, size) with... +* `#25209 `__: ENH: Expose abstract DType classes in the experimental DType... 
+* `#25212 `__: BUG: Don't try to grab callback modules +* `#25221 `__: TST: f2py: fix issue in test skip condition +* `#25222 `__: DOC: Fix wrong return type for PyArray_CastScalarToCType +* `#25223 `__: MAINT: Bump mymindstorm/setup-emsdk from 12 to 13 +* `#25226 `__: BUG: Handle ``iso_c_type`` mappings more consistently +* `#25228 `__: DOC: Improve description of ``axis`` parameter for ``np.median`` +* `#25230 `__: BUG: Raise error in ``np.einsum_path`` when output subscript is... +* `#25232 `__: DEV: Enable the ``spin lldb`` +* `#25233 `__: API: Add ``device`` and ``to_device`` to ``numpy.ndarray`` [Array... +* `#25238 `__: MAINT: do not use ``long`` type +* `#25243 `__: BUG: Fix non-contiguous 32-bit memory load when ARM/Neon is enabled +* `#25246 `__: CI: Add CI test for riscv64 +* `#25247 `__: ENH: Enable SVE detection for Highway VQSort +* `#25248 `__: DOC: Add release note for Highway VQSort on AArch64 +* `#25250 `__: DOC: fix typo (alignment) +* `#25253 `__: CI: streamline macos_arm64 test +* `#25254 `__: BUG: mips doesn't use REAL(10) +* `#25255 `__: ENH: add new wheel builds using Accelerate on macOS >=14 +* `#25257 `__: TST: PyPy needs another gc.collect on latest versions +* `#25259 `__: BUG: Fix output dtype when calling np.char methods with empty... +* `#25261 `__: MAINT: Bump conda-incubator/setup-miniconda from 2.2.0 to 3.0.0 +* `#25264 `__: MAINT: Bump actions/dependency-review-action from 3.1.3 to 3.1.4 +* `#25267 `__: BUG: Fix module name bug in signature files [urgent] [f2py] +* `#25271 `__: API: Shrink MultiIterObject and make ``NPY_MAXARGS`` a runtime... +* `#25272 `__: DOC: Mention installing threadpoolctl in issue template [skip... +* `#25276 `__: MAINT: Bump actions/checkout from 3 to 4 +* `#25280 `__: TST: Fix fp_noncontiguous and fpclass on riscv64 +* `#25282 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.0 to 3.0.1 +* `#25284 `__: CI: Install Lapack runtime on Cygwin. +* `#25287 `__: BUG: Handle .pyf.src and fix SciPy [urgent] +* `#25291 `__: MAINT: Allow initializing new-style dtypes inside numpy +* `#25292 `__: API: C-API removals +* `#25295 `__: MAINT: expose and use dtype classes in internal API +* `#25297 `__: BUG: enable linking of external libraries in the f2py Meson backend +* `#25299 `__: MAINT: Performance improvement of polyutils.as_series +* `#25300 `__: DOC: Document how to check for a specific dtype +* `#25302 `__: DOC: Clarify virtualenv setup and dependency installation +* `#25308 `__: MAINT: Update environment.yml to match *_requirements.txt +* `#25309 `__: DOC: Fix path to svg logo files +* `#25310 `__: DOC: Improve documentation for fill_diagonal +* `#25313 `__: BUG: Don't use the _Complex extension in C++ mode +* `#25314 `__: MAINT: Bump actions/setup-python from 4.7.1 to 4.8.0 +* `#25315 `__: MAINT: expose PyUFunc_AddPromoter in the internal ufunc API +* `#25316 `__: CI: remove no-blas=true from spin command on macos_arm64 ci [skip... +* `#25317 `__: ENH: Add fft optional extension submodule to numpy.array_api +* `#25321 `__: MAINT: Run f2py's meson backend with the same python that runs... 
+* `#25322 `__: DOC: Add examples for ``np.char`` functions +* `#25324 `__: DOC: Add examples for ``np.polynomial.polynomial`` functions +* `#25326 `__: DOC: Add examples to functions in ``np.polynomial.hermite`` +* `#25328 `__: DOC: Add ``np.polynomial.laguerre`` examples +* `#25329 `__: BUG: fix refcounting for dtypemeta aliases +* `#25331 `__: MAINT: Bump actions/setup-python from 4.8.0 to 5.0.0 +* `#25335 `__: BUG: Fix np.char for scalars and add tests +* `#25336 `__: API: make arange ``start`` argument positional-only +* `#25338 `__: BLD: update vendored Meson for AIX shared library fix +* `#25339 `__: DOC: fix some rendering and formatting issues in ``unique_*`` docstrings +* `#25340 `__: DOC: devguide cleanup: remove Gitwash and too verbose Git details +* `#25342 `__: DOC: Add more ``np.char`` documentation +* `#25346 `__: ENH: Enable 16-bit VQSort routines on AArch64 +* `#25347 `__: API: Introduce stringdtype [NEP 55] +* `#25350 `__: DOC: add "building from source" docs +* `#25354 `__: DOC: Add example for ``np.random.default_rng().binomial()`` +* `#25355 `__: DOC: Fix typo in ``np.random.default_rng().logistic()`` +* `#25356 `__: DOC: Add example for ``np.random.default_rng().exponential()`` +* `#25357 `__: DOC: Add example for ``np.random.default_rng().geometric()`` +* `#25361 `__: BUG: Fix regression with ``f2py`` wrappers when modules and subroutines... +* `#25364 `__: ENH,BUG: Handle includes for meson backend +* `#25367 `__: DOC: Fix refguide check script +* `#25368 `__: MAINT: add npy_gil_error to acquire the GIL and set an error +* `#25369 `__: DOC: Correct documentation for polyfit() +* `#25370 `__: ENH: Make numpy.array_api more portable +* `#25372 `__: BUG: Fix failing test_features on SapphireRapids +* `#25376 `__: BUG: Fix build issues on SPR and avx512_qsort float16 +* `#25383 `__: MAINT: Init ``base`` in cpu_avx512_kn +* `#25384 `__: MAINT: Add missing modules to refguide test +* `#25388 `__: API: Adjust ``linalg.pinv`` and ``linalg.cholesky`` to Array... +* `#25389 `__: BUG: ufunc api: update multiarray_umath import path +* `#25394 `__: MAINT: Bump actions/upload-artifact from 3.1.3 to 4.0.0 +* `#25397 `__: BUG, SIMD: Fix quicksort build error when Highway/SVE is enabled +* `#25398 `__: DOC: Plot exact distributions in logistic, logseries and weibull... +* `#25404 `__: DOC: Improve ``np.histogram`` docs +* `#25409 `__: API,MAINT: Reorganize array-wrap calling and introduce ``return_scalar`` +* `#25412 `__: DOC: Clean up of ``_generator.pyx`` +* `#25413 `__: DOC: Add example to ``rng.beta(...)`` +* `#25414 `__: DOC: Add missing examples to ``np.ma`` +* `#25416 `__: ENH: define a gufunc for vecdot (with BLAS support) +* `#25417 `__: MAINT: Bump actions/setup-node from 3.8.1 to 4.0.1 +* `#25418 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#25425 `__: BUG: Fix two errors related to not checking for failed allocations +* `#25426 `__: BUG: avoid seg fault from OOB access in RandomState.set_state() +* `#25430 `__: TST: Fix test_numeric on riscv64 +* `#25431 `__: DOC: Improve ``np.mean`` documentation of the out argument +* `#25432 `__: DOC: Add ``numpy.lib`` docs page +* `#25434 `__: API,BUG,DEP: treat trailing comma as a tuple and thus a structured... +* `#25437 `__: API: Add ``rtol`` to ``matrix_rank`` and ``stable`` [Array API] +* `#25438 `__: DEV: add ``ninja`` to ``test_requirements.txt`` and clean up... +* `#25439 `__: BLD: remove ``-fno-strict-aliasing``, ``--strip-debug`` from cibuildwheel... 
+* `#25440 `__: CI: show meson-log.txt in Cirrus wheel builds +* `#25441 `__: API,ENH: Change definition of complex sign +* `#25443 `__: TST: fix issue with dtype conversion in ``test_avx_based_ufunc`` +* `#25444 `__: TST: remove ``TestNewBufferProtocol.test_error_too_many_dims`` +* `#25446 `__: Downgrade Highway to latest released version (1.0.7) +* `#25448 `__: TYP: Adjust type annotations for Numpy 2.0 changes +* `#25449 `__: TYP,CI: bump mypy from 1.5.1 to 1.7.1 +* `#25450 `__: MAINT: make the import-time check for old Accelerate more specific +* `#25451 `__: DOC: Fix names of subroutines. +* `#25453 `__: TYP,MAINT: Change more overloads to play nice with pyright +* `#25454 `__: DOC: fix typo ``v_stack`` in 2.0 migration guide +* `#25455 `__: BUG: fix macOS version checks for Accelerate support +* `#25456 `__: BLD: optimize BLAS and LAPACK search order +* `#25459 `__: BLD: fix uninitialized variable warnings from simd/neon/memory.h +* `#25462 `__: TST: skip two tests in aarch64 linux wheel builds +* `#25463 `__: ENH: Add np.strings namespace +* `#25473 `__: MAINT: use cholesky_up gufunc for upper Cholesky decomposition +* `#25484 `__: BUG: handle scalar input in np.char.replace +* `#25492 `__: DOC: update signature of PyArray_Conjugate +* `#25495 `__: API: adjust nD fft ``s`` param to array API +* `#25501 `__: DOC: Update a few interpreted text to verbatim/code. +* `#25503 `__: BLD: unpin cibuildwheel [wheel build] +* `#25504 `__: DOC: add pickleshare to doc dependencies +* `#25505 `__: BLD: replace uses of openblas_support with openblas wheels [wheel... +* `#25507 `__: DOC: mention string, bytes, and void dtypes in dtype intro +* `#25510 `__: BUG:Fix incorrect 'inner' method type annotation in __array_ufunc_ +* `#25511 `__: DOC: np.any: add multidimensional example +* `#25512 `__: DOC: add a section for dealing with NumPy 2.0 for downstream... +* `#25515 `__: BUG: three string ufunc bugs, one leading to segfault +* `#25516 `__: MAINT,BUG: Fix ``--dep`` when ``-L -l`` are present +* `#25520 `__: DOC: unambiguous np.histogram dtype description +* `#25521 `__: DOC: Improve error messages for random.choice +* `#25522 `__: BUG: fix incorrect strcmp implementation for unequal length strings +* `#25524 `__: MAINT: Update main after 1.26.3 release. +* `#25525 `__: MAINT: optimization and broadcasting for .replace() method for... +* `#25527 `__: DOC: Improve ``polynomial`` docs +* `#25528 `__: DOC: Add notes to ``rng.bytes()`` +* `#25529 `__: DOC: Add ``rng.f()`` plot +* `#25530 `__: DOC: Add ``rng.chisquare()`` plot +* `#25531 `__: API: allow building in cython with Py_LIMITED_API +* `#25533 `__: DOC: Improve ``poisson`` plot +* `#25534 `__: DOC: Indicate order is kwarg-only for ndarray.reshape. +* `#25535 `__: MAINT: fix ufunc debug tracing +* `#25536 `__: MAINT, ENH: Implement calling pocketfft via gufunc and allow... 
+* `#25538 `__: MAINT: Bump actions/dependency-review-action from 3.1.4 to 3.1.5
+* `#25540 `__: DOC: Fix typo in random.geometric docstring
+* `#25542 `__: NEP: add NEP 56 on array API standard support in main namespace
+* `#25545 `__: MAINT: Update copyright to 2024 (LICENSE & DOC)
+* `#25549 `__: DOC: Using ``f2py`` with ``fypp``
+* `#25553 `__: BUG: Fix return shape of inverse_indices in unique_inverse
+* `#25554 `__: BUG: support axes argument in np.linalg.tensordot
+* `#25555 `__: MAINT, BLD: Fix unused inline functions warnings on clang
+* `#25558 `__: ENH: Add replace ufunc to np.strings
+* `#25560 `__: BUG: np.linalg.vector_norm: return correct shape for keepdims
+* `#25563 `__: SIMD: Extend the enabled targets for Google Highway quicksort
+* `#25569 `__: DOC: Fix a typo
+* `#25570 `__: ENH: change list-of-array to tuple-of-array returns (Numba compat)
+* `#25571 `__: MAINT: Return size_t from num_codepoints in string ufuncs Buffer...
+* `#25573 `__: MAINT: add a C alias for the default integer DType
+* `#25574 `__: DOC: ensure that docstrings for np.ndarray.copy, np.copy and...
+* `#25575 `__: ENH: Wrap string ufuncs in np.strings to allow default arguments
+* `#25579 `__: MAINT: Bump actions/upload-artifact from 4.0.0 to 4.1.0
+* `#25582 `__: CI: Bump azure pipeline timeout to 120 minutes
+* `#25592 `__: BUG: Fix undefined behavior when converting NaN float16 to datetime...
+* `#25593 `__: DOC: fix typos in 2.0 migration guide
+* `#25594 `__: MAINT: replace uses of cython numpy.math.pxd with native routines
+* `#25595 `__: BUG: Allow ``None`` as ``api_version`` in ``__array_namespace__``...
+* `#25598 `__: BLD: include fix for MinGW platform detection
+* `#25603 `__: DOC: Update tensordot documentation
+* `#25608 `__: MAINT: skip installing rtools on azure
+* `#25609 `__: DOC: fft: correct docs about recent deprecations
+* `#25610 `__: ENH: Vectorize argsort and argselect with AVX2
+* `#25613 `__: BLD: fix building for windows ARM64
+* `#25614 `__: MAINT: Bump actions/dependency-review-action from 3.1.5 to 4.0.0
+* `#25615 `__: MAINT: add ``newaxis`` to ``__all__`` in ``numpy.array_api``
+* `#25625 `__: NEP: update NEP 55 text to match current stringdtype implementation
+* `#25627 `__: TST: Fix f2py doc test collection in editable installs
+* `#25628 `__: TST: Fix test_warning_calls on Python 3.12
+* `#25629 `__: TST: Bump pytz to 2023.3.post1
+* `#25631 `__: BUG: Use large file fallocate on 32 bit linux platforms
+* `#25636 `__: MAINT: Move np.char methods to np.strings
+* `#25638 `__: MAINT: Bump actions/upload-artifact from 4.1.0 to 4.2.0
+* `#25641 `__: DOC: Remove a duplicated argument ``shape`` in ``empty_like``
+* `#25646 `__: DOC: Fix links to f2py codes
+* `#25648 `__: DOC: fix syntax highlighting issues in added f2py docs
+* `#25650 `__: DOC: improve structure of reference guide
+* `#25651 `__: ENH: Allow strings in logical ufuncs
+* `#25652 `__: BUG: Fix AVX512 build flags on Intel Classic Compiler
+* `#25656 `__: DOC: add autosummary API reference for DType classes.
+* `#25657 `__: MAINT: fix warning about visibility tag on clang
+* `#25660 `__: MAINT: Bump mymindstorm/setup-emsdk from 13 to 14
+* `#25662 `__: BUG: Allow NumPy int scalars to be divided by out-of-bound Python...
+* `#25664 `__: DOC: minor improvement to the partition() docstrings +* `#25668 `__: BUG: correct irfft with n=1 on larger input +* `#25669 `__: BLD: fix potential issue with escape sequences in ``__config__.py`` +* `#25671 `__: MAINT: Bump actions/upload-artifact from 4.2.0 to 4.3.0 +* `#25672 `__: BUG: check for overflow when converting a string to an int scalar +* `#25673 `__: BUG: Ensure meson updates generated umath doc correctly. +* `#25674 `__: DOC: add a section on NumPy's module structure to the refguide +* `#25676 `__: NEP: add note on Python integer "exceptions" to NEP 50 +* `#25678 `__: DOC: fix docstring of quantile and percentile +* `#25680 `__: DOC: replace autosummary for numpy.dtypes with enumerated list +* `#25683 `__: DOC: Try add a section on NEP 50 to migration guide +* `#25687 `__: Update to OpenBLAS 0.3.26 +* `#25689 `__: MAINT: Simplify scalar int division a bit (no need for helper... +* `#25692 `__: DOC: Clarify deprecated width Parameter in numpy.binary_repr... +* `#25695 `__: DOC: empty: standardize notes about uninitialized values +* `#25697 `__: CI: add pinning for scipy-openblas wheels +* `#25699 `__: DOC: Fix some references in document +* `#25707 `__: DOC: fix a small np.einsum example +* `#25709 `__: MAINT: Include header defining backtrace +* `#25710 `__: TST: marks on a fixture have no effect +* `#25711 `__: ENH: support float and longdouble in FFT using C++ pocketfft... +* `#25712 `__: API: Make any and all return booleans by default +* `#25715 `__: [MAINT] Add regression test for np.geomspace +* `#25716 `__: CI: pin cygwin python to 3.9.16-1 [skip cirrus][skip azp][skip... +* `#25717 `__: DOC: Fix some minor formatting errors in NEPs +* `#25721 `__: DEP: Finalize future warning move in lstsq default +* `#25723 `__: NEP: Mark NEP 55 accepted +* `#25727 `__: DOC: Remove function name without signature in ``ma`` +* `#25730 `__: ENH: add a pkg-config file and a ``numpy-config`` script +* `#25732 `__: CI: use version 0.3.26.0.2 of scipy-openblas wheels +* `#25734 `__: DOC: Fix markups of code literals in ``polynomial`` +* `#25735 `__: MAINT: Bump pypa/cibuildwheel from 2.16.4 to 2.16.5 +* `#25736 `__: MAINT: Bump actions/cache from 3 to 4 +* `#25738 `__: MAINT: add ``trapezoid`` as the new name for ``trapz`` +* `#25739 `__: TST: run macos_arm64 test on Github Actions +* `#25740 `__: DOC: Fix doctest failure in ``polynomial`` +* `#25745 `__: DEV: add .editorconfig for C/C++ +* `#25751 `__: DOC: Update ruff rule instruction +* `#25753 `__: DOC: Fix ``ufunc.reduceat`` doc for ``dtype`` +* `#25754 `__: API: Expose the dtype C API +* `#25758 `__: DOC: Fix summary table in linalg routines document +* `#25761 `__: DEP: Finalize future warning for shape=1 descriptor dropping... +* `#25763 `__: CI/BLD: fix bash script tests for cibw +* `#25768 `__: DOC: in ufuncs ``dtype`` is not ignored when ``out`` is passed +* `#25772 `__: MAINT: Update main after 1.26.4 release. +* `#25774 `__: DOC: Update docs build dependencies install cmd +* `#25775 `__: ENH: Add index/rindex ufuncs for unicode and bytes dtypes +* `#25776 `__: DOC: Add missing ``np.size`` entry to routines +* `#25779 `__: MAINT: Bump actions/upload-artifact from 4.3.0 to 4.3.1 +* `#25780 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... 
+* `#25783 `__: DOC: Remove references to ``distutils`` in simd document
+* `#25785 `__: MAINT: Bump actions/setup-node from 4.0.1 to 4.0.2
+* `#25788 `__: ENH: Improve performance of np.tensordot
+* `#25789 `__: MAINT,API: Always export static inline version of array accessor.
+* `#25790 `__: MAINT: Private device struct shouldn't be in public header
+* `#25791 `__: ENH: Add rest of unary ufuncs for unicode/bytes dtypes
+* `#25792 `__: API: Create ``PyArray_DescrProto`` for legacy descriptor registration
+* `#25793 `__: MAINT: update docstrings of string ufuncs to mention StringDType
+* `#25794 `__: DEP: expire some deprecations
+* `#25795 `__: DOC: fix docstring example in f2py.get_include
+* `#25796 `__: MAINT: combine string ufuncs by passing on auxiliary data
+* `#25797 `__: MAINT: Move ``NPY_VSTRING`` and make ``NPY_NTYPES NPY_TYPES_LEGACY``
+* `#25800 `__: REV: revert tuple/list return type changes for ``*split`` functions
+* `#25801 `__: DOC: Update ``np.char.array`` docstring
+* `#25802 `__: MAINT,API: Make metadata, c_metadata, fields, and names only...
+* `#25803 `__: BLD: restore 'setup-args=-Duse-ilp64=true' in cibuildwheel [wheel...
+* `#25804 `__: MAINT: Use preprocessor directive rather than code when adding...
+* `#25806 `__: DOC: Update the CPU build options document
+* `#25807 `__: DOC: Fix code-block formatting for new PyArray_RegisterDataType...
+* `#25812 `__: API: Make ``descr->f`` only accessible through ``PyDataType_GetArrFuncs``
+* `#25813 `__: DOC: Update genfromtxt documentation
+* `#25814 `__: MAINT: Use ``_ITEMSIZE`` rather than ``_DESCR(arr)->elsize``
+* `#25816 `__: API: Introduce ``PyDataType_FLAGS`` accessor for public access
+* `#25817 `__: ENH: Add more const qualifiers to C API arguments
+* `#25821 `__: BUG: ensure that FFT routines can deal with integer and bool...
+* `#25822 `__: BLD: use homebrew gfortran
+* `#25825 `__: MAINT: Bump actions/dependency-review-action from 4.0.0 to 4.1.0
+* `#25827 `__: DOC: run towncrier to consolidate the 2.0.0 release notes to...
+* `#25828 `__: DOC: two minor fixes for DType API doc formatting
+* `#25830 `__: DOC: Fix typo in nep 0052
+* `#25832 `__: DOC: add back 2.0.0 release note snippets that went missing
+* `#25833 `__: DOC: Fix some reference warnings
+* `#25834 `__: BUG: ensure static_string.buf is never NULL for a non-null string
+* `#25837 `__: DEP: removed deprecated product/cumproduct/alltrue/sometrue
+* `#25838 `__: MAINT: Update pinned setuptools for Python < 3.12
+* `#25839 `__: TST: fix Cython compile test which invokes ``meson``
+* `#25842 `__: DOC: Fix some incorrect rst markups
+* `#25843 `__: BUG: ensure empty cholesky upper does not hang.
+* `#25845 `__: DOC: Fix some typos
+* `#25847 `__: MAINT: Adjust rest of string ufuncs to static_data approach
+* `#25851 `__: DOC: Fix some reference warnings
+* `#25852 `__: ENH: Support exotic installation of nvfortran
+* `#25854 `__: BUG: Correctly refcount array descr in empty_like
+* `#25855 `__: MAINT: Bump actions/dependency-review-action from 4.1.0 to 4.1.2
+* `#25856 `__: MAINT: Remove unnecessary size argument in StringDType initializer
+* `#25861 `__: CI: make chocolatey fail when a dependency doesn't install
+* `#25862 `__: Revert "API: Make ``descr->f`` only accessible through ``PyDataType_GetArrFuncs``"
+* `#25864 `__: ENH: Implement multiply ufunc for unicode & bytes
+* `#25865 `__: ENH: print traceback after printing ABI mismatch error
+* `#25866 `__: API: Fix compat header and add new import helpers
+* `#25868 `__: MAINT: Bump actions/dependency-review-action from 4.1.2 to 4.1.3
+* `#25870 `__: BUG: use print to actually output something
+* `#25873 `__: Update Highway to 1.1.0
+* `#25874 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.1 to 3.0.2
+* `#25876 `__: API: Remove no-op C API functions
+* `#25877 `__: BUG: Include broadcasting for ``rtol`` argument in ``matrix_rank``
+* `#25879 `__: DOC: Add a document entry of ``PyArray_DescrProto``
+* `#25880 `__: DOC: README.md: point to user-friendly OpenSSF ScoreCard display
+* `#25881 `__: BUG: Fix gh-25867 for used functions and subroutines
+* `#25883 `__: BUG: fix typo in 'message' static variable of TestDeprecatedDTypeParenthesizedRepeatCount
+* `#25884 `__: BUG: Fix typo in LEGACY_CONS_NON_NEGATVE_INBOUNDS_LONG
+* `#25885 `__: DOC: fix typos
+* `#25886 `__: MAINT: fix code comment typos in numpy/ directory
+* `#25887 `__: BUG: Fix ``PyArray_FILLWBYTE`` Cython declaration
+* `#25889 `__: CI: run apt update before apt-install in linux-blas workflow
+* `#25890 `__: MAINT: refactor StringDType static_string implementation a bit.
+* `#25891 `__: ENH: Add expandtabs ufunc for string & unicode dtypes
+* `#25894 `__: CI, BLD, TST: Re-enable Emscripten/Pyodide CI job for NumPy
+* `#25896 `__: ENH: implement stringdtype <-> timedelta roundtrip casts
+* `#25897 `__: API: Make descr->f only accessible through ``PyDataType_GetArrFuncs``
+* `#25900 `__: CI, MAINT: use ``fetch-tags: true`` to speed up NumPy checkouts
+* `#25901 `__: BLD: Add meson check to test presence of pocketfft git submodule
+* `#25902 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.2 to 3.0.3
+* `#25905 `__: CI: allow job matrixes to run all jobs even when one fails
+* `#25911 `__: MAINT: remove ``numpy.array_api`` module
+* `#25912 `__: MAINT: Bump actions/cache from 4.0.0 to 4.0.1
+* `#25914 `__: API: Remove broadcasting ambiguity from np.linalg.solve
+* `#25915 `__: DOC: Fix some document build errors about rst markups
+* `#25919 `__: BUG: Ensure non-array logspace base does not influence dtype...
+* `#25920 `__: NEP: update status fields of many NEPs +* `#25921 `__: DOC: update and copy-edit 2.0.0 release notes +* `#25922 `__: BUG: fix handling of copy keyword argument when calling __array__ +* `#25924 `__: BUG: remove vestiges of array_api [wheel build] +* `#25928 `__: DOC: Add note about np.char & np.strings in 2.0 migration guide +* `#25929 `__: DOC: Add mention of complex number changes to migration guide +* `#25931 `__: BUG: fix reference leak in PyArray_FromArrayAttr_int +* `#25932 `__: TST: skip rather than xfail a few tests to address CI log pollution +* `#25933 `__: MAINT: ensure towncrier can be run >1x, and is included in ``spin``... +* `#25937 `__: DOC: 2.0 release highlights and compat notes changes +* `#25939 `__: DOC: Add entries of ``npy_datetime`` and ``npy_timedelta`` +* `#25943 `__: API: Restructure the dtype struct to be new dtype friendly +* `#25944 `__: BUG: avoid incorrect stringdtype allocator sharing from array... +* `#25945 `__: BLD: try to build most macOS wheels on GHA +* `#25946 `__: DOC: Add and fixup/move docs for descriptor changes +* `#25947 `__: DOC: Fix incorrect rst markups of c function directives +* `#25948 `__: MAINT: Introduce NPY_FEATURE_VERSION_STRING and report it in... +* `#25950 `__: BUG: Fix reference leak in niche user old user dtypes +* `#25952 `__: BLD: use hash for mamba action +* `#25954 `__: API: Expose ``PyArray_Pack`` +* `#25955 `__: API: revert position-only 'start' in 'np.arange' +* `#25956 `__: Draft: [BUG] Fix Polynomial representation tests +* `#25958 `__: BUG: avoid incorrect type punning in NpyString_acquire_allocators +* `#25961 `__: TST, MAINT: Loosen tolerance in fft test. +* `#25962 `__: DOC: fix typos and rearrange CI +* `#25965 `__: CI: fix wheel tags for Cirrus macOS arm64 +* `#25973 `__: DOC: Backport gh-25971 and gh-25972 +* `#25977 `__: REL: Prepare for the NumPy 2.0.0b1 release [wheel build] +* `#25983 `__: CI: fix last docbuild warnings +* `#25986 `__: BLD: push a tag builds a wheel +* `#25987 `__: REL: Prepare for the NumPy 2.0.0b1 release (2) [wheel build] +* `#25994 `__: DOC: remove reverted release blurb [skip actions][skip azp][skip... +* `#25996 `__: CI: don't use ``fetch-tags`` in wheel build jobs +* `#25997 `__: REL: Prepare for the NumPy 2.0.0b1 release (3) +* `#26008 `__: DOC: mention the ``exceptions`` namespace in the 2.0.0 release... +* `#26009 `__: MAINT: Remove sdist task from pavement.py +* `#26022 `__: BUG: Fixes np.put receiving empty array causes endless loop +* `#26023 `__: MAINT: Bump pypa/cibuildwheel from 2.16.5 to 2.17.0 +* `#26034 `__: MAINT: remove now-unused ``NPY_USE_C99_FORMAT`` +* `#26035 `__: MAINT: remove the now-unused ``NPY_NO_SIGNAL`` +* `#26036 `__: MAINT: handle ``NPY_ALLOW_THREADS`` and related build option... +* `#26040 `__: BUG: Filter out broken Highway platform +* `#26041 `__: BLD: omit pp39-macosx_arm64 from matrix [wheel build] +* `#26042 `__: BUG: fix kwarg handling in assert_warn [skip cirrus][skip azp] +* `#26047 `__: ENH: install StringDType promoter for add +* `#26048 `__: MAINT: avoid use of flexible array member in public header +* `#26049 `__: BUG: raise error trying to coerce object arrays containing timedelta64('NaT')... 
+* `#26050 `__: BUG: fix reference count leak in __array__ internals +* `#26051 `__: BUG: add missing error handling in string to int cast internals +* `#26052 `__: MAINT: Remove partition and split-like functions from numpy.strings +* `#26053 `__: CI: clean up some unused ``choco install`` invocations +* `#26068 `__: DOC: Backport np.strings docstrings +* `#26073 `__: DOC clarifications on debugging numpy +* `#26074 `__: BUG: fix logic error in stringdtype maximum/minimum ufunc +* `#26075 `__: BUG: Allow the new string dtype summation to work +* `#26076 `__: MAINT: Make PyArrayMultiIterObject struct "smaller" +* `#26085 `__: MAINT: Bump actions/cache from 4.0.1 to 4.0.2 +* `#26109 `__: BUG: adapt cython files to new complex declarations (#26080) +* `#26110 `__: TYP: Adjust ``np.random.integers`` and ``np.random.randint`` +* `#26111 `__: API: Require reduce promoters to start with None to match +* `#26118 `__: MAINT: install all-string promoter for multiply +* `#26122 `__: BUG: fix reference counting error in stringdtype setup +* `#26124 `__: MAINT,API: Const qualify some new API (mostly new DType API) +* `#26127 `__: BUG: update pocketfft to unconditionaly disable use of aligned_alloc +* `#26131 `__: MAINT: add missing noexcept clauses +* `#26154 `__: MAINT: Bump actions/setup-python from 5.0.0 to 5.1.0 +* `#26167 `__: MAINT: Escalate import warning to an import error +* `#26169 `__: BUG,MAINT: Fix __array__ bugs and simplify code +* `#26170 `__: DOC: mention np.lib.NumPyVersion in the 2.0 migration guide +* `#26171 `__: ENH: inherit numerical dtypes from abstract ones. +* `#26173 `__: DOC, TST: make ``numpy.version`` officially public +* `#26186 `__: MAINT: Update Pyodide to 0.25.1 +* `#26192 `__: BUG: Infinite Loop in numpy.base_repr +* `#26193 `__: BUG: fix reference counting error in wrapping_method_resolve_descriptors +* `#26194 `__: DOC: Mention ``copy=True`` for ``__array__`` method in the migration... +* `#26205 `__: BUG: introduce PyArray_SafeCast to fix issues around stringdtype... +* `#26231 `__: API: Readd np.bool_ typing stub +* `#26256 `__: MAINT: Update array-api-tests job +* `#26259 `__: DOC: Backport various documentation fixes +* `#26262 `__: BLD: update to OpenBLAS 0.3.27.0.1 +* `#26265 `__: MAINT: Fix some typos +* `#26272 `__: BUG: Fixes for ``np.vectorize``. +* `#26283 `__: DOC: correct PR referenced in __array_wraps__ change note +* `#26293 `__: BUG: Ensure seed sequences are restored through pickling (#26260) +* `#26297 `__: BUG: Workaround for Intel Compiler mask conversion bug +* `#26305 `__: DOC: Bump pydata-sphinx-theme version +* `#26306 `__: MAINT: Robust string meson template substitution +* `#26307 `__: BLD: use newer openblas wheels [wheel build] +* `#26312 `__: DOC: Follow-up fixes for new theme +* `#26330 `__: BUG: Fix invalid constructor in string_fastsearch.h with C++... +* `#26331 `__: MAINT: address improper error handling and cleanup for ``spin`` +* `#26332 `__: BUG: use PyArray_SafeCast in array_astype +* `#26334 `__: MAINT: Disable compiler sanitizer tests on 2.0.x +* `#26351 `__: ENH: introduce a notion of "compatible" stringdtype instances... +* `#26357 `__: DOC: Added small clarification note, based on discussion in issue... +* `#26358 `__: BUG: Fix rfft for even input length. 
+* `#26360 `__: MAINT: Simplify bugfix for even rfft +* `#26373 `__: DOC: fix np.unique release notes [skip cirrus] +* `#26374 `__: ENH: add support for nan-like null strings in string replace +* `#26393 `__: BUG: Make sure that NumPy scalars are supported by can_cast +* `#26400 `__: MNT: more gracefully handle spin adding arguments to functions... +* `#26402 `__: DOC: Add missing methods to numpy.strings docs +* `#26403 `__: DOC: Fix links in random documentation. +* `#26417 `__: BUG: support nan-like null strings in [l,r]strip +* `#26423 `__: DOC: Fix some typos and incorrect markups +* `#26424 `__: DOC: add reference docs for NpyString C API +* `#26425 `__: REL: Prepare for the NumPy 2.0.0rc2 release [wheel build] +* `#26427 `__: TYP: Fix ``fromrecords`` type hint and bump mypy to 1.10.0. +* `#26457 `__: MAINT: Various CI fixes +* `#26458 `__: BUG: Use Python pickle protocol version 4 for np.save (#26388) +* `#26459 `__: BUG: fixes for three related stringdtype issues (#26436) +* `#26460 `__: MAINT: Bump pypa/cibuildwheel from 2.17.0 to 2.18.0 +* `#26461 `__: BUG: int32 and intc should both appear in sctypes +* `#26482 `__: DOC: Skip API documentation for numpy.distutils with Python 3.12... +* `#26527 `__: DOC: fix NEP 50 reference +* `#26536 `__: BUG: cast missing in PyPy-specific f2py code, pin spin in CI... +* `#26539 `__: ENH: improve the error raised by ``numpy.isdtype`` +* `#26540 `__: BLD: Make NumPy build reproducibly +* `#26541 `__: BUG: fix incorrect error handling for dtype('a') deprecation +* `#26543 `__: BUG: fix assert in PyArry_ConcatenateArrays with StringDType +* `#26544 `__: BUG: Fix handling of size=() in Generator.choice when a.ndim... +* `#26554 `__: BUG: Fix in1d fast-path range +* `#26555 `__: BUG: Fix typo in array-wrap code that lead to memory leak +* `#26569 `__: MAINT: Avoid by-pointer parameter passing for LINEARIZE_DATA_t... +* `#26583 `__: BUG: Fix memory leaks found with valgrind +* `#26584 `__: MAINT: Unpin pydata-sphinx-theme +* `#26587 `__: DOC: Added web docs for missing ma and strings routines +* `#26591 `__: BUG: Fix memory leaks found by valgrind +* `#26592 `__: DOC: Various documentation updates +* `#26635 `__: DOC: update 2.0 docs +* `#26651 `__: DOC: Update 2.0 migration guide +* `#26652 `__: BUG: Disallow string inputs for copy keyword in np.array and... +* `#26653 `__: BUG: Fix F77 ! comment handling +* `#26654 `__: DOC: Set default as ``-j 1`` for spin docs and move ``-W`` to... +* `#26657 `__: BUG: fix memory leaks found with valgrind (next) +* `#26659 `__: BUG: Replace dots with underscores in f2py meson backend for... +* `#26673 `__: CI: upgrade FreeBSD Cirrus job from FreeBSD 13.2 to 14.0 +* `#26674 `__: MNT: catch invalid fixed-width dtype sizes +* `#26677 `__: CI: Use default llvm on Windows. +* `#26694 `__: DOC: document workaround for deprecation of dim-2 inputs to `cross` +* `#26695 `__: BUG: Adds asanyarray to start of linalg.cross (#26667) +* `#26696 `__: BUG: weighted nanpercentile, nanquantile and multi-dim q +* `#26697 `__: BUG: Fix bug in numpy.pad() + diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst index 633336765928..f4e7ac7da3b4 100644 --- a/doc/source/release/2.0.0-notes.rst +++ b/doc/source/release/2.0.0-notes.rst @@ -12,10 +12,10 @@ NumPy 2.0.0 Release Notes and those full notes should be complete (if not copy-edited well enough yet). -NumPy 2.0.0 is the first major release since 2006. 
It is the result of X months -of development since the last feature release by Y contributors, and contains a -large amount of exciting new features as well as a large amount of changes to -both the Python and C APIs. +NumPy 2.0.0 is the first major release since 2006. It is the result of 11 +months of development since the last feature release and is the work of 212 +contributors spread over 1078 pull requests. It contains a large number of +exciting new features as well as changes to both the Python and C APIs. This major release includes breaking changes that could not happen in a regular minor (feature) release - including an ABI break, changes to type promotion @@ -50,10 +50,13 @@ Highlights of this release include: that are about 3 times smaller, - `numpy.char` fixed-length string operations have been accelerated by implementing ufuncs that also support `~numpy.dtypes.StringDType` in - addition to the the fixed-length string dtypes, + addition to the fixed-length string dtypes, - A new tracing and introspection API, `~numpy.lib.introspect.opt_func_info`, to determine which hardware-specific kernels are available and will be dispatched to. + - `numpy.save` now uses pickle protocol version 4 for saving arrays with + object dtype, which allows for pickle objects larger than 4GB and improves + saving speed by about 5% for large arrays. - Python API improvements: @@ -76,8 +79,8 @@ Highlights of this release include: - Improved behavior: - - Improvements to type promotion behavior was changed by adopting `NEP - 50 `_. This fixes many user surprises about promotions which + - Improvements to type promotion behavior was changed by adopting :ref:`NEP + 50 `. This fixes many user surprises about promotions which previously often depended on data values of input arrays rather than only their dtypes. Please see the NEP and the :ref:`numpy-2-migration-guide` for details as this change can lead to changes in output dtypes and lower @@ -88,7 +91,7 @@ Highlights of this release include: - Documentation: - - The reference guide navigation was signficantly improved, and there is now + - The reference guide navigation was significantly improved, and there is now documentation on NumPy's :ref:`module structure `, - The :ref:`building from source ` documentation was completely rewritten, @@ -112,7 +115,7 @@ API and behavior improvements and better future extensibility. This price is: 2. Breaking changes to the NumPy ABI. As a result, binaries of packages that use the NumPy C API and were built against a NumPy 1.xx release will not work with NumPy 2.0. On import, such packages will see an ``ImportError`` - with a message about binary incompatibiliy. + with a message about binary incompatibility. It is possible to build binaries against NumPy 2.0 that will work at runtime with both NumPy 2.0 and 1.x. See :ref:`numpy-2-abi-handling` for more details. @@ -255,9 +258,9 @@ NumPy 2.0 Python API removals (`gh-25911 `__) + ``__array_prepare__`` is removed -------------------------------- - UFuncs called ``__array_prepare__`` before running computations for normal ufunc calls (not generalized ufuncs, reductions, etc.). The function was also called instead of ``__array_wrap__`` on the @@ -276,6 +279,15 @@ Deprecations * ``np.compat`` has been deprecated, as Python 2 is no longer supported. +* ``numpy.int8`` and similar classes will no longer support conversion of + out of bounds python integers to integer arrays. For example, + conversion of 255 to int8 will not return -1. 
+ ``numpy.iinfo(dtype)`` can be used to check the machine limits for data types. + For example, ``np.iinfo(np.uint16)`` returns min = 0 and max = 65535. + + ``np.array(value).astype(dtype)`` will give the desired result. + + * ``np.safe_eval`` has been deprecated. ``ast.literal_eval`` should be used instead. (`gh-23830 `__) @@ -298,7 +310,7 @@ Deprecations support for implementations not accepting all three are deprecated. Its signature should be ``__array_wrap__(self, arr, context=None, return_scalar=False)`` - (`gh-25408 `__) + (`gh-25409 `__) * Arrays of 2-dimensional vectors for ``np.cross`` have been deprecated. Use arrays of 3-dimensional vectors instead. @@ -316,9 +328,9 @@ Deprecations (`gh-24978 `__) -`numpy.fft` deprecations for n-D transforms with None values in arguments -------------------------------------------------------------------------- +``numpy.fft`` deprecations for n-D transforms with None values in arguments +--------------------------------------------------------------------------- Using ``fftn``, ``ifftn``, ``rfftn``, ``irfftn``, ``fft2``, ``ifft2``, ``rfft2`` or ``irfft2`` with the ``s`` parameter set to a value that is not ``None`` and the ``axes`` parameter set to ``None`` has been deprecated, in @@ -334,9 +346,9 @@ axis, the ``s`` argument can be omitted. (`gh-25495 `__) + ``np.linalg.lstsq`` now defaults to a new ``rcond`` value --------------------------------------------------------- - `~numpy.linalg.lstsq` now uses the new rcond value of the machine precision times ``max(M, N)``. Previously, the machine precision was used but a FutureWarning was given to notify that this change will happen eventually. @@ -400,7 +412,6 @@ Compatibility notes ``loadtxt`` and ``genfromtxt`` default encoding changed ------------------------------------------------------- - ``loadtxt`` and ``genfromtxt`` now both default to ``encoding=None`` which may mainly modify how ``converters`` work. These will now be passed ``str`` rather than ``bytes``. Pass the @@ -410,48 +421,39 @@ unicode strings rather than bytes. (`gh-25158 `__) + ``f2py`` compatibility notes ---------------------------- +* ``f2py`` will no longer accept ambiguous ``-m`` and ``.pyf`` CLI + combinations. When more than one ``.pyf`` file is passed, an error is + raised. When both ``-m`` and a ``.pyf`` is passed, a warning is emitted and + the ``-m`` provided name is ignored. -``f2py`` will no longer accept ambiguous ``-m`` and ``.pyf`` CLI combinations. -When more than one ``.pyf`` file is passed, an error is raised. When both ``-m`` -and a ``.pyf`` is passed, a warning is emitted and the ``-m`` provided name is -ignored. + (`gh-25181 `__) -(`gh-25181 `__) +* The ``f2py.compile()`` helper has been removed because it leaked memory, has + been marked as experimental for several years now, and was implemented as a + thin ``subprocess.run`` wrapper. It was also one of the test bottlenecks. See + `gh-25122 `_ for the full + rationale. It also used several ``np.distutils`` features which are too + fragile to be ported to work with ``meson``. -The ``f2py.compile()`` helper has been removed because it leaked memory, has -been marked as experimental for several years now, and was implemented as a thin -``subprocess.run`` wrapper. It is also one of the test bottlenecks. See -`gh-25122 `_ for the full -rationale. It also used several ``np.distutils`` features which are too fragile -to be ported to work with ``meson``. 
+* Users are urged to replace calls to ``f2py.compile`` with calls to + ``subprocess.run("python", "-m", "numpy.f2py",...`` instead, and to use + environment variables to interact with ``meson``. `Native files + `_ are also an option. -Users are urged to replace calls to ``f2py.compile`` with calls to -``subprocess.run("python", "-m", "numpy.f2py",...`` instead, and to use -environment variables to interact with ``meson``. `Native files -`_ are also an option. + (`gh-25193 `__) -(`gh-25193 `__) - -``arange``'s ``start`` argument is positional-only --------------------------------------------------- -The first argument of ``arange`` is now positional only. This way, -specifying a ``start`` argument as a keyword, e.g. ``arange(start=0, stop=4)``, -raises a TypeError. Other behaviors, are unchanged so ``arange(stop=4)``, -``arange(2, stop=4)`` and so on, are still valid and have the same meaning as -before. - -(`gh-25336 `__) Minor changes in behavior of sorting functions ---------------------------------------------- - Due to algorithmic changes and use of SIMD code, sorting functions with methods that aren't stable may return slightly different results in 2.0.0 compared to 1.26.x. This includes the default method of `~numpy.argsort` and `~numpy.argpartition`. + Removed ambiguity when broadcasting in ``np.solve`` --------------------------------------------------- The broadcasting rules for ``np.solve(a, b)`` were ambiguous when ``b`` had 1 @@ -461,6 +463,7 @@ reconstructed by using ``np.solve(a, b[..., None])[..., 0]``. (`gh-25914 `__) + Modified representation for ``Polynomial`` ------------------------------------------ The representation method for `~numpy.polynomial.polynomial.Polynomial` was @@ -477,6 +480,7 @@ C API changes * The ``PyArray_CGT``, ``PyArray_CLT``, ``PyArray_CGE``, ``PyArray_CLE``, ``PyArray_CEQ``, ``PyArray_CNE`` macros have been removed. + * ``PyArray_MIN`` and ``PyArray_MAX`` have been moved from ``ndarraytypes.h`` to ``npy_math.h``. @@ -486,6 +490,7 @@ C API changes This includes functions for acquiring and releasing mutexes which lock access to the string data, as well as packing and unpacking UTF-8 bytestreams from array entries. + * ``NPY_NTYPES`` has been renamed to ``NPY_NTYPES_LEGACY`` as it does not include new NumPy built-in DTypes. In particular the new string DType will likely not work correctly with code that handles legacy DTypes. @@ -519,6 +524,7 @@ C API changes after including ``numpy/ndarrayobject.h`` as it requires ``import_array()``. This includes ``PyDataType_FLAGCHK``, ``PyDataType_REFCHK`` and ``NPY_BEGIN_THREADS_DESCR``. + * The dtype flags on ``PyArray_Descr`` must now be accessed through the ``PyDataType_FLAGS`` inline function to be compatible with both 1.x and 2.x. This function is defined in ``npy_2_compat.h`` to allow backporting. @@ -529,9 +535,9 @@ C API changes (`gh-25816 `__) + Datetime functionality exposed in the C API and Cython bindings --------------------------------------------------------------- - The functions ``NpyDatetime_ConvertDatetime64ToDatetimeStruct``, ``NpyDatetime_ConvertDatetimeStructToDatetime64``, ``NpyDatetime_ConvertPyDateTimeToDatetimeStruct``, @@ -542,9 +548,9 @@ external libraries. 
(`gh-21199 `__) + Const correctness for the generalized ufunc C API ------------------------------------------------- - The NumPy C API's functions for constructing generalized ufuncs (``PyUFunc_FromFuncAndData``, ``PyUFunc_FromFuncAndDataAndSignature``, ``PyUFunc_FromFuncAndDataAndSignatureAndIdentity``) take ``types`` and ``data`` @@ -557,9 +563,9 @@ code may be. (`gh-23847 `__) + Larger ``NPY_MAXDIMS`` and ``NPY_MAXARGS``, ``NPY_RAVEL_AXIS`` introduced ------------------------------------------------------------------------- - ``NPY_MAXDIMS`` is now 64, you may want to review its use. This is usually used in a stack allocation, where the increase should be safe. However, we do encourage generally to remove any use of ``NPY_MAXDIMS`` and @@ -570,9 +576,9 @@ replaced with ``NPY_RAVEL_AXIS``. See also :ref:`migration_maxdims`. (`gh-25149 `__) + ``NPY_MAXARGS`` not constant and ``PyArrayMultiIterObject`` size change ----------------------------------------------------------------------- - Since ``NPY_MAXARGS`` was increased, it is now a runtime constant and not compile-time constant anymore. We expect almost no users to notice this. But if used for stack allocations @@ -585,9 +591,9 @@ to avoid issues with Cython. (`gh-25271 `__) + Required changes for custom legacy user dtypes ---------------------------------------------- - In order to improve our DTypes it is unfortunately necessary to break the ABI, which requires some changes for dtypes registered with ``PyArray_RegisterDataType``. @@ -596,9 +602,9 @@ to adapt your code and achieve compatibility with both 1.x and 2.x. (`gh-25792 `__) + New Public DType API -------------------- - The C implementation of the NEP 42 DType API is now public. While the DType API has shipped in NumPy for a few versions, it was only usable in sessions with a special environment variable set. It is now possible to write custom DTypes @@ -612,9 +618,9 @@ be updated to work correctly with new DTypes. (`gh-25754 `__) + New C-API import functions -------------------------- - We have now added ``PyArray_ImportNumPyAPI`` and ``PyUFunc_ImportUFuncAPI`` as static inline functions to import the NumPy C-API tables. The new functions have two advantages over ``import_array`` and @@ -663,6 +669,7 @@ NumPy 2.0 C API removals have been removed. We recommend querying ``PyErr_CheckSignals()`` or ``PyOS_InterruptOccurred()`` periodically (these do currently require holding the GIL though). + * The ``noprefix.h`` header has been removed. Replace missing symbols with their prefixed counterparts (usually an added ``NPY_`` or ``npy_``). @@ -716,56 +723,58 @@ NumPy 2.0 C API removals * ``PyArrayFlags_Type`` and ``PyArray_NewFlagsObject`` as well as ``PyArrayFlagsObject`` are private now. There is no known use-case; use the Python API if needed. + * ``PyArray_MoveInto``, ``PyArray_CastTo``, ``PyArray_CastAnyTo`` are removed use ``PyArray_CopyInto`` and if absolutely needed ``PyArray_CopyAnyInto`` (the latter does a flat copy). -* ``PyArray_FillObjectArray`` is removed, its only true use is for + +* ``PyArray_FillObjectArray`` is removed, its only true use was for implementing ``np.empty``. Create a new empty array or use ``PyArray_FillWithScalar()`` (decrefs existing objects). + * ``PyArray_CompareUCS4`` and ``PyArray_CompareString`` are removed. Use the standard C string comparison functions. + * ``PyArray_ISPYTHON`` is removed as it is misleading, has no known use-cases, and is easy to replace. + * ``PyArray_FieldNames`` is removed, as it is unclear what it would be useful for. 
It also has incorrect semantics in some possible use-cases. + * ``PyArray_TypestrConvert`` is removed, since it seems a misnomer and unlikely to be used by anyone. If you know the size or are limited to few types, just use it explicitly, otherwise go via Python strings. (`gh-25292 `__) - -* ``PyDataType_GetDatetimeMetaData`` has been removed, it did not actually +* ``PyDataType_GetDatetimeMetaData`` is removed, it did not actually do anything since at least NumPy 1.7. (`gh-25802 `__) -``PyArray_GetCastFunc`` was removed ------------------------------------ - -Note that custom legacy user dtypes can still provide a castfunc -as their implementation, but any access to them is now removed. -The reason for this is that NumPy never used these internally -for many years. -If you use simple numeric types, please just use C casts directly. -In case you require an alternative, please let us know so we can -create new API such as ``PyArray_CastBuffer()`` which could -use old or new cast functions depending on the NumPy version. +* ``PyArray_GetCastFunc`` is removed. Note that custom legacy user dtypes + can still provide a castfunc as their implementation, but any access to them + is now removed. The reason for this is that NumPy never used these + internally for many years. If you use simple numeric types, please just use + C casts directly. In case you require an alternative, please let us know so + we can create new API such as ``PyArray_CastBuffer()`` which could use old or + new cast functions depending on the NumPy version. -(`gh-25161 `__) + (`gh-25161 `__) New Features ============ -* ``np.add`` was extended to work with ``unicode`` and ``bytes`` dtypes. +``np.add`` was extended to work with ``unicode`` and ``bytes`` dtypes. +---------------------------------------------------------------------- (`gh-24858 `__) + A new ``bitwise_count`` function -------------------------------- - This new function counts the number of 1-bits in a number. `~numpy.bitwise_count` works on all the numpy integer types and integer-like objects. @@ -779,9 +788,9 @@ integer-like objects. (`gh-19355 `__) + macOS Accelerate support, including the ILP64 --------------------------------------------- - Support for the updated Accelerate BLAS/LAPACK library, including ILP64 (64-bit integer) support, in macOS 13.3 has been added. This brings arm64 support, and significant performance improvements of up to 10x for commonly used linear @@ -796,18 +805,18 @@ PyPI will get wheels built against Accelerate rather than OpenBLAS. (`gh-25255 `__) + Option to use weights for quantile and percentile functions ----------------------------------------------------------- - A ``weights`` keyword is now available for `~numpy.quantile`, `~numpy.percentile`, `~numpy.nanquantile` and `~numpy.nanpercentile`. Only ``method="inverted_cdf"`` supports weights. (`gh-24254 `__) + Improved CPU optimization tracking ---------------------------------- - A new tracer mechanism is available which enables tracking of the enabled targets for each optimized function (i.e., that uses hardware-specific SIMD instructions) in the NumPy library. With this enhancement, it becomes possible @@ -821,9 +830,9 @@ and data type signatures. (`gh-24420 `__) + A new Meson backend for ``f2py`` -------------------------------- - ``f2py`` in compile mode (i.e. ``f2py -c``) now accepts the ``--backend meson`` option. This is the default option for Python >=3.12. For older Python versions, ``f2py`` will still default to ``--backend distutils``. 
@@ -836,9 +845,9 @@ There are no changes for users of ``f2py`` only as a code generator, i.e. withou (`gh-24532 `__) + ``bind(c)`` support for ``f2py`` -------------------------------- - Both functions and subroutines can be annotated with ``bind(c)``. ``f2py`` will handle both the correct type mapping, and preserve the unique label for other C interfaces. @@ -850,9 +859,9 @@ Fortran. (`gh-24555 `__) + A new ``strict`` option for several testing functions ----------------------------------------------------- - The ``strict`` keyword is now available for `~numpy.testing.assert_allclose`, `~numpy.testing.assert_equal`, and `~numpy.testing.assert_array_less`. Setting ``strict=True`` will disable the broadcasting behaviour for scalars @@ -862,6 +871,7 @@ and ensure that input arrays have the same data type. `gh-24770 `__, `gh-24775 `__) + Add ``np.core.umath.find`` and ``np.core.umath.rfind`` UFuncs ------------------------------------------------------------- Add two ``find`` and ``rfind`` UFuncs that operate on unicode or byte strings @@ -870,9 +880,9 @@ and are used in ``np.char``. They operate similar to ``str.find`` and (`gh-24868 `__) -``diagonal`` and ``trace`` for `numpy.linalg` ---------------------------------------------- +``diagonal`` and ``trace`` for ``numpy.linalg`` +----------------------------------------------- `numpy.linalg.diagonal` and `numpy.linalg.trace` have been added, which are array API standard-compatible variants of `numpy.diagonal` and `numpy.trace`. They differ in the default axis selection which define 2-D @@ -880,18 +890,18 @@ sub-arrays. (`gh-24887 `__) + New ``long`` and ``ulong`` dtypes --------------------------------- - `numpy.long` and `numpy.ulong` have been added as NumPy integers mapping to C's ``long`` and ``unsigned long``. Prior to NumPy 1.24, ``numpy.long`` was an alias to Python's ``int``. (`gh-24922 `__) -``svdvals`` for `numpy.linalg` ------------------------------- +``svdvals`` for ``numpy.linalg`` +-------------------------------- `numpy.linalg.svdvals` has been added. It computes singular values for (a stack of) matrices. Executing ``np.svdvals(x)`` is the same as calling ``np.svd(x, compute_uv=False, hermitian=False)``. @@ -899,25 +909,25 @@ This function is compatible with the array API standard. (`gh-24940 `__) + A new ``isdtype`` function -------------------------- - `numpy.isdtype` was added to provide a canonical way to classify NumPy's dtypes in compliance with the array API standard. (`gh-25054 `__) + A new ``astype`` function ------------------------- - `numpy.astype` was added to provide an array API standard-compatible alternative to the `numpy.ndarray.astype` method. (`gh-25079 `__) + Array API compatible functions' aliases --------------------------------------- - 13 aliases for existing functions were added to improve compatibility with the array API standard: * Trigonometry: ``acos``, ``acosh``, ``asin``, ``asinh``, ``atan``, ``atanh``, ``atan2``. @@ -930,9 +940,9 @@ Array API compatible functions' aliases (`gh-25086 `__) + New ``unique_*`` functions -------------------------- - The `~numpy.unique_all`, `~numpy.unique_counts`, `~numpy.unique_inverse`, and `~numpy.unique_values` functions have been added. They provide functionality of `~numpy.unique` with different sets of flags. They are array API @@ -942,9 +952,9 @@ compilation. (`gh-25088 `__) + Matrix transpose support for ndarrays ------------------------------------- - NumPy now offers support for calculating the matrix transpose of an array (or stack of arrays). 
The matrix transpose is equivalent to swapping the last two axes of an array. Both ``np.ndarray`` and ``np.ma.MaskedArray`` now expose a @@ -953,9 +963,9 @@ function. (`gh-23762 `__) + Array API compatible functions for ``numpy.linalg`` --------------------------------------------------- - Six new functions and two aliases were added to improve compatibility with the Array API standard for `numpy.linalg`: @@ -984,18 +994,18 @@ the Array API standard for `numpy.linalg`: (`gh-25145 `__) + A ``correction`` argument for ``var`` and ``std`` ------------------------------------------------- - A ``correction`` argument was added to `~numpy.var` and `~numpy.std`, which is an array API standard compatible alternative to ``ddof``. As both arguments serve a similar purpose, only one of them can be provided at the same time. (`gh-25169 `__) + ``ndarray.device`` and ``ndarray.to_device`` -------------------------------------------- - An ``ndarray.device`` attribute and ``ndarray.to_device`` method were added to ``numpy.ndarray`` for array API standard compatibility. @@ -1008,9 +1018,9 @@ For all these new arguments, only ``device="cpu"`` is supported. (`gh-25233 `__) + StringDType has been added to NumPy ----------------------------------- - We have added a new variable-width UTF-8 encoded string data type, implementing a "NumPy array of Python strings", including support for a user-provided missing data sentinel. It is intended as a drop-in replacement for arrays of Python @@ -1020,9 +1030,9 @@ documentation ` for more details. (`gh-25347 `__) + New keywords for ``cholesky`` and ``pinv`` ------------------------------------------ - The ``upper`` and ``rtol`` keywords were added to `numpy.linalg.cholesky` and `numpy.linalg.pinv`, respectively, to improve array API standard compatibility. @@ -1032,9 +1042,9 @@ the future. (`gh-25388 `__) + New keywords for ``sort``, ``argsort`` and ``linalg.matrix_rank`` ----------------------------------------------------------------- - New keyword parameters were added to improve array API standard compatibility: * ``rtol`` was added to `~numpy.linalg.matrix_rank`. @@ -1043,9 +1053,9 @@ New keyword parameters were added to improve array API standard compatibility: (`gh-25437 `__) + New ``numpy.strings`` namespace for string ufuncs ------------------------------------------------- - NumPy now implements some string operations as ufuncs. The old ``np.char`` namespace is still available, and where possible the string manipulation functions in that namespace have been updated to use the new ufuncs, @@ -1057,9 +1067,9 @@ instead of ``np.char``. In the future we may deprecate ``np.char`` in favor of (`gh-25463 `__) -`numpy.fft` support for different precisions and in-place calculations ----------------------------------------------------------------------- +``numpy.fft`` support for different precisions and in-place calculations +------------------------------------------------------------------------ The various FFT routines in `numpy.fft` now do their calculations natively in float, double, or long double precision, depending on the input precision, instead of always calculating in double precision. Hence, the calculation will @@ -1071,9 +1081,9 @@ for in-place calculations. (`gh-25536 `__) + configtool and pkg-config support --------------------------------- - A new ``numpy-config`` CLI script is available that can be queried for the NumPy version and for compile flags needed to use the NumPy C API. 
This will allow build systems to better support the use of NumPy as a dependency. @@ -1083,9 +1093,9 @@ find its location for use with ``PKG_CONFIG_PATH``, use (`gh-25730 `__) + Array API standard support in the main namespace ------------------------------------------------ - The main ``numpy`` namespace now supports the array API standard. See :ref:`array-api-standard-compatibility` for details. @@ -1094,40 +1104,41 @@ The main ``numpy`` namespace now supports the array API standard. See Improvements ============ -* Strings are now supported by ``any``, ``all``, and the logical ufuncs. +Strings are now supported by ``any``, ``all``, and the logical ufuncs. +---------------------------------------------------------------------- (`gh-25651 `__) + Integer sequences as the shape argument for ``memmap`` ------------------------------------------------------ - `numpy.memmap` can now be created with any integer sequence as the ``shape`` argument, such as a list or numpy array of integers. Previously, only the types of tuple and int could be used without raising an error. (`gh-23729 `__) + ``errstate`` is now faster and context safe ------------------------------------------- - The `numpy.errstate` context manager/decorator is now faster and safer. Previously, it was not context safe and had (rare) issues with thread-safety. (`gh-23936 `__) + AArch64 quicksort speed improved by using Highway's VQSort ---------------------------------------------------------- - The first introduction of the Google Highway library, using VQSort on AArch64. Execution time is improved by up to 16x in some cases, see the PR for benchmark results. Extensions to other platforms will be done in the future. (`gh-24018 `__) + Complex types - underlying C type changes ----------------------------------------- - * The underlying C types for all of NumPy's complex types have been changed to use C99 complex types. @@ -1153,9 +1164,9 @@ Complex types - underlying C type changes (`gh-24085 `__) + ``iso_c_binding`` support and improved common blocks for ``f2py`` ----------------------------------------------------------------- - Previously, users would have to define their own custom ``f2cmap`` file to use type mappings defined by the Fortran2003 ``iso_c_binding`` intrinsic module. These type maps are now natively supported by ``f2py`` @@ -1168,27 +1179,27 @@ modules. This further expands the usability of intrinsics like (`gh-25186 `__) + Call ``str`` automatically on third argument to functions like ``assert_equal`` ------------------------------------------------------------------------------- - The third argument to functions like `~numpy.testing.assert_equal` now has ``str`` called on it automatically. This way it mimics the built-in ``assert`` statement, where ``assert_equal(a, b, obj)`` works like ``assert a == b, obj``. (`gh-24877 `__) + Support for array-like ``atol``/``rtol`` in ``isclose``, ``allclose`` --------------------------------------------------------------------- - The keywords ``atol`` and ``rtol`` in `~numpy.isclose` and `~numpy.allclose` now accept both scalars and arrays. An array, if given, must broadcast to the shapes of the first two array arguments. (`gh-24878 `__) + Consistent failure messages in test functions --------------------------------------------- - Previously, some `numpy.testing` assertions printed messages that referred to the actual and desired results as ``x`` and ``y``. 
Now, these values are consistently referred to as ``ACTUAL`` and @@ -1196,9 +1207,9 @@ Now, these values are consistently referred to as ``ACTUAL`` and (`gh-24931 `__) + n-D FFT transforms allow ``s[i] == -1`` --------------------------------------- - The `~numpy.fft.fftn`, `~numpy.fft.ifftn`, `~numpy.fft.rfftn`, `~numpy.fft.irfftn`, `~numpy.fft.fft2`, `~numpy.fft.ifft2`, `~numpy.fft.rfft2` and `~numpy.fft.irfft2` functions now use the whole input array along the axis @@ -1206,9 +1217,9 @@ and `~numpy.fft.irfft2` functions now use the whole input array along the axis (`gh-25495 `__) + Guard PyArrayScalar_VAL and PyUnicodeScalarObject for the limited API --------------------------------------------------------------------- - ``PyUnicodeScalarObject`` holds a ``PyUnicodeObject``, which is not available when using ``Py_LIMITED_API``. Add guards to hide it and consequently also make the ``PyArrayScalar_VAL`` macro hidden. @@ -1226,6 +1237,7 @@ Changes * Being fully context and thread-safe, ``np.errstate`` can only be entered once now. + * ``np.setbufsize`` is now tied to ``np.errstate()``: leaving an ``np.errstate`` context will also reset the ``bufsize``. @@ -1252,9 +1264,9 @@ Changes (`gh-25816 `__) + Representation of NumPy scalars changed --------------------------------------- - As per :ref:`NEP 51 `, the scalar representation has been updated to include the type information to avoid confusion with Python scalars. @@ -1272,9 +1284,9 @@ to facilitate updates. (`gh-22449 `__) + Truthiness of NumPy strings changed ----------------------------------- - NumPy strings previously were inconsistent about how they defined if the string is ``True`` or ``False`` and the definition did not match the one used by Python. @@ -1302,9 +1314,9 @@ The change does affect ``np.fromregex`` as it uses direct assignments. (`gh-23871 `__) + A ``mean`` keyword was added to var and std function ---------------------------------------------------- - Often when the standard deviation is needed the mean is also needed. The same holds for the variance and the mean. Until now the mean is then calculated twice, the change introduced here for the `~numpy.var` and `~numpy.std` functions @@ -1313,18 +1325,18 @@ docstrings for details and an example illustrating the speed-up. (`gh-24126 `__) + Remove datetime64 deprecation warning when constructing with timezone --------------------------------------------------------------------- - The `numpy.datetime64` method now issues a UserWarning rather than a DeprecationWarning whenever a timezone is included in the datetime string that is provided. (`gh-24193 `__) + Default integer dtype is now 64-bit on 64-bit Windows ----------------------------------------------------- - The default NumPy integer is now 64-bit on all 64-bit systems as the historic 32-bit default on Windows was a common source of issues. Most users should not notice this. The main issues may occur with code interfacing with libraries @@ -1333,6 +1345,7 @@ written in a compiled language like C. For more information see (`gh-24224 `__) + Renamed ``numpy.core`` to ``numpy._core`` ----------------------------------------- Accessing ``numpy.core`` now emits a DeprecationWarning. 
In practice @@ -1353,9 +1366,9 @@ the ``NPY_RELAXED_STRIDES_DEBUG`` environment variable or the (`gh-24717 `__) + Redefinition of ``np.intp``/``np.uintp`` (almost never a change) ---------------------------------------------------------------- - Due to the actual use of these types almost always matching the use of ``size_t``/``Py_ssize_t`` this is now the definition in C. Previously, it matched ``intptr_t`` and ``uintptr_t`` which would often @@ -1375,24 +1388,25 @@ However, it means that: (`gh-24888 `__) + ``numpy.fft.helper`` made private --------------------------------- - ``numpy.fft.helper`` was renamed to ``numpy.fft._helper`` to indicate that it is a private submodule. All public functions exported by it should be accessed from `numpy.fft`. (`gh-24945 `__) + ``numpy.linalg.linalg`` made private ------------------------------------ - ``numpy.linalg.linalg`` was renamed to ``numpy.linalg._linalg`` to indicate that it is a private submodule. All public functions exported by it should be accessed from `numpy.linalg`. (`gh-24946 `__) + Out-of-bound axis not the same as ``axis=None`` ----------------------------------------------- In some cases ``axis=32`` or for concatenate any large value @@ -1405,9 +1419,9 @@ Any out of bound axis value will now error, make sure to use .. _copy-keyword-changes-2.0: + New ``copy`` keyword meaning for ``array`` and ``asarray`` constructors ----------------------------------------------------------------------- - Now `numpy.array` and `numpy.asarray` support three values for ``copy`` parameter: * ``None`` - A copy will only be made if it is necessary. @@ -1418,9 +1432,9 @@ The meaning of ``False`` changed as it now raises an exception if a copy is need (`gh-25168 `__) + The ``__array__`` special method now takes a ``copy`` keyword argument. ----------------------------------------------------------------------- - NumPy will pass ``copy`` to the ``__array__`` special method in situations where it would be set to a non-default value (e.g. in a call to ``np.asarray(some_object, copy=False)``). Currently, if an @@ -1432,9 +1446,9 @@ argument with the same meaning as when passed to `numpy.array` or (`gh-25168 `__) + Cleanup of initialization of ``numpy.dtype`` with strings with commas --------------------------------------------------------------------- - The interpretation of strings with commas is changed slightly, in that a trailing comma will now always create a structured dtype. E.g., where previously ``np.dtype("i")`` and ``np.dtype("i,")`` were treated as identical, @@ -1451,9 +1465,9 @@ case for initializations without a comma, like ``np.dtype("(2)i")``. (`gh-25434 `__) + Change in how complex sign is calculated ---------------------------------------- - Following the array API standard, the complex sign is now calculated as ``z / |z|`` (instead of the rather less logical case where the sign of the real part was taken, unless the real part was zero, in which case @@ -1462,9 +1476,9 @@ zero is returned if ``z==0``. (`gh-25441 `__) + Return types of functions that returned a list of arrays -------------------------------------------------------- - Functions that returned a list of ndarrays have been changed to return a tuple of ndarrays instead. 
Returning tuples consistently whenever a sequence of arrays is returned makes it easier for JIT compilers like Numba, as well as for @@ -1473,9 +1487,9 @@ functions are: `~numpy.atleast_1d`, `~numpy.atleast_2d`, `~numpy.atleast_3d`, `~numpy.broadcast_arrays`, `~numpy.meshgrid`, `~numpy.ogrid`, `~numpy.histogramdd`. + ``np.unique`` ``return_inverse`` shape for multi-dimensional inputs ------------------------------------------------------------------- - When multi-dimensional inputs are passed to ``np.unique`` with ``return_inverse=True``, the ``unique_inverse`` output is now shaped such that the input can be reconstructed directly using ``np.take(unique, unique_inverse)`` when ``axis=None``, and @@ -1484,9 +1498,9 @@ directly using ``np.take(unique, unique_inverse)`` when ``axis=None``, and (`gh-25553 `__, `gh-25570 `__) + ``any`` and ``all`` return booleans for object arrays ----------------------------------------------------- - The ``any`` and ``all`` functions and methods now return booleans also for object arrays. Previously, they did a reduction which behaved like the Python ``or`` and @@ -1507,7 +1521,5 @@ this is currently not implemented. In some cases, this means you may have to add a specific path for: ``if type(obj) in (int, float, complex): ...``. +(`gh-26393 `__) -**Content from release note snippets in doc/release/upcoming_changes:** - -.. include:: notes-towncrier.rst From 72b943c9e467af38337661b9d26b4fb4dc8b3d24 Mon Sep 17 00:00:00 2001 From: Jules Date: Mon, 17 Jun 2024 17:01:18 +0800 Subject: [PATCH 579/980] DOC: Add clarifications np.argpartition --- numpy/_core/fromnumeric.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 57602293ad80..596ff9fa852c 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -918,6 +918,13 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None): Notes ----- + The returned indices are not guaranteed to be sorted according to + the values. Furthermore, the default selection algorithm ``introselect`` + is unstable, and hence the returned indices are not guaranteed + to be the earliest/latest occurrence of the element. + + The treatment of ``np.nan`` in the input array is undefined. + See `partition` for notes on the different selection algorithms. Examples From 0bd7a14810e5809a22eb11606f8da97274a0c8d2 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 17 Jun 2024 12:27:45 +0200 Subject: [PATCH 580/980] DOC: Mention more error paths and try to consolidate import errors The first section was nice, but outdated since NumPy 1.25 effectively solved the issue so that builds have to be quite old to run into the previous paths. OTOH, the NumPy 2 specific section would now overlap if fixing the first. So tried to use the first section but expand/rephrase it. In practice, C-API/Attribute errors, etc. won't be the only reason for things not working (e.g. the error could just as well be a missing attribute due to the Python API change), but let's focus on the C API... My main thought was that mentioning that dtype change error is good, it is an unfortunate thing that Cython seems to check that before trying to import NumPy, but it is also a pretty "obvious" error. 
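A minimal doctest-style sketch of the ``argpartition`` caveat documented in
the patch above (illustrative only and not part of any patch; the exact
index order is unspecified and may vary across platforms and NumPy
versions)::

    >>> import numpy as np
    >>> a = np.array([30, 10, 50, 20, 40])
    >>> idx = np.argpartition(a, 2)
    >>> int(a[idx[2]])   # position 2 holds the value a sort would place there
    30
    >>> sorted(int(v) for v in a[idx[:3]])   # the 3 smallest, in no guaranteed order
    [10, 20, 30]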
---
 .../user/troubleshooting-importerror.rst      | 84 ++++++++++---------
 1 file changed, 46 insertions(+), 38 deletions(-)

diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst
index adbc9d898846..807feb119367 100644
--- a/doc/source/user/troubleshooting-importerror.rst
+++ b/doc/source/user/troubleshooting-importerror.rst
@@ -148,67 +148,75 @@ This may mainly help you if you are not running the python and/or NumPy
 version you are expecting to run.
 
 
-C-API incompatibility
----------------------------
+Downstream ImportError, AttributeError or C-API/ABI incompatibility
+===================================================================
 
-If you see an error like:
+If you see a message such as::
 
+    A module that was compiled using NumPy 1.x cannot be run in
+    NumPy 2.0.0 as it may crash. To support both 1.x and 2.x
+    versions of NumPy, modules must be compiled with NumPy 2.0.
+    Some module may need to rebuild instead e.g. with 'pybind11>=2.12'.
 
-    RuntimeError: module compiled against API version v1 but this version of numpy is v2
+either as an ``ImportError`` or with::
 
+    AttributeError: _ARRAY_API not found
 
-You may have:
+or other errors such as::
 
-* A bad extension "wheel" (binary install) that should use
-  `oldest-support-numpy `_ (
-  with manual constraints if necessary) to build their binary packages.
+    RuntimeError: module compiled against API version v1 but this version of numpy is v2
 
-* An environment issue messing with package versions.
+or when a package implemented with Cython::
 
-* Incompatible package versions somehow enforced manually.
+    ValueError: numpy.dtype size changed, may indicate binary incompatibility. Expected 96 from C header, got 88 from PyObject
 
-* An extension module compiled locally against a very recent version
-  followed by a NumPy downgrade.
+This means that a package depending on NumPy was built in a way that is not
+compatible with the NumPy version found.
+If this error is due to a recent upgrade to NumPy 2, the easiest solution may
+be to simply downgrade NumPy to ``'numpy<2'``.
 
-* A compiled extension copied to a different computer with an
-  older NumPy version.
+To understand the cause, search the traceback (from the back) to find the first
+line that isn't inside NumPy to see which package has the incompatibility.
+Note your NumPy version and the version of the incompatible package to
+help you find the best solution.
 
-The best thing to do if you see this error is to contact
-the maintainers of the package that is causing problem
-so that they can solve the problem properly.
+There can be various reasons for the incompatibility:
 
-However, while you wait for a solution, a work around
-that usually works is to upgrade the NumPy version::
+* You have recently upgraded NumPy, most likely to NumPy 2, and the other
+  module now also needs to be upgraded. (NumPy 2 was released in June 2024.)
 
+* Especially if you have version constraints on some packages, ``pip`` may
+  have found incompatible versions when installing.
 
-    pip install numpy --upgrade
+* Manually forced versions or setup steps, such as copying a compiled
+  extension to another computer with a different NumPy version.
 
+The best solution will usually be to upgrade the failing package:
 
-Downstream ImportError or AttributeError
-========================================
+* If you installed it for example through ``pip``, try upgrading it with
+  ``pip install package_name --upgrade``.
-If you see a message such as::
 
+* If it is your own package or it is built locally, it needs to be recompiled
+  for the new NumPy version (for details see :ref:`depending_on_numpy`).
+  It may be that a reinstall of the package is sufficient to fix it.
 
-    A module that was compiled using NumPy 1.x cannot be run in
-    NumPy 2.0.0 as it may crash. To support both 1.x and 2.x
-    versions of NumPy, modules must be compiled with NumPy 2.0.
-    Some module may need to rebuild instead e.g. with 'pybind11>=2.12'.
+When these steps fail, you should inform the package maintainers since they
+probably need to make a new, compatible release.
 
-Either as an ``ImportError`` or with::
+However, upgrading may not always be possible because a compatible version does
+not yet exist or cannot be installed for other reasons. In that case:
 
-    AttributeError: _ARRAY_API not found
+* Install a compatible NumPy version:
 
-Then you are using NumPy 2 together with a module that was build with NumPy 1.
-NumPy 2 made some changes that require rebuilding such modules to avoid
-possibly incorrect results or crashes.
+  * Try downgrading NumPy with ``pip install 'numpy<2'``
+    (NumPy 2 was released in June 2024).
+  * If your NumPy version is old, you can try upgrading it for
+    example with ``pip install numpy --upgrade``.
 
-As the error message suggests, the easiest solution is likely to downgrade
-NumPy to `numpy<2`.
-Alternatively, you can search the traceback (from the back) to find the first
-line that isn't inside NumPy to see which module needs to be updated.
+* Add additional version pins to the failing package to help ``pip``
+  resolve compatible versions of NumPy and the package.
 
-NumPy 2 was released in the first half of 2024 and especially smaller
-modules downstream are expected need time to adapt and publish a new version.
+* Investigate how the packages got installed and why incompatibilities arose.
 
 
 Segfaults or crashes
From 37745f2d3774c4a8837bfb18633074811752800d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?=
Date: Mon, 17 Jun 2024 10:41:03 -0300
Subject: [PATCH 581/980] DOC, MAINT: Turn on version warning banner provided
 by PyData Sphinx Theme

After https://github.com/numpy/doc/pull/25, this will show a warning
banner for past and unreleased versions of the docs. See
https://pydata-sphinx-theme.readthedocs.io/en/latest/user_guide/announcements.html#version-warning-banners
The warning banner can be dismissed by the user.
---
 doc/source/conf.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc/source/conf.py b/doc/source/conf.py
index 83c58c2c3c2d..63c3d7aacd5d 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -266,6 +266,7 @@ def setup(app):
         "version_match": switcher_version,
         "json_url": "https://numpy.org/doc/_static/versions.json",
     },
+    "show_version_warning_banner": True,
 }
 
 html_title = "%s v%s Manual" % (project, version)
From 4a8933d8f8244e43fc604dfd27c042ac7b60cec8 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Mon, 17 Jun 2024 15:49:22 +0200
Subject: [PATCH 582/980] DOC: Update roadmap a bit more

Just some thoughts; I didn't include ufunc chaining, although I think
it would also be valid to mention it (even if I doubt anyone will work
on it).

All others are things that have been discussed multiple times at least,
so I think they are fine to add, even if I don't know if anyone will
work on it: That is true for practically everything.

I also always play with the idea that labeled axes might not be all
that bad to think about.
But it may need quite a lot of dedication. (It might not be that bad,
but it's a pretty broad thing.)

---
 doc/neps/roadmap.rst | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst
index 12614673b60d..255bc3dc7ac3 100644
--- a/doc/neps/roadmap.rst
+++ b/doc/neps/roadmap.rst
@@ -29,8 +29,6 @@ needed based on real-world experience and needs.
 
 In addition, the ``__array_ufunc__`` and ``__array_function__`` protocols
 fulfill a role here - they are stable and used by several downstream projects.
-They do not cover the whole API, so use of the array API standard is preferred
-for new code.
 
 
 Performance
@@ -51,6 +49,8 @@ Other performance improvement ideas include:
 
 - A better story around parallel execution (related is support for
   free-threaded CPython, see further down).
+- Using recent improvements in our ufunc context to allow users for example to
+  enable faster but less precise versions of certain loops.
 - Optimizations in individual functions.
 
 Furthermore we would like to improve the benchmarking system, in terms of coverage,
@@ -97,6 +97,9 @@ first, and that could potentially be upstreamed into NumPy later, include:
   ``latin1``)
 - A unit dtype
 
+We further plan to extend the new API around ufuncs as the needs arise.
+One possibility here is creating a new, more powerful, API to allow hooking
+into existing NumPy ufunc implementations.
 
 User experience
 ---------------
@@ -122,6 +125,13 @@ We intend to write a NEP covering the support levels we provide and what is
 required for a platform to move to a higher tier of support, similar to
 `PEP 11 `__.
 
+Further consistency fixes to promotion and scalar logic
+```````````````````````````````````````````````````````
+NumPy 2 fixed many issues around promotion especially with respect to scalars.
+We plan to continue slowly fixing remaining inconsistencies.
+For example, NumPy converts 0-D objects to scalars and some promotions
+still allowed by NumPy are problematic.
+
 Support for free-threaded CPython
 `````````````````````````````````
 CPython 3.13 will be the first release to offer a free-threaded build (i.e.,
From a8706ddb1b902f1984295e12b76d2545cef8f3bf Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Tue, 18 Jun 2024 10:02:38 +0200
Subject: [PATCH 583/980] DOC: Incorporate/address review comments by Ralf

---
 doc/neps/roadmap.rst | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst
index 255bc3dc7ac3..cb1b1a29faa1 100644
--- a/doc/neps/roadmap.rst
+++ b/doc/neps/roadmap.rst
@@ -49,8 +49,8 @@ Other performance improvement ideas include:
 
 - A better story around parallel execution (related is support for
   free-threaded CPython, see further down).
-- Using recent improvements in our ufunc context to allow users for example to
-  enable faster but less precise versions of certain loops.
+- Using improvements in the ufunc context (``errstate``) to allow users
+  for example to enable faster but less precise versions of certain loops.
 - Optimizations in individual functions.
 
 Furthermore we would like to improve the benchmarking system, in terms of coverage,
@@ -97,7 +97,7 @@ first, and that could potentially be upstreamed into NumPy later, include:
   ``latin1``)
 - A unit dtype
 
-We further plan to extend the new API around ufuncs as the needs arise.
+We further plan to extend the ufunc C API as needs arise.
 One possibility here is creating a new, more powerful, API to allow hooking
 into existing NumPy ufunc implementations.
 
@@ -127,9 +127,9 @@ required for a platform to move to a higher tier of support, similar to
 
 Further consistency fixes to promotion and scalar logic
 ```````````````````````````````````````````````````````
-NumPy 2 fixed many issues around promotion especially with respect to scalars.
-We plan to continue slowly fixing remaining inconsistencies.
-For example, NumPy converts 0-D objects to scalars and some promotions
+NumPy 2.0 fixed many issues around promotion especially with respect to scalars.
+We plan to continue fixing remaining inconsistencies.
+For example, NumPy converts 0-D objects to scalars, and some promotions
 still allowed by NumPy are problematic.
 
 Support for free-threaded CPython

From 13636e3316e963ddb3aeca328651680e94f6cd08 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Tue, 18 Jun 2024 10:18:38 +0200
Subject: [PATCH 584/980] Iterate on the roadmap point about ufunc loop
 selection
---
 doc/neps/roadmap.rst | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst
index cb1b1a29faa1..fb8602981661 100644
--- a/doc/neps/roadmap.rst
+++ b/doc/neps/roadmap.rst
@@ -49,8 +49,11 @@ Other performance improvement ideas include:
 - A better story around parallel execution (related is support for
   free-threaded CPython, see further down).
-- Using improvements in the ufunc context (``errstate``) to allow users
-  for example to enable faster but less precise versions of certain loops.
+- Allow users to enable faster, but less precise,
+  implementations for ufuncs.
+  Until now, the only state modifying ufunc behavior has been ``np.errstate``.
+  But NumPy 2.0 improvements in ``np.errstate`` and the ufunc C
+  implementation make this type of addition easier.
 - Optimizations in individual functions.

From 0a8bc2943cf8572067bcdc786e72bd35bcf268c3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?=
Date: Tue, 18 Jun 2024 10:27:03 +0200
Subject: [PATCH 585/980] DOC: Extend release notes for 26611

[skip actions] [skip azp] [skip cirrus]
---
 doc/release/upcoming_changes/26611.new_feature.rst | 2 ++
 1 file changed, 2 insertions(+)
 create mode 100644 doc/release/upcoming_changes/26611.new_feature.rst

diff --git a/doc/release/upcoming_changes/26611.new_feature.rst b/doc/release/upcoming_changes/26611.new_feature.rst
new file mode 100644
index 000000000000..6178049cf4ed
--- /dev/null
+++ b/doc/release/upcoming_changes/26611.new_feature.rst
@@ -0,0 +1,2 @@
+* `numpy.printoptions`, `numpy.get_printoptions`, and `numpy.set_printoptions` now support
+  a new option, ``override_repr``, for defining custom ``repr(array)`` behavior.
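
For illustration, a minimal sketch of the ``override_repr`` option described
in this release note. It assumes the callable receives the array and returns
the complete string to use as its repr; all other print options are then
ignored, and the dtype shown may differ by platform::

    >>> import numpy as np
    >>> def short_repr(a):
    ...     # assumed signature: gets the array, returns its full repr string
    ...     return f"<ndarray shape={a.shape} dtype={a.dtype}>"
    >>> with np.printoptions(override_repr=short_repr):
    ...     print(repr(np.arange(6).reshape(2, 3)))
    <ndarray shape=(2, 3) dtype=int64>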
From 1359ac2423c2e3a12e552a7df4ba3a49728a6063 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?=
Date: Tue, 18 Jun 2024 11:45:51 +0200
Subject: [PATCH 586/980] DOC: Update NEPs statuses

[skip actions] [skip azp] [skip cirrus]
---
 doc/neps/nep-0052-python-api-cleanup.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/neps/nep-0052-python-api-cleanup.rst b/doc/neps/nep-0052-python-api-cleanup.rst
index a161dbd91b8f..870877a91bf6 100644
--- a/doc/neps/nep-0052-python-api-cleanup.rst
+++ b/doc/neps/nep-0052-python-api-cleanup.rst
@@ -8,7 +8,7 @@ NEP 52 — Python API cleanup for NumPy 2.0
 :Author: Stéfan van der Walt
 :Author: Nathan Goldbaum
 :Author: Mateusz Sokół
-:Status: Accepted
+:Status: Final
 :Type: Standards Track
 :Created: 2023-03-28
 :Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/QLMPFTWA67DXE3JCUQT2RIRLQ44INS4F/

From a2ec95eb4c26e511373bd8f0a098fb20c6fbf516 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Tue, 18 Jun 2024 13:49:18 +0200
Subject: [PATCH 587/980] DOC: Remove mention of NaN and NAN aliases from
 constants

Remove the aliases from the docs; they don't exist anymore.

Closes gh-26732
---
 doc/source/reference/constants.rst | 2 --
 1 file changed, 2 deletions(-)

diff --git a/doc/source/reference/constants.rst b/doc/source/reference/constants.rst
index 2e2795a8b29f..b09cd9bbb21d 100644
--- a/doc/source/reference/constants.rst
+++ b/doc/source/reference/constants.rst
@@ -88,8 +88,6 @@ NumPy includes several constants:
     NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
     (IEEE 754). This means that Not a Number is not equivalent to infinity.
 
-    `NaN` and `NAN` are aliases of `nan`.
-
     .. rubric:: Examples
 
     >>> np.nan

From 4d1c13df5b7c70f6dac611600eb40e045e0dce47 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Tue, 18 Jun 2024 13:54:02 +0200
Subject: [PATCH 588/980] DOC: Mention '1.25' legacy printing mode in
 ``set_printoptions``

Mention the new printing mode in ``set_printoptions`` and also copy the
doc to its duplicate in ``array2string``.

Closes gh-26731
---
 numpy/_core/arrayprint.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py
index 7e8835044d3d..93e803a8216d 100644
--- a/numpy/_core/arrayprint.py
+++ b/numpy/_core/arrayprint.py
@@ -220,6 +220,10 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
         by not inserting spaces after commas that separate fields and after
         colons.
 
+        If set to ``'1.25'``, approximates the printing of NumPy 1.25, which
+        mainly means that numeric scalars are printed without their type
+        information, e.g. as ``3.0`` rather than ``np.float64(3.0)``.
+
         If set to `False`, disables legacy mode.
 
         Unrecognized strings will be ignored with a warning for forward
@@ -227,6 +231,8 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
 
         .. versionadded:: 1.14.0
         .. versionchanged:: 1.22.0
+        .. versionchanged:: 2.0
+
     override_repr: callable, optional
         If set a passed function will be used for generating arrays' repr.
         Other options will be ignored.
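
As a quick illustration of the legacy mode documented above (a sketch; the
default reprs shown assume a NumPy 2.x interpreter session)::

    >>> import numpy as np
    >>> np.float64(3.0)                    # default NumPy 2 repr
    np.float64(3.0)
    >>> np.set_printoptions(legacy='1.25')
    >>> np.float64(3.0)                    # type information is dropped
    3.0
    >>> np.set_printoptions(legacy=False)  # restore the default behavior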
From ccff7fb327ed47db2dc94e2015572d4c75ccc9fd Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Tue, 18 Jun 2024 14:02:15 -0600
Subject: [PATCH 589/980] BUG: allow replacement in the dispatch cache (#26693)

Allow identical replacement in the dispatch cache, since this can be hit
with free-threaded Python.

* TST: add a failing test for dispatch cache thread safety
* TST: test replace=True error with new semantics

The first version added a lock, but some of the background is still
interesting:

The test code I added raises an unhandled thread exception every time I
run it on a Linux x86 laptop without the change to dispatching.c in this
PR. Still not sure why this failure is only hit under gcc.

Here's what I think is happening: right now the locking in the dispatch
cache is only internal to the cache itself. The current locking strategy
allows a race condition where two threads simultaneously see the cache is
empty early in dispatch and then both try to fill it. The second thread to
fill it sees that the cache is already filled and raises an exception,
because the replace parameter for PyArrayIdentityHash_SetItem is 0, so
replacements raise an exception.

I don't think it's possible to support this replace feature without moving
the lock from the dispatch cache struct to somewhere in the dispatching
logic in dispatching.c. We'd need to lock around all the spots where we
check for an entry in the dispatch cache and then later insert an entry
into the cache. Happy to try that approach if it turns out replacing
entries in this cache is problematic for some reason. I didn't want to do
that, since this code is hit every time a ufunc is called, so I didn't
want to add even larger blocks of code that need to be locked around.

In practice, I don't think it's problematic to simply replace entries when
this happens, at least not any more problematic than the current approach,
since the dispatch cache holds borrowed references to ArrayMethod
instances.

Fixes #26690.
---
 numpy/_core/src/common/npy_hashtable.c   | 13 ++++++++-----
 numpy/_core/tests/test_hashtable.py      |  9 +++++++--
 numpy/_core/tests/test_multithreading.py | 19 ++++++++++++++++++-
 3 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.c
index 02fe5ca29751..d361777d26ca 100644
--- a/numpy/_core/src/common/npy_hashtable.c
+++ b/numpy/_core/src/common/npy_hashtable.c
@@ -210,10 +210,13 @@ _resize_if_necessary(PyArrayIdentityHash *tb)
  * @param value Normally a Python object, no reference counting is done.
  *        use NULL to clear an item.  If the item does not exist, no
  *        action is performed for NULL.
- * @param replace If 1, allow replacements.
+ * @param replace If 1, allow replacements. If replace is 0, an error is raised
+ *        if the stored value is different from the value to be cached. If the
+ *        value to be cached is identical to the stored value, the value to be
+ *        cached is ignored and no error is raised.
  * @returns 0 on success, -1 with a MemoryError or RuntimeError (if an item
- *          is added which is already in the cache). The caller should avoid
- *          the RuntimeError.
+ *          is added which is already in the cache and replace is 0). The
+ *          caller should avoid the RuntimeError.
*/ NPY_NO_EXPORT int PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, @@ -228,10 +231,10 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, PyObject **tb_item = find_item(tb, key); if (value != NULL) { - if (tb_item[0] != NULL && !replace) { + if (tb_item[0] != NULL && tb_item[0] != value && !replace) { UNLOCK_TABLE(tb); PyErr_SetString(PyExc_RuntimeError, - "Identity cache already includes the item."); + "Identity cache already includes an item with this key."); return -1; } tb_item[0] = value; diff --git a/numpy/_core/tests/test_hashtable.py b/numpy/_core/tests/test_hashtable.py index e75cfceea412..41da06be3f2b 100644 --- a/numpy/_core/tests/test_hashtable.py +++ b/numpy/_core/tests/test_hashtable.py @@ -24,7 +24,12 @@ def test_identity_hashtable(key_length, length): res = identityhash_tester(key_length, keys_vals, replace=True) assert res is expected - # check that ensuring one duplicate definitely raises: - keys_vals.insert(0, keys_vals[-2]) + if length == 1: + return + + # add a new item with a key that is already used and a new value, this + # should error if replace is False, see gh-26690 + new_key = (keys_vals[1][0], object()) + keys_vals[0] = new_key with pytest.raises(RuntimeError): identityhash_tester(key_length, keys_vals) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index af6f94a08d55..1511cfaf1982 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -1,4 +1,5 @@ import concurrent.futures +import threading import numpy as np import pytest @@ -30,13 +31,29 @@ def func(seed): def test_parallel_ufunc_execution(): # if the loop data cache or dispatch cache are not thread-safe # computing ufuncs simultaneously in multiple threads leads - # to a data race + # to a data race that causes crashes or spurious exceptions def func(): arr = np.random.random((25,)) np.isnan(arr) run_threaded(func, 500) + # see gh-26690 + NUM_THREADS = 50 + + b = threading.Barrier(NUM_THREADS) + + a = np.ones(1000) + + def f(): + b.wait() + return a.sum() + + threads = [threading.Thread(target=f) for _ in range(NUM_THREADS)] + + [t.start() for t in threads] + [t.join() for t in threads] + def test_temp_elision_thread_safety(): amid = np.ones(50000) bmid = np.ones(50000) From baee89118b90c676b2bcb1ad26a9a81035ebe63d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 30 May 2024 14:34:34 -0600 Subject: [PATCH 590/980] MNT: move interned strings into a single global struct --- numpy/_core/src/multiarray/array_converter.c | 4 +- .../src/multiarray/arrayfunction_override.c | 8 +- numpy/_core/src/multiarray/arraywrap.c | 4 +- numpy/_core/src/multiarray/conversion_utils.c | 2 +- numpy/_core/src/multiarray/ctors.c | 10 +- numpy/_core/src/multiarray/descriptor.c | 2 +- numpy/_core/src/multiarray/dlpack.c | 4 +- numpy/_core/src/multiarray/item_selection.c | 4 +- numpy/_core/src/multiarray/methods.c | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 117 ++++++++---------- numpy/_core/src/multiarray/multiarraymodule.h | 48 +++---- numpy/_core/src/multiarray/shape.c | 4 +- numpy/_core/src/umath/extobj.c | 21 ++-- numpy/_core/src/umath/override.c | 14 +-- numpy/_core/src/umath/umathmodule.c | 2 +- 15 files changed, 119 insertions(+), 127 deletions(-) diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index 5dea748688e9..684178a0f18b 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ 
-186,8 +186,8 @@ static int pyscalar_mode_conv(PyObject *obj, scalar_policy *policy) { PyObject *strings[3] = { - npy_ma_str_convert, npy_ma_str_preserve, - npy_ma_str_convert_if_no_array}; + npy_ma_str->convert, npy_ma_str->preserve, + npy_ma_str->convert_if_no_array}; /* First quick pass using the identity (should practically always match) */ for (int i = 0; i < 3; i++) { diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index 20223e1449fb..8c20d480910a 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -41,7 +41,7 @@ get_array_function(PyObject *obj) return ndarray_array_function; } - PyObject *array_function = PyArray_LookupSpecial(obj, npy_ma_str_array_function); + PyObject *array_function = PyArray_LookupSpecial(obj, npy_ma_str->array_function); if (array_function == NULL && PyErr_Occurred()) { PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ } @@ -175,7 +175,7 @@ array_function_method_impl(PyObject *func, PyObject *types, PyObject *args, } } - PyObject *implementation = PyObject_GetAttr(func, npy_ma_str_implementation); + PyObject *implementation = PyObject_GetAttr(func, npy_ma_str->implementation); if (implementation == NULL) { return NULL; } @@ -321,12 +321,12 @@ array_implement_c_array_function_creation( } /* The like argument must be present in the keyword arguments, remove it */ - if (PyDict_DelItem(kwargs, npy_ma_str_like) < 0) { + if (PyDict_DelItem(kwargs, npy_ma_str->like) < 0) { goto finish; } /* Fetch the actual symbol (the long way right now) */ - numpy_module = PyImport_Import(npy_ma_str_numpy); + numpy_module = PyImport_Import(npy_ma_str->numpy); if (numpy_module == NULL) { goto finish; } diff --git a/numpy/_core/src/multiarray/arraywrap.c b/numpy/_core/src/multiarray/arraywrap.c index 8b37798f983b..1119e62633bf 100644 --- a/numpy/_core/src/multiarray/arraywrap.c +++ b/numpy/_core/src/multiarray/arraywrap.c @@ -57,7 +57,7 @@ npy_find_array_wrap( } } else { - PyObject *new_wrap = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str_array_wrap); + PyObject *new_wrap = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str->array_wrap); if (new_wrap == NULL) { if (PyErr_Occurred()) { goto fail; @@ -160,7 +160,7 @@ npy_apply_wrap( else { /* Replace passed wrap/wrap_type (borrowed refs) with new_wrap/type. 
*/ new_wrap = PyArray_LookupSpecial_OnInstance( - original_out, npy_ma_str_array_wrap); + original_out, npy_ma_str->array_wrap); if (new_wrap != NULL) { wrap = new_wrap; wrap_type = (PyObject *)Py_TYPE(original_out); diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index c30b31a633cc..51e1249c81ed 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -1440,7 +1440,7 @@ PyArray_DeviceConverterOptional(PyObject *object, NPY_DEVICE *device) } if (PyUnicode_Check(object) && - PyUnicode_Compare(object, npy_ma_str_cpu) == 0) { + PyUnicode_Compare(object, npy_ma_str->cpu) == 0) { *device = NPY_DEVICE_CPU; return NPY_SUCCEED; } diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 25319f2f6bf5..a319f3d172c9 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -943,7 +943,7 @@ PyArray_NewFromDescr_int( ndarray_array_finalize = PyObject_GetAttr( (PyObject *)&PyArray_Type, npy_ma_str_array_finalize); } - func = PyObject_GetAttr((PyObject *)subtype, npy_ma_str_array_finalize); + func = PyObject_GetAttr((PyObject *)subtype, npy_ma_str->array_finalize); if (func == NULL) { goto fail; } @@ -2045,7 +2045,7 @@ PyArray_FromStructInterface(PyObject *input) PyObject *attr; char endian = NPY_NATBYTE; - attr = PyArray_LookupSpecial_OnInstance(input, npy_ma_str_array_struct); + attr = PyArray_LookupSpecial_OnInstance(input, npy_ma_str->array_struct); if (attr == NULL) { if (PyErr_Occurred()) { return NULL; @@ -2169,7 +2169,7 @@ PyArray_FromInterface(PyObject *origin) npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS]; int dataflags = NPY_ARRAY_BEHAVED; - iface = PyArray_LookupSpecial_OnInstance(origin, npy_ma_str_array_interface); + iface = PyArray_LookupSpecial_OnInstance(origin, npy_ma_str->array_interface); if (iface == NULL) { if (PyErr_Occurred()) { @@ -2472,7 +2472,7 @@ check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames) goto restore_error; } int copy_kwarg_unsupported = PyUnicode_Contains( - str_value, npy_ma_str_array_err_msg_substr); + str_value, npy_ma_str->array_err_msg_substr); Py_DECREF(str_value); if (copy_kwarg_unsupported == -1) { goto restore_error; @@ -2524,7 +2524,7 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, PyObject *new; PyObject *array_meth; - array_meth = PyArray_LookupSpecial_OnInstance(op, npy_ma_str_array); + array_meth = PyArray_LookupSpecial_OnInstance(op, npy_ma_str->array); if (array_meth == NULL) { if (PyErr_Occurred()) { return NULL; diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 548ecd9d8df1..a3ee62da427d 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2717,7 +2717,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) Py_DECREF(ret); return NULL; } - obj = PyObject_GetAttr(mod, npy_ma_str_dtype); + obj = PyObject_GetAttr(mod, npy_ma_str->dtype); Py_DECREF(mod); if (obj == NULL) { Py_DECREF(ret); diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 05935f608a29..43d08711b82b 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -549,7 +549,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *capsule = PyObject_VectorcallMethod( - npy_ma_str___dlpack__, call_args, nargsf, call_kwnames); + npy_ma_str->__dlpack__, call_args, nargsf, 
call_kwnames); if (capsule == NULL) { /* * TODO: This path should be deprecated in NumPy 2.1. Once deprecated @@ -563,7 +563,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), /* max_version may be unsupported, try without kwargs */ PyErr_Clear(); capsule = PyObject_VectorcallMethod( - npy_ma_str___dlpack__, call_args, nargsf, NULL); + npy_ma_str->__dlpack__, call_args, nargsf, NULL); } if (capsule == NULL) { return NULL; diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index b4943851938d..239b4d8e3d1c 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -2262,10 +2262,10 @@ PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2) } /* Handle negative axes with standard Python indexing rules */ - if (check_and_adjust_axis_msg(&axis1, ndim, npy_ma_str_axis1) < 0) { + if (check_and_adjust_axis_msg(&axis1, ndim, npy_ma_str->axis1) < 0) { return NULL; } - if (check_and_adjust_axis_msg(&axis2, ndim, npy_ma_str_axis2) < 0) { + if (check_and_adjust_axis_msg(&axis2, ndim, npy_ma_str->axis2) < 0) { return NULL; } if (axis1 == axis2) { diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index da9bd30c8b10..d275199e8638 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1044,7 +1044,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) } Py_DECREF(out_kwd_obj); /* check where if it exists */ - where_obj = PyDict_GetItemWithError(kwds, npy_ma_str_where); + where_obj = PyDict_GetItemWithError(kwds, npy_ma_str->where); if (where_obj == NULL) { if (PyErr_Occurred()) { return -1; diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 993258f4543b..f08f8070f818 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -136,7 +136,7 @@ PyArray_GetPriority(PyObject *obj, double default_) return NPY_SCALAR_PRIORITY; } - ret = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str_array_priority); + ret = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str->array_priority); if (ret == NULL) { if (PyErr_Occurred()) { /* TODO[gh-14801]: propagate crashes during attribute access? */ @@ -3493,7 +3493,7 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), * weak-promotion branch is in practice identical to dtype one. 
*/ if (get_npy_promotion_state() == NPY_USE_WEAK_PROMOTION) { - PyObject *descr = PyObject_GetAttr(from_obj, npy_ma_str_dtype); + PyObject *descr = PyObject_GetAttr(from_obj, npy_ma_str->dtype); if (descr == NULL) { goto finish; } @@ -4771,115 +4771,102 @@ set_flaginfo(PyObject *d) return; } -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_current_allocator = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_function = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_struct = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_interface = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_priority = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_wrap = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_finalize = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_implementation = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis1 = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis2 = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_like = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_numpy = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_where = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_convert = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_preserve = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_convert_if_no_array = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_cpu = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_err_msg_substr = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str___dlpack__ = NULL; +NPY_VISIBILITY_HIDDEN npy_ma_str_struct *npy_ma_str = NULL; static int intern_strings(void) { - npy_ma_str_current_allocator = PyUnicode_InternFromString("current_allocator"); - if (npy_ma_str_current_allocator == NULL) { + // this is module-level global heap allocation, it is currently + // never freed + npy_ma_str = PyMem_Calloc(sizeof(npy_ma_str_struct), 1); + npy_ma_str->current_allocator = PyUnicode_InternFromString("current_allocator"); + if (npy_ma_str->current_allocator == NULL) { return -1; } - npy_ma_str_array = PyUnicode_InternFromString("__array__"); - if (npy_ma_str_array == NULL) { + npy_ma_str->array = PyUnicode_InternFromString("__array__"); + if (npy_ma_str->array == NULL) { return -1; } - npy_ma_str_array_function = PyUnicode_InternFromString("__array_function__"); - if (npy_ma_str_array_function == NULL) { + npy_ma_str->array_function = PyUnicode_InternFromString("__array_function__"); + if (npy_ma_str->array_function == NULL) { return -1; } - npy_ma_str_array_struct = PyUnicode_InternFromString("__array_struct__"); - if (npy_ma_str_array_struct == NULL) { + npy_ma_str->array_struct = PyUnicode_InternFromString("__array_struct__"); + if (npy_ma_str->array_struct == NULL) { return -1; } - npy_ma_str_array_priority = PyUnicode_InternFromString("__array_priority__"); - if (npy_ma_str_array_priority == NULL) { + npy_ma_str->array_priority = PyUnicode_InternFromString("__array_priority__"); + if (npy_ma_str->array_priority == NULL) { return -1; } - npy_ma_str_array_interface = PyUnicode_InternFromString("__array_interface__"); - if (npy_ma_str_array_interface == NULL) { + npy_ma_str->array_interface = PyUnicode_InternFromString("__array_interface__"); + if (npy_ma_str->array_interface == NULL) { return -1; } - npy_ma_str_array_wrap = PyUnicode_InternFromString("__array_wrap__"); - if (npy_ma_str_array_wrap == NULL) { + npy_ma_str->array_wrap = 
PyUnicode_InternFromString("__array_wrap__"); + if (npy_ma_str->array_wrap == NULL) { return -1; } - npy_ma_str_array_finalize = PyUnicode_InternFromString("__array_finalize__"); - if (npy_ma_str_array_finalize == NULL) { + npy_ma_str->array_finalize = PyUnicode_InternFromString("__array_finalize__"); + if (npy_ma_str->array_finalize == NULL) { return -1; } - npy_ma_str_implementation = PyUnicode_InternFromString("_implementation"); - if (npy_ma_str_implementation == NULL) { + npy_ma_str->implementation = PyUnicode_InternFromString("_implementation"); + if (npy_ma_str->implementation == NULL) { return -1; } - npy_ma_str_axis1 = PyUnicode_InternFromString("axis1"); - if (npy_ma_str_axis1 == NULL) { + npy_ma_str->axis1 = PyUnicode_InternFromString("axis1"); + if (npy_ma_str->axis1 == NULL) { return -1; } - npy_ma_str_axis2 = PyUnicode_InternFromString("axis2"); - if (npy_ma_str_axis2 == NULL) { + npy_ma_str->axis2 = PyUnicode_InternFromString("axis2"); + if (npy_ma_str->axis2 == NULL) { return -1; } - npy_ma_str_like = PyUnicode_InternFromString("like"); - if (npy_ma_str_like == NULL) { + npy_ma_str->like = PyUnicode_InternFromString("like"); + if (npy_ma_str->like == NULL) { return -1; } - npy_ma_str_numpy = PyUnicode_InternFromString("numpy"); - if (npy_ma_str_numpy == NULL) { + npy_ma_str->numpy = PyUnicode_InternFromString("numpy"); + if (npy_ma_str->numpy == NULL) { return -1; } - npy_ma_str_where = PyUnicode_InternFromString("where"); - if (npy_ma_str_where == NULL) { + npy_ma_str->where = PyUnicode_InternFromString("where"); + if (npy_ma_str->where == NULL) { return -1; } /* scalar policies */ - npy_ma_str_convert = PyUnicode_InternFromString("convert"); - if (npy_ma_str_convert == NULL) { + npy_ma_str->convert = PyUnicode_InternFromString("convert"); + if (npy_ma_str->convert == NULL) { return -1; } - npy_ma_str_preserve = PyUnicode_InternFromString("preserve"); - if (npy_ma_str_preserve == NULL) { + npy_ma_str->preserve = PyUnicode_InternFromString("preserve"); + if (npy_ma_str->preserve == NULL) { return -1; } - npy_ma_str_convert_if_no_array = PyUnicode_InternFromString("convert_if_no_array"); - if (npy_ma_str_convert_if_no_array == NULL) { + npy_ma_str->convert_if_no_array = PyUnicode_InternFromString("convert_if_no_array"); + if (npy_ma_str->convert_if_no_array == NULL) { return -1; } - npy_ma_str_cpu = PyUnicode_InternFromString("cpu"); - if (npy_ma_str_cpu == NULL) { + npy_ma_str->cpu = PyUnicode_InternFromString("cpu"); + if (npy_ma_str->cpu == NULL) { return -1; } - npy_ma_str_dtype = PyUnicode_InternFromString("dtype"); - if (npy_ma_str_dtype == NULL) { + npy_ma_str->dtype = PyUnicode_InternFromString("dtype"); + if (npy_ma_str->dtype == NULL) { return -1; } - npy_ma_str_array_err_msg_substr = PyUnicode_InternFromString( + npy_ma_str->array_err_msg_substr = PyUnicode_InternFromString( "__array__() got an unexpected keyword argument 'copy'"); - if (npy_ma_str_array_err_msg_substr == NULL) { + if (npy_ma_str->array_err_msg_substr == NULL) { return -1; } - npy_ma_str___dlpack__ = PyUnicode_InternFromString("__dlpack__"); - if (npy_ma_str___dlpack__ == NULL) { + npy_ma_str->out = PyUnicode_InternFromString("out"); + if (npy_ma_str->out == NULL) { + return -1; + } + npy_ma_str->__dlpack__ = PyUnicode_InternFromString("__dlpack__"); + if (npy_ma_str->__dlpack__ == NULL) { return -1; } return 0; diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index b3f15686dfe0..0a7238633fd2 100644 --- 
a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -1,26 +1,32 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ #define NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_current_allocator; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_function; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_struct; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_priority; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_interface; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_wrap; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_finalize; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_implementation; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis1; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis2; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_like; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_numpy; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_where; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_preserve; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert_if_no_array; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_cpu; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_err_msg_substr; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str___dlpack__; +typedef struct npy_ma_str_struct { + PyObject *current_allocator; + PyObject *array; + PyObject *array_function; + PyObject *array_struct; + PyObject *array_priority; + PyObject *array_interface; + PyObject *array_wrap; + PyObject *array_finalize; + PyObject *implementation; + PyObject *axis1; + PyObject *axis2; + PyObject *like; + PyObject *numpy; + PyObject *where; + PyObject *convert; + PyObject *preserve; + PyObject *convert_if_no_array; + PyObject *cpu; + PyObject *dtype; + PyObject *array_err_msg_substr; + PyObject *out; + PyObject *errmode_strings[6]; + PyObject *__dlpack__; +} npy_ma_str_struct; + +NPY_VISIBILITY_HIDDEN extern npy_ma_str_struct *npy_ma_str; #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index e766a61ed12f..ab4003d8dbae 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -668,10 +668,10 @@ PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) int n = PyArray_NDIM(ap); int i; - if (check_and_adjust_axis_msg(&a1, n, npy_ma_str_axis1) < 0) { + if (check_and_adjust_axis_msg(&a1, n, npy_ma_str->axis1) < 0) { return NULL; } - if (check_and_adjust_axis_msg(&a2, n, npy_ma_str_axis2) < 0) { + if (check_and_adjust_axis_msg(&a2, n, npy_ma_str->axis2) < 0) { return NULL; } diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index d32feaaa31da..966b633a6ea5 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -47,7 +47,6 @@ NPY_NO_EXPORT PyObject *npy_extobj_contextvar = NULL; /* The python strings for the above error modes defined in extobj.h */ const char *errmode_cstrings[] = { "ignore", "warn", "raise", "call", "print", "log"}; -static PyObject *errmode_strings[6] = {NULL}; /* Default user error mode (underflows are ignored, others warn) */ #define UFUNC_ERR_DEFAULT \ @@ -158,8 +157,9 @@ init_extobj(void) * inputs. 
*/ for (int i = 0; i <= UFUNC_ERR_LOG; i++) { - errmode_strings[i] = PyUnicode_InternFromString(errmode_cstrings[i]); - if (errmode_strings[i] == NULL) { + npy_ma_str->errmode_strings[i] = PyUnicode_InternFromString( + errmode_cstrings[i]); + if (npy_ma_str->errmode_strings[i] == NULL) { return -1; } } @@ -191,7 +191,8 @@ errmodeconverter(PyObject *obj, int *mode) } int i = 0; for (; i <= UFUNC_ERR_LOG; i++) { - int eq = PyObject_RichCompareBool(obj, errmode_strings[i], Py_EQ); + int eq = PyObject_RichCompareBool( + obj, npy_ma_str->errmode_strings[i], Py_EQ); if (eq == -1) { return 0; } @@ -338,19 +339,23 @@ extobj_get_extobj_dict(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(noarg)) } /* Set all error modes: */ mode = (extobj.errmask & UFUNC_MASK_DIVIDEBYZERO) >> UFUNC_SHIFT_DIVIDEBYZERO; - if (PyDict_SetItemString(result, "divide", errmode_strings[mode]) < 0) { + if (PyDict_SetItemString(result, "divide", + npy_ma_str->errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_OVERFLOW) >> UFUNC_SHIFT_OVERFLOW; - if (PyDict_SetItemString(result, "over", errmode_strings[mode]) < 0) { + if (PyDict_SetItemString(result, "over", + npy_ma_str->errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_UNDERFLOW) >> UFUNC_SHIFT_UNDERFLOW; - if (PyDict_SetItemString(result, "under", errmode_strings[mode]) < 0) { + if (PyDict_SetItemString(result, "under", + npy_ma_str->errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_INVALID) >> UFUNC_SHIFT_INVALID; - if (PyDict_SetItemString(result, "invalid", errmode_strings[mode]) < 0) { + if (PyDict_SetItemString(result, "invalid", + npy_ma_str->errmode_strings[mode]) < 0) { goto fail; } diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index d10b86be7b57..717422286e01 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -4,6 +4,7 @@ #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" #include "npy_import.h" +#include "multiarraymodule.h" #include "npy_pycompat.h" #include "override.h" #include "ufunc_override.h" @@ -110,29 +111,22 @@ initialize_normal_kwds(PyObject *out_args, } } } - static PyObject *out_str = NULL; - if (out_str == NULL) { - out_str = PyUnicode_InternFromString("out"); - if (out_str == NULL) { - return -1; - } - } if (out_args != NULL) { /* Replace `out` argument with the normalized version */ - int res = PyDict_SetItem(normal_kwds, out_str, out_args); + int res = PyDict_SetItem(normal_kwds, npy_ma_str->out, out_args); if (res < 0) { return -1; } } else { /* Ensure that `out` is not present. 
*/ - int res = PyDict_Contains(normal_kwds, out_str); + int res = PyDict_Contains(normal_kwds, npy_ma_str->out); if (res < 0) { return -1; } if (res) { - return PyDict_DelItem(normal_kwds, out_str); + return PyDict_DelItem(normal_kwds, npy_ma_str->out); } } return 0; diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index 7c774f9fffc3..b3aebff2c845 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -295,7 +295,7 @@ int initumath(PyObject *m) if (intern_strings() < 0) { PyErr_SetString(PyExc_RuntimeError, - "cannot intern umath strings while initializing _multiarray_umath."); + "cannot intern strings while initializing _multiarray_umath."); return -1; } From 69075c1d4d2ab1d4c72583a2682998032f5f9172 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 21 May 2024 15:02:18 -0600 Subject: [PATCH 591/980] MNT: move cached imports into a global struct --- numpy/_core/src/common/npy_ctypes.h | 10 +- .../src/multiarray/arrayfunction_override.c | 8 +- numpy/_core/src/multiarray/arrayobject.c | 3 +- numpy/_core/src/multiarray/common.h | 20 +--- numpy/_core/src/multiarray/common_dtype.c | 5 +- numpy/_core/src/multiarray/conversion_utils.c | 15 +-- numpy/_core/src/multiarray/convert_datatype.c | 30 +---- numpy/_core/src/multiarray/convert_datatype.h | 4 - numpy/_core/src/multiarray/ctors.c | 11 +- numpy/_core/src/multiarray/descriptor.c | 8 +- numpy/_core/src/multiarray/dtypemeta.c | 22 ++-- numpy/_core/src/multiarray/getset.c | 10 +- numpy/_core/src/multiarray/methods.c | 71 ++++++----- numpy/_core/src/multiarray/methods.h | 17 +-- numpy/_core/src/multiarray/multiarraymodule.c | 113 +++++++++++++----- numpy/_core/src/multiarray/multiarraymodule.h | 77 ++++++++++++ numpy/_core/src/multiarray/number.c | 8 +- numpy/_core/src/multiarray/scalartypes.c.src | 17 +-- numpy/_core/src/multiarray/strfuncs.c | 16 ++- .../_core/src/multiarray/stringdtype/dtype.c | 16 +-- numpy/_core/src/umath/dispatching.c | 2 +- numpy/_core/src/umath/funcs.inc.src | 48 +++----- numpy/_core/src/umath/override.c | 26 ++-- numpy/_core/src/umath/scalarmath.c.src | 7 +- numpy/_core/src/umath/ufunc_object.c | 29 ++--- numpy/_core/src/umath/ufunc_type_resolution.c | 47 ++------ numpy/_core/src/umath/umathmodule.c | 1 + 27 files changed, 326 insertions(+), 315 deletions(-) diff --git a/numpy/_core/src/common/npy_ctypes.h b/numpy/_core/src/common/npy_ctypes.h index 578de06397bd..d7d350ec052b 100644 --- a/numpy/_core/src/common/npy_ctypes.h +++ b/numpy/_core/src/common/npy_ctypes.h @@ -4,6 +4,7 @@ #include #include "npy_import.h" +#include "multiarraymodule.h" /* * Check if a python type is a ctypes class. 
@@ -17,16 +18,17 @@ static inline int npy_ctypes_check(PyTypeObject *obj) { - static PyObject *py_func = NULL; PyObject *ret_obj; int ret; - npy_cache_import("numpy._core._internal", "npy_ctypes_check", &py_func); - if (py_func == NULL) { + npy_cache_import("numpy._core._internal", "npy_ctypes_check", + &npy_ma_global_data->npy_ctypes_check); + if (npy_ma_global_data->npy_ctypes_check == NULL) { goto fail; } - ret_obj = PyObject_CallFunctionObjArgs(py_func, (PyObject *)obj, NULL); + ret_obj = PyObject_CallFunctionObjArgs(npy_ma_global_data->npy_ctypes_check, + (PyObject *)obj, NULL); if (ret_obj == NULL) { goto fail; } diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index 8c20d480910a..aebcb7986a7b 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -252,14 +252,14 @@ get_args_and_kwargs( static void set_no_matching_types_error(PyObject *public_api, PyObject *types) { - static PyObject *errmsg_formatter = NULL; /* No acceptable override found, raise TypeError. */ npy_cache_import("numpy._core._internal", "array_function_errmsg_formatter", - &errmsg_formatter); - if (errmsg_formatter != NULL) { + &npy_ma_global_data->array_function_errmsg_formatter); + if (npy_ma_global_data->array_function_errmsg_formatter != NULL) { PyObject *errmsg = PyObject_CallFunctionObjArgs( - errmsg_formatter, public_api, types, NULL); + npy_ma_global_data->array_function_errmsg_formatter, + public_api, types, NULL); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); Py_DECREF(errmsg); diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 5139bc8b4f00..378b3e0d9c74 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -927,7 +927,8 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) */ if (result == NULL && (cmp_op == Py_EQ || cmp_op == Py_NE) - && PyErr_ExceptionMatches(npy_UFuncNoLoopError)) { + && PyErr_ExceptionMatches( + npy_ma_global_data->_UFuncNoLoopError)) { PyErr_Clear(); PyArrayObject *array_other = (PyArrayObject *)PyArray_FROM_O(other); diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index 1a01224b1670..9f6fdf32ca98 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -8,6 +8,7 @@ #include "npy_cpu_dispatch.h" #include "numpy/npy_cpu.h" +#include "multiarraymodule.h" #include "npy_import.h" #include @@ -139,25 +140,14 @@ check_and_adjust_axis_msg(int *axis, int ndim, PyObject *msg_prefix) { /* Check that index is valid, taking into account negative indices */ if (NPY_UNLIKELY((*axis < -ndim) || (*axis >= ndim))) { - /* - * Load the exception type, if we don't already have it. 
Unfortunately - * we don't have access to npy_cache_import here - */ - static PyObject *AxisError_cls = NULL; - PyObject *exc; - - npy_cache_import("numpy.exceptions", "AxisError", &AxisError_cls); - if (AxisError_cls == NULL) { - return -1; - } - /* Invoke the AxisError constructor */ - exc = PyObject_CallFunction(AxisError_cls, "iiO", - *axis, ndim, msg_prefix); + PyObject *exc = PyObject_CallFunction( + npy_ma_global_data->AxisError, "iiO", *axis, ndim, + msg_prefix); if (exc == NULL) { return -1; } - PyErr_SetObject(AxisError_cls, exc); + PyErr_SetObject(npy_ma_global_data->AxisError, exc); Py_DECREF(exc); return -1; diff --git a/numpy/_core/src/multiarray/common_dtype.c b/numpy/_core/src/multiarray/common_dtype.c index f2ec41e0c7aa..94deddec2c80 100644 --- a/numpy/_core/src/multiarray/common_dtype.c +++ b/numpy/_core/src/multiarray/common_dtype.c @@ -10,6 +10,7 @@ #include "convert_datatype.h" #include "dtypemeta.h" #include "abstractdtypes.h" +#include "multiarraymodule.h" /* @@ -63,7 +64,7 @@ PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) } if (common_dtype == (PyArray_DTypeMeta *)Py_NotImplemented) { Py_DECREF(Py_NotImplemented); - PyErr_Format(npy_DTypePromotionError, + PyErr_Format(npy_ma_global_data->DTypePromotionError, "The DTypes %S and %S do not have a common DType. " "For example they cannot be stored in a single array unless " "the dtype is `object`.", dtype1, dtype2); @@ -284,7 +285,7 @@ PyArray_PromoteDTypeSequence( Py_INCREF(dtypes_in[l]); PyTuple_SET_ITEM(dtypes_in_tuple, l, (PyObject *)dtypes_in[l]); } - PyErr_Format(npy_DTypePromotionError, + PyErr_Format(npy_ma_global_data->DTypePromotionError, "The DType %S could not be promoted by %S. This means that " "no common DType exists for the given inputs. " "For example they cannot be stored in a single array unless " diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 51e1249c81ed..b5bdfce09486 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -234,10 +234,8 @@ PyArray_CopyConverter(PyObject *obj, NPY_COPYMODE *copymode) { } int int_copymode; - static PyObject* numpy_CopyMode = NULL; - npy_cache_import("numpy", "_CopyMode", &numpy_CopyMode); - if (numpy_CopyMode != NULL && (PyObject *)Py_TYPE(obj) == numpy_CopyMode) { + if ((PyObject *)Py_TYPE(obj) == npy_ma_global_data->_CopyMode) { PyObject* mode_value = PyObject_GetAttrString(obj, "value"); if (mode_value == NULL) { return NPY_FAIL; @@ -271,10 +269,8 @@ NPY_NO_EXPORT int PyArray_AsTypeCopyConverter(PyObject *obj, NPY_ASTYPECOPYMODE *copymode) { int int_copymode; - static PyObject* numpy_CopyMode = NULL; - npy_cache_import("numpy", "_CopyMode", &numpy_CopyMode); - if (numpy_CopyMode != NULL && (PyObject *)Py_TYPE(obj) == numpy_CopyMode) { + if ((PyObject *)Py_TYPE(obj) == npy_ma_global_data->_CopyMode) { PyErr_SetString(PyExc_ValueError, "_CopyMode enum is not allowed for astype function. 
" "Use true/false instead."); @@ -1415,12 +1411,7 @@ PyArray_IntTupleFromIntp(int len, npy_intp const *vals) NPY_NO_EXPORT int _not_NoValue(PyObject *obj, PyObject **out) { - static PyObject *NoValue = NULL; - npy_cache_import("numpy", "_NoValue", &NoValue); - if (NoValue == NULL) { - return 0; - } - if (obj == NoValue) { + if (obj == npy_ma_global_data->_NoValue) { *out = NULL; } else { diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 5711bce7bc08..423ac456ef3a 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -48,14 +48,6 @@ */ NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[] = {0, 3, 5, 10, 10, 20, 20, 20, 20}; -/* - * Whether or not legacy value-based promotion/casting is used. - */ - -NPY_NO_EXPORT PyObject *NO_NEP50_WARNING_CTX = NULL; -NPY_NO_EXPORT PyObject *npy_DTypePromotionError = NULL; -NPY_NO_EXPORT PyObject *npy_UFuncNoLoopError = NULL; - static NPY_TLS int npy_promotion_state = NPY_USE_LEGACY_PROMOTION; NPY_NO_EXPORT int @@ -92,13 +84,14 @@ npy_give_promotion_warnings(void) npy_cache_import( "numpy._core._ufunc_config", "NO_NEP50_WARNING", - &NO_NEP50_WARNING_CTX); - if (NO_NEP50_WARNING_CTX == NULL) { + &npy_ma_global_data->NO_NEP50_WARNING); + if (npy_ma_global_data->NO_NEP50_WARNING == NULL) { PyErr_WriteUnraisable(NULL); return 1; } - if (PyContextVar_Get(NO_NEP50_WARNING_CTX, Py_False, &val) < 0) { + if (PyContextVar_Get(npy_ma_global_data->NO_NEP50_WARNING, + Py_False, &val) < 0) { /* Errors should not really happen, but if it does assume we warn. */ PyErr_WriteUnraisable(NULL); return 1; @@ -409,12 +402,7 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) !PyTypeNum_ISCOMPLEX(type_num) && PyTypeNum_ISNUMBER(type_num) && !PyTypeNum_ISBOOL(type_num)) { - static PyObject *cls = NULL; - npy_cache_import("numpy.exceptions", "ComplexWarning", &cls); - if (cls == NULL) { - return NULL; - } - int ret = PyErr_WarnEx(cls, + int ret = PyErr_WarnEx(npy_ma_global_data->ComplexWarning, "Casting complex values to real discards " "the imaginary part", 1); if (ret < 0) { @@ -2638,13 +2626,7 @@ complex_to_noncomplex_get_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - static PyObject *cls = NULL; - int ret; - npy_cache_import("numpy.exceptions", "ComplexWarning", &cls); - if (cls == NULL) { - return -1; - } - ret = PyErr_WarnEx(cls, + int ret = PyErr_WarnEx(npy_ma_global_data->ComplexWarning, "Casting complex values to real discards " "the imaginary part", 1); if (ret < 0) { diff --git a/numpy/_core/src/multiarray/convert_datatype.h b/numpy/_core/src/multiarray/convert_datatype.h index 02f25ad0b383..f848ad3b4c8e 100644 --- a/numpy/_core/src/multiarray/convert_datatype.h +++ b/numpy/_core/src/multiarray/convert_datatype.h @@ -13,10 +13,6 @@ extern NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[]; #define NPY_USE_WEAK_PROMOTION 1 #define NPY_USE_WEAK_PROMOTION_AND_WARN 2 -extern NPY_NO_EXPORT PyObject *NO_NEP50_WARNING_CTX; -extern NPY_NO_EXPORT PyObject *npy_DTypePromotionError; -extern NPY_NO_EXPORT PyObject *npy_UFuncNoLoopError; - NPY_NO_EXPORT int npy_give_promotion_warnings(void); diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index a319f3d172c9..0fc181ca506a 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -611,15 +611,6 @@ PyArray_AssignFromCache(PyArrayObject *self, coercion_cache_obj *cache) { static void 
raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) { - static PyObject *exc_type = NULL; - - npy_cache_import( - "numpy._core._exceptions", "_ArrayMemoryError", - &exc_type); - if (exc_type == NULL) { - goto fail; - } - PyObject *shape = PyArray_IntTupleFromIntp(nd, dims); if (shape == NULL) { goto fail; @@ -631,7 +622,7 @@ raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) if (exc_value == NULL){ goto fail; } - PyErr_SetObject(exc_type, exc_value); + PyErr_SetObject(npy_ma_global_data->_ArrayMemoryError, exc_value); Py_DECREF(exc_value); return; diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index a3ee62da427d..449a786aafe8 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -724,13 +724,13 @@ _convert_from_commastring(PyObject *obj, int align) { PyObject *parsed; PyArray_Descr *res; - static PyObject *_commastring = NULL; assert(PyUnicode_Check(obj)); - npy_cache_import("numpy._core._internal", "_commastring", &_commastring); - if (_commastring == NULL) { + npy_cache_import("numpy._core._internal", "_commastring", + &npy_ma_global_data->_commastring); + if (npy_ma_global_data->_commastring == NULL) { return NULL; } - parsed = PyObject_CallOneArg(_commastring, obj); + parsed = PyObject_CallOneArg(npy_ma_global_data->_commastring, obj); if (parsed == NULL) { return NULL; } diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 062243aa1402..995841a2e1d3 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -752,7 +752,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->subarray == NULL && descr1->names == NULL && descr2->subarray == NULL && descr2->names == NULL) { if (descr1->elsize != descr2->elsize) { - PyErr_SetString(npy_DTypePromotionError, + PyErr_SetString(npy_ma_global_data->DTypePromotionError, "Invalid type promotion with void datatypes of different " "lengths. 
Use the `np.bytes_` datatype instead to pad the " "shorter value with trailing zero bytes."); @@ -764,13 +764,13 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->names != NULL && descr2->names != NULL) { /* If both have fields promoting individual fields may be possible */ - static PyObject *promote_fields_func = NULL; npy_cache_import("numpy._core._internal", "_promote_fields", - &promote_fields_func); - if (promote_fields_func == NULL) { + &npy_ma_global_data->_promote_fields); + if (npy_ma_global_data->_promote_fields == NULL) { return NULL; } - PyObject *result = PyObject_CallFunctionObjArgs(promote_fields_func, + PyObject *result = PyObject_CallFunctionObjArgs( + npy_ma_global_data->_promote_fields, descr1, descr2, NULL); if (result == NULL) { return NULL; @@ -791,7 +791,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return NULL; } if (!cmp) { - PyErr_SetString(npy_DTypePromotionError, + PyErr_SetString(npy_ma_global_data->DTypePromotionError, "invalid type promotion with subarray datatypes " "(shape mismatch)."); return NULL; @@ -821,7 +821,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return new_descr; } - PyErr_SetString(npy_DTypePromotionError, + PyErr_SetString(npy_ma_global_data->DTypePromotionError, "invalid type promotion with structured datatype(s)."); return NULL; } @@ -1238,14 +1238,14 @@ dtypemeta_wrap_legacy_descriptor( /* And it to the types submodule if it is a builtin dtype */ if (!PyTypeNum_ISUSERDEF(descr->type_num)) { - static PyObject *add_dtype_helper = NULL; - npy_cache_import("numpy.dtypes", "_add_dtype_helper", &add_dtype_helper); - if (add_dtype_helper == NULL) { + npy_cache_import("numpy.dtypes", "_add_dtype_helper", + &npy_ma_global_data->_add_dtype_helper); + if (npy_ma_global_data->_add_dtype_helper == NULL) { return -1; } if (PyObject_CallFunction( - add_dtype_helper, + npy_ma_global_data->_add_dtype_helper, "Os", (PyObject *)dtype_class, alias) == NULL) { return -1; } diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index d18463f27bb5..83e540003b07 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -385,16 +385,16 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) /* check that we are not reinterpreting memory containing Objects. */ if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(newtype)) { - static PyObject *checkfunc = NULL; PyObject *safe; - npy_cache_import("numpy._core._internal", "_view_is_safe", &checkfunc); - if (checkfunc == NULL) { + npy_cache_import("numpy._core._internal", "_view_is_safe", + &npy_ma_global_data->_view_is_safe); + if (npy_ma_global_data->_view_is_safe == NULL) { goto fail; } - safe = PyObject_CallFunction(checkfunc, "OO", - PyArray_DESCR(self), newtype); + safe = PyObject_CallFunction(npy_ma_global_data->_view_is_safe, + "OO", PyArray_DESCR(self), newtype); if (safe == NULL) { goto fail; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index d275199e8638..7aeaaeb50ac3 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -111,13 +111,15 @@ npy_forward_method( * initialization is not thread-safe, but relies on the CPython GIL to * be correct. 
*/ -#define NPY_FORWARD_NDARRAY_METHOD(name) \ - static PyObject *callable = NULL; \ - npy_cache_import("numpy._core._methods", name, &callable); \ - if (callable == NULL) { \ - return NULL; \ - } \ - return npy_forward_method(callable, (PyObject *)self, args, len_args, kwnames) +#define NPY_FORWARD_NDARRAY_METHOD(name) \ + npy_cache_import( \ + "numpy._core._methods", #name, \ + &npy_ma_global_data->name); \ + if (npy_ma_global_data->name == NULL) { \ + return NULL; \ + } \ + return npy_forward_method(npy_ma_global_data->name, \ + (PyObject *)self, args, len_args, kwnames) static PyObject * @@ -356,14 +358,14 @@ static PyObject * array_max(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_amax"); + NPY_FORWARD_NDARRAY_METHOD(_amax); } static PyObject * array_min(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_amin"); + NPY_FORWARD_NDARRAY_METHOD(_amin); } static PyObject * @@ -387,7 +389,6 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) { PyObject *ret = NULL; PyObject *safe; - static PyObject *checkfunc = NULL; int self_elsize, typed_elsize; if (self == NULL) { @@ -405,14 +406,15 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) /* check that we are not reinterpreting memory containing Objects. */ if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(typed)) { npy_cache_import("numpy._core._internal", "_getfield_is_safe", - &checkfunc); - if (checkfunc == NULL) { + &npy_ma_global_data->_getfield_is_safe); + if (npy_ma_global_data->_getfield_is_safe == NULL) { Py_DECREF(typed); return NULL; } /* only returns True or raises */ - safe = PyObject_CallFunction(checkfunc, "OOi", PyArray_DESCR(self), + safe = PyObject_CallFunction(npy_ma_global_data->_getfield_is_safe, + "OOi", PyArray_DESCR(self), typed, offset); if (safe == NULL) { Py_DECREF(typed); @@ -2244,17 +2246,19 @@ array_setstate(PyArrayObject *self, PyObject *args) NPY_NO_EXPORT int PyArray_Dump(PyObject *self, PyObject *file, int protocol) { - static PyObject *method = NULL; PyObject *ret; - npy_cache_import("numpy._core._methods", "_dump", &method); - if (method == NULL) { + npy_cache_import("numpy._core._methods", "_dump", + &npy_ma_global_data->_dump); + if (npy_ma_global_data->_dump == NULL) { return -1; } if (protocol < 0) { - ret = PyObject_CallFunction(method, "OO", self, file); + ret = PyObject_CallFunction( + npy_ma_global_data->_dump, "OO", self, file); } else { - ret = PyObject_CallFunction(method, "OOi", self, file, protocol); + ret = PyObject_CallFunction( + npy_ma_global_data->_dump, "OOi", self, file, protocol); } if (ret == NULL) { return -1; @@ -2267,16 +2271,17 @@ PyArray_Dump(PyObject *self, PyObject *file, int protocol) NPY_NO_EXPORT PyObject * PyArray_Dumps(PyObject *self, int protocol) { - static PyObject *method = NULL; - npy_cache_import("numpy._core._methods", "_dumps", &method); - if (method == NULL) { + npy_cache_import("numpy._core._methods", "_dumps", + &npy_ma_global_data->_dumps); + if (npy_ma_global_data->_dumps == NULL) { return NULL; } if (protocol < 0) { - return PyObject_CallFunction(method, "O", self); + return PyObject_CallFunction(npy_ma_global_data->_dumps, "O", self); } else { - return PyObject_CallFunction(method, "Oi", self, protocol); + return PyObject_CallFunction( + npy_ma_global_data->_dumps, "Oi", self, protocol); } } @@ -2285,7 +2290,7 @@ static PyObject * array_dump(PyArrayObject *self, 
PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_dump"); + NPY_FORWARD_NDARRAY_METHOD(_dump); } @@ -2293,7 +2298,7 @@ static PyObject * array_dumps(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_dumps"); + NPY_FORWARD_NDARRAY_METHOD(_dumps); } @@ -2345,14 +2350,14 @@ static PyObject * array_mean(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_mean"); + NPY_FORWARD_NDARRAY_METHOD(_mean); } static PyObject * array_sum(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_sum"); + NPY_FORWARD_NDARRAY_METHOD(_sum); } @@ -2382,7 +2387,7 @@ static PyObject * array_prod(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_prod"); + NPY_FORWARD_NDARRAY_METHOD(_prod); } static PyObject * @@ -2442,7 +2447,7 @@ static PyObject * array_any(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_any"); + NPY_FORWARD_NDARRAY_METHOD(_any); } @@ -2450,21 +2455,21 @@ static PyObject * array_all(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_all"); + NPY_FORWARD_NDARRAY_METHOD(_all); } static PyObject * array_stddev(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_std"); + NPY_FORWARD_NDARRAY_METHOD(_std); } static PyObject * array_variance(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_var"); + NPY_FORWARD_NDARRAY_METHOD(_var); } static PyObject * @@ -2545,7 +2550,7 @@ static PyObject * array_clip(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_clip"); + NPY_FORWARD_NDARRAY_METHOD(_clip); } diff --git a/numpy/_core/src/multiarray/methods.h b/numpy/_core/src/multiarray/methods.h index 9d06794de2aa..c2893d3f1161 100644 --- a/numpy/_core/src/multiarray/methods.h +++ b/numpy/_core/src/multiarray/methods.h @@ -1,6 +1,7 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ #define NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ +#include "multiarraymodule.h" #include "npy_import.h" extern NPY_NO_EXPORT PyMethodDef array_methods[]; @@ -13,22 +14,12 @@ extern NPY_NO_EXPORT PyMethodDef array_methods[]; static inline PyObject * NpyPath_PathlikeToFspath(PyObject *file) { - static PyObject *os_PathLike = NULL; - static PyObject *os_fspath = NULL; - npy_cache_import("os", "PathLike", &os_PathLike); - if (os_PathLike == NULL) { - return NULL; - } - npy_cache_import("os", "fspath", &os_fspath); - if (os_fspath == NULL) { - return NULL; - } - - if (!PyObject_IsInstance(file, os_PathLike)) { + if (!PyObject_IsInstance(file, npy_ma_global_data->os_PathLike)) { Py_INCREF(file); return file; } - return PyObject_CallFunctionObjArgs(os_fspath, file, NULL); + return PyObject_CallFunctionObjArgs(npy_ma_global_data->os_fspath, + file, NULL); } #endif /* NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ */ diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index f08f8070f818..c5e0d3d12512 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4265,11 +4265,8 @@ array_shares_memory_impl(PyObject *args, 
PyObject *kwds, Py_ssize_t default_max_
     }
     else if (result == MEM_OVERLAP_TOO_HARD) {
         if (raise_exceptions) {
-            static PyObject *too_hard_cls = NULL;
-            npy_cache_import("numpy.exceptions", "TooHardError", &too_hard_cls);
-            if (too_hard_cls) {
-                PyErr_SetString(too_hard_cls, "Exceeded max_work");
-            }
+            PyErr_SetString(npy_ma_global_data->TooHardError,
+                            "Exceeded max_work");
             return NULL;
         }
         else {
@@ -4772,6 +4769,7 @@ set_flaginfo(PyObject *d)
 }
 
 NPY_VISIBILITY_HIDDEN npy_ma_str_struct *npy_ma_str = NULL;
+NPY_VISIBILITY_HIDDEN npy_ma_global_data_struct *npy_ma_global_data = NULL;
 
 static int
 intern_strings(void)
@@ -4872,33 +4870,92 @@ intern_strings(void)
     return 0;
 }
 
+#define IMPORT_GLOBAL(base_path, name, object)                          \
+    assert(object == NULL);                                             \
+    npy_cache_import(base_path, name, &object);                         \
+    if (object == NULL) {                                               \
+        return -1;                                                      \
+    }
+
 /*
- * Initializes global constants. At some points these need to be cleaned
- * up, and sometimes we also import them where they are needed. But for
- * some things, adding an `npy_cache_import` everywhere seems inconvenient.
+ * Initializes global constants.
+ *
+ * All global constants should live inside the npy_ma_global_data
+ * struct.
+ *
+ * Not all entries in the struct are initialized here; some are
+ * initialized later, and in those cases care must be taken to initialize
+ * the constant in a thread-safe manner, ensuring it is initialized
+ * exactly once.
 *
- * These globals should not need the C-layer at all and will be imported
- * before anything on the C-side is initialized.
+ * Anything initialized here is initialized during module import, which
+ * the Python interpreter ensures is done in a single thread.
+ *
+ * Anything imported here should not need the C-layer at all and will be
+ * imported before anything on the C-side is initialized.
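+ *
+ * Anything imported here can afterwards be read directly off the struct
+ * (e.g. npy_ma_global_data->DTypePromotionError) with no further NULL
+ * check, since module initialization fails if any of these imports fails.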
 */
 static int
 initialize_static_globals(void)
 {
-    assert(npy_DTypePromotionError == NULL);
-    npy_cache_import(
-            "numpy.exceptions", "DTypePromotionError",
-            &npy_DTypePromotionError);
-    if (npy_DTypePromotionError == NULL) {
-        return -1;
-    }
+    // this is a module-level global heap allocation; it is currently
+    // never freed
+    npy_ma_global_data = PyMem_Calloc(1, sizeof(npy_ma_global_data_struct));
 
-    assert(npy_UFuncNoLoopError == NULL);
-    npy_cache_import(
-            "numpy._core._exceptions", "_UFuncNoLoopError",
-            &npy_UFuncNoLoopError);
-    if (npy_UFuncNoLoopError == NULL) {
-        return -1;
-    }
+    // cached references to objects defined in python
+
+    IMPORT_GLOBAL("math", "floor",
+                  npy_ma_global_data->math_floor_func);
+
+    IMPORT_GLOBAL("math", "ceil",
+                  npy_ma_global_data->math_ceil_func);
+
+    IMPORT_GLOBAL("math", "trunc",
+                  npy_ma_global_data->math_trunc_func);
+
+    IMPORT_GLOBAL("math", "gcd",
+                  npy_ma_global_data->math_gcd_func);
+
+    IMPORT_GLOBAL("numpy.exceptions", "AxisError",
+                  npy_ma_global_data->AxisError);
+
+    IMPORT_GLOBAL("numpy.exceptions", "ComplexWarning",
+                  npy_ma_global_data->ComplexWarning);
+
+    IMPORT_GLOBAL("numpy.exceptions", "DTypePromotionError",
+                  npy_ma_global_data->DTypePromotionError);
+
+    IMPORT_GLOBAL("numpy.exceptions", "TooHardError",
+                  npy_ma_global_data->TooHardError);
+
+    IMPORT_GLOBAL("numpy.exceptions", "VisibleDeprecationWarning",
+                  npy_ma_global_data->VisibleDeprecationWarning);
+
+    IMPORT_GLOBAL("numpy._globals", "_CopyMode",
+                  npy_ma_global_data->_CopyMode);
+
+    IMPORT_GLOBAL("numpy._globals", "_NoValue",
+                  npy_ma_global_data->_NoValue);
+
+    IMPORT_GLOBAL("numpy._core._exceptions", "_ArrayMemoryError",
+                  npy_ma_global_data->_ArrayMemoryError);
+
+    IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncBinaryResolutionError",
+                  npy_ma_global_data->_UFuncBinaryResolutionError);
+
+    IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncInputCastingError",
+                  npy_ma_global_data->_UFuncInputCastingError);
+
+    IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncNoLoopError",
+                  npy_ma_global_data->_UFuncNoLoopError);
+
+    IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncOutputCastingError",
+                  npy_ma_global_data->_UFuncOutputCastingError);
+
+    IMPORT_GLOBAL("os", "fspath",
+                  npy_ma_global_data->os_fspath);
+
+    IMPORT_GLOBAL("os", "PathLike",
+                  npy_ma_global_data->os_PathLike);
 
     char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY");
     if ((env != NULL) && (strncmp(env, "1", 1) == 0)) {
@@ -5182,14 +5239,14 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) {
      * init_string_dtype() but that needs to happen after
      * the legacy dtypemeta classes are available.
 */
-    static PyObject *add_dtype_helper = NULL;
-    npy_cache_import("numpy.dtypes", "_add_dtype_helper", &add_dtype_helper);
-    if (add_dtype_helper == NULL) {
+    npy_cache_import("numpy.dtypes", "_add_dtype_helper",
+                     &npy_ma_global_data->_add_dtype_helper);
+    if (npy_ma_global_data->_add_dtype_helper == NULL) {
         goto err;
     }
 
     if (PyObject_CallFunction(
-            add_dtype_helper,
+            npy_ma_global_data->_add_dtype_helper,
             "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) {
         goto err;
     }
diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h
index 0a7238633fd2..a79d38e98692 100644
--- a/numpy/_core/src/multiarray/multiarraymodule.h
+++ b/numpy/_core/src/multiarray/multiarraymodule.h
@@ -29,4 +29,81 @@ typedef struct npy_ma_str_struct {
 
 NPY_VISIBILITY_HIDDEN extern npy_ma_str_struct *npy_ma_str;
 
+typedef struct npy_ma_global_data_struct {
+    /*
+     * References to items obtained via an import at module initialization
+     *
+     * These are immutable
+     */
+    PyObject *AxisError;
+    PyObject *ComplexWarning;
+    PyObject *DTypePromotionError;
+    PyObject *TooHardError;
+    PyObject *VisibleDeprecationWarning;
+    PyObject *_CopyMode;
+    PyObject *_NoValue;
+    PyObject *_ArrayMemoryError;
+    PyObject *_UFuncBinaryResolutionError;
+    PyObject *_UFuncInputCastingError;
+    PyObject *_UFuncNoLoopError;
+    PyObject *_UFuncOutputCastingError;
+    PyObject *math_floor_func;
+    PyObject *math_ceil_func;
+    PyObject *math_trunc_func;
+    PyObject *math_gcd_func;
+    PyObject *os_PathLike;
+    PyObject *os_fspath;
+
+    /*
+     * The following entries store cached references to objects obtained
+     * via an import. All of these are initialized at runtime by
+     * npy_cache_import.
+     *
+     * Currently these are not initialized in a thread-safe manner, but the
+     * failure mode is a reference leak for references to imported modules, so
+     * it will never lead to a crash unless there is something janky that we
+     * don't support going on, like reloading.
+     *
+     * TODO: maybe make each entry a struct that looks like:
+     *
+     * struct {
+     *     atomic_int initialized;
+     *     PyObject *value;
+     * }
+     *
+     * so that initialization is thread-safe and there is only the possibility
+     * of contention before the cache is initialized, not on every single read.
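+     *
+     * A read under that scheme might then look like the following sketch,
+     * with the one-time import done under a lock or a compare-exchange so
+     * the value is published exactly once:
+     *
+     *     if (!atomic_load(&cache->initialized)) {
+     *         // import into cache->value, then publish:
+     *         atomic_store(&cache->initialized, 1);
+     *     }
+     *     return cache->value;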
+ */ + PyObject *_add_dtype_helper; + PyObject *_all; + PyObject *_amax; + PyObject *_amin; + PyObject *_any; + PyObject *_clip; + PyObject *_commastring; + PyObject *_convert_to_stringdtype_kwargs; + PyObject *_default_array_repr; + PyObject *_default_array_str; + PyObject *_dump; + PyObject *_dumps; + PyObject *_getfield_is_safe; + PyObject *_mean; + PyObject *_prod; + PyObject *_promote_fields; + PyObject *_std; + PyObject *_sum; + PyObject *_ufunc_doc_signature_formatter; + PyObject *_var; + PyObject *_view_is_safe; + PyObject *_void_scalar_to_string; + PyObject *array_function_errmsg_formatter; + PyObject *array_ufunc_errmsg_formatter; + PyObject *internal_gcd_func; + PyObject *npy_ctypes_check; + PyObject *numpy_matrix; + PyObject *NO_NEP50_WARNING; +} npy_ma_global_data_struct; + +NPY_VISIBILITY_HIDDEN extern npy_ma_global_data_struct *npy_ma_global_data; + #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index 9532662b327a..aea7826bd04c 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -268,12 +268,6 @@ array_matrix_multiply(PyObject *m1, PyObject *m2) static PyObject * array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) { - static PyObject *AxisError_cls = NULL; - npy_cache_import("numpy.exceptions", "AxisError", &AxisError_cls); - if (AxisError_cls == NULL) { - return NULL; - } - INPLACE_GIVE_UP_IF_NEEDED(self, other, nb_inplace_matrix_multiply, array_inplace_matrix_multiply); @@ -322,7 +316,7 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) * AxisError should indicate that the axes argument didn't work out * which should mean the second operand not being 2 dimensional. */ - if (PyErr_ExceptionMatches(AxisError_cls)) { + if (PyErr_ExceptionMatches(npy_ma_global_data->AxisError)) { PyErr_SetString(PyExc_ValueError, "inplace matrix multiplication requires the first operand to " "have at least one and the second at least two dimensions."); diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index a5185cba60aa..554aace740ed 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -609,14 +609,15 @@ _void_to_hex(const char* argbuf, const Py_ssize_t arglen, static PyObject * _void_scalar_to_string(PyObject *obj, int repr) { - static PyObject *tostring_func = NULL; npy_cache_import("numpy._core.arrayprint", - "_void_scalar_to_string", &tostring_func); - if (tostring_func == NULL) { + "_void_scalar_to_string", + &npy_ma_global_data->_void_scalar_to_string); + if (npy_ma_global_data->_void_scalar_to_string == NULL) { return NULL; } PyObject *is_repr = repr ? Py_True : Py_False; - return PyObject_CallFunctionObjArgs(tostring_func, obj, is_repr, NULL); + return PyObject_CallFunctionObjArgs( + npy_ma_global_data->_void_scalar_to_string, obj, is_repr, NULL); } static PyObject * @@ -3036,13 +3037,7 @@ object_arrtype_alloc(PyTypeObject *type, Py_ssize_t items) * Object scalars should not actually exist, if they exist we should * consider it to be a bug. */ - static PyObject *visibleDeprecationWarning = NULL; - npy_cache_import("numpy", "VisibleDeprecationWarning", - &visibleDeprecationWarning); - if (visibleDeprecationWarning == NULL) { - return NULL; - } - if (PyErr_WarnEx(visibleDeprecationWarning, + if (PyErr_WarnEx(npy_ma_global_data->VisibleDeprecationWarning, "Creating a NumPy object scalar. 
NumPy object scalars should " "never be created. If you see this message please inform the " "NumPy developers. Since this message should never be shown " diff --git a/numpy/_core/src/multiarray/strfuncs.c b/numpy/_core/src/multiarray/strfuncs.c index 596a32e64aaf..1e4c1809b504 100644 --- a/numpy/_core/src/multiarray/strfuncs.c +++ b/numpy/_core/src/multiarray/strfuncs.c @@ -38,13 +38,15 @@ array_repr(PyArrayObject *self) * We need to do a delayed import here as initialization on module load * leads to circular import problems. */ - npy_cache_import("numpy._core.arrayprint", "_default_array_repr", &repr); - if (repr == NULL) { + npy_cache_import("numpy._core.arrayprint", "_default_array_repr", + &npy_ma_global_data->_default_array_repr); + if (npy_ma_global_data->_default_array_repr == NULL) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__repr__"); return NULL; } - return PyObject_CallFunctionObjArgs(repr, self, NULL); + return PyObject_CallFunctionObjArgs( + npy_ma_global_data->_default_array_repr, self, NULL); } @@ -56,13 +58,15 @@ array_str(PyArrayObject *self) * We need to do a delayed import here as initialization on module load leads * to circular import problems. */ - npy_cache_import("numpy._core.arrayprint", "_default_array_str", &str); - if (str == NULL) { + npy_cache_import("numpy._core.arrayprint", "_default_array_str", + &npy_ma_global_data->_default_array_str); + if (npy_ma_global_data->_default_array_str == NULL) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__str__"); return NULL; } - return PyObject_CallFunctionObjArgs(str, self, NULL); + return PyObject_CallFunctionObjArgs( + npy_ma_global_data->_default_array_str, self, NULL); } diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index bcaeaa5be5f8..281a67cbe969 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -17,6 +17,7 @@ #include "gil_utils.h" #include "conversion_utils.h" #include "npy_import.h" +#include "multiarraymodule.h" /* * Internal helper to create new instances @@ -707,8 +708,6 @@ stringdtype_repr(PyArray_StringDTypeObject *self) return ret; } -static PyObject *_convert_to_stringdtype_kwargs = NULL; - // implementation of __reduce__ magic method to reconstruct a StringDType // object from the serialized data in the pickle. 
Uses the python // _convert_to_stringdtype_kwargs for convenience because this isn't @@ -717,18 +716,21 @@ static PyObject * stringdtype__reduce__(PyArray_StringDTypeObject *self, PyObject *NPY_UNUSED(args)) { npy_cache_import("numpy._core._internal", "_convert_to_stringdtype_kwargs", - &_convert_to_stringdtype_kwargs); + &npy_ma_global_data->_convert_to_stringdtype_kwargs); - if (_convert_to_stringdtype_kwargs == NULL) { + if (npy_ma_global_data->_convert_to_stringdtype_kwargs == NULL) { return NULL; } if (self->na_object != NULL) { - return Py_BuildValue("O(iO)", _convert_to_stringdtype_kwargs, - self->coerce, self->na_object); + return Py_BuildValue( + "O(iO)", npy_ma_global_data->_convert_to_stringdtype_kwargs, + self->coerce, self->na_object); } - return Py_BuildValue("O(i)", _convert_to_stringdtype_kwargs, self->coerce); + return Py_BuildValue( + "O(i)", npy_ma_global_data->_convert_to_stringdtype_kwargs, + self->coerce); } static PyMethodDef PyArray_StringDType_methods[] = { diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index 673d4fd68b5c..75d3fd111b7e 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -1062,7 +1062,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, * then we chain it, because DTypePromotionError effectively means that there * is no loop available. (We failed finding a loop by using promotion.) */ - else if (PyErr_ExceptionMatches(npy_DTypePromotionError)) { + else if (PyErr_ExceptionMatches(npy_ma_global_data->DTypePromotionError)) { PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL; PyErr_Fetch(&err_type, &err_value, &err_traceback); raise_no_loop_found_error(ufunc, (PyObject **)op_dtypes); diff --git a/numpy/_core/src/umath/funcs.inc.src b/numpy/_core/src/umath/funcs.inc.src index df81c835034a..0259faab90bf 100644 --- a/numpy/_core/src/umath/funcs.inc.src +++ b/numpy/_core/src/umath/funcs.inc.src @@ -9,7 +9,7 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "npy_import.h" - +#include "multiarraymodule.h" /* ***************************************************************************** @@ -157,35 +157,20 @@ npy_ObjectLogicalNot(PyObject *i1) static PyObject * npy_ObjectFloor(PyObject *obj) { - static PyObject *math_floor_func = NULL; - - npy_cache_import("math", "floor", &math_floor_func); - if (math_floor_func == NULL) { - return NULL; - } - return PyObject_CallFunction(math_floor_func, "O", obj); + return PyObject_CallFunction(npy_ma_global_data->math_floor_func, + "O", obj); } static PyObject * npy_ObjectCeil(PyObject *obj) { - static PyObject *math_ceil_func = NULL; - - npy_cache_import("math", "ceil", &math_ceil_func); - if (math_ceil_func == NULL) { - return NULL; - } - return PyObject_CallFunction(math_ceil_func, "O", obj); + return PyObject_CallFunction(npy_ma_global_data->math_ceil_func, + "O", obj); } static PyObject * npy_ObjectTrunc(PyObject *obj) { - static PyObject *math_trunc_func = NULL; - - npy_cache_import("math", "trunc", &math_trunc_func); - if (math_trunc_func == NULL) { - return NULL; - } - return PyObject_CallFunction(math_trunc_func, "O", obj); + return PyObject_CallFunction(npy_ma_global_data->math_trunc_func, + "O", obj); } static PyObject * @@ -195,13 +180,8 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* use math.gcd if valid on the provided types */ { - static PyObject *math_gcd_func = NULL; - - npy_cache_import("math", "gcd", &math_gcd_func); - if (math_gcd_func == NULL) { - return NULL; - } - gcd = 
PyObject_CallFunction(math_gcd_func, "OO", i1, i2); + gcd = PyObject_CallFunction(npy_ma_global_data->math_gcd_func, + "OO", i1, i2); if (gcd != NULL) { return gcd; } @@ -211,13 +191,13 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* otherwise, use our internal one, written in python */ { - static PyObject *internal_gcd_func = NULL; - - npy_cache_import("numpy._core._internal", "_gcd", &internal_gcd_func); - if (internal_gcd_func == NULL) { + npy_cache_import("numpy._core._internal", "_gcd", + &npy_ma_global_data->internal_gcd_func); + if (npy_ma_global_data->internal_gcd_func == NULL) { return NULL; } - gcd = PyObject_CallFunction(internal_gcd_func, "OO", i1, i2); + gcd = PyObject_CallFunction(npy_ma_global_data->internal_gcd_func, + "OO", i1, i2); if (gcd == NULL) { return NULL; } diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index 717422286e01..6d2db58b891a 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -176,10 +176,8 @@ copy_positional_args_to_kwargs(const char **keywords, * This is only relevant for reduce, which is the only one with * 5 keyword arguments. */ - static PyObject *NoValue = NULL; assert(strcmp(keywords[i], "initial") == 0); - npy_cache_import("numpy", "_NoValue", &NoValue); - if (args[i] == NoValue) { + if (args[i] == npy_ma_global_data->_NoValue) { continue; } } @@ -365,19 +363,19 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, /* Check if there is a method left to call */ if (!override_obj) { /* No acceptable override found. */ - static PyObject *errmsg_formatter = NULL; PyObject *errmsg; - npy_cache_import("numpy._core._internal", - "array_ufunc_errmsg_formatter", - &errmsg_formatter); - - if (errmsg_formatter != NULL) { - /* All tuple items must be set before use */ - Py_INCREF(Py_None); - PyTuple_SET_ITEM(override_args, 0, Py_None); - errmsg = PyObject_Call(errmsg_formatter, override_args, - normal_kwds); + /* All tuple items must be set before use */ + Py_INCREF(Py_None); + PyTuple_SET_ITEM(override_args, 0, Py_None); + npy_cache_import( + "numpy._core._internal", + "array_ufunc_errmsg_formatter", + &npy_ma_global_data->array_ufunc_errmsg_formatter); + if (npy_ma_global_data->array_ufunc_errmsg_formatter != NULL) { + errmsg = PyObject_Call( + npy_ma_global_data->array_ufunc_errmsg_formatter, + override_args, normal_kwds); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); Py_DECREF(errmsg); diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index cc8f82aca11b..cf35bc4d7836 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -1788,12 +1788,7 @@ static int static int emit_complexwarning(void) { - static PyObject *cls = NULL; - npy_cache_import("numpy.exceptions", "ComplexWarning", &cls); - if (cls == NULL) { - return -1; - } - return PyErr_WarnEx(cls, + return PyErr_WarnEx(npy_ma_global_data->ComplexWarning, "Casting complex values to real discards the imaginary part", 1); } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index efad2a7be2b4..50c969bbc3ca 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -1341,8 +1341,6 @@ _check_keepdims_support(PyUFuncObject *ufunc) { static int _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, PyArrayObject **op, int broadcast_ndim, int **remap_axis) { - static PyObject *AxisError_cls = NULL; - int nin = ufunc->nin; int nop = 
ufunc->nargs; int iop, list_size; @@ -1388,12 +1386,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, if (PyTuple_Check(op_axes_tuple)) { if (PyTuple_Size(op_axes_tuple) != op_ncore) { /* must have been a tuple with too many entries. */ - npy_cache_import( - "numpy.exceptions", "AxisError", &AxisError_cls); - if (AxisError_cls == NULL) { - return -1; - } - PyErr_Format(AxisError_cls, + PyErr_Format(npy_ma_global_data->AxisError, "%s: operand %d has %d core dimensions, " "but %zd dimensions are specified by axes tuple.", ufunc_get_name_cstr(ufunc), iop, op_ncore, @@ -1417,11 +1410,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, return -1; } /* If it is a single integer, inform user that more are needed */ - npy_cache_import("numpy.exceptions", "AxisError", &AxisError_cls); - if (AxisError_cls == NULL) { - return -1; - } - PyErr_Format(AxisError_cls, + PyErr_Format(npy_ma_global_data->AxisError, "%s: operand %d has %d core dimensions, " "but the axes item is a single integer.", ufunc_get_name_cstr(ufunc), iop, op_ncore); @@ -5254,8 +5243,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) { PyArrayObject *ap1 = NULL; PyObject *tmp; - static PyObject *_numpy_matrix; - npy_cache_import("numpy", "matrix", &_numpy_matrix); + npy_cache_import("numpy", "matrix", &npy_ma_global_data->numpy_matrix); const char *matrix_deprecation_msg = ( "%s.outer() was passed a numpy matrix as %s argument. " @@ -5266,7 +5254,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) tmp = PyTuple_GET_ITEM(args, 0); - if (PyObject_IsInstance(tmp, _numpy_matrix)) { + if (PyObject_IsInstance(tmp, npy_ma_global_data->numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "first") < 0) { @@ -5283,7 +5271,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) PyArrayObject *ap2 = NULL; tmp = PyTuple_GET_ITEM(args, 1); - if (PyObject_IsInstance(tmp, _numpy_matrix)) { + if (PyObject_IsInstance(tmp, npy_ma_global_data->numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "second") < 0) { @@ -6432,15 +6420,14 @@ _typecharfromnum(int num) { static PyObject * ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) { - static PyObject *_sig_formatter; PyObject *doc; npy_cache_import( "numpy._core._internal", "_ufunc_doc_signature_formatter", - &_sig_formatter); + &npy_ma_global_data->_ufunc_doc_signature_formatter); - if (_sig_formatter == NULL) { + if (npy_ma_global_data->_ufunc_doc_signature_formatter == NULL) { return NULL; } @@ -6449,7 +6436,7 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) * introspection on name and nin + nout to automate the first part * of it the doc string shouldn't need the calling convention */ - doc = PyObject_CallFunctionObjArgs(_sig_formatter, + doc = PyObject_CallFunctionObjArgs(npy_ma_global_data->_ufunc_doc_signature_formatter, (PyObject *)ufunc, NULL); if (doc == NULL) { return NULL; diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index f6f231223f63..933c4ad74fbc 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -78,16 +78,8 @@ npy_casting_to_py_object(NPY_CASTING casting) */ static int raise_binary_type_reso_error(PyUFuncObject *ufunc, 
PyArrayObject **operands) { - static PyObject *exc_type = NULL; PyObject *exc_value; - npy_cache_import( - "numpy._core._exceptions", "_UFuncBinaryResolutionError", - &exc_type); - if (exc_type == NULL) { - return -1; - } - /* produce an error object */ exc_value = Py_BuildValue( "O(OO)", ufunc, @@ -97,7 +89,8 @@ raise_binary_type_reso_error(PyUFuncObject *ufunc, PyArrayObject **operands) { if (exc_value == NULL){ return -1; } - PyErr_SetObject(exc_type, exc_value); + PyErr_SetObject( + npy_ma_global_data->_UFuncBinaryResolutionError, exc_value); Py_DECREF(exc_value); return -1; @@ -110,15 +103,6 @@ NPY_NO_EXPORT int raise_no_loop_found_error( PyUFuncObject *ufunc, PyObject **dtypes) { - static PyObject *exc_type = NULL; - - npy_cache_import( - "numpy._core._exceptions", "_UFuncNoLoopError", - &exc_type); - if (exc_type == NULL) { - return -1; - } - PyObject *dtypes_tup = PyArray_TupleFromItems(ufunc->nargs, dtypes, 1); if (dtypes_tup == NULL) { return -1; @@ -129,7 +113,7 @@ raise_no_loop_found_error( if (exc_value == NULL) { return -1; } - PyErr_SetObject(exc_type, exc_value); + PyErr_SetObject(npy_ma_global_data->_UFuncNoLoopError, exc_value); Py_DECREF(exc_value); return -1; @@ -181,15 +165,8 @@ raise_input_casting_error( PyArray_Descr *to, npy_intp i) { - static PyObject *exc_type = NULL; - npy_cache_import( - "numpy._core._exceptions", "_UFuncInputCastingError", - &exc_type); - if (exc_type == NULL) { - return -1; - } - - return raise_casting_error(exc_type, ufunc, casting, from, to, i); + return raise_casting_error(npy_ma_global_data->_UFuncInputCastingError, + ufunc, casting, from, to, i); } @@ -204,15 +181,8 @@ raise_output_casting_error( PyArray_Descr *to, npy_intp i) { - static PyObject *exc_type = NULL; - npy_cache_import( - "numpy._core._exceptions", "_UFuncOutputCastingError", - &exc_type); - if (exc_type == NULL) { - return -1; - } - - return raise_casting_error(exc_type, ufunc, casting, from, to, i); + return raise_casting_error(npy_ma_global_data->_UFuncOutputCastingError, + ufunc, casting, from, to, i); } @@ -1443,7 +1413,7 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, int type_num1, type_num2; static PyObject *default_type_tup = NULL; - /* Set default type for integer inputs to NPY_DOUBLE */ + /* Set default type for integer inputs to NPY_DOUBLE */ if (default_type_tup == NULL) { PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE); @@ -1458,6 +1428,7 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, Py_DECREF(tmp); } + type_num1 = PyArray_DESCR(operands[0])->type_num; type_num2 = PyArray_DESCR(operands[1])->type_num; diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index b3aebff2c845..de561b5bc2e7 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -31,6 +31,7 @@ #include "stringdtype_ufuncs.h" #include "special_integer_comparisons.h" #include "extobj.h" /* for _extobject_contextvar exposure */ +#include "ufunc_type_resolution.h" /* Automatically generated code to define all ufuncs: */ #include "funcs.inc" From e5c1bd62b4ab92ac8aa47f4b2806c2dc2c109170 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 30 May 2024 15:04:14 -0600 Subject: [PATCH 592/980] MNT: move cpu dispatch registry into global data struct --- numpy/_core/src/common/npy_cpu_dispatch.c | 15 +++++++++------ numpy/_core/src/common/npy_cpu_features.c | 5 +++++ numpy/_core/src/multiarray/multiarraymodule.h | 7 +++++++ 3 files changed, 21 insertions(+), 6 deletions(-) diff --git 
a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c
index 992a470ada04..0f27ea81b48f 100644
--- a/numpy/_core/src/common/npy_cpu_dispatch.c
+++ b/numpy/_core/src/common/npy_cpu_dispatch.c
@@ -1,11 +1,14 @@
-#include "npy_cpu_dispatch.h"
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
 
-static PyObject *npy__cpu_dispatch_registery = NULL;
+#include "npy_cpu_dispatch.h"
+#include "numpy/ndarraytypes.h"
+#include "multiarraymodule.h"
 
 NPY_VISIBILITY_HIDDEN int
 npy_cpu_dispatch_tracer_init(PyObject *mod)
 {
-    if (npy__cpu_dispatch_registery != NULL) {
+    if (npy_ma_global_data->cpu_dispatch_registry != NULL) {
         PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initlized");
         return -1;
     }
@@ -22,7 +25,7 @@ npy_cpu_dispatch_tracer_init(PyObject *mod)
     if (err != 0) {
         return -1;
     }
-    npy__cpu_dispatch_registery = reg_dict;
+    npy_ma_global_data->cpu_dispatch_registry = reg_dict;
     return 0;
 }
 
@@ -30,13 +33,13 @@ NPY_VISIBILITY_HIDDEN void
 npy_cpu_dispatch_trace(const char *fname, const char *signature,
                        const char **dispatch_info)
 {
-    PyObject *func_dict = PyDict_GetItemString(npy__cpu_dispatch_registery, fname);
+    PyObject *func_dict = PyDict_GetItemString(npy_ma_global_data->cpu_dispatch_registry, fname);
     if (func_dict == NULL) {
         func_dict = PyDict_New();
         if (func_dict == NULL) {
             return;
         }
-        int err = PyDict_SetItemString(npy__cpu_dispatch_registery, fname, func_dict);
+        int err = PyDict_SetItemString(npy_ma_global_data->cpu_dispatch_registry, fname, func_dict);
         Py_DECREF(func_dict);
         if (err != 0) {
             return;
diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c
index 4f4448d13bcd..04a5449e5b8e 100644
--- a/numpy/_core/src/common/npy_cpu_features.c
+++ b/numpy/_core/src/common/npy_cpu_features.c
@@ -5,6 +5,11 @@
 
 /******************** Private Definitions *********************/
 
+// This is initialized during module initialization and thereafter immutable.
+// We don't include it in the global data struct because the definitions in
+// this file are shared by the _simd, _umath_tests, and
+// _multiarray_umath modules.
+
 // Hold all CPU features boolean values
 static unsigned char npy__cpu_have[NPY_CPU_FEATURE_MAX];
 
diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h
index a79d38e98692..299da4f5df3f 100644
--- a/numpy/_core/src/multiarray/multiarraymodule.h
+++ b/numpy/_core/src/multiarray/multiarraymodule.h
@@ -54,6 +54,13 @@ typedef struct npy_ma_global_data_struct {
     PyObject *os_PathLike;
     PyObject *os_fspath;
 
+    /*
+     * Used for CPU feature detection and dispatch
+     *
+     * Filled in during module initialization and thereafter immutable
+     */
+    PyObject *cpu_dispatch_registry;
+
     /*
      * The following entries store cached references to objects obtained
     * via an import. 
All of these are initialized at runtime by From 7719cf2b92a89063fa1ac9a87cfd13e0715541fd Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 30 May 2024 15:06:55 -0600 Subject: [PATCH 593/980] MNT: move ndarray.__array_*__ references to global data struct --- numpy/_core/src/common/ufunc_override.c | 10 ++----- .../src/multiarray/arrayfunction_override.c | 28 ++----------------- numpy/_core/src/multiarray/ctors.c | 8 +----- numpy/_core/src/multiarray/multiarraymodule.c | 8 ++++++ numpy/_core/src/multiarray/multiarraymodule.h | 6 ++++ 5 files changed, 20 insertions(+), 40 deletions(-) diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index dd7706d41475..dc8ddec4b6ad 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -7,6 +7,7 @@ #include "npy_import.h" #include "ufunc_override.h" #include "scalartypes.h" +#include "multiarraymodule.h" /* * Check whether an object has __array_ufunc__ defined on its class and it @@ -19,15 +20,8 @@ NPY_NO_EXPORT PyObject * PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj) { - static PyObject *ndarray_array_ufunc = NULL; PyObject *cls_array_ufunc; - /* On first entry, cache ndarray's __array_ufunc__ */ - if (ndarray_array_ufunc == NULL) { - ndarray_array_ufunc = PyObject_GetAttrString((PyObject *)&PyArray_Type, - "__array_ufunc__"); - } - /* Fast return for ndarray */ if (PyArray_CheckExact(obj)) { return NULL; @@ -49,7 +43,7 @@ PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj) return NULL; } /* Ignore if the same as ndarray.__array_ufunc__ */ - if (cls_array_ufunc == ndarray_array_ufunc) { + if (cls_array_ufunc == npy_ma_global_data->ndarray_array_ufunc) { Py_DECREF(cls_array_ufunc); return NULL; } diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index aebcb7986a7b..aab6aad982bc 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -11,17 +11,6 @@ #include "arrayfunction_override.h" -/* Return the ndarray.__array_function__ method. */ -static PyObject * -get_ndarray_array_function(void) -{ - PyObject* method = PyObject_GetAttrString((PyObject *)&PyArray_Type, - "__array_function__"); - assert(method != NULL); - return method; -} - - /* * Get an object's __array_function__ method in the fastest way possible. * Never raises an exception. Returns NULL if the method doesn't exist. 
@@ -29,16 +18,10 @@ get_ndarray_array_function(void) static PyObject * get_array_function(PyObject *obj) { - static PyObject *ndarray_array_function = NULL; - - if (ndarray_array_function == NULL) { - ndarray_array_function = get_ndarray_array_function(); - } - /* Fast return for ndarray */ if (PyArray_CheckExact(obj)) { - Py_INCREF(ndarray_array_function); - return ndarray_array_function; + Py_INCREF(npy_ma_global_data->ndarray_array_function); + return npy_ma_global_data->ndarray_array_function; } PyObject *array_function = PyArray_LookupSpecial(obj, npy_ma_str->array_function); @@ -142,12 +125,7 @@ get_implementing_args_and_methods(PyObject *relevant_args, static int is_default_array_function(PyObject *obj) { - static PyObject *ndarray_array_function = NULL; - - if (ndarray_array_function == NULL) { - ndarray_array_function = get_ndarray_array_function(); - } - return obj == ndarray_array_function; + return obj == npy_ma_global_data->ndarray_array_function; } diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 0fc181ca506a..5e28e08a16bd 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -928,17 +928,11 @@ PyArray_NewFromDescr_int( */ if (subtype != &PyArray_Type) { PyObject *res, *func; - static PyObject *ndarray_array_finalize = NULL; - /* First time, cache ndarray's __array_finalize__ */ - if (ndarray_array_finalize == NULL) { - ndarray_array_finalize = PyObject_GetAttr( - (PyObject *)&PyArray_Type, npy_ma_str_array_finalize); - } func = PyObject_GetAttr((PyObject *)subtype, npy_ma_str->array_finalize); if (func == NULL) { goto fail; } - else if (func == ndarray_array_finalize) { + else if (func == npy_ma_global_data->ndarray_array_finalize) { Py_DECREF(func); } else if (func == Py_None) { diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index c5e0d3d12512..185ee1f9fe87 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -5230,6 +5230,14 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } + // initialize static references to ndarray.__array_*__ special methods + npy_ma_global_data->ndarray_array_finalize = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "__array_finalize__"); + npy_ma_global_data->ndarray_array_ufunc = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "__array_ufunc__"); + npy_ma_global_data->ndarray_array_function = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "__array_function__"); + /* * Initialize np.dtypes.StringDType * diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 299da4f5df3f..39b1195a2f61 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -30,6 +30,12 @@ typedef struct npy_ma_str_struct { NPY_VISIBILITY_HIDDEN extern npy_ma_str_struct *npy_ma_str; typedef struct npy_ma_global_data_struct { + /* + * A reference to ndarray's implementations for __array_*__ special methods + */ + PyObject *ndarray_array_ufunc; + PyObject *ndarray_array_finalize; + PyObject *ndarray_array_function; /* * References to items obtained via an import at module initialization * From 3cbb68d00f6670b24797a4c603a4f8e8a0aa0e51 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 30 May 2024 15:10:25 -0600 Subject: [PATCH 594/980] MNT: move sys.flags.optimize cache to global data struct --- numpy/_core/src/multiarray/compiled_base.c | 9 
+-------- numpy/_core/src/multiarray/multiarraymodule.c | 12 +++++++++++- numpy/_core/src/multiarray/multiarraymodule.h | 6 ++++++ 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 27455797cfa3..d16a7f661fc5 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -1414,14 +1414,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t /* Don't add docstrings */ #if PY_VERSION_HEX > 0x030b0000 - static long optimize = -1000; - if (optimize < 0) { - PyObject *flags = PySys_GetObject("flags"); /* borrowed object */ - PyObject *level = PyObject_GetAttrString(flags, "optimize"); - optimize = PyLong_AsLong(level); - Py_DECREF(level); - } - if (optimize > 1) { + if (npy_ma_global_data->optimize > 1) { #else if (Py_OptimizeFlag > 1) { #endif diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 185ee1f9fe87..1a998de52b0f 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4965,10 +4965,20 @@ initialize_static_globals(void) numpy_warn_if_no_mem_policy = 0; } + PyObject *flags = PySys_GetObject("flags"); /* borrowed object */ + if (flags == NULL) { + PyErr_SetString(PyExc_AttributeError, "cannot get sys.flags"); + return -1; + } + PyObject *level = PyObject_GetAttrString(flags, "optimize"); + if (level == NULL) { + return -1; + } + npy_ma_global_data->optimize = PyLong_AsLong(level); + Py_DECREF(level); return 0; } - static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_multiarray_umath", diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 39b1195a2f61..5fe5bd1d2e14 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -60,6 +60,12 @@ typedef struct npy_ma_global_data_struct { PyObject *os_PathLike; PyObject *os_fspath; + /* + * stores sys.flags.optimize as a long, which is used in the add_docstring + * implementation + */ + long optimize; + /* * Used for CPU feature detection and dispatch * From 2ffcc7143f3422b3328b3168730bd459a2c71420 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 30 May 2024 15:13:05 -0600 Subject: [PATCH 595/980] MNT: set up tuple for truediv in global data struct --- numpy/_core/src/multiarray/multiarraymodule.c | 14 ++++++++++++ numpy/_core/src/multiarray/multiarraymodule.h | 8 +++++++ numpy/_core/src/umath/ufunc_type_resolution.c | 22 +++---------------- 3 files changed, 25 insertions(+), 19 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 1a998de52b0f..6956b194053f 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4965,6 +4965,20 @@ initialize_static_globals(void) numpy_warn_if_no_mem_policy = 0; } + // default_truediv_type_tup + PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE); + if (tmp == NULL) { + return -1; + } + + npy_ma_global_data->default_truediv_type_tup = + PyTuple_Pack(3, tmp, tmp, tmp); + if (npy_ma_global_data->default_truediv_type_tup == NULL) { + Py_DECREF(tmp); + return -1; + } + Py_DECREF(tmp); + PyObject *flags = PySys_GetObject("flags"); /* borrowed object */ if (flags == NULL) { PyErr_SetString(PyExc_AttributeError, "cannot get sys.flags"); diff --git 
a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 5fe5bd1d2e14..6b34e58e2a85 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -30,6 +30,14 @@ typedef struct npy_ma_str_struct { NPY_VISIBILITY_HIDDEN extern npy_ma_str_struct *npy_ma_str; typedef struct npy_ma_global_data_struct { + /* + * Used in ufunc_type_resolution.c to avoid reconstructing a tuple + * storing the default true division return types + * This is immutable and set at module initialization so can be used + * without acquiring the global data mutex + */ + PyObject *default_truediv_type_tup; + /* * A reference to ndarray's implementations for __array_*__ special methods */ diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index 933c4ad74fbc..e84c1ba6c811 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -1411,23 +1411,6 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, PyArray_Descr **out_dtypes) { int type_num1, type_num2; - static PyObject *default_type_tup = NULL; - - /* Set default type for integer inputs to NPY_DOUBLE */ - if (default_type_tup == NULL) { - PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE); - - if (tmp == NULL) { - return -1; - } - default_type_tup = PyTuple_Pack(3, tmp, tmp, tmp); - if (default_type_tup == NULL) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - } - type_num1 = PyArray_DESCR(operands[0])->type_num; type_num2 = PyArray_DESCR(operands[1])->type_num; @@ -1435,8 +1418,9 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, if (type_tup == NULL && (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) && (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2))) { - return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, - default_type_tup, out_dtypes); + return PyUFunc_DefaultTypeResolver( + ufunc, casting, operands, + npy_ma_global_data->default_truediv_type_tup, out_dtypes); } return PyUFunc_DivisionTypeResolver(ufunc, casting, operands, type_tup, out_dtypes); From d2ca21bafb66060a2915d0b86c6b54e3626ccc36 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 30 May 2024 15:23:22 -0600 Subject: [PATCH 596/980] MNT: move unpack_bits LUT into global static struct --- numpy/_core/src/multiarray/compiled_base.c | 31 ++----------------- numpy/_core/src/multiarray/multiarraymodule.c | 18 +++++++++++ numpy/_core/src/multiarray/multiarraymodule.h | 8 +++++ 3 files changed, 29 insertions(+), 28 deletions(-) diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index d16a7f661fc5..127ebdbe8c3d 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -1747,15 +1747,6 @@ pack_bits(PyObject *input, int axis, char order) static PyObject * unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) { - static int unpack_init = 0; - /* - * lookuptable for bitorder big as it has been around longer - * bitorder little is handled via byteswapping in the loop - */ - static union { - npy_uint8 bytes[8]; - npy_uint64 uint64; - } unpack_lookup_big[256]; PyArrayObject *inp; PyArrayObject *new = NULL; PyArrayObject *out = NULL; @@ -1841,22 +1832,6 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) goto fail; } - /* - * setup lookup table under GIL, 256 8 byte blocks representing 8 bits - * expanded to 1/0 
bytes - */ - if (unpack_init == 0) { - npy_intp j; - for (j=0; j < 256; j++) { - npy_intp k; - for (k=0; k < 8; k++) { - npy_uint8 v = (j & (1 << k)) == (1 << k); - unpack_lookup_big[j].bytes[7 - k] = v; - } - } - unpack_init = 1; - } - count = PyArray_DIM(new, axis) * 8; if (outdims[axis] > count) { in_n = count / 8; @@ -1883,7 +1858,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) /* for unity stride we can just copy out of the lookup table */ if (order == 'b') { for (index = 0; index < in_n; index++) { - npy_uint64 v = unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_ma_global_data->unpack_lookup_big[*inptr].uint64; memcpy(outptr, &v, 8); outptr += 8; inptr += in_stride; @@ -1891,7 +1866,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) } else { for (index = 0; index < in_n; index++) { - npy_uint64 v = unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_ma_global_data->unpack_lookup_big[*inptr].uint64; if (order != 'b') { v = npy_bswap8(v); } @@ -1902,7 +1877,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) } /* Clean up the tail portion */ if (in_tail) { - npy_uint64 v = unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_ma_global_data->unpack_lookup_big[*inptr].uint64; if (order != 'b') { v = npy_bswap8(v); } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 6956b194053f..97669f1dfdce 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4990,6 +4990,24 @@ initialize_static_globals(void) } npy_ma_global_data->optimize = PyLong_AsLong(level); Py_DECREF(level); + + /* + * see unpack_bits for how this table is used. + * + * LUT for bigendian bitorder, littleendian is handled via + * byteswapping in the loop. 
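+     *
+     * For example, entry 0x05 (binary 00000101) expands to the eight
+     * bytes 00 00 00 00 00 01 00 01 when read in big-endian bit order.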
+ * + * 256 8 byte blocks representing 8 bits expanded to 1 or 0 bytes + */ + npy_intp j; + for (j=0; j < 256; j++) { + npy_intp k; + for (k=0; k < 8; k++) { + npy_uint8 v = (j & (1 << k)) == (1 << k); + npy_ma_global_data->unpack_lookup_big[j].bytes[7 - k] = v; + } + } + return 0; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 6b34e58e2a85..6b4c679c6332 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -74,6 +74,14 @@ typedef struct npy_ma_global_data_struct { */ long optimize; + /* + * LUT used by unpack_bits + */ + union { + npy_uint8 bytes[8]; + npy_uint64 uint64; + } unpack_lookup_big[256]; + /* * Used for CPU feature detection and dispatch * From a1f72001af018fe8ce7baac9163eae0a659052e8 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 30 May 2024 15:27:57 -0600 Subject: [PATCH 597/980] MNT: move references to int(1) and int(0) to global static struct --- numpy/_core/src/multiarray/convert_datatype.c | 22 ++++--------------- numpy/_core/src/multiarray/multiarraymodule.c | 10 +++++++++ numpy/_core/src/multiarray/multiarraymodule.h | 7 ++++++ 3 files changed, 21 insertions(+), 18 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 423ac456ef3a..351a9fb7f216 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -2167,7 +2167,6 @@ PyArray_Zero(PyArrayObject *arr) { char *zeroval; int ret, storeflags; - static PyObject * zero_obj = NULL; if (_check_object_rec(PyArray_DESCR(arr)) < 0) { return NULL; @@ -2178,12 +2177,6 @@ PyArray_Zero(PyArrayObject *arr) return NULL; } - if (zero_obj == NULL) { - zero_obj = PyLong_FromLong((long) 0); - if (zero_obj == NULL) { - return NULL; - } - } if (PyArray_ISOBJECT(arr)) { /* XXX this is dangerous, the caller probably is not aware that zeroval is actually a static PyObject* @@ -2191,12 +2184,12 @@ PyArray_Zero(PyArrayObject *arr) if they simply memcpy it into a ndarray without using setitem(), refcount errors will occur */ - memcpy(zeroval, &zero_obj, sizeof(PyObject *)); + memcpy(zeroval, &npy_ma_global_data->zero_obj, sizeof(PyObject *)); return zeroval; } storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_SETITEM(arr, zeroval, zero_obj); + ret = PyArray_SETITEM(arr, zeroval, npy_ma_global_data->zero_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(zeroval); @@ -2213,7 +2206,6 @@ PyArray_One(PyArrayObject *arr) { char *oneval; int ret, storeflags; - static PyObject * one_obj = NULL; if (_check_object_rec(PyArray_DESCR(arr)) < 0) { return NULL; @@ -2224,12 +2216,6 @@ PyArray_One(PyArrayObject *arr) return NULL; } - if (one_obj == NULL) { - one_obj = PyLong_FromLong((long) 1); - if (one_obj == NULL) { - return NULL; - } - } if (PyArray_ISOBJECT(arr)) { /* XXX this is dangerous, the caller probably is not aware that oneval is actually a static PyObject* @@ -2237,13 +2223,13 @@ PyArray_One(PyArrayObject *arr) if they simply memcpy it into a ndarray without using setitem(), refcount errors will occur */ - memcpy(oneval, &one_obj, sizeof(PyObject *)); + memcpy(oneval, &npy_ma_global_data->one_obj, sizeof(PyObject *)); return oneval; } storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_SETITEM(arr, oneval, one_obj); + ret = PyArray_SETITEM(arr, oneval, 
npy_ma_global_data->one_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(oneval); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 97669f1dfdce..c087f732c8e1 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -5008,6 +5008,16 @@ initialize_static_globals(void) } } + npy_ma_global_data->one_obj = PyLong_FromLong((long) 1); + if (npy_ma_global_data->one_obj == NULL) { + return -1; + } + + npy_ma_global_data->zero_obj = PyLong_FromLong((long) 0); + if (npy_ma_global_data->zero_obj == NULL) { + return -1; + } + return 0; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 6b4c679c6332..c42a135add39 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -44,6 +44,13 @@ typedef struct npy_ma_global_data_struct { PyObject *ndarray_array_ufunc; PyObject *ndarray_array_finalize; PyObject *ndarray_array_function; + + /* + * References to the '1' and '0' PyLong objects + */ + PyObject *one_obj; + PyObject *zero_obj; + /* * References to items obtained via an import at module initialization * From 26c243d2e17a998c90e79bf6b20b56e979e70bd5 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 30 May 2024 15:28:56 -0600 Subject: [PATCH 598/980] MNT: move initialization of global ArrayMethods to module initialization --- numpy/_core/src/multiarray/convert_datatype.c | 168 +++++++++--------- 1 file changed, 83 insertions(+), 85 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 351a9fb7f216..a60b7797d6f7 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -3236,31 +3236,20 @@ nonstructured_to_structured_get_loop( return 0; } +// these are filled in during module initialization +// we do not include these in the global data struct +// to avoid the need to #include array_method.h for +// all users of the global data struct +static PyArrayMethodObject *VoidToGenericMethod = NULL; +static PyArrayMethodObject *GenericToVoidMethod = NULL; +static PyArrayMethodObject *ObjectToGenericMethod = NULL; +static PyArrayMethodObject *GenericToObjectMethod = NULL; static PyObject * PyArray_GetGenericToVoidCastingImpl(void) { - static PyArrayMethodObject *method = NULL; - - if (method != NULL) { - Py_INCREF(method); - return (PyObject *)method; - } - - method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (method == NULL) { - return PyErr_NoMemory(); - } - - method->name = "any_to_void_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = -1; - method->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors; - method->get_strided_loop = &nonstructured_to_structured_get_loop; - method->nin = 1; - method->nout = 1; - - return (PyObject *)method; + Py_INCREF(GenericToVoidMethod); + return (PyObject *)GenericToVoidMethod; } @@ -3397,27 +3386,8 @@ structured_to_nonstructured_get_loop( static PyObject * PyArray_GetVoidToGenericCastingImpl(void) { - static PyArrayMethodObject *method = NULL; - - if (method != NULL) { - Py_INCREF(method); - return (PyObject *)method; - } - - method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (method == NULL) { - return PyErr_NoMemory(); - } - - method->name = "void_to_any_cast"; - 
method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = -1; - method->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors; - method->get_strided_loop = &structured_to_nonstructured_get_loop; - method->nin = 1; - method->nout = 1; - - return (PyObject *)method; + Py_INCREF(VoidToGenericMethod); + return (PyObject *)VoidToGenericMethod; } @@ -3781,31 +3751,11 @@ object_to_any_resolve_descriptors( static PyObject * PyArray_GetObjectToGenericCastingImpl(void) { - static PyArrayMethodObject *method = NULL; - - if (method != NULL) { - Py_INCREF(method); - return (PyObject *)method; - } - - method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (method == NULL) { - return PyErr_NoMemory(); - } - - method->nin = 1; - method->nout = 1; - method->name = "object_to_any_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = NPY_UNSAFE_CASTING; - method->resolve_descriptors = &object_to_any_resolve_descriptors; - method->get_strided_loop = &object_to_any_get_loop; - - return (PyObject *)method; + Py_INCREF(ObjectToGenericMethod); + return (PyObject *)ObjectToGenericMethod; } - /* Any object is simple (could even use the default) */ static NPY_CASTING any_to_object_resolve_descriptors( @@ -3838,27 +3788,8 @@ any_to_object_resolve_descriptors( static PyObject * PyArray_GetGenericToObjectCastingImpl(void) { - static PyArrayMethodObject *method = NULL; - - if (method != NULL) { - Py_INCREF(method); - return (PyObject *)method; - } - - method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (method == NULL) { - return PyErr_NoMemory(); - } - - method->nin = 1; - method->nout = 1; - method->name = "any_to_object_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = NPY_SAFE_CASTING; - method->resolve_descriptors = &any_to_object_resolve_descriptors; - method->get_strided_loop = &any_to_object_get_loop; - - return (PyObject *)method; + Py_INCREF(GenericToObjectMethod); + return (PyObject *)GenericToObjectMethod; } @@ -3910,6 +3841,68 @@ PyArray_InitializeObjectToObjectCast(void) return res; } +static int +initialize_void_and_object_globals(void) { + VoidToGenericMethod = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + + if (VoidToGenericMethod == NULL) { + PyErr_NoMemory(); + return -1; + } + + VoidToGenericMethod->name = "void_to_any_cast"; + VoidToGenericMethod->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + VoidToGenericMethod->casting = -1; + VoidToGenericMethod->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors; + VoidToGenericMethod->get_strided_loop = &structured_to_nonstructured_get_loop; + VoidToGenericMethod->nin = 1; + VoidToGenericMethod->nout = 1; + + GenericToVoidMethod = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (GenericToVoidMethod == NULL) { + PyErr_NoMemory(); + return -1; + } + + GenericToVoidMethod->name = "any_to_void_cast"; + GenericToVoidMethod->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + GenericToVoidMethod->casting = -1; + GenericToVoidMethod->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors; + GenericToVoidMethod->get_strided_loop = &nonstructured_to_structured_get_loop; + GenericToVoidMethod->nin = 1; + GenericToVoidMethod->nout = 1; + + ObjectToGenericMethod = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (ObjectToGenericMethod == NULL) { + PyErr_NoMemory(); + return -1; + } + + 
ObjectToGenericMethod->nin = 1; + ObjectToGenericMethod->nout = 1; + ObjectToGenericMethod->name = "object_to_any_cast"; + ObjectToGenericMethod->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + ObjectToGenericMethod->casting = NPY_UNSAFE_CASTING; + ObjectToGenericMethod->resolve_descriptors = &object_to_any_resolve_descriptors; + ObjectToGenericMethod->get_strided_loop = &object_to_any_get_loop; + + GenericToObjectMethod = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (GenericToObjectMethod == NULL) { + PyErr_NoMemory(); + return -1; + } + + GenericToObjectMethod->nin = 1; + GenericToObjectMethod->nout = 1; + GenericToObjectMethod->name = "any_to_object_cast"; + GenericToObjectMethod->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + GenericToObjectMethod->casting = NPY_SAFE_CASTING; + GenericToObjectMethod->resolve_descriptors = &any_to_object_resolve_descriptors; + GenericToObjectMethod->get_strided_loop = &any_to_object_get_loop; + + return 0; +} + NPY_NO_EXPORT int PyArray_InitializeCasts() @@ -3930,5 +3923,10 @@ PyArray_InitializeCasts() if (PyArray_InitializeDatetimeCasts() < 0) { return -1; } + + if (initialize_void_and_object_globals() < 0) { + return -1; + } + return 0; } From 536e5fb1f843ac3e51361ae690281689de848848 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 30 May 2024 15:31:02 -0600 Subject: [PATCH 599/980] MNT: move initialization of global tuples to global data struct --- numpy/_core/src/multiarray/ctors.c | 11 +--- numpy/_core/src/multiarray/multiarraymodule.c | 5 ++ numpy/_core/src/multiarray/multiarraymodule.h | 11 ++++ numpy/_core/src/multiarray/number.c | 50 ++++++++++--------- numpy/_core/src/umath/umathmodule.c | 5 +- 5 files changed, 47 insertions(+), 35 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 5e28e08a16bd..81001849c684 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2528,15 +2528,6 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, return Py_NotImplemented; } - static PyObject *kwnames_is_copy = NULL; - if (kwnames_is_copy == NULL) { - kwnames_is_copy = Py_BuildValue("(s)", "copy"); - if (kwnames_is_copy == NULL) { - Py_DECREF(array_meth); - return NULL; - } - } - Py_ssize_t nargs = 0; PyObject *arguments[2]; PyObject *kwnames = NULL; @@ -2552,7 +2543,7 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, * signature of the __array__ method being called does not have `copy`. */ if (copy != -1) { - kwnames = kwnames_is_copy; + kwnames = npy_ma_global_data->kwnames_is_copy; arguments[nargs] = copy == 1 ? 
Py_True : Py_False; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index c087f732c8e1..20badf0d0128 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -5008,6 +5008,11 @@ initialize_static_globals(void) } } + npy_ma_global_data->kwnames_is_copy = Py_BuildValue("(s)", "copy"); + if (npy_ma_global_data->kwnames_is_copy == NULL) { + return -1; + } + npy_ma_global_data->one_obj = PyLong_FromLong((long) 1); if (npy_ma_global_data->one_obj == NULL) { return -1; diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index c42a135add39..7e44a8340983 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -89,6 +89,17 @@ typedef struct npy_ma_global_data_struct { npy_uint64 uint64; } unpack_lookup_big[256]; + /* + * Used in the __array__ internals to avoid building a tuple inline + */ + PyObject *kwnames_is_copy; + + /* + * Used in __imatmul__ to avoid building tuples inline + */ + PyObject *axes_1d_obj_kwargs; + PyObject *axes_2d_obj_kwargs; + /* * Used for CPU feature detection and dispatch * diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index aea7826bd04c..302da1abd95c 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -23,6 +23,10 @@ **************** Implement Number Protocol **************************** *************************************************************************/ +// this is not in the global data struct to avoid needing to include the +// definition of the NumericOps struct in multiarraymodule.h +// +// it is filled in during module initialization in a thread-safe manner NPY_NO_EXPORT NumericOps n_ops; /* NB: static objects initialized to zero */ /* @@ -118,6 +122,20 @@ _PyArray_SetNumericOps(PyObject *dict) SET(conjugate); SET(matmul); SET(clip); + + // initialize static globals needed for matmul + npy_ma_global_data->axes_1d_obj_kwargs = Py_BuildValue( + "{s, [(i), (i, i), (i)]}", "axes", -1, -2, -1, -1); + if (npy_ma_global_data->axes_1d_obj_kwargs == NULL) { + return -1; + } + + npy_ma_global_data->axes_2d_obj_kwargs = Py_BuildValue( + "{s, [(i, i), (i, i), (i, i)]}", "axes", -2, -1, -2, -1, -2, -1); + if (npy_ma_global_data->axes_2d_obj_kwargs == NULL) { + return -1; + } + return 0; } @@ -271,6 +289,12 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) INPLACE_GIVE_UP_IF_NEEDED(self, other, nb_inplace_matrix_multiply, array_inplace_matrix_multiply); + PyObject *args = PyTuple_Pack(3, self, other, self); + if (args == NULL) { + return NULL; + } + PyObject *kwargs; + /* * Unlike `matmul(a, b, out=a)` we ensure that the result is not broadcast * if the result without `out` would have less dimensions than `a`. @@ -280,33 +304,11 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) * The error here will be confusing, but for now, we enforce this by * passing the correct `axes=`. 
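     *
     * For reference, the kwargs dicts built in _PyArray_SetNumericOps above
     * map to axes=[(-1,), (-2, -1), (-1,)] for a 1-d `self` and to
     * axes=[(-2, -1), (-2, -1), (-2, -1)] for the 2-d case, pinning the
     * core matmul dimensions of all three operands.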
*/ - static PyObject *axes_1d_obj_kwargs = NULL; - static PyObject *axes_2d_obj_kwargs = NULL; - if (NPY_UNLIKELY(axes_1d_obj_kwargs == NULL)) { - axes_1d_obj_kwargs = Py_BuildValue( - "{s, [(i), (i, i), (i)]}", "axes", -1, -2, -1, -1); - if (axes_1d_obj_kwargs == NULL) { - return NULL; - } - } - if (NPY_UNLIKELY(axes_2d_obj_kwargs == NULL)) { - axes_2d_obj_kwargs = Py_BuildValue( - "{s, [(i, i), (i, i), (i, i)]}", "axes", -2, -1, -2, -1, -2, -1); - if (axes_2d_obj_kwargs == NULL) { - return NULL; - } - } - - PyObject *args = PyTuple_Pack(3, self, other, self); - if (args == NULL) { - return NULL; - } - PyObject *kwargs; if (PyArray_NDIM(self) == 1) { - kwargs = axes_1d_obj_kwargs; + kwargs = npy_ma_global_data->axes_1d_obj_kwargs; } else { - kwargs = axes_2d_obj_kwargs; + kwargs = npy_ma_global_data->axes_2d_obj_kwargs; } PyObject *res = PyObject_Call(n_ops.matmul, args, kwargs); Py_DECREF(args); diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index de561b5bc2e7..d3fa8d4becbd 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -287,9 +287,12 @@ int initumath(PyObject *m) s = PyDict_GetItemString(d, "conjugate"); s2 = PyDict_GetItemString(d, "remainder"); + /* Setup the array object's numerical structures with appropriate ufuncs in d*/ - _PyArray_SetNumericOps(d); + if (_PyArray_SetNumericOps(d) < 0) { + return -1; + } PyDict_SetItemString(d, "conj", s); PyDict_SetItemString(d, "mod", s2); From 0c221266337e4d6105908c2d70c31e5d75cb9f77 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 30 May 2024 15:32:13 -0600 Subject: [PATCH 600/980] MNT: move default extobj contextvar to global data dict --- numpy/_core/src/multiarray/multiarraymodule.h | 14 +++++++++++ numpy/_core/src/umath/extobj.c | 25 +++++++------------ numpy/_core/src/umath/extobj.h | 3 --- numpy/_core/src/umath/umathmodule.c | 4 +-- 4 files changed, 25 insertions(+), 21 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 7e44a8340983..1743cd1fde24 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -38,6 +38,20 @@ typedef struct npy_ma_global_data_struct { */ PyObject *default_truediv_type_tup; + /* + * Used to set up the default extobj context variable + * + * This is immutable and set at module initialization so can be used + * without acquiring the global data mutex + */ + PyObject *default_extobj_capsule; + + /* + * The global ContextVar to store the extobject. It is exposed to Python + * as `_extobj_contextvar`. + */ + PyObject *npy_extobj_contextvar; + /* * A reference to ndarray's implementations for __array_*__ special methods */ diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index 966b633a6ea5..2c10dda833e6 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -18,14 +18,6 @@ #include "common.h" -/* - * The global ContextVar to store the extobject. It is exposed to Python - * as `_extobj_contextvar`. 
- */ -static PyObject *default_extobj_capsule = NULL; -NPY_NO_EXPORT PyObject *npy_extobj_contextvar = NULL; - - #define UFUNC_ERR_IGNORE 0 #define UFUNC_ERR_WARN 1 #define UFUNC_ERR_RAISE 2 @@ -130,7 +122,8 @@ fetch_curr_extobj_state(npy_extobj *extobj) { PyObject *capsule; if (PyContextVar_Get( - npy_extobj_contextvar, default_extobj_capsule, &capsule) < 0) { + npy_ma_global_data->npy_extobj_contextvar, + npy_ma_global_data->default_extobj_capsule, &capsule) < 0) { return -1; } npy_extobj *obj = PyCapsule_GetPointer(capsule, "numpy.ufunc.extobj"); @@ -164,15 +157,15 @@ init_extobj(void) } } - default_extobj_capsule = make_extobj_capsule( + npy_ma_global_data->default_extobj_capsule = make_extobj_capsule( NPY_BUFSIZE, UFUNC_ERR_DEFAULT, Py_None); - if (default_extobj_capsule == NULL) { + if (npy_ma_global_data->default_extobj_capsule == NULL) { return -1; } - npy_extobj_contextvar = PyContextVar_New( - "numpy.ufunc.extobj", default_extobj_capsule); - if (npy_extobj_contextvar == NULL) { - Py_CLEAR(default_extobj_capsule); + npy_ma_global_data->npy_extobj_contextvar = PyContextVar_New( + "numpy.ufunc.extobj", npy_ma_global_data->default_extobj_capsule); + if (npy_ma_global_data->npy_extobj_contextvar == NULL) { + Py_CLEAR(npy_ma_global_data->default_extobj_capsule); return -1; } return 0; @@ -213,7 +206,7 @@ errmodeconverter(PyObject *obj, int *mode) /* * This function is currently exposed as `umath._seterrobj()`, it is private * and returns a capsule representing the errstate. This capsule is then - * assigned to the `npy_extobj_contextvar` in Python. + * assigned to the `_extobj_contextvar` in Python. */ NPY_NO_EXPORT PyObject * extobj_make_extobj(PyObject *NPY_UNUSED(mod), diff --git a/numpy/_core/src/umath/extobj.h b/numpy/_core/src/umath/extobj.h index 0cd5afd76218..9176af6a3539 100644 --- a/numpy/_core/src/umath/extobj.h +++ b/numpy/_core/src/umath/extobj.h @@ -4,9 +4,6 @@ #include /* for NPY_NO_EXPORT */ -/* For the private exposure of the extobject contextvar to Python */ -extern NPY_NO_EXPORT PyObject *npy_extobj_contextvar; - /* * Represent the current ufunc error (and buffer) state. we are using a * capsule for now to store this, but it could make sense to refactor it into diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index d3fa8d4becbd..b7e7fca01678 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -273,8 +273,8 @@ int initumath(PyObject *m) #undef ADDSCONST PyModule_AddIntConstant(m, "UFUNC_BUFSIZE_DEFAULT", (long)NPY_BUFSIZE); - Py_INCREF(npy_extobj_contextvar); - PyModule_AddObject(m, "_extobj_contextvar", npy_extobj_contextvar); + Py_INCREF(npy_ma_global_data->npy_extobj_contextvar); + PyModule_AddObject(m, "_extobj_contextvar", npy_ma_global_data->npy_extobj_contextvar); PyModule_AddObject(m, "PINF", PyFloat_FromDouble(NPY_INFINITY)); PyModule_AddObject(m, "NINF", PyFloat_FromDouble(-NPY_INFINITY)); From dd450d92e7ac42c2c7bb83a578c15b6b45c3fb94 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 19 Jun 2024 21:28:07 +0200 Subject: [PATCH 601/980] TYP: fix incorrect import in `ma/extras.pyi` stub The `numpy.ma` tests for type stubs are missing, so this kind of obvious error has a chance of creeping in. The import in the type stub now matches the one in the corresponding .py file again. 
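For reference, the corrected stub import, which matches what
`numpy/ma/extras.py` itself imports at runtime (full diff below):

    from numpy.lib._index_tricks_impl import AxisConcatenator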
Also add the start of a very basic "pass" typing test for numpy.ma

[skip azp] [skip cirrus] [skip circle]
---
 numpy/ma/extras.pyi                |  3 ++-
 numpy/typing/tests/data/pass/ma.py |  8 ++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)
 create mode 100644 numpy/typing/tests/data/pass/ma.py

diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi
index 56228b927080..8e458fe165af 100644
--- a/numpy/ma/extras.pyi
+++ b/numpy/ma/extras.pyi
@@ -1,5 +1,6 @@
 from typing import Any
-from numpy.lib.index_tricks import AxisConcatenator
+
+from numpy.lib._index_tricks_impl import AxisConcatenator
 
 from numpy.ma.core import (
     dot as dot,
diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py
new file mode 100644
index 000000000000..6b3b138119bb
--- /dev/null
+++ b/numpy/typing/tests/data/pass/ma.py
@@ -0,0 +1,8 @@
+from typing import Any
+
+import numpy as np
+import numpy.ma
+
+
+m : np.ma.MaskedArray[Any, np.dtype[np.float64]] = np.ma.masked_array([1.5, 2, 3], mask=[True, False, True])
+

From 90b1f38129eae239e04ecc9eb7ff8d12e9d597ac Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Thu, 30 May 2024 15:33:03 -0600
Subject: [PATCH 602/980] MNT: move PyArray_SetStringFunction internals into
 global data struct

---
 numpy/_core/src/multiarray/strfuncs.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/numpy/_core/src/multiarray/strfuncs.c b/numpy/_core/src/multiarray/strfuncs.c
index 1e4c1809b504..863399e0eaf1 100644
--- a/numpy/_core/src/multiarray/strfuncs.c
+++ b/numpy/_core/src/multiarray/strfuncs.c
@@ -7,6 +7,7 @@
 #include "numpy/arrayobject.h"
 #include "npy_pycompat.h"
 #include "npy_import.h"
+#include "multiarraymodule.h"
 #include "strfuncs.h"
 
 static void

From 6a296c422c2ad6154d3296f0275bde3696ff2ba5 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Thu, 30 May 2024 15:34:32 -0600
Subject: [PATCH 603/980] BUG: remove questionable static initialization of an
 array object

---
 numpy/_core/src/umath/ufunc_object.c | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c
index 50c969bbc3ca..98e02828726d 100644
--- a/numpy/_core/src/umath/ufunc_object.c
+++ b/numpy/_core/src/umath/ufunc_object.c
@@ -698,16 +698,13 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,
             * TODO: Just like the general dual NEP 50/legacy promotion
             * support this is meant as a temporary hack for NumPy 1.25.
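             *
             * (The replacement zero array is created per call in the code
             * that follows; the previous approach of caching it in a
             * function-level C static was the questionable initialization
             * this patch removes.)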
*/ - static PyArrayObject *zero_arr = NULL; - if (NPY_UNLIKELY(zero_arr == NULL)) { - zero_arr = (PyArrayObject *)PyArray_ZEROS( - 0, NULL, NPY_LONG, NPY_FALSE); - if (zero_arr == NULL) { - goto fail; - } - ((PyArrayObject_fields *)zero_arr)->flags |= ( - NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); + PyArrayObject *zero_arr = (PyArrayObject *)PyArray_ZEROS( + 0, NULL, NPY_LONG, NPY_FALSE); + if (zero_arr == NULL) { + goto fail; } + ((PyArrayObject_fields *)zero_arr)->flags |= ( + NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); Py_INCREF(zero_arr); Py_SETREF(out_op[i], zero_arr); } @@ -6530,10 +6527,6 @@ ufunc_get_signature(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) #undef _typecharfromnum -/* - * Docstring is now set from python - * static char *Ufunctype__doc__ = NULL; - */ static PyGetSetDef ufunc_getset[] = { {"__doc__", (getter)ufunc_get_doc, From 398f09521b7547aab9cb2fae2fe6a15b7068cc08 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 3 Jun 2024 13:42:54 -0600 Subject: [PATCH 604/980] MNT: split global data struct into two structs --- numpy/_core/src/common/npy_cpu_dispatch.c | 8 +- numpy/_core/src/common/npy_ctypes.h | 6 +- numpy/_core/src/common/ufunc_override.c | 2 +- .../src/multiarray/arrayfunction_override.c | 12 +-- numpy/_core/src/multiarray/arrayobject.c | 2 +- numpy/_core/src/multiarray/arraytypes.c.src | 3 +- numpy/_core/src/multiarray/common.h | 4 +- numpy/_core/src/multiarray/common_dtype.c | 4 +- numpy/_core/src/multiarray/compiled_base.c | 8 +- numpy/_core/src/multiarray/conversion_utils.c | 6 +- numpy/_core/src/multiarray/convert_datatype.c | 18 ++--- numpy/_core/src/multiarray/ctors.c | 6 +- numpy/_core/src/multiarray/descriptor.c | 6 +- numpy/_core/src/multiarray/dtypemeta.c | 18 ++--- numpy/_core/src/multiarray/getset.c | 6 +- numpy/_core/src/multiarray/methods.c | 28 +++---- numpy/_core/src/multiarray/methods.h | 4 +- numpy/_core/src/multiarray/multiarraymodule.c | 77 ++++++++++--------- numpy/_core/src/multiarray/multiarraymodule.h | 66 ++++++++++------ numpy/_core/src/multiarray/number.c | 14 ++-- numpy/_core/src/multiarray/scalartypes.c.src | 9 ++- numpy/_core/src/multiarray/strfuncs.c | 12 +-- .../_core/src/multiarray/stringdtype/dtype.c | 8 +- numpy/_core/src/umath/dispatching.c | 2 +- numpy/_core/src/umath/extobj.c | 16 ++-- numpy/_core/src/umath/funcs.inc.src | 14 ++-- numpy/_core/src/umath/override.c | 8 +- numpy/_core/src/umath/scalarmath.c.src | 2 +- numpy/_core/src/umath/ufunc_object.c | 16 ++-- numpy/_core/src/umath/ufunc_type_resolution.c | 10 +-- numpy/_core/src/umath/umathmodule.c | 4 +- 31 files changed, 210 insertions(+), 189 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c index 0f27ea81b48f..79d1a13440de 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.c +++ b/numpy/_core/src/common/npy_cpu_dispatch.c @@ -8,7 +8,7 @@ NPY_VISIBILITY_HIDDEN int npy_cpu_dispatch_tracer_init(PyObject *mod) { - if (npy_ma_global_data->cpu_dispatch_registry != NULL) { + if (npy_ma_static_data->cpu_dispatch_registry != NULL) { PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initlized"); return -1; } @@ -25,7 +25,7 @@ npy_cpu_dispatch_tracer_init(PyObject *mod) if (err != 0) { return -1; } - npy_ma_global_data->cpu_dispatch_registry = reg_dict; + npy_ma_static_data->cpu_dispatch_registry = reg_dict; return 0; } @@ -33,13 +33,13 @@ NPY_VISIBILITY_HIDDEN void npy_cpu_dispatch_trace(const char *fname, const char *signature, const char **dispatch_info) { 
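    /*
     * Look up the per-function dict in the dispatch registry, creating and
     * registering it on first use.
     */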
- PyObject *func_dict = PyDict_GetItemString(npy_ma_global_data->cpu_dispatch_registry, fname); + PyObject *func_dict = PyDict_GetItemString(npy_ma_static_data->cpu_dispatch_registry, fname); if (func_dict == NULL) { func_dict = PyDict_New(); if (func_dict == NULL) { return; } - int err = PyDict_SetItemString(npy_ma_global_data->cpu_dispatch_registry, fname, func_dict); + int err = PyDict_SetItemString(npy_ma_static_data->cpu_dispatch_registry, fname, func_dict); Py_DECREF(func_dict); if (err != 0) { return; diff --git a/numpy/_core/src/common/npy_ctypes.h b/numpy/_core/src/common/npy_ctypes.h index d7d350ec052b..1f356741de38 100644 --- a/numpy/_core/src/common/npy_ctypes.h +++ b/numpy/_core/src/common/npy_ctypes.h @@ -22,12 +22,12 @@ npy_ctypes_check(PyTypeObject *obj) int ret; npy_cache_import("numpy._core._internal", "npy_ctypes_check", - &npy_ma_global_data->npy_ctypes_check); - if (npy_ma_global_data->npy_ctypes_check == NULL) { + &npy_ma_thread_unsafe_state->npy_ctypes_check); + if (npy_ma_thread_unsafe_state->npy_ctypes_check == NULL) { goto fail; } - ret_obj = PyObject_CallFunctionObjArgs(npy_ma_global_data->npy_ctypes_check, + ret_obj = PyObject_CallFunctionObjArgs(npy_ma_thread_unsafe_state->npy_ctypes_check, (PyObject *)obj, NULL); if (ret_obj == NULL) { goto fail; diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index dc8ddec4b6ad..bd09d7c00be0 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -43,7 +43,7 @@ PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj) return NULL; } /* Ignore if the same as ndarray.__array_ufunc__ */ - if (cls_array_ufunc == npy_ma_global_data->ndarray_array_ufunc) { + if (cls_array_ufunc == npy_ma_static_data->ndarray_array_ufunc) { Py_DECREF(cls_array_ufunc); return NULL; } diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index aab6aad982bc..11e8deb05d4f 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -20,8 +20,8 @@ get_array_function(PyObject *obj) { /* Fast return for ndarray */ if (PyArray_CheckExact(obj)) { - Py_INCREF(npy_ma_global_data->ndarray_array_function); - return npy_ma_global_data->ndarray_array_function; + Py_INCREF(npy_ma_static_data->ndarray_array_function); + return npy_ma_static_data->ndarray_array_function; } PyObject *array_function = PyArray_LookupSpecial(obj, npy_ma_str->array_function); @@ -125,7 +125,7 @@ get_implementing_args_and_methods(PyObject *relevant_args, static int is_default_array_function(PyObject *obj) { - return obj == npy_ma_global_data->ndarray_array_function; + return obj == npy_ma_static_data->ndarray_array_function; } @@ -233,10 +233,10 @@ set_no_matching_types_error(PyObject *public_api, PyObject *types) /* No acceptable override found, raise TypeError. 
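     * The message itself is produced by the Python-level helper
     * array_function_errmsg_formatter, fetched lazily via npy_cache_import
     * below.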
*/ npy_cache_import("numpy._core._internal", "array_function_errmsg_formatter", - &npy_ma_global_data->array_function_errmsg_formatter); - if (npy_ma_global_data->array_function_errmsg_formatter != NULL) { + &npy_ma_thread_unsafe_state->array_function_errmsg_formatter); + if (npy_ma_thread_unsafe_state->array_function_errmsg_formatter != NULL) { PyObject *errmsg = PyObject_CallFunctionObjArgs( - npy_ma_global_data->array_function_errmsg_formatter, + npy_ma_thread_unsafe_state->array_function_errmsg_formatter, public_api, types, NULL); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 378b3e0d9c74..da6d13d1ff53 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -928,7 +928,7 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) if (result == NULL && (cmp_op == Py_EQ || cmp_op == Py_NE) && PyErr_ExceptionMatches( - npy_ma_global_data->_UFuncNoLoopError)) { + npy_ma_static_data->_UFuncNoLoopError)) { PyErr_Clear(); PyArrayObject *array_other = (PyArrayObject *)PyArray_FROM_O(other); diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 1ecfc6d94cd7..3e2199b2b2a5 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -4185,8 +4185,7 @@ NPY_NO_EXPORT _PyArray_LegacyDescr @from@_Descr = { /* The smallest type number is ?, the largest bounded by 'z'. */ #define _MAX_LETTER ('z' + 1) -static npy_int16 _letter_to_num[_MAX_LETTER - '?']; -#define LETTER_TO_NUM(letter) _letter_to_num[letter - '?'] +#define LETTER_TO_NUM(letter) npy_ma_static_data->_letter_to_num[letter - '?'] static _PyArray_LegacyDescr *_builtin_descrs[] = { &BOOL_Descr, diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index 9f6fdf32ca98..d9f903461fbf 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -142,12 +142,12 @@ check_and_adjust_axis_msg(int *axis, int ndim, PyObject *msg_prefix) if (NPY_UNLIKELY((*axis < -ndim) || (*axis >= ndim))) { /* Invoke the AxisError constructor */ PyObject *exc = PyObject_CallFunction( - npy_ma_global_data->AxisError, "iiO", *axis, ndim, + npy_ma_static_data->AxisError, "iiO", *axis, ndim, msg_prefix); if (exc == NULL) { return -1; } - PyErr_SetObject(npy_ma_global_data->AxisError, exc); + PyErr_SetObject(npy_ma_static_data->AxisError, exc); Py_DECREF(exc); return -1; diff --git a/numpy/_core/src/multiarray/common_dtype.c b/numpy/_core/src/multiarray/common_dtype.c index 94deddec2c80..c258a1abf3a1 100644 --- a/numpy/_core/src/multiarray/common_dtype.c +++ b/numpy/_core/src/multiarray/common_dtype.c @@ -64,7 +64,7 @@ PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) } if (common_dtype == (PyArray_DTypeMeta *)Py_NotImplemented) { Py_DECREF(Py_NotImplemented); - PyErr_Format(npy_ma_global_data->DTypePromotionError, + PyErr_Format(npy_ma_static_data->DTypePromotionError, "The DTypes %S and %S do not have a common DType. 
" "For example they cannot be stored in a single array unless " "the dtype is `object`.", dtype1, dtype2); @@ -285,7 +285,7 @@ PyArray_PromoteDTypeSequence( Py_INCREF(dtypes_in[l]); PyTuple_SET_ITEM(dtypes_in_tuple, l, (PyObject *)dtypes_in[l]); } - PyErr_Format(npy_ma_global_data->DTypePromotionError, + PyErr_Format(npy_ma_static_data->DTypePromotionError, "The DType %S could not be promoted by %S. This means that " "no common DType exists for the given inputs. " "For example they cannot be stored in a single array unless " diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 127ebdbe8c3d..46617d43f11b 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -1414,7 +1414,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t /* Don't add docstrings */ #if PY_VERSION_HEX > 0x030b0000 - if (npy_ma_global_data->optimize > 1) { + if (npy_ma_static_data->optimize > 1) { #else if (Py_OptimizeFlag > 1) { #endif @@ -1858,7 +1858,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) /* for unity stride we can just copy out of the lookup table */ if (order == 'b') { for (index = 0; index < in_n; index++) { - npy_uint64 v = npy_ma_global_data->unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_ma_static_data->unpack_lookup_big[*inptr].uint64; memcpy(outptr, &v, 8); outptr += 8; inptr += in_stride; @@ -1866,7 +1866,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) } else { for (index = 0; index < in_n; index++) { - npy_uint64 v = npy_ma_global_data->unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_ma_static_data->unpack_lookup_big[*inptr].uint64; if (order != 'b') { v = npy_bswap8(v); } @@ -1877,7 +1877,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) } /* Clean up the tail portion */ if (in_tail) { - npy_uint64 v = npy_ma_global_data->unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_ma_static_data->unpack_lookup_big[*inptr].uint64; if (order != 'b') { v = npy_bswap8(v); } diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index b5bdfce09486..d66f4eb26ed0 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -235,7 +235,7 @@ PyArray_CopyConverter(PyObject *obj, NPY_COPYMODE *copymode) { int int_copymode; - if ((PyObject *)Py_TYPE(obj) == npy_ma_global_data->_CopyMode) { + if ((PyObject *)Py_TYPE(obj) == npy_ma_static_data->_CopyMode) { PyObject* mode_value = PyObject_GetAttrString(obj, "value"); if (mode_value == NULL) { return NPY_FAIL; @@ -270,7 +270,7 @@ PyArray_AsTypeCopyConverter(PyObject *obj, NPY_ASTYPECOPYMODE *copymode) { int int_copymode; - if ((PyObject *)Py_TYPE(obj) == npy_ma_global_data->_CopyMode) { + if ((PyObject *)Py_TYPE(obj) == npy_ma_static_data->_CopyMode) { PyErr_SetString(PyExc_ValueError, "_CopyMode enum is not allowed for astype function. 
" "Use true/false instead."); @@ -1411,7 +1411,7 @@ PyArray_IntTupleFromIntp(int len, npy_intp const *vals) NPY_NO_EXPORT int _not_NoValue(PyObject *obj, PyObject **out) { - if (obj == npy_ma_global_data->_NoValue) { + if (obj == npy_ma_static_data->_NoValue) { *out = NULL; } else { diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index a60b7797d6f7..8f63b7716b6a 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -84,13 +84,13 @@ npy_give_promotion_warnings(void) npy_cache_import( "numpy._core._ufunc_config", "NO_NEP50_WARNING", - &npy_ma_global_data->NO_NEP50_WARNING); - if (npy_ma_global_data->NO_NEP50_WARNING == NULL) { + &npy_ma_thread_unsafe_state->NO_NEP50_WARNING); + if (npy_ma_thread_unsafe_state->NO_NEP50_WARNING == NULL) { PyErr_WriteUnraisable(NULL); return 1; } - if (PyContextVar_Get(npy_ma_global_data->NO_NEP50_WARNING, + if (PyContextVar_Get(npy_ma_thread_unsafe_state->NO_NEP50_WARNING, Py_False, &val) < 0) { /* Errors should not really happen, but if it does assume we warn. */ PyErr_WriteUnraisable(NULL); @@ -402,7 +402,7 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) !PyTypeNum_ISCOMPLEX(type_num) && PyTypeNum_ISNUMBER(type_num) && !PyTypeNum_ISBOOL(type_num)) { - int ret = PyErr_WarnEx(npy_ma_global_data->ComplexWarning, + int ret = PyErr_WarnEx(npy_ma_static_data->ComplexWarning, "Casting complex values to real discards " "the imaginary part", 1); if (ret < 0) { @@ -2184,12 +2184,12 @@ PyArray_Zero(PyArrayObject *arr) if they simply memcpy it into a ndarray without using setitem(), refcount errors will occur */ - memcpy(zeroval, &npy_ma_global_data->zero_obj, sizeof(PyObject *)); + memcpy(zeroval, &npy_ma_static_data->zero_obj, sizeof(PyObject *)); return zeroval; } storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_SETITEM(arr, zeroval, npy_ma_global_data->zero_obj); + ret = PyArray_SETITEM(arr, zeroval, npy_ma_static_data->zero_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(zeroval); @@ -2223,13 +2223,13 @@ PyArray_One(PyArrayObject *arr) if they simply memcpy it into a ndarray without using setitem(), refcount errors will occur */ - memcpy(oneval, &npy_ma_global_data->one_obj, sizeof(PyObject *)); + memcpy(oneval, &npy_ma_static_data->one_obj, sizeof(PyObject *)); return oneval; } storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_SETITEM(arr, oneval, npy_ma_global_data->one_obj); + ret = PyArray_SETITEM(arr, oneval, npy_ma_static_data->one_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(oneval); @@ -2612,7 +2612,7 @@ complex_to_noncomplex_get_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - int ret = PyErr_WarnEx(npy_ma_global_data->ComplexWarning, + int ret = PyErr_WarnEx(npy_ma_static_data->ComplexWarning, "Casting complex values to real discards " "the imaginary part", 1); if (ret < 0) { diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 81001849c684..8e0746dbff8a 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -622,7 +622,7 @@ raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) if (exc_value == NULL){ goto fail; } - PyErr_SetObject(npy_ma_global_data->_ArrayMemoryError, exc_value); + 
PyErr_SetObject(npy_ma_static_data->_ArrayMemoryError, exc_value); Py_DECREF(exc_value); return; @@ -932,7 +932,7 @@ PyArray_NewFromDescr_int( if (func == NULL) { goto fail; } - else if (func == npy_ma_global_data->ndarray_array_finalize) { + else if (func == npy_ma_static_data->ndarray_array_finalize) { Py_DECREF(func); } else if (func == Py_None) { @@ -2543,7 +2543,7 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, * signature of the __array__ method being called does not have `copy`. */ if (copy != -1) { - kwnames = npy_ma_global_data->kwnames_is_copy; + kwnames = npy_ma_static_data->kwnames_is_copy; arguments[nargs] = copy == 1 ? Py_True : Py_False; } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 449a786aafe8..55fd96b7f8e4 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -726,11 +726,11 @@ _convert_from_commastring(PyObject *obj, int align) PyArray_Descr *res; assert(PyUnicode_Check(obj)); npy_cache_import("numpy._core._internal", "_commastring", - &npy_ma_global_data->_commastring); - if (npy_ma_global_data->_commastring == NULL) { + &npy_ma_thread_unsafe_state->_commastring); + if (npy_ma_thread_unsafe_state->_commastring == NULL) { return NULL; } - parsed = PyObject_CallOneArg(npy_ma_global_data->_commastring, obj); + parsed = PyObject_CallOneArg(npy_ma_thread_unsafe_state->_commastring, obj); if (parsed == NULL) { return NULL; } diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 995841a2e1d3..3fcafe9b10ed 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -752,7 +752,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->subarray == NULL && descr1->names == NULL && descr2->subarray == NULL && descr2->names == NULL) { if (descr1->elsize != descr2->elsize) { - PyErr_SetString(npy_ma_global_data->DTypePromotionError, + PyErr_SetString(npy_ma_static_data->DTypePromotionError, "Invalid type promotion with void datatypes of different " "lengths. 
Use the `np.bytes_` datatype instead to pad the " "shorter value with trailing zero bytes."); @@ -765,12 +765,12 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->names != NULL && descr2->names != NULL) { /* If both have fields promoting individual fields may be possible */ npy_cache_import("numpy._core._internal", "_promote_fields", - &npy_ma_global_data->_promote_fields); - if (npy_ma_global_data->_promote_fields == NULL) { + &npy_ma_thread_unsafe_state->_promote_fields); + if (npy_ma_thread_unsafe_state->_promote_fields == NULL) { return NULL; } PyObject *result = PyObject_CallFunctionObjArgs( - npy_ma_global_data->_promote_fields, + npy_ma_thread_unsafe_state->_promote_fields, descr1, descr2, NULL); if (result == NULL) { return NULL; @@ -791,7 +791,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return NULL; } if (!cmp) { - PyErr_SetString(npy_ma_global_data->DTypePromotionError, + PyErr_SetString(npy_ma_static_data->DTypePromotionError, "invalid type promotion with subarray datatypes " "(shape mismatch)."); return NULL; @@ -821,7 +821,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return new_descr; } - PyErr_SetString(npy_ma_global_data->DTypePromotionError, + PyErr_SetString(npy_ma_static_data->DTypePromotionError, "invalid type promotion with structured datatype(s)."); return NULL; } @@ -1239,13 +1239,13 @@ dtypemeta_wrap_legacy_descriptor( /* And it to the types submodule if it is a builtin dtype */ if (!PyTypeNum_ISUSERDEF(descr->type_num)) { npy_cache_import("numpy.dtypes", "_add_dtype_helper", - &npy_ma_global_data->_add_dtype_helper); - if (npy_ma_global_data->_add_dtype_helper == NULL) { + &npy_ma_thread_unsafe_state->_add_dtype_helper); + if (npy_ma_thread_unsafe_state->_add_dtype_helper == NULL) { return -1; } if (PyObject_CallFunction( - npy_ma_global_data->_add_dtype_helper, + npy_ma_thread_unsafe_state->_add_dtype_helper, "Os", (PyObject *)dtype_class, alias) == NULL) { return -1; } diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 83e540003b07..4ddffc212c24 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -388,12 +388,12 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) PyObject *safe; npy_cache_import("numpy._core._internal", "_view_is_safe", - &npy_ma_global_data->_view_is_safe); - if (npy_ma_global_data->_view_is_safe == NULL) { + &npy_ma_thread_unsafe_state->_view_is_safe); + if (npy_ma_thread_unsafe_state->_view_is_safe == NULL) { goto fail; } - safe = PyObject_CallFunction(npy_ma_global_data->_view_is_safe, + safe = PyObject_CallFunction(npy_ma_thread_unsafe_state->_view_is_safe, "OO", PyArray_DESCR(self), newtype); if (safe == NULL) { goto fail; diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 7aeaaeb50ac3..68be699e6315 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -114,11 +114,11 @@ npy_forward_method( #define NPY_FORWARD_NDARRAY_METHOD(name) \ npy_cache_import( \ "numpy._core._methods", #name, \ - &npy_ma_global_data->name); \ - if (npy_ma_global_data->name == NULL) { \ + &npy_ma_thread_unsafe_state->name); \ + if (npy_ma_thread_unsafe_state->name == NULL) { \ return NULL; \ } \ - return npy_forward_method(npy_ma_global_data->name, \ + return npy_forward_method(npy_ma_thread_unsafe_state->name, \ (PyObject *)self, args, len_args, 
kwnames) @@ -406,14 +406,14 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) /* check that we are not reinterpreting memory containing Objects. */ if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(typed)) { npy_cache_import("numpy._core._internal", "_getfield_is_safe", - &npy_ma_global_data->_getfield_is_safe); - if (npy_ma_global_data->_getfield_is_safe == NULL) { + &npy_ma_thread_unsafe_state->_getfield_is_safe); + if (npy_ma_thread_unsafe_state->_getfield_is_safe == NULL) { Py_DECREF(typed); return NULL; } /* only returns True or raises */ - safe = PyObject_CallFunction(npy_ma_global_data->_getfield_is_safe, + safe = PyObject_CallFunction(npy_ma_thread_unsafe_state->_getfield_is_safe, "OOi", PyArray_DESCR(self), typed, offset); if (safe == NULL) { @@ -2248,17 +2248,17 @@ PyArray_Dump(PyObject *self, PyObject *file, int protocol) { PyObject *ret; npy_cache_import("numpy._core._methods", "_dump", - &npy_ma_global_data->_dump); - if (npy_ma_global_data->_dump == NULL) { + &npy_ma_thread_unsafe_state->_dump); + if (npy_ma_thread_unsafe_state->_dump == NULL) { return -1; } if (protocol < 0) { ret = PyObject_CallFunction( - npy_ma_global_data->_dump, "OO", self, file); + npy_ma_thread_unsafe_state->_dump, "OO", self, file); } else { ret = PyObject_CallFunction( - npy_ma_global_data->_dump, "OOi", self, file, protocol); + npy_ma_thread_unsafe_state->_dump, "OOi", self, file, protocol); } if (ret == NULL) { return -1; @@ -2272,16 +2272,16 @@ NPY_NO_EXPORT PyObject * PyArray_Dumps(PyObject *self, int protocol) { npy_cache_import("numpy._core._methods", "_dumps", - &npy_ma_global_data->_dumps); - if (npy_ma_global_data->_dumps == NULL) { + &npy_ma_thread_unsafe_state->_dumps); + if (npy_ma_thread_unsafe_state->_dumps == NULL) { return NULL; } if (protocol < 0) { - return PyObject_CallFunction(npy_ma_global_data->_dumps, "O", self); + return PyObject_CallFunction(npy_ma_thread_unsafe_state->_dumps, "O", self); } else { return PyObject_CallFunction( - npy_ma_global_data->_dumps, "Oi", self, protocol); + npy_ma_thread_unsafe_state->_dumps, "Oi", self, protocol); } } diff --git a/numpy/_core/src/multiarray/methods.h b/numpy/_core/src/multiarray/methods.h index c2893d3f1161..fd84a1e3b49f 100644 --- a/numpy/_core/src/multiarray/methods.h +++ b/numpy/_core/src/multiarray/methods.h @@ -14,11 +14,11 @@ extern NPY_NO_EXPORT PyMethodDef array_methods[]; static inline PyObject * NpyPath_PathlikeToFspath(PyObject *file) { - if (!PyObject_IsInstance(file, npy_ma_global_data->os_PathLike)) { + if (!PyObject_IsInstance(file, npy_ma_static_data->os_PathLike)) { Py_INCREF(file); return file; } - return PyObject_CallFunctionObjArgs(npy_ma_global_data->os_fspath, + return PyObject_CallFunctionObjArgs(npy_ma_static_data->os_fspath, file, NULL); } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 20badf0d0128..1d3a9c891ad1 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4265,7 +4265,7 @@ array_shares_memory_impl(PyObject *args, PyObject *kwds, Py_ssize_t default_max_ } else if (result == MEM_OVERLAP_TOO_HARD) { if (raise_exceptions) { - PyErr_SetString(npy_ma_global_data->TooHardError, + PyErr_SetString(npy_ma_static_data->TooHardError, "Exceeded max_work"); return NULL; } @@ -4769,7 +4769,8 @@ set_flaginfo(PyObject *d) } NPY_VISIBILITY_HIDDEN npy_ma_str_struct *npy_ma_str = NULL; -NPY_VISIBILITY_HIDDEN npy_ma_global_data_struct *npy_ma_global_data = 
NULL; +NPY_VISIBILITY_HIDDEN npy_ma_static_data_struct *npy_ma_static_data = NULL; +NPY_VISIBILITY_HIDDEN npy_ma_thread_unsafe_state_struct *npy_ma_thread_unsafe_state = NULL; static int intern_strings(void) @@ -4880,7 +4881,7 @@ intern_strings(void) /* * Initializes global constants. * - * All global constants should live inside the npy_ma_global_data + * All global constants should live inside the npy_ma_static_data * struct. * * Not all entries in the struct are initialized here, some are @@ -4899,63 +4900,63 @@ initialize_static_globals(void) { // this is module-level global heap allocation, it is currently // never freed - npy_ma_global_data = PyMem_Calloc(1, sizeof(npy_ma_global_data_struct)); + npy_ma_static_data = PyMem_Calloc(1, sizeof(npy_ma_static_data_struct)); // cached reference to objects defined in python IMPORT_GLOBAL("math", "floor", - npy_ma_global_data->math_floor_func); + npy_ma_static_data->math_floor_func); IMPORT_GLOBAL("math", "ceil", - npy_ma_global_data->math_ceil_func); + npy_ma_static_data->math_ceil_func); IMPORT_GLOBAL("math", "trunc", - npy_ma_global_data->math_trunc_func); + npy_ma_static_data->math_trunc_func); IMPORT_GLOBAL("math", "gcd", - npy_ma_global_data->math_gcd_func); + npy_ma_static_data->math_gcd_func); IMPORT_GLOBAL("numpy.exceptions", "AxisError", - npy_ma_global_data->AxisError); + npy_ma_static_data->AxisError); IMPORT_GLOBAL("numpy.exceptions", "ComplexWarning", - npy_ma_global_data->ComplexWarning); + npy_ma_static_data->ComplexWarning); IMPORT_GLOBAL("numpy.exceptions", "DTypePromotionError", - npy_ma_global_data->DTypePromotionError); + npy_ma_static_data->DTypePromotionError); IMPORT_GLOBAL("numpy.exceptions", "TooHardError", - npy_ma_global_data->TooHardError); + npy_ma_static_data->TooHardError); IMPORT_GLOBAL("numpy.exceptions", "VisibleDeprecationWarning", - npy_ma_global_data->VisibleDeprecationWarning); + npy_ma_static_data->VisibleDeprecationWarning); IMPORT_GLOBAL("numpy._globals", "_CopyMode", - npy_ma_global_data->_CopyMode); + npy_ma_static_data->_CopyMode); IMPORT_GLOBAL("numpy._globals", "_NoValue", - npy_ma_global_data->_NoValue); + npy_ma_static_data->_NoValue); IMPORT_GLOBAL("numpy._core._exceptions", "_ArrayMemoryError", - npy_ma_global_data->_ArrayMemoryError); + npy_ma_static_data->_ArrayMemoryError); IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncBinaryResolutionError", - npy_ma_global_data->_UFuncBinaryResolutionError); + npy_ma_static_data->_UFuncBinaryResolutionError); IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncInputCastingError", - npy_ma_global_data->_UFuncInputCastingError); + npy_ma_static_data->_UFuncInputCastingError); IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncNoLoopError", - npy_ma_global_data->_UFuncNoLoopError); + npy_ma_static_data->_UFuncNoLoopError); IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncOutputCastingError", - npy_ma_global_data->_UFuncOutputCastingError); + npy_ma_static_data->_UFuncOutputCastingError); IMPORT_GLOBAL("os", "fspath", - npy_ma_global_data->os_fspath); + npy_ma_static_data->os_fspath); IMPORT_GLOBAL("os", "PathLike", - npy_ma_global_data->os_PathLike); + npy_ma_static_data->os_PathLike); char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY"); if ((env != NULL) && (strncmp(env, "1", 1) == 0)) { @@ -4971,9 +4972,9 @@ initialize_static_globals(void) return -1; } - npy_ma_global_data->default_truediv_type_tup = + npy_ma_static_data->default_truediv_type_tup = PyTuple_Pack(3, tmp, tmp, tmp); - if (npy_ma_global_data->default_truediv_type_tup == NULL) { + if 
(npy_ma_static_data->default_truediv_type_tup == NULL) { Py_DECREF(tmp); return -1; } @@ -4988,7 +4989,7 @@ initialize_static_globals(void) if (level == NULL) { return -1; } - npy_ma_global_data->optimize = PyLong_AsLong(level); + npy_ma_static_data->optimize = PyLong_AsLong(level); Py_DECREF(level); /* @@ -5004,22 +5005,22 @@ initialize_static_globals(void) npy_intp k; for (k=0; k < 8; k++) { npy_uint8 v = (j & (1 << k)) == (1 << k); - npy_ma_global_data->unpack_lookup_big[j].bytes[7 - k] = v; + npy_ma_static_data->unpack_lookup_big[j].bytes[7 - k] = v; } } - npy_ma_global_data->kwnames_is_copy = Py_BuildValue("(s)", "copy"); - if (npy_ma_global_data->kwnames_is_copy == NULL) { + npy_ma_static_data->kwnames_is_copy = Py_BuildValue("(s)", "copy"); + if (npy_ma_static_data->kwnames_is_copy == NULL) { return -1; } - npy_ma_global_data->one_obj = PyLong_FromLong((long) 1); - if (npy_ma_global_data->one_obj == NULL) { + npy_ma_static_data->one_obj = PyLong_FromLong((long) 1); + if (npy_ma_static_data->one_obj == NULL) { return -1; } - npy_ma_global_data->zero_obj = PyLong_FromLong((long) 0); - if (npy_ma_global_data->zero_obj == NULL) { + npy_ma_static_data->zero_obj = PyLong_FromLong((long) 0); + if (npy_ma_static_data->zero_obj == NULL) { return -1; } @@ -5288,11 +5289,11 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { } // initialize static references to ndarray.__array_*__ special methods - npy_ma_global_data->ndarray_array_finalize = PyObject_GetAttrString( + npy_ma_static_data->ndarray_array_finalize = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_finalize__"); - npy_ma_global_data->ndarray_array_ufunc = PyObject_GetAttrString( + npy_ma_static_data->ndarray_array_ufunc = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_ufunc__"); - npy_ma_global_data->ndarray_array_function = PyObject_GetAttrString( + npy_ma_static_data->ndarray_array_function = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_function__"); /* @@ -5305,13 +5306,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { * the legacy dtypemeta classes are available. */ npy_cache_import("numpy.dtypes", "_add_dtype_helper", - &npy_ma_global_data->_add_dtype_helper); - if (npy_ma_global_data->_add_dtype_helper == NULL) { + &npy_ma_thread_unsafe_state->_add_dtype_helper); + if (npy_ma_thread_unsafe_state->_add_dtype_helper == NULL) { goto err; } if (PyObject_CallFunction( - npy_ma_global_data->_add_dtype_helper, + npy_ma_thread_unsafe_state->_add_dtype_helper, "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) { goto err; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 1743cd1fde24..efc349c18c21 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -27,22 +27,25 @@ typedef struct npy_ma_str_struct { PyObject *__dlpack__; } npy_ma_str_struct; -NPY_VISIBILITY_HIDDEN extern npy_ma_str_struct *npy_ma_str; - -typedef struct npy_ma_global_data_struct { +/* + * A struct that stores static global data used throughout + * _multiarray_umath, mostly to cache results that would be + * prohibitively expensive to compute at runtime in a tight loop. + * + * All items in this struct should be initialized during module + * initialization and thereafter should be immutable. Mutating items in + * this struct after module initialization is likely not thread-safe. 
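+ *
+ * (Representative members: cached argument tuples such as kwnames_is_copy
+ * and the matmul axes kwargs, the default extobj capsule, exception types
+ * imported from Python at startup, and the _letter_to_num lookup table.)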
+ */
+
+typedef struct npy_ma_static_data_struct {
     /*
      * Used in ufunc_type_resolution.c to avoid reconstructing a tuple
-     * storing the default true division return types
-     * This is immutable and set at module initialization so can be used
-     * without acquiring the global data mutex
+     * storing the default true division return types.
      */
     PyObject *default_truediv_type_tup;
 
     /*
      * Used to set up the default extobj context variable
-     *
-     * This is immutable and set at module initialization so can be used
-     * without acquiring the global data mutex
      */
     PyObject *default_extobj_capsule;
 
@@ -67,8 +70,6 @@ typedef struct npy_ma_global_data_struct {
 
     /*
      * References to items obtained via an import at module initialization
-     *
-     * These are immutable
      */
     PyObject *AxisError;
     PyObject *ComplexWarning;
@@ -116,20 +117,34 @@ typedef struct npy_ma_global_data_struct {
 
     /*
      * Used for CPU feature detection and dispatch
-     *
-     * Filled in during module initialization and thereafter immutable
      */
     PyObject *cpu_dispatch_registry;
 
     /*
-     * The following entries store cached references to object obtained
-     * via an import. All of these are initialized at runtime by
-     * npy_cache_import.
+     * A look-up table to recover integer type numbers from type characters.
+     *
+     * See the _MAX_LETTER and LETTER_TO_NUM macros in arraytypes.c.src.
+     *
+     * The smallest type number is ?, the largest is bounded by 'z'.
+     */
+    npy_int16 _letter_to_num['z' + 1 - '?'];
+} npy_ma_static_data_struct;
+
+
+/*
+ * A struct storing thread-unsafe global state for the _multiarray_umath
+ * module. We should refactor so the global state is thread-safe,
+ * e.g. by adding locking.
+ */
+typedef struct npy_ma_thread_unsafe_state_struct {
+    /*
+     * Cached references to objects obtained via an import. All of these
+     * can be initialized at any time by npy_cache_import.
      *
      * Currently these are not initialized in a thread-safe manner but the
-     * failure mode is a reference leak for references to imported modules so
-     * it will never lead to a crash unless there is something janky that we
-     * don't support going on like reloading.
+     * failure mode is a reference leak for references to imported immortal
+     * modules so it will never lead to a crash unless users are doing something
+     * janky that we don't support like reloading.
      *
      * TODO: maybe make each entry a struct that looks like:
      *
      *     typedef struct {
      *         PyThread_type_lock lock;
      *         PyObject *value;
      *     }
      *
-     * so is thread-safe initialization and only the possibility of contention
-     * before the cache is initialized, not on every single read.
+     * so the initialization is thread-safe and the only possible lock
+     * contention happens before the cache is initialized, not on every single
+     * read.
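+     *
+     * (A sketch of how a lazy, locked read could then look -- hypothetical,
+     * nothing below exists yet; `entry`, `module` and `attr` are
+     * placeholders:
+     *
+     *     if (entry->value == NULL) {
+     *         PyThread_acquire_lock(entry->lock, WAIT_LOCK);
+     *         if (entry->value == NULL) {
+     *             npy_cache_import(module, attr, &entry->value);
+     *         }
+     *         PyThread_release_lock(entry->lock);
+     *     }
+     *
+     * i.e. classic double-checked initialization, so the fast path after
+     * first use takes no lock.)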
*/ PyObject *_add_dtype_helper; PyObject *_all; PyObject *_amax; PyObject *_amin; PyObject *_any; + PyObject *array_function_errmsg_formatter; + PyObject *array_ufunc_errmsg_formatter; PyObject *_clip; PyObject *_commastring; PyObject *_convert_to_stringdtype_kwargs; @@ -154,7 +172,11 @@ typedef struct npy_ma_global_data_struct { PyObject *_dump; PyObject *_dumps; PyObject *_getfield_is_safe; + PyObject *internal_gcd_func; PyObject *_mean; + PyObject *NO_NEP50_WARNING; + PyObject *npy_ctypes_check; + PyObject *numpy_matrix; PyObject *_prod; PyObject *_promote_fields; PyObject *_std; @@ -171,6 +193,4 @@ typedef struct npy_ma_global_data_struct { PyObject *NO_NEP50_WARNING; } npy_ma_global_data_struct; -NPY_VISIBILITY_HIDDEN extern npy_ma_global_data_struct *npy_ma_global_data; - #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index 302da1abd95c..c0049a637efa 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -124,15 +124,15 @@ _PyArray_SetNumericOps(PyObject *dict) SET(clip); // initialize static globals needed for matmul - npy_ma_global_data->axes_1d_obj_kwargs = Py_BuildValue( + npy_ma_static_data->axes_1d_obj_kwargs = Py_BuildValue( "{s, [(i), (i, i), (i)]}", "axes", -1, -2, -1, -1); - if (npy_ma_global_data->axes_1d_obj_kwargs == NULL) { + if (npy_ma_static_data->axes_1d_obj_kwargs == NULL) { return -1; } - npy_ma_global_data->axes_2d_obj_kwargs = Py_BuildValue( + npy_ma_static_data->axes_2d_obj_kwargs = Py_BuildValue( "{s, [(i, i), (i, i), (i, i)]}", "axes", -2, -1, -2, -1, -2, -1); - if (npy_ma_global_data->axes_2d_obj_kwargs == NULL) { + if (npy_ma_static_data->axes_2d_obj_kwargs == NULL) { return -1; } @@ -305,10 +305,10 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) * passing the correct `axes=`. */ if (PyArray_NDIM(self) == 1) { - kwargs = npy_ma_global_data->axes_1d_obj_kwargs; + kwargs = npy_ma_static_data->axes_1d_obj_kwargs; } else { - kwargs = npy_ma_global_data->axes_2d_obj_kwargs; + kwargs = npy_ma_static_data->axes_2d_obj_kwargs; } PyObject *res = PyObject_Call(n_ops.matmul, args, kwargs); Py_DECREF(args); @@ -318,7 +318,7 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) * AxisError should indicate that the axes argument didn't work out * which should mean the second operand not being 2 dimensional. */ - if (PyErr_ExceptionMatches(npy_ma_global_data->AxisError)) { + if (PyErr_ExceptionMatches(npy_ma_static_data->AxisError)) { PyErr_SetString(PyExc_ValueError, "inplace matrix multiplication requires the first operand to " "have at least one and the second at least two dimensions."); diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 554aace740ed..7e855b5388d2 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -33,6 +33,7 @@ #include "dragon4.h" #include "npy_longdouble.h" #include "npy_buffer.h" +#include "multiarraymodule.h" #include @@ -611,13 +612,13 @@ static PyObject * _void_scalar_to_string(PyObject *obj, int repr) { npy_cache_import("numpy._core.arrayprint", "_void_scalar_to_string", - &npy_ma_global_data->_void_scalar_to_string); - if (npy_ma_global_data->_void_scalar_to_string == NULL) { + &npy_ma_thread_unsafe_state->_void_scalar_to_string); + if (npy_ma_thread_unsafe_state->_void_scalar_to_string == NULL) { return NULL; } PyObject *is_repr = repr ? 
Py_True : Py_False; return PyObject_CallFunctionObjArgs( - npy_ma_global_data->_void_scalar_to_string, obj, is_repr, NULL); + npy_ma_thread_unsafe_state->_void_scalar_to_string, obj, is_repr, NULL); } static PyObject * @@ -3037,7 +3038,7 @@ object_arrtype_alloc(PyTypeObject *type, Py_ssize_t items) * Object scalars should not actually exist, if they exist we should * consider it to be a bug. */ - if (PyErr_WarnEx(npy_ma_global_data->VisibleDeprecationWarning, + if (PyErr_WarnEx(npy_ma_static_data->VisibleDeprecationWarning, "Creating a NumPy object scalar. NumPy object scalars should " "never be created. If you see this message please inform the " "NumPy developers. Since this message should never be shown " diff --git a/numpy/_core/src/multiarray/strfuncs.c b/numpy/_core/src/multiarray/strfuncs.c index 863399e0eaf1..91a716f9f13a 100644 --- a/numpy/_core/src/multiarray/strfuncs.c +++ b/numpy/_core/src/multiarray/strfuncs.c @@ -40,14 +40,14 @@ array_repr(PyArrayObject *self) * leads to circular import problems. */ npy_cache_import("numpy._core.arrayprint", "_default_array_repr", - &npy_ma_global_data->_default_array_repr); - if (npy_ma_global_data->_default_array_repr == NULL) { + &npy_ma_thread_unsafe_state->_default_array_repr); + if (npy_ma_thread_unsafe_state->_default_array_repr == NULL) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__repr__"); return NULL; } return PyObject_CallFunctionObjArgs( - npy_ma_global_data->_default_array_repr, self, NULL); + npy_ma_thread_unsafe_state->_default_array_repr, self, NULL); } @@ -60,14 +60,14 @@ array_str(PyArrayObject *self) * to circular import problems. */ npy_cache_import("numpy._core.arrayprint", "_default_array_str", - &npy_ma_global_data->_default_array_str); - if (npy_ma_global_data->_default_array_str == NULL) { + &npy_ma_thread_unsafe_state->_default_array_str); + if (npy_ma_thread_unsafe_state->_default_array_str == NULL) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__str__"); return NULL; } return PyObject_CallFunctionObjArgs( - npy_ma_global_data->_default_array_str, self, NULL); + npy_ma_thread_unsafe_state->_default_array_str, self, NULL); } diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 281a67cbe969..83d1c5d04b5c 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -716,20 +716,20 @@ static PyObject * stringdtype__reduce__(PyArray_StringDTypeObject *self, PyObject *NPY_UNUSED(args)) { npy_cache_import("numpy._core._internal", "_convert_to_stringdtype_kwargs", - &npy_ma_global_data->_convert_to_stringdtype_kwargs); + &npy_ma_thread_unsafe_state->_convert_to_stringdtype_kwargs); - if (npy_ma_global_data->_convert_to_stringdtype_kwargs == NULL) { + if (npy_ma_thread_unsafe_state->_convert_to_stringdtype_kwargs == NULL) { return NULL; } if (self->na_object != NULL) { return Py_BuildValue( - "O(iO)", npy_ma_global_data->_convert_to_stringdtype_kwargs, + "O(iO)", npy_ma_thread_unsafe_state->_convert_to_stringdtype_kwargs, self->coerce, self->na_object); } return Py_BuildValue( - "O(i)", npy_ma_global_data->_convert_to_stringdtype_kwargs, + "O(i)", npy_ma_thread_unsafe_state->_convert_to_stringdtype_kwargs, self->coerce); } diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index 75d3fd111b7e..0f7a78666a1b 100644 --- a/numpy/_core/src/umath/dispatching.c +++ 
b/numpy/_core/src/umath/dispatching.c @@ -1062,7 +1062,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, * then we chain it, because DTypePromotionError effectively means that there * is no loop available. (We failed finding a loop by using promotion.) */ - else if (PyErr_ExceptionMatches(npy_ma_global_data->DTypePromotionError)) { + else if (PyErr_ExceptionMatches(npy_ma_static_data->DTypePromotionError)) { PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL; PyErr_Fetch(&err_type, &err_value, &err_traceback); raise_no_loop_found_error(ufunc, (PyObject **)op_dtypes); diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index 2c10dda833e6..a10fdfb5a911 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -122,8 +122,8 @@ fetch_curr_extobj_state(npy_extobj *extobj) { PyObject *capsule; if (PyContextVar_Get( - npy_ma_global_data->npy_extobj_contextvar, - npy_ma_global_data->default_extobj_capsule, &capsule) < 0) { + npy_ma_static_data->npy_extobj_contextvar, + npy_ma_static_data->default_extobj_capsule, &capsule) < 0) { return -1; } npy_extobj *obj = PyCapsule_GetPointer(capsule, "numpy.ufunc.extobj"); @@ -157,15 +157,15 @@ init_extobj(void) } } - npy_ma_global_data->default_extobj_capsule = make_extobj_capsule( + npy_ma_static_data->default_extobj_capsule = make_extobj_capsule( NPY_BUFSIZE, UFUNC_ERR_DEFAULT, Py_None); - if (npy_ma_global_data->default_extobj_capsule == NULL) { + if (npy_ma_static_data->default_extobj_capsule == NULL) { return -1; } - npy_ma_global_data->npy_extobj_contextvar = PyContextVar_New( - "numpy.ufunc.extobj", npy_ma_global_data->default_extobj_capsule); - if (npy_ma_global_data->npy_extobj_contextvar == NULL) { - Py_CLEAR(npy_ma_global_data->default_extobj_capsule); + npy_ma_static_data->npy_extobj_contextvar = PyContextVar_New( + "numpy.ufunc.extobj", npy_ma_static_data->default_extobj_capsule); + if (npy_ma_static_data->npy_extobj_contextvar == NULL) { + Py_CLEAR(npy_ma_static_data->default_extobj_capsule); return -1; } return 0; diff --git a/numpy/_core/src/umath/funcs.inc.src b/numpy/_core/src/umath/funcs.inc.src index 0259faab90bf..f068ab026ec1 100644 --- a/numpy/_core/src/umath/funcs.inc.src +++ b/numpy/_core/src/umath/funcs.inc.src @@ -157,19 +157,19 @@ npy_ObjectLogicalNot(PyObject *i1) static PyObject * npy_ObjectFloor(PyObject *obj) { - return PyObject_CallFunction(npy_ma_global_data->math_floor_func, + return PyObject_CallFunction(npy_ma_static_data->math_floor_func, "O", obj); } static PyObject * npy_ObjectCeil(PyObject *obj) { - return PyObject_CallFunction(npy_ma_global_data->math_ceil_func, + return PyObject_CallFunction(npy_ma_static_data->math_ceil_func, "O", obj); } static PyObject * npy_ObjectTrunc(PyObject *obj) { - return PyObject_CallFunction(npy_ma_global_data->math_trunc_func, + return PyObject_CallFunction(npy_ma_static_data->math_trunc_func, "O", obj); } @@ -180,7 +180,7 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* use math.gcd if valid on the provided types */ { - gcd = PyObject_CallFunction(npy_ma_global_data->math_gcd_func, + gcd = PyObject_CallFunction(npy_ma_static_data->math_gcd_func, "OO", i1, i2); if (gcd != NULL) { return gcd; @@ -192,11 +192,11 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* otherwise, use our internal one, written in python */ { npy_cache_import("numpy._core._internal", "_gcd", - &npy_ma_global_data->internal_gcd_func); - if (npy_ma_global_data->internal_gcd_func == NULL) { + &npy_ma_thread_unsafe_state->internal_gcd_func); + if 
(npy_ma_thread_unsafe_state->internal_gcd_func == NULL) { return NULL; } - gcd = PyObject_CallFunction(npy_ma_global_data->internal_gcd_func, + gcd = PyObject_CallFunction(npy_ma_thread_unsafe_state->internal_gcd_func, "OO", i1, i2); if (gcd == NULL) { return NULL; diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index 6d2db58b891a..6f31ee40f49e 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -177,7 +177,7 @@ copy_positional_args_to_kwargs(const char **keywords, * 5 keyword arguments. */ assert(strcmp(keywords[i], "initial") == 0); - if (args[i] == npy_ma_global_data->_NoValue) { + if (args[i] == npy_ma_static_data->_NoValue) { continue; } } @@ -371,10 +371,10 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, npy_cache_import( "numpy._core._internal", "array_ufunc_errmsg_formatter", - &npy_ma_global_data->array_ufunc_errmsg_formatter); - if (npy_ma_global_data->array_ufunc_errmsg_formatter != NULL) { + &npy_ma_thread_unsafe_state->array_ufunc_errmsg_formatter); + if (npy_ma_thread_unsafe_state->array_ufunc_errmsg_formatter != NULL) { errmsg = PyObject_Call( - npy_ma_global_data->array_ufunc_errmsg_formatter, + npy_ma_thread_unsafe_state->array_ufunc_errmsg_formatter, override_args, normal_kwds); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index cf35bc4d7836..024596e9262a 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -1788,7 +1788,7 @@ static int static int emit_complexwarning(void) { - return PyErr_WarnEx(npy_ma_global_data->ComplexWarning, + return PyErr_WarnEx(npy_ma_static_data->ComplexWarning, "Casting complex values to real discards the imaginary part", 1); } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 98e02828726d..12b05625d02f 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -1383,7 +1383,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, if (PyTuple_Check(op_axes_tuple)) { if (PyTuple_Size(op_axes_tuple) != op_ncore) { /* must have been a tuple with too many entries. */ - PyErr_Format(npy_ma_global_data->AxisError, + PyErr_Format(npy_ma_static_data->AxisError, "%s: operand %d has %d core dimensions, " "but %zd dimensions are specified by axes tuple.", ufunc_get_name_cstr(ufunc), iop, op_ncore, @@ -1407,7 +1407,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, return -1; } /* If it is a single integer, inform user that more are needed */ - PyErr_Format(npy_ma_global_data->AxisError, + PyErr_Format(npy_ma_static_data->AxisError, "%s: operand %d has %d core dimensions, " "but the axes item is a single integer.", ufunc_get_name_cstr(ufunc), iop, op_ncore); @@ -5240,7 +5240,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) { PyArrayObject *ap1 = NULL; PyObject *tmp; - npy_cache_import("numpy", "matrix", &npy_ma_global_data->numpy_matrix); + npy_cache_import("numpy", "matrix", &npy_ma_thread_unsafe_state->numpy_matrix); const char *matrix_deprecation_msg = ( "%s.outer() was passed a numpy matrix as %s argument. 
" @@ -5251,7 +5251,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) tmp = PyTuple_GET_ITEM(args, 0); - if (PyObject_IsInstance(tmp, npy_ma_global_data->numpy_matrix)) { + if (PyObject_IsInstance(tmp, npy_ma_thread_unsafe_state->numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "first") < 0) { @@ -5268,7 +5268,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) PyArrayObject *ap2 = NULL; tmp = PyTuple_GET_ITEM(args, 1); - if (PyObject_IsInstance(tmp, npy_ma_global_data->numpy_matrix)) { + if (PyObject_IsInstance(tmp, npy_ma_thread_unsafe_state->numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "second") < 0) { @@ -6422,9 +6422,9 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) npy_cache_import( "numpy._core._internal", "_ufunc_doc_signature_formatter", - &npy_ma_global_data->_ufunc_doc_signature_formatter); + &npy_ma_thread_unsafe_state->_ufunc_doc_signature_formatter); - if (npy_ma_global_data->_ufunc_doc_signature_formatter == NULL) { + if (npy_ma_thread_unsafe_state->_ufunc_doc_signature_formatter == NULL) { return NULL; } @@ -6433,7 +6433,7 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) * introspection on name and nin + nout to automate the first part * of it the doc string shouldn't need the calling convention */ - doc = PyObject_CallFunctionObjArgs(npy_ma_global_data->_ufunc_doc_signature_formatter, + doc = PyObject_CallFunctionObjArgs(npy_ma_thread_unsafe_state->_ufunc_doc_signature_formatter, (PyObject *)ufunc, NULL); if (doc == NULL) { return NULL; diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index e84c1ba6c811..a8d3f3df4bf8 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -90,7 +90,7 @@ raise_binary_type_reso_error(PyUFuncObject *ufunc, PyArrayObject **operands) { return -1; } PyErr_SetObject( - npy_ma_global_data->_UFuncBinaryResolutionError, exc_value); + npy_ma_static_data->_UFuncBinaryResolutionError, exc_value); Py_DECREF(exc_value); return -1; @@ -113,7 +113,7 @@ raise_no_loop_found_error( if (exc_value == NULL) { return -1; } - PyErr_SetObject(npy_ma_global_data->_UFuncNoLoopError, exc_value); + PyErr_SetObject(npy_ma_static_data->_UFuncNoLoopError, exc_value); Py_DECREF(exc_value); return -1; @@ -165,7 +165,7 @@ raise_input_casting_error( PyArray_Descr *to, npy_intp i) { - return raise_casting_error(npy_ma_global_data->_UFuncInputCastingError, + return raise_casting_error(npy_ma_static_data->_UFuncInputCastingError, ufunc, casting, from, to, i); } @@ -181,7 +181,7 @@ raise_output_casting_error( PyArray_Descr *to, npy_intp i) { - return raise_casting_error(npy_ma_global_data->_UFuncOutputCastingError, + return raise_casting_error(npy_ma_static_data->_UFuncOutputCastingError, ufunc, casting, from, to, i); } @@ -1420,7 +1420,7 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2))) { return PyUFunc_DefaultTypeResolver( ufunc, casting, operands, - npy_ma_global_data->default_truediv_type_tup, out_dtypes); + npy_ma_static_data->default_truediv_type_tup, out_dtypes); } return PyUFunc_DivisionTypeResolver(ufunc, casting, operands, type_tup, out_dtypes); diff --git a/numpy/_core/src/umath/umathmodule.c 
b/numpy/_core/src/umath/umathmodule.c index b7e7fca01678..96a78759c0e9 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -273,8 +273,8 @@ int initumath(PyObject *m) #undef ADDSCONST PyModule_AddIntConstant(m, "UFUNC_BUFSIZE_DEFAULT", (long)NPY_BUFSIZE); - Py_INCREF(npy_ma_global_data->npy_extobj_contextvar); - PyModule_AddObject(m, "_extobj_contextvar", npy_ma_global_data->npy_extobj_contextvar); + Py_INCREF(npy_ma_static_data->npy_extobj_contextvar); + PyModule_AddObject(m, "_extobj_contextvar", npy_ma_static_data->npy_extobj_contextvar); PyModule_AddObject(m, "PINF", PyFloat_FromDouble(NPY_INFINITY)); PyModule_AddObject(m, "NINF", PyFloat_FromDouble(-NPY_INFINITY)); From 8f84875534eda2a3940e97d233ff6492000a2a0f Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 5 Jun 2024 16:28:00 -0600 Subject: [PATCH 605/980] MNT: add PyArrayMethodObject caches to static data struct --- numpy/_core/src/multiarray/convert_datatype.c | 102 +++++++++--------- numpy/_core/src/multiarray/multiarraymodule.h | 9 ++ 2 files changed, 57 insertions(+), 54 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 8f63b7716b6a..e5624cf1c2f3 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -3236,20 +3236,11 @@ nonstructured_to_structured_get_loop( return 0; } -// these are filled in during module initialization -// we do not include these in the global data struct -// to avoid the need to #include array_method.h for -// all users of the global data struct -static PyArrayMethodObject *VoidToGenericMethod = NULL; -static PyArrayMethodObject *GenericToVoidMethod = NULL; -static PyArrayMethodObject *ObjectToGenericMethod = NULL; -static PyArrayMethodObject *GenericToObjectMethod = NULL; - static PyObject * PyArray_GetGenericToVoidCastingImpl(void) { - Py_INCREF(GenericToVoidMethod); - return (PyObject *)GenericToVoidMethod; + Py_INCREF(npy_ma_static_data->GenericToVoidMethod); + return npy_ma_static_data->GenericToVoidMethod; } @@ -3386,8 +3377,8 @@ structured_to_nonstructured_get_loop( static PyObject * PyArray_GetVoidToGenericCastingImpl(void) { - Py_INCREF(VoidToGenericMethod); - return (PyObject *)VoidToGenericMethod; + Py_INCREF(npy_ma_static_data->VoidToGenericMethod); + return npy_ma_static_data->VoidToGenericMethod; } @@ -3751,8 +3742,8 @@ object_to_any_resolve_descriptors( static PyObject * PyArray_GetObjectToGenericCastingImpl(void) { - Py_INCREF(ObjectToGenericMethod); - return (PyObject *)ObjectToGenericMethod; + Py_INCREF(npy_ma_static_data->ObjectToGenericMethod); + return npy_ma_static_data->ObjectToGenericMethod; } @@ -3788,8 +3779,8 @@ any_to_object_resolve_descriptors( static PyObject * PyArray_GetGenericToObjectCastingImpl(void) { - Py_INCREF(GenericToObjectMethod); - return (PyObject *)GenericToObjectMethod; + Py_INCREF(npy_ma_static_data->GenericToObjectMethod); + return npy_ma_static_data->GenericToObjectMethod; } @@ -3843,62 +3834,65 @@ PyArray_InitializeObjectToObjectCast(void) static int initialize_void_and_object_globals(void) { - VoidToGenericMethod = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - - if (VoidToGenericMethod == NULL) { + PyArrayMethodObject *method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (method == NULL) { PyErr_NoMemory(); return -1; } - VoidToGenericMethod->name = "void_to_any_cast"; - VoidToGenericMethod->flags = NPY_METH_SUPPORTS_UNALIGNED | 
NPY_METH_REQUIRES_PYAPI; - VoidToGenericMethod->casting = -1; - VoidToGenericMethod->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors; - VoidToGenericMethod->get_strided_loop = &structured_to_nonstructured_get_loop; - VoidToGenericMethod->nin = 1; - VoidToGenericMethod->nout = 1; + method->name = "void_to_any_cast"; + method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->casting = -1; + method->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors; + method->get_strided_loop = &structured_to_nonstructured_get_loop; + method->nin = 1; + method->nout = 1; + npy_ma_static_data->VoidToGenericMethod = (PyObject *)method; - GenericToVoidMethod = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (GenericToVoidMethod == NULL) { + method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (method == NULL) { PyErr_NoMemory(); return -1; } - GenericToVoidMethod->name = "any_to_void_cast"; - GenericToVoidMethod->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - GenericToVoidMethod->casting = -1; - GenericToVoidMethod->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors; - GenericToVoidMethod->get_strided_loop = &nonstructured_to_structured_get_loop; - GenericToVoidMethod->nin = 1; - GenericToVoidMethod->nout = 1; + method->name = "any_to_void_cast"; + method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->casting = -1; + method->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors; + method->get_strided_loop = &nonstructured_to_structured_get_loop; + method->nin = 1; + method->nout = 1; + npy_ma_static_data->GenericToVoidMethod = (PyObject *)method; - ObjectToGenericMethod = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (ObjectToGenericMethod == NULL) { + method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (method == NULL) { PyErr_NoMemory(); return -1; } - ObjectToGenericMethod->nin = 1; - ObjectToGenericMethod->nout = 1; - ObjectToGenericMethod->name = "object_to_any_cast"; - ObjectToGenericMethod->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - ObjectToGenericMethod->casting = NPY_UNSAFE_CASTING; - ObjectToGenericMethod->resolve_descriptors = &object_to_any_resolve_descriptors; - ObjectToGenericMethod->get_strided_loop = &object_to_any_get_loop; + method->nin = 1; + method->nout = 1; + method->name = "object_to_any_cast"; + method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->casting = NPY_UNSAFE_CASTING; + method->resolve_descriptors = &object_to_any_resolve_descriptors; + method->get_strided_loop = &object_to_any_get_loop; + npy_ma_static_data->ObjectToGenericMethod = (PyObject *)method; - GenericToObjectMethod = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (GenericToObjectMethod == NULL) { + method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (method == NULL) { PyErr_NoMemory(); return -1; } - GenericToObjectMethod->nin = 1; - GenericToObjectMethod->nout = 1; - GenericToObjectMethod->name = "any_to_object_cast"; - GenericToObjectMethod->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - GenericToObjectMethod->casting = NPY_SAFE_CASTING; - GenericToObjectMethod->resolve_descriptors = &any_to_object_resolve_descriptors; - GenericToObjectMethod->get_strided_loop = &any_to_object_get_loop; + method->nin = 1; + method->nout = 1; + method->name = "any_to_object_cast"; + method->flags = NPY_METH_SUPPORTS_UNALIGNED | 
NPY_METH_REQUIRES_PYAPI; + method->casting = NPY_SAFE_CASTING; + method->resolve_descriptors = &any_to_object_resolve_descriptors; + method->get_strided_loop = &any_to_object_get_loop; + npy_ma_static_data->GenericToObjectMethod = (PyObject *)method; return 0; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index efc349c18c21..70416aaf2e7e 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -128,6 +128,15 @@ typedef struct npy_ma_static_data_struct { * The smallest type number is ?, the largest is bounded by 'z'. */ npy_int16 _letter_to_num['z' + 1 - '?']; + + /* + * references to ArrayMethod implementations that are cached + * to avoid repeatedly creating them + */ + PyObject *VoidToGenericMethod; + PyObject *GenericToVoidMethod; + PyObject *ObjectToGenericMethod; + PyObject *GenericToObjectMethod; } npy_ma_static_data_struct; From 402a83ce219c01fd2e0eda8748c31a1e6d352f8c Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 5 Jun 2024 16:32:32 -0600 Subject: [PATCH 606/980] MNT: move some thread-unsafe state in thread-unsafe state struct --- numpy/_core/src/multiarray/alloc.c | 18 ++++++------- numpy/_core/src/multiarray/multiarraymodule.c | 9 +++---- numpy/_core/src/multiarray/multiarraymodule.h | 27 ++++++++++++++----- numpy/_core/src/umath/_scaled_float_dtype.c | 7 ++--- 4 files changed, 35 insertions(+), 26 deletions(-) diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index df64a13a26e8..c6615f1e171f 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -11,6 +11,7 @@ #include "numpy/npy_common.h" #include "npy_config.h" #include "alloc.h" +#include "multiarraymodule.h" #include #ifdef NPY_OS_LINUX @@ -35,13 +36,11 @@ typedef struct { static cache_bucket datacache[NBUCKETS]; static cache_bucket dimcache[NBUCKETS_DIM]; -static int _madvise_hugepage = 1; - /* * This function tells whether NumPy attempts to call `madvise` with * `MADV_HUGEPAGE`. `madvise` is only ever used on linux, so the value - * of `_madvise_hugepage` may be ignored. + * of `madvise_hugepage` may be ignored. * * It is exposed to Python as `np._core.multiarray._get_madvise_hugepage`. */ @@ -49,7 +48,7 @@ NPY_NO_EXPORT PyObject * _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { #ifdef NPY_OS_LINUX - if (_madvise_hugepage) { + if (npy_ma_thread_unsafe_state->madvise_hugepage) { Py_RETURN_TRUE; } #endif @@ -59,20 +58,20 @@ _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) /* * This function enables or disables the use of `MADV_HUGEPAGE` on Linux - * by modifying the global static `_madvise_hugepage`. - * It returns the previous value of `_madvise_hugepage`. + * by modifying the global static `madvise_hugepage`. + * It returns the previous value of `madvise_hugepage`. * * It is exposed to Python as `np._core.multiarray._set_madvise_hugepage`. 
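 *
 * For context, the flag toggled here is consumed further down in
 * _npy_alloc_cache. A minimal sketch of that consumer (the madvise()
 * call itself lies just past the end of the hunk below, so treat this
 * as illustrative rather than verbatim):
 *
 *     #ifdef NPY_OS_LINUX
 *     if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u))) &&
 *             npy_ma_thread_unsafe_state->madvise_hugepage) {
 *         npy_uintp offset = 4096u - (npy_uintp)p % (4096u);
 *         npy_uintp length = nelem * esz - offset;
 *         // optimistically request transparent huge pages
 *         madvise((void *)((npy_uintp)p + offset), length, MADV_HUGEPAGE);
 *     }
 *     #endif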
*/ NPY_NO_EXPORT PyObject * _set_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *enabled_obj) { - int was_enabled = _madvise_hugepage; + int was_enabled = npy_ma_thread_unsafe_state->madvise_hugepage; int enabled = PyObject_IsTrue(enabled_obj); if (enabled < 0) { return NULL; } - _madvise_hugepage = enabled; + npy_ma_thread_unsafe_state->madvise_hugepage = enabled; if (was_enabled) { Py_RETURN_TRUE; } @@ -110,7 +109,8 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, #endif #ifdef NPY_OS_LINUX /* allow kernel allocating huge pages for large arrays */ - if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u))) && _madvise_hugepage) { + if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u))) && + npy_ma_thread_unsafe_state->madvise_hugepage) { npy_uintp offset = 4096u - (npy_uintp)p % (4096u); npy_uintp length = nelem * esz - offset; /** diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 1d3a9c891ad1..f6da70b885f1 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4345,8 +4345,6 @@ _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) static PyObject * _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { - static int initialized = 0; - #if !defined(PYPY_VERSION) if (PyThreadState_Get()->interp != PyInterpreterState_Main()) { if (PyErr_WarnEx(PyExc_UserWarning, @@ -4362,11 +4360,11 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } /* No need to give the other warning in a sub-interpreter as well... */ - initialized = 1; + npy_ma_thread_unsafe_state->reload_guard_initialized = 1; Py_RETURN_NONE; } #endif - if (initialized) { + if (npy_ma_thread_unsafe_state->reload_guard_initialized) { if (PyErr_WarnEx(PyExc_UserWarning, "The NumPy module was reloaded (imported a second time). 
" "This can in some cases result in small but subtle issues " @@ -4374,7 +4372,7 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } } - initialized = 1; + npy_ma_thread_unsafe_state->reload_guard_initialized = 1; Py_RETURN_NONE; } @@ -4901,6 +4899,7 @@ initialize_static_globals(void) // this is module-level global heap allocation, it is currently // never freed npy_ma_static_data = PyMem_Calloc(1, sizeof(npy_ma_static_data_struct)); + npy_ma_thread_unsafe_state = PyMem_Calloc(1, sizeof(npy_ma_thread_unsafe_state_struct)); // cached reference to objects defined in python diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 70416aaf2e7e..95ad1a4cf18a 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -194,12 +194,25 @@ typedef struct npy_ma_thread_unsafe_state_struct { PyObject *_var; PyObject *_view_is_safe; PyObject *_void_scalar_to_string; - PyObject *array_function_errmsg_formatter; - PyObject *array_ufunc_errmsg_formatter; - PyObject *internal_gcd_func; - PyObject *npy_ctypes_check; - PyObject *numpy_matrix; - PyObject *NO_NEP50_WARNING; -} npy_ma_global_data_struct; + + /* + * Used to test the internal-only scaled float test dtype + */ + npy_bool get_sfloat_dtype_initialized; + + /* + * controls the global madvise hugepage setting + */ + int madvise_hugepage; + + /* + * used to detect module reloading in the reload guard + */ + int reload_guard_initialized; +} npy_ma_thread_unsafe_state_struct; + +NPY_VISIBILITY_HIDDEN extern npy_ma_str_struct *npy_ma_str; +NPY_VISIBILITY_HIDDEN extern npy_ma_static_data_struct *npy_ma_static_data; +NPY_VISIBILITY_HIDDEN extern npy_ma_thread_unsafe_state_struct *npy_ma_thread_unsafe_state; #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 02278806751f..0bc91f388c08 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -867,10 +867,7 @@ sfloat_init_ufuncs(void) { NPY_NO_EXPORT PyObject * get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) { - /* Allow calling the function multiple times. 
*/ - static npy_bool initialized = NPY_FALSE; - - if (initialized) { + if (npy_ma_thread_unsafe_state->get_sfloat_dtype_initialized) { Py_INCREF(&PyArray_SFloatDType); return (PyObject *)&PyArray_SFloatDType; } @@ -899,6 +896,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } - initialized = NPY_TRUE; + npy_ma_thread_unsafe_state->get_sfloat_dtype_initialized = NPY_TRUE; return (PyObject *)&PyArray_SFloatDType; } From e43275a200d49030fe84791431978d7673053a8d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 5 Jun 2024 14:47:39 -0600 Subject: [PATCH 607/980] MNT: make data structs static instead of heap-allocated --- numpy/_core/src/common/npy_cpu_dispatch.c | 8 +- numpy/_core/src/common/npy_ctypes.h | 6 +- numpy/_core/src/common/ufunc_override.c | 2 +- numpy/_core/src/multiarray/alloc.c | 8 +- numpy/_core/src/multiarray/array_converter.c | 4 +- .../src/multiarray/arrayfunction_override.c | 20 +- numpy/_core/src/multiarray/arrayobject.c | 2 +- numpy/_core/src/multiarray/arraytypes.c.src | 2 +- numpy/_core/src/multiarray/arraywrap.c | 4 +- numpy/_core/src/multiarray/common.h | 4 +- numpy/_core/src/multiarray/common_dtype.c | 4 +- numpy/_core/src/multiarray/compiled_base.c | 8 +- numpy/_core/src/multiarray/conversion_utils.c | 8 +- numpy/_core/src/multiarray/convert_datatype.c | 42 ++-- numpy/_core/src/multiarray/ctors.c | 16 +- numpy/_core/src/multiarray/descriptor.c | 8 +- numpy/_core/src/multiarray/dlpack.c | 4 +- numpy/_core/src/multiarray/dtypemeta.c | 18 +- numpy/_core/src/multiarray/getset.c | 6 +- numpy/_core/src/multiarray/item_selection.c | 4 +- numpy/_core/src/multiarray/methods.c | 30 +-- numpy/_core/src/multiarray/methods.h | 4 +- numpy/_core/src/multiarray/multiarraymodule.c | 181 +++++++++--------- numpy/_core/src/multiarray/multiarraymodule.h | 8 +- numpy/_core/src/multiarray/number.c | 14 +- numpy/_core/src/multiarray/scalartypes.c.src | 8 +- numpy/_core/src/multiarray/shape.c | 4 +- numpy/_core/src/multiarray/strfuncs.c | 14 +- .../_core/src/multiarray/stringdtype/dtype.c | 8 +- numpy/_core/src/umath/_scaled_float_dtype.c | 4 +- numpy/_core/src/umath/dispatching.c | 2 +- numpy/_core/src/umath/extobj.c | 30 +-- numpy/_core/src/umath/funcs.inc.src | 14 +- numpy/_core/src/umath/override.c | 14 +- numpy/_core/src/umath/scalarmath.c.src | 2 +- numpy/_core/src/umath/ufunc_object.c | 16 +- numpy/_core/src/umath/ufunc_type_resolution.c | 10 +- numpy/_core/src/umath/umathmodule.c | 4 +- 38 files changed, 270 insertions(+), 275 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c index 79d1a13440de..942d48ba01fd 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.c +++ b/numpy/_core/src/common/npy_cpu_dispatch.c @@ -8,7 +8,7 @@ NPY_VISIBILITY_HIDDEN int npy_cpu_dispatch_tracer_init(PyObject *mod) { - if (npy_ma_static_data->cpu_dispatch_registry != NULL) { + if (npy_ma_static_data.cpu_dispatch_registry != NULL) { PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initlized"); return -1; } @@ -25,7 +25,7 @@ npy_cpu_dispatch_tracer_init(PyObject *mod) if (err != 0) { return -1; } - npy_ma_static_data->cpu_dispatch_registry = reg_dict; + npy_ma_static_data.cpu_dispatch_registry = reg_dict; return 0; } @@ -33,13 +33,13 @@ NPY_VISIBILITY_HIDDEN void npy_cpu_dispatch_trace(const char *fname, const char *signature, const char **dispatch_info) { - PyObject *func_dict = PyDict_GetItemString(npy_ma_static_data->cpu_dispatch_registry, fname); + PyObject *func_dict = 
PyDict_GetItemString(npy_ma_static_data.cpu_dispatch_registry, fname); if (func_dict == NULL) { func_dict = PyDict_New(); if (func_dict == NULL) { return; } - int err = PyDict_SetItemString(npy_ma_static_data->cpu_dispatch_registry, fname, func_dict); + int err = PyDict_SetItemString(npy_ma_static_data.cpu_dispatch_registry, fname, func_dict); Py_DECREF(func_dict); if (err != 0) { return; diff --git a/numpy/_core/src/common/npy_ctypes.h b/numpy/_core/src/common/npy_ctypes.h index 1f356741de38..e05c79792bf9 100644 --- a/numpy/_core/src/common/npy_ctypes.h +++ b/numpy/_core/src/common/npy_ctypes.h @@ -22,12 +22,12 @@ npy_ctypes_check(PyTypeObject *obj) int ret; npy_cache_import("numpy._core._internal", "npy_ctypes_check", - &npy_ma_thread_unsafe_state->npy_ctypes_check); - if (npy_ma_thread_unsafe_state->npy_ctypes_check == NULL) { + &npy_ma_thread_unsafe_state.npy_ctypes_check); + if (npy_ma_thread_unsafe_state.npy_ctypes_check == NULL) { goto fail; } - ret_obj = PyObject_CallFunctionObjArgs(npy_ma_thread_unsafe_state->npy_ctypes_check, + ret_obj = PyObject_CallFunctionObjArgs(npy_ma_thread_unsafe_state.npy_ctypes_check, (PyObject *)obj, NULL); if (ret_obj == NULL) { goto fail; diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index bd09d7c00be0..5e373e4d8f25 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -43,7 +43,7 @@ PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj) return NULL; } /* Ignore if the same as ndarray.__array_ufunc__ */ - if (cls_array_ufunc == npy_ma_static_data->ndarray_array_ufunc) { + if (cls_array_ufunc == npy_ma_static_data.ndarray_array_ufunc) { Py_DECREF(cls_array_ufunc); return NULL; } diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index c6615f1e171f..922e5fb3254c 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -48,7 +48,7 @@ NPY_NO_EXPORT PyObject * _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { #ifdef NPY_OS_LINUX - if (npy_ma_thread_unsafe_state->madvise_hugepage) { + if (npy_ma_thread_unsafe_state.madvise_hugepage) { Py_RETURN_TRUE; } #endif @@ -66,12 +66,12 @@ _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) NPY_NO_EXPORT PyObject * _set_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *enabled_obj) { - int was_enabled = npy_ma_thread_unsafe_state->madvise_hugepage; + int was_enabled = npy_ma_thread_unsafe_state.madvise_hugepage; int enabled = PyObject_IsTrue(enabled_obj); if (enabled < 0) { return NULL; } - npy_ma_thread_unsafe_state->madvise_hugepage = enabled; + npy_ma_thread_unsafe_state.madvise_hugepage = enabled; if (was_enabled) { Py_RETURN_TRUE; } @@ -110,7 +110,7 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, #ifdef NPY_OS_LINUX /* allow kernel allocating huge pages for large arrays */ if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u))) && - npy_ma_thread_unsafe_state->madvise_hugepage) { + npy_ma_thread_unsafe_state.madvise_hugepage) { npy_uintp offset = 4096u - (npy_uintp)p % (4096u); npy_uintp length = nelem * esz - offset; /** diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index 684178a0f18b..d4612e3b8ea7 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -186,8 +186,8 @@ static int pyscalar_mode_conv(PyObject *obj, scalar_policy *policy) { PyObject *strings[3] = { - 
npy_ma_str->convert, npy_ma_str->preserve, - npy_ma_str->convert_if_no_array}; + npy_ma_str.convert, npy_ma_str.preserve, + npy_ma_str.convert_if_no_array}; /* First quick pass using the identity (should practically always match) */ for (int i = 0; i < 3; i++) { diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index 11e8deb05d4f..00c29238a8d8 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -20,11 +20,11 @@ get_array_function(PyObject *obj) { /* Fast return for ndarray */ if (PyArray_CheckExact(obj)) { - Py_INCREF(npy_ma_static_data->ndarray_array_function); - return npy_ma_static_data->ndarray_array_function; + Py_INCREF(npy_ma_static_data.ndarray_array_function); + return npy_ma_static_data.ndarray_array_function; } - PyObject *array_function = PyArray_LookupSpecial(obj, npy_ma_str->array_function); + PyObject *array_function = PyArray_LookupSpecial(obj, npy_ma_str.array_function); if (array_function == NULL && PyErr_Occurred()) { PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ } @@ -125,7 +125,7 @@ get_implementing_args_and_methods(PyObject *relevant_args, static int is_default_array_function(PyObject *obj) { - return obj == npy_ma_static_data->ndarray_array_function; + return obj == npy_ma_static_data.ndarray_array_function; } @@ -153,7 +153,7 @@ array_function_method_impl(PyObject *func, PyObject *types, PyObject *args, } } - PyObject *implementation = PyObject_GetAttr(func, npy_ma_str->implementation); + PyObject *implementation = PyObject_GetAttr(func, npy_ma_str.implementation); if (implementation == NULL) { return NULL; } @@ -233,10 +233,10 @@ set_no_matching_types_error(PyObject *public_api, PyObject *types) /* No acceptable override found, raise TypeError. 
*/ npy_cache_import("numpy._core._internal", "array_function_errmsg_formatter", - &npy_ma_thread_unsafe_state->array_function_errmsg_formatter); - if (npy_ma_thread_unsafe_state->array_function_errmsg_formatter != NULL) { + &npy_ma_thread_unsafe_state.array_function_errmsg_formatter); + if (npy_ma_thread_unsafe_state.array_function_errmsg_formatter != NULL) { PyObject *errmsg = PyObject_CallFunctionObjArgs( - npy_ma_thread_unsafe_state->array_function_errmsg_formatter, + npy_ma_thread_unsafe_state.array_function_errmsg_formatter, public_api, types, NULL); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); @@ -299,12 +299,12 @@ array_implement_c_array_function_creation( } /* The like argument must be present in the keyword arguments, remove it */ - if (PyDict_DelItem(kwargs, npy_ma_str->like) < 0) { + if (PyDict_DelItem(kwargs, npy_ma_str.like) < 0) { goto finish; } /* Fetch the actual symbol (the long way right now) */ - numpy_module = PyImport_Import(npy_ma_str->numpy); + numpy_module = PyImport_Import(npy_ma_str.numpy); if (numpy_module == NULL) { goto finish; } diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index da6d13d1ff53..54eaee4029ec 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -928,7 +928,7 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) if (result == NULL && (cmp_op == Py_EQ || cmp_op == Py_NE) && PyErr_ExceptionMatches( - npy_ma_static_data->_UFuncNoLoopError)) { + npy_ma_static_data._UFuncNoLoopError)) { PyErr_Clear(); PyArrayObject *array_other = (PyArrayObject *)PyArray_FROM_O(other); diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 3e2199b2b2a5..aca963453efb 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -4185,7 +4185,7 @@ NPY_NO_EXPORT _PyArray_LegacyDescr @from@_Descr = { /* The smallest type number is ?, the largest bounded by 'z'. */ #define _MAX_LETTER ('z' + 1) -#define LETTER_TO_NUM(letter) npy_ma_static_data->_letter_to_num[letter - '?'] +#define LETTER_TO_NUM(letter) npy_ma_static_data._letter_to_num[letter - '?'] static _PyArray_LegacyDescr *_builtin_descrs[] = { &BOOL_Descr, diff --git a/numpy/_core/src/multiarray/arraywrap.c b/numpy/_core/src/multiarray/arraywrap.c index 1119e62633bf..699a9ca3297e 100644 --- a/numpy/_core/src/multiarray/arraywrap.c +++ b/numpy/_core/src/multiarray/arraywrap.c @@ -57,7 +57,7 @@ npy_find_array_wrap( } } else { - PyObject *new_wrap = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str->array_wrap); + PyObject *new_wrap = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str.array_wrap); if (new_wrap == NULL) { if (PyErr_Occurred()) { goto fail; @@ -160,7 +160,7 @@ npy_apply_wrap( else { /* Replace passed wrap/wrap_type (borrowed refs) with new_wrap/type. 
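 *
 * npy_ma_str.array_wrap is one of the interned strings set up by
 * intern_strings() (see the multiarraymodule.c hunks later in this
 * patch), e.g.:
 *
 *     npy_ma_str.array_wrap = PyUnicode_InternFromString("__array_wrap__");
 *
 * Interning means lookups like the one below can take the pointer-identity
 * fast path instead of hashing a fresh C string on every call.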
*/ new_wrap = PyArray_LookupSpecial_OnInstance( - original_out, npy_ma_str->array_wrap); + original_out, npy_ma_str.array_wrap); if (new_wrap != NULL) { wrap = new_wrap; wrap_type = (PyObject *)Py_TYPE(original_out); diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index d9f903461fbf..ff8f3b07d854 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -142,12 +142,12 @@ check_and_adjust_axis_msg(int *axis, int ndim, PyObject *msg_prefix) if (NPY_UNLIKELY((*axis < -ndim) || (*axis >= ndim))) { /* Invoke the AxisError constructor */ PyObject *exc = PyObject_CallFunction( - npy_ma_static_data->AxisError, "iiO", *axis, ndim, + npy_ma_static_data.AxisError, "iiO", *axis, ndim, msg_prefix); if (exc == NULL) { return -1; } - PyErr_SetObject(npy_ma_static_data->AxisError, exc); + PyErr_SetObject(npy_ma_static_data.AxisError, exc); Py_DECREF(exc); return -1; diff --git a/numpy/_core/src/multiarray/common_dtype.c b/numpy/_core/src/multiarray/common_dtype.c index c258a1abf3a1..88f4388848bb 100644 --- a/numpy/_core/src/multiarray/common_dtype.c +++ b/numpy/_core/src/multiarray/common_dtype.c @@ -64,7 +64,7 @@ PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) } if (common_dtype == (PyArray_DTypeMeta *)Py_NotImplemented) { Py_DECREF(Py_NotImplemented); - PyErr_Format(npy_ma_static_data->DTypePromotionError, + PyErr_Format(npy_ma_static_data.DTypePromotionError, "The DTypes %S and %S do not have a common DType. " "For example they cannot be stored in a single array unless " "the dtype is `object`.", dtype1, dtype2); @@ -285,7 +285,7 @@ PyArray_PromoteDTypeSequence( Py_INCREF(dtypes_in[l]); PyTuple_SET_ITEM(dtypes_in_tuple, l, (PyObject *)dtypes_in[l]); } - PyErr_Format(npy_ma_static_data->DTypePromotionError, + PyErr_Format(npy_ma_static_data.DTypePromotionError, "The DType %S could not be promoted by %S. This means that " "no common DType exists for the given inputs. 
" "For example they cannot be stored in a single array unless " diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 46617d43f11b..9f998b0428b9 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -1414,7 +1414,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t /* Don't add docstrings */ #if PY_VERSION_HEX > 0x030b0000 - if (npy_ma_static_data->optimize > 1) { + if (npy_ma_static_data.optimize > 1) { #else if (Py_OptimizeFlag > 1) { #endif @@ -1858,7 +1858,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) /* for unity stride we can just copy out of the lookup table */ if (order == 'b') { for (index = 0; index < in_n; index++) { - npy_uint64 v = npy_ma_static_data->unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_ma_static_data.unpack_lookup_big[*inptr].uint64; memcpy(outptr, &v, 8); outptr += 8; inptr += in_stride; @@ -1866,7 +1866,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) } else { for (index = 0; index < in_n; index++) { - npy_uint64 v = npy_ma_static_data->unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_ma_static_data.unpack_lookup_big[*inptr].uint64; if (order != 'b') { v = npy_bswap8(v); } @@ -1877,7 +1877,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) } /* Clean up the tail portion */ if (in_tail) { - npy_uint64 v = npy_ma_static_data->unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_ma_static_data.unpack_lookup_big[*inptr].uint64; if (order != 'b') { v = npy_bswap8(v); } diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index d66f4eb26ed0..b0ae1d6f8001 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -235,7 +235,7 @@ PyArray_CopyConverter(PyObject *obj, NPY_COPYMODE *copymode) { int int_copymode; - if ((PyObject *)Py_TYPE(obj) == npy_ma_static_data->_CopyMode) { + if ((PyObject *)Py_TYPE(obj) == npy_ma_static_data._CopyMode) { PyObject* mode_value = PyObject_GetAttrString(obj, "value"); if (mode_value == NULL) { return NPY_FAIL; @@ -270,7 +270,7 @@ PyArray_AsTypeCopyConverter(PyObject *obj, NPY_ASTYPECOPYMODE *copymode) { int int_copymode; - if ((PyObject *)Py_TYPE(obj) == npy_ma_static_data->_CopyMode) { + if ((PyObject *)Py_TYPE(obj) == npy_ma_static_data._CopyMode) { PyErr_SetString(PyExc_ValueError, "_CopyMode enum is not allowed for astype function. 
" "Use true/false instead."); @@ -1411,7 +1411,7 @@ PyArray_IntTupleFromIntp(int len, npy_intp const *vals) NPY_NO_EXPORT int _not_NoValue(PyObject *obj, PyObject **out) { - if (obj == npy_ma_static_data->_NoValue) { + if (obj == npy_ma_static_data._NoValue) { *out = NULL; } else { @@ -1431,7 +1431,7 @@ PyArray_DeviceConverterOptional(PyObject *object, NPY_DEVICE *device) } if (PyUnicode_Check(object) && - PyUnicode_Compare(object, npy_ma_str->cpu) == 0) { + PyUnicode_Compare(object, npy_ma_str.cpu) == 0) { *device = NPY_DEVICE_CPU; return NPY_SUCCEED; } diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index e5624cf1c2f3..3c8b8fcee26b 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -84,13 +84,13 @@ npy_give_promotion_warnings(void) npy_cache_import( "numpy._core._ufunc_config", "NO_NEP50_WARNING", - &npy_ma_thread_unsafe_state->NO_NEP50_WARNING); - if (npy_ma_thread_unsafe_state->NO_NEP50_WARNING == NULL) { + &npy_ma_thread_unsafe_state.NO_NEP50_WARNING); + if (npy_ma_thread_unsafe_state.NO_NEP50_WARNING == NULL) { PyErr_WriteUnraisable(NULL); return 1; } - if (PyContextVar_Get(npy_ma_thread_unsafe_state->NO_NEP50_WARNING, + if (PyContextVar_Get(npy_ma_thread_unsafe_state.NO_NEP50_WARNING, Py_False, &val) < 0) { /* Errors should not really happen, but if it does assume we warn. */ PyErr_WriteUnraisable(NULL); @@ -402,7 +402,7 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) !PyTypeNum_ISCOMPLEX(type_num) && PyTypeNum_ISNUMBER(type_num) && !PyTypeNum_ISBOOL(type_num)) { - int ret = PyErr_WarnEx(npy_ma_static_data->ComplexWarning, + int ret = PyErr_WarnEx(npy_ma_static_data.ComplexWarning, "Casting complex values to real discards " "the imaginary part", 1); if (ret < 0) { @@ -2184,12 +2184,12 @@ PyArray_Zero(PyArrayObject *arr) if they simply memcpy it into a ndarray without using setitem(), refcount errors will occur */ - memcpy(zeroval, &npy_ma_static_data->zero_obj, sizeof(PyObject *)); + memcpy(zeroval, &npy_ma_static_data.zero_obj, sizeof(PyObject *)); return zeroval; } storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_SETITEM(arr, zeroval, npy_ma_static_data->zero_obj); + ret = PyArray_SETITEM(arr, zeroval, npy_ma_static_data.zero_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(zeroval); @@ -2223,13 +2223,13 @@ PyArray_One(PyArrayObject *arr) if they simply memcpy it into a ndarray without using setitem(), refcount errors will occur */ - memcpy(oneval, &npy_ma_static_data->one_obj, sizeof(PyObject *)); + memcpy(oneval, &npy_ma_static_data.one_obj, sizeof(PyObject *)); return oneval; } storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_SETITEM(arr, oneval, npy_ma_static_data->one_obj); + ret = PyArray_SETITEM(arr, oneval, npy_ma_static_data.one_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(oneval); @@ -2612,7 +2612,7 @@ complex_to_noncomplex_get_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - int ret = PyErr_WarnEx(npy_ma_static_data->ComplexWarning, + int ret = PyErr_WarnEx(npy_ma_static_data.ComplexWarning, "Casting complex values to real discards " "the imaginary part", 1); if (ret < 0) { @@ -3239,8 +3239,8 @@ nonstructured_to_structured_get_loop( static PyObject * PyArray_GetGenericToVoidCastingImpl(void) { - 
Py_INCREF(npy_ma_static_data->GenericToVoidMethod); - return npy_ma_static_data->GenericToVoidMethod; + Py_INCREF(npy_ma_static_data.GenericToVoidMethod); + return npy_ma_static_data.GenericToVoidMethod; } @@ -3377,8 +3377,8 @@ structured_to_nonstructured_get_loop( static PyObject * PyArray_GetVoidToGenericCastingImpl(void) { - Py_INCREF(npy_ma_static_data->VoidToGenericMethod); - return npy_ma_static_data->VoidToGenericMethod; + Py_INCREF(npy_ma_static_data.VoidToGenericMethod); + return npy_ma_static_data.VoidToGenericMethod; } @@ -3742,8 +3742,8 @@ object_to_any_resolve_descriptors( static PyObject * PyArray_GetObjectToGenericCastingImpl(void) { - Py_INCREF(npy_ma_static_data->ObjectToGenericMethod); - return npy_ma_static_data->ObjectToGenericMethod; + Py_INCREF(npy_ma_static_data.ObjectToGenericMethod); + return npy_ma_static_data.ObjectToGenericMethod; } @@ -3779,8 +3779,8 @@ any_to_object_resolve_descriptors( static PyObject * PyArray_GetGenericToObjectCastingImpl(void) { - Py_INCREF(npy_ma_static_data->GenericToObjectMethod); - return npy_ma_static_data->GenericToObjectMethod; + Py_INCREF(npy_ma_static_data.GenericToObjectMethod); + return npy_ma_static_data.GenericToObjectMethod; } @@ -3847,7 +3847,7 @@ initialize_void_and_object_globals(void) { method->get_strided_loop = &structured_to_nonstructured_get_loop; method->nin = 1; method->nout = 1; - npy_ma_static_data->VoidToGenericMethod = (PyObject *)method; + npy_ma_static_data.VoidToGenericMethod = (PyObject *)method; method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); if (method == NULL) { @@ -3862,7 +3862,7 @@ initialize_void_and_object_globals(void) { method->get_strided_loop = &nonstructured_to_structured_get_loop; method->nin = 1; method->nout = 1; - npy_ma_static_data->GenericToVoidMethod = (PyObject *)method; + npy_ma_static_data.GenericToVoidMethod = (PyObject *)method; method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); if (method == NULL) { @@ -3877,7 +3877,7 @@ initialize_void_and_object_globals(void) { method->casting = NPY_UNSAFE_CASTING; method->resolve_descriptors = &object_to_any_resolve_descriptors; method->get_strided_loop = &object_to_any_get_loop; - npy_ma_static_data->ObjectToGenericMethod = (PyObject *)method; + npy_ma_static_data.ObjectToGenericMethod = (PyObject *)method; method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); if (method == NULL) { @@ -3892,7 +3892,7 @@ initialize_void_and_object_globals(void) { method->casting = NPY_SAFE_CASTING; method->resolve_descriptors = &any_to_object_resolve_descriptors; method->get_strided_loop = &any_to_object_get_loop; - npy_ma_static_data->GenericToObjectMethod = (PyObject *)method; + npy_ma_static_data.GenericToObjectMethod = (PyObject *)method; return 0; } diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 8e0746dbff8a..1d1a1e34ed36 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -622,7 +622,7 @@ raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) if (exc_value == NULL){ goto fail; } - PyErr_SetObject(npy_ma_static_data->_ArrayMemoryError, exc_value); + PyErr_SetObject(npy_ma_static_data._ArrayMemoryError, exc_value); Py_DECREF(exc_value); return; @@ -928,11 +928,11 @@ PyArray_NewFromDescr_int( */ if (subtype != &PyArray_Type) { PyObject *res, *func; - func = PyObject_GetAttr((PyObject *)subtype, npy_ma_str->array_finalize); + func = PyObject_GetAttr((PyObject *)subtype, npy_ma_str.array_finalize); if (func == NULL) { goto 
fail; } - else if (func == npy_ma_static_data->ndarray_array_finalize) { + else if (func == npy_ma_static_data.ndarray_array_finalize) { Py_DECREF(func); } else if (func == Py_None) { @@ -2030,7 +2030,7 @@ PyArray_FromStructInterface(PyObject *input) PyObject *attr; char endian = NPY_NATBYTE; - attr = PyArray_LookupSpecial_OnInstance(input, npy_ma_str->array_struct); + attr = PyArray_LookupSpecial_OnInstance(input, npy_ma_str.array_struct); if (attr == NULL) { if (PyErr_Occurred()) { return NULL; @@ -2154,7 +2154,7 @@ PyArray_FromInterface(PyObject *origin) npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS]; int dataflags = NPY_ARRAY_BEHAVED; - iface = PyArray_LookupSpecial_OnInstance(origin, npy_ma_str->array_interface); + iface = PyArray_LookupSpecial_OnInstance(origin, npy_ma_str.array_interface); if (iface == NULL) { if (PyErr_Occurred()) { @@ -2457,7 +2457,7 @@ check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames) goto restore_error; } int copy_kwarg_unsupported = PyUnicode_Contains( - str_value, npy_ma_str->array_err_msg_substr); + str_value, npy_ma_str.array_err_msg_substr); Py_DECREF(str_value); if (copy_kwarg_unsupported == -1) { goto restore_error; @@ -2509,7 +2509,7 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, PyObject *new; PyObject *array_meth; - array_meth = PyArray_LookupSpecial_OnInstance(op, npy_ma_str->array); + array_meth = PyArray_LookupSpecial_OnInstance(op, npy_ma_str.array); if (array_meth == NULL) { if (PyErr_Occurred()) { return NULL; @@ -2543,7 +2543,7 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, * signature of the __array__ method being called does not have `copy`. */ if (copy != -1) { - kwnames = npy_ma_static_data->kwnames_is_copy; + kwnames = npy_ma_static_data.kwnames_is_copy; arguments[nargs] = copy == 1 ? Py_True : Py_False; } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 55fd96b7f8e4..90b1f95af8ab 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -726,11 +726,11 @@ _convert_from_commastring(PyObject *obj, int align) PyArray_Descr *res; assert(PyUnicode_Check(obj)); npy_cache_import("numpy._core._internal", "_commastring", - &npy_ma_thread_unsafe_state->_commastring); - if (npy_ma_thread_unsafe_state->_commastring == NULL) { + &npy_ma_thread_unsafe_state._commastring); + if (npy_ma_thread_unsafe_state._commastring == NULL) { return NULL; } - parsed = PyObject_CallOneArg(npy_ma_thread_unsafe_state->_commastring, obj); + parsed = PyObject_CallOneArg(npy_ma_thread_unsafe_state._commastring, obj); if (parsed == NULL) { return NULL; } @@ -2717,7 +2717,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) Py_DECREF(ret); return NULL; } - obj = PyObject_GetAttr(mod, npy_ma_str->dtype); + obj = PyObject_GetAttr(mod, npy_ma_str.dtype); Py_DECREF(mod); if (obj == NULL) { Py_DECREF(ret); diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 43d08711b82b..b1631acb35f6 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -549,7 +549,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *capsule = PyObject_VectorcallMethod( - npy_ma_str->__dlpack__, call_args, nargsf, call_kwnames); + npy_ma_str.__dlpack__, call_args, nargsf, call_kwnames); if (capsule == NULL) { /* * TODO: This path should be deprecated in NumPy 2.1. 
Once deprecated @@ -563,7 +563,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), /* max_version may be unsupported, try without kwargs */ PyErr_Clear(); capsule = PyObject_VectorcallMethod( - npy_ma_str->__dlpack__, call_args, nargsf, NULL); + npy_ma_str.__dlpack__, call_args, nargsf, NULL); } if (capsule == NULL) { return NULL; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 3fcafe9b10ed..ee85d1d6771b 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -752,7 +752,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->subarray == NULL && descr1->names == NULL && descr2->subarray == NULL && descr2->names == NULL) { if (descr1->elsize != descr2->elsize) { - PyErr_SetString(npy_ma_static_data->DTypePromotionError, + PyErr_SetString(npy_ma_static_data.DTypePromotionError, "Invalid type promotion with void datatypes of different " "lengths. Use the `np.bytes_` datatype instead to pad the " "shorter value with trailing zero bytes."); @@ -765,12 +765,12 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->names != NULL && descr2->names != NULL) { /* If both have fields promoting individual fields may be possible */ npy_cache_import("numpy._core._internal", "_promote_fields", - &npy_ma_thread_unsafe_state->_promote_fields); - if (npy_ma_thread_unsafe_state->_promote_fields == NULL) { + &npy_ma_thread_unsafe_state._promote_fields); + if (npy_ma_thread_unsafe_state._promote_fields == NULL) { return NULL; } PyObject *result = PyObject_CallFunctionObjArgs( - npy_ma_thread_unsafe_state->_promote_fields, + npy_ma_thread_unsafe_state._promote_fields, descr1, descr2, NULL); if (result == NULL) { return NULL; @@ -791,7 +791,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return NULL; } if (!cmp) { - PyErr_SetString(npy_ma_static_data->DTypePromotionError, + PyErr_SetString(npy_ma_static_data.DTypePromotionError, "invalid type promotion with subarray datatypes " "(shape mismatch)."); return NULL; @@ -821,7 +821,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return new_descr; } - PyErr_SetString(npy_ma_static_data->DTypePromotionError, + PyErr_SetString(npy_ma_static_data.DTypePromotionError, "invalid type promotion with structured datatype(s)."); return NULL; } @@ -1239,13 +1239,13 @@ dtypemeta_wrap_legacy_descriptor( /* And it to the types submodule if it is a builtin dtype */ if (!PyTypeNum_ISUSERDEF(descr->type_num)) { npy_cache_import("numpy.dtypes", "_add_dtype_helper", - &npy_ma_thread_unsafe_state->_add_dtype_helper); - if (npy_ma_thread_unsafe_state->_add_dtype_helper == NULL) { + &npy_ma_thread_unsafe_state._add_dtype_helper); + if (npy_ma_thread_unsafe_state._add_dtype_helper == NULL) { return -1; } if (PyObject_CallFunction( - npy_ma_thread_unsafe_state->_add_dtype_helper, + npy_ma_thread_unsafe_state._add_dtype_helper, "Os", (PyObject *)dtype_class, alias) == NULL) { return -1; } diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 4ddffc212c24..562f47ca43bb 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -388,12 +388,12 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) PyObject *safe; npy_cache_import("numpy._core._internal", "_view_is_safe", - &npy_ma_thread_unsafe_state->_view_is_safe); - if 
(npy_ma_thread_unsafe_state->_view_is_safe == NULL) { + &npy_ma_thread_unsafe_state._view_is_safe); + if (npy_ma_thread_unsafe_state._view_is_safe == NULL) { goto fail; } - safe = PyObject_CallFunction(npy_ma_thread_unsafe_state->_view_is_safe, + safe = PyObject_CallFunction(npy_ma_thread_unsafe_state._view_is_safe, "OO", PyArray_DESCR(self), newtype); if (safe == NULL) { goto fail; diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 239b4d8e3d1c..99639f1373b6 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -2262,10 +2262,10 @@ PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2) } /* Handle negative axes with standard Python indexing rules */ - if (check_and_adjust_axis_msg(&axis1, ndim, npy_ma_str->axis1) < 0) { + if (check_and_adjust_axis_msg(&axis1, ndim, npy_ma_str.axis1) < 0) { return NULL; } - if (check_and_adjust_axis_msg(&axis2, ndim, npy_ma_str->axis2) < 0) { + if (check_and_adjust_axis_msg(&axis2, ndim, npy_ma_str.axis2) < 0) { return NULL; } if (axis1 == axis2) { diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 68be699e6315..40b99920881f 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -114,11 +114,11 @@ npy_forward_method( #define NPY_FORWARD_NDARRAY_METHOD(name) \ npy_cache_import( \ "numpy._core._methods", #name, \ - &npy_ma_thread_unsafe_state->name); \ - if (npy_ma_thread_unsafe_state->name == NULL) { \ + &npy_ma_thread_unsafe_state.name); \ + if (npy_ma_thread_unsafe_state.name == NULL) { \ return NULL; \ } \ - return npy_forward_method(npy_ma_thread_unsafe_state->name, \ + return npy_forward_method(npy_ma_thread_unsafe_state.name, \ (PyObject *)self, args, len_args, kwnames) @@ -406,14 +406,14 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) /* check that we are not reinterpreting memory containing Objects. 
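 *
 * Rationale: a field view that reinterprets object-dtype storage as, say,
 * an integer dtype would expose raw PyObject pointers (and writing through
 * such a view would corrupt reference counts). The check is therefore
 * delegated to the cached Python helper, which only returns True or
 * raises; the call shape used below is:
 *
 *     safe = PyObject_CallFunction(
 *             npy_ma_thread_unsafe_state._getfield_is_safe,
 *             "OOi", PyArray_DESCR(self), typed, offset);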
*/ if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(typed)) { npy_cache_import("numpy._core._internal", "_getfield_is_safe", - &npy_ma_thread_unsafe_state->_getfield_is_safe); - if (npy_ma_thread_unsafe_state->_getfield_is_safe == NULL) { + &npy_ma_thread_unsafe_state._getfield_is_safe); + if (npy_ma_thread_unsafe_state._getfield_is_safe == NULL) { Py_DECREF(typed); return NULL; } /* only returns True or raises */ - safe = PyObject_CallFunction(npy_ma_thread_unsafe_state->_getfield_is_safe, + safe = PyObject_CallFunction(npy_ma_thread_unsafe_state._getfield_is_safe, "OOi", PyArray_DESCR(self), typed, offset); if (safe == NULL) { @@ -1046,7 +1046,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) } Py_DECREF(out_kwd_obj); /* check where if it exists */ - where_obj = PyDict_GetItemWithError(kwds, npy_ma_str->where); + where_obj = PyDict_GetItemWithError(kwds, npy_ma_str.where); if (where_obj == NULL) { if (PyErr_Occurred()) { return -1; @@ -2248,17 +2248,17 @@ PyArray_Dump(PyObject *self, PyObject *file, int protocol) { PyObject *ret; npy_cache_import("numpy._core._methods", "_dump", - &npy_ma_thread_unsafe_state->_dump); - if (npy_ma_thread_unsafe_state->_dump == NULL) { + &npy_ma_thread_unsafe_state._dump); + if (npy_ma_thread_unsafe_state._dump == NULL) { return -1; } if (protocol < 0) { ret = PyObject_CallFunction( - npy_ma_thread_unsafe_state->_dump, "OO", self, file); + npy_ma_thread_unsafe_state._dump, "OO", self, file); } else { ret = PyObject_CallFunction( - npy_ma_thread_unsafe_state->_dump, "OOi", self, file, protocol); + npy_ma_thread_unsafe_state._dump, "OOi", self, file, protocol); } if (ret == NULL) { return -1; @@ -2272,16 +2272,16 @@ NPY_NO_EXPORT PyObject * PyArray_Dumps(PyObject *self, int protocol) { npy_cache_import("numpy._core._methods", "_dumps", - &npy_ma_thread_unsafe_state->_dumps); - if (npy_ma_thread_unsafe_state->_dumps == NULL) { + &npy_ma_thread_unsafe_state._dumps); + if (npy_ma_thread_unsafe_state._dumps == NULL) { return NULL; } if (protocol < 0) { - return PyObject_CallFunction(npy_ma_thread_unsafe_state->_dumps, "O", self); + return PyObject_CallFunction(npy_ma_thread_unsafe_state._dumps, "O", self); } else { return PyObject_CallFunction( - npy_ma_thread_unsafe_state->_dumps, "Oi", self, protocol); + npy_ma_thread_unsafe_state._dumps, "Oi", self, protocol); } } diff --git a/numpy/_core/src/multiarray/methods.h b/numpy/_core/src/multiarray/methods.h index fd84a1e3b49f..b0cf34e3fab4 100644 --- a/numpy/_core/src/multiarray/methods.h +++ b/numpy/_core/src/multiarray/methods.h @@ -14,11 +14,11 @@ extern NPY_NO_EXPORT PyMethodDef array_methods[]; static inline PyObject * NpyPath_PathlikeToFspath(PyObject *file) { - if (!PyObject_IsInstance(file, npy_ma_static_data->os_PathLike)) { + if (!PyObject_IsInstance(file, npy_ma_static_data.os_PathLike)) { Py_INCREF(file); return file; } - return PyObject_CallFunctionObjArgs(npy_ma_static_data->os_fspath, + return PyObject_CallFunctionObjArgs(npy_ma_static_data.os_fspath, file, NULL); } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index f6da70b885f1..d0f33637c4b3 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -136,7 +136,7 @@ PyArray_GetPriority(PyObject *obj, double default_) return NPY_SCALAR_PRIORITY; } - ret = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str->array_priority); + ret = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str.array_priority); if (ret == 
NULL) { if (PyErr_Occurred()) { /* TODO[gh-14801]: propagate crashes during attribute access? */ @@ -3493,7 +3493,7 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), * weak-promotion branch is in practice identical to dtype one. */ if (get_npy_promotion_state() == NPY_USE_WEAK_PROMOTION) { - PyObject *descr = PyObject_GetAttr(from_obj, npy_ma_str->dtype); + PyObject *descr = PyObject_GetAttr(from_obj, npy_ma_str.dtype); if (descr == NULL) { goto finish; } @@ -4265,7 +4265,7 @@ array_shares_memory_impl(PyObject *args, PyObject *kwds, Py_ssize_t default_max_ } else if (result == MEM_OVERLAP_TOO_HARD) { if (raise_exceptions) { - PyErr_SetString(npy_ma_static_data->TooHardError, + PyErr_SetString(npy_ma_static_data.TooHardError, "Exceeded max_work"); return NULL; } @@ -4360,11 +4360,11 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } /* No need to give the other warning in a sub-interpreter as well... */ - npy_ma_thread_unsafe_state->reload_guard_initialized = 1; + npy_ma_thread_unsafe_state.reload_guard_initialized = 1; Py_RETURN_NONE; } #endif - if (npy_ma_thread_unsafe_state->reload_guard_initialized) { + if (npy_ma_thread_unsafe_state.reload_guard_initialized) { if (PyErr_WarnEx(PyExc_UserWarning, "The NumPy module was reloaded (imported a second time). " "This can in some cases result in small but subtle issues " @@ -4372,7 +4372,7 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } } - npy_ma_thread_unsafe_state->reload_guard_initialized = 1; + npy_ma_thread_unsafe_state.reload_guard_initialized = 1; Py_RETURN_NONE; } @@ -4766,104 +4766,104 @@ set_flaginfo(PyObject *d) return; } -NPY_VISIBILITY_HIDDEN npy_ma_str_struct *npy_ma_str = NULL; -NPY_VISIBILITY_HIDDEN npy_ma_static_data_struct *npy_ma_static_data = NULL; -NPY_VISIBILITY_HIDDEN npy_ma_thread_unsafe_state_struct *npy_ma_thread_unsafe_state = NULL; +// static variables are zero-filled by default, no need to explicitly do so +NPY_VISIBILITY_HIDDEN npy_ma_str_struct npy_ma_str; +NPY_VISIBILITY_HIDDEN npy_ma_static_data_struct npy_ma_static_data; +NPY_VISIBILITY_HIDDEN npy_ma_thread_unsafe_state_struct npy_ma_thread_unsafe_state; static int intern_strings(void) { // this is module-level global heap allocation, it is currently // never freed - npy_ma_str = PyMem_Calloc(sizeof(npy_ma_str_struct), 1); - npy_ma_str->current_allocator = PyUnicode_InternFromString("current_allocator"); - if (npy_ma_str->current_allocator == NULL) { + npy_ma_str.current_allocator = PyUnicode_InternFromString("current_allocator"); + if (npy_ma_str.current_allocator == NULL) { return -1; } - npy_ma_str->array = PyUnicode_InternFromString("__array__"); - if (npy_ma_str->array == NULL) { + npy_ma_str.array = PyUnicode_InternFromString("__array__"); + if (npy_ma_str.array == NULL) { return -1; } - npy_ma_str->array_function = PyUnicode_InternFromString("__array_function__"); - if (npy_ma_str->array_function == NULL) { + npy_ma_str.array_function = PyUnicode_InternFromString("__array_function__"); + if (npy_ma_str.array_function == NULL) { return -1; } - npy_ma_str->array_struct = PyUnicode_InternFromString("__array_struct__"); - if (npy_ma_str->array_struct == NULL) { + npy_ma_str.array_struct = PyUnicode_InternFromString("__array_struct__"); + if (npy_ma_str.array_struct == NULL) { return -1; } - npy_ma_str->array_priority = PyUnicode_InternFromString("__array_priority__"); - if (npy_ma_str->array_priority == NULL) { + npy_ma_str.array_priority = 
PyUnicode_InternFromString("__array_priority__"); + if (npy_ma_str.array_priority == NULL) { return -1; } - npy_ma_str->array_interface = PyUnicode_InternFromString("__array_interface__"); - if (npy_ma_str->array_interface == NULL) { + npy_ma_str.array_interface = PyUnicode_InternFromString("__array_interface__"); + if (npy_ma_str.array_interface == NULL) { return -1; } - npy_ma_str->array_wrap = PyUnicode_InternFromString("__array_wrap__"); - if (npy_ma_str->array_wrap == NULL) { + npy_ma_str.array_wrap = PyUnicode_InternFromString("__array_wrap__"); + if (npy_ma_str.array_wrap == NULL) { return -1; } - npy_ma_str->array_finalize = PyUnicode_InternFromString("__array_finalize__"); - if (npy_ma_str->array_finalize == NULL) { + npy_ma_str.array_finalize = PyUnicode_InternFromString("__array_finalize__"); + if (npy_ma_str.array_finalize == NULL) { return -1; } - npy_ma_str->implementation = PyUnicode_InternFromString("_implementation"); - if (npy_ma_str->implementation == NULL) { + npy_ma_str.implementation = PyUnicode_InternFromString("_implementation"); + if (npy_ma_str.implementation == NULL) { return -1; } - npy_ma_str->axis1 = PyUnicode_InternFromString("axis1"); - if (npy_ma_str->axis1 == NULL) { + npy_ma_str.axis1 = PyUnicode_InternFromString("axis1"); + if (npy_ma_str.axis1 == NULL) { return -1; } - npy_ma_str->axis2 = PyUnicode_InternFromString("axis2"); - if (npy_ma_str->axis2 == NULL) { + npy_ma_str.axis2 = PyUnicode_InternFromString("axis2"); + if (npy_ma_str.axis2 == NULL) { return -1; } - npy_ma_str->like = PyUnicode_InternFromString("like"); - if (npy_ma_str->like == NULL) { + npy_ma_str.like = PyUnicode_InternFromString("like"); + if (npy_ma_str.like == NULL) { return -1; } - npy_ma_str->numpy = PyUnicode_InternFromString("numpy"); - if (npy_ma_str->numpy == NULL) { + npy_ma_str.numpy = PyUnicode_InternFromString("numpy"); + if (npy_ma_str.numpy == NULL) { return -1; } - npy_ma_str->where = PyUnicode_InternFromString("where"); - if (npy_ma_str->where == NULL) { + npy_ma_str.where = PyUnicode_InternFromString("where"); + if (npy_ma_str.where == NULL) { return -1; } /* scalar policies */ - npy_ma_str->convert = PyUnicode_InternFromString("convert"); - if (npy_ma_str->convert == NULL) { + npy_ma_str.convert = PyUnicode_InternFromString("convert"); + if (npy_ma_str.convert == NULL) { return -1; } - npy_ma_str->preserve = PyUnicode_InternFromString("preserve"); - if (npy_ma_str->preserve == NULL) { + npy_ma_str.preserve = PyUnicode_InternFromString("preserve"); + if (npy_ma_str.preserve == NULL) { return -1; } - npy_ma_str->convert_if_no_array = PyUnicode_InternFromString("convert_if_no_array"); - if (npy_ma_str->convert_if_no_array == NULL) { + npy_ma_str.convert_if_no_array = PyUnicode_InternFromString("convert_if_no_array"); + if (npy_ma_str.convert_if_no_array == NULL) { return -1; } - npy_ma_str->cpu = PyUnicode_InternFromString("cpu"); - if (npy_ma_str->cpu == NULL) { + npy_ma_str.cpu = PyUnicode_InternFromString("cpu"); + if (npy_ma_str.cpu == NULL) { return -1; } - npy_ma_str->dtype = PyUnicode_InternFromString("dtype"); - if (npy_ma_str->dtype == NULL) { + npy_ma_str.dtype = PyUnicode_InternFromString("dtype"); + if (npy_ma_str.dtype == NULL) { return -1; } - npy_ma_str->array_err_msg_substr = PyUnicode_InternFromString( + npy_ma_str.array_err_msg_substr = PyUnicode_InternFromString( "__array__() got an unexpected keyword argument 'copy'"); - if (npy_ma_str->array_err_msg_substr == NULL) { + if (npy_ma_str.array_err_msg_substr == NULL) { return -1; } - npy_ma_str->out = 
PyUnicode_InternFromString("out"); - if (npy_ma_str->out == NULL) { + npy_ma_str.out = PyUnicode_InternFromString("out"); + if (npy_ma_str.out == NULL) { return -1; } - npy_ma_str->__dlpack__ = PyUnicode_InternFromString("__dlpack__"); - if (npy_ma_str->__dlpack__ == NULL) { + npy_ma_str.__dlpack__ = PyUnicode_InternFromString("__dlpack__"); + if (npy_ma_str.__dlpack__ == NULL) { return -1; } return 0; @@ -4896,66 +4896,61 @@ intern_strings(void) static int initialize_static_globals(void) { - // this is module-level global heap allocation, it is currently - // never freed - npy_ma_static_data = PyMem_Calloc(1, sizeof(npy_ma_static_data_struct)); - npy_ma_thread_unsafe_state = PyMem_Calloc(1, sizeof(npy_ma_thread_unsafe_state_struct)); - // cached reference to objects defined in python IMPORT_GLOBAL("math", "floor", - npy_ma_static_data->math_floor_func); + npy_ma_static_data.math_floor_func); IMPORT_GLOBAL("math", "ceil", - npy_ma_static_data->math_ceil_func); + npy_ma_static_data.math_ceil_func); IMPORT_GLOBAL("math", "trunc", - npy_ma_static_data->math_trunc_func); + npy_ma_static_data.math_trunc_func); IMPORT_GLOBAL("math", "gcd", - npy_ma_static_data->math_gcd_func); + npy_ma_static_data.math_gcd_func); IMPORT_GLOBAL("numpy.exceptions", "AxisError", - npy_ma_static_data->AxisError); + npy_ma_static_data.AxisError); IMPORT_GLOBAL("numpy.exceptions", "ComplexWarning", - npy_ma_static_data->ComplexWarning); + npy_ma_static_data.ComplexWarning); IMPORT_GLOBAL("numpy.exceptions", "DTypePromotionError", - npy_ma_static_data->DTypePromotionError); + npy_ma_static_data.DTypePromotionError); IMPORT_GLOBAL("numpy.exceptions", "TooHardError", - npy_ma_static_data->TooHardError); + npy_ma_static_data.TooHardError); IMPORT_GLOBAL("numpy.exceptions", "VisibleDeprecationWarning", - npy_ma_static_data->VisibleDeprecationWarning); + npy_ma_static_data.VisibleDeprecationWarning); IMPORT_GLOBAL("numpy._globals", "_CopyMode", - npy_ma_static_data->_CopyMode); + npy_ma_static_data._CopyMode); IMPORT_GLOBAL("numpy._globals", "_NoValue", - npy_ma_static_data->_NoValue); + npy_ma_static_data._NoValue); IMPORT_GLOBAL("numpy._core._exceptions", "_ArrayMemoryError", - npy_ma_static_data->_ArrayMemoryError); + npy_ma_static_data._ArrayMemoryError); IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncBinaryResolutionError", - npy_ma_static_data->_UFuncBinaryResolutionError); + npy_ma_static_data._UFuncBinaryResolutionError); IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncInputCastingError", - npy_ma_static_data->_UFuncInputCastingError); + npy_ma_static_data._UFuncInputCastingError); IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncNoLoopError", - npy_ma_static_data->_UFuncNoLoopError); + npy_ma_static_data._UFuncNoLoopError); IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncOutputCastingError", - npy_ma_static_data->_UFuncOutputCastingError); + npy_ma_static_data._UFuncOutputCastingError); IMPORT_GLOBAL("os", "fspath", - npy_ma_static_data->os_fspath); + npy_ma_static_data.os_fspath); IMPORT_GLOBAL("os", "PathLike", - npy_ma_static_data->os_PathLike); + npy_ma_static_data.os_PathLike); char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY"); if ((env != NULL) && (strncmp(env, "1", 1) == 0)) { @@ -4971,9 +4966,9 @@ initialize_static_globals(void) return -1; } - npy_ma_static_data->default_truediv_type_tup = + npy_ma_static_data.default_truediv_type_tup = PyTuple_Pack(3, tmp, tmp, tmp); - if (npy_ma_static_data->default_truediv_type_tup == NULL) { + if (npy_ma_static_data.default_truediv_type_tup == NULL) { Py_DECREF(tmp); 
return -1; } @@ -4988,7 +4983,7 @@ initialize_static_globals(void) if (level == NULL) { return -1; } - npy_ma_static_data->optimize = PyLong_AsLong(level); + npy_ma_static_data.optimize = PyLong_AsLong(level); Py_DECREF(level); /* @@ -5004,22 +4999,22 @@ initialize_static_globals(void) npy_intp k; for (k=0; k < 8; k++) { npy_uint8 v = (j & (1 << k)) == (1 << k); - npy_ma_static_data->unpack_lookup_big[j].bytes[7 - k] = v; + npy_ma_static_data.unpack_lookup_big[j].bytes[7 - k] = v; } } - npy_ma_static_data->kwnames_is_copy = Py_BuildValue("(s)", "copy"); - if (npy_ma_static_data->kwnames_is_copy == NULL) { + npy_ma_static_data.kwnames_is_copy = Py_BuildValue("(s)", "copy"); + if (npy_ma_static_data.kwnames_is_copy == NULL) { return -1; } - npy_ma_static_data->one_obj = PyLong_FromLong((long) 1); - if (npy_ma_static_data->one_obj == NULL) { + npy_ma_static_data.one_obj = PyLong_FromLong((long) 1); + if (npy_ma_static_data.one_obj == NULL) { return -1; } - npy_ma_static_data->zero_obj = PyLong_FromLong((long) 0); - if (npy_ma_static_data->zero_obj == NULL) { + npy_ma_static_data.zero_obj = PyLong_FromLong((long) 0); + if (npy_ma_static_data.zero_obj == NULL) { return -1; } @@ -5288,11 +5283,11 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { } // initialize static references to ndarray.__array_*__ special methods - npy_ma_static_data->ndarray_array_finalize = PyObject_GetAttrString( + npy_ma_static_data.ndarray_array_finalize = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_finalize__"); - npy_ma_static_data->ndarray_array_ufunc = PyObject_GetAttrString( + npy_ma_static_data.ndarray_array_ufunc = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_ufunc__"); - npy_ma_static_data->ndarray_array_function = PyObject_GetAttrString( + npy_ma_static_data.ndarray_array_function = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_function__"); /* @@ -5305,13 +5300,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { * the legacy dtypemeta classes are available. 
*/ npy_cache_import("numpy.dtypes", "_add_dtype_helper", - &npy_ma_thread_unsafe_state->_add_dtype_helper); - if (npy_ma_thread_unsafe_state->_add_dtype_helper == NULL) { + &npy_ma_thread_unsafe_state._add_dtype_helper); + if (npy_ma_thread_unsafe_state._add_dtype_helper == NULL) { goto err; } if (PyObject_CallFunction( - npy_ma_thread_unsafe_state->_add_dtype_helper, + npy_ma_thread_unsafe_state._add_dtype_helper, "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) { goto err; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 95ad1a4cf18a..6058ee640c1a 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -211,8 +211,10 @@ typedef struct npy_ma_thread_unsafe_state_struct { int reload_guard_initialized; } npy_ma_thread_unsafe_state_struct; -NPY_VISIBILITY_HIDDEN extern npy_ma_str_struct *npy_ma_str; -NPY_VISIBILITY_HIDDEN extern npy_ma_static_data_struct *npy_ma_static_data; -NPY_VISIBILITY_HIDDEN extern npy_ma_thread_unsafe_state_struct *npy_ma_thread_unsafe_state; + +NPY_VISIBILITY_HIDDEN extern npy_ma_str_struct npy_ma_str; +NPY_VISIBILITY_HIDDEN extern npy_ma_static_data_struct npy_ma_static_data; +NPY_VISIBILITY_HIDDEN extern npy_ma_thread_unsafe_state_struct npy_ma_thread_unsafe_state; + #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index c0049a637efa..15b9eab3282e 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -124,15 +124,15 @@ _PyArray_SetNumericOps(PyObject *dict) SET(clip); // initialize static globals needed for matmul - npy_ma_static_data->axes_1d_obj_kwargs = Py_BuildValue( + npy_ma_static_data.axes_1d_obj_kwargs = Py_BuildValue( "{s, [(i), (i, i), (i)]}", "axes", -1, -2, -1, -1); - if (npy_ma_static_data->axes_1d_obj_kwargs == NULL) { + if (npy_ma_static_data.axes_1d_obj_kwargs == NULL) { return -1; } - npy_ma_static_data->axes_2d_obj_kwargs = Py_BuildValue( + npy_ma_static_data.axes_2d_obj_kwargs = Py_BuildValue( "{s, [(i, i), (i, i), (i, i)]}", "axes", -2, -1, -2, -1, -2, -1); - if (npy_ma_static_data->axes_2d_obj_kwargs == NULL) { + if (npy_ma_static_data.axes_2d_obj_kwargs == NULL) { return -1; } @@ -305,10 +305,10 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) * passing the correct `axes=`. */ if (PyArray_NDIM(self) == 1) { - kwargs = npy_ma_static_data->axes_1d_obj_kwargs; + kwargs = npy_ma_static_data.axes_1d_obj_kwargs; } else { - kwargs = npy_ma_static_data->axes_2d_obj_kwargs; + kwargs = npy_ma_static_data.axes_2d_obj_kwargs; } PyObject *res = PyObject_Call(n_ops.matmul, args, kwargs); Py_DECREF(args); @@ -318,7 +318,7 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) * AxisError should indicate that the axes argument didn't work out * which should mean the second operand not being 2 dimensional. 
*/ - if (PyErr_ExceptionMatches(npy_ma_static_data->AxisError)) { + if (PyErr_ExceptionMatches(npy_ma_static_data.AxisError)) { PyErr_SetString(PyExc_ValueError, "inplace matrix multiplication requires the first operand to " "have at least one and the second at least two dimensions."); diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 7e855b5388d2..32c2a6cfdb1e 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -612,13 +612,13 @@ static PyObject * _void_scalar_to_string(PyObject *obj, int repr) { npy_cache_import("numpy._core.arrayprint", "_void_scalar_to_string", - &npy_ma_thread_unsafe_state->_void_scalar_to_string); - if (npy_ma_thread_unsafe_state->_void_scalar_to_string == NULL) { + &npy_ma_thread_unsafe_state._void_scalar_to_string); + if (npy_ma_thread_unsafe_state._void_scalar_to_string == NULL) { return NULL; } PyObject *is_repr = repr ? Py_True : Py_False; return PyObject_CallFunctionObjArgs( - npy_ma_thread_unsafe_state->_void_scalar_to_string, obj, is_repr, NULL); + npy_ma_thread_unsafe_state._void_scalar_to_string, obj, is_repr, NULL); } static PyObject * @@ -3038,7 +3038,7 @@ object_arrtype_alloc(PyTypeObject *type, Py_ssize_t items) * Object scalars should not actually exist, if they exist we should * consider it to be a bug. */ - if (PyErr_WarnEx(npy_ma_static_data->VisibleDeprecationWarning, + if (PyErr_WarnEx(npy_ma_static_data.VisibleDeprecationWarning, "Creating a NumPy object scalar. NumPy object scalars should " "never be created. If you see this message please inform the " "NumPy developers. Since this message should never be shown " diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index ab4003d8dbae..079ac5d7df5c 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -668,10 +668,10 @@ PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) int n = PyArray_NDIM(ap); int i; - if (check_and_adjust_axis_msg(&a1, n, npy_ma_str->axis1) < 0) { + if (check_and_adjust_axis_msg(&a1, n, npy_ma_str.axis1) < 0) { return NULL; } - if (check_and_adjust_axis_msg(&a2, n, npy_ma_str->axis2) < 0) { + if (check_and_adjust_axis_msg(&a2, n, npy_ma_str.axis2) < 0) { return NULL; } diff --git a/numpy/_core/src/multiarray/strfuncs.c b/numpy/_core/src/multiarray/strfuncs.c index 91a716f9f13a..0c15e16b5f0c 100644 --- a/numpy/_core/src/multiarray/strfuncs.c +++ b/numpy/_core/src/multiarray/strfuncs.c @@ -34,40 +34,38 @@ PyArray_SetStringFunction(PyObject *op, int repr) NPY_NO_EXPORT PyObject * array_repr(PyArrayObject *self) { - static PyObject *repr = NULL; /* * We need to do a delayed import here as initialization on module load * leads to circular import problems. 
*/ npy_cache_import("numpy._core.arrayprint", "_default_array_repr", - &npy_ma_thread_unsafe_state->_default_array_repr); - if (npy_ma_thread_unsafe_state->_default_array_repr == NULL) { + &npy_ma_thread_unsafe_state._default_array_repr); + if (npy_ma_thread_unsafe_state._default_array_repr == NULL) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__repr__"); return NULL; } return PyObject_CallFunctionObjArgs( - npy_ma_thread_unsafe_state->_default_array_repr, self, NULL); + npy_ma_thread_unsafe_state._default_array_repr, self, NULL); } NPY_NO_EXPORT PyObject * array_str(PyArrayObject *self) { - static PyObject *str = NULL; /* * We need to do a delayed import here as initialization on module load leads * to circular import problems. */ npy_cache_import("numpy._core.arrayprint", "_default_array_str", - &npy_ma_thread_unsafe_state->_default_array_str); - if (npy_ma_thread_unsafe_state->_default_array_str == NULL) { + &npy_ma_thread_unsafe_state._default_array_str); + if (npy_ma_thread_unsafe_state._default_array_str == NULL) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__str__"); return NULL; } return PyObject_CallFunctionObjArgs( - npy_ma_thread_unsafe_state->_default_array_str, self, NULL); + npy_ma_thread_unsafe_state._default_array_str, self, NULL); } diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 83d1c5d04b5c..64b88aa62703 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -716,20 +716,20 @@ static PyObject * stringdtype__reduce__(PyArray_StringDTypeObject *self, PyObject *NPY_UNUSED(args)) { npy_cache_import("numpy._core._internal", "_convert_to_stringdtype_kwargs", - &npy_ma_thread_unsafe_state->_convert_to_stringdtype_kwargs); + &npy_ma_thread_unsafe_state._convert_to_stringdtype_kwargs); - if (npy_ma_thread_unsafe_state->_convert_to_stringdtype_kwargs == NULL) { + if (npy_ma_thread_unsafe_state._convert_to_stringdtype_kwargs == NULL) { return NULL; } if (self->na_object != NULL) { return Py_BuildValue( - "O(iO)", npy_ma_thread_unsafe_state->_convert_to_stringdtype_kwargs, + "O(iO)", npy_ma_thread_unsafe_state._convert_to_stringdtype_kwargs, self->coerce, self->na_object); } return Py_BuildValue( - "O(i)", npy_ma_thread_unsafe_state->_convert_to_stringdtype_kwargs, + "O(i)", npy_ma_thread_unsafe_state._convert_to_stringdtype_kwargs, self->coerce); } diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 0bc91f388c08..d73139738109 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -867,7 +867,7 @@ sfloat_init_ufuncs(void) { NPY_NO_EXPORT PyObject * get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) { - if (npy_ma_thread_unsafe_state->get_sfloat_dtype_initialized) { + if (npy_ma_thread_unsafe_state.get_sfloat_dtype_initialized) { Py_INCREF(&PyArray_SFloatDType); return (PyObject *)&PyArray_SFloatDType; } @@ -896,6 +896,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } - npy_ma_thread_unsafe_state->get_sfloat_dtype_initialized = NPY_TRUE; + npy_ma_thread_unsafe_state.get_sfloat_dtype_initialized = NPY_TRUE; return (PyObject *)&PyArray_SFloatDType; } diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index 0f7a78666a1b..dfe918b5482f 100644 --- 
a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -1062,7 +1062,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, * then we chain it, because DTypePromotionError effectively means that there * is no loop available. (We failed finding a loop by using promotion.) */ - else if (PyErr_ExceptionMatches(npy_ma_static_data->DTypePromotionError)) { + else if (PyErr_ExceptionMatches(npy_ma_static_data.DTypePromotionError)) { PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL; PyErr_Fetch(&err_type, &err_value, &err_traceback); raise_no_loop_found_error(ufunc, (PyObject **)op_dtypes); diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index a10fdfb5a911..09ab0be4d3d4 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -122,8 +122,8 @@ fetch_curr_extobj_state(npy_extobj *extobj) { PyObject *capsule; if (PyContextVar_Get( - npy_ma_static_data->npy_extobj_contextvar, - npy_ma_static_data->default_extobj_capsule, &capsule) < 0) { + npy_ma_static_data.npy_extobj_contextvar, + npy_ma_static_data.default_extobj_capsule, &capsule) < 0) { return -1; } npy_extobj *obj = PyCapsule_GetPointer(capsule, "numpy.ufunc.extobj"); @@ -150,22 +150,22 @@ init_extobj(void) * inputs. */ for (int i = 0; i <= UFUNC_ERR_LOG; i++) { - npy_ma_str->errmode_strings[i] = PyUnicode_InternFromString( + npy_ma_str.errmode_strings[i] = PyUnicode_InternFromString( errmode_cstrings[i]); - if (npy_ma_str->errmode_strings[i] == NULL) { + if (npy_ma_str.errmode_strings[i] == NULL) { return -1; } } - npy_ma_static_data->default_extobj_capsule = make_extobj_capsule( + npy_ma_static_data.default_extobj_capsule = make_extobj_capsule( NPY_BUFSIZE, UFUNC_ERR_DEFAULT, Py_None); - if (npy_ma_static_data->default_extobj_capsule == NULL) { + if (npy_ma_static_data.default_extobj_capsule == NULL) { return -1; } - npy_ma_static_data->npy_extobj_contextvar = PyContextVar_New( - "numpy.ufunc.extobj", npy_ma_static_data->default_extobj_capsule); - if (npy_ma_static_data->npy_extobj_contextvar == NULL) { - Py_CLEAR(npy_ma_static_data->default_extobj_capsule); + npy_ma_static_data.npy_extobj_contextvar = PyContextVar_New( + "numpy.ufunc.extobj", npy_ma_static_data.default_extobj_capsule); + if (npy_ma_static_data.npy_extobj_contextvar == NULL) { + Py_CLEAR(npy_ma_static_data.default_extobj_capsule); return -1; } return 0; @@ -185,7 +185,7 @@ errmodeconverter(PyObject *obj, int *mode) int i = 0; for (; i <= UFUNC_ERR_LOG; i++) { int eq = PyObject_RichCompareBool( - obj, npy_ma_str->errmode_strings[i], Py_EQ); + obj, npy_ma_str.errmode_strings[i], Py_EQ); if (eq == -1) { return 0; } @@ -333,22 +333,22 @@ extobj_get_extobj_dict(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(noarg)) /* Set all error modes: */ mode = (extobj.errmask & UFUNC_MASK_DIVIDEBYZERO) >> UFUNC_SHIFT_DIVIDEBYZERO; if (PyDict_SetItemString(result, "divide", - npy_ma_str->errmode_strings[mode]) < 0) { + npy_ma_str.errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_OVERFLOW) >> UFUNC_SHIFT_OVERFLOW; if (PyDict_SetItemString(result, "over", - npy_ma_str->errmode_strings[mode]) < 0) { + npy_ma_str.errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_UNDERFLOW) >> UFUNC_SHIFT_UNDERFLOW; if (PyDict_SetItemString(result, "under", - npy_ma_str->errmode_strings[mode]) < 0) { + npy_ma_str.errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_INVALID) >> UFUNC_SHIFT_INVALID; if (PyDict_SetItemString(result, 
"invalid", - npy_ma_str->errmode_strings[mode]) < 0) { + npy_ma_str.errmode_strings[mode]) < 0) { goto fail; } diff --git a/numpy/_core/src/umath/funcs.inc.src b/numpy/_core/src/umath/funcs.inc.src index f068ab026ec1..938a5f928cd8 100644 --- a/numpy/_core/src/umath/funcs.inc.src +++ b/numpy/_core/src/umath/funcs.inc.src @@ -157,19 +157,19 @@ npy_ObjectLogicalNot(PyObject *i1) static PyObject * npy_ObjectFloor(PyObject *obj) { - return PyObject_CallFunction(npy_ma_static_data->math_floor_func, + return PyObject_CallFunction(npy_ma_static_data.math_floor_func, "O", obj); } static PyObject * npy_ObjectCeil(PyObject *obj) { - return PyObject_CallFunction(npy_ma_static_data->math_ceil_func, + return PyObject_CallFunction(npy_ma_static_data.math_ceil_func, "O", obj); } static PyObject * npy_ObjectTrunc(PyObject *obj) { - return PyObject_CallFunction(npy_ma_static_data->math_trunc_func, + return PyObject_CallFunction(npy_ma_static_data.math_trunc_func, "O", obj); } @@ -180,7 +180,7 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* use math.gcd if valid on the provided types */ { - gcd = PyObject_CallFunction(npy_ma_static_data->math_gcd_func, + gcd = PyObject_CallFunction(npy_ma_static_data.math_gcd_func, "OO", i1, i2); if (gcd != NULL) { return gcd; @@ -192,11 +192,11 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* otherwise, use our internal one, written in python */ { npy_cache_import("numpy._core._internal", "_gcd", - &npy_ma_thread_unsafe_state->internal_gcd_func); - if (npy_ma_thread_unsafe_state->internal_gcd_func == NULL) { + &npy_ma_thread_unsafe_state.internal_gcd_func); + if (npy_ma_thread_unsafe_state.internal_gcd_func == NULL) { return NULL; } - gcd = PyObject_CallFunction(npy_ma_thread_unsafe_state->internal_gcd_func, + gcd = PyObject_CallFunction(npy_ma_thread_unsafe_state.internal_gcd_func, "OO", i1, i2); if (gcd == NULL) { return NULL; diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index 6f31ee40f49e..2e121f52bfe7 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -114,19 +114,19 @@ initialize_normal_kwds(PyObject *out_args, if (out_args != NULL) { /* Replace `out` argument with the normalized version */ - int res = PyDict_SetItem(normal_kwds, npy_ma_str->out, out_args); + int res = PyDict_SetItem(normal_kwds, npy_ma_str.out, out_args); if (res < 0) { return -1; } } else { /* Ensure that `out` is not present. */ - int res = PyDict_Contains(normal_kwds, npy_ma_str->out); + int res = PyDict_Contains(normal_kwds, npy_ma_str.out); if (res < 0) { return -1; } if (res) { - return PyDict_DelItem(normal_kwds, npy_ma_str->out); + return PyDict_DelItem(normal_kwds, npy_ma_str.out); } } return 0; @@ -177,7 +177,7 @@ copy_positional_args_to_kwargs(const char **keywords, * 5 keyword arguments. 
*/ assert(strcmp(keywords[i], "initial") == 0); - if (args[i] == npy_ma_static_data->_NoValue) { + if (args[i] == npy_ma_static_data._NoValue) { continue; } } @@ -371,10 +371,10 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, npy_cache_import( "numpy._core._internal", "array_ufunc_errmsg_formatter", - &npy_ma_thread_unsafe_state->array_ufunc_errmsg_formatter); - if (npy_ma_thread_unsafe_state->array_ufunc_errmsg_formatter != NULL) { + &npy_ma_thread_unsafe_state.array_ufunc_errmsg_formatter); + if (npy_ma_thread_unsafe_state.array_ufunc_errmsg_formatter != NULL) { errmsg = PyObject_Call( - npy_ma_thread_unsafe_state->array_ufunc_errmsg_formatter, + npy_ma_thread_unsafe_state.array_ufunc_errmsg_formatter, override_args, normal_kwds); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index 024596e9262a..517435894435 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -1788,7 +1788,7 @@ static int static int emit_complexwarning(void) { - return PyErr_WarnEx(npy_ma_static_data->ComplexWarning, + return PyErr_WarnEx(npy_ma_static_data.ComplexWarning, "Casting complex values to real discards the imaginary part", 1); } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 12b05625d02f..4d7cdd3d1763 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -1383,7 +1383,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, if (PyTuple_Check(op_axes_tuple)) { if (PyTuple_Size(op_axes_tuple) != op_ncore) { /* must have been a tuple with too many entries. */ - PyErr_Format(npy_ma_static_data->AxisError, + PyErr_Format(npy_ma_static_data.AxisError, "%s: operand %d has %d core dimensions, " "but %zd dimensions are specified by axes tuple.", ufunc_get_name_cstr(ufunc), iop, op_ncore, @@ -1407,7 +1407,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, return -1; } /* If it is a single integer, inform user that more are needed */ - PyErr_Format(npy_ma_static_data->AxisError, + PyErr_Format(npy_ma_static_data.AxisError, "%s: operand %d has %d core dimensions, " "but the axes item is a single integer.", ufunc_get_name_cstr(ufunc), iop, op_ncore); @@ -5240,7 +5240,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) { PyArrayObject *ap1 = NULL; PyObject *tmp; - npy_cache_import("numpy", "matrix", &npy_ma_thread_unsafe_state->numpy_matrix); + npy_cache_import("numpy", "matrix", &npy_ma_thread_unsafe_state.numpy_matrix); const char *matrix_deprecation_msg = ( "%s.outer() was passed a numpy matrix as %s argument. 
" @@ -5251,7 +5251,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) tmp = PyTuple_GET_ITEM(args, 0); - if (PyObject_IsInstance(tmp, npy_ma_thread_unsafe_state->numpy_matrix)) { + if (PyObject_IsInstance(tmp, npy_ma_thread_unsafe_state.numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "first") < 0) { @@ -5268,7 +5268,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) PyArrayObject *ap2 = NULL; tmp = PyTuple_GET_ITEM(args, 1); - if (PyObject_IsInstance(tmp, npy_ma_thread_unsafe_state->numpy_matrix)) { + if (PyObject_IsInstance(tmp, npy_ma_thread_unsafe_state.numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "second") < 0) { @@ -6422,9 +6422,9 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) npy_cache_import( "numpy._core._internal", "_ufunc_doc_signature_formatter", - &npy_ma_thread_unsafe_state->_ufunc_doc_signature_formatter); + &npy_ma_thread_unsafe_state._ufunc_doc_signature_formatter); - if (npy_ma_thread_unsafe_state->_ufunc_doc_signature_formatter == NULL) { + if (npy_ma_thread_unsafe_state._ufunc_doc_signature_formatter == NULL) { return NULL; } @@ -6433,7 +6433,7 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) * introspection on name and nin + nout to automate the first part * of it the doc string shouldn't need the calling convention */ - doc = PyObject_CallFunctionObjArgs(npy_ma_thread_unsafe_state->_ufunc_doc_signature_formatter, + doc = PyObject_CallFunctionObjArgs(npy_ma_thread_unsafe_state._ufunc_doc_signature_formatter, (PyObject *)ufunc, NULL); if (doc == NULL) { return NULL; diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index a8d3f3df4bf8..69fa9b09b61e 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -90,7 +90,7 @@ raise_binary_type_reso_error(PyUFuncObject *ufunc, PyArrayObject **operands) { return -1; } PyErr_SetObject( - npy_ma_static_data->_UFuncBinaryResolutionError, exc_value); + npy_ma_static_data._UFuncBinaryResolutionError, exc_value); Py_DECREF(exc_value); return -1; @@ -113,7 +113,7 @@ raise_no_loop_found_error( if (exc_value == NULL) { return -1; } - PyErr_SetObject(npy_ma_static_data->_UFuncNoLoopError, exc_value); + PyErr_SetObject(npy_ma_static_data._UFuncNoLoopError, exc_value); Py_DECREF(exc_value); return -1; @@ -165,7 +165,7 @@ raise_input_casting_error( PyArray_Descr *to, npy_intp i) { - return raise_casting_error(npy_ma_static_data->_UFuncInputCastingError, + return raise_casting_error(npy_ma_static_data._UFuncInputCastingError, ufunc, casting, from, to, i); } @@ -181,7 +181,7 @@ raise_output_casting_error( PyArray_Descr *to, npy_intp i) { - return raise_casting_error(npy_ma_static_data->_UFuncOutputCastingError, + return raise_casting_error(npy_ma_static_data._UFuncOutputCastingError, ufunc, casting, from, to, i); } @@ -1420,7 +1420,7 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2))) { return PyUFunc_DefaultTypeResolver( ufunc, casting, operands, - npy_ma_static_data->default_truediv_type_tup, out_dtypes); + npy_ma_static_data.default_truediv_type_tup, out_dtypes); } return PyUFunc_DivisionTypeResolver(ufunc, casting, operands, type_tup, out_dtypes); diff --git 
a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c
index 96a78759c0e9..56020fd16847 100644
--- a/numpy/_core/src/umath/umathmodule.c
+++ b/numpy/_core/src/umath/umathmodule.c
@@ -273,8 +273,8 @@ int initumath(PyObject *m)
 #undef ADDSCONST
     PyModule_AddIntConstant(m, "UFUNC_BUFSIZE_DEFAULT", (long)NPY_BUFSIZE);
-    Py_INCREF(npy_ma_static_data->npy_extobj_contextvar);
-    PyModule_AddObject(m, "_extobj_contextvar", npy_ma_static_data->npy_extobj_contextvar);
+    Py_INCREF(npy_ma_static_data.npy_extobj_contextvar);
+    PyModule_AddObject(m, "_extobj_contextvar", npy_ma_static_data.npy_extobj_contextvar);
     PyModule_AddObject(m, "PINF", PyFloat_FromDouble(NPY_INFINITY));
     PyModule_AddObject(m, "NINF", PyFloat_FromDouble(-NPY_INFINITY));
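The next patch in the series folds NumPy's module-level state from heap-allocated pointer structs into plain static structs, splitting the old globals into `npy_interned_str`, `npy_static_pydata`, `npy_static_cdata`, and `npy_thread_unsafe_state`. As a minimal sketch of the before/after pattern (the struct, field, and function names below are illustrative only, not the actual NumPy definitions):

    /* Sketch only: hypothetical names, not the real NumPy globals. */
    #include <Python.h>

    typedef struct {
        PyObject *out;    /* interned "out" */
        PyObject *axis1;  /* interned "axis1" */
    } interned_str_struct;

    /*
     * Before: a pointer that had to be heap-allocated at module init and
     * was never freed:
     *
     *     interned_str_struct *interned_str =
     *             PyMem_Calloc(1, sizeof(interned_str_struct));
     *     interned_str->out = PyUnicode_InternFromString("out");
     *
     * After: a static struct. Statics are zero-filled by default, so there
     * is no allocation step and no extra failure path to check.
     */
    static interned_str_struct interned_str;

    static int
    intern_strings(void)
    {
        /* Interned strings are created once and live for the process. */
        interned_str.out = PyUnicode_InternFromString("out");
        if (interned_str.out == NULL) {
            return -1;
        }
        interned_str.axis1 = PyUnicode_InternFromString("axis1");
        if (interned_str.axis1 == NULL) {
            return -1;
        }
        return 0;
    }

Besides removing an allocation that could itself fail, this places the state in the module's data segment, and every `ptr->field` access becomes a direct `struct.field` access.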
From b7065369cfacb8fee309e74a3e9737661fae5bd9 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Thu, 6 Jun 2024 16:50:24 -0600
Subject: [PATCH 608/980] MNT: apply Sebastian's refactoring suggestions

---
 numpy/_core/src/common/npy_cpu_dispatch.c     |   8 +-
 numpy/_core/src/common/npy_ctypes.h           |   6 +-
 numpy/_core/src/common/ufunc_override.c       |   2 +-
 numpy/_core/src/multiarray/alloc.c            |   8 +-
 numpy/_core/src/multiarray/array_converter.c  |   4 +-
 .../src/multiarray/arrayfunction_override.c   |  20 +-
 numpy/_core/src/multiarray/arrayobject.c      |   2 +-
 numpy/_core/src/multiarray/arraytypes.c.src   |   2 +-
 numpy/_core/src/multiarray/arraywrap.c        |   4 +-
 numpy/_core/src/multiarray/common.h           |   4 +-
 numpy/_core/src/multiarray/common_dtype.c     |   4 +-
 numpy/_core/src/multiarray/compiled_base.c    |   8 +-
 numpy/_core/src/multiarray/conversion_utils.c |   8 +-
 numpy/_core/src/multiarray/convert_datatype.c |  42 ++---
 numpy/_core/src/multiarray/ctors.c            |  16 +-
 numpy/_core/src/multiarray/descriptor.c       |   8 +-
 numpy/_core/src/multiarray/dlpack.c           |   4 +-
 numpy/_core/src/multiarray/dtypemeta.c        |  18 +-
 numpy/_core/src/multiarray/getset.c           |   6 +-
 numpy/_core/src/multiarray/item_selection.c   |   4 +-
 numpy/_core/src/multiarray/methods.c          |  30 +--
 numpy/_core/src/multiarray/methods.h          |   4 +-
 numpy/_core/src/multiarray/multiarraymodule.c | 177 +++++++++---------
 numpy/_core/src/multiarray/multiarraymodule.h |  67 +++----
 numpy/_core/src/multiarray/number.c           |  14 +-
 numpy/_core/src/multiarray/scalartypes.c.src  |   8 +-
 numpy/_core/src/multiarray/shape.c            |   4 +-
 numpy/_core/src/multiarray/strfuncs.c         |  12 +-
 .../_core/src/multiarray/stringdtype/dtype.c  |   8 +-
 numpy/_core/src/umath/_scaled_float_dtype.c   |   4 +-
 numpy/_core/src/umath/dispatching.c           |   2 +-
 numpy/_core/src/umath/extobj.c                |  30 +--
 numpy/_core/src/umath/funcs.inc.src           |  14 +-
 numpy/_core/src/umath/override.c              |  14 +-
 numpy/_core/src/umath/scalarmath.c.src        |   2 +-
 numpy/_core/src/umath/ufunc_object.c          |  16 +-
 numpy/_core/src/umath/ufunc_type_resolution.c |  10 +-
 numpy/_core/src/umath/umathmodule.c           |   4 +-
 38 files changed, 301 insertions(+), 297 deletions(-)

diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c
index 942d48ba01fd..388ad25b3adf 100644
--- a/numpy/_core/src/common/npy_cpu_dispatch.c
+++ b/numpy/_core/src/common/npy_cpu_dispatch.c
@@ -8,7 +8,7 @@ NPY_VISIBILITY_HIDDEN int
 npy_cpu_dispatch_tracer_init(PyObject *mod)
 {
-    if (npy_ma_static_data.cpu_dispatch_registry != NULL) {
+    if (npy_static_pydata.cpu_dispatch_registry != NULL) {
         PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initialized");
         return -1;
     }
@@ -25,7 +25,7 @@ npy_cpu_dispatch_tracer_init(PyObject *mod)
     if (err != 0) {
         return -1;
     }
-    npy_ma_static_data.cpu_dispatch_registry = reg_dict;
+    npy_static_pydata.cpu_dispatch_registry = reg_dict;
     return 0;
 }
@@ -33,13 +33,13 @@ NPY_VISIBILITY_HIDDEN void
 npy_cpu_dispatch_trace(const char *fname, const char *signature,
                        const char **dispatch_info)
 {
-    PyObject *func_dict = PyDict_GetItemString(npy_ma_static_data.cpu_dispatch_registry, fname);
+    PyObject *func_dict = PyDict_GetItemString(npy_static_pydata.cpu_dispatch_registry, fname);
     if (func_dict == NULL) {
         func_dict = PyDict_New();
         if (func_dict == NULL) {
             return;
         }
-        int err = PyDict_SetItemString(npy_ma_static_data.cpu_dispatch_registry, fname, func_dict);
+        int err = PyDict_SetItemString(npy_static_pydata.cpu_dispatch_registry, fname, func_dict);
         Py_DECREF(func_dict);
         if (err != 0) {
             return;
diff --git a/numpy/_core/src/common/npy_ctypes.h b/numpy/_core/src/common/npy_ctypes.h
index e05c79792bf9..c72d2dff7fcb 100644
--- a/numpy/_core/src/common/npy_ctypes.h
+++ b/numpy/_core/src/common/npy_ctypes.h
@@ -22,12 +22,12 @@ npy_ctypes_check(PyTypeObject *obj)
     int ret;
     npy_cache_import("numpy._core._internal", "npy_ctypes_check",
-                     &npy_ma_thread_unsafe_state.npy_ctypes_check);
-    if (npy_ma_thread_unsafe_state.npy_ctypes_check == NULL) {
+                     &npy_thread_unsafe_state.npy_ctypes_check);
+    if (npy_thread_unsafe_state.npy_ctypes_check == NULL) {
         goto fail;
     }
-    ret_obj = PyObject_CallFunctionObjArgs(npy_ma_thread_unsafe_state.npy_ctypes_check,
+    ret_obj = PyObject_CallFunctionObjArgs(npy_thread_unsafe_state.npy_ctypes_check,
                                            (PyObject *)obj, NULL);
     if (ret_obj == NULL) {
         goto fail;
diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c
index 5e373e4d8f25..fe9ada5c6224 100644
--- a/numpy/_core/src/common/ufunc_override.c
+++ b/numpy/_core/src/common/ufunc_override.c
@@ -43,7 +43,7 @@ PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj)
         return NULL;
     }
     /* Ignore if the same as ndarray.__array_ufunc__ */
-    if (cls_array_ufunc == npy_ma_static_data.ndarray_array_ufunc) {
+    if (cls_array_ufunc == npy_static_pydata.ndarray_array_ufunc) {
         Py_DECREF(cls_array_ufunc);
         return NULL;
     }
diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c
index 922e5fb3254c..8f443c6d27c2 100644
--- a/numpy/_core/src/multiarray/alloc.c
+++ b/numpy/_core/src/multiarray/alloc.c
@@ -48,7 +48,7 @@ NPY_NO_EXPORT PyObject *
 _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args))
 {
 #ifdef NPY_OS_LINUX
-    if (npy_ma_thread_unsafe_state.madvise_hugepage) {
+    if (npy_thread_unsafe_state.madvise_hugepage) {
         Py_RETURN_TRUE;
     }
 #endif
@@ -66,12 +66,12 @@ _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args))
 NPY_NO_EXPORT PyObject *
 _set_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *enabled_obj)
 {
-    int was_enabled = npy_ma_thread_unsafe_state.madvise_hugepage;
+    int was_enabled = npy_thread_unsafe_state.madvise_hugepage;
     int enabled = PyObject_IsTrue(enabled_obj);
     if (enabled < 0) {
         return NULL;
     }
-    npy_ma_thread_unsafe_state.madvise_hugepage = enabled;
+    npy_thread_unsafe_state.madvise_hugepage = enabled;
     if (was_enabled) {
         Py_RETURN_TRUE;
     }
@@ -110,7 +110,7 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz,
 #ifdef NPY_OS_LINUX
     /* allow kernel allocating huge pages for large arrays */
     if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u))) &&
-        npy_ma_thread_unsafe_state.madvise_hugepage) {
+        npy_thread_unsafe_state.madvise_hugepage) {
         npy_uintp offset = 4096u - (npy_uintp)p % (4096u);
         npy_uintp length = nelem * esz - offset;
         /**
diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c
index 
d4612e3b8ea7..4ed959c499ed 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -186,8 +186,8 @@ static int pyscalar_mode_conv(PyObject *obj, scalar_policy *policy) { PyObject *strings[3] = { - npy_ma_str.convert, npy_ma_str.preserve, - npy_ma_str.convert_if_no_array}; + npy_interned_str.convert, npy_interned_str.preserve, + npy_interned_str.convert_if_no_array}; /* First quick pass using the identity (should practically always match) */ for (int i = 0; i < 3; i++) { diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index 00c29238a8d8..250b02b5134f 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -20,11 +20,11 @@ get_array_function(PyObject *obj) { /* Fast return for ndarray */ if (PyArray_CheckExact(obj)) { - Py_INCREF(npy_ma_static_data.ndarray_array_function); - return npy_ma_static_data.ndarray_array_function; + Py_INCREF(npy_static_pydata.ndarray_array_function); + return npy_static_pydata.ndarray_array_function; } - PyObject *array_function = PyArray_LookupSpecial(obj, npy_ma_str.array_function); + PyObject *array_function = PyArray_LookupSpecial(obj, npy_interned_str.array_function); if (array_function == NULL && PyErr_Occurred()) { PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ } @@ -125,7 +125,7 @@ get_implementing_args_and_methods(PyObject *relevant_args, static int is_default_array_function(PyObject *obj) { - return obj == npy_ma_static_data.ndarray_array_function; + return obj == npy_static_pydata.ndarray_array_function; } @@ -153,7 +153,7 @@ array_function_method_impl(PyObject *func, PyObject *types, PyObject *args, } } - PyObject *implementation = PyObject_GetAttr(func, npy_ma_str.implementation); + PyObject *implementation = PyObject_GetAttr(func, npy_interned_str.implementation); if (implementation == NULL) { return NULL; } @@ -233,10 +233,10 @@ set_no_matching_types_error(PyObject *public_api, PyObject *types) /* No acceptable override found, raise TypeError. 
*/ npy_cache_import("numpy._core._internal", "array_function_errmsg_formatter", - &npy_ma_thread_unsafe_state.array_function_errmsg_formatter); - if (npy_ma_thread_unsafe_state.array_function_errmsg_formatter != NULL) { + &npy_thread_unsafe_state.array_function_errmsg_formatter); + if (npy_thread_unsafe_state.array_function_errmsg_formatter != NULL) { PyObject *errmsg = PyObject_CallFunctionObjArgs( - npy_ma_thread_unsafe_state.array_function_errmsg_formatter, + npy_thread_unsafe_state.array_function_errmsg_formatter, public_api, types, NULL); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); @@ -299,12 +299,12 @@ array_implement_c_array_function_creation( } /* The like argument must be present in the keyword arguments, remove it */ - if (PyDict_DelItem(kwargs, npy_ma_str.like) < 0) { + if (PyDict_DelItem(kwargs, npy_interned_str.like) < 0) { goto finish; } /* Fetch the actual symbol (the long way right now) */ - numpy_module = PyImport_Import(npy_ma_str.numpy); + numpy_module = PyImport_Import(npy_interned_str.numpy); if (numpy_module == NULL) { goto finish; } diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 54eaee4029ec..9c2a6b832288 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -928,7 +928,7 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) if (result == NULL && (cmp_op == Py_EQ || cmp_op == Py_NE) && PyErr_ExceptionMatches( - npy_ma_static_data._UFuncNoLoopError)) { + npy_static_pydata._UFuncNoLoopError)) { PyErr_Clear(); PyArrayObject *array_other = (PyArrayObject *)PyArray_FROM_O(other); diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index aca963453efb..b99598b825da 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -4185,7 +4185,7 @@ NPY_NO_EXPORT _PyArray_LegacyDescr @from@_Descr = { /* The smallest type number is ?, the largest bounded by 'z'. */ #define _MAX_LETTER ('z' + 1) -#define LETTER_TO_NUM(letter) npy_ma_static_data._letter_to_num[letter - '?'] +#define LETTER_TO_NUM(letter) npy_static_cdata._letter_to_num[letter - '?'] static _PyArray_LegacyDescr *_builtin_descrs[] = { &BOOL_Descr, diff --git a/numpy/_core/src/multiarray/arraywrap.c b/numpy/_core/src/multiarray/arraywrap.c index 699a9ca3297e..7b2bcd929813 100644 --- a/numpy/_core/src/multiarray/arraywrap.c +++ b/numpy/_core/src/multiarray/arraywrap.c @@ -57,7 +57,7 @@ npy_find_array_wrap( } } else { - PyObject *new_wrap = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str.array_wrap); + PyObject *new_wrap = PyArray_LookupSpecial_OnInstance(obj, npy_interned_str.array_wrap); if (new_wrap == NULL) { if (PyErr_Occurred()) { goto fail; @@ -160,7 +160,7 @@ npy_apply_wrap( else { /* Replace passed wrap/wrap_type (borrowed refs) with new_wrap/type. 
*/ new_wrap = PyArray_LookupSpecial_OnInstance( - original_out, npy_ma_str.array_wrap); + original_out, npy_interned_str.array_wrap); if (new_wrap != NULL) { wrap = new_wrap; wrap_type = (PyObject *)Py_TYPE(original_out); diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index ff8f3b07d854..c5bb69463013 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -142,12 +142,12 @@ check_and_adjust_axis_msg(int *axis, int ndim, PyObject *msg_prefix) if (NPY_UNLIKELY((*axis < -ndim) || (*axis >= ndim))) { /* Invoke the AxisError constructor */ PyObject *exc = PyObject_CallFunction( - npy_ma_static_data.AxisError, "iiO", *axis, ndim, + npy_static_pydata.AxisError, "iiO", *axis, ndim, msg_prefix); if (exc == NULL) { return -1; } - PyErr_SetObject(npy_ma_static_data.AxisError, exc); + PyErr_SetObject(npy_static_pydata.AxisError, exc); Py_DECREF(exc); return -1; diff --git a/numpy/_core/src/multiarray/common_dtype.c b/numpy/_core/src/multiarray/common_dtype.c index 88f4388848bb..6635f137288e 100644 --- a/numpy/_core/src/multiarray/common_dtype.c +++ b/numpy/_core/src/multiarray/common_dtype.c @@ -64,7 +64,7 @@ PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) } if (common_dtype == (PyArray_DTypeMeta *)Py_NotImplemented) { Py_DECREF(Py_NotImplemented); - PyErr_Format(npy_ma_static_data.DTypePromotionError, + PyErr_Format(npy_static_pydata.DTypePromotionError, "The DTypes %S and %S do not have a common DType. " "For example they cannot be stored in a single array unless " "the dtype is `object`.", dtype1, dtype2); @@ -285,7 +285,7 @@ PyArray_PromoteDTypeSequence( Py_INCREF(dtypes_in[l]); PyTuple_SET_ITEM(dtypes_in_tuple, l, (PyObject *)dtypes_in[l]); } - PyErr_Format(npy_ma_static_data.DTypePromotionError, + PyErr_Format(npy_static_pydata.DTypePromotionError, "The DType %S could not be promoted by %S. This means that " "no common DType exists for the given inputs. 
" "For example they cannot be stored in a single array unless " diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 9f998b0428b9..5876247cec9c 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -1414,7 +1414,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t /* Don't add docstrings */ #if PY_VERSION_HEX > 0x030b0000 - if (npy_ma_static_data.optimize > 1) { + if (npy_static_cdata.optimize > 1) { #else if (Py_OptimizeFlag > 1) { #endif @@ -1858,7 +1858,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) /* for unity stride we can just copy out of the lookup table */ if (order == 'b') { for (index = 0; index < in_n; index++) { - npy_uint64 v = npy_ma_static_data.unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_static_cdata.unpack_lookup_big[*inptr].uint64; memcpy(outptr, &v, 8); outptr += 8; inptr += in_stride; @@ -1866,7 +1866,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) } else { for (index = 0; index < in_n; index++) { - npy_uint64 v = npy_ma_static_data.unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_static_cdata.unpack_lookup_big[*inptr].uint64; if (order != 'b') { v = npy_bswap8(v); } @@ -1877,7 +1877,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) } /* Clean up the tail portion */ if (in_tail) { - npy_uint64 v = npy_ma_static_data.unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_static_cdata.unpack_lookup_big[*inptr].uint64; if (order != 'b') { v = npy_bswap8(v); } diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index b0ae1d6f8001..9f86842b973a 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -235,7 +235,7 @@ PyArray_CopyConverter(PyObject *obj, NPY_COPYMODE *copymode) { int int_copymode; - if ((PyObject *)Py_TYPE(obj) == npy_ma_static_data._CopyMode) { + if ((PyObject *)Py_TYPE(obj) == npy_static_pydata._CopyMode) { PyObject* mode_value = PyObject_GetAttrString(obj, "value"); if (mode_value == NULL) { return NPY_FAIL; @@ -270,7 +270,7 @@ PyArray_AsTypeCopyConverter(PyObject *obj, NPY_ASTYPECOPYMODE *copymode) { int int_copymode; - if ((PyObject *)Py_TYPE(obj) == npy_ma_static_data._CopyMode) { + if ((PyObject *)Py_TYPE(obj) == npy_static_pydata._CopyMode) { PyErr_SetString(PyExc_ValueError, "_CopyMode enum is not allowed for astype function. 
" "Use true/false instead."); @@ -1411,7 +1411,7 @@ PyArray_IntTupleFromIntp(int len, npy_intp const *vals) NPY_NO_EXPORT int _not_NoValue(PyObject *obj, PyObject **out) { - if (obj == npy_ma_static_data._NoValue) { + if (obj == npy_static_pydata._NoValue) { *out = NULL; } else { @@ -1431,7 +1431,7 @@ PyArray_DeviceConverterOptional(PyObject *object, NPY_DEVICE *device) } if (PyUnicode_Check(object) && - PyUnicode_Compare(object, npy_ma_str.cpu) == 0) { + PyUnicode_Compare(object, npy_interned_str.cpu) == 0) { *device = NPY_DEVICE_CPU; return NPY_SUCCEED; } diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 3c8b8fcee26b..012b2cfe0f49 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -84,13 +84,13 @@ npy_give_promotion_warnings(void) npy_cache_import( "numpy._core._ufunc_config", "NO_NEP50_WARNING", - &npy_ma_thread_unsafe_state.NO_NEP50_WARNING); - if (npy_ma_thread_unsafe_state.NO_NEP50_WARNING == NULL) { + &npy_thread_unsafe_state.NO_NEP50_WARNING); + if (npy_thread_unsafe_state.NO_NEP50_WARNING == NULL) { PyErr_WriteUnraisable(NULL); return 1; } - if (PyContextVar_Get(npy_ma_thread_unsafe_state.NO_NEP50_WARNING, + if (PyContextVar_Get(npy_thread_unsafe_state.NO_NEP50_WARNING, Py_False, &val) < 0) { /* Errors should not really happen, but if it does assume we warn. */ PyErr_WriteUnraisable(NULL); @@ -402,7 +402,7 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) !PyTypeNum_ISCOMPLEX(type_num) && PyTypeNum_ISNUMBER(type_num) && !PyTypeNum_ISBOOL(type_num)) { - int ret = PyErr_WarnEx(npy_ma_static_data.ComplexWarning, + int ret = PyErr_WarnEx(npy_static_pydata.ComplexWarning, "Casting complex values to real discards " "the imaginary part", 1); if (ret < 0) { @@ -2184,12 +2184,12 @@ PyArray_Zero(PyArrayObject *arr) if they simply memcpy it into a ndarray without using setitem(), refcount errors will occur */ - memcpy(zeroval, &npy_ma_static_data.zero_obj, sizeof(PyObject *)); + memcpy(zeroval, &npy_static_pydata.zero_obj, sizeof(PyObject *)); return zeroval; } storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_SETITEM(arr, zeroval, npy_ma_static_data.zero_obj); + ret = PyArray_SETITEM(arr, zeroval, npy_static_pydata.zero_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(zeroval); @@ -2223,13 +2223,13 @@ PyArray_One(PyArrayObject *arr) if they simply memcpy it into a ndarray without using setitem(), refcount errors will occur */ - memcpy(oneval, &npy_ma_static_data.one_obj, sizeof(PyObject *)); + memcpy(oneval, &npy_static_pydata.one_obj, sizeof(PyObject *)); return oneval; } storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_SETITEM(arr, oneval, npy_ma_static_data.one_obj); + ret = PyArray_SETITEM(arr, oneval, npy_static_pydata.one_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(oneval); @@ -2612,7 +2612,7 @@ complex_to_noncomplex_get_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - int ret = PyErr_WarnEx(npy_ma_static_data.ComplexWarning, + int ret = PyErr_WarnEx(npy_static_pydata.ComplexWarning, "Casting complex values to real discards " "the imaginary part", 1); if (ret < 0) { @@ -3239,8 +3239,8 @@ nonstructured_to_structured_get_loop( static PyObject * PyArray_GetGenericToVoidCastingImpl(void) { - 
Py_INCREF(npy_ma_static_data.GenericToVoidMethod); - return npy_ma_static_data.GenericToVoidMethod; + Py_INCREF(npy_static_pydata.GenericToVoidMethod); + return npy_static_pydata.GenericToVoidMethod; } @@ -3377,8 +3377,8 @@ structured_to_nonstructured_get_loop( static PyObject * PyArray_GetVoidToGenericCastingImpl(void) { - Py_INCREF(npy_ma_static_data.VoidToGenericMethod); - return npy_ma_static_data.VoidToGenericMethod; + Py_INCREF(npy_static_pydata.VoidToGenericMethod); + return npy_static_pydata.VoidToGenericMethod; } @@ -3742,8 +3742,8 @@ object_to_any_resolve_descriptors( static PyObject * PyArray_GetObjectToGenericCastingImpl(void) { - Py_INCREF(npy_ma_static_data.ObjectToGenericMethod); - return npy_ma_static_data.ObjectToGenericMethod; + Py_INCREF(npy_static_pydata.ObjectToGenericMethod); + return npy_static_pydata.ObjectToGenericMethod; } @@ -3779,8 +3779,8 @@ any_to_object_resolve_descriptors( static PyObject * PyArray_GetGenericToObjectCastingImpl(void) { - Py_INCREF(npy_ma_static_data.GenericToObjectMethod); - return npy_ma_static_data.GenericToObjectMethod; + Py_INCREF(npy_static_pydata.GenericToObjectMethod); + return npy_static_pydata.GenericToObjectMethod; } @@ -3847,7 +3847,7 @@ initialize_void_and_object_globals(void) { method->get_strided_loop = &structured_to_nonstructured_get_loop; method->nin = 1; method->nout = 1; - npy_ma_static_data.VoidToGenericMethod = (PyObject *)method; + npy_static_pydata.VoidToGenericMethod = (PyObject *)method; method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); if (method == NULL) { @@ -3862,7 +3862,7 @@ initialize_void_and_object_globals(void) { method->get_strided_loop = &nonstructured_to_structured_get_loop; method->nin = 1; method->nout = 1; - npy_ma_static_data.GenericToVoidMethod = (PyObject *)method; + npy_static_pydata.GenericToVoidMethod = (PyObject *)method; method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); if (method == NULL) { @@ -3877,7 +3877,7 @@ initialize_void_and_object_globals(void) { method->casting = NPY_UNSAFE_CASTING; method->resolve_descriptors = &object_to_any_resolve_descriptors; method->get_strided_loop = &object_to_any_get_loop; - npy_ma_static_data.ObjectToGenericMethod = (PyObject *)method; + npy_static_pydata.ObjectToGenericMethod = (PyObject *)method; method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); if (method == NULL) { @@ -3892,7 +3892,7 @@ initialize_void_and_object_globals(void) { method->casting = NPY_SAFE_CASTING; method->resolve_descriptors = &any_to_object_resolve_descriptors; method->get_strided_loop = &any_to_object_get_loop; - npy_ma_static_data.GenericToObjectMethod = (PyObject *)method; + npy_static_pydata.GenericToObjectMethod = (PyObject *)method; return 0; } diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 1d1a1e34ed36..97fb3a4ce117 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -622,7 +622,7 @@ raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) if (exc_value == NULL){ goto fail; } - PyErr_SetObject(npy_ma_static_data._ArrayMemoryError, exc_value); + PyErr_SetObject(npy_static_pydata._ArrayMemoryError, exc_value); Py_DECREF(exc_value); return; @@ -928,11 +928,11 @@ PyArray_NewFromDescr_int( */ if (subtype != &PyArray_Type) { PyObject *res, *func; - func = PyObject_GetAttr((PyObject *)subtype, npy_ma_str.array_finalize); + func = PyObject_GetAttr((PyObject *)subtype, npy_interned_str.array_finalize); if (func == NULL) { goto fail; } - else if 
(func == npy_ma_static_data.ndarray_array_finalize) { + else if (func == npy_static_pydata.ndarray_array_finalize) { Py_DECREF(func); } else if (func == Py_None) { @@ -2030,7 +2030,7 @@ PyArray_FromStructInterface(PyObject *input) PyObject *attr; char endian = NPY_NATBYTE; - attr = PyArray_LookupSpecial_OnInstance(input, npy_ma_str.array_struct); + attr = PyArray_LookupSpecial_OnInstance(input, npy_interned_str.array_struct); if (attr == NULL) { if (PyErr_Occurred()) { return NULL; @@ -2154,7 +2154,7 @@ PyArray_FromInterface(PyObject *origin) npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS]; int dataflags = NPY_ARRAY_BEHAVED; - iface = PyArray_LookupSpecial_OnInstance(origin, npy_ma_str.array_interface); + iface = PyArray_LookupSpecial_OnInstance(origin, npy_interned_str.array_interface); if (iface == NULL) { if (PyErr_Occurred()) { @@ -2457,7 +2457,7 @@ check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames) goto restore_error; } int copy_kwarg_unsupported = PyUnicode_Contains( - str_value, npy_ma_str.array_err_msg_substr); + str_value, npy_interned_str.array_err_msg_substr); Py_DECREF(str_value); if (copy_kwarg_unsupported == -1) { goto restore_error; @@ -2509,7 +2509,7 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, PyObject *new; PyObject *array_meth; - array_meth = PyArray_LookupSpecial_OnInstance(op, npy_ma_str.array); + array_meth = PyArray_LookupSpecial_OnInstance(op, npy_interned_str.array); if (array_meth == NULL) { if (PyErr_Occurred()) { return NULL; @@ -2543,7 +2543,7 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, * signature of the __array__ method being called does not have `copy`. */ if (copy != -1) { - kwnames = npy_ma_static_data.kwnames_is_copy; + kwnames = npy_static_pydata.kwnames_is_copy; arguments[nargs] = copy == 1 ? Py_True : Py_False; } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 90b1f95af8ab..0d1debdf7365 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -726,11 +726,11 @@ _convert_from_commastring(PyObject *obj, int align) PyArray_Descr *res; assert(PyUnicode_Check(obj)); npy_cache_import("numpy._core._internal", "_commastring", - &npy_ma_thread_unsafe_state._commastring); - if (npy_ma_thread_unsafe_state._commastring == NULL) { + &npy_thread_unsafe_state._commastring); + if (npy_thread_unsafe_state._commastring == NULL) { return NULL; } - parsed = PyObject_CallOneArg(npy_ma_thread_unsafe_state._commastring, obj); + parsed = PyObject_CallOneArg(npy_thread_unsafe_state._commastring, obj); if (parsed == NULL) { return NULL; } @@ -2717,7 +2717,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) Py_DECREF(ret); return NULL; } - obj = PyObject_GetAttr(mod, npy_ma_str.dtype); + obj = PyObject_GetAttr(mod, npy_interned_str.dtype); Py_DECREF(mod); if (obj == NULL) { Py_DECREF(ret); diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index b1631acb35f6..8f1ab728416e 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -549,7 +549,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *capsule = PyObject_VectorcallMethod( - npy_ma_str.__dlpack__, call_args, nargsf, call_kwnames); + npy_interned_str.__dlpack__, call_args, nargsf, call_kwnames); if (capsule == NULL) { /* * TODO: This path should be deprecated in NumPy 2.1. 
Once deprecated @@ -563,7 +563,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), /* max_version may be unsupported, try without kwargs */ PyErr_Clear(); capsule = PyObject_VectorcallMethod( - npy_ma_str.__dlpack__, call_args, nargsf, NULL); + npy_interned_str.__dlpack__, call_args, nargsf, NULL); } if (capsule == NULL) { return NULL; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index ee85d1d6771b..a21a441340d7 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -752,7 +752,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->subarray == NULL && descr1->names == NULL && descr2->subarray == NULL && descr2->names == NULL) { if (descr1->elsize != descr2->elsize) { - PyErr_SetString(npy_ma_static_data.DTypePromotionError, + PyErr_SetString(npy_static_pydata.DTypePromotionError, "Invalid type promotion with void datatypes of different " "lengths. Use the `np.bytes_` datatype instead to pad the " "shorter value with trailing zero bytes."); @@ -765,12 +765,12 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->names != NULL && descr2->names != NULL) { /* If both have fields promoting individual fields may be possible */ npy_cache_import("numpy._core._internal", "_promote_fields", - &npy_ma_thread_unsafe_state._promote_fields); - if (npy_ma_thread_unsafe_state._promote_fields == NULL) { + &npy_thread_unsafe_state._promote_fields); + if (npy_thread_unsafe_state._promote_fields == NULL) { return NULL; } PyObject *result = PyObject_CallFunctionObjArgs( - npy_ma_thread_unsafe_state._promote_fields, + npy_thread_unsafe_state._promote_fields, descr1, descr2, NULL); if (result == NULL) { return NULL; @@ -791,7 +791,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return NULL; } if (!cmp) { - PyErr_SetString(npy_ma_static_data.DTypePromotionError, + PyErr_SetString(npy_static_pydata.DTypePromotionError, "invalid type promotion with subarray datatypes " "(shape mismatch)."); return NULL; @@ -821,7 +821,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return new_descr; } - PyErr_SetString(npy_ma_static_data.DTypePromotionError, + PyErr_SetString(npy_static_pydata.DTypePromotionError, "invalid type promotion with structured datatype(s)."); return NULL; } @@ -1239,13 +1239,13 @@ dtypemeta_wrap_legacy_descriptor( /* And it to the types submodule if it is a builtin dtype */ if (!PyTypeNum_ISUSERDEF(descr->type_num)) { npy_cache_import("numpy.dtypes", "_add_dtype_helper", - &npy_ma_thread_unsafe_state._add_dtype_helper); - if (npy_ma_thread_unsafe_state._add_dtype_helper == NULL) { + &npy_thread_unsafe_state._add_dtype_helper); + if (npy_thread_unsafe_state._add_dtype_helper == NULL) { return -1; } if (PyObject_CallFunction( - npy_ma_thread_unsafe_state._add_dtype_helper, + npy_thread_unsafe_state._add_dtype_helper, "Os", (PyObject *)dtype_class, alias) == NULL) { return -1; } diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 562f47ca43bb..df2e10110487 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -388,12 +388,12 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) PyObject *safe; npy_cache_import("numpy._core._internal", "_view_is_safe", - &npy_ma_thread_unsafe_state._view_is_safe); - if (npy_ma_thread_unsafe_state._view_is_safe == NULL) { + 
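/*
 * A minimal sketch of the from_dlpack fallback above, assuming a producer
 * may reject the newer max_version keyword: call __dlpack__ once with
 * kwnames and, on TypeError, clear the error and retry the bare form
 * (sketch_call_dlpack is a hypothetical name).
 */
static PyObject *
sketch_call_dlpack(PyObject *name, PyObject *const *args, size_t nargsf,
                   PyObject *kwnames)
{
    PyObject *capsule = PyObject_VectorcallMethod(name, args, nargsf, kwnames);
    if (capsule == NULL && kwnames != NULL
            && PyErr_ExceptionMatches(PyExc_TypeError)) {
        PyErr_Clear();
        capsule = PyObject_VectorcallMethod(name, args, nargsf, NULL);
    }
    return capsule;
}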
&npy_thread_unsafe_state._view_is_safe); + if (npy_thread_unsafe_state._view_is_safe == NULL) { goto fail; } - safe = PyObject_CallFunction(npy_ma_thread_unsafe_state._view_is_safe, + safe = PyObject_CallFunction(npy_thread_unsafe_state._view_is_safe, "OO", PyArray_DESCR(self), newtype); if (safe == NULL) { goto fail; diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 99639f1373b6..b954f7abaf73 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -2262,10 +2262,10 @@ PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2) } /* Handle negative axes with standard Python indexing rules */ - if (check_and_adjust_axis_msg(&axis1, ndim, npy_ma_str.axis1) < 0) { + if (check_and_adjust_axis_msg(&axis1, ndim, npy_interned_str.axis1) < 0) { return NULL; } - if (check_and_adjust_axis_msg(&axis2, ndim, npy_ma_str.axis2) < 0) { + if (check_and_adjust_axis_msg(&axis2, ndim, npy_interned_str.axis2) < 0) { return NULL; } if (axis1 == axis2) { diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 40b99920881f..225e13e0db2e 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -114,11 +114,11 @@ npy_forward_method( #define NPY_FORWARD_NDARRAY_METHOD(name) \ npy_cache_import( \ "numpy._core._methods", #name, \ - &npy_ma_thread_unsafe_state.name); \ - if (npy_ma_thread_unsafe_state.name == NULL) { \ + &npy_thread_unsafe_state.name); \ + if (npy_thread_unsafe_state.name == NULL) { \ return NULL; \ } \ - return npy_forward_method(npy_ma_thread_unsafe_state.name, \ + return npy_forward_method(npy_thread_unsafe_state.name, \ (PyObject *)self, args, len_args, kwnames) @@ -406,14 +406,14 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) /* check that we are not reinterpreting memory containing Objects. 
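/*
 * A minimal sketch of the delegation step behind NPY_FORWARD_NDARRAY_METHOD
 * above: resolve the Python-level implementation once into the
 * thread-unsafe cache, then hand the array over to it. The real
 * npy_forward_method also threads through args, len_args and kwnames;
 * that plumbing is elided here (sketch_forward is a hypothetical name).
 */
static PyObject *
sketch_forward(PyObject *cached_impl, PyObject *self)
{
    return PyObject_CallOneArg(cached_impl, self);
}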
*/ if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(typed)) { npy_cache_import("numpy._core._internal", "_getfield_is_safe", - &npy_ma_thread_unsafe_state._getfield_is_safe); - if (npy_ma_thread_unsafe_state._getfield_is_safe == NULL) { + &npy_thread_unsafe_state._getfield_is_safe); + if (npy_thread_unsafe_state._getfield_is_safe == NULL) { Py_DECREF(typed); return NULL; } /* only returns True or raises */ - safe = PyObject_CallFunction(npy_ma_thread_unsafe_state._getfield_is_safe, + safe = PyObject_CallFunction(npy_thread_unsafe_state._getfield_is_safe, "OOi", PyArray_DESCR(self), typed, offset); if (safe == NULL) { @@ -1046,7 +1046,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) } Py_DECREF(out_kwd_obj); /* check where if it exists */ - where_obj = PyDict_GetItemWithError(kwds, npy_ma_str.where); + where_obj = PyDict_GetItemWithError(kwds, npy_interned_str.where); if (where_obj == NULL) { if (PyErr_Occurred()) { return -1; @@ -2248,17 +2248,17 @@ PyArray_Dump(PyObject *self, PyObject *file, int protocol) { PyObject *ret; npy_cache_import("numpy._core._methods", "_dump", - &npy_ma_thread_unsafe_state._dump); - if (npy_ma_thread_unsafe_state._dump == NULL) { + &npy_thread_unsafe_state._dump); + if (npy_thread_unsafe_state._dump == NULL) { return -1; } if (protocol < 0) { ret = PyObject_CallFunction( - npy_ma_thread_unsafe_state._dump, "OO", self, file); + npy_thread_unsafe_state._dump, "OO", self, file); } else { ret = PyObject_CallFunction( - npy_ma_thread_unsafe_state._dump, "OOi", self, file, protocol); + npy_thread_unsafe_state._dump, "OOi", self, file, protocol); } if (ret == NULL) { return -1; @@ -2272,16 +2272,16 @@ NPY_NO_EXPORT PyObject * PyArray_Dumps(PyObject *self, int protocol) { npy_cache_import("numpy._core._methods", "_dumps", - &npy_ma_thread_unsafe_state._dumps); - if (npy_ma_thread_unsafe_state._dumps == NULL) { + &npy_thread_unsafe_state._dumps); + if (npy_thread_unsafe_state._dumps == NULL) { return NULL; } if (protocol < 0) { - return PyObject_CallFunction(npy_ma_thread_unsafe_state._dumps, "O", self); + return PyObject_CallFunction(npy_thread_unsafe_state._dumps, "O", self); } else { return PyObject_CallFunction( - npy_ma_thread_unsafe_state._dumps, "Oi", self, protocol); + npy_thread_unsafe_state._dumps, "Oi", self, protocol); } } diff --git a/numpy/_core/src/multiarray/methods.h b/numpy/_core/src/multiarray/methods.h index b0cf34e3fab4..f70af8e48aff 100644 --- a/numpy/_core/src/multiarray/methods.h +++ b/numpy/_core/src/multiarray/methods.h @@ -14,11 +14,11 @@ extern NPY_NO_EXPORT PyMethodDef array_methods[]; static inline PyObject * NpyPath_PathlikeToFspath(PyObject *file) { - if (!PyObject_IsInstance(file, npy_ma_static_data.os_PathLike)) { + if (!PyObject_IsInstance(file, npy_static_pydata.os_PathLike)) { Py_INCREF(file); return file; } - return PyObject_CallFunctionObjArgs(npy_ma_static_data.os_fspath, + return PyObject_CallFunctionObjArgs(npy_static_pydata.os_fspath, file, NULL); } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index d0f33637c4b3..ebd165a4a233 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -136,7 +136,7 @@ PyArray_GetPriority(PyObject *obj, double default_) return NPY_SCALAR_PRIORITY; } - ret = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str.array_priority); + ret = PyArray_LookupSpecial_OnInstance(obj, npy_interned_str.array_priority); if (ret == NULL) { if (PyErr_Occurred()) { /* 
TODO[gh-14801]: propagate crashes during attribute access? */ @@ -3493,7 +3493,7 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), * weak-promotion branch is in practice identical to dtype one. */ if (get_npy_promotion_state() == NPY_USE_WEAK_PROMOTION) { - PyObject *descr = PyObject_GetAttr(from_obj, npy_ma_str.dtype); + PyObject *descr = PyObject_GetAttr(from_obj, npy_interned_str.dtype); if (descr == NULL) { goto finish; } @@ -4265,7 +4265,7 @@ array_shares_memory_impl(PyObject *args, PyObject *kwds, Py_ssize_t default_max_ } else if (result == MEM_OVERLAP_TOO_HARD) { if (raise_exceptions) { - PyErr_SetString(npy_ma_static_data.TooHardError, + PyErr_SetString(npy_static_pydata.TooHardError, "Exceeded max_work"); return NULL; } @@ -4360,11 +4360,11 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } /* No need to give the other warning in a sub-interpreter as well... */ - npy_ma_thread_unsafe_state.reload_guard_initialized = 1; + npy_thread_unsafe_state.reload_guard_initialized = 1; Py_RETURN_NONE; } #endif - if (npy_ma_thread_unsafe_state.reload_guard_initialized) { + if (npy_thread_unsafe_state.reload_guard_initialized) { if (PyErr_WarnEx(PyExc_UserWarning, "The NumPy module was reloaded (imported a second time). " "This can in some cases result in small but subtle issues " @@ -4372,7 +4372,7 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } } - npy_ma_thread_unsafe_state.reload_guard_initialized = 1; + npy_thread_unsafe_state.reload_guard_initialized = 1; Py_RETURN_NONE; } @@ -4767,103 +4767,104 @@ set_flaginfo(PyObject *d) } // static variables are zero-filled by default, no need to explicitly do so -NPY_VISIBILITY_HIDDEN npy_ma_str_struct npy_ma_str; -NPY_VISIBILITY_HIDDEN npy_ma_static_data_struct npy_ma_static_data; -NPY_VISIBILITY_HIDDEN npy_ma_thread_unsafe_state_struct npy_ma_thread_unsafe_state; +NPY_VISIBILITY_HIDDEN npy_interned_str_struct npy_interned_str; +NPY_VISIBILITY_HIDDEN npy_static_pydata_struct npy_static_pydata; +NPY_VISIBILITY_HIDDEN npy_static_cdata_struct npy_static_cdata; +NPY_VISIBILITY_HIDDEN npy_thread_unsafe_state_struct npy_thread_unsafe_state; static int intern_strings(void) { // this is module-level global heap allocation, it is currently // never freed - npy_ma_str.current_allocator = PyUnicode_InternFromString("current_allocator"); - if (npy_ma_str.current_allocator == NULL) { + npy_interned_str.current_allocator = PyUnicode_InternFromString("current_allocator"); + if (npy_interned_str.current_allocator == NULL) { return -1; } - npy_ma_str.array = PyUnicode_InternFromString("__array__"); - if (npy_ma_str.array == NULL) { + npy_interned_str.array = PyUnicode_InternFromString("__array__"); + if (npy_interned_str.array == NULL) { return -1; } - npy_ma_str.array_function = PyUnicode_InternFromString("__array_function__"); - if (npy_ma_str.array_function == NULL) { + npy_interned_str.array_function = PyUnicode_InternFromString("__array_function__"); + if (npy_interned_str.array_function == NULL) { return -1; } - npy_ma_str.array_struct = PyUnicode_InternFromString("__array_struct__"); - if (npy_ma_str.array_struct == NULL) { + npy_interned_str.array_struct = PyUnicode_InternFromString("__array_struct__"); + if (npy_interned_str.array_struct == NULL) { return -1; } - npy_ma_str.array_priority = PyUnicode_InternFromString("__array_priority__"); - if (npy_ma_str.array_priority == NULL) { + npy_interned_str.array_priority = PyUnicode_InternFromString("__array_priority__"); + if 
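/*
 * A minimal sketch of the reload-guard pattern in _reload_guard above,
 * assuming a static flag that survives a second import of the extension
 * module in one process: the first call only records initialization,
 * every later call warns (sketch_* names are illustrative).
 */
static int sketch_reload_guard_initialized = 0;

static PyObject *
sketch_reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args))
{
    if (sketch_reload_guard_initialized) {
        if (PyErr_WarnEx(PyExc_UserWarning,
                "module was reloaded; this can cause subtle issues", 1) < 0) {
            return NULL;
        }
    }
    sketch_reload_guard_initialized = 1;
    Py_RETURN_NONE;
}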
(npy_interned_str.array_priority == NULL) { return -1; } - npy_ma_str.array_interface = PyUnicode_InternFromString("__array_interface__"); - if (npy_ma_str.array_interface == NULL) { + npy_interned_str.array_interface = PyUnicode_InternFromString("__array_interface__"); + if (npy_interned_str.array_interface == NULL) { return -1; } - npy_ma_str.array_wrap = PyUnicode_InternFromString("__array_wrap__"); - if (npy_ma_str.array_wrap == NULL) { + npy_interned_str.array_wrap = PyUnicode_InternFromString("__array_wrap__"); + if (npy_interned_str.array_wrap == NULL) { return -1; } - npy_ma_str.array_finalize = PyUnicode_InternFromString("__array_finalize__"); - if (npy_ma_str.array_finalize == NULL) { + npy_interned_str.array_finalize = PyUnicode_InternFromString("__array_finalize__"); + if (npy_interned_str.array_finalize == NULL) { return -1; } - npy_ma_str.implementation = PyUnicode_InternFromString("_implementation"); - if (npy_ma_str.implementation == NULL) { + npy_interned_str.implementation = PyUnicode_InternFromString("_implementation"); + if (npy_interned_str.implementation == NULL) { return -1; } - npy_ma_str.axis1 = PyUnicode_InternFromString("axis1"); - if (npy_ma_str.axis1 == NULL) { + npy_interned_str.axis1 = PyUnicode_InternFromString("axis1"); + if (npy_interned_str.axis1 == NULL) { return -1; } - npy_ma_str.axis2 = PyUnicode_InternFromString("axis2"); - if (npy_ma_str.axis2 == NULL) { + npy_interned_str.axis2 = PyUnicode_InternFromString("axis2"); + if (npy_interned_str.axis2 == NULL) { return -1; } - npy_ma_str.like = PyUnicode_InternFromString("like"); - if (npy_ma_str.like == NULL) { + npy_interned_str.like = PyUnicode_InternFromString("like"); + if (npy_interned_str.like == NULL) { return -1; } - npy_ma_str.numpy = PyUnicode_InternFromString("numpy"); - if (npy_ma_str.numpy == NULL) { + npy_interned_str.numpy = PyUnicode_InternFromString("numpy"); + if (npy_interned_str.numpy == NULL) { return -1; } - npy_ma_str.where = PyUnicode_InternFromString("where"); - if (npy_ma_str.where == NULL) { + npy_interned_str.where = PyUnicode_InternFromString("where"); + if (npy_interned_str.where == NULL) { return -1; } /* scalar policies */ - npy_ma_str.convert = PyUnicode_InternFromString("convert"); - if (npy_ma_str.convert == NULL) { + npy_interned_str.convert = PyUnicode_InternFromString("convert"); + if (npy_interned_str.convert == NULL) { return -1; } - npy_ma_str.preserve = PyUnicode_InternFromString("preserve"); - if (npy_ma_str.preserve == NULL) { + npy_interned_str.preserve = PyUnicode_InternFromString("preserve"); + if (npy_interned_str.preserve == NULL) { return -1; } - npy_ma_str.convert_if_no_array = PyUnicode_InternFromString("convert_if_no_array"); - if (npy_ma_str.convert_if_no_array == NULL) { + npy_interned_str.convert_if_no_array = PyUnicode_InternFromString("convert_if_no_array"); + if (npy_interned_str.convert_if_no_array == NULL) { return -1; } - npy_ma_str.cpu = PyUnicode_InternFromString("cpu"); - if (npy_ma_str.cpu == NULL) { + npy_interned_str.cpu = PyUnicode_InternFromString("cpu"); + if (npy_interned_str.cpu == NULL) { return -1; } - npy_ma_str.dtype = PyUnicode_InternFromString("dtype"); - if (npy_ma_str.dtype == NULL) { + npy_interned_str.dtype = PyUnicode_InternFromString("dtype"); + if (npy_interned_str.dtype == NULL) { return -1; } - npy_ma_str.array_err_msg_substr = PyUnicode_InternFromString( + npy_interned_str.array_err_msg_substr = PyUnicode_InternFromString( "__array__() got an unexpected keyword argument 'copy'"); - if (npy_ma_str.array_err_msg_substr 
== NULL) { + if (npy_interned_str.array_err_msg_substr == NULL) { return -1; } - npy_ma_str.out = PyUnicode_InternFromString("out"); - if (npy_ma_str.out == NULL) { + npy_interned_str.out = PyUnicode_InternFromString("out"); + if (npy_interned_str.out == NULL) { return -1; } - npy_ma_str.__dlpack__ = PyUnicode_InternFromString("__dlpack__"); - if (npy_ma_str.__dlpack__ == NULL) { + npy_interned_str.__dlpack__ = PyUnicode_InternFromString("__dlpack__"); + if (npy_interned_str.__dlpack__ == NULL) { return -1; } return 0; @@ -4879,7 +4880,7 @@ intern_strings(void) /* * Initializes global constants. * - * All global constants should live inside the npy_ma_static_data + * All global constants should live inside the npy_static_pydata * struct. * * Not all entries in the struct are initialized here, some are @@ -4899,58 +4900,58 @@ initialize_static_globals(void) // cached reference to objects defined in python IMPORT_GLOBAL("math", "floor", - npy_ma_static_data.math_floor_func); + npy_static_pydata.math_floor_func); IMPORT_GLOBAL("math", "ceil", - npy_ma_static_data.math_ceil_func); + npy_static_pydata.math_ceil_func); IMPORT_GLOBAL("math", "trunc", - npy_ma_static_data.math_trunc_func); + npy_static_pydata.math_trunc_func); IMPORT_GLOBAL("math", "gcd", - npy_ma_static_data.math_gcd_func); + npy_static_pydata.math_gcd_func); IMPORT_GLOBAL("numpy.exceptions", "AxisError", - npy_ma_static_data.AxisError); + npy_static_pydata.AxisError); IMPORT_GLOBAL("numpy.exceptions", "ComplexWarning", - npy_ma_static_data.ComplexWarning); + npy_static_pydata.ComplexWarning); IMPORT_GLOBAL("numpy.exceptions", "DTypePromotionError", - npy_ma_static_data.DTypePromotionError); + npy_static_pydata.DTypePromotionError); IMPORT_GLOBAL("numpy.exceptions", "TooHardError", - npy_ma_static_data.TooHardError); + npy_static_pydata.TooHardError); IMPORT_GLOBAL("numpy.exceptions", "VisibleDeprecationWarning", - npy_ma_static_data.VisibleDeprecationWarning); + npy_static_pydata.VisibleDeprecationWarning); IMPORT_GLOBAL("numpy._globals", "_CopyMode", - npy_ma_static_data._CopyMode); + npy_static_pydata._CopyMode); IMPORT_GLOBAL("numpy._globals", "_NoValue", - npy_ma_static_data._NoValue); + npy_static_pydata._NoValue); IMPORT_GLOBAL("numpy._core._exceptions", "_ArrayMemoryError", - npy_ma_static_data._ArrayMemoryError); + npy_static_pydata._ArrayMemoryError); IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncBinaryResolutionError", - npy_ma_static_data._UFuncBinaryResolutionError); + npy_static_pydata._UFuncBinaryResolutionError); IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncInputCastingError", - npy_ma_static_data._UFuncInputCastingError); + npy_static_pydata._UFuncInputCastingError); IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncNoLoopError", - npy_ma_static_data._UFuncNoLoopError); + npy_static_pydata._UFuncNoLoopError); IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncOutputCastingError", - npy_ma_static_data._UFuncOutputCastingError); + npy_static_pydata._UFuncOutputCastingError); IMPORT_GLOBAL("os", "fspath", - npy_ma_static_data.os_fspath); + npy_static_pydata.os_fspath); IMPORT_GLOBAL("os", "PathLike", - npy_ma_static_data.os_PathLike); + npy_static_pydata.os_PathLike); char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY"); if ((env != NULL) && (strncmp(env, "1", 1) == 0)) { @@ -4966,9 +4967,9 @@ initialize_static_globals(void) return -1; } - npy_ma_static_data.default_truediv_type_tup = + npy_static_pydata.default_truediv_type_tup = PyTuple_Pack(3, tmp, tmp, tmp); - if (npy_ma_static_data.default_truediv_type_tup == 
NULL) { + if (npy_static_pydata.default_truediv_type_tup == NULL) { Py_DECREF(tmp); return -1; } @@ -4983,7 +4984,7 @@ initialize_static_globals(void) if (level == NULL) { return -1; } - npy_ma_static_data.optimize = PyLong_AsLong(level); + npy_static_cdata.optimize = PyLong_AsLong(level); Py_DECREF(level); /* @@ -4999,22 +5000,22 @@ initialize_static_globals(void) npy_intp k; for (k=0; k < 8; k++) { npy_uint8 v = (j & (1 << k)) == (1 << k); - npy_ma_static_data.unpack_lookup_big[j].bytes[7 - k] = v; + npy_static_cdata.unpack_lookup_big[j].bytes[7 - k] = v; } } - npy_ma_static_data.kwnames_is_copy = Py_BuildValue("(s)", "copy"); - if (npy_ma_static_data.kwnames_is_copy == NULL) { + npy_static_pydata.kwnames_is_copy = Py_BuildValue("(s)", "copy"); + if (npy_static_pydata.kwnames_is_copy == NULL) { return -1; } - npy_ma_static_data.one_obj = PyLong_FromLong((long) 1); - if (npy_ma_static_data.one_obj == NULL) { + npy_static_pydata.one_obj = PyLong_FromLong((long) 1); + if (npy_static_pydata.one_obj == NULL) { return -1; } - npy_ma_static_data.zero_obj = PyLong_FromLong((long) 0); - if (npy_ma_static_data.zero_obj == NULL) { + npy_static_pydata.zero_obj = PyLong_FromLong((long) 0); + if (npy_static_pydata.zero_obj == NULL) { return -1; } @@ -5283,11 +5284,11 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { } // initialize static references to ndarray.__array_*__ special methods - npy_ma_static_data.ndarray_array_finalize = PyObject_GetAttrString( + npy_static_pydata.ndarray_array_finalize = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_finalize__"); - npy_ma_static_data.ndarray_array_ufunc = PyObject_GetAttrString( + npy_static_pydata.ndarray_array_ufunc = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_ufunc__"); - npy_ma_static_data.ndarray_array_function = PyObject_GetAttrString( + npy_static_pydata.ndarray_array_function = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_function__"); /* @@ -5300,13 +5301,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { * the legacy dtypemeta classes are available. */ npy_cache_import("numpy.dtypes", "_add_dtype_helper", - &npy_ma_thread_unsafe_state._add_dtype_helper); - if (npy_ma_thread_unsafe_state._add_dtype_helper == NULL) { + &npy_thread_unsafe_state._add_dtype_helper); + if (npy_thread_unsafe_state._add_dtype_helper == NULL) { goto err; } if (PyObject_CallFunction( - npy_ma_thread_unsafe_state._add_dtype_helper, + npy_thread_unsafe_state._add_dtype_helper, "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) { goto err; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 6058ee640c1a..9da928cad606 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -1,7 +1,7 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ #define NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ -typedef struct npy_ma_str_struct { +typedef struct npy_interned_str_struct { PyObject *current_allocator; PyObject *array; PyObject *array_function; @@ -25,7 +25,7 @@ typedef struct npy_ma_str_struct { PyObject *out; PyObject *errmode_strings[6]; PyObject *__dlpack__; -} npy_ma_str_struct; +} npy_interned_str_struct; /* * A struct that stores static global data used throughout @@ -37,7 +37,7 @@ typedef struct npy_ma_str_struct { * this struct after module initialization is likely not thread-safe. 
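/*
 * A minimal sketch of the lookup table filled by the loop above: entry j
 * of unpack_lookup_big holds the eight 0/1 bytes of j in big-endian bit
 * order, so unpack_bits can expand one input byte with a single 8-byte
 * copy of lut[j].uint64 (sketch_* names are illustrative).
 */
typedef union {
    npy_uint8 bytes[8];
    npy_uint64 uint64;
} sketch_unpack_entry;

static sketch_unpack_entry sketch_lut[256];

static void
sketch_fill_unpack_lut(void)
{
    for (int j = 0; j < 256; j++) {
        for (int k = 0; k < 8; k++) {
            /* bit k of j lands at byte 7 - k, i.e. MSB first */
            sketch_lut[j].bytes[7 - k] = (npy_uint8)((j >> k) & 1);
        }
    }
}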
*/ -typedef struct npy_ma_static_data_struct { +typedef struct npy_static_pydata_struct { /* * Used in ufunc_type_resolution.c to avoid reconstructing a tuple * storing the default true division return types. @@ -90,20 +90,6 @@ typedef struct npy_ma_static_data_struct { PyObject *os_PathLike; PyObject *os_fspath; - /* - * stores sys.flags.optimize as a long, which is used in the add_docstring - * implementation - */ - long optimize; - - /* - * LUT used by unpack_bits - */ - union { - npy_uint8 bytes[8]; - npy_uint64 uint64; - } unpack_lookup_big[256]; - /* * Used in the __array__ internals to avoid building a tuple inline */ @@ -120,15 +106,6 @@ typedef struct npy_ma_static_data_struct { */ PyObject *cpu_dispatch_registry; - /* - * A look-up table to recover integer type numbers from type characters. - * - * See the _MAX_LETTER and LETTER_TO_NUM macros in arraytypes.c.src. - * - * The smallest type number is ?, the largest is bounded by 'z'. - */ - npy_int16 _letter_to_num['z' + 1 - '?']; - /* * references to ArrayMethod implementations that are cached * to avoid repeatedly creating them @@ -137,15 +114,40 @@ typedef struct npy_ma_static_data_struct { PyObject *GenericToVoidMethod; PyObject *ObjectToGenericMethod; PyObject *GenericToObjectMethod; -} npy_ma_static_data_struct; +} npy_static_pydata_struct; +typedef struct npy_static_cdata_struct { + /* + * stores sys.flags.optimize as a long, which is used in the add_docstring + * implementation + */ + long optimize; + + /* + * LUT used by unpack_bits + */ + union { + npy_uint8 bytes[8]; + npy_uint64 uint64; + } unpack_lookup_big[256]; + + /* + * A look-up table to recover integer type numbers from type characters. + * + * See the _MAX_LETTER and LETTER_TO_NUM macros in arraytypes.c.src. + * + * The smallest type number is ?, the largest is bounded by 'z'. + */ + npy_int16 _letter_to_num['z' + 1 - '?']; +} npy_static_cdata_struct; + /* * A struct storing thread-unsafe global state for the _multiarray_umath * module. We should refactor so the global state is thread-safe, * e.g. by adding locking. */ -typedef struct npy_ma_thread_unsafe_state_struct { +typedef struct npy_thread_unsafe_state_struct { /* * Cached references to objects obtained via an import. All of these are * can be initialized at any time by npy_cache_import. 
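/*
 * A minimal sketch of how the _letter_to_num table declared above is
 * presumably indexed (the authoritative macros are _MAX_LETTER and
 * LETTER_TO_NUM in arraytypes.c.src): type characters fall in the range
 * ['?', 'z'], so a character maps to its slot by subtracting '?'
 * (sketch_* names are illustrative).
 */
static npy_int16 sketch_letter_to_num['z' + 1 - '?'];

static int
sketch_typenum_from_letter(char c)
{
    if (c < '?' || c > 'z') {
        return -1;  /* not a valid type character */
    }
    return sketch_letter_to_num[c - '?'];
}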
@@ -209,12 +211,13 @@ typedef struct npy_ma_thread_unsafe_state_struct { * used to detect module reloading in the reload guard */ int reload_guard_initialized; -} npy_ma_thread_unsafe_state_struct; +} npy_thread_unsafe_state_struct; -NPY_VISIBILITY_HIDDEN extern npy_ma_str_struct npy_ma_str; -NPY_VISIBILITY_HIDDEN extern npy_ma_static_data_struct npy_ma_static_data; -NPY_VISIBILITY_HIDDEN extern npy_ma_thread_unsafe_state_struct npy_ma_thread_unsafe_state; +NPY_VISIBILITY_HIDDEN extern npy_interned_str_struct npy_interned_str; +NPY_VISIBILITY_HIDDEN extern npy_static_pydata_struct npy_static_pydata; +NPY_VISIBILITY_HIDDEN extern npy_static_cdata_struct npy_static_cdata; +NPY_VISIBILITY_HIDDEN extern npy_thread_unsafe_state_struct npy_thread_unsafe_state; #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index 15b9eab3282e..f537d2b68e41 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -124,15 +124,15 @@ _PyArray_SetNumericOps(PyObject *dict) SET(clip); // initialize static globals needed for matmul - npy_ma_static_data.axes_1d_obj_kwargs = Py_BuildValue( + npy_static_pydata.axes_1d_obj_kwargs = Py_BuildValue( "{s, [(i), (i, i), (i)]}", "axes", -1, -2, -1, -1); - if (npy_ma_static_data.axes_1d_obj_kwargs == NULL) { + if (npy_static_pydata.axes_1d_obj_kwargs == NULL) { return -1; } - npy_ma_static_data.axes_2d_obj_kwargs = Py_BuildValue( + npy_static_pydata.axes_2d_obj_kwargs = Py_BuildValue( "{s, [(i, i), (i, i), (i, i)]}", "axes", -2, -1, -2, -1, -2, -1); - if (npy_ma_static_data.axes_2d_obj_kwargs == NULL) { + if (npy_static_pydata.axes_2d_obj_kwargs == NULL) { return -1; } @@ -305,10 +305,10 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) * passing the correct `axes=`. */ if (PyArray_NDIM(self) == 1) { - kwargs = npy_ma_static_data.axes_1d_obj_kwargs; + kwargs = npy_static_pydata.axes_1d_obj_kwargs; } else { - kwargs = npy_ma_static_data.axes_2d_obj_kwargs; + kwargs = npy_static_pydata.axes_2d_obj_kwargs; } PyObject *res = PyObject_Call(n_ops.matmul, args, kwargs); Py_DECREF(args); @@ -318,7 +318,7 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) * AxisError should indicate that the axes argument didn't work out * which should mean the second operand not being 2 dimensional. */ - if (PyErr_ExceptionMatches(npy_ma_static_data.AxisError)) { + if (PyErr_ExceptionMatches(npy_static_pydata.AxisError)) { PyErr_SetString(PyExc_ValueError, "inplace matrix multiplication requires the first operand to " "have at least one and the second at least two dimensions."); diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 32c2a6cfdb1e..15c792d1b5af 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -612,13 +612,13 @@ static PyObject * _void_scalar_to_string(PyObject *obj, int repr) { npy_cache_import("numpy._core.arrayprint", "_void_scalar_to_string", - &npy_ma_thread_unsafe_state._void_scalar_to_string); - if (npy_ma_thread_unsafe_state._void_scalar_to_string == NULL) { + &npy_thread_unsafe_state._void_scalar_to_string); + if (npy_thread_unsafe_state._void_scalar_to_string == NULL) { return NULL; } PyObject *is_repr = repr ? 
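/*
 * A minimal sketch of the inplace-matmul dispatch above, assuming the two
 * kwargs dicts were prebuilt in _PyArray_SetNumericOps: a 1-D left operand
 * uses the axes mapping (n),(n,m)->(m), every other case the 2-D mapping
 * (sketch_inplace_matmul is a hypothetical name).
 */
static PyObject *
sketch_inplace_matmul(PyObject *matmul, PyArrayObject *self, PyObject *args,
                      PyObject *kwargs_1d, PyObject *kwargs_2d)
{
    PyObject *kwargs = (PyArray_NDIM(self) == 1) ? kwargs_1d : kwargs_2d;
    return PyObject_Call(matmul, args, kwargs);
}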
Py_True : Py_False; return PyObject_CallFunctionObjArgs( - npy_ma_thread_unsafe_state._void_scalar_to_string, obj, is_repr, NULL); + npy_thread_unsafe_state._void_scalar_to_string, obj, is_repr, NULL); } static PyObject * @@ -3038,7 +3038,7 @@ object_arrtype_alloc(PyTypeObject *type, Py_ssize_t items) * Object scalars should not actually exist, if they exist we should * consider it to be a bug. */ - if (PyErr_WarnEx(npy_ma_static_data.VisibleDeprecationWarning, + if (PyErr_WarnEx(npy_static_pydata.VisibleDeprecationWarning, "Creating a NumPy object scalar. NumPy object scalars should " "never be created. If you see this message please inform the " "NumPy developers. Since this message should never be shown " diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 079ac5d7df5c..72bea87002f8 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -668,10 +668,10 @@ PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) int n = PyArray_NDIM(ap); int i; - if (check_and_adjust_axis_msg(&a1, n, npy_ma_str.axis1) < 0) { + if (check_and_adjust_axis_msg(&a1, n, npy_interned_str.axis1) < 0) { return NULL; } - if (check_and_adjust_axis_msg(&a2, n, npy_ma_str.axis2) < 0) { + if (check_and_adjust_axis_msg(&a2, n, npy_interned_str.axis2) < 0) { return NULL; } diff --git a/numpy/_core/src/multiarray/strfuncs.c b/numpy/_core/src/multiarray/strfuncs.c index 0c15e16b5f0c..759c730c7cfa 100644 --- a/numpy/_core/src/multiarray/strfuncs.c +++ b/numpy/_core/src/multiarray/strfuncs.c @@ -39,14 +39,14 @@ array_repr(PyArrayObject *self) * leads to circular import problems. */ npy_cache_import("numpy._core.arrayprint", "_default_array_repr", - &npy_ma_thread_unsafe_state._default_array_repr); - if (npy_ma_thread_unsafe_state._default_array_repr == NULL) { + &npy_thread_unsafe_state._default_array_repr); + if (npy_thread_unsafe_state._default_array_repr == NULL) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__repr__"); return NULL; } return PyObject_CallFunctionObjArgs( - npy_ma_thread_unsafe_state._default_array_repr, self, NULL); + npy_thread_unsafe_state._default_array_repr, self, NULL); } @@ -58,14 +58,14 @@ array_str(PyArrayObject *self) * to circular import problems. 
*/ npy_cache_import("numpy._core.arrayprint", "_default_array_str", - &npy_ma_thread_unsafe_state._default_array_str); - if (npy_ma_thread_unsafe_state._default_array_str == NULL) { + &npy_thread_unsafe_state._default_array_str); + if (npy_thread_unsafe_state._default_array_str == NULL) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__str__"); return NULL; } return PyObject_CallFunctionObjArgs( - npy_ma_thread_unsafe_state._default_array_str, self, NULL); + npy_thread_unsafe_state._default_array_str, self, NULL); } diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 64b88aa62703..038fa8159171 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -716,20 +716,20 @@ static PyObject * stringdtype__reduce__(PyArray_StringDTypeObject *self, PyObject *NPY_UNUSED(args)) { npy_cache_import("numpy._core._internal", "_convert_to_stringdtype_kwargs", - &npy_ma_thread_unsafe_state._convert_to_stringdtype_kwargs); + &npy_thread_unsafe_state._convert_to_stringdtype_kwargs); - if (npy_ma_thread_unsafe_state._convert_to_stringdtype_kwargs == NULL) { + if (npy_thread_unsafe_state._convert_to_stringdtype_kwargs == NULL) { return NULL; } if (self->na_object != NULL) { return Py_BuildValue( - "O(iO)", npy_ma_thread_unsafe_state._convert_to_stringdtype_kwargs, + "O(iO)", npy_thread_unsafe_state._convert_to_stringdtype_kwargs, self->coerce, self->na_object); } return Py_BuildValue( - "O(i)", npy_ma_thread_unsafe_state._convert_to_stringdtype_kwargs, + "O(i)", npy_thread_unsafe_state._convert_to_stringdtype_kwargs, self->coerce); } diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index d73139738109..99d0c644cba3 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -867,7 +867,7 @@ sfloat_init_ufuncs(void) { NPY_NO_EXPORT PyObject * get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) { - if (npy_ma_thread_unsafe_state.get_sfloat_dtype_initialized) { + if (npy_thread_unsafe_state.get_sfloat_dtype_initialized) { Py_INCREF(&PyArray_SFloatDType); return (PyObject *)&PyArray_SFloatDType; } @@ -896,6 +896,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } - npy_ma_thread_unsafe_state.get_sfloat_dtype_initialized = NPY_TRUE; + npy_thread_unsafe_state.get_sfloat_dtype_initialized = NPY_TRUE; return (PyObject *)&PyArray_SFloatDType; } diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index dfe918b5482f..a3b9e7584434 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -1062,7 +1062,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, * then we chain it, because DTypePromotionError effectively means that there * is no loop available. (We failed finding a loop by using promotion.) 
*/ - else if (PyErr_ExceptionMatches(npy_ma_static_data.DTypePromotionError)) { + else if (PyErr_ExceptionMatches(npy_static_pydata.DTypePromotionError)) { PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL; PyErr_Fetch(&err_type, &err_value, &err_traceback); raise_no_loop_found_error(ufunc, (PyObject **)op_dtypes); diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index 09ab0be4d3d4..0405b14a7b02 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -122,8 +122,8 @@ fetch_curr_extobj_state(npy_extobj *extobj) { PyObject *capsule; if (PyContextVar_Get( - npy_ma_static_data.npy_extobj_contextvar, - npy_ma_static_data.default_extobj_capsule, &capsule) < 0) { + npy_static_pydata.npy_extobj_contextvar, + npy_static_pydata.default_extobj_capsule, &capsule) < 0) { return -1; } npy_extobj *obj = PyCapsule_GetPointer(capsule, "numpy.ufunc.extobj"); @@ -150,22 +150,22 @@ init_extobj(void) * inputs. */ for (int i = 0; i <= UFUNC_ERR_LOG; i++) { - npy_ma_str.errmode_strings[i] = PyUnicode_InternFromString( + npy_interned_str.errmode_strings[i] = PyUnicode_InternFromString( errmode_cstrings[i]); - if (npy_ma_str.errmode_strings[i] == NULL) { + if (npy_interned_str.errmode_strings[i] == NULL) { return -1; } } - npy_ma_static_data.default_extobj_capsule = make_extobj_capsule( + npy_static_pydata.default_extobj_capsule = make_extobj_capsule( NPY_BUFSIZE, UFUNC_ERR_DEFAULT, Py_None); - if (npy_ma_static_data.default_extobj_capsule == NULL) { + if (npy_static_pydata.default_extobj_capsule == NULL) { return -1; } - npy_ma_static_data.npy_extobj_contextvar = PyContextVar_New( - "numpy.ufunc.extobj", npy_ma_static_data.default_extobj_capsule); - if (npy_ma_static_data.npy_extobj_contextvar == NULL) { - Py_CLEAR(npy_ma_static_data.default_extobj_capsule); + npy_static_pydata.npy_extobj_contextvar = PyContextVar_New( + "numpy.ufunc.extobj", npy_static_pydata.default_extobj_capsule); + if (npy_static_pydata.npy_extobj_contextvar == NULL) { + Py_CLEAR(npy_static_pydata.default_extobj_capsule); return -1; } return 0; @@ -185,7 +185,7 @@ errmodeconverter(PyObject *obj, int *mode) int i = 0; for (; i <= UFUNC_ERR_LOG; i++) { int eq = PyObject_RichCompareBool( - obj, npy_ma_str.errmode_strings[i], Py_EQ); + obj, npy_interned_str.errmode_strings[i], Py_EQ); if (eq == -1) { return 0; } @@ -333,22 +333,22 @@ extobj_get_extobj_dict(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(noarg)) /* Set all error modes: */ mode = (extobj.errmask & UFUNC_MASK_DIVIDEBYZERO) >> UFUNC_SHIFT_DIVIDEBYZERO; if (PyDict_SetItemString(result, "divide", - npy_ma_str.errmode_strings[mode]) < 0) { + npy_interned_str.errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_OVERFLOW) >> UFUNC_SHIFT_OVERFLOW; if (PyDict_SetItemString(result, "over", - npy_ma_str.errmode_strings[mode]) < 0) { + npy_interned_str.errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_UNDERFLOW) >> UFUNC_SHIFT_UNDERFLOW; if (PyDict_SetItemString(result, "under", - npy_ma_str.errmode_strings[mode]) < 0) { + npy_interned_str.errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_INVALID) >> UFUNC_SHIFT_INVALID; if (PyDict_SetItemString(result, "invalid", - npy_ma_str.errmode_strings[mode]) < 0) { + npy_interned_str.errmode_strings[mode]) < 0) { goto fail; } diff --git a/numpy/_core/src/umath/funcs.inc.src b/numpy/_core/src/umath/funcs.inc.src index 938a5f928cd8..131a678c7865 100644 --- a/numpy/_core/src/umath/funcs.inc.src +++ 
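/*
 * A minimal sketch of the error-mode packing read back in
 * extobj_get_extobj_dict above, assuming the layout implied by the
 * UFUNC_MASK_x / UFUNC_SHIFT_x pairs: each of divide/over/under/invalid
 * occupies a small bit-field in one errmask integer, and the decoded
 * value indexes errmode_strings (sketch_* names are illustrative).
 */
static int
sketch_get_errmode(int errmask, int mask, int shift)
{
    return (errmask & mask) >> shift;
}

static int
sketch_set_errmode(int errmask, int mask, int shift, int mode)
{
    return (errmask & ~mask) | ((mode << shift) & mask);
}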
b/numpy/_core/src/umath/funcs.inc.src @@ -157,19 +157,19 @@ npy_ObjectLogicalNot(PyObject *i1) static PyObject * npy_ObjectFloor(PyObject *obj) { - return PyObject_CallFunction(npy_ma_static_data.math_floor_func, + return PyObject_CallFunction(npy_static_pydata.math_floor_func, "O", obj); } static PyObject * npy_ObjectCeil(PyObject *obj) { - return PyObject_CallFunction(npy_ma_static_data.math_ceil_func, + return PyObject_CallFunction(npy_static_pydata.math_ceil_func, "O", obj); } static PyObject * npy_ObjectTrunc(PyObject *obj) { - return PyObject_CallFunction(npy_ma_static_data.math_trunc_func, + return PyObject_CallFunction(npy_static_pydata.math_trunc_func, "O", obj); } @@ -180,7 +180,7 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* use math.gcd if valid on the provided types */ { - gcd = PyObject_CallFunction(npy_ma_static_data.math_gcd_func, + gcd = PyObject_CallFunction(npy_static_pydata.math_gcd_func, "OO", i1, i2); if (gcd != NULL) { return gcd; @@ -192,11 +192,11 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* otherwise, use our internal one, written in python */ { npy_cache_import("numpy._core._internal", "_gcd", - &npy_ma_thread_unsafe_state.internal_gcd_func); - if (npy_ma_thread_unsafe_state.internal_gcd_func == NULL) { + &npy_thread_unsafe_state.internal_gcd_func); + if (npy_thread_unsafe_state.internal_gcd_func == NULL) { return NULL; } - gcd = PyObject_CallFunction(npy_ma_thread_unsafe_state.internal_gcd_func, + gcd = PyObject_CallFunction(npy_thread_unsafe_state.internal_gcd_func, "OO", i1, i2); if (gcd == NULL) { return NULL; diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index 2e121f52bfe7..ad9c9a4c64f7 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -114,19 +114,19 @@ initialize_normal_kwds(PyObject *out_args, if (out_args != NULL) { /* Replace `out` argument with the normalized version */ - int res = PyDict_SetItem(normal_kwds, npy_ma_str.out, out_args); + int res = PyDict_SetItem(normal_kwds, npy_interned_str.out, out_args); if (res < 0) { return -1; } } else { /* Ensure that `out` is not present. */ - int res = PyDict_Contains(normal_kwds, npy_ma_str.out); + int res = PyDict_Contains(normal_kwds, npy_interned_str.out); if (res < 0) { return -1; } if (res) { - return PyDict_DelItem(normal_kwds, npy_ma_str.out); + return PyDict_DelItem(normal_kwds, npy_interned_str.out); } } return 0; @@ -177,7 +177,7 @@ copy_positional_args_to_kwargs(const char **keywords, * 5 keyword arguments. 
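/*
 * A minimal sketch of the two-stage GCD in npy_ObjectGCD above: prefer the
 * C-accelerated math.gcd and, only if it fails, clear the error and fall
 * back to the cached pure-Python _gcd. Both callables are assumed already
 * resolved, as in the surrounding code (sketch_object_gcd is hypothetical).
 */
static PyObject *
sketch_object_gcd(PyObject *math_gcd, PyObject *internal_gcd,
                  PyObject *i1, PyObject *i2)
{
    PyObject *gcd = PyObject_CallFunctionObjArgs(math_gcd, i1, i2, NULL);
    if (gcd != NULL) {
        return gcd;
    }
    PyErr_Clear();  /* e.g. math.gcd rejecting non-integer objects */
    return PyObject_CallFunctionObjArgs(internal_gcd, i1, i2, NULL);
}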
*/ assert(strcmp(keywords[i], "initial") == 0); - if (args[i] == npy_ma_static_data._NoValue) { + if (args[i] == npy_static_pydata._NoValue) { continue; } } @@ -371,10 +371,10 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, npy_cache_import( "numpy._core._internal", "array_ufunc_errmsg_formatter", - &npy_ma_thread_unsafe_state.array_ufunc_errmsg_formatter); - if (npy_ma_thread_unsafe_state.array_ufunc_errmsg_formatter != NULL) { + &npy_thread_unsafe_state.array_ufunc_errmsg_formatter); + if (npy_thread_unsafe_state.array_ufunc_errmsg_formatter != NULL) { errmsg = PyObject_Call( - npy_ma_thread_unsafe_state.array_ufunc_errmsg_formatter, + npy_thread_unsafe_state.array_ufunc_errmsg_formatter, override_args, normal_kwds); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index 517435894435..b6bf6938d914 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -1788,7 +1788,7 @@ static int static int emit_complexwarning(void) { - return PyErr_WarnEx(npy_ma_static_data.ComplexWarning, + return PyErr_WarnEx(npy_static_pydata.ComplexWarning, "Casting complex values to real discards the imaginary part", 1); } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 4d7cdd3d1763..aedb548d9591 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -1383,7 +1383,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, if (PyTuple_Check(op_axes_tuple)) { if (PyTuple_Size(op_axes_tuple) != op_ncore) { /* must have been a tuple with too many entries. */ - PyErr_Format(npy_ma_static_data.AxisError, + PyErr_Format(npy_static_pydata.AxisError, "%s: operand %d has %d core dimensions, " "but %zd dimensions are specified by axes tuple.", ufunc_get_name_cstr(ufunc), iop, op_ncore, @@ -1407,7 +1407,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, return -1; } /* If it is a single integer, inform user that more are needed */ - PyErr_Format(npy_ma_static_data.AxisError, + PyErr_Format(npy_static_pydata.AxisError, "%s: operand %d has %d core dimensions, " "but the axes item is a single integer.", ufunc_get_name_cstr(ufunc), iop, op_ncore); @@ -5240,7 +5240,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) { PyArrayObject *ap1 = NULL; PyObject *tmp; - npy_cache_import("numpy", "matrix", &npy_ma_thread_unsafe_state.numpy_matrix); + npy_cache_import("numpy", "matrix", &npy_thread_unsafe_state.numpy_matrix); const char *matrix_deprecation_msg = ( "%s.outer() was passed a numpy matrix as %s argument. 
" @@ -5251,7 +5251,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) tmp = PyTuple_GET_ITEM(args, 0); - if (PyObject_IsInstance(tmp, npy_ma_thread_unsafe_state.numpy_matrix)) { + if (PyObject_IsInstance(tmp, npy_thread_unsafe_state.numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "first") < 0) { @@ -5268,7 +5268,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) PyArrayObject *ap2 = NULL; tmp = PyTuple_GET_ITEM(args, 1); - if (PyObject_IsInstance(tmp, npy_ma_thread_unsafe_state.numpy_matrix)) { + if (PyObject_IsInstance(tmp, npy_thread_unsafe_state.numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "second") < 0) { @@ -6422,9 +6422,9 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) npy_cache_import( "numpy._core._internal", "_ufunc_doc_signature_formatter", - &npy_ma_thread_unsafe_state._ufunc_doc_signature_formatter); + &npy_thread_unsafe_state._ufunc_doc_signature_formatter); - if (npy_ma_thread_unsafe_state._ufunc_doc_signature_formatter == NULL) { + if (npy_thread_unsafe_state._ufunc_doc_signature_formatter == NULL) { return NULL; } @@ -6433,7 +6433,7 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) * introspection on name and nin + nout to automate the first part * of it the doc string shouldn't need the calling convention */ - doc = PyObject_CallFunctionObjArgs(npy_ma_thread_unsafe_state._ufunc_doc_signature_formatter, + doc = PyObject_CallFunctionObjArgs(npy_thread_unsafe_state._ufunc_doc_signature_formatter, (PyObject *)ufunc, NULL); if (doc == NULL) { return NULL; diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index 69fa9b09b61e..b523bd0b4d83 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -90,7 +90,7 @@ raise_binary_type_reso_error(PyUFuncObject *ufunc, PyArrayObject **operands) { return -1; } PyErr_SetObject( - npy_ma_static_data._UFuncBinaryResolutionError, exc_value); + npy_static_pydata._UFuncBinaryResolutionError, exc_value); Py_DECREF(exc_value); return -1; @@ -113,7 +113,7 @@ raise_no_loop_found_error( if (exc_value == NULL) { return -1; } - PyErr_SetObject(npy_ma_static_data._UFuncNoLoopError, exc_value); + PyErr_SetObject(npy_static_pydata._UFuncNoLoopError, exc_value); Py_DECREF(exc_value); return -1; @@ -165,7 +165,7 @@ raise_input_casting_error( PyArray_Descr *to, npy_intp i) { - return raise_casting_error(npy_ma_static_data._UFuncInputCastingError, + return raise_casting_error(npy_static_pydata._UFuncInputCastingError, ufunc, casting, from, to, i); } @@ -181,7 +181,7 @@ raise_output_casting_error( PyArray_Descr *to, npy_intp i) { - return raise_casting_error(npy_ma_static_data._UFuncOutputCastingError, + return raise_casting_error(npy_static_pydata._UFuncOutputCastingError, ufunc, casting, from, to, i); } @@ -1420,7 +1420,7 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2))) { return PyUFunc_DefaultTypeResolver( ufunc, casting, operands, - npy_ma_static_data.default_truediv_type_tup, out_dtypes); + npy_static_pydata.default_truediv_type_tup, out_dtypes); } return PyUFunc_DivisionTypeResolver(ufunc, casting, operands, type_tup, out_dtypes); diff --git a/numpy/_core/src/umath/umathmodule.c 
b/numpy/_core/src/umath/umathmodule.c index 56020fd16847..f83e33ed24b8 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -273,8 +273,8 @@ int initumath(PyObject *m) #undef ADDSCONST PyModule_AddIntConstant(m, "UFUNC_BUFSIZE_DEFAULT", (long)NPY_BUFSIZE); - Py_INCREF(npy_ma_static_data.npy_extobj_contextvar); - PyModule_AddObject(m, "_extobj_contextvar", npy_ma_static_data.npy_extobj_contextvar); + Py_INCREF(npy_static_pydata.npy_extobj_contextvar); + PyModule_AddObject(m, "_extobj_contextvar", npy_static_pydata.npy_extobj_contextvar); PyModule_AddObject(m, "PINF", PyFloat_FromDouble(NPY_INFINITY)); PyModule_AddObject(m, "NINF", PyFloat_FromDouble(-NPY_INFINITY)); From c23703802c2021c9690a2895db803f51b5c8a5f5 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 7 Jun 2024 15:44:55 -0600 Subject: [PATCH 609/980] MNT: move static data structs into their own file --- numpy/_core/meson.build | 1 + numpy/_core/src/common/binop_override.h | 3 +- numpy/_core/src/common/npy_cpu_dispatch.c | 2 +- numpy/_core/src/common/ufunc_override.c | 4 +- numpy/_core/src/multiarray/alloc.c | 1 + numpy/_core/src/multiarray/array_converter.c | 2 +- .../src/multiarray/arrayfunction_override.c | 1 + numpy/_core/src/multiarray/arraytypes.c.src | 1 + numpy/_core/src/multiarray/arraywrap.c | 2 +- numpy/_core/src/multiarray/common.h | 2 +- numpy/_core/src/multiarray/common_dtype.c | 2 +- numpy/_core/src/multiarray/conversion_utils.c | 1 + numpy/_core/src/multiarray/convert_datatype.c | 3 +- numpy/_core/src/multiarray/ctors.c | 2 +- numpy/_core/src/multiarray/descriptor.c | 1 + numpy/_core/src/multiarray/dlpack.c | 2 +- numpy/_core/src/multiarray/dtypemeta.c | 2 + numpy/_core/src/multiarray/getset.c | 1 + numpy/_core/src/multiarray/item_selection.c | 2 +- numpy/_core/src/multiarray/methods.c | 1 + numpy/_core/src/multiarray/methods.h | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 189 +----------- numpy/_core/src/multiarray/multiarraymodule.h | 144 ---------- numpy/_core/src/multiarray/npy_static_data.c | 269 ++++++++++++++++++ numpy/_core/src/multiarray/npy_static_data.h | 157 ++++++++++ numpy/_core/src/multiarray/shape.c | 2 +- numpy/_core/src/umath/_scaled_float_dtype.c | 1 + numpy/_core/src/umath/extobj.c | 1 - numpy/_core/src/umath/funcs.inc.src | 1 + numpy/_core/src/umath/override.c | 1 + numpy/_core/src/umath/ufunc_object.c | 2 + numpy/_core/src/umath/ufunc_object.h | 5 - numpy/_core/src/umath/umathmodule.c | 29 -- 33 files changed, 461 insertions(+), 378 deletions(-) create mode 100644 numpy/_core/src/multiarray/npy_static_data.c create mode 100644 numpy/_core/src/multiarray/npy_static_data.h diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index dbe76e0a3dea..aba0b8212d1e 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1101,6 +1101,7 @@ src_multiarray = multiarray_gen_headers + [ 'src/multiarray/nditer_constr.c', 'src/multiarray/nditer_pywrap.c', src_file.process('src/multiarray/nditer_templ.c.src'), + 'src/multiarray/npy_static_data.c', 'src/multiarray/number.c', 'src/multiarray/refcount.c', src_file.process('src/multiarray/scalartypes.c.src'), diff --git a/numpy/_core/src/common/binop_override.h b/numpy/_core/src/common/binop_override.h index ec3d046796ab..def9b895c872 100644 --- a/numpy/_core/src/common/binop_override.h +++ b/numpy/_core/src/common/binop_override.h @@ -6,6 +6,7 @@ #include "numpy/arrayobject.h" #include "get_attr_string.h" +#include "npy_static_data.h" /* * Logic for deciding when binops should return 
NotImplemented versus when @@ -128,7 +129,7 @@ binop_should_defer(PyObject *self, PyObject *other, int inplace) * Classes with __array_ufunc__ are living in the future, and only need to * check whether __array_ufunc__ equals None. */ - attr = PyArray_LookupSpecial(other, npy_um_str_array_ufunc); + attr = PyArray_LookupSpecial(other, npy_interned_str.array_ufunc); if (attr != NULL) { defer = !inplace && (attr == Py_None); Py_DECREF(attr); diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c index 388ad25b3adf..ff22f234a7c6 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.c +++ b/numpy/_core/src/common/npy_cpu_dispatch.c @@ -3,7 +3,7 @@ #include "npy_cpu_dispatch.h" #include "numpy/ndarraytypes.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" NPY_VISIBILITY_HIDDEN int npy_cpu_dispatch_tracer_init(PyObject *mod) diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index fe9ada5c6224..17b678edd4bf 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -7,7 +7,7 @@ #include "npy_import.h" #include "ufunc_override.h" #include "scalartypes.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" /* * Check whether an object has __array_ufunc__ defined on its class and it @@ -35,7 +35,7 @@ PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj) * Does the class define __array_ufunc__? (Note that LookupSpecial has fast * return for basic python types, so no need to worry about those here) */ - cls_array_ufunc = PyArray_LookupSpecial(obj, npy_um_str_array_ufunc); + cls_array_ufunc = PyArray_LookupSpecial(obj, npy_interned_str.array_ufunc); if (cls_array_ufunc == NULL) { if (PyErr_Occurred()) { PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? 
*/ diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 8f443c6d27c2..b7e7c9948ce1 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -11,6 +11,7 @@ #include "numpy/npy_common.h" #include "npy_config.h" #include "alloc.h" +#include "npy_static_data.h" #include "multiarraymodule.h" #include diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index 4ed959c499ed..496173038954 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -21,7 +21,7 @@ #include "abstractdtypes.h" #include "convert_datatype.h" #include "descriptor.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" #include "ctors.h" #include "npy_config.h" diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index 250b02b5134f..aa3ab42433c7 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -7,6 +7,7 @@ #include "numpy/ndarraytypes.h" #include "get_attr_string.h" #include "npy_import.h" +#include "npy_static_data.h" #include "multiarraymodule.h" #include "arrayfunction_override.h" diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index b99598b825da..9524be8a0c89 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -42,6 +42,7 @@ #include "arraytypes.h" #include "umathmodule.h" +#include "npy_static_data.h" /* * Define a stack allocated dummy array with only the minimum information set: diff --git a/numpy/_core/src/multiarray/arraywrap.c b/numpy/_core/src/multiarray/arraywrap.c index 7b2bcd929813..ae7b6e987ff8 100644 --- a/numpy/_core/src/multiarray/arraywrap.c +++ b/numpy/_core/src/multiarray/arraywrap.c @@ -12,7 +12,7 @@ #include "get_attr_string.h" #include "arraywrap.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" /* diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index c5bb69463013..19fba9e66d01 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -8,7 +8,7 @@ #include "npy_cpu_dispatch.h" #include "numpy/npy_cpu.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" #include "npy_import.h" #include diff --git a/numpy/_core/src/multiarray/common_dtype.c b/numpy/_core/src/multiarray/common_dtype.c index 6635f137288e..beba6acef149 100644 --- a/numpy/_core/src/multiarray/common_dtype.c +++ b/numpy/_core/src/multiarray/common_dtype.c @@ -10,7 +10,7 @@ #include "convert_datatype.h" #include "dtypemeta.h" #include "abstractdtypes.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" /* diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 9f86842b973a..e7b1936d1706 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -18,6 +18,7 @@ #include "conversion_utils.h" #include "alloc.h" #include "npy_buffer.h" +#include "npy_static_data.h" #include "multiarraymodule.h" static int diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 012b2cfe0f49..f029ad8a5986 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -35,7 +35,8 @@ #include 
"dtype_transfer.h" #include "dtype_traversal.h" #include "arrayobject.h" - +#include "npy_static_data.h" +#include "multiarraymodule.h" /* * Required length of string when converting from unsigned integer type. diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 97fb3a4ce117..8ee9d28c3086 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -16,7 +16,7 @@ #include "npy_pycompat.h" #include "npy_ctypes.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" #include "common.h" #include "ctors.h" diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 0d1debdf7365..c1288cd53902 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -22,6 +22,7 @@ #include "conversion_utils.h" /* for PyArray_TypestrConvert */ #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ #include "descriptor.h" +#include "npy_static_data.h" #include "multiarraymodule.h" #include "alloc.h" #include "assert.h" diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 8f1ab728416e..51cb454b3a66 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -8,7 +8,7 @@ #include "numpy/arrayobject.h" #include "npy_argparse.h" #include "npy_dlpack.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" #include "conversion_utils.h" diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index a21a441340d7..87a69d8348c1 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -26,6 +26,8 @@ #include "templ_common.h" #include "refcount.h" #include "dtype_traversal.h" +#include "npy_static_data.h" +#include "multiarraymodule.h" #include diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index df2e10110487..092ac65bbbc3 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -25,6 +25,7 @@ #include "alloc.h" #include "npy_buffer.h" #include "shape.h" +#include "multiarraymodule.h" /******************* array attribute get and set routines ******************/ diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index b954f7abaf73..4d98ce0c350c 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -15,7 +15,7 @@ -#include "multiarraymodule.h" +#include "npy_static_data.h" #include "common.h" #include "dtype_transfer.h" #include "dtypemeta.h" diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 225e13e0db2e..dd40fc4e2f3d 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -29,6 +29,7 @@ #include "strfuncs.h" #include "array_assign.h" #include "npy_dlpack.h" +#include "npy_static_data.h" #include "multiarraymodule.h" #include "methods.h" diff --git a/numpy/_core/src/multiarray/methods.h b/numpy/_core/src/multiarray/methods.h index f70af8e48aff..f49e0205894d 100644 --- a/numpy/_core/src/multiarray/methods.h +++ b/numpy/_core/src/multiarray/methods.h @@ -1,7 +1,7 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ #define NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ -#include "multiarraymodule.h" +#include "npy_static_data.h" #include "npy_import.h" extern NPY_NO_EXPORT PyMethodDef array_methods[]; diff --git 
a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index ebd165a4a233..c6938ad10869 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -23,6 +23,7 @@ #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" +#include "multiarraymodule.h" #include "numpy/npy_math.h" #include "npy_argparse.h" #include "npy_config.h" @@ -63,7 +64,7 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "ctors.h" #include "array_assign.h" #include "common.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" #include "cblasfuncs.h" #include "vdot.h" #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ @@ -4766,193 +4767,11 @@ set_flaginfo(PyObject *d) return; } -// static variables are zero-filled by default, no need to explicitly do so -NPY_VISIBILITY_HIDDEN npy_interned_str_struct npy_interned_str; -NPY_VISIBILITY_HIDDEN npy_static_pydata_struct npy_static_pydata; -NPY_VISIBILITY_HIDDEN npy_static_cdata_struct npy_static_cdata; +// static variables are automatically zero-initialized NPY_VISIBILITY_HIDDEN npy_thread_unsafe_state_struct npy_thread_unsafe_state; static int -intern_strings(void) -{ - // this is module-level global heap allocation, it is currently - // never freed - npy_interned_str.current_allocator = PyUnicode_InternFromString("current_allocator"); - if (npy_interned_str.current_allocator == NULL) { - return -1; - } - npy_interned_str.array = PyUnicode_InternFromString("__array__"); - if (npy_interned_str.array == NULL) { - return -1; - } - npy_interned_str.array_function = PyUnicode_InternFromString("__array_function__"); - if (npy_interned_str.array_function == NULL) { - return -1; - } - npy_interned_str.array_struct = PyUnicode_InternFromString("__array_struct__"); - if (npy_interned_str.array_struct == NULL) { - return -1; - } - npy_interned_str.array_priority = PyUnicode_InternFromString("__array_priority__"); - if (npy_interned_str.array_priority == NULL) { - return -1; - } - npy_interned_str.array_interface = PyUnicode_InternFromString("__array_interface__"); - if (npy_interned_str.array_interface == NULL) { - return -1; - } - npy_interned_str.array_wrap = PyUnicode_InternFromString("__array_wrap__"); - if (npy_interned_str.array_wrap == NULL) { - return -1; - } - npy_interned_str.array_finalize = PyUnicode_InternFromString("__array_finalize__"); - if (npy_interned_str.array_finalize == NULL) { - return -1; - } - npy_interned_str.implementation = PyUnicode_InternFromString("_implementation"); - if (npy_interned_str.implementation == NULL) { - return -1; - } - npy_interned_str.axis1 = PyUnicode_InternFromString("axis1"); - if (npy_interned_str.axis1 == NULL) { - return -1; - } - npy_interned_str.axis2 = PyUnicode_InternFromString("axis2"); - if (npy_interned_str.axis2 == NULL) { - return -1; - } - npy_interned_str.like = PyUnicode_InternFromString("like"); - if (npy_interned_str.like == NULL) { - return -1; - } - npy_interned_str.numpy = PyUnicode_InternFromString("numpy"); - if (npy_interned_str.numpy == NULL) { - return -1; - } - npy_interned_str.where = PyUnicode_InternFromString("where"); - if (npy_interned_str.where == NULL) { - return -1; - } - /* scalar policies */ - npy_interned_str.convert = PyUnicode_InternFromString("convert"); - if (npy_interned_str.convert == NULL) { - return -1; - } - npy_interned_str.preserve = PyUnicode_InternFromString("preserve"); - if (npy_interned_str.preserve == NULL) { - return -1; - } - 
npy_interned_str.convert_if_no_array = PyUnicode_InternFromString("convert_if_no_array"); - if (npy_interned_str.convert_if_no_array == NULL) { - return -1; - } - npy_interned_str.cpu = PyUnicode_InternFromString("cpu"); - if (npy_interned_str.cpu == NULL) { - return -1; - } - npy_interned_str.dtype = PyUnicode_InternFromString("dtype"); - if (npy_interned_str.dtype == NULL) { - return -1; - } - npy_interned_str.array_err_msg_substr = PyUnicode_InternFromString( - "__array__() got an unexpected keyword argument 'copy'"); - if (npy_interned_str.array_err_msg_substr == NULL) { - return -1; - } - npy_interned_str.out = PyUnicode_InternFromString("out"); - if (npy_interned_str.out == NULL) { - return -1; - } - npy_interned_str.__dlpack__ = PyUnicode_InternFromString("__dlpack__"); - if (npy_interned_str.__dlpack__ == NULL) { - return -1; - } - return 0; -} - -#define IMPORT_GLOBAL(base_path, name, object) \ - assert(object == NULL); \ - npy_cache_import(base_path, name, &object); \ - if (object == NULL) { \ - return -1; \ - } - -/* - * Initializes global constants. - * - * All global constants should live inside the npy_static_pydata - * struct. - * - * Not all entries in the struct are initialized here, some are - * initialized later but care must be taken in those cases to initialize - * the constant in a thread-safe manner, ensuring it is initialized - * exactly once. - * - * Anything initialized here is initialized during module import which - * the python interpreter ensures is done in a single thread. - * - * Anything imported here should not need the C-layer at all and will be - * imported before anything on the C-side is initialized. - */ -static int -initialize_static_globals(void) -{ - // cached reference to objects defined in python - - IMPORT_GLOBAL("math", "floor", - npy_static_pydata.math_floor_func); - - IMPORT_GLOBAL("math", "ceil", - npy_static_pydata.math_ceil_func); - - IMPORT_GLOBAL("math", "trunc", - npy_static_pydata.math_trunc_func); - - IMPORT_GLOBAL("math", "gcd", - npy_static_pydata.math_gcd_func); - - IMPORT_GLOBAL("numpy.exceptions", "AxisError", - npy_static_pydata.AxisError); - - IMPORT_GLOBAL("numpy.exceptions", "ComplexWarning", - npy_static_pydata.ComplexWarning); - - IMPORT_GLOBAL("numpy.exceptions", "DTypePromotionError", - npy_static_pydata.DTypePromotionError); - - IMPORT_GLOBAL("numpy.exceptions", "TooHardError", - npy_static_pydata.TooHardError); - - IMPORT_GLOBAL("numpy.exceptions", "VisibleDeprecationWarning", - npy_static_pydata.VisibleDeprecationWarning); - - IMPORT_GLOBAL("numpy._globals", "_CopyMode", - npy_static_pydata._CopyMode); - - IMPORT_GLOBAL("numpy._globals", "_NoValue", - npy_static_pydata._NoValue); - - IMPORT_GLOBAL("numpy._core._exceptions", "_ArrayMemoryError", - npy_static_pydata._ArrayMemoryError); - - IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncBinaryResolutionError", - npy_static_pydata._UFuncBinaryResolutionError); - - IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncInputCastingError", - npy_static_pydata._UFuncInputCastingError); - - IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncNoLoopError", - npy_static_pydata._UFuncNoLoopError); - - IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncOutputCastingError", - npy_static_pydata._UFuncOutputCastingError); - - IMPORT_GLOBAL("os", "fspath", - npy_static_pydata.os_fspath); - - IMPORT_GLOBAL("os", "PathLike", - npy_static_pydata.os_PathLike); - +initialize_thread_unsafe_state(void) { char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY"); if ((env != NULL) && (strncmp(env, "1", 1) == 0)) 
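IMPORT_GLOBAL above wraps npy_cache_import, which resolves a module attribute once and stashes it in a C static for the lifetime of the process. A rough, self-contained sketch of the underlying pattern; the names here are illustrative, not the NumPy implementation:

#include <Python.h>

/* cached at module import, deliberately never freed */
static PyObject *cached_math_floor;

static int
cache_math_floor(void)
{
    PyObject *mod = PyImport_ImportModule("math");
    if (mod == NULL) {
        return -1;
    }
    cached_math_floor = PyObject_GetAttrString(mod, "floor");
    Py_DECREF(mod);
    return (cached_math_floor == NULL) ? -1 : 0;
}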
{ numpy_warn_if_no_mem_policy = 1; diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 9da928cad606..f03c2640f811 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -1,147 +1,6 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ #define NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ -typedef struct npy_interned_str_struct { - PyObject *current_allocator; - PyObject *array; - PyObject *array_function; - PyObject *array_struct; - PyObject *array_priority; - PyObject *array_interface; - PyObject *array_wrap; - PyObject *array_finalize; - PyObject *implementation; - PyObject *axis1; - PyObject *axis2; - PyObject *like; - PyObject *numpy; - PyObject *where; - PyObject *convert; - PyObject *preserve; - PyObject *convert_if_no_array; - PyObject *cpu; - PyObject *dtype; - PyObject *array_err_msg_substr; - PyObject *out; - PyObject *errmode_strings[6]; - PyObject *__dlpack__; -} npy_interned_str_struct; - -/* - * A struct that stores static global data used throughout - * _multiarray_umath, mostly to cache results that would be - * prohibitively expensive to compute at runtime in a tight loop. - * - * All items in this struct should be initialized during module - * initialization and thereafter should be immutable. Mutating items in - * this struct after module initialization is likely not thread-safe. - */ - -typedef struct npy_static_pydata_struct { - /* - * Used in ufunc_type_resolution.c to avoid reconstructing a tuple - * storing the default true division return types. - */ - PyObject *default_truediv_type_tup; - - /* - * Used to set up the default extobj context variable - */ - PyObject *default_extobj_capsule; - - /* - * The global ContextVar to store the extobject. It is exposed to Python - * as `_extobj_contextvar`. 
- */ - PyObject *npy_extobj_contextvar; - - /* - * A reference to ndarray's implementations for __array_*__ special methods - */ - PyObject *ndarray_array_ufunc; - PyObject *ndarray_array_finalize; - PyObject *ndarray_array_function; - - /* - * References to the '1' and '0' PyLong objects - */ - PyObject *one_obj; - PyObject *zero_obj; - - /* - * References to items obtained via an import at module initialization - */ - PyObject *AxisError; - PyObject *ComplexWarning; - PyObject *DTypePromotionError; - PyObject *TooHardError; - PyObject *VisibleDeprecationWarning; - PyObject *_CopyMode; - PyObject *_NoValue; - PyObject *_ArrayMemoryError; - PyObject *_UFuncBinaryResolutionError; - PyObject *_UFuncInputCastingError; - PyObject *_UFuncNoLoopError; - PyObject *_UFuncOutputCastingError; - PyObject *math_floor_func; - PyObject *math_ceil_func; - PyObject *math_trunc_func; - PyObject *math_gcd_func; - PyObject *os_PathLike; - PyObject *os_fspath; - - /* - * Used in the __array__ internals to avoid building a tuple inline - */ - PyObject *kwnames_is_copy; - - /* - * Used in __imatmul__ to avoid building tuples inline - */ - PyObject *axes_1d_obj_kwargs; - PyObject *axes_2d_obj_kwargs; - - /* - * Used for CPU feature detection and dispatch - */ - PyObject *cpu_dispatch_registry; - - /* - * references to ArrayMethod implementations that are cached - * to avoid repeatedly creating them - */ - PyObject *VoidToGenericMethod; - PyObject *GenericToVoidMethod; - PyObject *ObjectToGenericMethod; - PyObject *GenericToObjectMethod; -} npy_static_pydata_struct; - - -typedef struct npy_static_cdata_struct { - /* - * stores sys.flags.optimize as a long, which is used in the add_docstring - * implementation - */ - long optimize; - - /* - * LUT used by unpack_bits - */ - union { - npy_uint8 bytes[8]; - npy_uint64 uint64; - } unpack_lookup_big[256]; - - /* - * A look-up table to recover integer type numbers from type characters. - * - * See the _MAX_LETTER and LETTER_TO_NUM macros in arraytypes.c.src. - * - * The smallest type number is ?, the largest is bounded by 'z'. - */ - npy_int16 _letter_to_num['z' + 1 - '?']; -} npy_static_cdata_struct; - /* * A struct storing thread-unsafe global state for the _multiarray_umath * module. 
We should refactor so the global state is thread-safe, @@ -214,9 +73,6 @@ typedef struct npy_thread_unsafe_state_struct { } npy_thread_unsafe_state_struct; -NPY_VISIBILITY_HIDDEN extern npy_interned_str_struct npy_interned_str; -NPY_VISIBILITY_HIDDEN extern npy_static_pydata_struct npy_static_pydata; -NPY_VISIBILITY_HIDDEN extern npy_static_cdata_struct npy_static_cdata; NPY_VISIBILITY_HIDDEN extern npy_thread_unsafe_state_struct npy_thread_unsafe_state; diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c new file mode 100644 index 000000000000..be25c05eb654 --- /dev/null +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -0,0 +1,269 @@ +/* numpy static data structs and initialization */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _UMATHMODULE +#define _MULTIARRAYMODULE + +#define PY_SSIZE_T_CLEAN +#include +#include + +#include "numpy/ndarraytypes.h" +#include "numpy/npy_common.h" +#include "numpy/arrayobject.h" +#include "npy_import.h" +#include "npy_static_data.h" + +// static variables are zero-filled by default, no need to explicitly do so +NPY_VISIBILITY_HIDDEN npy_interned_str_struct npy_interned_str; +NPY_VISIBILITY_HIDDEN npy_static_pydata_struct npy_static_pydata; +NPY_VISIBILITY_HIDDEN npy_static_cdata_struct npy_static_cdata; + +NPY_NO_EXPORT int +intern_strings(void) +{ + npy_interned_str.current_allocator = PyUnicode_InternFromString("current_allocator"); + if (npy_interned_str.current_allocator == NULL) { + return -1; + } + npy_interned_str.array = PyUnicode_InternFromString("__array__"); + if (npy_interned_str.array == NULL) { + return -1; + } + npy_interned_str.array_function = PyUnicode_InternFromString("__array_function__"); + if (npy_interned_str.array_function == NULL) { + return -1; + } + npy_interned_str.array_struct = PyUnicode_InternFromString("__array_struct__"); + if (npy_interned_str.array_struct == NULL) { + return -1; + } + npy_interned_str.array_priority = PyUnicode_InternFromString("__array_priority__"); + if (npy_interned_str.array_priority == NULL) { + return -1; + } + npy_interned_str.array_interface = PyUnicode_InternFromString("__array_interface__"); + if (npy_interned_str.array_interface == NULL) { + return -1; + } + npy_interned_str.array_ufunc = PyUnicode_InternFromString("__array_ufunc__"); + if (npy_interned_str.array_ufunc == NULL) { + return -1; + } + npy_interned_str.array_wrap = PyUnicode_InternFromString("__array_wrap__"); + if (npy_interned_str.array_wrap == NULL) { + return -1; + } + npy_interned_str.array_finalize = PyUnicode_InternFromString("__array_finalize__"); + if (npy_interned_str.array_finalize == NULL) { + return -1; + } + npy_interned_str.implementation = PyUnicode_InternFromString("_implementation"); + if (npy_interned_str.implementation == NULL) { + return -1; + } + npy_interned_str.axis1 = PyUnicode_InternFromString("axis1"); + if (npy_interned_str.axis1 == NULL) { + return -1; + } + npy_interned_str.axis2 = PyUnicode_InternFromString("axis2"); + if (npy_interned_str.axis2 == NULL) { + return -1; + } + npy_interned_str.like = PyUnicode_InternFromString("like"); + if (npy_interned_str.like == NULL) { + return -1; + } + npy_interned_str.numpy = PyUnicode_InternFromString("numpy"); + if (npy_interned_str.numpy == NULL) { + return -1; + } + npy_interned_str.where = PyUnicode_InternFromString("where"); + if (npy_interned_str.where == NULL) { + return -1; + } + npy_interned_str.convert = PyUnicode_InternFromString("convert"); + if (npy_interned_str.convert == NULL) { 
+ return -1; + } + npy_interned_str.preserve = PyUnicode_InternFromString("preserve"); + if (npy_interned_str.preserve == NULL) { + return -1; + } + npy_interned_str.convert_if_no_array = PyUnicode_InternFromString("convert_if_no_array"); + if (npy_interned_str.convert_if_no_array == NULL) { + return -1; + } + npy_interned_str.cpu = PyUnicode_InternFromString("cpu"); + if (npy_interned_str.cpu == NULL) { + return -1; + } + npy_interned_str.dtype = PyUnicode_InternFromString("dtype"); + if (npy_interned_str.dtype == NULL) { + return -1; + } + npy_interned_str.array_err_msg_substr = PyUnicode_InternFromString( + "__array__() got an unexpected keyword argument 'copy'"); + if (npy_interned_str.array_err_msg_substr == NULL) { + return -1; + } + npy_interned_str.out = PyUnicode_InternFromString("out"); + if (npy_interned_str.out == NULL) { + return -1; + } + npy_interned_str.__dlpack__ = PyUnicode_InternFromString("__dlpack__"); + if (npy_interned_str.__dlpack__ == NULL) { + return -1; + } + npy_interned_str.pyvals_name = PyUnicode_InternFromString("UFUNC_PYVALS_NAME"); + if (npy_interned_str.pyvals_name == NULL) { + return -1; + } + return 0; +} + +#define IMPORT_GLOBAL(base_path, name, object) \ + assert(object == NULL); \ + npy_cache_import(base_path, name, &object); \ + if (object == NULL) { \ + return -1; \ + } + + +/* + * Initializes global constants. + * + * All global constants should live inside the npy_static_pydata + * struct. + * + * Not all entries in the struct are initialized here, some are + * initialized later but care must be taken in those cases to initialize + * the constant in a thread-safe manner, ensuring it is initialized + * exactly once. + * + * Anything initialized here is initialized during module import which + * the python interpreter ensures is done in a single thread. + * + * Anything imported here should not need the C-layer at all and will be + * imported before anything on the C-side is initialized. 
+ */ +NPY_NO_EXPORT int +initialize_static_globals(void) +{ + // cached reference to objects defined in python + + IMPORT_GLOBAL("math", "floor", + npy_static_pydata.math_floor_func); + + IMPORT_GLOBAL("math", "ceil", + npy_static_pydata.math_ceil_func); + + IMPORT_GLOBAL("math", "trunc", + npy_static_pydata.math_trunc_func); + + IMPORT_GLOBAL("math", "gcd", + npy_static_pydata.math_gcd_func); + + IMPORT_GLOBAL("numpy.exceptions", "AxisError", + npy_static_pydata.AxisError); + + IMPORT_GLOBAL("numpy.exceptions", "ComplexWarning", + npy_static_pydata.ComplexWarning); + + IMPORT_GLOBAL("numpy.exceptions", "DTypePromotionError", + npy_static_pydata.DTypePromotionError); + + IMPORT_GLOBAL("numpy.exceptions", "TooHardError", + npy_static_pydata.TooHardError); + + IMPORT_GLOBAL("numpy.exceptions", "VisibleDeprecationWarning", + npy_static_pydata.VisibleDeprecationWarning); + + IMPORT_GLOBAL("numpy._globals", "_CopyMode", + npy_static_pydata._CopyMode); + + IMPORT_GLOBAL("numpy._globals", "_NoValue", + npy_static_pydata._NoValue); + + IMPORT_GLOBAL("numpy._core._exceptions", "_ArrayMemoryError", + npy_static_pydata._ArrayMemoryError); + + IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncBinaryResolutionError", + npy_static_pydata._UFuncBinaryResolutionError); + + IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncInputCastingError", + npy_static_pydata._UFuncInputCastingError); + + IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncNoLoopError", + npy_static_pydata._UFuncNoLoopError); + + IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncOutputCastingError", + npy_static_pydata._UFuncOutputCastingError); + + IMPORT_GLOBAL("os", "fspath", + npy_static_pydata.os_fspath); + + IMPORT_GLOBAL("os", "PathLike", + npy_static_pydata.os_PathLike); + + // default_truediv_type_tupS + PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE); + if (tmp == NULL) { + return -1; + } + + npy_static_pydata.default_truediv_type_tup = + PyTuple_Pack(3, tmp, tmp, tmp); + if (npy_static_pydata.default_truediv_type_tup == NULL) { + Py_DECREF(tmp); + return -1; + } + Py_DECREF(tmp); + + PyObject *flags = PySys_GetObject("flags"); /* borrowed object */ + if (flags == NULL) { + PyErr_SetString(PyExc_AttributeError, "cannot get sys.flags"); + return -1; + } + PyObject *level = PyObject_GetAttrString(flags, "optimize"); + if (level == NULL) { + return -1; + } + npy_static_cdata.optimize = PyLong_AsLong(level); + Py_DECREF(level); + + /* + * see unpack_bits for how this table is used. + * + * LUT for bigendian bitorder, littleendian is handled via + * byteswapping in the loop. 
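One subtlety in the sys.flags.optimize fetch above: PySys_GetObject returns a borrowed reference, while PyObject_GetAttrString returns a new one, so only level is released. A compressed sketch of the rule, with error handling elided:

PyObject *flags = PySys_GetObject("flags");    /* borrowed: no DECREF */
PyObject *level = PyObject_GetAttrString(flags, "optimize");  /* new ref */
long optimize = PyLong_AsLong(level);
Py_DECREF(level);                              /* drop the new ref only */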
+ * + * 256 8 byte blocks representing 8 bits expanded to 1 or 0 bytes + */ + npy_intp j; + for (j=0; j < 256; j++) { + npy_intp k; + for (k=0; k < 8; k++) { + npy_uint8 v = (j & (1 << k)) == (1 << k); + npy_static_cdata.unpack_lookup_big[j].bytes[7 - k] = v; + } + } + + npy_static_pydata.kwnames_is_copy = Py_BuildValue("(s)", "copy"); + if (npy_static_pydata.kwnames_is_copy == NULL) { + return -1; + } + + npy_static_pydata.one_obj = PyLong_FromLong((long) 1); + if (npy_static_pydata.one_obj == NULL) { + return -1; + } + + npy_static_pydata.zero_obj = PyLong_FromLong((long) 0); + if (npy_static_pydata.zero_obj == NULL) { + return -1; + } + + return 0; +} + + diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h new file mode 100644 index 000000000000..311f6bc43f0e --- /dev/null +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -0,0 +1,157 @@ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ + +NPY_NO_EXPORT int +initialize_static_globals(void); + +NPY_NO_EXPORT int +intern_strings(void); + +typedef struct npy_interned_str_struct { + PyObject *current_allocator; + PyObject *array; + PyObject *array_function; + PyObject *array_struct; + PyObject *array_priority; + PyObject *array_interface; + PyObject *array_wrap; + PyObject *array_finalize; + PyObject *array_ufunc; + PyObject *implementation; + PyObject *axis1; + PyObject *axis2; + PyObject *like; + PyObject *numpy; + PyObject *where; + PyObject *convert; + PyObject *preserve; + PyObject *convert_if_no_array; + PyObject *cpu; + PyObject *dtype; + PyObject *array_err_msg_substr; + PyObject *out; + PyObject *errmode_strings[6]; + PyObject *__dlpack__; + PyObject *pyvals_name; +} npy_interned_str_struct; + +/* + * A struct that stores static global data used throughout + * _multiarray_umath, mostly to cache results that would be + * prohibitively expensive to compute at runtime in a tight loop. + * + * All items in this struct should be initialized during module + * initialization and thereafter should be immutable. Mutating items in + * this struct after module initialization is likely not thread-safe. + */ + +typedef struct npy_static_pydata_struct { + /* + * Used in ufunc_type_resolution.c to avoid reconstructing a tuple + * storing the default true division return types. + */ + PyObject *default_truediv_type_tup; + + /* + * Used to set up the default extobj context variable + */ + PyObject *default_extobj_capsule; + + /* + * The global ContextVar to store the extobject. It is exposed to Python + * as `_extobj_contextvar`. 
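The loop above fills, for each of the 256 possible byte values, the eight 0/1 bytes of its big-endian bit expansion. A hedged sketch of how unpack_bits can then consume the table, one 64-bit store per input byte; the real loop additionally handles bit order and little-endian byteswapping:

static void
unpack_byte_per_bit(const npy_uint8 *in, npy_uint64 *out, npy_intp n)
{
    for (npy_intp i = 0; i < n; i++) {
        /* one table load replaces eight shift-and-mask steps */
        out[i] = npy_static_cdata.unpack_lookup_big[in[i]].uint64;
    }
}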
+ */ + PyObject *npy_extobj_contextvar; + + /* + * A reference to ndarray's implementations for __array_*__ special methods + */ + PyObject *ndarray_array_ufunc; + PyObject *ndarray_array_finalize; + PyObject *ndarray_array_function; + + /* + * References to the '1' and '0' PyLong objects + */ + PyObject *one_obj; + PyObject *zero_obj; + + /* + * References to items obtained via an import at module initialization + */ + PyObject *AxisError; + PyObject *ComplexWarning; + PyObject *DTypePromotionError; + PyObject *TooHardError; + PyObject *VisibleDeprecationWarning; + PyObject *_CopyMode; + PyObject *_NoValue; + PyObject *_ArrayMemoryError; + PyObject *_UFuncBinaryResolutionError; + PyObject *_UFuncInputCastingError; + PyObject *_UFuncNoLoopError; + PyObject *_UFuncOutputCastingError; + PyObject *math_floor_func; + PyObject *math_ceil_func; + PyObject *math_trunc_func; + PyObject *math_gcd_func; + PyObject *os_PathLike; + PyObject *os_fspath; + + /* + * Used in the __array__ internals to avoid building a tuple inline + */ + PyObject *kwnames_is_copy; + + /* + * Used in __imatmul__ to avoid building tuples inline + */ + PyObject *axes_1d_obj_kwargs; + PyObject *axes_2d_obj_kwargs; + + /* + * Used for CPU feature detection and dispatch + */ + PyObject *cpu_dispatch_registry; + + /* + * references to ArrayMethod implementations that are cached + * to avoid repeatedly creating them + */ + PyObject *VoidToGenericMethod; + PyObject *GenericToVoidMethod; + PyObject *ObjectToGenericMethod; + PyObject *GenericToObjectMethod; +} npy_static_pydata_struct; + + +typedef struct npy_static_cdata_struct { + /* + * stores sys.flags.optimize as a long, which is used in the add_docstring + * implementation + */ + long optimize; + + /* + * LUT used by unpack_bits + */ + union { + npy_uint8 bytes[8]; + npy_uint64 uint64; + } unpack_lookup_big[256]; + + /* + * A look-up table to recover integer type numbers from type characters. + * + * See the _MAX_LETTER and LETTER_TO_NUM macros in arraytypes.c.src. + * + * The smallest type number is ?, the largest is bounded by 'z'. 
+ */ + npy_int16 _letter_to_num['z' + 1 - '?']; +} npy_static_cdata_struct; + +NPY_VISIBILITY_HIDDEN extern npy_interned_str_struct npy_interned_str; +NPY_VISIBILITY_HIDDEN extern npy_static_pydata_struct npy_static_pydata; +NPY_VISIBILITY_HIDDEN extern npy_static_cdata_struct npy_static_cdata; + +#endif // NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 72bea87002f8..c33272de4eb5 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -19,7 +19,7 @@ #include "shape.h" -#include "multiarraymodule.h" /* for interned strings */ +#include "npy_static_data.h" /* for interned strings */ #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ #include "common.h" /* for convert_shape_to_string */ #include "alloc.h" diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 99d0c644cba3..fbdbbb8d2375 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -25,6 +25,7 @@ #include "dtypemeta.h" #include "dispatching.h" #include "gil_utils.h" +#include "multiarraymodule.h" typedef struct { PyArray_Descr base; diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index 0405b14a7b02..f9194acf6fd0 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -14,7 +14,6 @@ #include "extobj.h" #include "numpy/ufuncobject.h" -#include "ufunc_object.h" /* for npy_um_str_pyvals_name */ #include "common.h" diff --git a/numpy/_core/src/umath/funcs.inc.src b/numpy/_core/src/umath/funcs.inc.src index 131a678c7865..3825bd869468 100644 --- a/numpy/_core/src/umath/funcs.inc.src +++ b/numpy/_core/src/umath/funcs.inc.src @@ -9,6 +9,7 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "npy_import.h" +#include "npy_static_data.h" #include "multiarraymodule.h" /* diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index ad9c9a4c64f7..b7147a2aaad9 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -4,6 +4,7 @@ #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" #include "npy_import.h" +#include "npy_static_data.h" #include "multiarraymodule.h" #include "npy_pycompat.h" #include "override.h" diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index aedb548d9591..48b4f5905cff 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -62,6 +62,8 @@ #include "legacy_array_method.h" #include "abstractdtypes.h" #include "mapping.h" +#include "npy_static_data.h" +#include "multiarraymodule.h" /* TODO: Only for `NpyIter_GetTransferFlags` until it is public */ #define NPY_ITERATOR_IMPLEMENTATION_CODE diff --git a/numpy/_core/src/umath/ufunc_object.h b/numpy/_core/src/umath/ufunc_object.h index 645023f66aa5..f8e522374394 100644 --- a/numpy/_core/src/umath/ufunc_object.h +++ b/numpy/_core/src/umath/ufunc_object.h @@ -10,9 +10,4 @@ ufunc_get_name_cstr(PyUFuncObject *ufunc); NPY_NO_EXPORT PyObject * PyUFunc_GetDefaultIdentity(PyUFuncObject *ufunc, npy_bool *reorderable); -/* strings from umathmodule.c that are interned on umath import */ -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_array_ufunc; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_array_wrap; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_pyvals_name; - #endif diff --git a/numpy/_core/src/umath/umathmodule.c 
b/numpy/_core/src/umath/umathmodule.c index f83e33ed24b8..5402b17c399a 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -209,29 +209,6 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args) ***************************************************************************** */ -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_array_ufunc = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_array_wrap = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_pyvals_name = NULL; - -/* intern some strings used in ufuncs, returns 0 on success */ -static int -intern_strings(void) -{ - npy_um_str_array_ufunc = PyUnicode_InternFromString("__array_ufunc__"); - if (npy_um_str_array_ufunc == NULL) { - return -1; - } - npy_um_str_array_wrap = PyUnicode_InternFromString("__array_wrap__"); - if (npy_um_str_array_wrap == NULL) { - return -1; - } - npy_um_str_pyvals_name = PyUnicode_InternFromString(UFUNC_PYVALS_NAME); - if (npy_um_str_pyvals_name == NULL) { - return -1; - } - return 0; -} - /* Setup the umath part of the module */ int initumath(PyObject *m) @@ -297,12 +274,6 @@ int initumath(PyObject *m) PyDict_SetItemString(d, "conj", s); PyDict_SetItemString(d, "mod", s2); - if (intern_strings() < 0) { - PyErr_SetString(PyExc_RuntimeError, - "cannot intern strings while initializing _multiarray_umath."); - return -1; - } - /* * Set up promoters for logical functions * TODO: This should probably be done at a better place, or even in the From 98ae65d599720c322403899056dba408583fb755 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 7 Jun 2024 15:45:20 -0600 Subject: [PATCH 610/980] MNT: Add more global state I missed to the thread_unsafe_state struct --- numpy/_core/src/multiarray/arrayobject.c | 6 +- numpy/_core/src/multiarray/arrayobject.h | 2 - numpy/_core/src/multiarray/descriptor.c | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 85 +++---------------- numpy/_core/src/multiarray/multiarraymodule.h | 16 ++++ numpy/_core/src/multiarray/scalartypes.c.src | 36 ++++---- 6 files changed, 48 insertions(+), 99 deletions(-) diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 9c2a6b832288..15596f1f86a2 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -62,9 +62,7 @@ maintainer email: oliphant.travis@ieee.org #include "binop_override.h" #include "array_coercion.h" - - -NPY_NO_EXPORT npy_bool numpy_warn_if_no_mem_policy = 0; +#include "multiarraymodule.h" /*NUMPY_API Compute the size of an array (in number of items) @@ -429,7 +427,7 @@ array_dealloc(PyArrayObject *self) } } if (fa->mem_handler == NULL) { - if (numpy_warn_if_no_mem_policy) { + if (npy_thread_unsafe_state.warn_if_no_mem_policy) { char const *msg = "Trying to dealloc data, but a memory policy " "is not set. If you take ownership of the data, you must " "set a base owning the data (e.g. 
a PyCapsule)."; diff --git a/numpy/_core/src/multiarray/arrayobject.h b/numpy/_core/src/multiarray/arrayobject.h index 03e59c41ca92..8d6f84faa6b1 100644 --- a/numpy/_core/src/multiarray/arrayobject.h +++ b/numpy/_core/src/multiarray/arrayobject.h @@ -9,8 +9,6 @@ extern "C" { #endif -extern NPY_NO_EXPORT npy_bool numpy_warn_if_no_mem_policy; - NPY_NO_EXPORT PyObject * _strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op, int rstrip); diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index c1288cd53902..b9d30c80a2f8 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -23,7 +23,7 @@ #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ #include "descriptor.h" #include "npy_static_data.h" -#include "multiarraymodule.h" +#include "multiarraymodule.h" // for thread unsafe state access #include "alloc.h" #include "assert.h" #include "npy_buffer.h" diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index c6938ad10869..d9fca9c41114 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -98,24 +98,15 @@ NPY_NO_EXPORT PyObject * _umath_strings_richcompare( PyArrayObject *self, PyArrayObject *other, int cmp_op, int rstrip); -/* - * global variable to determine if legacy printing is enabled, accessible from - * C. For simplicity the mode is encoded as an integer where INT_MAX means no - * legacy mode, and '113'/'121' means 1.13/1.21 legacy mode; and 0 maps to - * INT_MAX. We can upgrade this if we have more complex requirements in the - * future. - */ -int npy_legacy_print_mode = INT_MAX; - static PyObject * set_legacy_print_mode(PyObject *NPY_UNUSED(self), PyObject *args) { - if (!PyArg_ParseTuple(args, "i", &npy_legacy_print_mode)) { + if (!PyArg_ParseTuple(args, "i", &npy_thread_unsafe_state.legacy_print_mode)) { return NULL; } - if (!npy_legacy_print_mode) { - npy_legacy_print_mode = INT_MAX; + if (!npy_thread_unsafe_state.legacy_print_mode) { + npy_thread_unsafe_state.legacy_print_mode = INT_MAX; } Py_RETURN_NONE; } @@ -4333,8 +4324,8 @@ _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) if (res < 0) { return NULL; } - int old_value = numpy_warn_if_no_mem_policy; - numpy_warn_if_no_mem_policy = res; + int old_value = npy_thread_unsafe_state.warn_if_no_mem_policy; + npy_thread_unsafe_state.warn_if_no_mem_policy = res; if (old_value) { Py_RETURN_TRUE; } @@ -4774,69 +4765,13 @@ static int initialize_thread_unsafe_state(void) { char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY"); if ((env != NULL) && (strncmp(env, "1", 1) == 0)) { - numpy_warn_if_no_mem_policy = 1; + npy_thread_unsafe_state.warn_if_no_mem_policy = 1; } else { - numpy_warn_if_no_mem_policy = 0; - } - - // default_truediv_type_tup - PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE); - if (tmp == NULL) { - return -1; - } - - npy_static_pydata.default_truediv_type_tup = - PyTuple_Pack(3, tmp, tmp, tmp); - if (npy_static_pydata.default_truediv_type_tup == NULL) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - - PyObject *flags = PySys_GetObject("flags"); /* borrowed object */ - if (flags == NULL) { - PyErr_SetString(PyExc_AttributeError, "cannot get sys.flags"); - return -1; - } - PyObject *level = PyObject_GetAttrString(flags, "optimize"); - if (level == NULL) { - return -1; - } - npy_static_cdata.optimize = PyLong_AsLong(level); - Py_DECREF(level); - - /* - * see 
unpack_bits for how this table is used. - * - * LUT for bigendian bitorder, littleendian is handled via - * byteswapping in the loop. - * - * 256 8 byte blocks representing 8 bits expanded to 1 or 0 bytes - */ - npy_intp j; - for (j=0; j < 256; j++) { - npy_intp k; - for (k=0; k < 8; k++) { - npy_uint8 v = (j & (1 << k)) == (1 << k); - npy_static_cdata.unpack_lookup_big[j].bytes[7 - k] = v; - } - } - - npy_static_pydata.kwnames_is_copy = Py_BuildValue("(s)", "copy"); - if (npy_static_pydata.kwnames_is_copy == NULL) { - return -1; + npy_thread_unsafe_state.warn_if_no_mem_policy = 0; } - npy_static_pydata.one_obj = PyLong_FromLong((long) 1); - if (npy_static_pydata.one_obj == NULL) { - return -1; - } - - npy_static_pydata.zero_obj = PyLong_FromLong((long) 0); - if (npy_static_pydata.zero_obj == NULL) { - return -1; - } + npy_thread_unsafe_state.legacy_print_mode = INT_MAX; return 0; } @@ -4903,6 +4838,10 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } + if (initialize_thread_unsafe_state() < 0) { + goto err; + } + if (init_extobj() < 0) { goto err; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index f03c2640f811..218dc601613a 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -70,6 +70,22 @@ typedef struct npy_thread_unsafe_state_struct { * used to detect module reloading in the reload guard */ int reload_guard_initialized; + + /* + * global variable to determine if legacy printing is enabled, + * accessible from C. For simplicity the mode is encoded as an + * integer where INT_MAX means no legacy mode, and '113'/'121' + * means 1.13/1.21 legacy mode; and 0 maps to INT_MAX. We can + * upgrade this if we have more complex requirements in the future. 
+ */ + int legacy_print_mode; + + /* + * Holds the user-defined setting for whether or not to warn + * if there is no memory policy set + */ + int warn_if_no_mem_policy; + } npy_thread_unsafe_state_struct; diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 15c792d1b5af..a0517c247215 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -33,15 +33,13 @@ #include "dragon4.h" #include "npy_longdouble.h" #include "npy_buffer.h" +#include "npy_static_data.h" #include "multiarraymodule.h" #include #include "binop_override.h" -/* determines if legacy mode is enabled, global set in multiarraymodule.c */ -extern int npy_legacy_print_mode; - /* * used for allocating a single scalar, so use the default numpy * memory allocators instead of the (maybe) user overrides @@ -338,7 +336,7 @@ genint_type_repr(PyObject *self) if (value_string == NULL) { return NULL; } - if (npy_legacy_print_mode <= 125) { + if (npy_thread_unsafe_state.legacy_print_mode <= 125) { return value_string; } @@ -375,7 +373,7 @@ genbool_type_str(PyObject *self) static PyObject * genbool_type_repr(PyObject *self) { - if (npy_legacy_print_mode <= 125) { + if (npy_thread_unsafe_state.legacy_print_mode <= 125) { return genbool_type_str(self); } return PyUnicode_FromString( @@ -501,7 +499,7 @@ stringtype_@form@(PyObject *self) if (ret == NULL) { return NULL; } - if (npy_legacy_print_mode > 125) { + if (npy_thread_unsafe_state.legacy_print_mode > 125) { Py_SETREF(ret, PyUnicode_FromFormat("np.bytes_(%S)", ret)); } #endif /* IS_repr */ @@ -548,7 +546,7 @@ unicodetype_@form@(PyObject *self) if (ret == NULL) { return NULL; } - if (npy_legacy_print_mode > 125) { + if (npy_thread_unsafe_state.legacy_print_mode > 125) { Py_SETREF(ret, PyUnicode_FromFormat("np.str_(%S)", ret)); } #endif /* IS_repr */ @@ -629,7 +627,7 @@ voidtype_repr(PyObject *self) /* Python helper checks for the legacy mode printing */ return _void_scalar_to_string(self, 1); } - if (npy_legacy_print_mode > 125) { + if (npy_thread_unsafe_state.legacy_print_mode > 125) { return _void_to_hex(s->obval, s->descr->elsize, "np.void(b'", "\\x", "')"); } else { @@ -681,7 +679,7 @@ datetimetype_repr(PyObject *self) */ if ((scal->obmeta.num == 1 && scal->obmeta.base != NPY_FR_h) || scal->obmeta.base == NPY_FR_GENERIC) { - if (npy_legacy_print_mode > 125) { + if (npy_thread_unsafe_state.legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); } else { @@ -693,7 +691,7 @@ datetimetype_repr(PyObject *self) if (meta == NULL) { return NULL; } - if (npy_legacy_print_mode > 125) { + if (npy_thread_unsafe_state.legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.datetime64('%s','%S')", iso, meta); } else { @@ -737,7 +735,7 @@ timedeltatype_repr(PyObject *self) /* The metadata unit */ if (scal->obmeta.base == NPY_FR_GENERIC) { - if (npy_legacy_print_mode > 125) { + if (npy_thread_unsafe_state.legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.timedelta64(%S)", val); } else { @@ -750,7 +748,7 @@ timedeltatype_repr(PyObject *self) Py_DECREF(val); return NULL; } - if (npy_legacy_print_mode > 125) { + if (npy_thread_unsafe_state.legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.timedelta64(%S,'%S')", val, meta); } else { @@ -1052,7 +1050,7 @@ static PyObject * npy_bool sign) { - if (npy_legacy_print_mode <= 113) { + if (npy_thread_unsafe_state.legacy_print_mode <= 113) { return legacy_@name@_format@kind@(val); } @@ -1083,7 +1081,7 @@ 
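Because the mode encoding is monotone (113 < 121 < 125 < INT_MAX), call sites can select behaviour with simple threshold checks, which is what the scalartypes.c.src hunks in this patch do. Schematically:

int mode = npy_thread_unsafe_state.legacy_print_mode;
if (mode <= 113) {
    /* NumPy 1.13-era formatting */
}
else if (mode <= 125) {
    /* pre-2.0 formatting, without np.float64(...)-style repr wrappers */
}
else {
    /* current formatting; INT_MAX means no legacy mode was requested */
}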
static PyObject * if (string == NULL) { return NULL; } - if (npy_legacy_print_mode > 125) { + if (npy_thread_unsafe_state.legacy_print_mode > 125) { Py_SETREF(string, PyUnicode_FromFormat("@repr_format@", string)); } #endif /* IS_repr */ @@ -1098,7 +1096,7 @@ c@name@type_@kind@(PyObject *self) npy_c@name@ val = PyArrayScalar_VAL(self, C@Name@); TrimMode trim = TrimMode_DptZeros; - if (npy_legacy_print_mode <= 113) { + if (npy_thread_unsafe_state.legacy_print_mode <= 113) { return legacy_c@name@_format@kind@(val); } @@ -1111,7 +1109,7 @@ c@name@type_@kind@(PyObject *self) #ifdef IS_str ret = PyUnicode_FromFormat("%Sj", istr); #else /* IS_repr */ - if (npy_legacy_print_mode <= 125) { + if (npy_thread_unsafe_state.legacy_print_mode <= 125) { ret = PyUnicode_FromFormat("%Sj", istr); } else { @@ -1159,7 +1157,7 @@ c@name@type_@kind@(PyObject *self) #ifdef IS_str string = PyUnicode_FromFormat("(%S%Sj)", rstr, istr); #else /* IS_repr */ - if (npy_legacy_print_mode > 125) { + if (npy_thread_unsafe_state.legacy_print_mode > 125) { string = PyUnicode_FromFormat("@crepr_format@", rstr, istr); } else { @@ -1184,7 +1182,7 @@ halftype_@kind@(PyObject *self) float floatval = npy_half_to_float(val); float absval; - if (npy_legacy_print_mode <= 113) { + if (npy_thread_unsafe_state.legacy_print_mode <= 113) { return legacy_float_format@kind@(floatval); } @@ -1200,7 +1198,7 @@ halftype_@kind@(PyObject *self) #ifdef IS_str return string; #else - if (string == NULL || npy_legacy_print_mode <= 125) { + if (string == NULL || npy_thread_unsafe_state.legacy_print_mode <= 125) { return string; } PyObject *res = PyUnicode_FromFormat("np.float16(%S)", string); From a334ddcdd172533d4c89e7c77a62dd6827916578 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 11 Jun 2024 13:58:18 -0600 Subject: [PATCH 611/980] MNT: verify all entries in npy_interned_str and npy_static_pydata are non-null --- numpy/_core/src/multiarray/multiarraymodule.c | 4 + numpy/_core/src/multiarray/npy_static_data.c | 213 +++++++++--------- numpy/_core/src/multiarray/npy_static_data.h | 5 + numpy/_core/src/umath/extobj.c | 16 -- 4 files changed, 112 insertions(+), 126 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index d9fca9c41114..2a0c12aaf80c 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -5088,6 +5088,10 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } + if (verify_static_structs_initialized() < 0) { + goto err; + } + /* * Export the API tables */ diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index be25c05eb654..5496c89fb2d6 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -12,112 +12,55 @@ #include "numpy/arrayobject.h" #include "npy_import.h" #include "npy_static_data.h" +#include "extobj.h" // static variables are zero-filled by default, no need to explicitly do so NPY_VISIBILITY_HIDDEN npy_interned_str_struct npy_interned_str; NPY_VISIBILITY_HIDDEN npy_static_pydata_struct npy_static_pydata; NPY_VISIBILITY_HIDDEN npy_static_cdata_struct npy_static_cdata; +#define INTERN_STRING(struct_member, string) \ + assert(npy_interned_str.struct_member == NULL); \ + npy_interned_str.struct_member = PyUnicode_InternFromString(string); \ + if (npy_interned_str.struct_member == NULL) { \ + return -1; \ + } \ + NPY_NO_EXPORT int intern_strings(void) { - 
npy_interned_str.current_allocator = PyUnicode_InternFromString("current_allocator"); - if (npy_interned_str.current_allocator == NULL) { - return -1; - } - npy_interned_str.array = PyUnicode_InternFromString("__array__"); - if (npy_interned_str.array == NULL) { - return -1; - } - npy_interned_str.array_function = PyUnicode_InternFromString("__array_function__"); - if (npy_interned_str.array_function == NULL) { - return -1; - } - npy_interned_str.array_struct = PyUnicode_InternFromString("__array_struct__"); - if (npy_interned_str.array_struct == NULL) { - return -1; - } - npy_interned_str.array_priority = PyUnicode_InternFromString("__array_priority__"); - if (npy_interned_str.array_priority == NULL) { - return -1; - } - npy_interned_str.array_interface = PyUnicode_InternFromString("__array_interface__"); - if (npy_interned_str.array_interface == NULL) { - return -1; - } - npy_interned_str.array_ufunc = PyUnicode_InternFromString("__array_ufunc__"); - if (npy_interned_str.array_ufunc == NULL) { - return -1; - } - npy_interned_str.array_wrap = PyUnicode_InternFromString("__array_wrap__"); - if (npy_interned_str.array_wrap == NULL) { - return -1; - } - npy_interned_str.array_finalize = PyUnicode_InternFromString("__array_finalize__"); - if (npy_interned_str.array_finalize == NULL) { - return -1; - } - npy_interned_str.implementation = PyUnicode_InternFromString("_implementation"); - if (npy_interned_str.implementation == NULL) { - return -1; - } - npy_interned_str.axis1 = PyUnicode_InternFromString("axis1"); - if (npy_interned_str.axis1 == NULL) { - return -1; - } - npy_interned_str.axis2 = PyUnicode_InternFromString("axis2"); - if (npy_interned_str.axis2 == NULL) { - return -1; - } - npy_interned_str.like = PyUnicode_InternFromString("like"); - if (npy_interned_str.like == NULL) { - return -1; - } - npy_interned_str.numpy = PyUnicode_InternFromString("numpy"); - if (npy_interned_str.numpy == NULL) { - return -1; - } - npy_interned_str.where = PyUnicode_InternFromString("where"); - if (npy_interned_str.where == NULL) { - return -1; - } - npy_interned_str.convert = PyUnicode_InternFromString("convert"); - if (npy_interned_str.convert == NULL) { - return -1; - } - npy_interned_str.preserve = PyUnicode_InternFromString("preserve"); - if (npy_interned_str.preserve == NULL) { - return -1; - } - npy_interned_str.convert_if_no_array = PyUnicode_InternFromString("convert_if_no_array"); - if (npy_interned_str.convert_if_no_array == NULL) { - return -1; - } - npy_interned_str.cpu = PyUnicode_InternFromString("cpu"); - if (npy_interned_str.cpu == NULL) { - return -1; - } - npy_interned_str.dtype = PyUnicode_InternFromString("dtype"); - if (npy_interned_str.dtype == NULL) { - return -1; - } - npy_interned_str.array_err_msg_substr = PyUnicode_InternFromString( + INTERN_STRING(current_allocator, "current_allocator"); + INTERN_STRING(array, "__array__"); + INTERN_STRING(array_function, "__array_function__"); + INTERN_STRING(array_struct, "__array_struct__"); + INTERN_STRING(array_priority, "__array_priority__"); + INTERN_STRING(array_interface, "__array_interface__"); + INTERN_STRING(array_ufunc, "__array_ufunc__"); + INTERN_STRING(array_wrap, "__array_wrap__"); + INTERN_STRING(array_finalize, "__array_finalize__"); + INTERN_STRING(implementation, "_implementation"); + INTERN_STRING(axis1, "axis1"); + INTERN_STRING(axis2, "axis2"); + INTERN_STRING(like, "like"); + INTERN_STRING(numpy, "numpy"); + INTERN_STRING(where, "where"); + INTERN_STRING(convert, "convert"); + INTERN_STRING(preserve, "preserve"); + 
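INTERN_STRING expands to several statements, including an early return -1, so it is only usable at statement level inside a function returning int; intern_strings uses it exactly that way. A more defensive variant, sketched here purely for illustration, would wrap the body in the usual do/while(0) idiom so it also behaves as a single statement under an unbraced if/else:

#define INTERN_STRING_STMT(struct_member, string)               \
    do {                                                        \
        assert(npy_interned_str.struct_member == NULL);         \
        npy_interned_str.struct_member =                        \
                PyUnicode_InternFromString(string);             \
        if (npy_interned_str.struct_member == NULL) {           \
            return -1;                                          \
        }                                                       \
    } while (0)

The in-tree macro omits the wrapper because every use site is a bare statement followed by a semicolon.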
INTERN_STRING(convert_if_no_array, "convert_if_no_array"); + INTERN_STRING(cpu, "cpu"); + INTERN_STRING(dtype, "dtype"); + INTERN_STRING( + array_err_msg_substr, "__array__() got an unexpected keyword argument 'copy'"); - if (npy_interned_str.array_err_msg_substr == NULL) { - return -1; - } - npy_interned_str.out = PyUnicode_InternFromString("out"); - if (npy_interned_str.out == NULL) { - return -1; - } - npy_interned_str.__dlpack__ = PyUnicode_InternFromString("__dlpack__"); - if (npy_interned_str.__dlpack__ == NULL) { - return -1; - } - npy_interned_str.pyvals_name = PyUnicode_InternFromString("UFUNC_PYVALS_NAME"); - if (npy_interned_str.pyvals_name == NULL) { - return -1; - } + INTERN_STRING(out, "out"); + INTERN_STRING(errmode_strings[0], "ignore"); + INTERN_STRING(errmode_strings[1], "warn"); + INTERN_STRING(errmode_strings[2], "raise"); + INTERN_STRING(errmode_strings[3], "call"); + INTERN_STRING(errmode_strings[4], "print"); + INTERN_STRING(errmode_strings[5], "log"); + INTERN_STRING(__dlpack__, "__dlpack__"); + INTERN_STRING(pyvals_name, "UFUNC_PYVALS_NAME"); return 0; } @@ -149,7 +92,13 @@ intern_strings(void) NPY_NO_EXPORT int initialize_static_globals(void) { - // cached reference to objects defined in python + /* + * Initialize contents of npy_static_pydata struct + * + * This struct holds cached references to python objects + * that we want to keep alive for the lifetime of the + * module for performance reasons + */ IMPORT_GLOBAL("math", "floor", npy_static_pydata.math_floor_func); @@ -219,6 +168,32 @@ initialize_static_globals(void) } Py_DECREF(tmp); + npy_static_pydata.kwnames_is_copy = Py_BuildValue("(s)", "copy"); + if (npy_static_pydata.kwnames_is_copy == NULL) { + return -1; + } + + npy_static_pydata.one_obj = PyLong_FromLong((long) 1); + if (npy_static_pydata.one_obj == NULL) { + return -1; + } + + npy_static_pydata.zero_obj = PyLong_FromLong((long) 0); + if (npy_static_pydata.zero_obj == NULL) { + return -1; + } + + /* + * Initialize contents of npy_static_cdata struct + * + * Note that some entries are initialized elsewhere. Care + * must be taken to ensure all entries are initialized during + * module initialization and immutable thereafter. + * + * This struct holds global static caches. These are set + * up this way for performance reasons. + */ + PyObject *flags = PySys_GetObject("flags"); /* borrowed object */ if (flags == NULL) { PyErr_SetString(PyExc_AttributeError, "cannot get sys.flags"); @@ -248,22 +223,40 @@ initialize_static_globals(void) } } - npy_static_pydata.kwnames_is_copy = Py_BuildValue("(s)", "copy"); - if (npy_static_pydata.kwnames_is_copy == NULL) { - return -1; - } + return 0; +} +/* + * Verifies all entries in npy_interned_str and npy_static_pydata are + * non-NULL. + * + * Called at the end of initialization for _multiarray_umath. Some + * entries are initialized outside of this file because they depend on + * items that are initialized late in module initialization but they + * should all be initialized by the time this function is called. 
+ */ - npy_static_pydata.one_obj = PyLong_FromLong((long) 1); - if (npy_static_pydata.one_obj == NULL) { - return -1; +NPY_NO_EXPORT int +verify_static_structs_initialized(void) { + // verify all entries in npy_interned_str are filled in + for (int i=0; i < (sizeof(npy_interned_str_struct)/sizeof(PyObject *)); i++) { + if (*(((PyObject **)&npy_interned_str) + i) == NULL) { + PyErr_Format( + PyExc_SystemError, + "NumPy internal error: NULL entry detected in " + "npy_interned_str at index %d", i); + return -1; + } } - npy_static_pydata.zero_obj = PyLong_FromLong((long) 0); - if (npy_static_pydata.zero_obj == NULL) { - return -1; + // verify all entries in npy_static_pydata are filled in + for (int i=0; i < (sizeof(npy_static_pydata_struct)/sizeof(PyObject *)); i++) { + if (*(((PyObject **)&npy_static_pydata) + i) == NULL) { + PyErr_Format( + PyExc_SystemError, + "NumPy internal error: NULL entry detected in " + "npy_static_pydata at index %d", i); + return -1; + } } - return 0; } - - diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index 311f6bc43f0e..64e8f29c5c7a 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -7,6 +7,9 @@ initialize_static_globals(void); NPY_NO_EXPORT int intern_strings(void); +NPY_NO_EXPORT int +verify_static_structs_initialized(void); + typedef struct npy_interned_str_struct { PyObject *current_allocator; PyObject *array; @@ -146,6 +149,8 @@ typedef struct npy_static_cdata_struct { * See the _MAX_LETTER and LETTER_TO_NUM macros in arraytypes.c.src. * * The smallest type number is ?, the largest is bounded by 'z'. + * + * This is initialized alongside the built-in dtypes */ npy_int16 _letter_to_num['z' + 1 - '?']; } npy_static_cdata_struct; diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index f9194acf6fd0..755d8665b11d 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -35,10 +35,6 @@ #define UFUNC_SHIFT_UNDERFLOW 6 #define UFUNC_SHIFT_INVALID 9 -/* The python strings for the above error modes defined in extobj.h */ -const char *errmode_cstrings[] = { - "ignore", "warn", "raise", "call", "print", "log"}; - /* Default user error mode (underflows are ignored, others warn) */ #define UFUNC_ERR_DEFAULT \ (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \ @@ -144,18 +140,6 @@ fetch_curr_extobj_state(npy_extobj *extobj) NPY_NO_EXPORT int init_extobj(void) { - /* - * First initialize the string constants we need to parse `errstate()` - * inputs. 
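The verification loops treat each struct as a flat array of object pointers, which is sound only because every member is a PyObject * (uniform element type, no interior padding in practice). A reduced sketch of the idiom with an illustrative stand-in struct:

typedef struct {
    PyObject *first;
    PyObject *second;
    PyObject *third;
} ptr_table;    /* stand-in for npy_interned_str_struct */

static int
all_slots_filled(const ptr_table *t)
{
    PyObject *const *slots = (PyObject *const *)t;
    size_t n = sizeof(ptr_table) / sizeof(PyObject *);
    for (size_t i = 0; i < n; i++) {
        if (slots[i] == NULL) {
            return 0;   /* slot i was never initialized */
        }
    }
    return 1;
}

npy_static_cdata is deliberately not scanned this way, since it mixes longs, unions, and arrays rather than uniform pointers.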
- */ - for (int i = 0; i <= UFUNC_ERR_LOG; i++) { - npy_interned_str.errmode_strings[i] = PyUnicode_InternFromString( - errmode_cstrings[i]); - if (npy_interned_str.errmode_strings[i] == NULL) { - return -1; - } - } - npy_static_pydata.default_extobj_capsule = make_extobj_capsule( NPY_BUFSIZE, UFUNC_ERR_DEFAULT, Py_None); if (npy_static_pydata.default_extobj_capsule == NULL) { From 9ed317f202aea1dca17378f0525e7c5b5b40d787 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 13 Jun 2024 11:38:39 -0600 Subject: [PATCH 612/980] Apply suggestions from code review Co-authored-by: Sebastian Berg --- numpy/_core/src/multiarray/npy_static_data.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 5496c89fb2d6..e2f643555f14 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -156,17 +156,12 @@ initialize_static_globals(void) // default_truediv_type_tupS PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE); - if (tmp == NULL) { - return -1; - } - npy_static_pydata.default_truediv_type_tup = PyTuple_Pack(3, tmp, tmp, tmp); + Py_DECREF(tmp); if (npy_static_pydata.default_truediv_type_tup == NULL) { - Py_DECREF(tmp); return -1; } - Py_DECREF(tmp); npy_static_pydata.kwnames_is_copy = Py_BuildValue("(s)", "copy"); if (npy_static_pydata.kwnames_is_copy == NULL) { @@ -225,6 +220,8 @@ initialize_static_globals(void) return 0; } + + /* * Verifies all entries in npy_interned_str and npy_static_pydata are * non-NULL. @@ -234,7 +231,6 @@ initialize_static_globals(void) * items that are initialized late in module initialization but they * should all be initialized by the time this function is called. 
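The review change above leans on two facts: PyTuple_Pack takes its own references to its arguments, so tmp needs exactly one release on every path, and PyArray_DescrFromType presumably cannot return NULL for a builtin type number like NPY_DOUBLE, which is why its check could be dropped. The refcount shape, in a standalone sketch with a stand-in temporary:

PyObject *tmp = PyLong_FromLong(0);           /* stand-in temporary */
PyObject *tup = (tmp == NULL) ? NULL : PyTuple_Pack(3, tmp, tmp, tmp);
Py_XDECREF(tmp);      /* PyTuple_Pack took its own refs; drop ours once */
if (tup == NULL) {
    /* propagate the error */
}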
*/ - NPY_NO_EXPORT int verify_static_structs_initialized(void) { // verify all entries in npy_interned_str are filled in From 3ae66b1909e96afdd9e7221c1684e42a00d4a9e4 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 13 Jun 2024 11:39:27 -0600 Subject: [PATCH 613/980] MAINT: apply more of Sebastian's suggestions --- numpy/_core/src/multiarray/multiarraymodule.c | 18 ++++++++++++++++++ numpy/_core/src/multiarray/npy_static_data.c | 2 +- numpy/_core/src/multiarray/npy_static_data.h | 5 +++++ numpy/_core/src/umath/override.c | 15 +++++++-------- numpy/_core/src/umath/ufunc_object.c | 12 +++--------- 5 files changed, 34 insertions(+), 18 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 2a0c12aaf80c..c099b63f4633 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -5044,10 +5044,19 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { // initialize static references to ndarray.__array_*__ special methods npy_static_pydata.ndarray_array_finalize = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_finalize__"); + if (npy_static_pydata.ndarray_array_finalize == NULL) { + goto err; + } npy_static_pydata.ndarray_array_ufunc = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_ufunc__"); + if (npy_static_pydata.ndarray_array_ufunc == NULL) { + goto err; + } npy_static_pydata.ndarray_array_function = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_function__"); + if (npy_static_pydata.ndarray_array_function == NULL) { + goto err; + } /* * Initialize np.dtypes.StringDType @@ -5088,6 +5097,15 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } + // initialize static reference to a zero-like array + npy_static_pydata.zero_pyint_like_arr = PyArray_ZEROS( + 0, NULL, NPY_LONG, NPY_FALSE); + if (npy_static_pydata.zero_pyint_like_arr == NULL) { + goto err; + } + ((PyArrayObject_fields *)npy_static_pydata.zero_pyint_like_arr)->flags |= + (NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); + if (verify_static_structs_initialized() < 0) { goto err; } diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index e2f643555f14..7f5e58dde21a 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -154,7 +154,7 @@ initialize_static_globals(void) IMPORT_GLOBAL("os", "PathLike", npy_static_pydata.os_PathLike); - // default_truediv_type_tupS + // default_truediv_type_tup PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE); npy_static_pydata.default_truediv_type_tup = PyTuple_Pack(3, tmp, tmp, tmp); diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index 64e8f29c5c7a..c4d3ef4cdfee 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -79,6 +79,11 @@ typedef struct npy_static_pydata_struct { PyObject *one_obj; PyObject *zero_obj; + /* + * Reference to an np.array(0, dtype=np.long) instance + */ + PyObject *zero_pyint_like_arr; + /* * References to items obtained via an import at module initialization */ diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index b7147a2aaad9..55cca0857229 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -373,14 +373,13 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, "numpy._core._internal", 
"array_ufunc_errmsg_formatter", &npy_thread_unsafe_state.array_ufunc_errmsg_formatter); - if (npy_thread_unsafe_state.array_ufunc_errmsg_formatter != NULL) { - errmsg = PyObject_Call( - npy_thread_unsafe_state.array_ufunc_errmsg_formatter, - override_args, normal_kwds); - if (errmsg != NULL) { - PyErr_SetObject(PyExc_TypeError, errmsg); - Py_DECREF(errmsg); - } + assert(npy_thread_unsafe_state.array_ufunc_errmsg_formatter != NULL); + errmsg = PyObject_Call( + npy_thread_unsafe_state.array_ufunc_errmsg_formatter, + override_args, normal_kwds); + if (errmsg != NULL) { + PyErr_SetObject(PyExc_TypeError, errmsg); + Py_DECREF(errmsg); } Py_DECREF(override_args); goto fail; diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 48b4f5905cff..f6ad33f68016 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -700,15 +700,9 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, * TODO: Just like the general dual NEP 50/legacy promotion * support this is meant as a temporary hack for NumPy 1.25. */ - PyArrayObject *zero_arr = (PyArrayObject *)PyArray_ZEROS( - 0, NULL, NPY_LONG, NPY_FALSE); - if (zero_arr == NULL) { - goto fail; - } - ((PyArrayObject_fields *)zero_arr)->flags |= ( - NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); - Py_INCREF(zero_arr); - Py_SETREF(out_op[i], zero_arr); + Py_INCREF(npy_static_pydata.zero_pyint_like_arr); + Py_SETREF(out_op[i], + (PyArrayObject *)npy_static_pydata.zero_pyint_like_arr); } *promoting_pyscalars = NPY_TRUE; } From 3a4e776fd107a2803f34b654c48898b052ace923 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 18 Jun 2024 15:15:40 -0600 Subject: [PATCH 614/980] BUG: lock umath_linalg lapack calls when no BLAS is detected --- numpy/_core/config.h.in | 2 + numpy/_core/meson.build | 4 ++ numpy/linalg/umath_linalg.cpp | 76 ++++++++++++++++++++++++++++++++++- 3 files changed, 81 insertions(+), 1 deletion(-) diff --git a/numpy/_core/config.h.in b/numpy/_core/config.h.in index e58609b7f073..7625615270a2 100644 --- a/numpy/_core/config.h.in +++ b/numpy/_core/config.h.in @@ -108,6 +108,8 @@ #mesondefine HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE #mesondefine HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE +#mesondefine HAVE_EXTERNAL_LAPACK + #ifndef __cplusplus /* #undef inline */ #endif diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index dbe76e0a3dea..5143dca5f355 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -506,6 +506,10 @@ if cc.has_function_attribute('visibility:hidden') and host_machine.system() != ' endif cdata.set('NPY_VISIBILITY_HIDDEN', visibility_hidden) +# if not set, we're using lapack_lite +if have_lapack + cdata.set10('HAVE_EXTERNAL_LAPACK', have_blas) +endif config_h = configure_file( input: 'config.h.in', diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index 086e1c00bf25..1cd5a005f89d 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -27,6 +27,9 @@ static const char* umath_linalg_version_string = "0.1.5"; +// global lock to serialize calls into lapack_lite +static PyThread_type_lock lapack_lite_lock; + /* **************************************************************************** * Debugging support * @@ -400,6 +403,13 @@ FNAME(zgemm)(char *transa, char *transb, #define LAPACK(FUNC) \ FNAME(FUNC) +#ifdef HAVE_EXTERNAL_LAPACK + #define LOCK_LAPACK_LITE + #define UNLOCK_LAPACK_LITE +#else + #define LOCK_LAPACK_LITE PyThread_acquire_lock(lapack_lite_lock, WAIT_LOCK) + #define 
UNLOCK_LAPACK_LITE PyThread_release_lock(lapack_lite_lock) +#endif /* ***************************************************************************** @@ -1110,7 +1120,9 @@ using ftyp = fortran_type_t; fortran_int lda = fortran_int_max(m, 1); int i; /* note: done in place */ + LOCK_LAPACK_LITE; getrf(&m, &m, (ftyp*)src, &lda, pivots, &info); + UNLOCK_LAPACK_LITE; if (info == 0) { int change_sign = 0; @@ -1262,22 +1274,26 @@ static inline fortran_int call_evd(EIGH_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(ssyevd)(¶ms->JOBZ, ¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, params->W, params->WORK, ¶ms->LWORK, params->IWORK, ¶ms->LIWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_evd(EIGH_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dsyevd)(¶ms->JOBZ, ¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, params->W, params->WORK, ¶ms->LWORK, params->IWORK, ¶ms->LIWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1364,12 +1380,14 @@ static inline fortran_int call_evd(EIGH_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cheevd)(¶ms->JOBZ, ¶ms->UPLO, ¶ms->N, (fortran_type_t*)params->A, ¶ms->LDA, params->W, (fortran_type_t*)params->WORK, ¶ms->LWORK, params->RWORK, ¶ms->LRWORK, params->IWORK, ¶ms->LIWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1377,12 +1395,14 @@ static inline fortran_int call_evd(EIGH_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zheevd)(¶ms->JOBZ, ¶ms->UPLO, ¶ms->N, (fortran_type_t*)params->A, ¶ms->LDA, params->W, (fortran_type_t*)params->WORK, ¶ms->LWORK, params->RWORK, ¶ms->LRWORK, params->IWORK, ¶ms->LIWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1616,11 +1636,13 @@ static inline fortran_int call_gesv(GESV_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(sgesv)(¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->IPIV, params->B, ¶ms->LDB, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1628,11 +1650,13 @@ static inline fortran_int call_gesv(GESV_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgesv)(¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->IPIV, params->B, ¶ms->LDB, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1640,11 +1664,13 @@ static inline fortran_int call_gesv(GESV_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cgesv)(¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->IPIV, params->B, ¶ms->LDB, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1652,11 +1678,13 @@ static inline fortran_int call_gesv(GESV_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgesv)(¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->IPIV, params->B, ¶ms->LDB, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1870,9 +1898,11 @@ static inline fortran_int call_potrf(POTR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(spotrf)(¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1880,9 +1910,11 @@ static inline fortran_int call_potrf(POTR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dpotrf)(¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1890,9 +1922,11 @@ static inline fortran_int call_potrf(POTR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cpotrf)(¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1900,9 +1934,11 @@ static inline fortran_int call_potrf(POTR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zpotrf)(¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2073,6 
+2109,7 @@ static inline fortran_int call_geev(GEEV_PARAMS_t* params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(sgeev)(¶ms->JOBVL, ¶ms->JOBVR, ¶ms->N, params->A, ¶ms->LDA, params->WR, params->WI, @@ -2080,6 +2117,7 @@ call_geev(GEEV_PARAMS_t* params) params->VRR, ¶ms->LDVR, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2087,6 +2125,7 @@ static inline fortran_int call_geev(GEEV_PARAMS_t* params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgeev)(¶ms->JOBVL, ¶ms->JOBVR, ¶ms->N, params->A, ¶ms->LDA, params->WR, params->WI, @@ -2094,6 +2133,7 @@ call_geev(GEEV_PARAMS_t* params) params->VRR, ¶ms->LDVR, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2285,6 +2325,7 @@ call_geev(GEEV_PARAMS_t* params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cgeev)(¶ms->JOBVL, ¶ms->JOBVR, ¶ms->N, params->A, ¶ms->LDA, params->W, @@ -2293,6 +2334,7 @@ call_geev(GEEV_PARAMS_t* params) params->WORK, ¶ms->LWORK, params->WR, /* actually RWORK */ &rv); + UNLOCK_LAPACK_LITE; return rv; } #endif @@ -2302,6 +2344,7 @@ call_geev(GEEV_PARAMS_t* params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgeev)(¶ms->JOBVL, ¶ms->JOBVR, ¶ms->N, params->A, ¶ms->LDA, params->W, @@ -2310,6 +2353,7 @@ call_geev(GEEV_PARAMS_t* params) params->WORK, ¶ms->LWORK, params->WR, /* actually RWORK */ &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2632,6 +2676,7 @@ static inline fortran_int call_gesdd(GESDD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(sgesdd)(¶ms->JOBZ, ¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->S, @@ -2640,12 +2685,14 @@ call_gesdd(GESDD_PARAMS_t *params) params->WORK, ¶ms->LWORK, (fortran_int*)params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_gesdd(GESDD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgesdd)(¶ms->JOBZ, ¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->S, @@ -2654,6 +2701,7 @@ call_gesdd(GESDD_PARAMS_t *params) params->WORK, ¶ms->LWORK, (fortran_int*)params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2760,6 +2808,7 @@ static inline fortran_int call_gesdd(GESDD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cgesdd)(¶ms->JOBZ, ¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->S, @@ -2769,12 +2818,14 @@ call_gesdd(GESDD_PARAMS_t *params) params->RWORK, params->IWORK, &rv); + LOCK_LAPACK_LITE; return rv; } static inline fortran_int call_gesdd(GESDD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgesdd)(¶ms->JOBZ, ¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->S, @@ -2784,6 +2835,7 @@ call_gesdd(GESDD_PARAMS_t *params) params->RWORK, params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3074,22 +3126,26 @@ static inline fortran_int call_geqrf(GEQRF_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgeqrf)(¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->TAU, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_geqrf(GEQRF_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgeqrf)(¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->TAU, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3316,22 +3372,26 @@ static inline fortran_int call_gqr(GQR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dorgqr)(¶ms->M, ¶ms->MC, ¶ms->MN, params->Q, ¶ms->LDA, params->TAU, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_gqr(GQR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zungqr)(¶ms->M, ¶ms->MC, ¶ms->MN, params->Q, 
¶ms->LDA, params->TAU, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3712,6 +3772,7 @@ static inline fortran_int call_gelsd(GELSD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(sgelsd)(¶ms->M, ¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->B, ¶ms->LDB, @@ -3720,6 +3781,7 @@ call_gelsd(GELSD_PARAMS_t *params) params->WORK, ¶ms->LWORK, params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3728,6 +3790,7 @@ static inline fortran_int call_gelsd(GELSD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgelsd)(¶ms->M, ¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->B, ¶ms->LDB, @@ -3736,6 +3799,7 @@ call_gelsd(GELSD_PARAMS_t *params) params->WORK, ¶ms->LWORK, params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3839,6 +3903,7 @@ static inline fortran_int call_gelsd(GELSD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cgelsd)(¶ms->M, ¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->B, ¶ms->LDB, @@ -3847,6 +3912,7 @@ call_gelsd(GELSD_PARAMS_t *params) params->WORK, ¶ms->LWORK, params->RWORK, (fortran_int*)params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3854,6 +3920,7 @@ static inline fortran_int call_gelsd(GELSD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgelsd)(¶ms->M, ¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->B, ¶ms->LDB, @@ -3862,6 +3929,7 @@ call_gelsd(GELSD_PARAMS_t *params) params->WORK, ¶ms->LWORK, params->RWORK, (fortran_int*)params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -4567,7 +4635,7 @@ addUfuncs(PyObject *dictionary) { /* -------------------------------------------------------------------------- */ - /* Module initialization stuff */ + /* Module initialization and state */ static PyMethodDef UMath_LinAlgMethods[] = { {NULL, NULL, 0, NULL} /* Sentinel */ @@ -4619,6 +4687,12 @@ PyMODINIT_FUNC PyInit__umath_linalg(void) return NULL; } + lapack_lite_lock = PyThread_allocate_lock(); + if (lapack_lite_lock == NULL) { + PyErr_NoMemory(); + return NULL; + } + #ifdef HAVE_BLAS_ILP64 PyDict_SetItemString(d, "_ilp64", Py_True); #else From 79bd9cc07692297515a4a99d105a21870d4f5c1a Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 19 Jun 2024 14:44:33 -0600 Subject: [PATCH 615/980] TST: add a test for lapack_lite thread safety --- numpy/_core/tests/test_multithreading.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 1511cfaf1982..ca8606ca6e88 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -71,3 +71,16 @@ def func(count): (alarge + blarge) - 2 run_threaded(func, 100, pass_count=True) + + +def test_eigvalsh_thread_safety(): + # if lapack isn't thread safe this will randomly segfault or error + # see gh-24512 + rng = np.random.RandomState(873699172) + matrices = ( + rng.random((5, 10, 10, 3, 3)), + rng.random((5, 10, 10, 3, 3)), + ) + + run_threaded(lambda i: np.linalg.eigvalsh(matrices[i]), 2, + pass_count=True) From 86c571147ca28a0aad3681aeba873c42f20656c4 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 19 Jun 2024 15:15:03 -0600 Subject: [PATCH 616/980] MAINT: add a release note --- doc/release/upcoming_changes/26750.improvement.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 doc/release/upcoming_changes/26750.improvement.rst diff --git a/doc/release/upcoming_changes/26750.improvement.rst b/doc/release/upcoming_changes/26750.improvement.rst new file mode 100644 
index 000000000000..c7beb85f8fbf --- /dev/null +++ b/doc/release/upcoming_changes/26750.improvement.rst @@ -0,0 +1,12 @@ +`lapack_lite` is thread safe +---------------------------- + +NumPy provides a minimal CPU-only version of LAPACK named ``lapack_lite`` that can +be used if no BLAS/LAPACK system is detected at build time. + +Until now, ``lapack_lite`` was not thread safe. Single-threaded use cases did +not hit any issues, but running linear algebra operations in multiple threads +could lead to errors, incorrect results, or seg faults due to data races. + +We have added a global lock, serializing access to ``lapack_lite`` in multiple +threads. From 63d6f65ee9b1b8132290e8ef445b96a99170bd48 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Mon, 5 Feb 2024 13:57:59 -0800 Subject: [PATCH 617/980] Use extern C in loops.h.src file for cpp files --- numpy/_core/src/umath/loops.h.src | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/loops.h.src b/numpy/_core/src/umath/loops.h.src index 55db18de4474..3cb689818ec8 100644 --- a/numpy/_core/src/umath/loops.h.src +++ b/numpy/_core/src/umath/loops.h.src @@ -10,6 +10,10 @@ #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN #endif +#ifdef __cplusplus +extern "C" { +#endif + /* ***************************************************************************** ** BOOLEAN LOOPS ** @@ -875,5 +879,7 @@ PyUFunc_OOO_O(char **args, npy_intp const *dimensions, npy_intp const *steps, vo ** END LOOPS ** ***************************************************************************** */ - +#ifdef __cplusplus +} +#endif #endif From 1fe89480f2024e61d63eb5c826e82a6929f4cb39 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Mon, 5 Feb 2024 13:58:31 -0800 Subject: [PATCH 618/980] ENH: Rewrite simd sin/cos f32 using Highway instrinsics --- numpy/_core/meson.build | 7 +- .../umath/loops_trigonometric.dispatch.c.src | 457 ------------------ .../umath/loops_trigonometric.dispatch.cpp | 300 ++++++++++++ 3 files changed, 304 insertions(+), 460 deletions(-) delete mode 100644 numpy/_core/src/umath/loops_trigonometric.dispatch.c.src create mode 100644 numpy/_core/src/umath/loops_trigonometric.dispatch.cpp diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index aba0b8212d1e..cfb4bd64ce92 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -944,9 +944,9 @@ foreach gen_mtargets : [ ], [ 'loops_trigonometric.dispatch.h', - src_file.process('src/umath/loops_trigonometric.dispatch.c.src'), + 'src/umath/loops_trigonometric.dispatch.cpp', [ - AVX512F, [AVX2, FMA3], + AVX512_SKX, [AVX2, FMA3], VSX4, VSX3, VSX2, NEON_VFPV4, VXE2, VXE @@ -1020,7 +1020,8 @@ foreach gen_mtargets : [ 'src/common', 'src/multiarray', 'src/npymath', - 'src/umath' + 'src/umath', + 'src/highway', ] ) if not is_variable('multiarray_umath_mtargets') diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.c.src b/numpy/_core/src/umath/loops_trigonometric.dispatch.c.src deleted file mode 100644 index 31de906098e3..000000000000 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.c.src +++ /dev/null @@ -1,457 +0,0 @@ -/*@targets - ** $maxopt baseline - ** (avx2 fma3) avx512f - ** vsx2 vsx3 vsx4 - ** neon_vfpv4 - ** vxe vxe2 - **/ -#include "numpy/npy_math.h" -#include "simd/simd.h" -#include "loops_utils.h" -#include "loops.h" -#include "fast_loop_macros.h" -/* - * TODO: - * - use vectorized version of Payne-Hanek style reduction for large elements or - * when there's no native FUSED support instead of fallback to libc - */ 
-#if NPY_SIMD_FMA3 // native support -/**begin repeat - * #check = F64, F32# - * #sfx = f64, f32# - * #enable = 0, 1# - */ -#if NPY_SIMD_@check@ && @enable@ -/* - * Vectorized Cody-Waite range reduction technique - * Performs the reduction step x* = x - y*C in three steps: - * 1) x* = x - y*c1 - * 2) x* = x - y*c2 - * 3) x* = x - y*c3 - * c1, c2 are exact floating points, c3 = C - c1 - c2 simulates higher precision - */ -NPY_FINLINE npyv_@sfx@ -simd_range_reduction_@sfx@(npyv_@sfx@ x, npyv_@sfx@ y, npyv_@sfx@ c1, npyv_@sfx@ c2, npyv_@sfx@ c3) -{ - npyv_@sfx@ reduced_x = npyv_muladd_@sfx@(y, c1, x); - reduced_x = npyv_muladd_@sfx@(y, c2, reduced_x); - reduced_x = npyv_muladd_@sfx@(y, c3, reduced_x); - return reduced_x; -} -#endif -/**end repeat**/ -/* Disable SIMD code and revert to libm: see - * https://mail.python.org/archives/list/numpy-discussion@python.org/thread/C6EYZZSR4EWGVKHAZXLE7IBILRMNVK7L/ - * for detailed discussion on this*/ -#if 0 // NPY_SIMD_F64 -/**begin repeat - * #op = cos, sin# - */ -#if defined(NPY_OS_WIN32) || defined(NPY_OS_CYGWIN) -NPY_FINLINE npyv_f64 -#else -NPY_NOINLINE npyv_f64 -#endif -simd_@op@_scalar_f64(npyv_f64 out, npy_uint64 cmp_bits) -{ - // MSVC doesn't compile with direct vector access, so we copy it here - // as we have no npyv_get_lane/npyv_set_lane intrinsics - npy_double NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) out_copy[npyv_nlanes_f64]; - npyv_storea_f64(out_copy, out); - - for (unsigned i = 0; i < npyv_nlanes_f64; ++i) { - if (cmp_bits & (1 << i)) { - out_copy[i] = npy_@op@(out_copy[i]); - } - } - - return npyv_loada_f64(out_copy); -} -/**end repeat**/ - -/* - * Approximate sine algorithm for x \in [-pi/2, pi/2] - * worst-case error is 3.5 ulp. - * abs error: 0x1.be222a58p-53 in [-pi/2, pi/2]. - */ -NPY_FINLINE npyv_f64 -simd_approx_sine_poly_f64(npyv_f64 r) -{ - const npyv_f64 poly1 = npyv_setall_f64(-0x1.9f4a9c8b21dc9p-41); - const npyv_f64 poly2 = npyv_setall_f64(0x1.60e88a10163f2p-33); - const npyv_f64 poly3 = npyv_setall_f64(-0x1.ae6361b7254e7p-26); - const npyv_f64 poly4 = npyv_setall_f64(0x1.71de382e8d62bp-19); - const npyv_f64 poly5 = npyv_setall_f64(-0x1.a01a019aeb4ffp-13); - const npyv_f64 poly6 = npyv_setall_f64(0x1.111111110b25ep-7); - const npyv_f64 poly7 = npyv_setall_f64(-0x1.55555555554c3p-3); - - npyv_f64 r2 = npyv_mul_f64(r, r); - npyv_f64 y = npyv_muladd_f64(poly1, r2, poly2); - y = npyv_muladd_f64(y, r2, poly3); - y = npyv_muladd_f64(y, r2, poly4); - y = npyv_muladd_f64(y, r2, poly5); - y = npyv_muladd_f64(y, r2, poly6); - y = npyv_muladd_f64(y, r2, poly7); - y = npyv_muladd_f64(npyv_mul_f64(y, r2), r, r); - - return y; -} - -/* r = |x| - n*pi (range reduction into -pi/2 .. pi/2). */ -NPY_FINLINE npyv_f64 -simd_range_reduction_pi2(npyv_f64 r, npyv_f64 n) { - const npyv_f64 pi1 = npyv_setall_f64(-0x1.921fb54442d18p+1); - const npyv_f64 pi2 = npyv_setall_f64(-0x1.1a62633145c06p-53); - const npyv_f64 pi3 = npyv_setall_f64(-0x1.c1cd129024e09p-106); - - return simd_range_reduction_f64(r, n, pi1, pi2, pi3); -} - -NPY_FINLINE npyv_b64 simd_sin_range_check_f64(npyv_u64 ir) { - const npyv_u64 tiny_bound = npyv_setall_u64(0x202); /* top12 (asuint64 (0x1p-509)). */ - const npyv_u64 simd_thresh = npyv_setall_u64(0x214); /* top12 (asuint64 (RangeVal)) - SIMD_TINY_BOUND. 
*/ - - return npyv_cmpge_u64(npyv_sub_u64(npyv_shri_u64(ir, 52), tiny_bound), simd_thresh); -} - -NPY_FINLINE npyv_b64 simd_cos_range_check_f64(npyv_u64 ir) { - const npyv_f64 range_val = npyv_setall_f64(0x1p23); - - return npyv_cmpge_u64(ir, npyv_reinterpret_u64_f64(range_val)); -} - -NPY_FINLINE npyv_f64 -simd_cos_poly_f64(npyv_f64 r, npyv_u64 ir, npyv_u64 sign) -{ - const npyv_f64 inv_pi = npyv_setall_f64(0x1.45f306dc9c883p-2); - const npyv_f64 half_pi = npyv_setall_f64(0x1.921fb54442d18p+0); - const npyv_f64 shift = npyv_setall_f64(0x1.8p52); - - /* n = rint((|x|+pi/2)/pi) - 0.5. */ - npyv_f64 n = npyv_muladd_f64(inv_pi, npyv_add_f64(r, half_pi), shift); - npyv_u64 odd = npyv_shli_u64(npyv_reinterpret_u64_f64(n), 63); - n = npyv_sub_f64(n, shift); - n = npyv_sub_f64(n, npyv_setall_f64(0.5)); - - /* r = |x| - n*pi (range reduction into -pi/2 .. pi/2). */ - r = simd_range_reduction_pi2(r, n); - - /* sin(r) poly approx. */ - npyv_f64 y = simd_approx_sine_poly_f64(r); - - /* sign. */ - return npyv_reinterpret_f64_u64(npyv_xor_u64(npyv_reinterpret_u64_f64(y), odd)); -} - -NPY_FINLINE npyv_f64 -simd_sin_poly_f64(npyv_f64 r, npyv_u64 ir, npyv_u64 sign) -{ - const npyv_f64 inv_pi = npyv_setall_f64(0x1.45f306dc9c883p-2); - const npyv_f64 shift = npyv_setall_f64(0x1.8p52); - - /* n = rint(|x|/pi). */ - npyv_f64 n = npyv_muladd_f64(inv_pi, r, shift); - npyv_u64 odd = npyv_shli_u64(npyv_reinterpret_u64_f64(n), 63); - n = npyv_sub_f64(n, shift); - - /* r = |x| - n*pi (range reduction into -pi/2 .. pi/2). */ - r = simd_range_reduction_pi2(r, n); - - /* sin(r) poly approx. */ - npyv_f64 y = simd_approx_sine_poly_f64(r); - - /* sign. */ - return npyv_reinterpret_f64_u64(npyv_xor_u64(npyv_xor_u64(npyv_reinterpret_u64_f64(y), sign), odd)); -} - -/**begin repeat - * #op = cos, sin# - */ -NPY_FINLINE void -simd_@op@_f64(const double *src, npy_intp ssrc, double *dst, npy_intp sdst, npy_intp len) -{ - const npyv_u64 abs_mask = npyv_setall_u64(0x7fffffffffffffff); - const int vstep = npyv_nlanes_f64; - - npyv_f64 out = npyv_zero_f64(); - npyv_f64 x_in; - - for (; len > 0; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) { - if (ssrc == 1) { - x_in = npyv_load_tillz_f64(src, len); - } else { - x_in = npyv_loadn_tillz_f64(src, ssrc, len); - } - - npyv_u64 ir = npyv_and_u64(npyv_reinterpret_u64_f64(x_in), abs_mask); - npyv_f64 r = npyv_reinterpret_f64_u64(ir); - npyv_u64 sign = npyv_and_u64(npyv_reinterpret_u64_f64(x_in), npyv_not_u64(abs_mask)); - - npyv_b64 cmp = simd_@op@_range_check_f64(ir); - /* If fenv exceptions are to be triggered correctly, set any special lanes - to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by - scalar loop later. 
*/ - r = npyv_select_f64(cmp, npyv_setall_f64(1.0), r); - - // Some in range, at least one calculation is useful - if (!npyv_all_b64(cmp)) { - out = simd_@op@_poly_f64(r, ir, sign); - } - - if (npyv_any_b64(cmp)) { - out = npyv_select_f64(cmp, x_in, out); - out = simd_@op@_scalar_f64(out, npyv_tobits_b64(cmp)); - } - - if (sdst == 1) { - npyv_store_till_f64(dst, len, out); - } else { - npyv_storen_till_f64(dst, sdst, len, out); - } - } - npyv_cleanup(); -} -/**end repeat**/ -#endif // NPY_SIMD_F64 - -#if NPY_SIMD_F32 -/* - * Approximate cosine algorithm for x \in [-PI/4, PI/4] - * Maximum ULP across all 32-bit floats = 0.875 - */ -NPY_FINLINE npyv_f32 -simd_cosine_poly_f32(npyv_f32 x2) -{ - const npyv_f32 invf8 = npyv_setall_f32(0x1.98e616p-16f); - const npyv_f32 invf6 = npyv_setall_f32(-0x1.6c06dcp-10f); - const npyv_f32 invf4 = npyv_setall_f32(0x1.55553cp-05f); - const npyv_f32 invf2 = npyv_setall_f32(-0x1.000000p-01f); - const npyv_f32 invf0 = npyv_setall_f32(0x1.000000p+00f); - - npyv_f32 r = npyv_muladd_f32(invf8, x2, invf6); - r = npyv_muladd_f32(r, x2, invf4); - r = npyv_muladd_f32(r, x2, invf2); - r = npyv_muladd_f32(r, x2, invf0); - return r; -} -/* - * Approximate sine algorithm for x \in [-PI/4, PI/4] - * Maximum ULP across all 32-bit floats = 0.647 - * Polynomial approximation based on unpublished work by T. Myklebust - */ -NPY_FINLINE npyv_f32 -simd_sine_poly_f32(npyv_f32 x, npyv_f32 x2) -{ - const npyv_f32 invf9 = npyv_setall_f32(0x1.7d3bbcp-19f); - const npyv_f32 invf7 = npyv_setall_f32(-0x1.a06bbap-13f); - const npyv_f32 invf5 = npyv_setall_f32(0x1.11119ap-07f); - const npyv_f32 invf3 = npyv_setall_f32(-0x1.555556p-03f); - - npyv_f32 r = npyv_muladd_f32(invf9, x2, invf7); - r = npyv_muladd_f32(r, x2, invf5); - r = npyv_muladd_f32(r, x2, invf3); - r = npyv_muladd_f32(r, x2, npyv_zero_f32()); - r = npyv_muladd_f32(r, x, x); - return r; -} -/* - * Vectorized approximate sine/cosine algorithms: The following code is a - * vectorized version of the algorithm presented here: - * https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751 - * (1) Load data in registers and generate mask for elements that are - * within range [-71476.0625f, 71476.0625f] for cosine and [-117435.992f, - * 117435.992f] for sine. - * (2) For elements within range, perform range reduction using Cody-Waite's - * method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, PI/4]. - * (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the quadrant k = - * int(y). - * (4) For elements outside that range, Cody-Waite reduction performs poorly - * leading to catastrophic cancellation. We compute cosine by calling glibc in - * a scalar fashion. - * (5) Vectorized implementation has a max ULP of 1.49 and performs at least - * 5-7x(x86) - 2.5-3x(Power) - 1-2x(Arm) faster than scalar implementations - * when magnitude of all elements in the array < 71476.0625f (117435.992f for sine). - * Worst case performance is when all the elements are large leading to about 1-2% reduction in - * performance. 
- */ -typedef enum -{ - SIMD_COMPUTE_SIN, - SIMD_COMPUTE_COS -} SIMD_TRIG_OP; - -static void SIMD_MSVC_NOINLINE -simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, - npy_intp len, SIMD_TRIG_OP trig_op) -{ - // Load up frequently used constants - const npyv_f32 zerosf = npyv_zero_f32(); - const npyv_s32 ones = npyv_setall_s32(1); - const npyv_s32 twos = npyv_setall_s32(2); - const npyv_f32 two_over_pi = npyv_setall_f32(0x1.45f306p-1f); - const npyv_f32 codyw_pio2_highf = npyv_setall_f32(-0x1.921fb0p+00f); - const npyv_f32 codyw_pio2_medf = npyv_setall_f32(-0x1.5110b4p-22f); - const npyv_f32 codyw_pio2_lowf = npyv_setall_f32(-0x1.846988p-48f); - const npyv_f32 rint_cvt_magic = npyv_setall_f32(0x1.800000p+23f); - // Cody-Waite's range - float max_codi = 117435.992f; - if (trig_op == SIMD_COMPUTE_COS) { - max_codi = 71476.0625f; - } - const npyv_f32 max_cody = npyv_setall_f32(max_codi); - const int vstep = npyv_nlanes_f32; - - for (; len > 0; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) { - npyv_f32 x_in; - if (ssrc == 1) { - x_in = npyv_load_tillz_f32(src, len); - } else { - x_in = npyv_loadn_tillz_f32(src, ssrc, len); - } - npyv_b32 nnan_mask = npyv_notnan_f32(x_in); - #if NPY_SIMD_CMPSIGNAL - // Eliminate NaN to avoid FP invalid exception - x_in = npyv_and_f32(x_in, npyv_reinterpret_f32_u32(npyv_cvt_u32_b32(nnan_mask))); - #endif - npyv_b32 simd_mask = npyv_cmple_f32(npyv_abs_f32(x_in), max_cody); - npy_uint64 simd_maski = npyv_tobits_b32(simd_mask); - /* - * For elements outside of this range, Cody-Waite's range reduction - * becomes inaccurate and we will call libc to compute cosine for - * these numbers - */ - if (simd_maski != 0) { - npyv_f32 x = npyv_select_f32(npyv_and_b32(nnan_mask, simd_mask), x_in, zerosf); - - npyv_f32 quadrant = npyv_mul_f32(x, two_over_pi); - // round to nearest, -0.0f -> +0.0f, and |a| must be <= 0x1.0p+22 - quadrant = npyv_add_f32(quadrant, rint_cvt_magic); - quadrant = npyv_sub_f32(quadrant, rint_cvt_magic); - - // Cody-Waite's range reduction algorithm - npyv_f32 reduced_x = simd_range_reduction_f32( - x, quadrant, codyw_pio2_highf, codyw_pio2_medf, codyw_pio2_lowf - ); - npyv_f32 reduced_x2 = npyv_square_f32(reduced_x); - - // compute cosine and sine - npyv_f32 cos = simd_cosine_poly_f32(reduced_x2); - npyv_f32 sin = simd_sine_poly_f32(reduced_x, reduced_x2); - - npyv_s32 iquadrant = npyv_round_s32_f32(quadrant); - if (trig_op == SIMD_COMPUTE_COS) { - iquadrant = npyv_add_s32(iquadrant, ones); - } - // blend sin and cos based on the quadrant - npyv_b32 sine_mask = npyv_cmpeq_s32(npyv_and_s32(iquadrant, ones), npyv_zero_s32()); - cos = npyv_select_f32(sine_mask, sin, cos); - - // multiply by -1 for appropriate elements - npyv_b32 negate_mask = npyv_cmpeq_s32(npyv_and_s32(iquadrant, twos), twos); - cos = npyv_ifsub_f32(negate_mask, zerosf, cos, cos); - cos = npyv_select_f32(nnan_mask, cos, npyv_setall_f32(NPY_NANF)); - - if (sdst == 1) { - npyv_store_till_f32(dst, len, cos); - } else { - npyv_storen_till_f32(dst, sdst, len, cos); - } - } - if (simd_maski != (npy_uint64)((1 << vstep) - 1)) { - float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[npyv_nlanes_f32]; - npyv_storea_f32(ip_fback, x_in); - - // process elements using libc for large elements - if (trig_op == SIMD_COMPUTE_COS) { - for (unsigned i = 0; i < npyv_nlanes_f32; ++i) { - if ((simd_maski >> i) & 1) { - continue; - } - dst[sdst*i] = npy_cosf(ip_fback[i]); - } - } - else { - for (unsigned i = 0; i < npyv_nlanes_f32; ++i) { - if ((simd_maski >> i) & 1) { - continue; - } 
- dst[sdst*i] = npy_sinf(ip_fback[i]); - } - } - } - } - npyv_cleanup(); -} -#endif // NPY_SIMD_FP32 -#endif // NYP_SIMD_FMA3 - -/**begin repeat - * #func = cos, sin# - */ -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ - /* Disable SIMD code and revert to libm: see - * https://mail.python.org/archives/list/numpy-discussion@python.org/thread/C6EYZZSR4EWGVKHAZXLE7IBILRMNVK7L/ - * for detailed discussion on this*/ -//#if NPY_SIMD_F64 && NPY_SIMD_FMA3 -#if 0 - const double *src = (double*)args[0]; - double *dst = (double*)args[1]; - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; - npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - - if (is_mem_overlap(src, steps[0], dst, steps[1], len) || - !npyv_loadable_stride_f64(ssrc) || !npyv_storable_stride_f64(sdst) - ) { - for (; len > 0; --len, src += ssrc, dst += sdst) { - simd_@func@_f64(src, 1, dst, 1, 1); - } - } else { - simd_@func@_f64(src, ssrc, dst, sdst, len); - } -#else - UNARY_LOOP { - const npy_double in1 = *(npy_double *)ip1; - *(npy_double *)op1 = npy_@func@(in1); - } -#endif -} -/**end repeat**/ - -/**begin repeat - * #func = sin, cos# - * #enum = SIMD_COMPUTE_SIN, SIMD_COMPUTE_COS# - */ -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_@func@) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ -#if NPY_SIMD_F32 && NPY_SIMD_FMA3 - const npy_float *src = (npy_float*)args[0]; - npy_float *dst = (npy_float*)args[1]; - - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; - npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - if (is_mem_overlap(src, steps[0], dst, steps[1], len) || - !npyv_loadable_stride_f32(ssrc) || !npyv_storable_stride_f32(sdst) - ) { - for (; len > 0; --len, src += ssrc, dst += sdst) { - simd_sincos_f32(src, 1, dst, 1, 1, @enum@); - } - } else { - simd_sincos_f32(src, ssrc, dst, sdst, len, @enum@); - } -#else - UNARY_LOOP { - const npy_float in1 = *(npy_float *)ip1; - *(npy_float *)op1 = npy_@func@f(in1); - } -#endif -} -/**end repeat**/ diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp new file mode 100644 index 000000000000..0399605d22ae --- /dev/null +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -0,0 +1,300 @@ +#include "numpy/npy_math.h" +#include "simd/simd.h" +#include "loops_utils.h" +#include "loops.h" +#include "fast_loop_macros.h" +#include +namespace hn = hwy::HWY_NAMESPACE; + +/* + * Vectorized approximate sine/cosine algorithms: The following code is a + * vectorized version of the algorithm presented here: + * https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751 + * (1) Load data in registers and generate mask for elements that are within + * range [-71476.0625f, 71476.0625f] for cosine and [-117435.992f, 117435.992f] + * for sine. + * (2) For elements within range, perform range reduction using + * Cody-Waite's method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, + * PI/4]. + * (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the + * quadrant k = int(y). 
+ * (4) For elements outside that range, Cody-Waite + * reduction performs poorly leading to catastrophic cancellation. We compute + * cosine by calling glibc in a scalar fashion. + * (5) Vectorized implementation + * has a max ULP of 1.49 and performs at least 5-7x(x86) - 2.5-3x(Power) - + * 1-2x(Arm) faster than scalar implementations when magnitude of all elements + * in the array < 71476.0625f (117435.992f for sine). Worst case performance + * is when all the elements are large leading to about 1-2% reduction in + * performance. + * TODO: use vectorized version of Payne-Hanek style reduction for large + * elements or when there's no native FUSED support instead of fallback to libc + */ + +#if NPY_SIMD_FMA3 // native support +typedef enum +{ + SIMD_COMPUTE_SIN, + SIMD_COMPUTE_COS +} SIMD_TRIG_OP; + +const hn::ScalableTag f32; +const hn::ScalableTag s32; +using vec_f32 = hn::Vec; +using vec_s32 = hn::Vec; +using opmask_t = hn::Mask; + +NPY_FINLINE vec_f32 +simd_range_reduction_f32(vec_f32 x, vec_f32 y, vec_f32 c1, vec_f32 c2, vec_f32 c3) +{ + vec_f32 reduced_x = hn::MulAdd(y, c1, x); + reduced_x = hn::MulAdd(y, c2, reduced_x); + reduced_x = hn::MulAdd(y, c3, reduced_x); + return reduced_x; +} + +NPY_FINLINE vec_f32 +simd_cosine_poly_f32(vec_f32 x2) +{ + const vec_f32 invf8 = hn::Set(f32, 0x1.98e616p-16f); + const vec_f32 invf6 = hn::Set(f32, -0x1.6c06dcp-10f); + const vec_f32 invf4 = hn::Set(f32, 0x1.55553cp-05f); + const vec_f32 invf2 = hn::Set(f32, -0x1.000000p-01f); + const vec_f32 invf0 = hn::Set(f32, 0x1.000000p+00f); + + vec_f32 r = hn::MulAdd(invf8, x2, invf6); + r = hn::MulAdd(r, x2, invf4); + r = hn::MulAdd(r, x2, invf2); + r = hn::MulAdd(r, x2, invf0); + return r; +} +/* + * Approximate sine algorithm for x \in [-PI/4, PI/4] + * Maximum ULP across all 32-bit floats = 0.647 + * Polynomial approximation based on unpublished work by T. 
Myklebust + */ +NPY_FINLINE vec_f32 +simd_sine_poly_f32(vec_f32 x, vec_f32 x2) +{ + const vec_f32 invf9 = hn::Set(f32, 0x1.7d3bbcp-19f); + const vec_f32 invf7 = hn::Set(f32, -0x1.a06bbap-13f); + const vec_f32 invf5 = hn::Set(f32, 0x1.11119ap-07f); + const vec_f32 invf3 = hn::Set(f32, -0x1.555556p-03f); + + vec_f32 r = hn::MulAdd(invf9, x2, invf7); + r = hn::MulAdd(r, x2, invf5); + r = hn::MulAdd(r, x2, invf3); + r = hn::MulAdd(r, x2, hn::Zero(f32)); + r = hn::MulAdd(r, x, x); + return r; +} + +NPY_FINLINE vec_f32 +GatherIndexN(const float* src, npy_intp ssrc, npy_intp len) +{ + float temp[hn::Lanes(f32)] = { 0.0f }; + for (auto ii = 0; ii < std::min(len, (npy_intp)hn::Lanes(f32)); ++ii) { + temp[ii] = src[ii * ssrc]; + } + return hn::LoadU(f32, temp); +} + +NPY_FINLINE void +ScatterIndexN(vec_f32 vec, float* dst, npy_intp sdst, npy_intp len) +{ + float temp[hn::Lanes(f32)]; + hn::StoreU(vec, f32, temp); + for (auto ii = 0; ii < std::min(len, (npy_intp)hn::Lanes(f32)); ++ii) { + dst[ii * sdst] = temp[ii]; + } +} + +static void SIMD_MSVC_NOINLINE +simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, + npy_intp len, SIMD_TRIG_OP trig_op) +{ + // Load up frequently used constants + const vec_f32 zerosf = hn::Zero(f32); + const vec_s32 ones = hn::Set(s32, 1); + const vec_s32 twos = hn::Set(s32, 2); + const vec_f32 two_over_pi = hn::Set(f32, 0x1.45f306p-1f); + const vec_f32 codyw_pio2_highf = hn::Set(f32, -0x1.921fb0p+00f); + const vec_f32 codyw_pio2_medf = hn::Set(f32, -0x1.5110b4p-22f); + const vec_f32 codyw_pio2_lowf = hn::Set(f32, -0x1.846988p-48f); + const vec_f32 rint_cvt_magic = hn::Set(f32, 0x1.800000p+23f); + // Cody-Waite's range + float max_codi = 117435.992f; + if (trig_op == SIMD_COMPUTE_COS) { + max_codi = 71476.0625f; + } + const vec_f32 max_cody = hn::Set(f32, max_codi); + + const int lanes = hn::Lanes(f32); + //npy_intp load_index[lanes/2]; + //for (auto i = 0; i < lanes; ++i) { + // load_index[i] = i * ssrc; + //} + //vec_s32 vec_lindex = hn::LoadU(s32, load_index); + //npy_intp store_index[lanes/2]; + //for (auto i = 0; i < lanes; ++i) { + // store_index[i] = i * sdst; + //} + //vec_s32 vec_sindex = hn::LoadU(s32, store_index); + + for (; len > 0; len -= lanes, src += ssrc*lanes, dst += sdst*lanes) { + vec_f32 x_in; + if (ssrc == 1) { + x_in = hn::LoadN(f32, src, len); + } else { + x_in = GatherIndexN(src, ssrc, len); + } + opmask_t nnan_mask = hn::Not(hn::IsNaN(x_in)); + #if NPY_SIMD_CMPSIGNAL + // Eliminate NaN to avoid FP invalid exception + x_in = hn::IfThenElse(nnan_mask, zerosf, x_in); + #endif + opmask_t simd_mask = hn::Le(hn::Abs(x_in), max_cody); + npy_uint64 simd_maski; + hn::StoreMaskBits(f32, simd_mask, (uint8_t*)&simd_maski); + /* + * For elements outside of this range, Cody-Waite's range reduction + * becomes inaccurate and we will call libc to compute cosine for + * these numbers + */ + if (!hn::AllFalse(f32, simd_mask)) { + vec_f32 x = hn::IfThenElse(hn::And(nnan_mask, simd_mask), x_in, zerosf); + + vec_f32 quadrant = hn::Mul(x, two_over_pi); + // round to nearest, -0.0f -> +0.0f, and |a| must be <= 0x1.0p+22 + quadrant = hn::Add(quadrant, rint_cvt_magic); + quadrant = hn::Sub(quadrant, rint_cvt_magic); + + // Cody-Waite's range reduction algorithm + vec_f32 reduced_x = simd_range_reduction_f32( + x, quadrant, codyw_pio2_highf, codyw_pio2_medf, codyw_pio2_lowf + ); + vec_f32 reduced_x2 = hn::Mul(reduced_x, reduced_x); + + // compute cosine and sine + vec_f32 cos = simd_cosine_poly_f32(reduced_x2); + vec_f32 sin = 
simd_sine_poly_f32(reduced_x, reduced_x2); + + vec_s32 iquadrant = hn::NearestInt(quadrant); + if (trig_op == SIMD_COMPUTE_COS) { + iquadrant = hn::Add(iquadrant, ones); + } + // blend sin and cos based on the quadrant + opmask_t sine_mask = hn::RebindMask(f32, hn::Eq(hn::And(iquadrant, ones), hn::Zero(s32))); + cos = hn::IfThenElse(sine_mask, sin, cos); + + // multiply by -1 for appropriate elements + opmask_t negate_mask = hn::RebindMask(f32, hn::Eq(hn::And(iquadrant, twos), twos)); + cos = hn::MaskedSubOr(cos, negate_mask, zerosf, cos); + cos = hn::IfThenElse(nnan_mask, cos, hn::Set(f32, NPY_NANF)); + + if (sdst == 1) { + hn::StoreN(cos, f32, dst, len); + } else { + ScatterIndexN(cos, dst, sdst, len); + } + } + if (simd_maski != (npy_uint64)((1 << lanes) - 1)) { + float ip_fback[hn::Lanes(f32)]; + hn::StoreU(x_in, f32, ip_fback); + + // process elements using libc for large elements + if (trig_op == SIMD_COMPUTE_COS) { + for (unsigned i = 0; i < hn::Lanes(f32); ++i) { + if ((simd_maski >> i) & 1) { + continue; + } + dst[sdst*i] = npy_cosf(ip_fback[i]); + } + } + else { + for (unsigned i = 0; i < hn::Lanes(f32); ++i) { + if ((simd_maski >> i) & 1) { + continue; + } + dst[sdst*i] = npy_sinf(ip_fback[i]); + } + } + } + npyv_cleanup(); + } +} +#endif // NPY_SIMD_FMA3 + +/* Disable SIMD code sin/cos f64 and revert to libm: see + * https://mail.python.org/archives/list/numpy-discussion@python.org/thread/C6EYZZSR4EWGVKHAZXLE7IBILRMNVK7L/ + * for detailed discussion on this*/ +#define DISPATCH_DOUBLE_FUNC(func) \ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_##func) \ +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) \ +{ \ + UNARY_LOOP { \ + const npy_double in1 = *(npy_double *)ip1; \ + *(npy_double *)op1 = npy_##func(in1); \ + } \ +} \ + +DISPATCH_DOUBLE_FUNC(sin) +DISPATCH_DOUBLE_FUNC(cos) + +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_sin) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +{ +#if NPY_SIMD_F32 && NPY_SIMD_FMA3 + const npy_float *src = (npy_float*)args[0]; + npy_float *dst = (npy_float*)args[1]; + + const int lsize = sizeof(src[0]); + const npy_intp ssrc = steps[0] / lsize; + const npy_intp sdst = steps[1] / lsize; + npy_intp len = dimensions[0]; + assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); + if (is_mem_overlap(src, steps[0], dst, steps[1], len) || + !npyv_loadable_stride_f32(ssrc) || !npyv_storable_stride_f32(sdst) + ) { + for (; len > 0; --len, src += ssrc, dst += sdst) { + simd_sincos_f32(src, 1, dst, 1, 1, SIMD_COMPUTE_SIN); + } + } else { + simd_sincos_f32(src, ssrc, dst, sdst, len, SIMD_COMPUTE_SIN); + } +#else + UNARY_LOOP { + const npy_float in1 = *(npy_float *)ip1; + *(npy_float *)op1 = npy_sinf(in1); + } +#endif +} + +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_cos) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +{ +#if NPY_SIMD_F32 && NPY_SIMD_FMA3 + const npy_float *src = (npy_float*)args[0]; + npy_float *dst = (npy_float*)args[1]; + + const int lsize = sizeof(src[0]); + const npy_intp ssrc = steps[0] / lsize; + const npy_intp sdst = steps[1] / lsize; + npy_intp len = dimensions[0]; + assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); + if (is_mem_overlap(src, steps[0], dst, steps[1], len) || + !npyv_loadable_stride_f32(ssrc) || !npyv_storable_stride_f32(sdst) + ) { + for (; len > 0; --len, src += ssrc, dst += sdst) { + simd_sincos_f32(src, 1, dst, 1, 1, SIMD_COMPUTE_COS); + } + } else 
{ + simd_sincos_f32(src, ssrc, dst, sdst, len, SIMD_COMPUTE_COS); + } +#else + UNARY_LOOP { + const npy_float in1 = *(npy_float *)ip1; + *(npy_float *)op1 = npy_cosf(in1); + } +#endif +} From c1b35d2b7f2fde6198956af3722bafbee3eeeeab Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Tue, 6 Feb 2024 14:36:22 -0800 Subject: [PATCH 619/980] Eliminate NAN in sincos --- numpy/_core/src/umath/loops_trigonometric.dispatch.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index 0399605d22ae..7614c5a5cd25 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -149,10 +149,8 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, x_in = GatherIndexN(src, ssrc, len); } opmask_t nnan_mask = hn::Not(hn::IsNaN(x_in)); - #if NPY_SIMD_CMPSIGNAL // Eliminate NaN to avoid FP invalid exception - x_in = hn::IfThenElse(nnan_mask, zerosf, x_in); - #endif + x_in = hn::IfThenElse(nnan_mask, x_in, zerosf); opmask_t simd_mask = hn::Le(hn::Abs(x_in), max_cody); npy_uint64 simd_maski; hn::StoreMaskBits(f32, simd_mask, (uint8_t*)&simd_maski); From f69db54cf97e5b71759da44f37357f595130acaf Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Tue, 6 Feb 2024 15:22:10 -0800 Subject: [PATCH 620/980] use -march=skylake-avx512 and -march=skylake --- meson_cpu/x86/meson.build | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 598f80ff0c89..5b3f04bdcdb8 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -60,7 +60,7 @@ FMA3 = mod_features.new( test_code: files(source_root + '/numpy/distutils/checks/cpu_fma3.c')[0] ) AVX2 = mod_features.new( - 'AVX2', 25, implies: F16C, args: '-mavx2', + 'AVX2', 25, implies: F16C, args: '-march=skylake', test_code: files(source_root + '/numpy/distutils/checks/cpu_avx2.c')[0] ) # 25-40 left as margin for any extra features @@ -93,7 +93,7 @@ AVX512_KNM = mod_features.new( ) AVX512_SKX = mod_features.new( 'AVX512_SKX', 50, implies: AVX512CD, - args: ['-mavx512vl', '-mavx512bw', '-mavx512dq'], + args: ['-march=skylake-avx512'], group: ['AVX512VL', 'AVX512BW', 'AVX512DQ'], test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_skx.c')[0], extra_tests: { From 0c0826146a5d0dc415114119d65f3940aacceadb Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 28 Feb 2024 09:49:02 -0800 Subject: [PATCH 621/980] Use references for function arguements --- .../_core/src/umath/loops_trigonometric.dispatch.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index 7614c5a5cd25..b23eff64f8bf 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -45,7 +45,7 @@ using vec_s32 = hn::Vec; using opmask_t = hn::Mask; NPY_FINLINE vec_f32 -simd_range_reduction_f32(vec_f32 x, vec_f32 y, vec_f32 c1, vec_f32 c2, vec_f32 c3) +simd_range_reduction_f32(vec_f32& x, vec_f32& y, const vec_f32& c1, const vec_f32& c2, const vec_f32& c3) { vec_f32 reduced_x = hn::MulAdd(y, c1, x); reduced_x = hn::MulAdd(y, c2, reduced_x); @@ -54,7 +54,7 @@ simd_range_reduction_f32(vec_f32 x, vec_f32 y, vec_f32 c1, vec_f32 c2, vec_f32 c } NPY_FINLINE vec_f32 
-simd_cosine_poly_f32(vec_f32 x2) +simd_cosine_poly_f32(vec_f32& x2) { const vec_f32 invf8 = hn::Set(f32, 0x1.98e616p-16f); const vec_f32 invf6 = hn::Set(f32, -0x1.6c06dcp-10f); @@ -74,7 +74,7 @@ simd_cosine_poly_f32(vec_f32 x2) * Polynomial approximation based on unpublished work by T. Myklebust */ NPY_FINLINE vec_f32 -simd_sine_poly_f32(vec_f32 x, vec_f32 x2) +simd_sine_poly_f32(vec_f32& x, vec_f32& x2) { const vec_f32 invf9 = hn::Set(f32, 0x1.7d3bbcp-19f); const vec_f32 invf7 = hn::Set(f32, -0x1.a06bbap-13f); @@ -152,8 +152,6 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, // Eliminate NaN to avoid FP invalid exception x_in = hn::IfThenElse(nnan_mask, x_in, zerosf); opmask_t simd_mask = hn::Le(hn::Abs(x_in), max_cody); - npy_uint64 simd_maski; - hn::StoreMaskBits(f32, simd_mask, (uint8_t*)&simd_maski); /* * For elements outside of this range, Cody-Waite's range reduction * becomes inaccurate and we will call libc to compute cosine for @@ -196,7 +194,9 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, ScatterIndexN(cos, dst, sdst, len); } } - if (simd_maski != (npy_uint64)((1 << lanes) - 1)) { + if (!hn::AllTrue(f32, simd_mask)) { + npy_uint64 simd_maski; + hn::StoreMaskBits(f32, simd_mask, (uint8_t*)&simd_maski); float ip_fback[hn::Lanes(f32)]; hn::StoreU(x_in, f32, ip_fback); From c56abf034aad87e78286ad5fd8feb44a4ef878d6 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 28 Feb 2024 12:41:37 -0800 Subject: [PATCH 622/980] Use aligned store --- numpy/_core/src/umath/loops_trigonometric.dispatch.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index b23eff64f8bf..fc40209e4d7a 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -197,8 +197,8 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, if (!hn::AllTrue(f32, simd_mask)) { npy_uint64 simd_maski; hn::StoreMaskBits(f32, simd_mask, (uint8_t*)&simd_maski); - float ip_fback[hn::Lanes(f32)]; - hn::StoreU(x_in, f32, ip_fback); + float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[hn::Lanes(f32)]; + hn::Store(x_in, f32, ip_fback); // process elements using libc for large elements if (trig_op == SIMD_COMPUTE_COS) { From f3132864d3c6b2dbb0ebb389ec25c163daec6be2 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 20 Mar 2024 13:10:19 -0700 Subject: [PATCH 623/980] MAINT: Add HWY_ATTR to locally defined functions --- .../_core/src/umath/loops_trigonometric.dispatch.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index fc40209e4d7a..d3a672d309ff 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -44,7 +44,7 @@ using vec_f32 = hn::Vec; using vec_s32 = hn::Vec; using opmask_t = hn::Mask; -NPY_FINLINE vec_f32 +NPY_FINLINE HWY_ATTR vec_f32 simd_range_reduction_f32(vec_f32& x, vec_f32& y, const vec_f32& c1, const vec_f32& c2, const vec_f32& c3) { vec_f32 reduced_x = hn::MulAdd(y, c1, x); @@ -53,7 +53,7 @@ simd_range_reduction_f32(vec_f32& x, vec_f32& y, const vec_f32& c1, const vec_f3 return reduced_x; } -NPY_FINLINE vec_f32 +NPY_FINLINE HWY_ATTR vec_f32 simd_cosine_poly_f32(vec_f32& x2) { const vec_f32 invf8 
= hn::Set(f32, 0x1.98e616p-16f); @@ -73,7 +73,7 @@ simd_cosine_poly_f32(vec_f32& x2) * Maximum ULP across all 32-bit floats = 0.647 * Polynomial approximation based on unpublished work by T. Myklebust */ -NPY_FINLINE vec_f32 +NPY_FINLINE HWY_ATTR vec_f32 simd_sine_poly_f32(vec_f32& x, vec_f32& x2) { const vec_f32 invf9 = hn::Set(f32, 0x1.7d3bbcp-19f); @@ -89,7 +89,7 @@ simd_sine_poly_f32(vec_f32& x, vec_f32& x2) return r; } -NPY_FINLINE vec_f32 +NPY_FINLINE HWY_ATTR vec_f32 GatherIndexN(const float* src, npy_intp ssrc, npy_intp len) { float temp[hn::Lanes(f32)] = { 0.0f }; @@ -99,7 +99,7 @@ GatherIndexN(const float* src, npy_intp ssrc, npy_intp len) return hn::LoadU(f32, temp); } -NPY_FINLINE void +NPY_FINLINE HWY_ATTR void ScatterIndexN(vec_f32 vec, float* dst, npy_intp sdst, npy_intp len) { float temp[hn::Lanes(f32)]; @@ -109,7 +109,7 @@ ScatterIndexN(vec_f32 vec, float* dst, npy_intp sdst, npy_intp len) } } -static void SIMD_MSVC_NOINLINE +static void HWY_ATTR SIMD_MSVC_NOINLINE simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, npy_intp len, SIMD_TRIG_OP trig_op) { From a46b082627eff020ac5ee740f55c71e5e54b3915 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 20 Mar 2024 13:42:18 -0700 Subject: [PATCH 624/980] AVX2 now includes FMA3 --- meson_cpu/x86/meson.build | 7 ++++--- numpy/_core/src/common/npy_cpu_features.c | 6 ++++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 5b3f04bdcdb8..8c7a0fb59a57 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -59,13 +59,14 @@ FMA3 = mod_features.new( 'FMA3', 24, implies: F16C, args: '-mfma', test_code: files(source_root + '/numpy/distutils/checks/cpu_fma3.c')[0] ) +# match this to HWY_AVX2 AVX2 = mod_features.new( - 'AVX2', 25, implies: F16C, args: '-march=skylake', + 'AVX2', 25, implies: FMA3, args: ['-mavx2', '-maes', '-mpclmul', '-mbmi', '-mbmi2'], test_code: files(source_root + '/numpy/distutils/checks/cpu_avx2.c')[0] ) # 25-40 left as margin for any extra features AVX512F = mod_features.new( - 'AVX512F', 40, implies: [FMA3, AVX2], + 'AVX512F', 40, implies: [AVX2], # Disables mmx because of stack corruption that may happen during mask # conversions. 
# TODO (seiko2plus): provide more clarification @@ -93,7 +94,7 @@ AVX512_KNM = mod_features.new( ) AVX512_SKX = mod_features.new( 'AVX512_SKX', 50, implies: AVX512CD, - args: ['-march=skylake-avx512'], + args: ['-mavx512vl', '-mavx512bw', '-mavx512dq'], group: ['AVX512VL', 'AVX512BW', 'AVX512DQ'], test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_skx.c')[0], extra_tests: { diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 04a5449e5b8e..43f2c435a140 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -474,6 +474,8 @@ npy__cpu_init_features(void) // third call to the cpuid to get extended AVX2 & AVX512 feature bits npy__cpu_cpuid(reg, 7); npy__cpu_have[NPY_CPU_FEATURE_AVX2] = (reg[1] & (1 << 5)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_AVX2] = npy__cpu_have[NPY_CPU_FEATURE_AVX2] && + npy__cpu_have[NPY_CPU_FEATURE_FMA3]; if (!npy__cpu_have[NPY_CPU_FEATURE_AVX2]) return; // detect AVX2 & FMA3 @@ -641,7 +643,7 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); - + unsigned int hwcap = getauxval(AT_HWCAP); if ((hwcap & HWCAP_S390_VX) == 0) { return; @@ -653,7 +655,7 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_VXE2] = 1; return; } - + npy__cpu_have[NPY_CPU_FEATURE_VXE] = (hwcap & HWCAP_S390_VXE) != 0; npy__cpu_have[NPY_CPU_FEATURE_VX] = 1; From 7d6cc6d70b307b506dfcb0d65aa922fd031e171b Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Fri, 5 Apr 2024 12:58:44 -0700 Subject: [PATCH 625/980] Update highway to latest: fix build failure on cygwin --- numpy/_core/src/highway | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 3af6ba57bf82..02e497e0ccf1 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 3af6ba57bf82c861870f92f0483149439007d652 +Subproject commit 02e497e0ccf1c2b2285f94efd40d4eb6c930b3b8 From ab62b9fc10472d552f862433cee85344385d4b62 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 24 Apr 2024 13:30:38 -0700 Subject: [PATCH 626/980] MAINT: Use hwy gather/scatter and hwy macros --- .../umath/loops_trigonometric.dispatch.cpp | 51 ++++--------------- 1 file changed, 11 insertions(+), 40 deletions(-) diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index d3a672d309ff..273997197f9f 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -1,4 +1,3 @@ -#include "numpy/npy_math.h" #include "simd/simd.h" #include "loops_utils.h" #include "loops.h" @@ -31,7 +30,7 @@ namespace hn = hwy::HWY_NAMESPACE; * elements or when there's no native FUSED support instead of fallback to libc */ -#if NPY_SIMD_FMA3 // native support +#if HWY_NATIVE_FMA // native support typedef enum { SIMD_COMPUTE_SIN, @@ -44,7 +43,7 @@ using vec_f32 = hn::Vec; using vec_s32 = hn::Vec; using opmask_t = hn::Mask; -NPY_FINLINE HWY_ATTR vec_f32 +HWY_INLINE HWY_ATTR vec_f32 simd_range_reduction_f32(vec_f32& x, vec_f32& y, const vec_f32& c1, const vec_f32& c2, const vec_f32& c3) { vec_f32 reduced_x = hn::MulAdd(y, c1, x); @@ -53,7 +52,7 @@ simd_range_reduction_f32(vec_f32& x, vec_f32& y, const vec_f32& c1, const vec_f3 return reduced_x; } -NPY_FINLINE HWY_ATTR vec_f32 +HWY_INLINE HWY_ATTR vec_f32 simd_cosine_poly_f32(vec_f32& x2) { const 
vec_f32 invf8 = hn::Set(f32, 0x1.98e616p-16f); @@ -73,7 +72,7 @@ simd_cosine_poly_f32(vec_f32& x2) * Maximum ULP across all 32-bit floats = 0.647 * Polynomial approximation based on unpublished work by T. Myklebust */ -NPY_FINLINE HWY_ATTR vec_f32 +HWY_INLINE HWY_ATTR vec_f32 simd_sine_poly_f32(vec_f32& x, vec_f32& x2) { const vec_f32 invf9 = hn::Set(f32, 0x1.7d3bbcp-19f); @@ -89,26 +88,6 @@ simd_sine_poly_f32(vec_f32& x, vec_f32& x2) return r; } -NPY_FINLINE HWY_ATTR vec_f32 -GatherIndexN(const float* src, npy_intp ssrc, npy_intp len) -{ - float temp[hn::Lanes(f32)] = { 0.0f }; - for (auto ii = 0; ii < std::min(len, (npy_intp)hn::Lanes(f32)); ++ii) { - temp[ii] = src[ii * ssrc]; - } - return hn::LoadU(f32, temp); -} - -NPY_FINLINE HWY_ATTR void -ScatterIndexN(vec_f32 vec, float* dst, npy_intp sdst, npy_intp len) -{ - float temp[hn::Lanes(f32)]; - hn::StoreU(vec, f32, temp); - for (auto ii = 0; ii < std::min(len, (npy_intp)hn::Lanes(f32)); ++ii) { - dst[ii * sdst] = temp[ii]; - } -} - static void HWY_ATTR SIMD_MSVC_NOINLINE simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, npy_intp len, SIMD_TRIG_OP trig_op) @@ -130,23 +109,15 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, const vec_f32 max_cody = hn::Set(f32, max_codi); const int lanes = hn::Lanes(f32); - //npy_intp load_index[lanes/2]; - //for (auto i = 0; i < lanes; ++i) { - // load_index[i] = i * ssrc; - //} - //vec_s32 vec_lindex = hn::LoadU(s32, load_index); - //npy_intp store_index[lanes/2]; - //for (auto i = 0; i < lanes; ++i) { - // store_index[i] = i * sdst; - //} - //vec_s32 vec_sindex = hn::LoadU(s32, store_index); + const vec_s32 src_index = hn::Mul(hn::Iota(s32, 0), hn::Set(s32, ssrc)); + const vec_s32 dst_index = hn::Mul(hn::Iota(s32, 0), hn::Set(s32, sdst)); for (; len > 0; len -= lanes, src += ssrc*lanes, dst += sdst*lanes) { vec_f32 x_in; if (ssrc == 1) { x_in = hn::LoadN(f32, src, len); } else { - x_in = GatherIndexN(src, ssrc, len); + x_in = hn::GatherIndexN(f32, src, src_index, len); } opmask_t nnan_mask = hn::Not(hn::IsNaN(x_in)); // Eliminate NaN to avoid FP invalid exception @@ -191,7 +162,7 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, if (sdst == 1) { hn::StoreN(cos, f32, dst, len); } else { - ScatterIndexN(cos, dst, sdst, len); + hn::ScatterIndexN(cos, f32, dst, dst_index, len); } } if (!hn::AllTrue(f32, simd_mask)) { @@ -221,7 +192,7 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, npyv_cleanup(); } } -#endif // NPY_SIMD_FMA3 +#endif // HWY_NATIVE_FMA /* Disable SIMD code sin/cos f64 and revert to libm: see * https://mail.python.org/archives/list/numpy-discussion@python.org/thread/C6EYZZSR4EWGVKHAZXLE7IBILRMNVK7L/ @@ -242,7 +213,7 @@ DISPATCH_DOUBLE_FUNC(cos) NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_sin) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { -#if NPY_SIMD_F32 && NPY_SIMD_FMA3 +#if HWY_NATIVE_FMA const npy_float *src = (npy_float*)args[0]; npy_float *dst = (npy_float*)args[1]; @@ -271,7 +242,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_sin) NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_cos) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { -#if NPY_SIMD_F32 && NPY_SIMD_FMA3 +#if HWY_NATIVE_FMA const npy_float *src = (npy_float*)args[0]; npy_float *dst = (npy_float*)args[1]; From c535a4ab8f242b0c3670ea436d18ba941dec2527 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Mon, 6 
May 2024 13:13:59 -0700 Subject: [PATCH 627/980] Update highway to latest: fix SIGILL for -ve indices --- numpy/_core/src/highway | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 02e497e0ccf1..e9a2799e365c 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 02e497e0ccf1c2b2285f94efd40d4eb6c930b3b8 +Subproject commit e9a2799e365c79cb5759557c3458a34c5d698cc3 From 2983386814ce094bc0a8c6adbc6f672008a4c1be Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Tue, 7 May 2024 10:49:43 -0700 Subject: [PATCH 628/980] Revert back to NPY_SIMD_FMA3 --- numpy/_core/src/umath/loops_trigonometric.dispatch.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index 273997197f9f..746e3b92263e 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -30,7 +30,7 @@ namespace hn = hwy::HWY_NAMESPACE; * elements or when there's no native FUSED support instead of fallback to libc */ -#if HWY_NATIVE_FMA // native support +#if NPY_SIMD_FMA3 // native support typedef enum { SIMD_COMPUTE_SIN, @@ -192,7 +192,7 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, npyv_cleanup(); } } -#endif // HWY_NATIVE_FMA +#endif // NPY_SIMD_FMA3 /* Disable SIMD code sin/cos f64 and revert to libm: see * https://mail.python.org/archives/list/numpy-discussion@python.org/thread/C6EYZZSR4EWGVKHAZXLE7IBILRMNVK7L/ @@ -213,7 +213,7 @@ DISPATCH_DOUBLE_FUNC(cos) NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_sin) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { -#if HWY_NATIVE_FMA +#if NPY_SIMD_FMA3 const npy_float *src = (npy_float*)args[0]; npy_float *dst = (npy_float*)args[1]; @@ -242,7 +242,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_sin) NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_cos) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { -#if HWY_NATIVE_FMA +#if NPY_SIMD_FMA3 const npy_float *src = (npy_float*)args[0]; npy_float *dst = (npy_float*)args[1]; From 0293ef0ac68d9d442bef70606ef4b561b632d608 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Tue, 7 May 2024 12:16:31 -0700 Subject: [PATCH 629/980] Update highway to latest --- numpy/_core/src/highway | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index e9a2799e365c..1dbb1180e05c 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit e9a2799e365c79cb5759557c3458a34c5d698cc3 +Subproject commit 1dbb1180e05c55b648f2508d3f97bf26c6f926a8 From ccf236783191954b93802d3f8cc019383716c53b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Thu, 20 Jun 2024 17:52:27 +0000 Subject: [PATCH 630/980] DOC, NEP: Update NEP44 Clarify recommendation on using scipy.datasets and minor formatting fixes. 
Closes gh-26709 [skip azp][skip cirrus][skip actions] --- .../nep-0044-restructuring-numpy-docs.rst | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/doc/neps/nep-0044-restructuring-numpy-docs.rst b/doc/neps/nep-0044-restructuring-numpy-docs.rst index 9c4721664fd4..d1a1a0827ad7 100644 --- a/doc/neps/nep-0044-restructuring-numpy-docs.rst +++ b/doc/neps/nep-0044-restructuring-numpy-docs.rst @@ -86,7 +86,8 @@ up-to-date official documentation that can be easily updated. Status and ideas of each type of doc content -------------------------------------------- -**Reference guide** +Reference guide +^^^^^^^^^^^^^^^ NumPy has a quite complete reference guide. All functions are documented, most have examples, and most are cross-linked well with *See Also* sections. Further @@ -94,7 +95,8 @@ improving the reference guide is incremental work that can be done (and is being done) by many people. There are, however, many explanations in the reference guide. These can be moved to a more dedicated Explanations section on the docs. -**How-to guides** +How-to guides +^^^^^^^^^^^^^ NumPy does not have many how-to's. The subclassing and array ducktyping section may be an example of a how-to. Others that could be added are: @@ -106,7 +108,8 @@ may be an example of a how-to. Others that could be added are: - Performance (memory layout, profiling, use with Numba, Cython, or Pythran) - Writing generic code that works with NumPy, Dask, CuPy, pydata/sparse, etc. -**Explanations** +Explanations +^^^^^^^^^^^^ There is a reasonable amount of content on fundamental NumPy concepts such as indexing, vectorization, broadcasting, (g)ufuncs, and dtypes. This could be @@ -114,7 +117,7 @@ organized better and clarified to ensure it's really about explaining the concep and not mixed with tutorial or how-to like content. There are few explanations about anything other than those fundamental NumPy -concepts. +concepts. Some examples of concepts that could be expanded: @@ -125,7 +128,8 @@ Some examples of concepts that could be expanded: In addition, there are many explanations in the Reference Guide, which should be moved to this new dedicated Explanations section. -**Tutorials** +Tutorials +^^^^^^^^^ There's a lot of scope for writing better tutorials. We have a new *NumPy for absolute beginners tutorial* [3]_ (GSoD project of Anne Bonner). In addition we @@ -154,19 +158,15 @@ propose a *How to write a tutorial* document, which would help users contribute new high-quality content to the documentation. Data sets ---------- +~~~~~~~~~ Using interesting data in the NumPy docs requires giving all users access to that data, either inside NumPy or in a separate package. The former is not the best idea, since it's hard to do without increasing the size of NumPy -significantly. Even for SciPy there has so far been no consensus on this (see -`scipy PR 8707 `_ on adding a new -``scipy.datasets`` subpackage). - -So we'll aim for a new (pure Python) package, named ``numpy-datasets`` or -``scipy-datasets`` or something similar. That package can take some lessons from -how, e.g., scikit-learn ships data sets. Small data sets can be included in the -repo, large data sets can be accessed via a downloader class or function. +significantly. + +Whenever possible, documentation pages should use examples from the +:mod:`scipy.datasets` package. 
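For illustration (an editorial sketch, not part of the NEP text or this
patch; it assumes SciPy >= 1.10, where the ``scipy.datasets`` accessors
landed), such a docs example could look like:

>>> from scipy import datasets
>>> face = datasets.face()  # an RGB photo shipped via scipy.datasets
>>> face.shape, face.dtype
((768, 1024, 3), dtype('uint8'))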
Related work
============

From 40507eaccfa330ddf36477fd92919f0321d6f124 Mon Sep 17 00:00:00 2001
From: Leo Singer
Date: Thu, 20 Jun 2024 15:00:59 -0400
Subject: [PATCH 631/980] BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes

Complete the prototype of `PyArray_ImportNumPyAPI()` by adding `void` to
the argument list so that this header does not cause errors when the user
is compiling with `-Werror=strict-prototypes`.
---
 numpy/_core/include/numpy/npy_2_compat.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h
index 50e637f79223..e5499d04bd2b 100644
--- a/numpy/_core/include/numpy/npy_2_compat.h
+++ b/numpy/_core/include/numpy/npy_2_compat.h
@@ -74,7 +74,7 @@
 #ifdef import_array1

 static inline int
-PyArray_ImportNumPyAPI()
+PyArray_ImportNumPyAPI(void)
 {
     if (NPY_UNLIKELY(PyArray_API == NULL)) {
         import_array1(-1);

From 30a84a11053d3eb18fecf9d286716f08d7a65ee9 Mon Sep 17 00:00:00 2001
From: Yannik Wicke
Date: Fri, 21 Jun 2024 17:12:20 +0200
Subject: [PATCH 632/980] BUG: remove numpy.f2py from excludedimports

SciPy 1.13.1 ends up trying to import this module, e.g. when using
optimize.root_scalar.
---
 numpy/_pyinstaller/hook-numpy.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/numpy/_pyinstaller/hook-numpy.py b/numpy/_pyinstaller/hook-numpy.py
index 0b3b46f2598a..84f3626b43d5 100644
--- a/numpy/_pyinstaller/hook-numpy.py
+++ b/numpy/_pyinstaller/hook-numpy.py
@@ -31,7 +31,6 @@
     "pytest",
     "f2py",
     "setuptools",
-    "numpy.f2py",
     "distutils",
     "numpy.distutils",
 ]

From 97b5a277cb138adf3c77b3a55639f49af5a73425 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Fri, 21 Jun 2024 09:41:37 -0600
Subject: [PATCH 633/980] MNT: respond to review comments

---
 doc/release/upcoming_changes/26750.improvement.rst | 8 ++++----
 numpy/_core/meson.build                            | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/doc/release/upcoming_changes/26750.improvement.rst b/doc/release/upcoming_changes/26750.improvement.rst
index c7beb85f8fbf..858061dbe48a 100644
--- a/doc/release/upcoming_changes/26750.improvement.rst
+++ b/doc/release/upcoming_changes/26750.improvement.rst
@@ -1,8 +1,8 @@
-`lapack_lite` is thread safe
-----------------------------
+`lapack_lite` is now thread safe
+--------------------------------

-NumPy provides a minimal CPU-only version of LAPACK named ``lapack_lite`` that can
-be used if no BLAS/LAPACK system is detected at build time.
+NumPy provides a minimal low-performance version of LAPACK named ``lapack_lite``
+that can be used if no BLAS/LAPACK system is detected at build time.
 Until now, ``lapack_lite`` was not thread safe.
 Single-threaded use cases did not hit any issues, but running linear
 algebra operations in multiple threads

diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build
index 5143dca5f355..608f939a1d15 100644
--- a/numpy/_core/meson.build
+++ b/numpy/_core/meson.build
@@ -508,7 +508,7 @@ cdata.set('NPY_VISIBILITY_HIDDEN', visibility_hidden)
 # if not set, we're using lapack_lite
 if have_lapack
-  cdata.set10('HAVE_EXTERNAL_LAPACK', have_blas)
+  cdata.set10('HAVE_EXTERNAL_LAPACK', have_lapack)
 endif

 config_h = configure_file(

From 9603c875eee87db746bf61c062663f728c87ff2d Mon Sep 17 00:00:00 2001
From: mattip
Date: Sun, 23 Jun 2024 14:15:43 +0300
Subject: [PATCH 634/980] avoid side-effect of 'include complex.h'

---
 numpy/_core/include/numpy/npy_common.h | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h
index c6ef7a6ec669..3132b602a7c8 100644
--- a/numpy/_core/include/numpy/npy_common.h
+++ b/numpy/_core/include/numpy/npy_common.h
@@ -379,6 +379,12 @@ typedef struct

 #include <complex.h>

+// Downstream libraries like sympy would like to use I
+// see https://github.com/numpy/numpy/issues/26787
+#ifdef I
+#undef I
+#endif
+
 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
 typedef _Dcomplex npy_cdouble;
 typedef _Fcomplex npy_cfloat;

From 11e87b8e2e280cb812cd98addb2c1f811437d312 Mon Sep 17 00:00:00 2001
From: Ross Barnowski
Date: Mon, 24 Jun 2024 05:19:03 -0700
Subject: [PATCH 635/980] DOC: Update link to Python stdlib random.

[skip azp][skip cirrus][skip actions]
---
 numpy/random/_generator.pyx | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index df3dc27778e9..de9989b18424 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -169,9 +169,10 @@ cdef class Generator:

     Notes
     -----
-    The Python stdlib module `random` contains pseudo-random number generator
-    with a number of methods that are similar to the ones available in
-    `Generator`. It uses Mersenne Twister, and this bit generator can
+    The Python stdlib module :external+python:mod:`random` contains a
+    pseudo-random number generator with a number of methods that are similar
+    to the ones available in `Generator`.
+    It uses Mersenne Twister, and this bit generator can
     be accessed using `MT19937`. `Generator`, besides being
     NumPy-aware, has the advantage that it provides a much larger number
     of probability distributions to choose from.

From e7e65745cee4e9492263038af6e0e311c254fc79 Mon Sep 17 00:00:00 2001
From: Raghuveer Devulapalli
Date: Thu, 20 Jun 2024 09:36:09 -0700
Subject: [PATCH 636/980] Remove VSX from build targets

---
 numpy/_core/meson.build | 2 --
 1 file changed, 2 deletions(-)

diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build
index cfb4bd64ce92..e9a632f12e35 100644
--- a/numpy/_core/meson.build
+++ b/numpy/_core/meson.build
@@ -947,9 +947,7 @@ foreach gen_mtargets : [
     'src/umath/loops_trigonometric.dispatch.cpp',
     [
       AVX512_SKX, [AVX2, FMA3],
-      VSX4, VSX3, VSX2,
       NEON_VFPV4,
-      VXE2, VXE
     ]
   ],
   [

From 5a3ba2a8f08954a5e42b19ace339594da891124f Mon Sep 17 00:00:00 2001
From: Alex Herbert
Date: Thu, 20 Jun 2024 13:42:00 +0100
Subject: [PATCH 637/980] BUG: Quantile closest_observation to round to nearest even order

Detection of an even order statistic (1-based) must check for an odd index
due to the use of 0-based indexing.
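As a hedged illustration of the corrected behavior (the values come from
the new test added below, not from the original commit message):

>>> import numpy as np
>>> # the tie at the median of four points now resolves to the 2nd
>>> # (even, 1-based) order statistic
>>> float(np.quantile([1., 2., 3., 4.], 0.5, method='closest_observation'))
2.0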
See #26656 --- .../upcoming_changes/26656.improvement.rst | 5 ++++ numpy/lib/_function_base_impl.py | 26 +++++++++++-------- numpy/lib/tests/test_function_base.py | 16 +++++++++++- 3 files changed, 35 insertions(+), 12 deletions(-) create mode 100644 doc/release/upcoming_changes/26656.improvement.rst diff --git a/doc/release/upcoming_changes/26656.improvement.rst b/doc/release/upcoming_changes/26656.improvement.rst new file mode 100644 index 000000000000..1f5aeab00c00 --- /dev/null +++ b/doc/release/upcoming_changes/26656.improvement.rst @@ -0,0 +1,5 @@ +`quantile` method ``closest_observation`` chooses nearest even order statistic +------------------------------------------------------------------------------ +This changes the definition of nearest for border cases from the nearest odd +order statistic to nearest even order statistic. The numpy implementation now +matches other reference implementations. diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 5356f1cc31c3..9d6fc05bafb4 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -388,7 +388,7 @@ def iterable(y): def _weights_are_valid(weights, a, axis): """Validate weights array. - + We assume, weights is not None. """ wgt = np.asanyarray(weights) @@ -448,7 +448,7 @@ def average(a, axis=None, weights=None, returned=False, *, The calculation is:: avg = sum(a * weights) / sum(weights) - + where the sum is over all included elements. The only constraint on the values of `weights` is that `sum(weights)` must not be 0. @@ -2049,7 +2049,7 @@ def disp(mesg, device=None, linefeed=True): "(deprecated in NumPy 2.0)", DeprecationWarning, stacklevel=2 - ) + ) if device is None: device = sys.stdout @@ -3847,7 +3847,7 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): Axis or axes along which the medians are computed. The default, axis=None, will compute the median along a flattened version of the array. - + .. versionadded:: 1.9.0 If a sequence of axes, the array is first flattened along the @@ -4355,12 +4355,14 @@ def quantile(a, The table above includes only the estimators from H&F that are continuous functions of probability `q` (estimators 4-9). NumPy also provides the three discontinuous estimators from H&F (estimators 1-3), where ``j`` is - defined as above and ``m`` and ``g`` are defined as follows. + defined as above, ``m`` is defined as follows, and ``g`` is a function + of the real-valued ``index = q*n + m - 1`` and ``j``. - 1. ``inverted_cdf``: ``m = 0`` and ``g = int(q*n > 0)`` - 2. ``averaged_inverted_cdf``: ``m = 0`` and ``g = (1 + int(q*n > 0)) / 2`` + 1. ``inverted_cdf``: ``m = 0`` and ``g = int(index - j > 0)`` + 2. ``averaged_inverted_cdf``: ``m = 0`` and + ``g = (1 + int(index - j > 0)) / 2`` 3. ``closest_observation``: ``m = -1/2`` and - ``1 - int((g == 0) & (j%2 == 0))`` + ``g = 1 - int((index == j) & (j%2 == 1))`` For backward compatibility with previous versions of NumPy, `quantile` provides four additional discontinuous estimators. Like @@ -4394,7 +4396,7 @@ def quantile(a, For weighted quantiles, the coverage conditions still hold. The empirical cumulative distribution is simply replaced by its weighted - version, i.e. + version, i.e. :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. Only ``method="inverted_cdf"`` supports weights. 
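As a hedged aside (not part of this hunk): with uniform weights the
weighted estimator should reduce to the plain ``inverted_cdf`` result,
e.g.

>>> import numpy as np
>>> float(np.quantile([1., 2., 3., 4.], 0.5, method='inverted_cdf',
...                   weights=[1, 1, 1, 1]))
2.0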
@@ -4608,7 +4610,9 @@ def _discret_interpolation_to_boundaries(index, gamma_condition_fun): def _closest_observation(n, quantiles): - gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 0) + # "choose the nearest even order statistic at g=0" (H&F (1996) pp. 362). + # Order is 1-based so for zero-based indexing round to nearest odd index. + gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 1) return _discret_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, gamma_fun) @@ -4838,7 +4842,7 @@ def find_cdf_1d(arr, cdf): return result r_shape = arr.shape[1:] - if quantiles.ndim > 0: + if quantiles.ndim > 0: r_shape = quantiles.shape + r_shape if out is None: result = np.empty_like(arr, shape=r_shape) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index b3cffa2703d5..ef331881ca22 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1904,7 +1904,7 @@ def test_positional_regression_9477(self): def test_datetime_conversion(self): otype = "datetime64[ns]" - arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], + arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], dtype='datetime64[ns]') assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)", otypes=[otype])(arr), arr) @@ -3998,6 +3998,20 @@ def test_weibull_fraction(self): quantile = np.quantile(arr, [Fraction(1, 2)], method='weibull') assert_equal(quantile, np.array(Fraction(1, 20))) + def test_closest_observation(self): + # Round ties to nearest even order statistic (see #26656) + m = 'closest_observation' + q = 0.5 + arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + assert_equal(2, np.quantile(arr[0:3], q, method=m)) + assert_equal(2, np.quantile(arr[0:4], q, method=m)) + assert_equal(2, np.quantile(arr[0:5], q, method=m)) + assert_equal(3, np.quantile(arr[0:6], q, method=m)) + assert_equal(4, np.quantile(arr[0:7], q, method=m)) + assert_equal(4, np.quantile(arr[0:8], q, method=m)) + assert_equal(4, np.quantile(arr[0:9], q, method=m)) + assert_equal(5, np.quantile(arr, q, method=m)) + class TestLerp: @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, From 52679dcc7d07b0f23ebdf1090bc11c90de183f2a Mon Sep 17 00:00:00 2001 From: Alex Herbert Date: Mon, 24 Jun 2024 10:42:28 +0100 Subject: [PATCH 638/980] Restore trailing whitespace --- numpy/lib/_function_base_impl.py | 12 ++++++------ numpy/lib/tests/test_function_base.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 9d6fc05bafb4..aa28d0858d18 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -388,7 +388,7 @@ def iterable(y): def _weights_are_valid(weights, a, axis): """Validate weights array. - + We assume, weights is not None. """ wgt = np.asanyarray(weights) @@ -448,7 +448,7 @@ def average(a, axis=None, weights=None, returned=False, *, The calculation is:: avg = sum(a * weights) / sum(weights) - + where the sum is over all included elements. The only constraint on the values of `weights` is that `sum(weights)` must not be 0. @@ -2049,7 +2049,7 @@ def disp(mesg, device=None, linefeed=True): "(deprecated in NumPy 2.0)", DeprecationWarning, stacklevel=2 - ) + ) if device is None: device = sys.stdout @@ -3847,7 +3847,7 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): Axis or axes along which the medians are computed. 
The default, axis=None, will compute the median along a flattened version of the array. - + .. versionadded:: 1.9.0 If a sequence of axes, the array is first flattened along the @@ -4396,7 +4396,7 @@ def quantile(a, For weighted quantiles, the coverage conditions still hold. The empirical cumulative distribution is simply replaced by its weighted - version, i.e. + version, i.e. :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. Only ``method="inverted_cdf"`` supports weights. @@ -4842,7 +4842,7 @@ def find_cdf_1d(arr, cdf): return result r_shape = arr.shape[1:] - if quantiles.ndim > 0: + if quantiles.ndim > 0: r_shape = quantiles.shape + r_shape if out is None: result = np.empty_like(arr, shape=r_shape) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index ef331881ca22..59ef25928e3d 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1904,7 +1904,7 @@ def test_positional_regression_9477(self): def test_datetime_conversion(self): otype = "datetime64[ns]" - arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], + arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], dtype='datetime64[ns]') assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)", otypes=[otype])(arr), arr) From 796b718a41051e1769736c6c8ebc9de30a5ecbe7 Mon Sep 17 00:00:00 2001 From: Alex Herbert Date: Mon, 24 Jun 2024 10:42:59 +0100 Subject: [PATCH 639/980] Improve release change notice --- doc/release/upcoming_changes/26656.improvement.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/release/upcoming_changes/26656.improvement.rst b/doc/release/upcoming_changes/26656.improvement.rst index 1f5aeab00c00..66d7508d2738 100644 --- a/doc/release/upcoming_changes/26656.improvement.rst +++ b/doc/release/upcoming_changes/26656.improvement.rst @@ -1,5 +1,5 @@ -`quantile` method ``closest_observation`` chooses nearest even order statistic ------------------------------------------------------------------------------- +`np.quantile` with method ``closest_observation`` chooses nearest even order statistic +-------------------------------------------------------------------------------------- This changes the definition of nearest for border cases from the nearest odd order statistic to nearest even order statistic. The numpy implementation now matches other reference implementations. From 399fce5853df681c566621cd5e6c3377211cf82d Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 25 Jun 2024 15:03:32 -0600 Subject: [PATCH 640/980] Add type overloads and test for unstack --- numpy/_core/shape_base.pyi | 6 ++++++ numpy/typing/tests/data/reveal/shape_base.pyi | 3 +++ 2 files changed, 9 insertions(+) diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index 8cf604b7358d..01200e3465b9 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -117,6 +117,12 @@ def stack( casting: _CastingKind = ... ) -> _ArrayType: ... +@overload +def unstack( + array: _ArrayLike[_SCT], + axis: int = 0 +) -> tuple[NDArray[_SCT], ...]: ... + @overload def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... 
@overload diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi index 69940cc1ac2c..b7dafe17660e 100644 --- a/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/numpy/typing/tests/data/reveal/shape_base.pyi @@ -53,3 +53,6 @@ assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) assert_type(np.tile(AR_i8, 5), npt.NDArray[np.int64]) assert_type(np.tile(AR_LIKE_f8, [2, 2]), npt.NDArray[Any]) + +assert_type(np.unstack(AR_i8, 5), list[npt.NDArray[np.int64]]) +assert_type(np.unstack(AR_LIKE_f8, 5), list[npt.NDArray[Any]]) From 5739e143cb9fb5abe76dd6a44bf14db792801cb0 Mon Sep 17 00:00:00 2001 From: Aaron Meurer Date: Tue, 25 Jun 2024 15:07:02 -0600 Subject: [PATCH 641/980] Add release notes entry for unstack --- doc/release/upcoming_changes/26579.new_function.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/26579.new_function.rst diff --git a/doc/release/upcoming_changes/26579.new_function.rst b/doc/release/upcoming_changes/26579.new_function.rst new file mode 100644 index 000000000000..ac50ba4d4976 --- /dev/null +++ b/doc/release/upcoming_changes/26579.new_function.rst @@ -0,0 +1,5 @@ +New function ``unstack()`` +-------------------------- + +A new function ``unstack()``, which splits an array into a tuple of arrays +along an axis. It serves as the inverse of ``np.stack()``. From f160b2a6322208e4a0cf44c402b86ac3307b1122 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Tue, 25 Jun 2024 15:57:51 -0700 Subject: [PATCH 642/980] BLD: Fix x86-simd-sort build failure on openBSD --- numpy/_core/src/npysort/x86-simd-sort | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index aad3db19def3..9a1b616d5cd4 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit aad3db19def3273843d4390808d63c2b6ebd1dbf +Subproject commit 9a1b616d5cd4eaf49f7664fb86ccc1d18bad2b8d From 63dc4c92532e83b48cec9c9ef3ada00dd98eb339 Mon Sep 17 00:00:00 2001 From: Andrej Date: Wed, 26 Jun 2024 13:03:12 +0500 Subject: [PATCH 643/980] TYP: fix missing `sys` import in numeric.pyi (#26788) * fix missing `sys` import in numeric.pyi * remove sys.version_info >= (3, 10) check in .pyi As 3.10 now is the minimum required version for numpy * remove in numeric.pyi an imported symbol missing after c8e2343 It's okay to just remove it as it wasn't really used in the code either way. 
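A minimal sketch of the import pattern this simplification enables
(assuming Python >= 3.10, now NumPy's minimum supported version):

    # no typing_extensions fallback or version check is needed anymore
    from typing import Concatenate, ParamSpec, TypeGuard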
--- numpy/_core/numeric.pyi | 6 +----- numpy/lib/_function_base_impl.pyi | 7 +------ numpy/lib/_shape_base_impl.pyi | 16 +++++++++------- numpy/testing/_private/utils.pyi | 5 +---- 4 files changed, 12 insertions(+), 22 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 8871cf9d264a..d20cc13e49eb 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -7,15 +7,11 @@ from typing import ( SupportsAbs, SupportsIndex, NoReturn, + TypeGuard, ) -if sys.version_info >= (3, 10): - from typing import TypeGuard -else: - from typing_extensions import TypeGuard import numpy as np from numpy import ( - ComplexWarning as ComplexWarning, generic, unsignedinteger, signedinteger, diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 2650568d3923..834ad633efb6 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,4 +1,3 @@ -import sys from collections.abc import Sequence, Iterator, Callable, Iterable from typing import ( Literal as L, @@ -8,13 +7,9 @@ from typing import ( Protocol, SupportsIndex, SupportsInt, + TypeGuard ) -if sys.version_info >= (3, 10): - from typing import TypeGuard -else: - from typing_extensions import TypeGuard - from numpy import ( vectorize as vectorize, ufunc, diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index cdfe9d9d5637..c765e1e5edf5 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -1,11 +1,13 @@ -import sys from collections.abc import Callable, Sequence -from typing import TypeVar, Any, overload, SupportsIndex, Protocol - -if sys.version_info >= (3, 10): - from typing import ParamSpec, Concatenate -else: - from typing_extensions import ParamSpec, Concatenate +from typing import ( + TypeVar, + Any, + overload, + SupportsIndex, + Protocol, + ParamSpec, + Concatenate, +) import numpy as np from numpy import ( diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index e2272ad2f7d0..113457ae1c55 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -18,11 +18,8 @@ from typing import ( TypeVar, Final, SupportsIndex, + ParamSpec ) -if sys.version_info >= (3, 10): - from typing import ParamSpec -else: - from typing_extensions import ParamSpec import numpy as np from numpy import number, object_, _FloatValue From ef19999d743612a3afb1cb609f0c912fd763ce11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 26 Jun 2024 12:57:01 +0200 Subject: [PATCH 644/980] MNT: Update dlpack docs and typing stubs --- numpy/__init__.pyi | 18 ++++++++++++++++-- numpy/_core/_add_newdocs.py | 26 +++++++++++++++++++++----- 2 files changed, 37 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6a6d133e335d..98a7c959b05b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2563,7 +2563,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __imatmul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - def __dlpack__(self: NDArray[number[Any]], *, stream: None = ...) -> _PyCapsule: ... + def __dlpack__( + self: NDArray[number[Any]], + *, + stream: int | Any | None = ..., + max_version: tuple[int, int] | None = ..., + dl_device: tuple[int, L[0]] | None = ..., + copy: bool | None = ..., + ) -> _PyCapsule: ... + def __dlpack_device__(self) -> tuple[int, L[0]]: ... def __array_namespace__(self, *, api_version: str | None = ...) -> Any: ... 
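For illustration, a sketch of the protocol these stubs describe, as it
behaves for a CPU array in current NumPy:

>>> import numpy as np
>>> x = np.arange(3)
>>> x.__dlpack_device__()  # (device_type, device_id); 1 is kDLCPU
(1, 0)
>>> np.from_dlpack(x)  # consumes x.__dlpack__() under the hood
array([0, 1, 2])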
@@ -3921,4 +3929,10 @@ _CharDType = TypeVar("_CharDType", dtype[str_], dtype[bytes_]) class _SupportsDLPack(Protocol[_T_contra]): def __dlpack__(self, *, stream: None | _T_contra = ...) -> _PyCapsule: ... -def from_dlpack(obj: _SupportsDLPack[None], /) -> NDArray[Any]: ... +def from_dlpack( + obj: _SupportsDLPack[None], + /, + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., +) -> NDArray[Any]: ... diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 34da738675fb..f83b838ac338 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1679,7 +1679,7 @@ add_newdoc('numpy._core.multiarray', 'from_dlpack', """ - from_dlpack(x, /) + from_dlpack(x, /, *, device=None, copy=None) Create a NumPy array from an object implementing the ``__dlpack__`` protocol. Generally, the returned NumPy array is a read-only view @@ -1690,6 +1690,16 @@ x : object A Python object that implements the ``__dlpack__`` and ``__dlpack_device__`` methods. + device : device, optional + Device on which to place the created array. If device is ``None`` + and ``x`` supports DLPack, the output array will be on the same + device as ``x``. Default: ``None``. + copy : bool, optional + Boolean indicating whether or not to copy the input. If ``True``, + the copy will be made. If ``False``, the function will never copy, + and will raise ``BufferError`` in case a copy is deemed necessary. + If ``None``, the function will reuse the existing memory buffer if + possible and copy otherwise. Default: ``None``. Returns ------- @@ -2380,14 +2390,20 @@ """Array protocol: C-struct side.""")) add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', - """a.__dlpack__(*, stream=None) + """ + a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None) - DLPack Protocol: Part of the Array API.""")) + DLPack Protocol: Part of the Array API. + + """)) add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', - """a.__dlpack_device__() + """ + a.__dlpack_device__() + + DLPack Protocol: Part of the Array API. - DLPack Protocol: Part of the Array API.""")) + """)) add_newdoc('numpy._core.multiarray', 'ndarray', ('base', """ From e9f338a33b2065e2c5cd219cd0d2a4589168ed5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 26 Jun 2024 17:52:26 +0200 Subject: [PATCH 645/980] Apply review comment --- numpy/_core/_add_newdocs.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index f83b838ac338..845ed360d2dd 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -937,7 +937,7 @@ 'K' (keep) preserve input order Defaults to 'K'. device : str, optional - The device on which to place the created array. Default: None. + The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. .. versionadded:: 2.0.0 @@ -1031,7 +1031,7 @@ 'K' (keep) preserve input order Defaults to 'C'. device : str, optional - The device on which to place the created array. Default: None. + The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. .. versionadded:: 2.1.0 @@ -1235,7 +1235,7 @@ (C-style) or column-major (Fortran-style) order in memory. device : str, optional - The device on which to place the created array. Default: None. + The device on which to place the created array. Default: ``None``. 
For Array-API interoperability only, so must be ``"cpu"`` if passed. .. versionadded:: 2.0.0 @@ -1691,9 +1691,8 @@ A Python object that implements the ``__dlpack__`` and ``__dlpack_device__`` methods. device : device, optional - Device on which to place the created array. If device is ``None`` - and ``x`` supports DLPack, the output array will be on the same - device as ``x``. Default: ``None``. + Device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. copy : bool, optional Boolean indicating whether or not to copy the input. If ``True``, the copy will be made. If ``False``, the function will never copy, @@ -1701,6 +1700,7 @@ If ``None``, the function will reuse the existing memory buffer if possible and copy otherwise. Default: ``None``. + Returns ------- out : ndarray @@ -1768,7 +1768,7 @@ The type of the output array. If `dtype` is not given, infer the data type from the other input arguments. device : str, optional - The device on which to place the created array. Default: None. + The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. .. versionadded:: 2.0.0 From e2803ad4ab0cfeacc1e2bb43406273f4398a27d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 26 Jun 2024 20:01:28 +0200 Subject: [PATCH 646/980] Review changes --- doc/release/upcoming_changes/26579.new_function.rst | 9 +++++---- numpy/__init__.pyi | 1 + numpy/_core/shape_base.py | 2 +- numpy/_core/shape_base.pyi | 11 ++++++++++- numpy/typing/tests/data/reveal/shape_base.pyi | 4 ++-- 5 files changed, 19 insertions(+), 8 deletions(-) diff --git a/doc/release/upcoming_changes/26579.new_function.rst b/doc/release/upcoming_changes/26579.new_function.rst index ac50ba4d4976..168d12189323 100644 --- a/doc/release/upcoming_changes/26579.new_function.rst +++ b/doc/release/upcoming_changes/26579.new_function.rst @@ -1,5 +1,6 @@ -New function ``unstack()`` --------------------------- +New function `numpy.unstack` +---------------------------- -A new function ``unstack()``, which splits an array into a tuple of arrays -along an axis. It serves as the inverse of ``np.stack()``. +A new function ``np.unstack(array, axis=...)`` was added, which splits +an array into a tuple of arrays along an axis. It serves as the inverse +of `numpy.stack`. diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d5a0dd796424..35984e396cdf 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -397,6 +397,7 @@ from numpy._core.shape_base import ( hstack as hstack, stack as stack, vstack as vstack, + unstack as unstack, ) from numpy.lib import ( diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index bf0933d973e7..e1a2a584be0e 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -457,7 +457,7 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): return _nx.concatenate(expanded_arrays, axis=axis, out=out, dtype=dtype, casting=casting) -def _unstack_dispatcher(x, *, axis=None): +def _unstack_dispatcher(x, /, *, axis=None): return (x,) @array_function_dispatch(_unstack_dispatcher) diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index 01200e3465b9..627dbba06c19 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -120,8 +120,17 @@ def stack( @overload def unstack( array: _ArrayLike[_SCT], - axis: int = 0 + /, + *, + axis: int = ..., ) -> tuple[NDArray[_SCT], ...]: ... 
+@overload +def unstack( + array: ArrayLike, + /, + *, + axis: int = ..., +) -> tuple[NDArray[Any], ...]: ... @overload def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi index b7dafe17660e..d703a8fd12b3 100644 --- a/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/numpy/typing/tests/data/reveal/shape_base.pyi @@ -54,5 +54,5 @@ assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) assert_type(np.tile(AR_i8, 5), npt.NDArray[np.int64]) assert_type(np.tile(AR_LIKE_f8, [2, 2]), npt.NDArray[Any]) -assert_type(np.unstack(AR_i8, 5), list[npt.NDArray[np.int64]]) -assert_type(np.unstack(AR_LIKE_f8, 5), list[npt.NDArray[Any]]) +assert_type(np.unstack(AR_i8, axis=0), list[npt.NDArray[np.int64]]) +assert_type(np.unstack(AR_LIKE_f8, axis=0), list[npt.NDArray[Any]]) From 886ceebd7083d35199a9b96e456d06e6bb94b377 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 26 Jun 2024 20:05:36 +0200 Subject: [PATCH 647/980] Fix reveal test --- numpy/_core/shape_base.py | 2 ++ numpy/typing/tests/data/reveal/shape_base.pyi | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index e1a2a584be0e..719ebcf7692a 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -477,6 +477,8 @@ def unstack(x, /, *, axis=0): ---------- x : ndarray The array to be unstacked. + axis : int, optional + Axis along which the array will be split. Default: ``0``. Returns ------- diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi index d703a8fd12b3..a133d58e1678 100644 --- a/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/numpy/typing/tests/data/reveal/shape_base.pyi @@ -54,5 +54,5 @@ assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) assert_type(np.tile(AR_i8, 5), npt.NDArray[np.int64]) assert_type(np.tile(AR_LIKE_f8, [2, 2]), npt.NDArray[Any]) -assert_type(np.unstack(AR_i8, axis=0), list[npt.NDArray[np.int64]]) -assert_type(np.unstack(AR_LIKE_f8, axis=0), list[npt.NDArray[Any]]) +assert_type(np.unstack(AR_i8, axis=0), tuple[npt.NDArray[np.int64]]) +assert_type(np.unstack(AR_LIKE_f8, axis=0), tuple[npt.NDArray[Any]]) From d2cc53f282205f607a2627d1e6f6262958ed6ee2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 26 Jun 2024 20:30:10 +0200 Subject: [PATCH 648/980] Fix reveal test --- numpy/typing/tests/data/reveal/shape_base.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi index a133d58e1678..526f3abf161c 100644 --- a/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/numpy/typing/tests/data/reveal/shape_base.pyi @@ -54,5 +54,5 @@ assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) assert_type(np.tile(AR_i8, 5), npt.NDArray[np.int64]) assert_type(np.tile(AR_LIKE_f8, [2, 2]), npt.NDArray[Any]) -assert_type(np.unstack(AR_i8, axis=0), tuple[npt.NDArray[np.int64]]) -assert_type(np.unstack(AR_LIKE_f8, axis=0), tuple[npt.NDArray[Any]]) +assert_type(np.unstack(AR_i8, axis=0), tuple[npt.NDArray[np.int64], ...]) +assert_type(np.unstack(AR_LIKE_f8, axis=0), tuple[npt.NDArray[Any], ...]) From 457de03031e6c8823bb89d3910658ace3b6a8d0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 26 Jun 2024 20:53:28 +0200 Subject: [PATCH 649/980] Raise for 0-d 
input --- numpy/_core/shape_base.py | 2 ++ numpy/_core/tests/test_shape_base.py | 1 + 2 files changed, 3 insertions(+) diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index 719ebcf7692a..d479e7c6f415 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -524,6 +524,8 @@ def unstack(x, /, *, axis=0): np.True_ """ + if x.ndim == 0: + raise ValueError("Input array must be at least 1-d.") return tuple(_nx.moveaxis(x, axis, 0)) # Internal functions to eliminate the overhead of repeated dispatch in one of diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 4b4bde19cd81..610c693d3d10 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -520,6 +520,7 @@ def test_unstack(): assert_raises(ValueError, np.unstack, a, axis=3) assert_raises(ValueError, np.unstack, a, axis=-4) + assert_raises(ValueError, np.unstack, np.array(0), axis=0) @pytest.mark.parametrize("axis", [0]) From b7249e6a0e6b2117c061e591ed072c8d57fb5bc6 Mon Sep 17 00:00:00 2001 From: Victor Eijkhout Date: Wed, 26 Jun 2024 17:23:59 -0500 Subject: [PATCH 650/980] Missing meson pass-through argument The `-C-D` phrase should be `-Csetup-args=-D` in a number of places. --- doc/source/building/blas_lapack.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/building/blas_lapack.rst b/doc/source/building/blas_lapack.rst index 6ae5f3f78a82..73ab4ac301aa 100644 --- a/doc/source/building/blas_lapack.rst +++ b/doc/source/building/blas_lapack.rst @@ -16,20 +16,20 @@ plain ``libblas``/``liblapack``. This may vary per platform or over releases. That order, and which libraries are tried, can be changed through the ``blas-order`` and ``lapack-order`` build options, for example:: - $ python -m pip install . -C-Dblas-order=openblas,mkl,blis -C-Dlapack-order=openblas,mkl,lapack + $ python -m pip install . -Csetup-args=-Dblas-order=openblas,mkl,blis -Csetup-args=-Dlapack-order=openblas,mkl,lapack The first suitable library that is found will be used. In case no suitable library is found, the NumPy build will print a warning and then use (slow!) NumPy-internal fallback routines. In order to disallow use of those slow routines, the ``allow-noblas`` build option can be used:: - $ python -m pip install . -C-Dallow-noblas=false + $ python -m pip install . -Csetup-args=-Dallow-noblas=false By default the LP64 (32-bit integer) interface to BLAS and LAPACK will be used. For building against the ILP64 (64-bit integer) interface, one must use the ``use-ilp64`` build option:: - $ python -m pip install . -C-Duse-ilp64=true + $ python -m pip install . -Csetup-args=-Duse-ilp64=true .. _accelerated-blas-lapack-libraries: From 31c75f2a2e86c5651a0fded93bc16e6706c27a1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= <8431159+mtsokol@users.noreply.github.com> Date: Thu, 27 Jun 2024 11:02:27 +0200 Subject: [PATCH 651/980] Update numpy/_core/_add_newdocs.py Co-authored-by: Sebastian Berg --- numpy/_core/_add_newdocs.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 845ed360d2dd..877865c8557b 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1692,7 +1692,8 @@ ``__dlpack_device__`` methods. device : device, optional Device on which to place the created array. Default: ``None``. - For Array-API interoperability only, so must be ``"cpu"`` if passed. 
+ Must be ``"cpu"`` if passed which may allow importing an array + that is not already CPU available. copy : bool, optional Boolean indicating whether or not to copy the input. If ``True``, the copy will be made. If ``False``, the function will never copy, From ec89607436e6230bd04d376834bb4322e8b9e413 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= <8431159+mtsokol@users.noreply.github.com> Date: Thu, 27 Jun 2024 11:02:34 +0200 Subject: [PATCH 652/980] Update numpy/_core/_add_newdocs.py Co-authored-by: Sebastian Berg --- numpy/_core/_add_newdocs.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 877865c8557b..3e5829cce9d9 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1698,6 +1698,8 @@ Boolean indicating whether or not to copy the input. If ``True``, the copy will be made. If ``False``, the function will never copy, and will raise ``BufferError`` in case a copy is deemed necessary. + Passing it requests a copy from the exporter who may or may not + implement the capability. If ``None``, the function will reuse the existing memory buffer if possible and copy otherwise. Default: ``None``. From 26a2e6c882d25eefd447a25ba930dd3721505834 Mon Sep 17 00:00:00 2001 From: Giovanni Del Monte Date: Thu, 27 Jun 2024 12:00:21 +0200 Subject: [PATCH 653/980] BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` (#26762) * fixed bug at line 1058 in file numpy/lib&npyio_impl.py; in function _read(), called by loadtxt() method, when files are read in chunks to reduce memory overhead, max_rows lines were always loaded every time, also in the case max_rows>_loadtxt_chunksize, in which case it loaded chunks with the wrong size. A test has been added in numpy/lib/tests/test_loadtxt.py, to check for the array size loaded for different max_rows, less and greater than _loadtxt_chunksize. * changed numpy/lib/tests/test_loadtxt.py; added further tests in functions at lines test_maxrows_exceeding_chunksize() and test_parametric_unit_discovery() to check if loadtxt() method loads correctly files as a whole and in chunks. It seems that the function _load_from_filelike() works well with file-like streams, but not with file objects. * changed value of filelike variable in file numpy/lib/_npyio_impl.py at line 1045; file was converted to iterable, but not accounted for, then _load_from_fillelike() was not able to read the stream properly until the end. * I forgot to add the new version of test_loadtxt.py with the updated test functions for reading files in chunks... * within file numpy/lib/tests/test_loadtxt.py I reduced the size of the arrays within function test_maxrows_exceeding_chunksize() * add max_rows=10 in the call of loadtxt() within function test_field_growing_cases() to avoid memory allocation issues when the line grows too much. * Update numpy/lib/tests/test_loadtxt.py --------- Co-authored-by: Sebastian Berg --- numpy/lib/_npyio_impl.py | 3 ++- numpy/lib/tests/test_loadtxt.py | 38 +++++++++++++++++++++++++++++---- 2 files changed, 36 insertions(+), 5 deletions(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 0ae4ee2e9386..123679837a43 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1040,6 +1040,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', # Due to chunking, certain error reports are less clear, currently. 
if filelike: data = iter(data) # cannot chunk when reading from file + filelike = False c_byte_converters = False if read_dtype_via_object_chunks == "S": @@ -1055,7 +1056,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', next_arr = _load_from_filelike( data, delimiter=delimiter, comment=comment, quote=quote, imaginary_unit=imaginary_unit, - usecols=usecols, skiplines=skiplines, max_rows=max_rows, + usecols=usecols, skiplines=skiplines, max_rows=chunk_size, converters=converters, dtype=dtype, encoding=encoding, filelike=filelike, byte_converters=byte_converters, diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index 78c84e491c08..0b2f4042e66d 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -970,12 +970,15 @@ def test_parametric_unit_discovery( """Check that the correct unit (e.g. month, day, second) is discovered from the data when a user specifies a unitless datetime.""" # Unit should be "D" (days) due to last entry - data = [generic_data] * 50000 + [long_datum] + data = [generic_data] * nrows + [long_datum] expected = np.array(data, dtype=expected_dtype) + assert len(data) == nrows+1 + assert len(data) == len(expected) # file-like path txt = StringIO("\n".join(data)) a = np.loadtxt(txt, dtype=unitless_dtype) + assert len(a) == len(expected) assert a.dtype == expected.dtype assert_equal(a, expected) @@ -983,11 +986,17 @@ def test_parametric_unit_discovery( fd, fname = mkstemp() os.close(fd) with open(fname, "w") as fh: - fh.write("\n".join(data)) + fh.write("\n".join(data)+"\n") + # loading the full file... a = np.loadtxt(fname, dtype=unitless_dtype) - os.remove(fname) + assert len(a) == len(expected) assert a.dtype == expected.dtype assert_equal(a, expected) + # loading half of the file... 
+ a = np.loadtxt(fname, dtype=unitless_dtype, max_rows=int(nrows/2)) + os.remove(fname) + assert len(a) == int(nrows/2) + assert_equal(a, expected[:int(nrows/2)]) def test_str_dtype_unit_discovery_with_converter(): @@ -1041,5 +1050,26 @@ def test_field_growing_cases(): assert len(res) == 0 for i in range(1, 1024): - res = np.loadtxt(["," * i], delimiter=",", dtype=bytes) + res = np.loadtxt(["," * i], delimiter=",", dtype=bytes, max_rows=10) assert len(res) == i+1 + +@pytest.mark.parametrize("nmax", (10000, 50000, 55000, 60000)) +def test_maxrows_exceeding_chunksize(nmax): + # tries to read all of the file, + # or less, equal, greater than _loadtxt_chunksize + file_length = 60000 + + # file-like path + data = ["a 0.5 1"]*file_length + txt = StringIO("\n".join(data)) + res = np.loadtxt(txt, dtype=str, delimiter=" ", max_rows=nmax) + assert len(res) == nmax + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)) + res = np.loadtxt(fname, dtype=str, delimiter=" ", max_rows=nmax) + os.remove(fname) + assert len(res) == nmax From 75a73e5bf2e5dde7eac2e3a723720a35ce42650f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Thu, 27 Jun 2024 12:50:51 +0200 Subject: [PATCH 654/980] DOC: Update 2.0 docs [skip actions] [skip azp] [skip cirrus] --- doc/source/numpy_2_0_migration_guide.rst | 3 +++ doc/source/release/2.0.0-notes.rst | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 4377ee368e63..42fec6f4e4b0 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -251,7 +251,9 @@ asfarray Use ``np.asarray`` with a float dtype instead. byte_bounds Now it's available under ``np.lib.array_utils.byte_bounds`` cast Use ``np.asarray(arr, dtype=dtype)`` instead. cfloat Use ``np.complex128`` instead. +charrarray It's still available as ``np.char.chararray``. clongfloat Use ``np.clongdouble`` instead. +compare_chararrays It's still available as ``np.char.compare_chararrays``. compat There's no replacement, as Python 2 is no longer supported. complex\_ Use ``np.complex128`` instead. cumproduct Use ``np.cumprod`` instead. @@ -266,6 +268,7 @@ find_common_type Use ``numpy.promote_types`` or ``numpy.result_type`` ins To achieve semantics for the ``scalar_types`` argument, use ``numpy.result_type`` and pass the Python values ``0``, ``0.0``, or ``0j``. +format_parser It's still available as ``np.rec.format_parser``. get_array_wrap float\_ Use ``np.float64`` instead. geterrobj Use the np.errstate context manager instead. diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst index f4e7ac7da3b4..e711b130f813 100644 --- a/doc/source/release/2.0.0-notes.rst +++ b/doc/source/release/2.0.0-notes.rst @@ -209,7 +209,8 @@ NumPy 2.0 Python API removals * ``np.tracemalloc_domain`` is now only available from ``np.lib``. -* ``np.recfromcsv`` and ``recfromtxt`` are now only available from ``np.lib.npyio``. +* ``np.recfromcsv`` and ``np.recfromtxt`` were removed from the main namespace. + Use ``np.genfromtxt`` with comma delimiter instead. 
* ``np.issctype``, ``np.maximum_sctype``, ``np.obj2sctype``, ``np.sctype2char``, ``np.sctypes``, ``np.issubsctype`` were all removed from the From 2398d9cbc4210b20666c2469d1b1cc94bcbad2d3 Mon Sep 17 00:00:00 2001 From: Jules Date: Fri, 28 Jun 2024 14:50:06 +0800 Subject: [PATCH 655/980] DOC+TST: Specify np.nan sort order in partition functions --- numpy/_core/fromnumeric.py | 4 +++- numpy/_core/tests/test_multiarray.py | 10 ++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 596ff9fa852c..1debcaa8295e 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -823,6 +823,8 @@ def partition(a, kth, axis=-1, kind='introselect', order=None): the real parts except when they are equal, in which case the order is determined by the imaginary parts. + The sort order of ``np.nan`` is bigger than ``np.inf``. + Examples -------- >>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0]) @@ -923,7 +925,7 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None): is unstable, and hence the returned indices are not guaranteed to be the earliest/latest occurrence of the element. - The treatment of ``np.nan`` in the input array is undefined. + The sort order of ``np.nan`` is bigger than ``np.inf``. See `partition` for notes on the different selection algorithms. diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 6923accbab66..5a68a846a8f2 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10211,6 +10211,16 @@ def test_partition_fp(N, dtype): assert_arr_partitioned(np.sort(arr)[k], k, arr[np.argpartition(arr, k, kind='introselect')]) + # Check that `np.inf < np.nan` + # This follows np.sort + arr[0] = np.nan + arr[1] = np.inf + o1 = np.partition(arr, -2, kind='introselect') + o2 = arr[np.argpartition(arr, -2, kind='introselect')] + for out in [o1,o2]: + assert_(np.isnan(out[-1])) + assert_equal(out[-2], np.inf) + def test_cannot_assign_data(): a = np.arange(10) b = np.linspace(0, 1, 10) From 3815089fa10d4d75b3566b3055a37c91847bfb2e Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Fri, 28 Jun 2024 14:54:50 +0200 Subject: [PATCH 656/980] CI,TST: Fix meson tests needing gfortran [wheel build] --- .github/workflows/wheels.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index e1267e8b9fc3..31cac63eafc7 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -130,6 +130,10 @@ jobs: - name: Setup macOS if: matrix.buildplat[0] == 'macos-13' || matrix.buildplat[0] == 'macos-14' run: | + # Needed due to https://github.com/actions/runner-images/issues/3371 + echo "FC=gfortran-10" >> "$GITHUB_ENV" + echo "F77=gfortran-10" >> "$GITHUB_ENV" + echo "F90=gfortran-10" >> "$GITHUB_ENV" if [[ ${{ matrix.buildplat[2] }} == 'accelerate' ]]; then # macosx_arm64 and macosx_x86_64 with accelerate # only target Sonoma onwards From ce3829b65041f873e35628fd91782f4a42d5d7cf Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 18 Jun 2024 18:21:54 +0530 Subject: [PATCH 657/980] DOC, TST: Add `>>> import numpy as np` stub to docstrings --- .../_core/code_generators/ufunc_docstrings.py | 130 +++++++++++++++++- 1 file changed, 127 insertions(+), 3 deletions(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 
c7bf82fb2a19..012c20ae98e3 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -84,6 +84,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> x = np.array([-1.2, 1.2]) >>> np.absolute(x) array([ 1.2, 1.2]) @@ -136,6 +137,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.add(1.0, 4.0) 5.0 >>> x1 = np.arange(9.0).reshape((3, 3)) @@ -203,6 +205,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + We expect the arccos of 1 to be 0, and of -1 to be pi: >>> np.arccos([1, -1]) @@ -263,6 +267,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.arccosh([np.e, 10.0]) array([ 1.65745445, 2.99322285]) >>> np.arccosh(1) @@ -315,6 +320,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.arcsin(1) # pi/2 1.5707963267948966 >>> np.arcsin(-1) # -pi/2 @@ -366,6 +372,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.arcsinh(np.array([np.e, 10.0])) array([ 1.72538256, 2.99822295]) @@ -419,6 +426,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np We expect the arctan of 0 to be 0, and of 1 to be pi/4: >>> np.arctan([0, 1]) @@ -498,6 +506,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np Consider four points in different quadrants: >>> x = np.array([-1, +1, +1, -1]) @@ -567,6 +576,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.arctanh([0, -0.5]) array([ 0. , -0.54930614]) @@ -603,6 +613,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The number 13 is represented by ``00001101``. Likewise, 17 is represented by ``00010001``. The bit-wise AND of 13 and 17 is therefore ``000000001``, or 1: @@ -665,6 +677,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The number 13 has the binary representation ``00001101``. Likewise, 16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is then ``00011101``, or 29: @@ -732,6 +746,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The number 13 is represented by ``00001101``. Likewise, 17 is represented by ``00010001``. 
The bit-wise XOR of 13 and 17 is therefore ``00011100``, or 28: @@ -786,6 +802,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.ceil(a) array([-1., -1., -0., 1., 2., 2., 2.]) @@ -822,6 +840,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.trunc(a) array([-1., -1., -0., 0., 1., 1., 2.]) @@ -856,6 +875,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.conjugate(1+2j) (1-2j) @@ -894,6 +914,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.cos(np.array([0, np.pi/2, np.pi])) array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) >>> @@ -931,6 +952,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.cosh(0) 1.0 @@ -966,6 +988,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np Convert a radian array to degrees >>> rad = np.arange(12.)*np.pi/6 @@ -1009,6 +1032,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.rad2deg(np.pi/2) 90.0 @@ -1052,6 +1076,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.heaviside([-1.5, 0, 2.0], 0.5) array([ 0. , 0.5, 1. ]) >>> np.heaviside([-1.5, 0, 2.0], 1) @@ -1091,6 +1116,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.divide(2.0, 4.0) 0.5 >>> x1 = np.arange(9.0).reshape((3, 3)) @@ -1136,6 +1162,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.equal([0, 1, 3], np.arange(3)) array([ True, True, False]) @@ -1199,6 +1226,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np Plot the magnitude and phase of ``exp(x)`` in the complex plane: >>> import matplotlib.pyplot as plt @@ -1248,6 +1276,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.exp2([2, 3]) array([ 4., 8.]) @@ -1281,6 +1310,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to about 32 significant digits. This example shows the superiority of expm1 in this case. 
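
A quick aside on the numerics here: the cancellation that ``expm1`` avoids is
easy to reproduce with the public API alone. A minimal sketch (not part of the
patch; the printed values assume IEEE-754 float64, and the first one matches
the docstring above):

    import numpy as np

    x = 1e-10
    naive = np.exp(x) - 1.0   # cancellation leaves only ~8 correct digits
    stable = np.expm1(x)      # evaluates exp(x) - 1 without the subtraction

    print(naive)   # 1.000000082740371e-10
    print(stable)  # 1.00000000005e-10, correct to full precision
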
@@ -1319,6 +1349,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.fabs(-1) 1.0 >>> np.fabs([-1.2, 1.2]) @@ -1358,6 +1389,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.floor(a) array([-2., -2., -1., 0., 1., 1., 2.]) @@ -1396,6 +1428,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.floor_divide(7,3) 2 >>> np.floor_divide([1., 2., 3., 4.], 2.5) @@ -1449,6 +1482,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.fmod([-3, -2, -1, 1, 2, 3], 2) array([-1, 0, -1, 1, 0, 1]) >>> np.remainder([-3, -2, -1, 1, 2, 3], 2) @@ -1493,6 +1527,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.greater([4,2],[2,2]) array([ True, False]) @@ -1530,6 +1565,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.greater_equal([4, 2, 1], [2, 2, 2]) array([ True, True, False]) @@ -1567,6 +1603,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3))) array([[ 5., 5., 5.], [ 5., 5., 5.], @@ -1630,6 +1667,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + We've seen that 13 is represented by ``00001101``. The invert or bit-wise NOT of 13 is then: @@ -1706,6 +1745,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.isfinite(1) True >>> np.isfinite(0) @@ -1762,6 +1802,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.isinf(np.inf) True >>> np.isinf(np.nan) @@ -1807,6 +1848,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.isnan(np.nan) True >>> np.isnan(np.inf) @@ -1840,6 +1882,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.isnat(np.datetime64("NaT")) True >>> np.isnat(np.datetime64("2016-01-01")) @@ -1880,6 +1923,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.binary_repr(5) '101' >>> np.left_shift(5, 2) @@ -1935,6 +1979,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.less([1, 2], [2, 2]) array([ True, False]) @@ -1971,6 +2016,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.less_equal([4, 2, 1], [2, 2, 2]) array([False, True, True]) @@ -2036,6 +2082,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.log([1, np.e, np.e**2, 0]) array([ 0., 1., 2., -inf]) @@ -2090,6 +2137,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.log10([1e-15, -3.]) array([-15., nan]) @@ -2138,6 +2186,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> x = np.array([0, 1, 2, 2**4]) >>> np.log2(x) array([-inf, 0., 1., 4.]) @@ -2181,6 +2230,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> prob1 = np.log(1e-50) >>> prob2 = np.log(2.5e-50) >>> prob12 = np.logaddexp(prob1, prob2) @@ -2224,6 +2274,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> prob1 = np.log2(1e-50) >>> prob2 = np.log2(2.5e-50) >>> prob12 = np.logaddexp2(prob1, prob2) @@ -2283,6 +2334,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.log1p(1e-99) 1e-99 >>> np.log(1 + 1e-99) @@ -2315,6 +2367,7 @@ def 
add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.logical_and(True, False) False >>> np.logical_and([True, False], [False, False]) @@ -2358,6 +2411,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.logical_not(3) False >>> np.logical_not([True, False, 0, 1]) @@ -2394,6 +2448,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.logical_or(True, False) True >>> np.logical_or([True, False], [False, False]) @@ -2437,6 +2492,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.logical_xor(True, False) True >>> np.logical_xor([True, True, False, False], [True, False, True, False]) @@ -2499,6 +2555,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.maximum([2, 3, 4], [1, 5, 2]) array([2, 5, 4]) @@ -2558,6 +2615,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.minimum([2, 3, 4], [1, 5, 2]) array([1, 3, 2]) @@ -2618,6 +2676,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.fmax([2, 3, 4], [1, 5, 2]) array([ 2., 5., 4.]) @@ -2676,6 +2735,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.fmin([2, 3, 4], [1, 5, 2]) array([1, 3, 2]) @@ -2805,6 +2865,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np For 2-D arrays it is the matrix product: >>> a = np.array([[1, 0], @@ -2908,6 +2969,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Get the projected size along a given normal for an array of vectors. >>> v = np.array([[0., 5., 0.], [0., 0., 10.], [0., 6., 8.]]) @@ -2951,6 +3014,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.modf([0, 3.5]) (array([ 0. , 0.5]), array([ 0., 3.])) >>> np.modf(-0.5) @@ -2981,6 +3045,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.multiply(2.0, 4.0) 8.0 @@ -3021,6 +3086,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.negative([1.,-1.]) array([-1., 1.]) @@ -3057,6 +3123,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> x1 = np.array(([1., -1.])) >>> np.positive(x1) @@ -3095,6 +3162,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.not_equal([1.,2.], [1., 3.]) array([False, True]) >>> np.not_equal([1, 2], [[1, 3],[1, 4]]) @@ -3159,6 +3227,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Cube each element in an array. >>> x1 = np.arange(6) @@ -3246,6 +3316,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Cube each element in a list. >>> x1 = range(6) @@ -3309,6 +3381,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Convert a degree array to radians >>> deg = np.arange(12.) * 30. @@ -3353,6 +3427,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.deg2rad(180) 3.1415926535897931 @@ -3387,6 +3462,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.reciprocal(2.) 
0.5 >>> np.reciprocal([1, 2., 3.33]) @@ -3443,6 +3519,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.remainder([4, 7], [2, 3]) array([0, 1]) >>> np.remainder(np.arange(7), 5) @@ -3494,6 +3571,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.divmod(np.arange(5), 3) (array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1])) @@ -3537,6 +3615,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.binary_repr(10) '1010' >>> np.right_shift(10, 1) @@ -3585,6 +3664,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.rint(a) array([-2., -2., -0., 0., 2., 2., 2.]) @@ -3624,6 +3704,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.sign([-5., 4.5]) array([-1., 1.]) >>> np.sign(0) @@ -3651,6 +3732,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.signbit(-1.2) True >>> np.signbit(np.array([1, -2.3, 2.1])) @@ -3681,6 +3763,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.copysign(1.3, -1) -1.3 >>> 1/np.copysign(0, 1) @@ -3716,6 +3799,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> eps = np.finfo(np.float64).eps >>> np.nextafter(1, 2) == eps + 1 True @@ -3751,6 +3835,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.spacing(1) == np.finfo(np.float64).eps True @@ -3792,6 +3877,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Print sine of one angle: >>> np.sin(np.pi/2.) @@ -3845,6 +3932,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.sinh(0) 0.0 >>> np.sinh(np.pi*1j/2) @@ -3903,6 +3991,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.sqrt([1,4,9]) array([ 1., 2., 3.]) @@ -3937,6 +4026,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.cbrt([1,8,27]) array([ 1., 2., 3.]) @@ -3966,6 +4056,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.square([-1j, 1]) array([-1.-0.j, 1.+0.j]) @@ -3994,6 +4085,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.subtract(1.0, 4.0) -3.0 @@ -4046,6 +4138,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> from math import pi >>> np.tan(np.array([-pi,pi/2,pi])) array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16]) @@ -4099,6 +4192,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.tanh((0, np.pi*1j, np.pi*1j/2)) array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. 
+1.63317787e+16j]) @@ -4154,6 +4248,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> x = np.arange(9) >>> y1, y2 = np.frexp(x) >>> y1 @@ -4201,6 +4296,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.ldexp(5, np.arange(4)) array([ 5., 10., 20., 40.], dtype=float16) @@ -4232,6 +4328,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.gcd(12, 20) 4 >>> np.gcd.reduce([15, 25, 35]) @@ -4263,6 +4360,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.lcm(12, 20) 60 >>> np.lcm.reduce([3, 12, 20]) @@ -4303,6 +4401,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.bitwise_count(1023) 10 >>> a = np.array([2**i - 1 for i in range(16)]) @@ -4335,6 +4434,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(['Grace Hopper Conference', 'Open Source Day']) >>> np.strings.str_len(a) array([23, 15]) @@ -4404,6 +4504,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(['a', 'b', '0']) >>> np.strings.isdigit(a) array([False, False, True]) @@ -4464,10 +4565,11 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(['a', '1', 'a1', '(', '']) >>> np.strings.isalnum(a) array([ True, True, True, False, False]) - + """) add_newdoc('numpy._core.umath', 'islower', @@ -4493,6 +4595,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.islower("GHC") array(False) >>> np.strings.islower("ghc") @@ -4523,8 +4626,9 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.isupper("GHC") - array(True) + array(True) >>> a = np.array(["hello", "HELLO", "Hello"]) >>> np.strings.isupper(a) array([False, True, False]) @@ -4553,12 +4657,13 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.istitle("Numpy Is Great") array(True) >>> np.strings.istitle("Numpy is great") array(False) - + """) add_newdoc('numpy._core.umath', 'isdecimal', @@ -4587,6 +4692,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.isdecimal(['12345', '4.99', '123ABC', '']) array([ True, False, False, False]) @@ -4618,6 +4724,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.isnumeric(['123', '123abc', '9.0', '1/4', 'VIII']) array([ True, False, False, False, False]) @@ -4654,6 +4761,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(["NumPy is a Python library"]) >>> np.strings.find(a, "Python", 0, None) array([11]) @@ -4722,6 +4830,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> a = np.array(["Computer Science"]) >>> np.strings.index(a, "Science") array([9]) @@ -4794,6 +4904,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(["Computer Science"]) >>> np.strings.rindex(a, "Science") array([9]) @@ -4867,6 +4978,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> s = np.array(['foo', 'bar']) >>> s array(['foo', 'bar'], dtype='>> import numpy as np >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> 
np.strings.center(c, width=9) @@ -4954,6 +5067,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.ljust(c, width=3) array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.rjust(a, width=3) array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> np.strings.zfill(['1', '-1', '+1'], 3) array(['001', '-01', '+01'], dtype='>> import numpy as np + The ufunc is used most easily via ``np.strings.partition``, which calls it after calculating the indices:: @@ -5115,6 +5233,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The ufunc is used most easily via ``np.strings.rpartition``, which calls it after calculating the indices:: @@ -5157,6 +5277,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The ufunc is used most easily via ``np.strings.partition``, which calls it under the hood:: @@ -5200,6 +5322,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The ufunc is used most easily via ``np.strings.rpartition``, which calls it after calculating the indices:: From 8e52d4ff1fee49a851e00052a5fe892038901172 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 28 Jun 2024 10:25:02 -0600 Subject: [PATCH 658/980] TST: fix 'spin test single_test' for future versions of spin --- .spin/cmds.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.spin/cmds.py b/.spin/cmds.py index d98908666a33..9ad1d195650d 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -254,6 +254,9 @@ def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): pytest_args = ('numpy',) if '-m' not in pytest_args: + if len(pytest_args) == 1 and not tests: + tests = pytest_args[0] + pytest_args = () if markexpr != "full": pytest_args = ('-m', markexpr) + pytest_args From b017362f56251068205b4143da7ce80d40f6d7f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 28 Jun 2024 17:14:34 +0000 Subject: [PATCH 659/980] MAINT: Bump github/codeql-action from 3.25.10 to 3.25.11 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.10 to 3.25.11. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/23acc5c183826b7a8a97bce3cecc52db901f8251...b611370bb5703a7efb587f9d136a52ea24c5c38c) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index b48108f8cbe7..981c5b3f54c2 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 + uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. 
@@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 + uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 + uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 1a7b50a75841..40f9208f09a2 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@23acc5c183826b7a8a97bce3cecc52db901f8251 # v2.1.27 + uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # v2.1.27 with: sarif_file: results.sarif From 260c1ac68a2a03cd96984ff602aa6129076cc8d3 Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 18 Jun 2024 20:23:01 +0530 Subject: [PATCH 660/980] DOC, API: Add `>>> import numpy as np` stub to `numpy/_core/` --- numpy/_core/_add_newdocs.py | 104 ++++++++++++++++++ numpy/_core/_add_newdocs_scalars.py | 1 + numpy/_core/_asarray.py | 1 + numpy/_core/_ufunc_config.py | 8 ++ numpy/_core/arrayprint.py | 9 ++ numpy/_core/code_generators/generate_umath.py | 1 + .../_core/code_generators/ufunc_docstrings.py | 18 ++- numpy/_core/defchararray.py | 27 +++-- numpy/_core/fromnumeric.py | 41 ++++++- numpy/_core/function_base.py | 3 + numpy/_core/getlimits.py | 2 + numpy/_core/memmap.py | 1 + numpy/_core/multiarray.py | 23 ++++ numpy/_core/numeric.py | 53 +++++++++ numpy/_core/records.py | 2 + numpy/_core/shape_base.py | 13 ++- numpy/_core/strings.py | 37 ++++++- 17 files changed, 321 insertions(+), 23 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 34da738675fb..54af7bf5d192 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -45,6 +45,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> type(fl) @@ -72,6 +73,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(5) >>> fl = x.flat >>> fl.base is x @@ -86,6 +88,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> fl.coords @@ -104,6 +107,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> fl.index @@ -131,6 +135,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> x array([[0, 1, 2], @@ -321,6 +326,8 @@ Here is how we might write an ``iter_add`` function, using the Python iterator protocol: + >>> import numpy as np + >>> def iter_add_py(x, y, out=None): ... addop = np.add ... 
it = np.nditer([x, y, out], [], @@ -426,6 +433,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(10) >>> y = x + 1 >>> it = np.nditer([x, y]) @@ -542,6 +550,7 @@ [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified the first iter's axes as [1] + >>> import numpy as np >>> a = np.arange(12).reshape(2, 3, 2) >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) >>> for x in i: @@ -616,6 +625,7 @@ Manually adding two vectors, using broadcasting: + >>> import numpy as np >>> x = np.array([[1], [2], [3]]) >>> y = np.array([4, 5, 6]) >>> b = np.broadcast(x, y) @@ -644,6 +654,8 @@ Examples -------- + + >>> import numpy as np >>> x = np.array([[1], [2], [3]]) >>> y = np.array([4, 5, 6]) >>> b = np.broadcast(x, y) @@ -669,6 +681,8 @@ Examples -------- + + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -686,6 +700,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -701,6 +716,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -715,6 +731,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -729,6 +746,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -743,6 +761,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -767,6 +786,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -870,6 +890,7 @@ Examples -------- + >>> import numpy as np >>> np.array([1, 2, 3]) array([1, 2, 3]) @@ -977,6 +998,7 @@ Convert a list into an array: >>> a = [1, 2] + >>> import numpy as np >>> np.asarray(a) array([1, 2]) @@ -1073,6 +1095,7 @@ Convert a list into an array: >>> a = [1, 2] + >>> import numpy as np >>> np.asanyarray(a) array([1, 2]) @@ -1120,6 +1143,7 @@ -------- Starting with a Fortran-contiguous array: + >>> import numpy as np >>> x = np.ones((2, 3), order='F') >>> x.flags['F_CONTIGUOUS'] True @@ -1185,6 +1209,7 @@ -------- Starting with a C-contiguous array: + >>> import numpy as np >>> x = np.ones((2, 3), order='C') >>> x.flags['C_CONTIGUOUS'] True @@ -1266,6 +1291,7 @@ Examples -------- + >>> import numpy as np >>> np.empty([2, 2]) array([[ -9.74499359e+001, 6.69583040e-309], [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized @@ -1328,6 +1354,7 @@ Examples -------- + >>> import numpy as np >>> np.zeros(5) array([ 0., 0., 0., 0., 0.]) @@ -1416,6 +1443,7 @@ Examples -------- + >>> import numpy as np >>> np.fromstring('1 2', dtype=int, sep=' ') array([1, 2]) >>> np.fromstring('1, 2', dtype=int, sep=',') @@ -1456,6 +1484,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array(["a", "b", "cde"]) >>> b = np.array(["a", "a", "dec"]) >>> np.char.compare_chararrays(a, b, ">", True) @@ -1499,6 +1528,7 @@ Examples -------- + >>> import numpy as np >>> iterable = (x*x for x in range(5)) >>> np.fromiter(iterable, float) array([ 0., 1., 4., 9., 16.]) @@ -1583,6 +1613,7 @@ -------- Construct an ndarray: + >>> import numpy as np >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]), ... 
('temp', float)]) >>> x = np.zeros((1,), dtype=dt) @@ -1663,6 +1694,7 @@ Examples -------- + >>> import numpy as np >>> s = b'hello world' >>> np.frombuffer(s, dtype='S1', count=5, offset=6) array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1') @@ -1818,6 +1850,7 @@ Examples -------- + >>> import numpy as np >>> np.arange(3) array([0, 1, 2]) >>> np.arange(3.0) @@ -1893,6 +1926,7 @@ Examples -------- + >>> import numpy as np >>> np.promote_types('f4', 'f8') dtype('float64') @@ -2070,6 +2104,7 @@ Examples -------- + >>> import numpy as np >>> a = np.arange(25).reshape(5,5) >>> b = np.arange(5) >>> c = np.arange(6).reshape(2,3) @@ -2347,6 +2382,7 @@ First mode, `buffer` is None: + >>> import numpy as np >>> np.ndarray(shape=(2,2), dtype=float, order='F') array([[0.0e+000, 0.0e+000], # random [ nan, 2.5e-323]]) @@ -2397,6 +2433,7 @@ -------- The base of an array that owns its memory is None: + >>> import numpy as np >>> x = np.array([1,2,3,4]) >>> x.base is None True @@ -2466,6 +2503,7 @@ Examples -------- + >>> import numpy as np >>> import ctypes >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32) >>> x @@ -2534,6 +2572,7 @@ Examples -------- + >>> import numpy as np >>> x = np.sqrt([1+0j, 0+1j]) >>> x.imag array([ 0. , 0.70710678]) @@ -2549,6 +2588,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1,2,3], dtype=np.float64) >>> x.itemsize 8 @@ -2645,6 +2685,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(1, 7).reshape(2, 3) >>> x array([[1, 2, 3], @@ -2689,6 +2730,7 @@ Examples -------- + >>> import numpy as np >>> x = np.zeros((3,5,2), dtype=np.complex128) >>> x.nbytes 480 @@ -2704,6 +2746,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> x.ndim 1 @@ -2720,6 +2763,7 @@ Examples -------- + >>> import numpy as np >>> x = np.sqrt([1+0j, 0+1j]) >>> x.real array([ 1. , 0.70710678]) @@ -2751,6 +2795,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3, 4]) >>> x.shape (4,) @@ -2798,6 +2843,7 @@ Examples -------- + >>> import numpy as np >>> x = np.zeros((3, 5, 2), dtype=np.complex128) >>> x.size 30 @@ -2846,6 +2892,7 @@ Examples -------- + >>> import numpy as np >>> y = np.reshape(np.arange(2*3*4), (2,3,4)) >>> y array([[[ 0, 1, 2, 3], @@ -2883,6 +2930,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], @@ -2920,6 +2968,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], @@ -3224,6 +3273,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 2.5]) >>> x array([1. , 2. 
, 2.5]) @@ -3258,6 +3308,7 @@ Examples -------- + >>> import numpy as np >>> A = np.array([1, 256, 8755], dtype=np.int16) >>> list(map(hex, A)) ['0x1', '0x100', '0x2233'] @@ -3393,6 +3444,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([[1,2,3],[4,5,6]], order='F') >>> y = x.copy() @@ -3531,6 +3583,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([1, 2]) >>> a.fill(0) >>> a @@ -3590,6 +3643,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1,2], [3,4]]) >>> a.flatten() array([1, 2, 3, 4]) @@ -3622,6 +3676,7 @@ Examples -------- + >>> import numpy as np >>> x = np.diag([1.+1.j]*2) >>> x[1, 1] = 2 + 4.j >>> x @@ -3682,6 +3737,7 @@ Examples -------- + >>> import numpy as np >>> np.random.seed(123) >>> x = np.random.randint(9, size=(3, 3)) >>> x @@ -3904,6 +3960,8 @@ Shrinking an array: array is flattened (in the order that the data are stored in memory), resized, and reshaped: + >>> import numpy as np + >>> a = np.array([[0, 1], [2, 3]], order='C') >>> a.resize((2, 1)) >>> a @@ -4001,6 +4059,7 @@ Examples -------- + >>> import numpy as np >>> x = np.eye(3) >>> x.getfield(np.float64) array([[1., 0., 0.], @@ -4070,6 +4129,7 @@ Examples -------- + >>> import numpy as np >>> y = np.array([[3, 1, 7], ... [2, 0, 0], ... [8, 5, 9]]) @@ -4141,6 +4201,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1,4], [3,1]]) >>> a.sort(axis=1) >>> a @@ -4212,6 +4273,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([3, 4, 2, 1]) >>> a.partition(3) >>> a @@ -4374,6 +4436,7 @@ For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``, except that ``tolist`` changes numpy scalars to Python scalars: + >>> import numpy as np >>> a = np.uint32([1, 2]) >>> a_list = list(a) >>> a_list @@ -4437,6 +4500,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([[0, 1], [2, 3]], dtype='>> x.tobytes() b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00' @@ -4508,6 +4572,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], @@ -4593,6 +4658,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) Viewing array data using a different type and dtype: @@ -4713,6 +4779,7 @@ -------- Use frompyfunc to add broadcasting to the Python function ``oct``: + >>> import numpy as np >>> oct_array = np.frompyfunc(oct, 1, 1) >>> oct_array(np.array((10, 30, 100))) array(['0o12', '0o36', '0o144'], dtype=object) @@ -4959,6 +5026,7 @@ Examples -------- + >>> import numpy as np >>> np.add.identity 0 >>> np.multiply.identity @@ -4983,6 +5051,7 @@ Examples -------- + >>> import numpy as np >>> np.add.nargs 3 >>> np.multiply.nargs @@ -5001,6 +5070,7 @@ Examples -------- + >>> import numpy as np >>> np.add.nin 2 >>> np.multiply.nin @@ -5023,6 +5093,7 @@ Examples -------- + >>> import numpy as np >>> np.add.nout 1 >>> np.multiply.nout @@ -5047,6 +5118,7 @@ Examples -------- + >>> import numpy as np >>> np.add.ntypes 18 >>> np.multiply.ntypes @@ -5073,6 +5145,7 @@ Examples -------- + >>> import numpy as np >>> np.add.types ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', @@ -5120,6 +5193,7 @@ Examples -------- + >>> import numpy as np >>> np.linalg._umath_linalg.det.signature '(m,m)->()' >>> np.matmul.signature @@ -5217,6 +5291,7 @@ Examples -------- + >>> import numpy as np >>> np.multiply.reduce([2,3,5]) 30 @@ -5315,6 +5390,7 @@ -------- 1-D array examples: + >>> 
import numpy as np >>> np.add.accumulate([2, 3, 5]) array([ 2, 5, 10]) >>> np.multiply.accumulate([2, 3, 5]) @@ -5412,6 +5488,7 @@ -------- To take the running sum of four successive values: + >>> import numpy as np >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] array([ 6, 10, 14, 18]) @@ -5553,6 +5630,7 @@ -------- Set items 0 and 1 to their negative values: + >>> import numpy as np >>> a = np.array([1, 2, 3, 4]) >>> np.negative.at(a, [0, 1]) >>> a @@ -5624,6 +5702,7 @@ -------- This API requires passing dtypes, define them for convenience: + >>> import numpy as np >>> int32 = np.dtype("int32") >>> float32 = np.dtype("float32") @@ -5773,6 +5852,7 @@ -------- Using array-scalar type: + >>> import numpy as np >>> np.dtype(np.int16) dtype('int16') @@ -5842,6 +5922,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype('i4') >>> x.alignment 4 @@ -5870,6 +5951,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype('i2') >>> dt.byteorder '=' @@ -5901,6 +5983,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype(float) >>> x.char 'd' @@ -5921,6 +6004,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype(float) >>> x.descr [('', '>> import numpy as np >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> print(dt.fields) {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} @@ -5975,6 +6061,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) >>> x.flags 16 @@ -6013,6 +6100,8 @@ Examples -------- + + >>> import numpy as np >>> dt = np.dtype('i2') >>> dt.isbuiltin 1 @@ -6051,6 +6140,7 @@ Examples -------- + >>> import numpy as np >>> arr = np.array([[1, 2], [3, 4]]) >>> arr.dtype dtype('int64') @@ -6084,6 +6174,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype('i4') >>> dt.kind 'i' @@ -6114,6 +6205,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype(float, metadata={"key": "value"}) >>> dt.metadata["key"] 'value' @@ -6144,6 +6236,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype(float) >>> x.name 'float64' @@ -6177,6 +6270,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype(str) >>> dt.num 19 @@ -6195,6 +6289,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype(('i4', 4)) >>> dt.shape (4,) @@ -6214,6 +6309,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype(float) >>> x.ndim 0 @@ -6249,6 +6345,7 @@ Examples -------- + >>> import numpy as np >>> x = numpy.dtype('8f') >>> x.subdtype (dtype('float32'), (8,)) @@ -6270,6 +6367,7 @@ Examples -------- + >>> import numpy as np >>> x = numpy.dtype('8f') >>> x.base dtype('float32') @@ -6325,6 +6423,7 @@ >>> sys_is_le = sys.byteorder == 'little' >>> native_code = '<' if sys_is_le else '>' >>> swapped_code = '>' if sys_is_le else '<' + >>> import numpy as np >>> native_dt = np.dtype(native_code+'i2') >>> swapped_dt = np.dtype(swapped_code+'i2') >>> native_dt.newbyteorder('S') == swapped_dt @@ -6500,6 +6599,7 @@ Examples -------- + >>> import numpy as np >>> # Some important days in July ... bdd = np.busdaycalendar( ... 
holidays=['2011-07-01', '2011-07-04', '2011-07-17']) @@ -6551,6 +6651,7 @@ Examples -------- + >>> import numpy as np >>> from numpy.lib.array_utils import normalize_axis_index >>> normalize_axis_index(0, ndim=3) 0 @@ -6593,6 +6694,7 @@ Examples -------- + >>> import numpy as np >>> dt_25s = np.dtype('timedelta64[25s]') >>> np.datetime_data(dt_25s) ('s', 25) @@ -6939,6 +7041,8 @@ def refer_to_array_attribute(attr, method=True): Examples -------- + >>> import numpy as np + >>> from numpy.dtypes import StringDType >>> np.array(["hello", "world"], dtype=StringDType()) array(["hello", "world"], dtype=StringDType()) diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index b363aa9a64a1..d7f2853e94ca 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -327,6 +327,7 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): Examples -------- + >>> import numpy as np >>> np.int64(-2).is_integer() True >>> np.uint32(5).is_integer() diff --git a/numpy/_core/_asarray.py b/numpy/_core/_asarray.py index 75eabb21f996..2908813e7747 100644 --- a/numpy/_core/_asarray.py +++ b/numpy/_core/_asarray.py @@ -75,6 +75,7 @@ def require(a, dtype=None, requirements=None, *, like=None): Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2,3) >>> x.flags C_CONTIGUOUS : True diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index fe09ff873c86..d60e7cbbda97 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -74,6 +74,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): Examples -------- + >>> import numpy as np >>> orig_settings = np.seterr(all='ignore') # seterr to known value >>> np.int16(32000) * np.int16(3) 30464 @@ -130,6 +131,7 @@ def geterr(): Examples -------- + >>> import numpy as np >>> np.geterr() {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'} >>> np.arange(3.) / np.arange(3.) # doctest: +SKIP @@ -176,6 +178,7 @@ def setbufsize(size): -------- When exiting a `numpy.errstate` context manager the bufsize is restored: + >>> import numpy as np >>> with np.errstate(): ... np.setbufsize(4096) ... print(np.getbufsize()) @@ -204,6 +207,7 @@ def getbufsize(): Examples -------- + >>> import numpy as np >>> np.getbufsize() 8192 @@ -260,6 +264,8 @@ def seterrcall(func): ... print("Floating point error (%s), with flag %s" % (type, flag)) ... + >>> import numpy as np + >>> orig_handler = np.seterrcall(err_handler) >>> orig_err = np.seterr(all='call') @@ -327,6 +333,7 @@ def geterrcall(): Examples -------- + >>> import numpy as np >>> np.geterrcall() # we did not yet set a handler, returns None >>> orig_settings = np.seterr(all='call') @@ -394,6 +401,7 @@ class errstate: Examples -------- + >>> import numpy as np >>> olderr = np.seterr(all='ignore') # Set error handling to known state. >>> np.arange(3) / 0. 
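
As the ``errstate`` docstring above notes, the context manager restores the
previous error-handling settings on exit. A minimal self-contained sketch of
that round trip (not part of the patch; only the public ``np.errstate`` and
``np.geterr`` API is used):

    import numpy as np

    print(np.geterr()["divide"])          # 'warn' by default
    with np.errstate(divide="ignore", invalid="ignore"):
        r = np.array([1.0, 0.0]) / 0.0    # [inf, nan], no warnings emitted
    print(np.geterr()["divide"])          # 'warn' again: settings restored
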
diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 93e803a8216d..3b9a2ef644a0 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -251,6 +251,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, -------- Floating point precision can be set: + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.array([1.123456789]) [1.1235] @@ -344,6 +345,7 @@ def get_printoptions(): Examples -------- + >>> import numpy as np >>> np.get_printoptions() {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None} @@ -378,6 +380,7 @@ def printoptions(*args, **kwargs): Examples -------- + >>> import numpy as np >>> from numpy.testing import assert_equal >>> with np.printoptions(precision=2): @@ -739,6 +742,7 @@ def array2string(a, max_line_width=None, precision=None, Examples -------- + >>> import numpy as np >>> x = np.array([1e-16,1,2,3]) >>> np.array2string(x, precision=2, separator=',', ... suppress_small=True) @@ -1164,6 +1168,7 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', Examples -------- + >>> import numpy as np >>> np.format_float_scientific(np.float32(np.pi)) '3.1415927e+00' >>> s = np.float32(1.23e24) @@ -1251,6 +1256,7 @@ def format_float_positional(x, precision=None, unique=True, Examples -------- + >>> import numpy as np >>> np.format_float_positional(np.float32(np.pi)) '3.1415927' >>> np.format_float_positional(np.float16(np.pi)) @@ -1505,6 +1511,7 @@ def dtype_is_implied(dtype): Examples -------- + >>> import numpy as np >>> np._core.arrayprint.dtype_is_implied(int) True >>> np.array([1, 2, 3], int) @@ -1650,6 +1657,7 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): Examples -------- + >>> import numpy as np >>> np.array_repr(np.array([1,2])) 'array([1, 2])' >>> np.array_repr(np.ma.array([0.])) @@ -1730,6 +1738,7 @@ def array_str(a, max_line_width=None, precision=None, suppress_small=None): Examples -------- + >>> import numpy as np >>> np.array_str(np.arange(3)) '[0 1 2]' diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index 06871a44b37f..58c74aceb2f2 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -249,6 +249,7 @@ def english_upper(s): Examples -------- + >>> import numpy as np >>> from numpy.lib.utils import english_upper >>> s = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_' >>> english_upper(s) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 012c20ae98e3..f0a410d94d6c 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -426,9 +426,10 @@ def add_newdoc(place, name, doc): Examples -------- - >>> import numpy as np We expect the arctan of 0 to be 0, and of 1 to be pi/4: + >>> import numpy as np + >>> np.arctan([0, 1]) array([ 0. 
, 0.78539816]) @@ -506,9 +507,10 @@ def add_newdoc(place, name, doc): Examples -------- - >>> import numpy as np Consider four points in different quadrants: + >>> import numpy as np + >>> x = np.array([-1, +1, +1, -1]) >>> y = np.array([-1, -1, +1, +1]) >>> np.arctan2(y, x) * 180 / np.pi @@ -988,9 +990,10 @@ def add_newdoc(place, name, doc): Examples -------- - >>> import numpy as np Convert a radian array to degrees + >>> import numpy as np + >>> rad = np.arange(12.)*np.pi/6 >>> np.degrees(rad) array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., @@ -1226,9 +1229,10 @@ def add_newdoc(place, name, doc): Examples -------- - >>> import numpy as np Plot the magnitude and phase of ``exp(x)`` in the complex plane: + >>> import numpy as np + >>> import matplotlib.pyplot as plt >>> x = np.linspace(-2*np.pi, 2*np.pi, 100) @@ -1310,11 +1314,12 @@ def add_newdoc(place, name, doc): Examples -------- - >>> import numpy as np The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to about 32 significant digits. This example shows the superiority of expm1 in this case. + >>> import numpy as np + >>> np.expm1(1e-10) 1.00000000005e-10 >>> np.exp(1e-10) - 1 @@ -2865,9 +2870,10 @@ def add_newdoc(place, name, doc): Examples -------- - >>> import numpy as np For 2-D arrays it is the matrix product: + >>> import numpy as np + >>> a = np.array([[1, 0], ... [0, 1]]) >>> b = np.array([[4, 1], diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index b5a3aadfd54d..6301556aaaa9 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -78,10 +78,11 @@ def equal(x1, x2): Examples -------- + >>> import numpy as np >>> y = "aa " >>> x = "aa" >>> np.char.equal(x, y) - array(True) + array(True) See Also -------- @@ -115,10 +116,11 @@ def not_equal(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.not_equal(x1, 'b') array([ True, False, True]) - + """ return compare_chararrays(x1, x2, '!=', True) @@ -149,10 +151,11 @@ def greater_equal(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.greater_equal(x1, 'b') array([False, True, True]) - + """ return compare_chararrays(x1, x2, '>=', True) @@ -182,10 +185,11 @@ def less_equal(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.less_equal(x1, 'b') array([ True, True, False]) - + """ return compare_chararrays(x1, x2, '<=', True) @@ -215,10 +219,11 @@ def greater(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.greater(x1, 'b') array([False, False, True]) - + """ return compare_chararrays(x1, x2, '>', True) @@ -248,10 +253,11 @@ def less(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.less(x1, 'b') array([True, False, False]) - + """ return compare_chararrays(x1, x2, '<', True) @@ -283,6 +289,7 @@ def multiply(a, i): Examples -------- + >>> import numpy as np >>> a = np.array(["a", "b", "c"]) >>> np.strings.multiply(a, 3) array(['aaa', 'bbb', 'ccc'], dtype='>> import numpy as np >>> x = np.array(["Numpy is nice!"]) >>> np.char.partition(x, " ") array([['Numpy', ' ', 'is nice!']], dtype='>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.char.rpartition(a, 'A') array([['aAaAa', 'A', ''], @@ -515,6 +524,7 @@ class adds the following functionality: Examples -------- + >>> import numpy as np >>> charar = np.char.chararray((3, 3)) >>> charar[:] = 'a' >>> charar @@ 
-1383,9 +1393,10 @@ class adds the following functionality: Examples -------- + >>> import numpy as np >>> np.char.asarray(['hello', 'world']) chararray(['hello', 'world'], dtype='>> import numpy as np >>> a = [4, 3, 5, 7, 6, 8] >>> indices = [0, 1, 4] >>> np.take(a, indices) @@ -293,6 +294,7 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1,2,3], [4,5,6]]) >>> np.reshape(a, 6) array([1, 2, 3, 4, 5, 6]) @@ -416,6 +418,7 @@ def choose(a, choices, out=None, mode='raise'): Examples -------- + >>> import numpy as np >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], ... [20, 21, 22, 23], [30, 31, 32, 33]] >>> np.choose([2, 3, 1, 0], choices @@ -490,6 +493,7 @@ def repeat(a, repeats, axis=None): Examples -------- + >>> import numpy as np >>> np.repeat(3, 4) array([3, 3, 3, 3]) >>> x = np.array([[1,2],[3,4]]) @@ -551,6 +555,7 @@ def put(a, ind, v, mode='raise'): Examples -------- + >>> import numpy as np >>> a = np.arange(5) >>> np.put(a, [0, 2], [-44, -55]) >>> a @@ -599,6 +604,7 @@ def swapaxes(a, axis1, axis2): Examples -------- + >>> import numpy as np >>> x = np.array([[1,2,3]]) >>> np.swapaxes(x,0,1) array([[1], @@ -670,6 +676,7 @@ def transpose(a, axes=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], @@ -724,6 +731,7 @@ def matrix_transpose(x, /): Examples -------- + >>> import numpy as np >>> np.matrix_transpose([[1, 2], [3, 4]]) array([[1, 3], [2, 4]]) @@ -825,6 +833,7 @@ def partition(a, kth, axis=-1, kind='introselect', order=None): Examples -------- + >>> import numpy as np >>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0]) >>> p = np.partition(a, 4) >>> p @@ -924,6 +933,7 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None): -------- One dimensional array: + >>> import numpy as np >>> x = np.array([3, 4, 2, 1]) >>> x[np.argpartition(x, 3)] array([2, 1, 3, 4]) # may vary @@ -1077,6 +1087,7 @@ def sort(a, axis=-1, kind=None, order=None, *, stable=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1,4],[3,1]]) >>> np.sort(a) # sort along the last axis array([[1, 4], @@ -1187,6 +1198,7 @@ def argsort(a, axis=-1, kind=None, order=None, *, stable=None): -------- One dimensional array: + >>> import numpy as np >>> x = np.array([3, 1, 2]) >>> np.argsort(x) array([1, 2, 0]) @@ -1289,6 +1301,7 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.arange(6).reshape(2,3) + 10 >>> a array([[10, 11, 12], @@ -1386,6 +1399,7 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.arange(6).reshape(2,3) + 10 >>> a array([[10, 11, 12], @@ -1499,6 +1513,7 @@ def searchsorted(a, v, side='left', sorter=None): Examples -------- + >>> import numpy as np >>> np.searchsorted([11,12,13,14,15], 13) 2 >>> np.searchsorted([11,12,13,14,15], 13, side='right') @@ -1562,7 +1577,8 @@ def resize(a, new_shape): Examples -------- - >>> a=np.array([[0,1],[2,3]]) + >>> import numpy as np + >>> a = np.array([[0,1],[2,3]]) >>> np.resize(a,(2,3)) array([[0, 1, 2], [3, 0, 1]]) @@ -1636,6 +1652,7 @@ def squeeze(a, axis=None): Examples -------- + >>> import numpy as np >>> x = np.array([[[0], [1], [2]]]) >>> x.shape (1, 3, 1) @@ -1749,6 +1766,7 @@ def diagonal(a, offset=0, axis1=0, axis2=1): Examples -------- + >>> import numpy as np >>> a = np.arange(4).reshape(2,2) >>> a array([[0, 1], @@ -1856,6 +1874,7 @@ def trace(a, 
offset=0, axis1=0, axis2=1, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> np.trace(np.eye(3)) 3.0 >>> a = np.arange(8).reshape((2,2,2)) @@ -1947,6 +1966,7 @@ def ravel(a, order='C'): -------- It is equivalent to ``reshape(-1, order=order)``. + >>> import numpy as np >>> x = np.array([[1, 2, 3], [4, 5, 6]]) >>> np.ravel(x) array([1, 2, 3, 4, 5, 6]) @@ -2045,6 +2065,7 @@ def nonzero(a): Examples -------- + >>> import numpy as np >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) >>> x array([[3, 0, 0], @@ -2118,6 +2139,7 @@ def shape(a): Examples -------- + >>> import numpy as np >>> np.shape(np.eye(3)) (3, 3) >>> np.shape([[1, 3]]) @@ -2185,6 +2207,7 @@ def compress(condition, a, axis=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4], [5, 6]]) >>> a array([[1, 2], @@ -2265,6 +2288,7 @@ def clip(a, a_min, a_max, out=None, **kwargs): Examples -------- + >>> import numpy as np >>> a = np.arange(10) >>> a array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -2388,6 +2412,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> np.sum([0.5, 1.5]) 2.0 >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) @@ -2506,6 +2531,7 @@ def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): Examples -------- + >>> import numpy as np >>> np.any([[True, False], [True, True]]) True @@ -2618,6 +2644,7 @@ def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): Examples -------- + >>> import numpy as np >>> np.all([[True,False],[True,True]]) False @@ -2696,6 +2723,7 @@ def cumsum(a, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1,2,3], [4,5,6]]) >>> a array([[1, 2, 3], @@ -2780,6 +2808,7 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> x = np.array([[4, 9, 2, 10], ... [6, 9, 7, 12]]) @@ -2902,6 +2931,7 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.arange(4).reshape((2,2)) >>> a array([[0, 1], @@ -3045,6 +3075,7 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.arange(4).reshape((2,2)) >>> a array([[0, 1], @@ -3190,6 +3221,7 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, -------- By default, calculate the product of all elements: + >>> import numpy as np >>> np.prod([1.,2.]) 2.0 @@ -3278,6 +3310,7 @@ def cumprod(a, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.array([1,2,3]) >>> np.cumprod(a) # intermediate results 1, 1*2 ... 
# total product 1*2*3 = 6 @@ -3330,6 +3363,7 @@ def ndim(a): Examples -------- + >>> import numpy as np >>> np.ndim([[1,2,3],[4,5,6]]) 2 >>> np.ndim(np.array([[1,2,3],[4,5,6]])) @@ -3374,6 +3408,7 @@ def size(a, axis=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1,2,3],[4,5,6]]) >>> np.size(a) 6 @@ -3478,6 +3513,7 @@ def round(a, decimals=0, out=None): Examples -------- + >>> import numpy as np >>> np.round([0.37, 1.64]) array([0., 2.]) >>> np.round([0.37, 1.64], decimals=1) @@ -3592,6 +3628,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.mean(a) 2.5 @@ -3772,6 +3809,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.std(a) 1.1180339887498949 # may vary @@ -3974,6 +4012,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.var(a) 1.25 diff --git a/numpy/_core/function_base.py b/numpy/_core/function_base.py index 898bc0e309ce..0e98196f2922 100644 --- a/numpy/_core/function_base.py +++ b/numpy/_core/function_base.py @@ -101,6 +101,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, Examples -------- + >>> import numpy as np >>> np.linspace(2.0, 3.0, num=5) array([2. , 2.25, 2.5 , 2.75, 3. ]) >>> np.linspace(2.0, 3.0, num=5, endpoint=False) @@ -272,6 +273,7 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, Examples -------- + >>> import numpy as np >>> np.logspace(2.0, 3.0, num=4) array([ 100. , 215.443469 , 464.15888336, 1000. ]) >>> np.logspace(2.0, 3.0, num=4, endpoint=False) @@ -378,6 +380,7 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): Examples -------- + >>> import numpy as np >>> np.geomspace(1, 1000, num=4) array([ 1., 10., 100., 1000.]) >>> np.geomspace(1, 1000, num=3, endpoint=False) diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index b01e47fade43..669dfc71e298 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -477,6 +477,7 @@ class finfo: Examples -------- + >>> import numpy as np >>> np.finfo(np.float64).dtype dtype('float64') >>> np.finfo(np.complex64).dtype @@ -663,6 +664,7 @@ class iinfo: -------- With types: + >>> import numpy as np >>> ii16 = np.iinfo(np.int16) >>> ii16.min -32768 diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index fb2c95a9d338..268b23dbadf9 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -127,6 +127,7 @@ class memmap(ndarray): Examples -------- + >>> import numpy as np >>> data = np.arange(12, dtype='float32') >>> data.resize((3,4)) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 75ab59851abf..183199608962 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -145,6 +145,7 @@ def empty_like( Examples -------- + >>> import numpy as np >>> a = ([1,2,3], [4,5,6]) # a is array-like >>> np.empty_like(a) array([[-1073741821, -1073741821, 3], # uninitialized @@ -226,6 +227,7 @@ def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> b = np.array([[5, 6]]) >>> np.concatenate((a, b), axis=0) @@ -324,6 +326,7 @@ def inner(a, b): -------- Ordinary inner product for vectors: + >>> import numpy as np >>> a = 
np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) @@ -400,6 +403,7 @@ def where(condition, x=None, y=None): Examples -------- + >>> import numpy as np >>> a = np.arange(10) >>> a array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -472,6 +476,7 @@ def lexsort(keys, axis=None): -------- Sort names: first by surname, then by name. + >>> import numpy as np >>> surnames = ('Hertz', 'Galilei', 'Hertz') >>> first_names = ('Heinrich', 'Galileo', 'Gustav') >>> ind = np.lexsort((first_names, surnames)) @@ -606,6 +611,7 @@ def can_cast(from_, to, casting=None): -------- Basic examples + >>> import numpy as np >>> np.can_cast(np.int32, np.int64) True >>> np.can_cast(np.float64, complex) @@ -656,6 +662,7 @@ def min_scalar_type(a): Examples -------- + >>> import numpy as np >>> np.min_scalar_type(10) dtype('uint8') @@ -734,6 +741,7 @@ def result_type(*arrays_and_dtypes): Examples -------- + >>> import numpy as np >>> np.result_type(3, np.arange(7, dtype='i1')) dtype('int8') @@ -813,6 +821,7 @@ def dot(a, b, out=None): Examples -------- + >>> import numpy as np >>> np.dot(3, 4) 12 @@ -876,6 +885,7 @@ def vdot(a, b): Examples -------- + >>> import numpy as np >>> a = np.array([1+2j,3+4j]) >>> b = np.array([5+6j,7+8j]) >>> np.vdot(a, b) @@ -945,6 +955,7 @@ def bincount(x, weights=None, minlength=None): Examples -------- + >>> import numpy as np >>> np.bincount(np.arange(5)) array([1, 1, 1, 1, 1]) >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) @@ -1020,6 +1031,7 @@ def ravel_multi_index(multi_index, dims, mode=None, order=None): Examples -------- + >>> import numpy as np >>> arr = np.array([[3,6,6],[4,5,1]]) >>> np.ravel_multi_index(arr, (7,6)) array([22, 41, 37]) @@ -1074,6 +1086,7 @@ def unravel_index(indices, shape=None, order=None): Examples -------- + >>> import numpy as np >>> np.unravel_index([22, 41, 37], (7,6)) (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index([31, 41, 13], (7,6), order='F') @@ -1120,6 +1133,7 @@ def copyto(dst, src, casting=None, where=None): Examples -------- + >>> import numpy as np >>> A = np.array([4, 5, 6]) >>> B = [1, 2, 3] >>> np.copyto(A, B) @@ -1165,6 +1179,7 @@ def putmask(a, /, mask, values): Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> np.putmask(x, x>2, x**2) >>> x @@ -1222,6 +1237,7 @@ def packbits(a, axis=None, bitorder='big'): Examples -------- + >>> import numpy as np >>> a = np.array([[[1,0,1], ... [0,1,0]], ... [[1,1,0], @@ -1291,6 +1307,7 @@ def unpackbits(a, axis=None, count=None, bitorder='big'): Examples -------- + >>> import numpy as np >>> a = np.array([[2], [7], [23]], dtype=np.uint8) >>> a array([[ 2], @@ -1368,6 +1385,7 @@ def shares_memory(a, b, max_work=None): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3, 4]) >>> np.shares_memory(x, np.array([5, 6, 7])) False @@ -1432,6 +1450,7 @@ def may_share_memory(a, b, max_work=None): Examples -------- + >>> import numpy as np >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) False >>> x = np.zeros([3, 4]) @@ -1494,6 +1513,7 @@ def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): Examples -------- + >>> import numpy as np >>> # The weekdays are Friday, Saturday, and Monday ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) @@ -1578,6 +1598,7 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, Examples -------- + >>> import numpy as np >>> # First business day in October 2011 (not accounting for holidays) ... 
np.busday_offset('2011-10', 0, roll='forward') np.datetime64('2011-10-03') @@ -1667,6 +1688,7 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None, Examples -------- + >>> import numpy as np >>> # Number of weekdays in January 2011 ... np.busday_count('2011-01', '2011-02') 21 @@ -1710,6 +1732,7 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): Examples -------- + >>> import numpy as np >>> import pytz >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') >>> d diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 6c3d880a8656..4998aead0bed 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -115,6 +115,7 @@ def zeros_like( Examples -------- + >>> import numpy as np >>> x = np.arange(6) >>> x = x.reshape((2, 3)) >>> x @@ -180,6 +181,7 @@ def ones(shape, dtype=None, order='C', *, device=None, like=None): Examples -------- + >>> import numpy as np >>> np.ones(5) array([1., 1., 1., 1., 1.]) @@ -268,6 +270,7 @@ def ones_like( Examples -------- + >>> import numpy as np >>> x = np.arange(6) >>> x = x.reshape((2, 3)) >>> x @@ -338,6 +341,7 @@ def full(shape, fill_value, dtype=None, order='C', *, device=None, like=None): Examples -------- + >>> import numpy as np >>> np.full((2, 2), np.inf) array([[inf, inf], [inf, inf]]) @@ -425,6 +429,7 @@ def full_like( Examples -------- + >>> import numpy as np >>> x = np.arange(6, dtype=int) >>> np.full_like(x, 1) array([1, 1, 1, 1, 1, 1]) @@ -503,6 +508,7 @@ def count_nonzero(a, axis=None, *, keepdims=False): Examples -------- + >>> import numpy as np >>> np.count_nonzero(np.eye(4)) 4 >>> a = np.array([[0, 1, 7, 0], @@ -557,6 +563,7 @@ def isfortran(a): order (last index varies the fastest), or FORTRAN-contiguous order in memory (first index varies the fastest). + >>> import numpy as np >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') >>> a array([[1, 2, 3], @@ -632,6 +639,7 @@ def argwhere(a): Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2,3) >>> x array([[0, 1, 2], @@ -680,6 +688,7 @@ def flatnonzero(a): Examples -------- + >>> import numpy as np >>> x = np.arange(-2, 3) >>> x array([-2, -1, 0, 1, 2]) @@ -752,6 +761,7 @@ def correlate(a, v, mode='valid'): Examples -------- + >>> import numpy as np >>> np.correlate([1, 2, 3], [0, 1, 0.5]) array([3.5]) >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") @@ -851,6 +861,7 @@ def convolve(a, v, mode='full'): Note how the convolution operator flips the second array before "sliding" the two across one another: + >>> import numpy as np >>> np.convolve([1, 2, 3], [0, 1, 0.5]) array([0. , 1. , 2.5, 4. 
, 1.5]) @@ -935,6 +946,7 @@ def outer(a, b, out=None): -------- Make a (*very* coarse) grid for computing a Mandelbrot set: + >>> import numpy as np >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) >>> rl array([[-2., -1., 0., 1., 2.], @@ -1033,6 +1045,7 @@ def tensordot(a, b, axes=2): -------- A "traditional" example: + >>> import numpy as np >>> a = np.arange(60.).reshape(3,4,5) >>> b = np.arange(24.).reshape(4,3,2) >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) @@ -1210,6 +1223,7 @@ def roll(a, shift, axis=None): Examples -------- + >>> import numpy as np >>> x = np.arange(10) >>> np.roll(x, 2) array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) @@ -1343,6 +1357,7 @@ def rollaxis(a, axis, start=0): Examples -------- + >>> import numpy as np >>> a = np.ones((3,4,5,6)) >>> np.rollaxis(a, 3, 1).shape (3, 6, 4, 5) @@ -1465,6 +1480,7 @@ def moveaxis(a, source, destination): Examples -------- + >>> import numpy as np >>> x = np.zeros((3, 4, 5)) >>> np.moveaxis(x, 0, -1).shape (4, 5, 3) @@ -1575,6 +1591,7 @@ def cross2d(x, y): -------- Vector cross-product. + >>> import numpy as np >>> x = [1, 2, 3] >>> y = [4, 5, 6] >>> np.cross(x, y) @@ -1782,6 +1799,7 @@ def indices(dimensions, dtype=int, sparse=False): Examples -------- + >>> import numpy as np >>> grid = np.indices((2, 3)) >>> grid.shape (2, 2, 3) @@ -1881,6 +1899,7 @@ def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): Examples -------- + >>> import numpy as np >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) array([[0., 0.], [1., 1.]]) @@ -1970,14 +1989,20 @@ def isscalar(element): Examples -------- + >>> import numpy as np + >>> np.isscalar(3.1) True + >>> np.isscalar(np.array(3.1)) False + >>> np.isscalar([3.1]) False + >>> np.isscalar(False) True + >>> np.isscalar('numpy') True @@ -2045,6 +2070,7 @@ def binary_repr(num, width=None): Examples -------- + >>> import numpy as np >>> np.binary_repr(3) '11' >>> np.binary_repr(-3) @@ -2129,6 +2155,7 @@ def base_repr(number, base=2, padding=0): Examples -------- + >>> import numpy as np >>> np.base_repr(5) '101' >>> np.base_repr(6, 5) @@ -2202,6 +2229,7 @@ def identity(n, dtype=None, *, like=None): Examples -------- + >>> import numpy as np >>> np.identity(3) array([[1., 0., 0.], [0., 1., 0.], @@ -2292,17 +2320,23 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): Examples -------- + >>> import numpy as np >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) False + >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) True + >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) False + >>> np.allclose([1.0, np.nan], [1.0, np.nan]) False + >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) True + """ res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) return builtins.bool(res) @@ -2376,24 +2410,34 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): Examples -------- + >>> import numpy as np >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) array([ True, False]) + >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) array([ True, True]) + >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) array([False, True]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan]) array([ True, False]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) array([ True, True]) + >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) array([ True, False]) + >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) array([False, False]) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) array([ True, True]) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) array([False, True]) + 
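A note on the ``isclose`` examples above: because ``isclose(a, b)`` compares ``abs(a - b)`` against ``atol + rtol * abs(b)``, the function is not symmetric in ``a`` and ``b``. A minimal sketch of the effect (the ``rtol`` value below is chosen purely for illustration and is not part of this patch):

>>> np.isclose([10.0], [10.1], rtol=0.00995, atol=0.0)  # 0.1 <= 0.00995 * 10.1
array([ True])
>>> np.isclose([10.1], [10.0], rtol=0.00995, atol=0.0)  # 0.1 >  0.00995 * 10.0
array([False])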
""" # Turn all but python scalars into arrays. x, y, atol, rtol = ( @@ -2472,17 +2516,24 @@ def array_equal(a1, a2, equal_nan=False): Examples -------- + >>> import numpy as np + >>> np.array_equal([1, 2], [1, 2]) True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) True + >>> np.array_equal([1, 2], [1, 2, 3]) False + >>> np.array_equal([1, 2], [1, 4]) False + >>> a = np.array([1, np.nan]) >>> np.array_equal(a, a) False + >>> np.array_equal(a, a, equal_nan=True) True @@ -2547,6 +2598,7 @@ def array_equiv(a1, a2): Examples -------- + >>> import numpy as np >>> np.array_equiv([1, 2], [1, 2]) True >>> np.array_equiv([1, 2], [1, 3]) @@ -2613,6 +2665,7 @@ def astype(x, dtype, /, *, copy = True): Examples -------- + >>> import numpy as np >>> arr = np.array([1, 2, 3]); arr array([1, 2, 3]) >>> np.astype(arr, np.float64) diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 8bdeec15c6d2..1f92500aed6e 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -97,6 +97,7 @@ class format_parser: Examples -------- + >>> import numpy as np >>> np.rec.format_parser(['>> import numpy as np >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '>> x array([(1., 2), (3., 4)], dtype=[('x', '>> import numpy as np >>> np.atleast_1d(1.0) array([1.]) @@ -102,6 +103,7 @@ def atleast_2d(*arys): Examples -------- + >>> import numpy as np >>> np.atleast_2d(3.0) array([[3.]]) @@ -162,6 +164,7 @@ def atleast_3d(*arys): Examples -------- + >>> import numpy as np >>> np.atleast_3d(3.0) array([[[3.]]]) @@ -264,6 +267,7 @@ def vstack(tup, *, dtype=None, casting="same_kind"): Examples -------- + >>> import numpy as np >>> a = np.array([1, 2, 3]) >>> b = np.array([4, 5, 6]) >>> np.vstack((a,b)) @@ -337,6 +341,7 @@ def hstack(tup, *, dtype=None, casting="same_kind"): Examples -------- + >>> import numpy as np >>> a = np.array((1,2,3)) >>> b = np.array((4,5,6)) >>> np.hstack((a,b)) @@ -419,6 +424,7 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): Examples -------- + >>> import numpy as np >>> rng = np.random.default_rng() >>> arrays = [rng.normal(size=(3,4)) for _ in range(10)] >>> np.stack(arrays, axis=0).shape @@ -862,8 +868,9 @@ def block(arrays): Examples -------- - The most common use of this function is to build a block matrix + The most common use of this function is to build a block matrix: + >>> import numpy as np >>> A = np.eye(2) * 2 >>> B = np.eye(3) * 3 >>> np.block([ @@ -876,7 +883,7 @@ def block(arrays): [1., 1., 0., 3., 0.], [1., 1., 0., 0., 3.]]) - With a list of depth 1, `block` can be used as `hstack` + With a list of depth 1, `block` can be used as `hstack`: >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) array([1, 2, 3]) @@ -908,7 +915,7 @@ def block(arrays): [2, 2], [2, 2]]) - It can also be used in places of `atleast_1d` and `atleast_2d` + It can also be used in place of `atleast_1d` and `atleast_2d`: >>> a = np.array(0) >>> b = np.array([1]) diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 83034705f525..8cdab70b534c 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -138,6 +138,7 @@ def multiply(a, i): Examples -------- + >>> import numpy as np >>> a = np.array(["a", "b", "c"]) >>> np.strings.multiply(a, 3) array(['aaa', 'bbb', 'ccc'], dtype='>> import numpy as np >>> a = np.array(["NumPy is a Python library"]) >>> np.strings.find(a, "Python") array([11]) @@ -291,6 +293,7 @@ def index(a, sub, start=0, end=None): Examples -------- + >>> import numpy as np >>> a = np.array(["Computer Science"]) >>> 
np.strings.index(a, "Science", start=0, end=None) array([9]) @@ -327,7 +330,7 @@ def rindex(a, sub, start=0, end=None): >>> a = np.array(["Computer Science"]) >>> np.strings.rindex(a, "Science", start=0, end=None) array([9]) - + """ end = end if end is not None else MAX return _rindex_ufunc(a, sub, start, end) @@ -359,6 +362,7 @@ def count(a, sub, start=0, end=None): Examples -------- + >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> s = np.array(['foo', 'bar']) >>> s array(['foo', 'bar'], dtype='>> import numpy as np >>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', ... b'\x81\x82\xc2\xc1\xc2\x82\x81']) >>> c @@ -522,11 +528,12 @@ def encode(a, encoding=None, errors=None): Examples -------- + >>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.encode(a, encoding='cp037') array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7') - + """ return _to_bytes_or_str_array( _vec_string(a, np.object_, 'encode', _clean_args(encoding, errors)), @@ -566,6 +573,7 @@ def expandtabs(a, tabsize=8): Examples -------- + >>> import numpy as np >>> a = np.array(['\t\tHello\tworld']) >>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP array([' Hello world'], dtype='>> import numpy as np >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> np.strings.center(c, width=9) @@ -674,6 +683,7 @@ def ljust(a, width, fillchar=' '): Examples -------- + >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.ljust(c, width=3) array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.rjust(a, width=3) array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> np.strings.zfill(['1', '-1', '+1'], 3) array(['001', '-01', '+01'], dtype='>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> c = np.array(['aAaAaA', 'abBABba']) >>> c array(['aAaAaA', 'abBABba'], dtype='>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> c = np.array(['a1b c', '1bca', 'bca1']); c array(['a1b c', '1bca', 'bca1'], dtype='>> np.strings.upper(c) @@ -987,6 +1003,7 @@ def lower(a): Examples -------- + >>> import numpy as np >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c array(['A1B C', '1BCA', 'BCA1'], dtype='>> np.strings.lower(c) @@ -1023,6 +1040,7 @@ def swapcase(a): Examples -------- + >>> import numpy as np >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'], dtype='|S5') @@ -1061,6 +1079,7 @@ def capitalize(a): Examples -------- + >>> import numpy as np >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='|S4') @@ -1101,6 +1120,7 @@ def title(a): Examples -------- + >>> import numpy as np >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c array(['a1b c', '1b ca', 'b ca1', 'ca1b'], dtype='|S5') @@ -1140,6 +1160,7 @@ def replace(a, old, new, count=-1): Examples -------- + >>> import numpy as np >>> a = np.array(["That is a mango", "Monkeys eat mangos"]) >>> np.strings.replace(a, 'mango', 'banana') array(['That is a banana', 'Monkeys eat bananas'], dtype='>> a = np.array(["The dish is fresh", "This is it"]) >>> 
np.strings.replace(a, 'is', 'was') array(['The dwash was fresh', 'Thwas was it'], dtype='>> import numpy as np >>> np.strings.join('-', 'osd') # doctest: +SKIP array('o-s-d', dtype='>> import numpy as np >>> x = np.array("Numpy is nice!") >>> np.strings.split(x, " ") # doctest: +SKIP array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP @@ -1278,11 +1301,12 @@ def _rsplit(a, sep=None, maxsplit=None): Examples -------- + >>> import numpy as np >>> a = np.array(['aAaAaA', 'abBABba']) >>> np.strings.rsplit(a, 'A') # doctest: +SKIP array([list(['a', 'a', 'a', '']), # doctest: +SKIP list(['abB', 'Bba'])], dtype=object) # doctest: +SKIP - + """ # This will return an array of lists of different sizes, so we # leave it as an object array @@ -1353,6 +1377,7 @@ def partition(a, sep): Examples -------- + >>> import numpy as np >>> x = np.array(["Numpy is nice!"]) >>> np.strings.partition(x, " ") (array(['Numpy'], dtype='>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.rpartition(a, 'A') (array(['aAaAa', ' a', 'abB'], dtype='>> import numpy as np >>> a = np.array(['a1b c', '1bca', 'bca1']) >>> table = a[0].maketrans('abc', '123') >>> deletechars = ' ' >>> np.char.translate(a, table, deletechars) array(['112 3', '1231', '2311'], dtype=' Date: Tue, 18 Jun 2024 20:23:36 +0530 Subject: [PATCH 661/980] DOC, API: Add `>>> import numpy as np` stub to `numpy/lib/` --- numpy/lib/_array_utils_impl.py | 1 + numpy/lib/_arraypad_impl.py | 3 +- numpy/lib/_arraysetops_impl.py | 12 ++++++++ numpy/lib/_arrayterator_impl.py | 1 + numpy/lib/_function_base_impl.py | 47 +++++++++++++++++++++++++++++++- numpy/lib/_histograms_impl.py | 3 ++ numpy/lib/_index_tricks_impl.py | 14 +++++++++- numpy/lib/_iotools.py | 5 ++++ numpy/lib/_nanfunctions_impl.py | 14 ++++++++++ numpy/lib/_npyio_impl.py | 11 ++++++++ numpy/lib/_polynomial_impl.py | 17 ++++++++++++ numpy/lib/_scimath_impl.py | 14 ++++++++++ numpy/lib/_shape_base_impl.py | 14 ++++++++++ numpy/lib/_stride_tricks_impl.py | 4 +++ numpy/lib/_twodim_base_impl.py | 22 +++++++++++++-- numpy/lib/_type_check_impl.py | 10 +++++++ numpy/lib/_ufunclike_impl.py | 3 ++ numpy/lib/introspect.py | 1 + numpy/lib/recfunctions.py | 16 +++++++++++ 19 files changed, 207 insertions(+), 5 deletions(-) diff --git a/numpy/lib/_array_utils_impl.py b/numpy/lib/_array_utils_impl.py index 3e9d96e93dd9..d5f778160358 100644 --- a/numpy/lib/_array_utils_impl.py +++ b/numpy/lib/_array_utils_impl.py @@ -29,6 +29,7 @@ def byte_bounds(a): Examples -------- + >>> import numpy as np >>> I = np.eye(2, dtype='f'); I.dtype dtype('float32') >>> low, high = np.lib.array_utils.byte_bounds(I) diff --git a/numpy/lib/_arraypad_impl.py b/numpy/lib/_arraypad_impl.py index af6c4da4c3b7..079af37236f6 100644 --- a/numpy/lib/_arraypad_impl.py +++ b/numpy/lib/_arraypad_impl.py @@ -49,7 +49,7 @@ def _slice_at_axis(sl, axis): Examples -------- - >>> _slice_at_axis(slice(None, 3, -1), 1) + >>> np._slice_at_axis(slice(None, 3, -1), 1) (slice(None, None, None), slice(None, 3, -1), (...,)) """ return (slice(None),) * axis + (sl,) + (...,) @@ -684,6 +684,7 @@ def pad(array, pad_width, mode='constant', **kwargs): Examples -------- + >>> import numpy as np >>> a = [1, 2, 3, 4, 5] >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) array([4, 4, 1, ..., 6, 6, 6]) diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index 435904c95321..6f3e549bb9ee 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -68,6 +68,7 @@ def 
ediff1d(ary, to_end=None, to_begin=None): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 4, 7, 0]) >>> np.ediff1d(x) array([ 1, 2, 3, -7]) @@ -233,6 +234,7 @@ def unique(ar, return_index=False, return_inverse=False, Examples -------- + >>> import numpy as np >>> np.unique([1, 1, 2, 2, 3, 3]) array([1, 2, 3]) >>> a = np.array([[1, 1], [2, 3]]) @@ -435,6 +437,7 @@ def unique_all(x): Examples -------- + >>> import numpy as np >>> np.unique_all([1, 1, 2]) UniqueAllResult(values=array([1, 2]), indices=array([0, 2]), @@ -486,6 +489,7 @@ def unique_counts(x): Examples -------- + >>> import numpy as np >>> np.unique_counts([1, 1, 2]) UniqueCountsResult(values=array([1, 2]), counts=array([2, 1])) @@ -535,6 +539,7 @@ def unique_inverse(x): Examples -------- + >>> import numpy as np >>> np.unique_inverse([1, 1, 2]) UniqueInverseResult(values=array([1, 2]), inverse_indices=array([0, 0, 1])) @@ -580,6 +585,7 @@ def unique_values(x): Examples -------- + >>> import numpy as np >>> np.unique_values([1, 1, 2]) array([1, 2]) @@ -634,6 +640,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): Examples -------- + >>> import numpy as np >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) array([1, 3]) @@ -719,6 +726,7 @@ def setxor1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> a = np.array([1, 2, 3, 2, 4]) >>> b = np.array([2, 3, 5, 7, 5]) >>> np.setxor1d(a,b) @@ -822,6 +830,7 @@ def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): Examples -------- + >>> import numpy as np >>> test = np.array([0, 1, 2, 5, 0]) >>> states = [0, 2] >>> mask = np.in1d(test, states) @@ -1060,6 +1069,7 @@ def isin(element, test_elements, assume_unique=False, invert=False, *, Examples -------- + >>> import numpy as np >>> element = 2*np.arange(4).reshape((2, 2)) >>> element array([[0, 2], @@ -1129,6 +1139,7 @@ def union1d(ar1, ar2): Examples -------- + >>> import numpy as np >>> np.union1d([-1, 0, 1], [-2, 0, 2]) array([-2, -1, 0, 1, 2]) @@ -1171,6 +1182,7 @@ def setdiff1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> a = np.array([1, 2, 3, 2, 4, 1]) >>> b = np.array([3, 4, 5, 6]) >>> np.setdiff1d(a, b) diff --git a/numpy/lib/_arrayterator_impl.py b/numpy/lib/_arrayterator_impl.py index 8b21a6086638..146161d0236d 100644 --- a/numpy/lib/_arrayterator_impl.py +++ b/numpy/lib/_arrayterator_impl.py @@ -66,6 +66,7 @@ class Arrayterator: Examples -------- + >>> import numpy as np >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) >>> a_itor = np.lib.Arrayterator(a, 2) >>> a_itor.shape diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 5356f1cc31c3..5d00ac0bae32 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -192,6 +192,7 @@ def rot90(m, k=1, axes=(0, 1)): Examples -------- + >>> import numpy as np >>> m = np.array([[1,2],[3,4]], int) >>> m array([[1, 2], @@ -297,6 +298,7 @@ def flip(m, axis=None): Examples -------- + >>> import numpy as np >>> A = np.arange(8).reshape((2,2,2)) >>> A array([[[0, 1], @@ -360,6 +362,7 @@ def iterable(y): Examples -------- + >>> import numpy as np >>> np.iterable([1, 2, 3]) True >>> np.iterable(2) @@ -502,6 +505,7 @@ def average(a, axis=None, weights=None, returned=False, *, Examples -------- + >>> import numpy as np >>> data = np.arange(1, 5) >>> data array([1, 2, 3, 4]) @@ -627,7 +631,9 @@ class ndarray is returned. Examples -------- - Convert a list into an array. 
If all elements are finite + >>> import numpy as np + + Convert a list into an array. If all elements are finite, then ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] @@ -728,6 +734,8 @@ def piecewise(x, condlist, funclist, *args, **kw): Examples -------- + >>> import numpy as np + Define the signum function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. >>> x = np.linspace(-2.5, 2.5, 6) @@ -816,6 +824,8 @@ def select(condlist, choicelist, default=0): Examples -------- + >>> import numpy as np + Beginning with an array of integers from 0 to 5 (inclusive), elements less than ``3`` are negated, elements greater than ``3`` are squared, and elements not meeting either of these conditions @@ -938,6 +948,8 @@ def copy(a, order='K', subok=False): Examples -------- + >>> import numpy as np + Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) @@ -1025,6 +1037,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): Examples -------- + >>> import numpy as np >>> f = np.array([1, 2, 4, 7, 11, 16]) >>> np.gradient(f) array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) @@ -1437,6 +1450,7 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) array([ 1, 2, 3, -7]) @@ -1571,6 +1585,7 @@ def interp(x, xp, fp, left=None, right=None, period=None): Examples -------- + >>> import numpy as np >>> xp = [1, 2, 3] >>> fp = [3, 2, 0] >>> np.interp(2.5, xp, fp) @@ -1686,6 +1701,7 @@ def angle(z, deg=False): Examples -------- + >>> import numpy as np >>> np.angle([1.0, 1.0j, 1+1j]) # in radians array([ 0. , 1.57079633, 0.78539816]) # may vary >>> np.angle(1+1j, deg=True) # in degrees @@ -1760,6 +1776,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): Examples -------- + >>> import numpy as np >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase @@ -1829,6 +1846,7 @@ def sort_complex(a): Examples -------- + >>> import numpy as np >>> np.sort_complex([5, 3, 6, 2, 1]) array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) @@ -1874,6 +1892,7 @@ def trim_zeros(filt, trim='fb'): Examples -------- + >>> import numpy as np >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) >>> np.trim_zeros(a) array([1, 2, 3, 0, 2, 1]) @@ -1939,6 +1958,7 @@ def extract(condition, arr): Examples -------- + >>> import numpy as np >>> arr = np.arange(12).reshape((3, 4)) >>> arr array([[ 0, 1, 2, 3], @@ -1996,6 +2016,7 @@ def place(arr, mask, vals): Examples -------- + >>> import numpy as np >>> arr = np.arange(6).reshape(2, 3) >>> np.place(arr, arr>2, [44, 55]) >>> arr @@ -2031,6 +2052,8 @@ def disp(mesg, device=None, linefeed=True): Examples -------- + >>> import numpy as np + Besides ``sys.stdout``, a file-like object can also be used as it has both required methods: @@ -2270,6 +2293,7 @@ class vectorize: Examples -------- + >>> import numpy as np >>> def myfunc(a, b): ... "Return a-b if a>b, otherwise return a+b" ... if a > b: @@ -2691,6 +2715,8 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, Examples -------- + >>> import numpy as np + Consider two variables, :math:`x_0` and :math:`x_1`, which correlate perfectly, but in opposite directions: @@ -2894,6 +2920,8 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, Examples -------- + >>> import numpy as np + In this example we generate two random arrays, ``xarr`` and ``yarr``, and compute the row-wise and column-wise Pearson correlation coefficients, ``R``. 
Since ``rowvar`` is true by default, we first find the row-wise @@ -3031,6 +3059,7 @@ def blackman(M): Examples -------- + >>> import numpy as np >>> import matplotlib.pyplot as plt >>> np.blackman(12) array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary @@ -3139,6 +3168,7 @@ def bartlett(M): Examples -------- + >>> import numpy as np >>> import matplotlib.pyplot as plt >>> np.bartlett(12) array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary @@ -3240,6 +3270,7 @@ def hanning(M): Examples -------- + >>> import numpy as np >>> np.hanning(12) array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, @@ -3339,6 +3370,7 @@ def hamming(M): Examples -------- + >>> import numpy as np >>> np.hamming(12) array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, @@ -3518,6 +3550,7 @@ def i0(x): Examples -------- + >>> import numpy as np >>> np.i0(0.) array(1.0) >>> np.i0([0, 1, 2, 3]) @@ -3614,6 +3647,7 @@ def kaiser(M, beta): Examples -------- + >>> import numpy as np >>> import matplotlib.pyplot as plt >>> np.kaiser(12, 14) array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary @@ -3716,6 +3750,7 @@ def sinc(x): Examples -------- + >>> import numpy as np >>> import matplotlib.pyplot as plt >>> x = np.linspace(-4, 4, 41) >>> np.sinc(x) @@ -3894,6 +3929,7 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): Examples -------- + >>> import numpy as np >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], @@ -4105,6 +4141,7 @@ def percentile(a, Examples -------- + >>> import numpy as np >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], @@ -4400,6 +4437,7 @@ def quantile(a, Examples -------- + >>> import numpy as np >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], @@ -4933,6 +4971,8 @@ def trapezoid(y, x=None, dx=1.0, axis=-1): Examples -------- + >>> import numpy as np + Use the trapezoidal rule on evenly spaced points: >>> np.trapezoid([1, 2, 3]) @@ -5113,6 +5153,7 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): Examples -------- + >>> import numpy as np >>> nx, ny = (3, 2) >>> x = np.linspace(0, 1, nx) >>> y = np.linspace(0, 1, ny) @@ -5240,6 +5281,7 @@ def delete(arr, obj, axis=None): Examples -------- + >>> import numpy as np >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) >>> arr array([[ 1, 2, 3, 4], @@ -5424,6 +5466,7 @@ def insert(arr, obj, values, axis=None): Examples -------- + >>> import numpy as np >>> a = np.arange(6).reshape(3, 2) >>> a array([[0, 1], @@ -5600,6 +5643,7 @@ def append(arr, values, axis=None): Examples -------- + >>> import numpy as np >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) array([1, 2, 3, ..., 7, 8, 9]) @@ -5712,6 +5756,7 @@ def digitize(x, bins, right=False): Examples -------- + >>> import numpy as np >>> x = np.array([0.2, 6.4, 3.0, 1.6]) >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) >>> inds = np.digitize(x, bins) diff --git a/numpy/lib/_histograms_impl.py b/numpy/lib/_histograms_impl.py index 80eeffb6a03c..45b6500e892d 100644 --- a/numpy/lib/_histograms_impl.py +++ b/numpy/lib/_histograms_impl.py @@ -632,6 +632,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): Examples -------- + >>> import numpy as np >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1)) array([0. , 0.25, 0.5 , 0.75, 1. 
]) @@ -755,6 +756,7 @@ def histogram(a, bins=10, range=None, density=None, weights=None): Examples -------- + >>> import numpy as np >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) (array([0, 2, 1]), array([0, 1, 2, 3])) >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) @@ -972,6 +974,7 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None): Examples -------- + >>> import numpy as np >>> rng = np.random.default_rng() >>> r = rng.normal(size=(100,3)) >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 62f1d213b29f..3a2ce802f0d1 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -65,6 +65,7 @@ def ix_(*args): Examples -------- + >>> import numpy as np >>> a = np.arange(10).reshape(2, 5) >>> a array([[0, 1, 2, 3, 4], @@ -239,6 +240,7 @@ class MGridClass(nd_grid): Examples -------- + >>> import numpy as np >>> np.mgrid[0:5, 0:5] array([[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], @@ -505,6 +507,7 @@ class RClass(AxisConcatenator): Examples -------- + >>> import numpy as np >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] array([1, 2, 3, ..., 4, 5, 6]) >>> np.r_[-1:1:6j, [0]*3, 5, 6] @@ -563,6 +566,7 @@ class CClass(AxisConcatenator): Examples -------- + >>> import numpy as np >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] array([[1, 4], [2, 5], @@ -597,6 +601,7 @@ class ndenumerate: Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> for index, x in np.ndenumerate(a): ... print(index, x) @@ -649,6 +654,8 @@ class ndindex: Examples -------- + >>> import numpy as np + Dimensions as individual arguments >>> for index in np.ndindex(3, 2, 1): @@ -762,6 +769,7 @@ class IndexExpression: Examples -------- + >>> import numpy as np >>> np.s_[2::2] slice(2, None, 2) >>> np.index_exp[2::2] @@ -833,6 +841,7 @@ def fill_diagonal(a, val, wrap=False): Examples -------- + >>> import numpy as np >>> a = np.zeros((3, 3), int) >>> np.fill_diagonal(a, 5) >>> a @@ -959,6 +968,8 @@ def diag_indices(n, ndim=2): Examples -------- + >>> import numpy as np + Create a set of indices to access the diagonal of a (4, 4) array: >>> di = np.diag_indices(4) @@ -1023,7 +1034,8 @@ def diag_indices_from(arr): Examples -------- - + >>> import numpy as np + Create a 4 by 4 array. >>> a = np.arange(16).reshape(4, 4) diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index a38b0017ee5d..7f9a04575bf2 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -72,6 +72,7 @@ def has_nested_fields(ndtype): Examples -------- + >>> import numpy as np >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) >>> np.lib._iotools.has_nested_fields(dt) False @@ -100,6 +101,7 @@ def flatten_dtype(ndtype, flatten_base=False): Examples -------- + >>> import numpy as np >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), ... 
('block', int, (2, 3))]) >>> np.lib._iotools.flatten_dtype(dt) @@ -266,6 +268,7 @@ class NameValidator: Examples -------- + >>> import numpy as np >>> validator = np.lib._iotools.NameValidator() >>> validator(['file', 'field2', 'with space', 'CaSe']) ('file_', 'field2', 'with_space', 'CaSe') @@ -403,6 +406,7 @@ def str2bool(value): Examples -------- + >>> import numpy as np >>> np.lib._iotools.str2bool('TRUE') True >>> np.lib._iotools.str2bool('false') @@ -844,6 +848,7 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): Examples -------- + >>> import numpy as np >>> np.lib._iotools.easy_dtype(float) dtype('float64') >>> np.lib._iotools.easy_dtype("i4, f8") diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index d1fa5c0747f5..958ebc3cbe82 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -330,6 +330,7 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nanmin(a) 1.0 @@ -463,6 +464,7 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nanmax(a) 3.0 @@ -551,6 +553,7 @@ def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.array([[np.nan, 4], [2, 3]]) >>> np.argmin(a) 0 @@ -612,6 +615,7 @@ def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.array([[np.nan, 4], [2, 3]]) >>> np.argmax(a) 0 @@ -714,6 +718,7 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> np.nansum(1) 1 >>> np.nansum([1]) @@ -805,6 +810,7 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> np.nanprod(1) 1 >>> np.nanprod([1]) @@ -872,6 +878,7 @@ def nancumsum(a, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> np.nancumsum(1) array([1]) >>> np.nancumsum([1]) @@ -939,6 +946,7 @@ def nancumprod(a, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> np.nancumprod(1) array([1]) >>> np.nancumprod([1]) @@ -1036,6 +1044,7 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, np.nan], [3, 4]]) >>> np.nanmean(a) 2.6666666666666665 @@ -1201,6 +1210,7 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu Examples -------- + >>> import numpy as np >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) >>> a[0, 1] = np.nan >>> a @@ -1365,6 +1375,7 @@ def nanpercentile( Examples -------- + >>> import numpy as np >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) >>> a[0][1] = np.nan >>> a @@ -1555,6 +1566,7 @@ def nanquantile( Examples -------- + >>> import numpy as np >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) >>> a[0][1] = np.nan >>> a @@ -1831,6 +1843,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, np.nan], [3, 4]]) >>> np.nanvar(a) 1.5555555555555554 @@ -2027,6 +2040,7 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, np.nan], [3, 4]]) >>> np.nanstd(a) 1.247219128924647 diff --git 
a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 123679837a43..a83c46b0e654 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -51,6 +51,7 @@ class BagObj: Examples -------- + >>> import numpy as np >>> from numpy.lib._npyio_impl import BagObj as BO >>> class BagDemo: ... def __getitem__(self, key): # An instance of BagObj(BagDemo) @@ -157,6 +158,7 @@ class NpzFile(Mapping): Examples -------- + >>> import numpy as np >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) @@ -403,6 +405,8 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, Examples -------- + >>> import numpy as np + Store data to disk, and load it again: >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) @@ -542,6 +546,8 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): Examples -------- + >>> import numpy as np + >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() @@ -644,6 +650,7 @@ def savez(file, *args, **kwds): Examples -------- + >>> import numpy as np >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) @@ -732,6 +739,7 @@ def savez_compressed(file, *args, **kwds): Examples -------- + >>> import numpy as np >>> test_array = np.random.rand(3, 2) >>> test_vector = np.random.rand(4) >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) @@ -1243,6 +1251,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, Examples -------- + >>> import numpy as np >>> from io import StringIO # StringIO behaves like a file object >>> c = StringIO("0 1\n2 3") >>> np.loadtxt(c) @@ -1518,6 +1527,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', Examples -------- + >>> import numpy as np >>> x = y = z = np.arange(0.0,5.0,1.0) >>> np.savetxt('test.out', x, delimiter=',') # X is an array >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays @@ -1694,6 +1704,7 @@ def fromregex(file, regexp, dtype, encoding=None): Examples -------- + >>> import numpy as np >>> from io import StringIO >>> text = StringIO("1312 foo\n1534 bar\n444 qux") diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index 63c12f438240..784e6443b9c3 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -101,6 +101,8 @@ def poly(seq_of_zeros): Examples -------- + >>> import numpy as np + Given a sequence of a polynomial's zeros: >>> np.poly((0, 0, 0)) # Multiple root example @@ -209,6 +211,7 @@ def roots(p): Examples -------- + >>> import numpy as np >>> coeff = [3.2, 2, 1] >>> np.roots(coeff) array([-0.3125+0.46351241j, -0.3125-0.46351241j]) @@ -295,6 +298,8 @@ def polyint(p, m=1, k=None): Examples -------- + >>> import numpy as np + The defining property of the antiderivative: >>> p = np.poly1d([1,1,1]) @@ -390,6 +395,8 @@ def polyder(p, m=1): Examples -------- + >>> import numpy as np + The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: >>> p = np.poly1d([1,1,1,1]) @@ -575,6 +582,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): Examples -------- + >>> import numpy as np >>> import warnings >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) @@ -749,6 +757,7 @@ def polyval(p, x): Examples -------- + >>> import numpy as np >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 76 >>> np.polyval([3,0,1], np.poly1d(5)) @@ -808,6 +817,7 @@ def polyadd(a1, a2): Examples -------- + >>> import numpy as np >>> 
np.polyadd([1, 2], [9, 5, 4]) array([9, 6, 6]) @@ -873,6 +883,8 @@ def polysub(a1, a2): Examples -------- + >>> import numpy as np + .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) >>> np.polysub([2, 10, -2], [3, 10, -4]) @@ -933,6 +945,7 @@ def polymul(a1, a2): Examples -------- + >>> import numpy as np >>> np.polymul([1, 2, 3], [9, 5, 1]) array([ 9, 23, 38, 17, 3]) @@ -1007,6 +1020,8 @@ def polydiv(u, v): Examples -------- + >>> import numpy as np + .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 >>> x = np.array([3.0, 5.0, 2.0]) @@ -1096,6 +1111,8 @@ class poly1d: Examples -------- + >>> import numpy as np + Construct the polynomial :math:`x^2 + 2x + 3`: >>> p = np.poly1d([1, 2, 3]) diff --git a/numpy/lib/_scimath_impl.py b/numpy/lib/_scimath_impl.py index 43682fefee17..d5492c645247 100644 --- a/numpy/lib/_scimath_impl.py +++ b/numpy/lib/_scimath_impl.py @@ -66,6 +66,7 @@ def _tocomplex(arr): Examples -------- + >>> import numpy as np First, consider an input of type short: @@ -124,6 +125,7 @@ def _fix_real_lt_zero(x): Examples -------- + >>> import numpy as np >>> np.lib.scimath._fix_real_lt_zero([1,2]) array([1, 2]) @@ -152,6 +154,7 @@ def _fix_int_lt_zero(x): Examples -------- + >>> import numpy as np >>> np.lib.scimath._fix_int_lt_zero([1,2]) array([1, 2]) @@ -179,6 +182,7 @@ def _fix_real_abs_gt_1(x): Examples -------- + >>> import numpy as np >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) array([0, 1]) @@ -222,6 +226,8 @@ def sqrt(x): -------- For real, non-negative inputs this works just like `numpy.sqrt`: + >>> import numpy as np + >>> np.emath.sqrt(1) 1.0 >>> np.emath.sqrt([1, 4]) @@ -282,6 +288,7 @@ def log(x): Examples -------- + >>> import numpy as np >>> np.emath.log(np.exp(1)) 1.0 @@ -330,6 +337,7 @@ def log10(x): Examples -------- + >>> import numpy as np (We set the printing precision so the example can be auto-tested) @@ -373,6 +381,7 @@ def logn(n, x): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.logn(2, [4, 8]) @@ -420,6 +429,7 @@ def log2(x): Examples -------- + We set the printing precision so the example can be auto-tested: >>> np.set_printoptions(precision=4) @@ -468,6 +478,7 @@ def power(x, p): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.power(2, 2) @@ -523,6 +534,7 @@ def arccos(x): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.arccos(1) # a scalar is returned @@ -569,6 +581,7 @@ def arcsin(x): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.arcsin(0) @@ -617,6 +630,7 @@ def arctanh(x): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.arctanh(0.5) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index b2e98ab8866a..3e2f2ba7d46c 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -115,6 +115,7 @@ def take_along_axis(arr, indices, axis): Examples -------- + >>> import numpy as np For this sample array @@ -236,6 +237,7 @@ def put_along_axis(arr, indices, values, axis): Examples -------- + >>> import numpy as np For this sample array @@ -331,6 +333,7 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): Examples -------- + >>> import numpy as np >>> def my_func(a): ... \"\"\"Average first and last element of a 1-D array\"\"\" ... 
return (a[0] + a[-1]) * 0.5 @@ -461,6 +464,7 @@ def apply_over_axes(func, a, axes): Examples -------- + >>> import numpy as np >>> a = np.arange(24).reshape(2,3,4) >>> a array([[[ 0, 1, 2, 3], @@ -549,6 +553,7 @@ def expand_dims(a, axis): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2]) >>> x.shape (2,) @@ -651,6 +656,7 @@ def column_stack(tup): Examples -------- + >>> import numpy as np >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) @@ -710,6 +716,7 @@ def dstack(tup): Examples -------- + >>> import numpy as np >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.dstack((a,b)) @@ -762,6 +769,7 @@ def array_split(ary, indices_or_sections, axis=0): Examples -------- + >>> import numpy as np >>> x = np.arange(8.0) >>> np.array_split(x, 3) [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] @@ -858,6 +866,7 @@ def split(ary, indices_or_sections, axis=0): Examples -------- + >>> import numpy as np >>> x = np.arange(9.0) >>> np.split(x, 3) [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] @@ -901,6 +910,7 @@ def hsplit(ary, indices_or_sections): Examples -------- + >>> import numpy as np >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], @@ -971,6 +981,7 @@ def vsplit(ary, indices_or_sections): Examples -------- + >>> import numpy as np >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], @@ -1024,6 +1035,7 @@ def dsplit(ary, indices_or_sections): Examples -------- + >>> import numpy as np >>> x = np.arange(16.0).reshape(2, 2, 4) >>> x array([[[ 0., 1., 2., 3.], @@ -1126,6 +1138,7 @@ def kron(a, b): Examples -------- + >>> import numpy as np >>> np.kron([1,10,100], [5,6,7]) array([ 5, 6, 7, ..., 500, 600, 700]) >>> np.kron([5,6,7], [1,10,100]) @@ -1240,6 +1253,7 @@ def tile(A, reps): Examples -------- + >>> import numpy as np >>> a = np.array([0, 1, 2]) >>> np.tile(a, 2) array([0, 1, 2, 0, 1, 2]) diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index 0d437ea1e416..def62523ee0e 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -204,6 +204,7 @@ def sliding_window_view(x, window_shape, axis=None, *, Examples -------- + >>> import numpy as np >>> from numpy.lib.stride_tricks import sliding_window_view >>> x = np.arange(6) >>> x.shape @@ -413,6 +414,7 @@ def broadcast_to(array, shape, subok=False): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> np.broadcast_to(x, (3, 3)) array([[1, 2, 3], @@ -475,6 +477,7 @@ def broadcast_shapes(*args): Examples -------- + >>> import numpy as np >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2)) (3, 2) @@ -526,6 +529,7 @@ def broadcast_arrays(*args, subok=False): Examples -------- + >>> import numpy as np >>> x = np.array([[1,2,3]]) >>> y = np.array([[4],[5]]) >>> np.broadcast_arrays(x, y) diff --git a/numpy/lib/_twodim_base_impl.py b/numpy/lib/_twodim_base_impl.py index 8eb6eccfcfbd..584efbfc307e 100644 --- a/numpy/lib/_twodim_base_impl.py +++ b/numpy/lib/_twodim_base_impl.py @@ -79,6 +79,7 @@ def fliplr(m): Examples -------- + >>> import numpy as np >>> A = np.diag([1.,2.,3.]) >>> A array([[1., 0., 0.], @@ -133,6 +134,7 @@ def flipud(m): Examples -------- + >>> import numpy as np >>> A = np.diag([1.0, 2, 3]) >>> A array([[1., 0., 0.], @@ -203,6 +205,7 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): Examples -------- + >>> import numpy as np >>> np.eye(2, dtype=int) array([[1, 0], [0, 1]]) @@ -277,6 +280,7 @@ def diag(v, k=0): 
Examples -------- + >>> import numpy as np >>> x = np.arange(9).reshape((3,3)) >>> x array([[0, 1, 2], @@ -341,6 +345,7 @@ def diagflat(v, k=0): Examples -------- + >>> import numpy as np >>> np.diagflat([[1,2], [3,4]]) array([[1, 0, 0, 0], [0, 2, 0, 0], @@ -401,6 +406,7 @@ def tri(N, M=None, k=0, dtype=float, *, like=None): Examples -------- + >>> import numpy as np >>> np.tri(3, 5, 2, dtype=int) array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0], @@ -462,6 +468,7 @@ def tril(m, k=0): Examples -------- + >>> import numpy as np >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) array([[ 0, 0, 0], [ 4, 0, 0], @@ -506,6 +513,7 @@ def triu(m, k=0): Examples -------- + >>> import numpy as np >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) array([[ 1, 2, 3], [ 4, 5, 6], @@ -576,6 +584,7 @@ def vander(x, N=None, increasing=False): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3, 5]) >>> N = 3 >>> np.vander(x, N) @@ -718,6 +727,7 @@ def histogram2d(x, y, bins=10, range=None, density=None, weights=None): Examples -------- + >>> import numpy as np >>> from matplotlib.image import NonUniformImage >>> import matplotlib.pyplot as plt @@ -856,6 +866,8 @@ def mask_indices(n, mask_func, k=0): Examples -------- + >>> import numpy as np + These are the indices that would allow you to access the upper triangular part of any 3x3 array: @@ -925,6 +937,8 @@ def tril_indices(n, k=0, m=None): Examples -------- + >>> import numpy as np + Compute two different sets of indices to access 4x4 arrays, one for the lower triangular part starting at the main diagonal, and one starting two diagonals further right: @@ -992,8 +1006,9 @@ def tril_indices_from(arr, k=0): Examples -------- + >>> import numpy as np - Create a 4 by 4 array. + Create a 4 by 4 array >>> a = np.arange(16).reshape(4, 4) >>> a @@ -1076,6 +1091,8 @@ def triu_indices(n, k=0, m=None): Examples -------- + >>> import numpy as np + Compute two different sets of indices to access 4x4 arrays, one for the upper triangular part starting at the main diagonal, and one starting two diagonals further right: @@ -1144,8 +1161,9 @@ def triu_indices_from(arr, k=0): Examples -------- + >>> import numpy as np - Create a 4 by 4 array. 
+ Create a 4 by 4 array >>> a = np.arange(16).reshape(4, 4) >>> a diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 2e4ef4e6954a..a0d3ad7afead 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -56,6 +56,7 @@ def mintypecode(typechars, typeset='GDFgdf', default='d'): Examples -------- + >>> import numpy as np >>> np.mintypecode(['d', 'f', 'S']) 'd' >>> x = np.array([1.1, 2-3.j]) @@ -103,6 +104,7 @@ def real(val): Examples -------- + >>> import numpy as np >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.real array([1., 3., 5.]) @@ -149,6 +151,7 @@ def imag(val): Examples -------- + >>> import numpy as np >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.imag array([2., 4., 6.]) @@ -195,6 +198,7 @@ def iscomplex(x): Examples -------- + >>> import numpy as np >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) array([ True, False, False, False, False, True]) @@ -235,6 +239,7 @@ def isreal(x): Examples -------- + >>> import numpy as np >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) >>> np.isreal(a) array([False, True, True, True, True, False]) @@ -287,6 +292,7 @@ def iscomplexobj(x): Examples -------- + >>> import numpy as np >>> np.iscomplexobj(1) False >>> np.iscomplexobj(1+0j) @@ -341,6 +347,7 @@ def isrealobj(x): Examples -------- + >>> import numpy as np >>> np.isrealobj(1) True >>> np.isrealobj(1+0j) @@ -434,6 +441,7 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): Examples -------- + >>> import numpy as np >>> np.nan_to_num(np.inf) 1.7976931348623157e+308 >>> np.nan_to_num(-np.inf) @@ -525,6 +533,7 @@ def real_if_close(a, tol=100): Examples -------- + >>> import numpy as np >>> np.finfo(float).eps 2.2204460492503131e-16 # may vary @@ -593,6 +602,7 @@ def typename(char): Examples -------- + >>> import numpy as np >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] >>> for typechar in typechars: diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 241d8af4b4ce..3fc5a32d33a6 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -49,6 +49,7 @@ def fix(x, out=None): Examples -------- + >>> import numpy as np >>> np.fix(3.14) 3.0 >>> np.fix(3) @@ -111,6 +112,7 @@ def isposinf(x, out=None): Examples -------- + >>> import numpy as np >>> np.isposinf(np.inf) True >>> np.isposinf(-np.inf) @@ -180,6 +182,7 @@ def isneginf(x, out=None): Examples -------- + >>> import numpy as np >>> np.isneginf(-np.inf) True >>> np.isneginf(np.inf) diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index 4688eadc32ac..70e638d4dde1 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -30,6 +30,7 @@ def opt_func_info(func_name=None, signature=None): Retrieve dispatch information for functions named 'add' or 'sub' and data types 'float64' or 'float32': + >>> import numpy as np >>> dict = np.lib.introspect.opt_func_info( ... func_name="add|abs", signature="float64|complex64" ... 
) diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index bc5c5de095a8..ab16d1f9f1aa 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -52,6 +52,7 @@ def recursive_fill_fields(input, output): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)]) >>> b = np.zeros((3,), dtype=a.dtype) @@ -84,6 +85,7 @@ def _get_fieldspec(dtype): Examples -------- + >>> import numpy as np >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)]) >>> dt.descr [(('a', 'A'), '>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype) ('A',) @@ -148,6 +151,7 @@ def get_names_flat(adtype): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None False @@ -173,6 +177,7 @@ def flatten_descr(ndtype): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('a', '>> rfn.flatten_descr(ndtype) @@ -240,6 +245,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('A', int), ... ('B', [('BA', int), @@ -380,6 +386,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) array([( 1, 10.), ( 2, 20.), (-1, 30.)], @@ -526,6 +533,7 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])]) @@ -621,6 +629,7 @@ def rename_fields(base, namemapper): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], ... 
dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) @@ -805,6 +814,7 @@ def repack_fields(a, align=False, recurse=False): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> def print_offsets(d): @@ -975,6 +985,7 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) @@ -1110,6 +1121,7 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False, Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) @@ -1204,6 +1216,7 @@ def apply_along_fields(func, arr): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], @@ -1294,6 +1307,7 @@ def require_fields(array, required_dtype): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) @@ -1338,6 +1352,7 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> x = np.array([1, 2,]) >>> rfn.stack_arrays(x) is x @@ -1427,6 +1442,7 @@ def find_duplicates(a, key=None, ignoremask=True, return_index=False): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = [('a', int)] >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], From c894861370ffb289d975c79a92c62d19327829aa Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 18 Jun 2024 20:25:39 +0530 Subject: [PATCH 662/980] DOC, API: Add `>>> import numpy as np` stub to `numpy/ma/` --- numpy/ma/core.py | 82 ++++++++++++++++++++++++++++++++++++++++++++++ numpy/ma/extras.py | 33 ++++++++++++++++++- 2 files changed, 114 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 453c63614d2e..3073f53a0d07 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -286,6 +286,7 @@ def default_fill_value(obj): Examples -------- + >>> import numpy as np >>> np.ma.default_fill_value(1) 999999 >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) @@ -348,6 +349,7 @@ def minimum_fill_value(obj): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.int8() >>> ma.minimum_fill_value(a) @@ -399,6 +401,7 @@ def maximum_fill_value(obj): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.int8() >>> ma.maximum_fill_value(a) @@ -525,6 +528,7 @@ def set_fill_value(a, fill_value): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(5) >>> a @@ -593,6 +597,7 @@ def common_fill_value(a, b): Examples -------- + >>> import numpy as np >>> x = np.ma.array([0, 1.], fill_value=3) >>> y = np.ma.array([0, 1.], fill_value=3) >>> np.ma.common_fill_value(x, y) @@ -637,6 +642,7 @@ def filled(a, fill_value=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... 
[1, 0, 0], @@ -716,6 +722,7 @@ def getdata(a, subok=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a @@ -779,6 +786,7 @@ def fix_invalid(a, mask=nomask, copy=True, fill_value=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) >>> x masked_array(data=[--, -1.0, nan, inf], @@ -1371,6 +1379,7 @@ def make_mask_descr(ndtype): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> dtype = np.dtype({'names':['foo', 'bar'], ... 'formats':[np.float32, np.int64]}) @@ -1405,6 +1414,7 @@ def getmask(a): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a @@ -1467,6 +1477,7 @@ def getmaskarray(arr): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a @@ -1524,6 +1535,7 @@ def is_mask(m): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> m @@ -1608,6 +1620,7 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> m = [True, False, True, True] >>> ma.make_mask(m) @@ -1696,6 +1709,7 @@ def make_mask_none(newshape, dtype=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> ma.make_mask_none((3,)) array([False, False, False]) @@ -1758,6 +1772,7 @@ def mask_or(m1, m2, copy=False, shrink=True): Examples -------- + >>> import numpy as np >>> m1 = np.ma.make_mask([0, 1, 1, 0]) >>> m2 = np.ma.make_mask([1, 0, 0, 0]) >>> np.ma.mask_or(m1, m2) @@ -1801,6 +1816,7 @@ def flatten_mask(mask): Examples -------- + >>> import numpy as np >>> mask = np.array([0, 0, 1]) >>> np.ma.flatten_mask(mask) array([False, False, True]) @@ -1890,6 +1906,7 @@ def masked_where(condition, a, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -1987,6 +2004,7 @@ def masked_greater(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2013,6 +2031,7 @@ def masked_greater_equal(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2039,6 +2058,7 @@ def masked_less(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2065,6 +2085,7 @@ def masked_less_equal(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2091,6 +2112,7 @@ def masked_not_equal(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2121,6 +2143,7 @@ def masked_equal(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2154,6 +2177,7 @@ def masked_inside(x, v1, v2, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_inside(x, -0.3, 0.3) @@ -2194,6 +2218,7 @@ def masked_outside(x, v1, v2, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_outside(x, -0.3, 0.3) @@ -2247,6 +2272,7 @@ def masked_object(x, value, copy=True, shrink=True): Examples -------- + >>> import numpy as np >>> import 
numpy.ma as ma >>> food = np.array(['green_eggs', 'ham'], dtype=object) >>> # don't eat spoiled food @@ -2323,6 +2349,7 @@ def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = np.array([1, 1.1, 2, 1.1, 3]) >>> ma.masked_values(x, 1.1) @@ -2371,6 +2398,7 @@ def masked_invalid(a, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(5, dtype=float) >>> a[2] = np.nan @@ -2532,6 +2560,7 @@ def flatten_structured_array(a): Examples -------- + >>> import numpy as np >>> ndtype = [('a', int), ('b', float)] >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) >>> np.ma.flatten_structured_array(a) @@ -2637,6 +2666,7 @@ class MaskedIterator: Examples -------- + >>> import numpy as np >>> x = np.ma.array(arange(6).reshape(2, 3)) >>> fl = x.flat >>> type(fl) @@ -2699,6 +2729,7 @@ def __next__(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([3, 2], mask=[0, 1]) >>> fl = x.flat >>> next(fl) @@ -2777,6 +2808,7 @@ class MaskedArray(ndarray): Examples -------- + >>> import numpy as np The ``mask`` can be initialized with an array of boolean values with the same shape as ``data``. @@ -3632,6 +3664,7 @@ def hardmask(self): Examples -------- + >>> import numpy as np >>> x = np.arange(10) >>> m = np.ma.masked_array(x, x>5) >>> assert not m.hardmask @@ -3697,6 +3730,7 @@ def shrink_mask(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) >>> x.mask array([[False, False], @@ -3757,6 +3791,7 @@ def fill_value(self): Examples -------- + >>> import numpy as np >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: ... np.ma.array([0, 1], dtype=dt).get_fill_value() ... 
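[Editor's aside -- not part of the patch above. A minimal runnable sketch of the fill-value machinery that the preceding hunk documents: the fill value recorded on a masked array is what ``filled()`` substitutes for masked entries. Only standard numpy.ma API is used; the concrete values are illustrative.]

    >>> import numpy as np
    >>> m = np.ma.array([1, 2, 3], mask=[0, 1, 0], fill_value=-1)
    >>> int(m.fill_value)   # the value recorded at construction time
    -1
    >>> m.filled()          # masked entries are replaced by fill_value
    array([ 1, -1,  3])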
@@ -3841,6 +3876,7 @@ def filled(self, fill_value=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) >>> x.filled() array([ 1, 2, -999, 4, -999]) @@ -3908,6 +3944,7 @@ def compressed(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) >>> x.compressed() array([0, 1]) @@ -3961,6 +3998,7 @@ def compress(self, condition, axis=None, out=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -4534,6 +4572,7 @@ def imag(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.imag masked_array(data=[1.0, --, 1.6], @@ -4561,6 +4600,7 @@ def real(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.real masked_array(data=[1.0, --, 3.45], @@ -4703,6 +4743,7 @@ def ravel(self, order='C'): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -4772,6 +4813,7 @@ def reshape(self, *s, **kwargs): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) >>> x masked_array( @@ -4847,6 +4889,7 @@ def put(self, indices, values, mode='raise'): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -4915,6 +4958,7 @@ def ids(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) >>> x.ids() (166670640, 166659832) # may vary @@ -4941,6 +4985,7 @@ def iscontiguous(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1, 2, 3]) >>> x.iscontiguous() True @@ -4975,6 +5020,7 @@ def all(self, axis=None, out=None, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> np.ma.array([1,2,3]).all() True >>> a = np.ma.array([1,2,3], mask=True) @@ -5069,6 +5115,7 @@ def nonzero(self): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array(np.eye(3)) >>> x @@ -5195,6 +5242,7 @@ def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -5266,6 +5314,7 @@ def cumsum(self, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) >>> marr.cumsum() masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33], @@ -5373,6 +5422,7 @@ def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.ma.array([1,2,3], mask=[False, False, True]) >>> a masked_array(data=[1, 2, --], @@ -5435,6 +5485,7 @@ def anom(self, axis=None, dtype=None): Examples -------- + >>> import numpy as np >>> a = np.ma.array([1,2,3]) >>> a.anom() masked_array(data=[-1., 0., 1.], @@ -5561,6 +5612,7 @@ def round(self, decimals=0, out=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array([1.35, 2.5, 1.5, 1.75, 2.25, 2.75], ... 
mask=[0, 0, 0, 1, 0, 0]) @@ -5639,6 +5691,7 @@ def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, Examples -------- + >>> import numpy as np >>> a = np.ma.array([3,2,1], mask=[False, False, True]) >>> a masked_array(data=[3, 2, --], @@ -5696,6 +5749,7 @@ def argmin(self, axis=None, fill_value=None, out=None, *, Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) >>> x.shape = (2,2) >>> x @@ -5741,6 +5795,7 @@ def argmax(self, axis=None, fill_value=None, out=None, *, Examples -------- + >>> import numpy as np >>> a = np.arange(6).reshape(2,3) >>> a.argmax() 5 @@ -5804,6 +5859,7 @@ def sort(self, axis=-1, kind=None, order=None, endwith=True, Examples -------- + >>> import numpy as np >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Default >>> a.sort() @@ -6089,6 +6145,7 @@ def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): Examples -------- + >>> import numpy as np >>> x = np.ma.MaskedArray([[4, 9, 2, 10], ... [6, 9, 7, 12]]) @@ -6243,6 +6300,7 @@ def tolist(self, fill_value=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) >>> x.tolist() [[1, None, 3], [None, 5, None], [7, None, 9]] @@ -6322,6 +6380,7 @@ def tobytes(self, fill_value=None, order='C'): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.tobytes() b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00' @@ -6371,6 +6430,7 @@ def toflex(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -6631,6 +6691,7 @@ def isMaskedArray(x): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.eye(3, 3) >>> a @@ -6815,6 +6876,7 @@ def is_masked(x): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> x @@ -7058,6 +7120,7 @@ def power(a, b, third=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [11.2, -3.973, 0.801, -1.41] >>> mask = [0, 0, 0, 1] @@ -7152,6 +7215,7 @@ def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None, *, Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [11.2, -3.973, 0.801, -1.41] >>> mask = [0, 0, 0, 1] @@ -7191,6 +7255,7 @@ def compressed(x): Examples -------- + >>> import numpy as np Create an array with negative values masked: @@ -7239,6 +7304,7 @@ def concatenate(arrays, axis=0): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.arange(3) >>> a[1] = ma.masked @@ -7289,6 +7355,7 @@ def diag(v, k=0): Examples -------- + >>> import numpy as np Create an array with negative values masked: @@ -7360,6 +7427,7 @@ def right_shift(a, n): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [11, 3, 8, 1] >>> mask = [0, 0, 0, 1] @@ -7420,6 +7488,7 @@ def putmask(a, mask, values): # , mode='raise'): Examples -------- + >>> import numpy as np >>> arr = [[1, 2], [3, 4]] >>> mask = [[1, 0], [0, 0]] >>> x = np.ma.array(arr, mask=mask) @@ -7470,6 +7539,7 @@ def transpose(a, axes=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.arange(4).reshape((2,2)) >>> x[1, 1] = ma.masked @@ -7530,6 +7600,7 @@ def resize(x, new_shape): Examples -------- + >>> import numpy as np >>> 
import numpy.ma as ma >>> a = ma.array([[1, 2] ,[3, 4]]) >>> a[0, 1] = ma.masked @@ -7671,6 +7742,7 @@ def diff(a, /, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.array([1, 2, 3, 4, 7, 0, 2, 3]) >>> x = np.ma.masked_where(a < 2, a) >>> np.ma.diff(x) @@ -7772,6 +7844,7 @@ def where(condition, x=_NoValue, y=_NoValue): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], ... [1, 0, 1], ... [0, 1, 0]]) @@ -7869,6 +7942,7 @@ def choose(indices, choices, out=None, mode='raise'): Examples -------- + >>> import numpy as np >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) >>> a = np.array([2, 1, 0]) >>> np.ma.choose(a, choice) @@ -7932,6 +8006,7 @@ def round_(a, decimals=0, out=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [11.2, -3.973, 0.801, -1.41] >>> mask = [0, 0, 0, 1] @@ -8016,6 +8091,7 @@ def dot(a, b, strict=False, out=None): Examples -------- + >>> import numpy as np >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) >>> np.ma.dot(a, b) @@ -8203,6 +8279,7 @@ def allequal(a, b, fill_value=True): Examples -------- + >>> import numpy as np >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a masked_array(data=[10000000000.0, 1e-07, --], @@ -8280,6 +8357,7 @@ def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): Examples -------- + >>> import numpy as np >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a masked_array(data=[10000000000.0, 1e-07, --], @@ -8372,6 +8450,7 @@ def asarray(a, dtype=None, order=None): Examples -------- + >>> import numpy as np >>> x = np.arange(10.).reshape(2, 5) >>> x array([[0., 1., 2., 3., 4.], @@ -8419,6 +8498,7 @@ def asanyarray(a, dtype=None): Examples -------- + >>> import numpy as np >>> x = np.arange(10.).reshape(2, 5) >>> x array([[0., 1., 2., 3., 4.], @@ -8474,6 +8554,7 @@ def fromflex(fxarray): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4) >>> rec = x.toflex() >>> rec @@ -8694,6 +8775,7 @@ def append(a, b, axis=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_values([1, 2, 3], 2) >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index dc1f8658f82a..f5a1de6f2da0 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -70,6 +70,7 @@ def count_masked(arr, axis=None): Examples -------- + >>> import numpy as np >>> a = np.arange(9).reshape((3,3)) >>> a = np.ma.array(a) >>> a[1, 0] = np.ma.masked @@ -133,6 +134,7 @@ def masked_all(shape, dtype=float): Examples -------- + >>> import numpy as np >>> np.ma.masked_all((3, 3)) masked_array( data=[[--, --, --], @@ -196,6 +198,7 @@ def masked_all_like(arr): Examples -------- + >>> import numpy as np >>> arr = np.zeros((2, 3), dtype=np.float32) >>> arr array([[0., 0., 0.], @@ -499,6 +502,7 @@ def apply_over_axes(func, a, axes): Examples -------- + >>> import numpy as np >>> a = np.ma.arange(24).reshape(2,3,4) >>> a[:,0,1] = np.ma.masked >>> a[:,1,:] = np.ma.masked @@ -608,6 +612,7 @@ def average(a, axis=None, weights=None, returned=False, *, Examples -------- + >>> import numpy as np >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) >>> np.ma.average(a, weights=[3, 1, 0, 0]) 1.25 @@ -761,6 +766,7 @@ def median(a, axis=None, out=None, 
overwrite_input=False, keepdims=False): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) >>> np.ma.median(x) 1.5 @@ -895,6 +901,7 @@ def compress_nd(x, axis=None): Examples -------- + >>> import numpy as np >>> arr = [[1, 2], [3, 4]] >>> mask = [[0, 1], [0, 0]] >>> x = np.ma.array(arr, mask=mask) @@ -956,6 +963,7 @@ def compress_rowcols(x, axis=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) @@ -1009,12 +1017,13 @@ def compress_rows(a): Examples -------- + >>> import numpy as np >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> np.ma.compress_rows(a) array([[6, 7, 8]]) - + """ a = asarray(a) if a.ndim != 2: @@ -1047,6 +1056,7 @@ def compress_cols(a): Examples -------- + >>> import numpy as np >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) @@ -1107,6 +1117,7 @@ def mask_rowcols(a, axis=None): Examples -------- + >>> import numpy as np >>> a = np.zeros((3, 3), dtype=int) >>> a[1, 1] = 1 >>> a @@ -1163,6 +1174,7 @@ def mask_rows(a, axis=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.zeros((3, 3), dtype=int) >>> a[1, 1] = 1 >>> a @@ -1213,6 +1225,7 @@ def mask_cols(a, axis=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.zeros((3, 3), dtype=int) >>> a[1, 1] = 1 >>> a @@ -1266,6 +1279,7 @@ def ediff1d(arr, to_end=None, to_begin=None): Examples -------- + >>> import numpy as np >>> arr = np.ma.array([1, 2, 4, 7, 0]) >>> np.ma.ediff1d(arr) masked_array(data=[ 1, 2, 3, -7], @@ -1303,6 +1317,7 @@ def unique(ar1, return_index=False, return_inverse=False): Examples -------- + >>> import numpy as np >>> a = [1, 2, 1000, 2, 3] >>> mask = [0, 0, 1, 0, 0] >>> masked_a = np.ma.masked_array(a, mask) @@ -1354,6 +1369,7 @@ def intersect1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1]) >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1]) >>> np.ma.intersect1d(x, y) @@ -1383,6 +1399,7 @@ def setxor1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> ar1 = np.ma.array([1, 2, 3, 2, 4]) >>> ar2 = np.ma.array([2, 3, 5, 7, 5]) >>> np.ma.setxor1d(ar1, ar2) @@ -1427,6 +1444,7 @@ def in1d(ar1, ar2, assume_unique=False, invert=False): Examples -------- + >>> import numpy as np >>> ar1 = np.ma.array([0, 1, 2, 5, 0]) >>> ar2 = [0, 2] >>> np.ma.in1d(ar1, ar2) @@ -1477,6 +1495,7 @@ def isin(element, test_elements, assume_unique=False, invert=False): Examples -------- + >>> import numpy as np >>> element = np.ma.array([1, 2, 3, 4, 5, 6]) >>> test_elements = [0, 2] >>> np.ma.isin(element, test_elements) @@ -1502,6 +1521,7 @@ def union1d(ar1, ar2): Examples -------- + >>> import numpy as np >>> ar1 = np.ma.array([1, 2, 3, 4]) >>> ar2 = np.ma.array([3, 4, 5, 6]) >>> np.ma.union1d(ar1, ar2) @@ -1526,6 +1546,7 @@ def setdiff1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) >>> np.ma.setdiff1d(x, [1, 2]) masked_array(data=[3, --], @@ -1659,6 +1680,7 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) >>> y = np.ma.array([[1, 0], [0, 1]], mask=[0, 0, 1, 1]) >>> np.ma.cov(x, y) @@ -1751,6 +1773,7 @@ def corrcoef(x, y=None, rowvar=True, 
bias=np._NoValue, allow_masked=True, Examples -------- + >>> import numpy as np >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) >>> np.ma.corrcoef(x) masked_array( @@ -1822,6 +1845,7 @@ class mr_class(MAxisConcatenator): Examples -------- + >>> import numpy as np >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] masked_array(data=[1, 2, 3, ..., 4, 5, 6], mask=False, @@ -1865,6 +1889,7 @@ def ndenumerate(a, compressed=True): Examples -------- + >>> import numpy as np >>> a = np.ma.arange(9).reshape((3, 3)) >>> a[1, 0] = np.ma.masked >>> a[1, 2] = np.ma.masked @@ -1934,6 +1959,7 @@ def flatnotmasked_edges(a): Examples -------- + >>> import numpy as np >>> a = np.ma.arange(10) >>> np.ma.flatnotmasked_edges(a) array([0, 9]) @@ -1991,6 +2017,7 @@ def notmasked_edges(a, axis=None): Examples -------- + >>> import numpy as np >>> a = np.arange(9).reshape((3, 3)) >>> m = np.zeros_like(a) >>> m[1:, 1:] = 1 @@ -2040,6 +2067,7 @@ def flatnotmasked_contiguous(a): Examples -------- + >>> import numpy as np >>> a = np.ma.arange(10) >>> np.ma.flatnotmasked_contiguous(a) [slice(0, 10, None)] @@ -2101,6 +2129,7 @@ def notmasked_contiguous(a, axis=None): Examples -------- + >>> import numpy as np >>> a = np.arange(12).reshape((3, 4)) >>> mask = np.zeros_like(a) >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0 @@ -2202,6 +2231,7 @@ def clump_unmasked(a): Examples -------- + >>> import numpy as np >>> a = np.ma.masked_array(np.arange(10)) >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked >>> np.ma.clump_unmasked(a) @@ -2241,6 +2271,7 @@ def clump_masked(a): Examples -------- + >>> import numpy as np >>> a = np.ma.masked_array(np.arange(10)) >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked >>> np.ma.clump_masked(a) From 44a1107d97b799632c3402367ebdaf161212a067 Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 18 Jun 2024 20:25:54 +0530 Subject: [PATCH 663/980] DOC, API: Add `>>> import numpy as np` stub to `numpy/linalg/_linalg.py` --- numpy/linalg/_linalg.py | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 689cdf52ed0b..07f36fe6cddf 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -272,6 +272,7 @@ def tensorsolve(a, b, axes=None): Examples -------- + >>> import numpy as np >>> a = np.eye(2*3*4) >>> a.shape = (2*3, 4, 2, 3, 4) >>> rng = np.random.default_rng() @@ -380,6 +381,7 @@ def solve(a, b): ``x0 + 2 * x1 = 1`` and ``3 * x0 + 5 * x1 = 2``: + >>> import numpy as np >>> a = np.array([[1, 2], [3, 5]]) >>> b = np.array([1, 2]) >>> x = np.linalg.solve(a, b) @@ -452,6 +454,7 @@ def tensorinv(a, ind=2): Examples -------- + >>> import numpy as np >>> a = np.eye(4*6) >>> a.shape = (4, 6, 8, 3) >>> ainv = np.linalg.tensorinv(a, ind=2) @@ -541,6 +544,7 @@ def inv(a): Examples -------- + >>> import numpy as np >>> from numpy.linalg import inv >>> a = np.array([[1., 2.], [3., 4.]]) >>> ainv = inv(a) @@ -652,6 +656,7 @@ def matrix_power(a, n): Examples -------- + >>> import numpy as np >>> from numpy.linalg import matrix_power >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. 
of the imaginary unit >>> matrix_power(i, 3) # should = -i @@ -806,6 +811,7 @@ def cholesky(a, /, *, upper=False): Examples -------- + >>> import numpy as np >>> A = np.array([[1,-2j],[2j,5]]) >>> A array([[ 1.+0.j, -0.-2.j], @@ -981,6 +987,7 @@ def qr(a, mode='reduced'): Examples -------- + >>> import numpy as np >>> rng = np.random.default_rng() >>> a = rng.normal(size=(9, 6)) >>> Q, R = np.linalg.qr(a) @@ -1156,6 +1163,7 @@ def eigvals(a): if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as ``A``: + >>> import numpy as np >>> from numpy import linalg as LA >>> x = np.random.random() >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) @@ -1252,6 +1260,7 @@ def eigvalsh(a, UPLO='L'): Examples -------- + >>> import numpy as np >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> LA.eigvalsh(a) @@ -1392,6 +1401,7 @@ def eig(a): Examples -------- + >>> import numpy as np >>> from numpy import linalg as LA (Almost) trivial example with real eigenvalues and eigenvectors. @@ -1532,6 +1542,7 @@ def eigh(a, UPLO='L'): Examples -------- + >>> import numpy as np >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> a @@ -1574,12 +1585,14 @@ def eigh(a, UPLO='L'): [0.+2.j, 2.+0.j]]) >>> wa, va = LA.eigh(a) >>> wb, vb = LA.eig(b) - >>> wa; wb + >>> wa array([1., 6.]) + >>> wb array([6.+0.j, 1.+0.j]) - >>> va; vb + >>> va array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary [ 0. +0.89442719j, 0. -0.4472136j ]]) + >>> vb array([[ 0.89442719+0.j , -0. +0.4472136j], [-0. +0.4472136j, 0.89442719+0.j ]]) @@ -1707,6 +1720,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Examples -------- + >>> import numpy as np >>> rng = np.random.default_rng() >>> a = rng.normal(size=(9, 6)) + 1j*rng.normal(size=(9, 6)) >>> b = rng.normal(size=(2, 7, 8, 3)) + 1j*rng.normal(size=(2, 7, 8, 3)) @@ -1914,6 +1928,7 @@ def cond(x, p=None): Examples -------- + >>> import numpy as np >>> from numpy import linalg as LA >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) >>> a @@ -2072,6 +2087,7 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): Examples -------- + >>> import numpy as np >>> from numpy.linalg import matrix_rank >>> matrix_rank(np.eye(4)) # Full rank matrix 4 @@ -2187,6 +2203,7 @@ def pinv(a, rcond=None, hermitian=False, *, rtol=_NoValue): The following example checks that ``a * a+ * a == a`` and ``a+ * a * a+ == a+``: + >>> import numpy as np >>> rng = np.random.default_rng() >>> a = rng.normal(size=(9, 6)) >>> B = np.linalg.pinv(a) @@ -2283,6 +2300,7 @@ def slogdet(a): -------- The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> (sign, logabsdet) = np.linalg.slogdet(a) >>> (sign, logabsdet) @@ -2357,6 +2375,7 @@ def det(a): -------- The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.linalg.det(a) -2.0 # may vary @@ -2453,6 +2472,7 @@ def lstsq(a, b, rcond=None): -------- Fit a line, ``y = mx + c``, through some noisy data-points: + >>> import numpy as np >>> x = np.array([0, 1, 2, 3]) >>> y = np.array([-1, 0.2, 0.9, 2.1]) @@ -2654,6 +2674,8 @@ def norm(x, ord=None, axis=None, keepdims=False): Examples -------- + + >>> import numpy as np >>> from numpy import linalg as LA >>> a = np.arange(9) - 4 >>> a @@ -2891,6 +2913,7 @@ def multi_dot(arrays): return functools.reduce(np.dot, arrays) -------- `multi_dot` allows you to write:: + >>> import 
numpy as np >>> from numpy.linalg import multi_dot >>> # Prepare some data >>> A = np.random.random((10000, 100)) From 7f72956744af26834af8b15dfd042d92e4ce3591 Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 18 Jun 2024 20:26:02 +0530 Subject: [PATCH 664/980] DOC, API: Add `>>> import numpy as np` stub to `numpy/fft/` --- numpy/fft/_helper.py | 4 ++++ numpy/fft/_pocketfft.py | 14 ++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/numpy/fft/_helper.py b/numpy/fft/_helper.py index 9f4512f90715..f6c114bab18d 100644 --- a/numpy/fft/_helper.py +++ b/numpy/fft/_helper.py @@ -42,6 +42,7 @@ def fftshift(x, axes=None): Examples -------- + >>> import numpy as np >>> freqs = np.fft.fftfreq(10, 0.1) >>> freqs array([ 0., 1., 2., ..., -3., -2., -1.]) @@ -97,6 +98,7 @@ def ifftshift(x, axes=None): Examples -------- + >>> import numpy as np >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) >>> freqs array([[ 0., 1., 2.], @@ -153,6 +155,7 @@ def fftfreq(n, d=1.0, device=None): Examples -------- + >>> import numpy as np >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) >>> fourier = np.fft.fft(signal) >>> n = signal.size @@ -211,6 +214,7 @@ def rfftfreq(n, d=1.0, device=None): Examples -------- + >>> import numpy as np >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) >>> fourier = np.fft.rfft(signal) >>> n = signal.size diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 5972a346de20..29316c784406 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -185,6 +185,7 @@ def fft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j, 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j, @@ -291,6 +292,7 @@ def ifft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> np.fft.ifft([0, 4, 0, 0]) array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary @@ -398,6 +400,7 @@ def rfft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> np.fft.fft([0, 1, 0, 0]) array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary >>> np.fft.rfft([0, 1, 0, 0]) @@ -506,6 +509,7 @@ def irfft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> np.fft.ifft([1, -1j, -1, 1j]) array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary >>> np.fft.irfft([1, -1j, -1]) @@ -601,6 +605,7 @@ def hfft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> signal = np.array([1, 2, 3, 4, 3, 2]) >>> np.fft.fft(signal) array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary @@ -686,6 +691,7 @@ def ihfft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) >>> np.fft.ifft(spectrum) array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary @@ -855,6 +861,7 @@ def fftn(a, s=None, axes=None, norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.mgrid[:3, :3, :3][0] >>> np.fft.fftn(a, axes=(1, 2)) array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary @@ -996,6 +1003,7 @@ def ifftn(a, s=None, axes=None, norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.eye(4) >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary @@ -1127,6 +1135,7 @@ def 
fft2(a, s=None, axes=(-2, -1), norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.mgrid[:5, :5][0] >>> np.fft.fft2(a) array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary @@ -1256,6 +1265,7 @@ def ifft2(a, s=None, axes=(-2, -1), norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = 4 * np.eye(4) >>> np.fft.ifft2(a) array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary @@ -1373,6 +1383,7 @@ def rfftn(a, s=None, axes=None, norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.ones((2, 2, 2)) >>> np.fft.rfftn(a) array([[[8.+0.j, 0.+0.j], # may vary @@ -1465,6 +1476,7 @@ def rfft2(a, s=None, axes=(-2, -1), norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.mgrid[:5, :5][0] >>> np.fft.rfft2(a) array([[ 50. +0.j , 0. +0.j , 0. +0.j ], @@ -1597,6 +1609,7 @@ def irfftn(a, s=None, axes=None, norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.zeros((3, 2, 2)) >>> a[0, 0, 0] = 3 * 2 * 2 >>> np.fft.irfftn(a) @@ -1689,6 +1702,7 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.mgrid[:5, :5][0] >>> A = np.fft.rfft2(a) >>> np.fft.irfft2(A, s=a.shape) From 40762cba793b2dc6064a6547e57a039ec7b9dd1b Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 18 Jun 2024 20:26:12 +0530 Subject: [PATCH 665/980] DOC, API: Add `>>> import numpy as np` stub to `numpy/random/` --- numpy/random/_generator.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index de9989b18424..221ac817b783 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -5023,13 +5023,13 @@ def default_rng(seed=None): is instantiated. This function does not manage a default global instance. See :ref:`seeding_and_entropy` for more information about seeding. - + Examples -------- `default_rng` is the recommended constructor for the random number class `Generator`. Here are several ways we can construct a random number generator using `default_rng` and the `Generator` class. 
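[Editor's aside -- not part of the patch. A short sketch of the seeding contract described in the docstring above: two generators constructed from the same seed produce identical streams, so results are reproducible. The seed value is arbitrary; only standard numpy.random API is used.]

    >>> import numpy as np
    >>> a = np.random.default_rng(seed=42).random(3)
    >>> b = np.random.default_rng(seed=42).random(3)
    >>> bool((a == b).all())   # same seed, same stream
    True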
- + Here we use `default_rng` to generate a random float: >>> import numpy as np From ab9b618d673d169536459f82366cd3fba1af2bf8 Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 18 Jun 2024 20:26:28 +0530 Subject: [PATCH 666/980] DOC, API: Add `>>> import numpy as np` stub to `numpy/polynomial/` --- numpy/matrixlib/defmatrix.py | 3 +++ numpy/polynomial/hermite.py | 10 +++++++--- numpy/polynomial/hermite_e.py | 3 +++ numpy/polynomial/laguerre.py | 13 +++++++++---- numpy/polynomial/legendre.py | 1 + numpy/polynomial/polynomial.py | 5 +++++ numpy/polynomial/polyutils.py | 3 +++ 7 files changed, 31 insertions(+), 7 deletions(-) diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 866f867c8eaa..68a74bb16a6e 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -56,6 +56,7 @@ def asmatrix(data, dtype=None): Examples -------- + >>> import numpy as np >>> x = np.array([[1, 2], [3, 4]]) >>> m = np.asmatrix(x) @@ -103,6 +104,7 @@ class matrix(N.ndarray): Examples -------- + >>> import numpy as np >>> a = np.matrix('1 2; 3 4') >>> a matrix([[1, 2], @@ -1065,6 +1067,7 @@ def bmat(obj, ldict=None, gdict=None): Examples -------- + >>> import numpy as np >>> A = np.asmatrix('1 1; 1 1') >>> B = np.asmatrix('2 2; 2 2') >>> C = np.asmatrix('3 4; 5 6') diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 58d18cb0d88c..cfb418c10dca 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -948,7 +948,7 @@ def hermval2d(x, y, c): >>> y = [4, 5] >>> c = [[1, 2, 3], [4, 5, 6]] >>> hermval2d(x, y, c) - array ([1035., 2883.]) + array([1035., 2883.]) """ return pu._valnd(hermval, c, x, y) @@ -1074,7 +1074,7 @@ def hermval3d(x, y, z, c): >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] >>> hermval3d(x, y, z, c) array([ 40077., 120131.]) - + """ return pu._valnd(hermval, c, x, y, z) @@ -1184,6 +1184,7 @@ def hermvander(x, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite import hermvander >>> x = np.array([-1, 0, 1]) >>> hermvander(x, 3) @@ -1260,6 +1261,7 @@ def hermvander2d(x, y, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite import hermvander2d >>> x = np.array([-1, 0, 1]) >>> y = np.array([-1, 0, 1]) @@ -1332,7 +1334,7 @@ def hermvander3d(x, y, z, deg): array([[ 1., -2., 2., -2., 4., -4.], [ 1., 0., -2., 0., 0., -0.], [ 1., 2., 2., 2., 4., 4.]]) - + """ return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg) @@ -1458,6 +1460,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite import hermfit, hermval >>> x = np.linspace(-10, 10) >>> rng = np.random.default_rng() @@ -1727,6 +1730,7 @@ def hermweight(x): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite import hermweight >>> x = np.arange(-2, 2) >>> hermweight(x) diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index e7fe1233cd14..4178020f3c9b 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -127,6 +127,7 @@ def poly2herme(pol): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite_e import poly2herme >>> poly2herme(np.arange(4)) array([ 2., 10., 2., 3.]) @@ -1137,6 +1138,7 @@ def hermevander(x, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite_e import hermevander >>> x = np.array([-1, 
0, 1]) >>> hermevander(x, 3) @@ -1389,6 +1391,7 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite_e import hermefit, hermeval >>> x = np.linspace(-10, 10) >>> rng = np.random.default_rng() diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index b0de7d9bce35..eee72597c7a2 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -126,6 +126,7 @@ def poly2lag(pol): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import poly2lag >>> poly2lag(np.arange(4)) array([ 23., -63., 58., -18.]) @@ -1062,7 +1063,7 @@ def lagval3d(x, y, z, c): >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] >>> lagval3d(1, 1, 2, c) -1.0 - + """ return pu._valnd(lagval, c, x, y, z) @@ -1128,7 +1129,7 @@ def laggrid3d(x, y, z, c): [ -2., -18.]], [[ -2., -14.], [ -1., -5.]]]) - + """ return pu._gridnd(lagval, c, x, y, z) @@ -1169,6 +1170,7 @@ def lagvander(x, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import lagvander >>> x = np.array([0, 1, 2]) >>> lagvander(x, 3) @@ -1244,12 +1246,13 @@ def lagvander2d(x, y, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import lagvander2d >>> x = np.array([0]) >>> y = np.array([2]) >>> lagvander2d(x, y, [2, 1]) array([[ 1., -1., 1., -1., 1., -1.]]) - + """ return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg) @@ -1306,6 +1309,7 @@ def lagvander3d(x, y, z, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import lagvander3d >>> x = np.array([0]) >>> y = np.array([2]) @@ -1439,6 +1443,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import lagfit, lagval >>> x = np.linspace(0, 10) >>> rng = np.random.default_rng() @@ -1481,7 +1486,7 @@ def lagcompanion(c): >>> lagcompanion([1, 2, 3]) array([[ 1. , -0.33333333], [-1. 
, 4.33333333]]) - + """ # c is a trimmed copy [c] = pu.as_series([c]) diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index ded9e7821891..535e377b027d 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -128,6 +128,7 @@ def poly2leg(pol): Examples -------- + >>> import numpy as np >>> from numpy import polynomial as P >>> p = P.Polynomial(np.arange(4)) >>> p diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 7b78005fa396..f00c6f3625a3 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -729,6 +729,7 @@ def polyval(x, c, tensor=True): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.polynomial import polyval >>> polyval(1, [1,2,3]) 6.0 @@ -1207,6 +1208,8 @@ def polyvander2d(x, y, deg): Examples -------- + >>> import numpy as np + The 2-D pseudo-Vandermonde matrix of degree ``[1, 2]`` and sample points ``x = [-1, 2]`` and ``y = [1, 3]`` is as follows: @@ -1290,6 +1293,7 @@ def polyvander3d(x, y, z, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polynomial as P >>> x = np.asarray([-1, 2, 1]) >>> y = np.asarray([1, -2, -3]) @@ -1441,6 +1445,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polynomial as P >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] >>> rng = np.random.default_rng() diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index 54ffe5937e8c..10dc047c404f 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -95,6 +95,7 @@ def as_series(alist, trim=True): Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polyutils as pu >>> a = np.arange(4) >>> pu.as_series(a) @@ -218,6 +219,7 @@ def getdomain(x): Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polyutils as pu >>> points = np.arange(4)**2 - 5; points array([-5, -4, -1, 4]) @@ -323,6 +325,7 @@ def mapdomain(x, old, new): Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polyutils as pu >>> old_domain = (-1,1) >>> new_domain = (0,2*np.pi) From 1f18316c2b813a4d44f7bb53125b5781c66ca3aa Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 18 Jun 2024 20:26:50 +0530 Subject: [PATCH 667/980] DOC, API: Add `>>> import numpy as np` stub to `numpy/exceptions.py` --- numpy/exceptions.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/exceptions.py b/numpy/exceptions.py index 6cacdbcc5227..adf88c754b66 100644 --- a/numpy/exceptions.py +++ b/numpy/exceptions.py @@ -146,6 +146,7 @@ class AxisError(ValueError, IndexError): Examples -------- + >>> import numpy as np >>> array_1d = np.arange(10) >>> np.cumsum(array_1d, axis=1) Traceback (most recent call last): @@ -222,6 +223,7 @@ class DTypePromotionError(TypeError): Datetimes and complex numbers are incompatible classes and cannot be promoted: + >>> import numpy as np >>> np.result_type(np.dtype("M8[s]"), np.complex128) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... 
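[Editor's aside -- placed between patches, part of neither. For contrast with the DTypePromotionError example documented in the hunk above (datetimes and complex numbers cannot be promoted), two promotions between compatible dtype classes that do succeed:]

    >>> import numpy as np
    >>> np.result_type(np.int32, np.float64)
    dtype('float64')
    >>> np.promote_types(np.uint8, np.int16)   # int16 can hold every uint8 value
    dtype('int16')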
From 347207f5dede2237dc512a298e56f012a0cc5ce4 Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 18 Jun 2024 20:22:21 +0530 Subject: [PATCH 668/980] DOC: Add `>>> import numpy as np` to API reference examples [skip azp] [skip cirrus] --- doc/source/reference/arrays.classes.rst | 107 ++-- doc/source/reference/arrays.datetime.rst | 57 +- doc/source/reference/arrays.dtypes.rst | 232 ++++---- doc/source/reference/arrays.ndarray.rst | 112 ++-- doc/source/reference/arrays.nditer.rst | 542 ++++++++++-------- doc/source/reference/arrays.scalars.rst | 48 +- doc/source/reference/constants.rst | 11 +- .../reference/maskedarray.baseclass.rst | 2 + doc/source/reference/maskedarray.generic.rst | 145 ++--- doc/source/reference/random/generator.rst | 10 +- doc/source/reference/random/index.rst | 52 +- doc/source/reference/routines.char.rst | 9 +- doc/source/reference/routines.polynomials.rst | 16 +- doc/source/reference/routines.rec.rst | 18 +- 14 files changed, 761 insertions(+), 600 deletions(-) diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 04bced806587..2cfe975ad3b5 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -409,24 +409,27 @@ alias for "matrix "in NumPy. Example 1: Matrix creation from a string ->>> a = np.asmatrix('1 2 3; 4 5 3') ->>> print((a*a.T).I) + >>> import numpy as np + >>> a = np.asmatrix('1 2 3; 4 5 3') + >>> print((a*a.T).I) [[ 0.29239766 -0.13450292] [-0.13450292 0.08187135]] -Example 2: Matrix creation from nested sequence +Example 2: Matrix creation from a nested sequence ->>> np.asmatrix([[1,5,10],[1.0,3,4j]]) -matrix([[ 1.+0.j, 5.+0.j, 10.+0.j], - [ 1.+0.j, 3.+0.j, 0.+4.j]]) + >>> import numpy as np + >>> np.asmatrix([[1,5,10],[1.0,3,4j]]) + matrix([[ 1.+0.j, 5.+0.j, 10.+0.j], + [ 1.+0.j, 3.+0.j, 0.+4.j]]) Example 3: Matrix creation from an array ->>> np.asmatrix(np.random.rand(3,3)).T -matrix([[4.17022005e-01, 3.02332573e-01, 1.86260211e-01], - [7.20324493e-01, 1.46755891e-01, 3.45560727e-01], - [1.14374817e-04, 9.23385948e-02, 3.96767474e-01]]) + >>> import numpy as np + >>> np.asmatrix(np.random.rand(3,3)).T + matrix([[4.17022005e-01, 3.02332573e-01, 1.86260211e-01], + [7.20324493e-01, 1.46755891e-01, 3.45560727e-01], + [1.14374817e-04, 9.23385948e-02, 3.96767474e-01]]) Memory-mapped file arrays @@ -458,16 +461,20 @@ array actually get written to disk. Example: ->>> a = np.memmap('newfile.dat', dtype=float, mode='w+', shape=1000) ->>> a[10] = 10.0 ->>> a[30] = 30.0 ->>> del a ->>> b = np.fromfile('newfile.dat', dtype=float) ->>> print(b[10], b[30]) -10.0 30.0 ->>> a = np.memmap('newfile.dat', dtype=float) ->>> print(a[10], a[30]) -10.0 30.0 + >>> import numpy as np + + >>> a = np.memmap('newfile.dat', dtype=float, mode='w+', shape=1000) + >>> a[10] = 10.0 + >>> a[30] = 30.0 + >>> del a + + >>> b = np.fromfile('newfile.dat', dtype=float) + >>> print(b[10], b[30]) + 10.0 30.0 + + >>> a = np.memmap('newfile.dat', dtype=float) + >>> print(a[10], a[30]) + 10.0 30.0 Character arrays (:mod:`numpy.char`) @@ -602,15 +609,16 @@ This default iterator selects a sub-array of dimension :math:`N-1` from the array. This can be a useful construct for defining recursive algorithms. To loop over the entire array requires :math:`N` for-loops. ->>> a = np.arange(24).reshape(3,2,4)+10 ->>> for val in a: -... 
print('item:', val) -item: [[10 11 12 13] - [14 15 16 17]] -item: [[18 19 20 21] - [22 23 24 25]] -item: [[26 27 28 29] - [30 31 32 33]] + >>> import numpy as np + >>> a = np.arange(24).reshape(3,2,4) + 10 + >>> for val in a: + ... print('item:', val) + item: [[10 11 12 13] + [14 15 16 17]] + item: [[18 19 20 21] + [22 23 24 25]] + item: [[26 27 28 29] + [30 31 32 33]] Flat iteration @@ -625,13 +633,14 @@ As mentioned previously, the flat attribute of ndarray objects returns an iterator that will cycle over the entire array in C-style contiguous order. ->>> for i, val in enumerate(a.flat): -... if i%5 == 0: print(i, val) -0 10 -5 15 -10 20 -15 25 -20 30 + >>> import numpy as np + >>> for i, val in enumerate(a.flat): + ... if i%5 == 0: print(i, val) + 0 10 + 5 15 + 10 20 + 15 25 + 20 30 Here, I've used the built-in enumerate iterator to return the iterator index as well as the value. @@ -648,12 +657,13 @@ N-dimensional enumeration Sometimes it may be useful to get the N-dimensional index while iterating. The ndenumerate iterator can achieve this. ->>> for i, val in np.ndenumerate(a): -... if sum(i)%5 == 0: print(i, val) -(0, 0, 0) 10 -(1, 1, 3) 25 -(2, 0, 3) 29 -(2, 1, 2) 32 + >>> import numpy as np + >>> for i, val in np.ndenumerate(a): + ... if sum(i)%5 == 0: print(i, val) + (0, 0, 0) 10 + (1, 1, 3) 25 + (2, 0, 3) 29 + (2, 1, 2) 32 Iterator for broadcasting @@ -670,9 +680,10 @@ objects as inputs and returns an iterator that returns tuples providing each of the input sequence elements in the broadcasted result. ->>> for val in np.broadcast([[1, 0], [2, 3]], [0, 1]): -... print(val) -(np.int64(1), np.int64(0)) -(np.int64(0), np.int64(1)) -(np.int64(2), np.int64(0)) -(np.int64(3), np.int64(1)) + >>> import numpy as np + >>> for val in np.broadcast([[1, 0], [2, 3]], [0, 1]): + ... print(val) + (np.int64(1), np.int64(0)) + (np.int64(0), np.int64(1)) + (np.int64(2), np.int64(0)) + (np.int64(3), np.int64(1)) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index ea76425e0160..a63fbdc6a910 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -61,6 +61,8 @@ letters, for a "Not A Time" value. A simple ISO date: + >>> import numpy as np + >>> np.datetime64('2005-02-25') np.datetime64('2005-02-25') @@ -95,6 +97,8 @@ datetime type with generic units. .. admonition:: Example + >>> import numpy as np + >>> np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64') array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64[D]') @@ -107,6 +111,8 @@ POSIX timestamps with the given unit. .. admonition:: Example + >>> import numpy as np + >>> np.array([0, 1577836800], dtype='datetime64[s]') array(['1970-01-01T00:00:00', '2020-01-01T00:00:00'], dtype='datetime64[s]') @@ -122,6 +128,8 @@ example :func:`arange` can be used to generate ranges of dates. All the dates for one month: + >>> import numpy as np + >>> np.arange('2005-02', '2005-03', dtype='datetime64[D]') array(['2005-02-01', '2005-02-02', '2005-02-03', '2005-02-04', '2005-02-05', '2005-02-06', '2005-02-07', '2005-02-08', @@ -140,6 +148,8 @@ because the moment of time is still being represented exactly. .. admonition:: Example + >>> import numpy as np + >>> np.datetime64('2005') == np.datetime64('2005-01-01') True @@ -167,6 +177,8 @@ data type also accepts the string "NAT" in place of the number for a "Not A Time .. 
admonition:: Example + >>> import numpy as np + >>> np.timedelta64(1, 'D') np.timedelta64(1,'D') @@ -181,6 +193,8 @@ simple datetime calculations. .. admonition:: Example + >>> import numpy as np + >>> np.datetime64('2009-01-01') - np.datetime64('2008-01-01') np.timedelta64(366,'D') @@ -214,6 +228,8 @@ calculating the averaged values from the 400 year leap-year cycle. .. admonition:: Example + >>> import numpy as np + >>> a = np.timedelta64(1, 'Y') >>> np.timedelta64(a, 'M') @@ -293,6 +309,8 @@ specified in business days to datetimes with a unit of 'D' (day). .. admonition:: Example + >>> import numpy as np + >>> np.busday_offset('2011-06-23', 1) np.datetime64('2011-06-24') @@ -307,6 +325,8 @@ The rules most typically used are 'forward' and 'backward'. .. admonition:: Example + >>> import numpy as np + >>> np.busday_offset('2011-06-25', 2) Traceback (most recent call last): File "", line 1, in @@ -331,6 +351,8 @@ is necessary to get a desired answer. The first business day on or after a date: + >>> import numpy as np + >>> np.busday_offset('2011-03-20', 0, roll='forward') np.datetime64('2011-03-21') >>> np.busday_offset('2011-03-22', 0, roll='forward') @@ -350,6 +372,8 @@ weekmask. .. admonition:: Example + >>> import numpy as np + >>> np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') np.datetime64('2012-05-13') @@ -364,6 +388,8 @@ To test a `datetime64` value to see if it is a valid day, use :func:`is_busday`. .. admonition:: Example + >>> import numpy as np + >>> np.is_busday(np.datetime64('2011-07-15')) # a Friday True >>> np.is_busday(np.datetime64('2011-07-16')) # a Saturday @@ -381,6 +407,8 @@ dates, use :func:`busday_count`: .. admonition:: Example + >>> import numpy as np + >>> np.busday_count(np.datetime64('2011-07-11'), np.datetime64('2011-07-18')) 5 >>> np.busday_count(np.datetime64('2011-07-18'), np.datetime64('2011-07-11')) @@ -391,6 +419,8 @@ how many of them are valid dates, you can do this: .. admonition:: Example + >>> import numpy as np + >>> a = np.arange(np.datetime64('2011-07-11'), np.datetime64('2011-07-18')) >>> np.count_nonzero(np.is_busday(a)) 5 @@ -438,6 +468,8 @@ given below. 23:59:60.450 UTC" is a valid timestamp which is not parseable by `datetime64`: + >>> import numpy as np + >>> np.datetime64("2016-12-31 23:59:60.450") Traceback (most recent call last): File "", line 1, in @@ -451,14 +483,16 @@ given below. Compute the number of SI seconds between "2021-01-01 12:56:23.423 UTC" and "2001-01-01 00:00:00.000 UTC": + >>> import numpy as np + >>> ( ... np.datetime64("2021-01-01 12:56:23.423") ... - np.datetime64("2001-01-01") ... ) / np.timedelta64(1, "s") 631198583.423 - however correct answer is `631198588.423` SI seconds because there were 5 - leap seconds between 2001 and 2021. + However, the correct answer is `631198588.423` SI seconds, because there were + 5 leap seconds between 2001 and 2021. - Timedelta64 computations for dates in the past do not return SI seconds, as one would expect. @@ -469,16 +503,19 @@ given below. where UT is `universal time `_: + + >>> import numpy as np + >>> a = np.datetime64("0000-01-01", "us") >>> b = np.datetime64("1600-01-01", "us") >>> b - a numpy.timedelta64(50491123200000000,'us') - The computed results, `50491123200` seconds, is obtained as the elapsed - number of days (`584388`) times `86400` seconds; this is the number of - seconds of a clock in sync with earth rotation. 
The exact value in SI - seconds can only be estimated, e.g using data published in `Measurement of - the Earth's rotation: 720 BC to AD 2015, 2016, Royal Society's Proceedings - A 472, by Stephenson et.al. `_. A - sensible estimate is `50491112870 ± 90` seconds, with a difference of 10330 - seconds. + The computed results, `50491123200` seconds, are obtained as the elapsed + number of days (`584388`) times `86400` seconds; this is the number of + seconds of a clock in sync with the Earth's rotation. The exact value in SI + seconds can only be estimated, e.g., using data published in `Measurement of + the Earth's rotation: 720 BC to AD 2015, 2016, Royal Society's Proceedings + A 472, by Stephenson et.al. `_. A + sensible estimate is `50491112870 ± 90` seconds, with a difference of 10330 + seconds. diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index b2a6f5ab8a2d..eda59690e312 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -68,15 +68,17 @@ Sub-arrays always have a C-contiguous memory layout. A simple data type containing a 32-bit big-endian integer: (see :ref:`arrays.dtypes.constructing` for details on construction) - >>> dt = np.dtype('>i4') - >>> dt.byteorder - '>' - >>> dt.itemsize - 4 - >>> dt.name - 'int32' - >>> dt.type is np.int32 - True + >>> import numpy as np + + >>> dt = np.dtype('>i4') + >>> dt.byteorder + '>' + >>> dt.itemsize + 4 + >>> dt.name + 'int32' + >>> dt.type is np.int32 + True The corresponding array scalar type is :class:`int32`. @@ -85,24 +87,28 @@ Sub-arrays always have a C-contiguous memory layout. A structured data type containing a 16-character string (in field 'name') and a sub-array of two 64-bit floating-point number (in field 'grades'): - >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> dt['name'] - dtype('>> dt['grades'] - dtype(('>> import numpy as np + + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt['name'] + dtype('>> dt['grades'] + dtype(('` type that also has two fields: - >>> x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) - >>> x[1] - ('John', [6., 7.]) - >>> x[1]['grades'] - array([6., 7.]) - >>> type(x[1]) - - >>> type(x[1]['grades']) - + >>> import numpy as np + + >>> x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + >>> x[1] + ('John', [6., 7.]) + >>> x[1]['grades'] + array([6., 7.]) + >>> type(x[1]) + + >>> type(x[1]['grades']) + .. _arrays.dtypes.constructing: @@ -148,8 +154,10 @@ Array-scalar types .. admonition:: Example - >>> dt = np.dtype(np.int32) # 32-bit integer - >>> dt = np.dtype(np.complex128) # 128-bit complex floating-point number + >>> import numpy as np + + >>> dt = np.dtype(np.int32) # 32-bit integer + >>> dt = np.dtype(np.complex128) # 128-bit complex floating-point number Generic types The generic hierarchical type objects convert to corresponding @@ -191,9 +199,11 @@ Built-in Python types .. admonition:: Example - >>> dt = np.dtype(float) # Python-compatible floating-point number - >>> dt = np.dtype(int) # Python-compatible integer - >>> dt = np.dtype(object) # Python object + >>> import numpy as np + + >>> dt = np.dtype(float) # Python-compatible floating-point number + >>> dt = np.dtype(int) # Python-compatible integer + >>> dt = np.dtype(object) # Python object .. note:: @@ -219,10 +229,12 @@ One-character strings .. 
admonition:: Example - >>> dt = np.dtype('b') # byte, native byte order - >>> dt = np.dtype('>H') # big-endian unsigned short - >>> dt = np.dtype('>> dt = np.dtype('d') # double-precision floating-point number + >>> import numpy as np + + >>> dt = np.dtype('b') # byte, native byte order + >>> dt = np.dtype('>H') # big-endian unsigned short + >>> dt = np.dtype('>> dt = np.dtype('d') # double-precision floating-point number Array-protocol type strings (see :ref:`arrays.interface`) The first character specifies the kind of data and the remaining @@ -249,11 +261,13 @@ Array-protocol type strings (see :ref:`arrays.interface`) .. admonition:: Example - >>> dt = np.dtype('i4') # 32-bit signed integer - >>> dt = np.dtype('f8') # 64-bit floating-point number - >>> dt = np.dtype('c16') # 128-bit complex floating-point number - >>> dt = np.dtype('S25') # 25-length zero-terminated bytes - >>> dt = np.dtype('U25') # 25-character string + >>> import numpy as np + + >>> dt = np.dtype('i4') # 32-bit signed integer + >>> dt = np.dtype('f8') # 64-bit floating-point number + >>> dt = np.dtype('c16') # 128-bit complex floating-point number + >>> dt = np.dtype('S25') # 25-length zero-terminated bytes + >>> dt = np.dtype('U25') # 25-character string .. _string-dtype-note: @@ -285,7 +299,8 @@ String with comma-separated fields of 64-bit floating-point numbers - field named ``f2`` containing a 32-bit floating-point number - >>> dt = np.dtype("i4, (2,3)f8, f4") + >>> import numpy as np + >>> dt = np.dtype("i4, (2,3)f8, f4") - field named ``f0`` containing a 3-character string - field named ``f1`` containing a sub-array of shape (3,) @@ -293,15 +308,18 @@ String with comma-separated fields - field named ``f2`` containing a 3 x 4 sub-array containing 10-character strings - >>> dt = np.dtype("S3, 3u8, (3,4)S10") + >>> import numpy as np + >>> dt = np.dtype("S3, 3u8, (3,4)S10") Type strings Any string name of a NumPy dtype, e.g.: .. admonition:: Example - >>> dt = np.dtype('uint32') # 32-bit unsigned integer - >>> dt = np.dtype('float64') # 64-bit floating-point number + >>> import numpy as np + + >>> dt = np.dtype('uint32') # 32-bit unsigned integer + >>> dt = np.dtype('float64') # 64-bit floating-point number .. index:: triple: dtype; construction; from tuple @@ -313,8 +331,10 @@ Type strings .. admonition:: Example - >>> dt = np.dtype((np.void, 10)) # 10-byte wide data block - >>> dt = np.dtype(('U', 10)) # 10-character unicode string + >>> import numpy as np + + >>> dt = np.dtype((np.void, 10)) # 10-byte wide data block + >>> dt = np.dtype(('U', 10)) # 10-character unicode string ``(fixed_dtype, shape)`` .. index:: @@ -330,8 +350,10 @@ Type strings .. admonition:: Example - >>> dt = np.dtype((np.int32, (2,2))) # 2 x 2 integer sub-array - >>> dt = np.dtype(('i4, (2,3)f8, f4', (2,3))) # 2 x 3 structured sub-array + >>> import numpy as np + + >>> dt = np.dtype((np.int32, (2,2))) # 2 x 2 integer sub-array + >>> dt = np.dtype(('i4, (2,3)f8, f4', (2,3))) # 2 x 3 structured sub-array .. index:: triple: dtype; construction; from list @@ -362,15 +384,17 @@ Type strings .. 
admonition:: Example - Data-type with fields ``big`` (big-endian 32-bit integer) and - ``little`` (little-endian 32-bit integer): + >>> import numpy as np - >>> dt = np.dtype([('big', '>i4'), ('little', '>> dt = np.dtype([('big', '>i4'), ('little', '>> dt = np.dtype([('R','u1'), ('G','u1'), ('B','u1'), ('A','u1')]) + Data-type with fields ``R``, ``G``, ``B``, ``A``, each being an + unsigned 8-bit integer: + + >>> dt = np.dtype([('R','u1'), ('G','u1'), ('B','u1'), ('A','u1')]) .. index:: triple: dtype; construction; from dict @@ -401,19 +425,21 @@ Type strings .. admonition:: Example - Data type with fields ``r``, ``g``, ``b``, ``a``, each being - an 8-bit unsigned integer: + >>> import numpy as np + + Data type with fields ``r``, ``g``, ``b``, ``a``, each being + an 8-bit unsigned integer: - >>> dt = np.dtype({'names': ['r','g','b','a'], - ... 'formats': [np.uint8, np.uint8, np.uint8, np.uint8]}) + >>> dt = np.dtype({'names': ['r','g','b','a'], + ... 'formats': [np.uint8, np.uint8, np.uint8, np.uint8]}) - Data type with fields ``r`` and ``b`` (with the given titles), - both being 8-bit unsigned integers, the first at byte position - 0 from the start of the field and the second at position 2: + Data type with fields ``r`` and ``b`` (with the given titles), + both being 8-bit unsigned integers, the first at byte position + 0 from the start of the field and the second at position 2: - >>> dt = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'], - ... 'offsets': [0, 2], - ... 'titles': ['Red pixel', 'Blue pixel']}) + >>> dt = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'], + ... 'offsets': [0, 2], + ... 'titles': ['Red pixel', 'Blue pixel']}) ``{'field1': ..., 'field2': ..., ...}`` @@ -430,12 +456,14 @@ Type strings .. admonition:: Example - Data type containing field ``col1`` (10-character string at - byte position 0), ``col2`` (32-bit float at byte position 10), - and ``col3`` (integers at byte position 14): + >>> import numpy as np - >>> dt = np.dtype({'col1': ('U10', 0), 'col2': (np.float32, 10), - ... 'col3': (int, 14)}) + Data type containing field ``col1`` (10-character string at + byte position 0), ``col2`` (32-bit float at byte position 10), + and ``col3`` (integers at byte position 14): + + >>> dt = np.dtype({'col1': ('U10', 0), 'col2': (np.float32, 10), + ... 'col3': (int, 14)}) ``(base_dtype, new_dtype)`` In NumPy 1.7 and later, this form allows `base_dtype` to be interpreted as @@ -453,20 +481,22 @@ Type strings .. admonition:: Example - 32-bit integer, whose first two bytes are interpreted as an integer - via field ``real``, and the following two bytes via field ``imag``. + >>> import numpy as np + + 32-bit integer, whose first two bytes are interpreted as an integer + via field ``real``, and the following two bytes via field ``imag``. 
- >>> dt = np.dtype((np.int32,{'real':(np.int16, 0),'imag':(np.int16, 2)})) + >>> dt = np.dtype((np.int32,{'real':(np.int16, 0),'imag':(np.int16, 2)})) - 32-bit integer, which is interpreted as consisting of a sub-array - of shape ``(4,)`` containing 8-bit integers: + 32-bit integer, which is interpreted as consisting of a sub-array + of shape ``(4,)`` containing 8-bit integers: - >>> dt = np.dtype((np.int32, (np.int8, 4))) + >>> dt = np.dtype((np.int32, (np.int8, 4))) - 32-bit integer, containing fields ``r``, ``g``, ``b``, ``a`` that - interpret the 4 bytes in the integer as four unsigned integers: + 32-bit integer, containing fields ``r``, ``g``, ``b``, ``a`` that + interpret the 4 bytes in the integer as four unsigned integers: - >>> dt = np.dtype(('i4', [('r','u1'),('g','u1'),('b','u1'),('a','u1')])) + >>> dt = np.dtype(('i4', [('r','u1'),('g','u1'),('b','u1'),('a','u1')])) Checking the data type @@ -475,11 +505,13 @@ When checking for a specific data type, use ``==`` comparison. .. admonition:: Example - >>> a = np.array([1, 2], dtype=np.float32) - >>> a.dtype == np.float32 - True + >>> import numpy as np -As opposed to python types, a comparison using ``is`` should not be used. + >>> a = np.array([1, 2], dtype=np.float32) + >>> a.dtype == np.float32 + True + +As opposed to Python types, a comparison using ``is`` should not be used. First, NumPy treats data type specifications (everything that can be passed to the :class:`dtype` constructor) as equivalent to the data type object itself. @@ -487,31 +519,35 @@ This equivalence can only be handled through ``==``, not through ``is``. .. admonition:: Example - A :class:`dtype` object is equal to all data type specifications that are - equivalent to it. - - >>> a = np.array([1, 2], dtype=float) - >>> a.dtype == np.dtype(np.float64) - True - >>> a.dtype == np.float64 - True - >>> a.dtype == float - True - >>> a.dtype == "float64" - True - >>> a.dtype == "d" - True + >>> import numpy as np + + A :class:`dtype` object is equal to all data type specifications that are + equivalent to it. + + >>> a = np.array([1, 2], dtype=float) + >>> a.dtype == np.dtype(np.float64) + True + >>> a.dtype == np.float64 + True + >>> a.dtype == float + True + >>> a.dtype == "float64" + True + >>> a.dtype == "d" + True Second, there is no guarantee that data type objects are singletons. .. admonition:: Example - Do not use ``is`` because data type objects may or may not be singletons. + >>> import numpy as np + + Do not use ``is`` because data type objects may or may not be singletons. - >>> np.dtype(float) is np.dtype(float) - True - >>> np.dtype([('a', float)]) is np.dtype([('a', float)]) - False + >>> np.dtype(float) is np.dtype(float) + True + >>> np.dtype([('a', float)]) is np.dtype([('a', float)]) + False :class:`dtype` ============== diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index 5429a272569d..a403c9528800 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -32,35 +32,37 @@ objects implementing the :class:`memoryview` or :ref:`array .. 
admonition:: Example - A 2-dimensional array of size 2 x 3, composed of 4-byte integer - elements: + >>> import numpy as np - >>> x = np.array([[1, 2, 3], [4, 5, 6]], np.int32) - >>> type(x) - - >>> x.shape - (2, 3) - >>> x.dtype - dtype('int32') + A 2-dimensional array of size 2 x 3, composed of 4-byte integer + elements: - The array can be indexed using Python container-like syntax: + >>> x = np.array([[1, 2, 3], [4, 5, 6]], np.int32) + >>> type(x) + + >>> x.shape + (2, 3) + >>> x.dtype + dtype('int32') - >>> # The element of x in the *second* row, *third* column, namely, 6. - >>> x[1, 2] - 6 + The array can be indexed using Python container-like syntax: - For example :ref:`slicing ` can produce views of - the array: + >>> # The element of x in the *second* row, *third* column, namely, 6. + >>> x[1, 2] + 6 - >>> y = x[:,1] - >>> y - array([2, 5], dtype=int32) - >>> y[0] = 9 # this also changes the corresponding element in x - >>> y - array([9, 5], dtype=int32) - >>> x - array([[1, 9, 3], - [4, 5, 6]], dtype=int32) + For example :ref:`slicing ` can produce views of + the array: + + >>> y = x[:,1] + >>> y + array([2, 5], dtype=int32) + >>> y[0] = 9 # this also changes the corresponding element in x + >>> y + array([9, 5], dtype=int32) + >>> x + array([[1, 9, 3], + [4, 5, 6]], dtype=int32) Constructing arrays @@ -360,36 +362,38 @@ Many of these methods take an argument named *axis*. In such cases, .. admonition:: Example of the *axis* argument - A 3-dimensional array of size 3 x 3 x 3, summed over each of its - three axes - - >>> x = np.arange(27).reshape((3,3,3)) - >>> x - array([[[ 0, 1, 2], - [ 3, 4, 5], - [ 6, 7, 8]], - [[ 9, 10, 11], - [12, 13, 14], - [15, 16, 17]], - [[18, 19, 20], - [21, 22, 23], - [24, 25, 26]]]) - >>> x.sum(axis=0) - array([[27, 30, 33], - [36, 39, 42], - [45, 48, 51]]) - >>> # for sum, axis is the first keyword, so we may omit it, - >>> # specifying only its value - >>> x.sum(0), x.sum(1), x.sum(2) - (array([[27, 30, 33], - [36, 39, 42], - [45, 48, 51]]), - array([[ 9, 12, 15], - [36, 39, 42], - [63, 66, 69]]), - array([[ 3, 12, 21], - [30, 39, 48], - [57, 66, 75]])) + >>> import numpy as np + + A 3-dimensional array of size 3 x 3 x 3, summed over each of its + three axes + + >>> x = np.arange(27).reshape((3,3,3)) + >>> x + array([[[ 0, 1, 2], + [ 3, 4, 5], + [ 6, 7, 8]], + [[ 9, 10, 11], + [12, 13, 14], + [15, 16, 17]], + [[18, 19, 20], + [21, 22, 23], + [24, 25, 26]]]) + >>> x.sum(axis=0) + array([[27, 30, 33], + [36, 39, 42], + [45, 48, 51]]) + >>> # for sum, axis is the first keyword, so we may omit it, + >>> # specifying only its value + >>> x.sum(0), x.sum(1), x.sum(2) + (array([[27, 30, 33], + [36, 39, 42], + [45, 48, 51]]), + array([[ 9, 12, 15], + [36, 39, 42], + [63, 66, 69]]), + array([[ 3, 12, 21], + [30, 39, 48], + [57, 66, 75]])) The parameter *dtype* specifies the data type over which a reduction operation (like summing) should take place. The default reduce data diff --git a/doc/source/reference/arrays.nditer.rst b/doc/source/reference/arrays.nditer.rst index d5d19d244e94..3c71a69e0fcd 100644 --- a/doc/source/reference/arrays.nditer.rst +++ b/doc/source/reference/arrays.nditer.rst @@ -32,11 +32,13 @@ using the standard Python iterator interface. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a): - ... print(x, end=' ') - ... - 0 1 2 3 4 5 + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a): + ... print(x, end=' ') + ... 
+ 0 1 2 3 4 5 An important thing to be aware of for this iteration is that the order is chosen to match the memory layout of the array instead of using a @@ -48,16 +50,18 @@ of that transpose in C order. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a.T): - ... print(x, end=' ') - ... - 0 1 2 3 4 5 + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a.T): + ... print(x, end=' ') + ... + 0 1 2 3 4 5 - >>> for x in np.nditer(a.T.copy(order='C')): - ... print(x, end=' ') - ... - 0 3 1 4 2 5 + >>> for x in np.nditer(a.T.copy(order='C')): + ... print(x, end=' ') + ... + 0 3 1 4 2 5 The elements of both `a` and `a.T` get traversed in the same order, namely the order they are stored in memory, whereas the elements of @@ -76,15 +80,17 @@ order='C' for C order and order='F' for Fortran order. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a, order='F'): - ... print(x, end=' ') - ... - 0 3 1 4 2 5 - >>> for x in np.nditer(a.T, order='C'): - ... print(x, end=' ') - ... - 0 3 1 4 2 5 + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a, order='F'): + ... print(x, end=' ') + ... + 0 3 1 4 2 5 + >>> for x in np.nditer(a.T, order='C'): + ... print(x, end=' ') + ... + 0 3 1 4 2 5 .. _nditer-context-manager: @@ -111,17 +117,19 @@ context is exited. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> a - array([[0, 1, 2], - [3, 4, 5]]) - >>> with np.nditer(a, op_flags=['readwrite']) as it: - ... for x in it: - ... x[...] = 2 * x - ... - >>> a - array([[ 0, 2, 4], - [ 6, 8, 10]]) + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> with np.nditer(a, op_flags=['readwrite']) as it: + ... for x in it: + ... x[...] = 2 * x + ... + >>> a + array([[ 0, 2, 4], + [ 6, 8, 10]]) If you are writing code that needs to support older versions of numpy, note that prior to 1.15, :class:`nditer` was not a context manager and @@ -150,16 +158,18 @@ elements each. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a, flags=['external_loop']): - ... print(x, end=' ') - ... - [0 1 2 3 4 5] + >>> import numpy as np - >>> for x in np.nditer(a, flags=['external_loop'], order='F'): - ... print(x, end=' ') - ... - [0 3] [1 4] [2 5] + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a, flags=['external_loop']): + ... print(x, end=' ') + ... + [0 1 2 3 4 5] + + >>> for x in np.nditer(a, flags=['external_loop'], order='F'): + ... print(x, end=' ') + ... + [0 3] [1 4] [2 5] Tracking an index or multi-index -------------------------------- @@ -176,26 +186,28 @@ progression of the index: .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> it = np.nditer(a, flags=['f_index']) - >>> for x in it: - ... print("%d <%d>" % (x, it.index), end=' ') - ... - 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> - - >>> it = np.nditer(a, flags=['multi_index']) - >>> for x in it: - ... print("%d <%s>" % (x, it.multi_index), end=' ') - ... - 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> - - >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: - ... for x in it: - ... x[...] = it.multi_index[1] - it.multi_index[0] - ... - >>> a - array([[ 0, 1, 2], - [-1, 0, 1]]) + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> it = np.nditer(a, flags=['f_index']) + >>> for x in it: + ... print("%d <%d>" % (x, it.index), end=' ') + ... 
+ 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> + + >>> it = np.nditer(a, flags=['multi_index']) + >>> for x in it: + ... print("%d <%s>" % (x, it.multi_index), end=' ') + ... + 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> + + >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: + ... for x in it: + ... x[...] = it.multi_index[1] - it.multi_index[0] + ... + >>> a + array([[ 0, 1, 2], + [-1, 0, 1]]) Tracking an index or multi-index is incompatible with using an external loop, because it requires a different index value per element. If @@ -204,11 +216,13 @@ raise an exception. .. admonition:: Example - >>> a = np.zeros((2,3)) - >>> it = np.nditer(a, flags=['c_index', 'external_loop']) - Traceback (most recent call last): - File "", line 1, in - ValueError: Iterator flag EXTERNAL_LOOP cannot be used if an index or multi-index is being tracked + >>> import numpy as np + + >>> a = np.zeros((2,3)) + >>> it = np.nditer(a, flags=['c_index', 'external_loop']) + Traceback (most recent call last): + File "", line 1, in + ValueError: Iterator flag EXTERNAL_LOOP cannot be used if an index or multi-index is being tracked Alternative looping and element access -------------------------------------- @@ -222,29 +236,31 @@ produce identical results to the ones in the previous section. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> it = np.nditer(a, flags=['f_index']) - >>> while not it.finished: - ... print("%d <%d>" % (it[0], it.index), end=' ') - ... is_not_finished = it.iternext() - ... - 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> - - >>> it = np.nditer(a, flags=['multi_index']) - >>> while not it.finished: - ... print("%d <%s>" % (it[0], it.multi_index), end=' ') - ... is_not_finished = it.iternext() - ... - 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> - - >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: - ... while not it.finished: - ... it[0] = it.multi_index[1] - it.multi_index[0] - ... is_not_finished = it.iternext() - ... - >>> a - array([[ 0, 1, 2], - [-1, 0, 1]]) + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> it = np.nditer(a, flags=['f_index']) + >>> while not it.finished: + ... print("%d <%d>" % (it[0], it.index), end=' ') + ... is_not_finished = it.iternext() + ... + 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> + + >>> it = np.nditer(a, flags=['multi_index']) + >>> while not it.finished: + ... print("%d <%s>" % (it[0], it.multi_index), end=' ') + ... is_not_finished = it.iternext() + ... + 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> + + >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: + ... while not it.finished: + ... it[0] = it.multi_index[1] - it.multi_index[0] + ... is_not_finished = it.iternext() + ... + >>> a + array([[ 0, 1, 2], + [-1, 0, 1]]) Buffering the array elements ---------------------------- @@ -263,16 +279,18 @@ is enabled. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a, flags=['external_loop'], order='F'): - ... print(x, end=' ') - ... - [0 3] [1 4] [2 5] + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a, flags=['external_loop'], order='F'): + ... print(x, end=' ') + ... + [0 3] [1 4] [2 5] - >>> for x in np.nditer(a, flags=['external_loop','buffered'], order='F'): - ... print(x, end=' ') - ... - [0 3 1 4 2 5] + >>> for x in np.nditer(a, flags=['external_loop','buffered'], order='F'): + ... print(x, end=' ') + ... 
+ [0 3 1 4 2 5] Iterating as a specific data type --------------------------------- @@ -305,13 +323,15 @@ data type doesn't match precisely. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - 3 - >>> for x in np.nditer(a, op_dtypes=['complex128']): - ... print(np.sqrt(x), end=' ') - ... - Traceback (most recent call last): - File "", line 1, in - TypeError: Iterator operand required copying or buffering, but neither copying nor buffering was enabled + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) - 3 + >>> for x in np.nditer(a, op_dtypes=['complex128']): + ... print(np.sqrt(x), end=' ') + ... + Traceback (most recent call last): + File "", line 1, in + TypeError: Iterator operand required copying or buffering, but neither copying nor buffering was enabled In copying mode, 'copy' is specified as a per-operand flag. This is done to provide control in a per-operand fashion. Buffering mode is @@ -319,17 +339,19 @@ specified as an iterator flag. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - 3 - >>> for x in np.nditer(a, op_flags=['readonly','copy'], - ... op_dtypes=['complex128']): - ... print(np.sqrt(x), end=' ') - ... - 1.7320508075688772j 1.4142135623730951j 1j 0j (1+0j) (1.4142135623730951+0j) + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) - 3 + >>> for x in np.nditer(a, op_flags=['readonly','copy'], + ... op_dtypes=['complex128']): + ... print(np.sqrt(x), end=' ') + ... + 1.7320508075688772j 1.4142135623730951j 1j 0j (1+0j) (1.4142135623730951+0j) - >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['complex128']): - ... print(np.sqrt(x), end=' ') - ... - 1.7320508075688772j 1.4142135623730951j 1j 0j (1+0j) (1.4142135623730951+0j) + >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['complex128']): + ... print(np.sqrt(x), end=' ') + ... + 1.7320508075688772j 1.4142135623730951j 1j 0j (1+0j) (1.4142135623730951+0j) The iterator uses NumPy's casting rules to determine whether a specific @@ -342,26 +364,28 @@ complex to float. .. admonition:: Example - >>> a = np.arange(6.) - >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32']): - ... print(x, end=' ') - ... - Traceback (most recent call last): - File "", line 1, in - TypeError: Iterator operand 0 dtype could not be cast from dtype('float64') to dtype('float32') according to the rule 'safe' - - >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32'], - ... casting='same_kind'): - ... print(x, end=' ') - ... - 0.0 1.0 2.0 3.0 4.0 5.0 - - >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['int32'], casting='same_kind'): - ... print(x, end=' ') - ... - Traceback (most recent call last): - File "", line 1, in - TypeError: Iterator operand 0 dtype could not be cast from dtype('float64') to dtype('int32') according to the rule 'same_kind' + >>> import numpy as np + + >>> a = np.arange(6.) + >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32']): + ... print(x, end=' ') + ... + Traceback (most recent call last): + File "", line 1, in + TypeError: Iterator operand 0 dtype could not be cast from dtype('float64') to dtype('float32') according to the rule 'safe' + + >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32'], + ... casting='same_kind'): + ... print(x, end=' ') + ... + 0.0 1.0 2.0 3.0 4.0 5.0 + + >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['int32'], casting='same_kind'): + ... print(x, end=' ') + ... 
+ Traceback (most recent call last): + File "", line 1, in + TypeError: Iterator operand 0 dtype could not be cast from dtype('float64') to dtype('int32') according to the rule 'same_kind' One thing to watch out for is conversions back to the original data type when using a read-write or write-only operand. A common case is @@ -373,14 +397,16 @@ would violate the casting rule. .. admonition:: Example - >>> a = np.arange(6) - >>> for x in np.nditer(a, flags=['buffered'], op_flags=['readwrite'], - ... op_dtypes=['float64'], casting='same_kind'): - ... x[...] = x / 2.0 - ... - Traceback (most recent call last): - File "", line 2, in - TypeError: Iterator requested dtype could not be cast from dtype('float64') to dtype('int64'), the operand 0 dtype, according to the rule 'same_kind' + >>> import numpy as np + + >>> a = np.arange(6) + >>> for x in np.nditer(a, flags=['buffered'], op_flags=['readwrite'], + ... op_dtypes=['float64'], casting='same_kind'): + ... x[...] = x / 2.0 + ... + Traceback (most recent call last): + File "", line 2, in + TypeError: Iterator requested dtype could not be cast from dtype('float64') to dtype('int64'), the operand 0 dtype, according to the rule 'same_kind' Broadcasting array iteration ============================ @@ -396,26 +422,30 @@ a two dimensional array together. .. admonition:: Example - >>> a = np.arange(3) - >>> b = np.arange(6).reshape(2,3) - >>> for x, y in np.nditer([a,b]): - ... print("%d:%d" % (x,y), end=' ') - ... - 0:0 1:1 2:2 0:3 1:4 2:5 + >>> import numpy as np + + >>> a = np.arange(3) + >>> b = np.arange(6).reshape(2,3) + >>> for x, y in np.nditer([a,b]): + ... print("%d:%d" % (x,y), end=' ') + ... + 0:0 1:1 2:2 0:3 1:4 2:5 When a broadcasting error occurs, the iterator raises an exception which includes the input shapes to help diagnose the problem. .. admonition:: Example - >>> a = np.arange(2) - >>> b = np.arange(6).reshape(2,3) - >>> for x, y in np.nditer([a,b]): - ... print("%d:%d" % (x,y), end=' ') - ... - Traceback (most recent call last): - ... - ValueError: operands could not be broadcast together with shapes (2,) (2,3) + >>> import numpy as np + + >>> a = np.arange(2) + >>> b = np.arange(6).reshape(2,3) + >>> for x, y in np.nditer([a,b]): + ... print("%d:%d" % (x,y), end=' ') + ... + Traceback (most recent call last): + ... + ValueError: operands could not be broadcast together with shapes (2,) (2,3) Iterator-allocated output arrays -------------------------------- @@ -432,14 +462,16 @@ parameter support. .. admonition:: Example - >>> def square(a): - ... with np.nditer([a, None]) as it: - ... for x, y in it: - ... y[...] = x*x - ... return it.operands[1] - ... - >>> square([1,2,3]) - array([1, 4, 9]) + >>> import numpy as np + + >>> def square(a): + ... with np.nditer([a, None]) as it: + ... for x, y in it: + ... y[...] = x*x + ... return it.operands[1] + ... + >>> square([1,2,3]) + array([1, 4, 9]) By default, the :class:`nditer` uses the flags 'allocate' and 'writeonly' for operands that are passed in as None. This means we were able to provide @@ -469,31 +501,33 @@ reasons. .. admonition:: Example - >>> def square(a, out=None): - ... it = np.nditer([a, out], - ... flags = ['external_loop', 'buffered'], - ... op_flags = [['readonly'], - ... ['writeonly', 'allocate', 'no_broadcast']]) - ... with it: - ... for x, y in it: - ... y[...] = x*x - ... return it.operands[1] - ... 
- - >>> square([1,2,3]) - array([1, 4, 9]) - - >>> b = np.zeros((3,)) - >>> square([1,2,3], out=b) - array([1., 4., 9.]) - >>> b - array([1., 4., 9.]) - - >>> square(np.arange(6).reshape(2,3), out=b) - Traceback (most recent call last): - ... - ValueError: non-broadcastable output operand with shape (3,) doesn't - match the broadcast shape (2,3) + >>> import numpy as np + + >>> def square(a, out=None): + ... it = np.nditer([a, out], + ... flags = ['external_loop', 'buffered'], + ... op_flags = [['readonly'], + ... ['writeonly', 'allocate', 'no_broadcast']]) + ... with it: + ... for x, y in it: + ... y[...] = x*x + ... return it.operands[1] + ... + + >>> square([1,2,3]) + array([1, 4, 9]) + + >>> b = np.zeros((3,)) + >>> square([1,2,3], out=b) + array([1., 4., 9.]) + >>> b + array([1., 4., 9.]) + + >>> square(np.arange(6).reshape(2,3), out=b) + Traceback (most recent call last): + ... + ValueError: non-broadcastable output operand with shape (3,) doesn't + match the broadcast shape (2,3) Outer product iteration ----------------------- @@ -525,22 +559,24 @@ Everything to do with the outer product is handled by the iterator setup. .. admonition:: Example - >>> a = np.arange(3) - >>> b = np.arange(8).reshape(2,4) - >>> it = np.nditer([a, b, None], flags=['external_loop'], - ... op_axes=[[0, -1, -1], [-1, 0, 1], None]) - >>> with it: - ... for x, y, z in it: - ... z[...] = x*y - ... result = it.operands[2] # same as z - ... - >>> result - array([[[ 0, 0, 0, 0], - [ 0, 0, 0, 0]], - [[ 0, 1, 2, 3], - [ 4, 5, 6, 7]], - [[ 0, 2, 4, 6], - [ 8, 10, 12, 14]]]) + >>> import numpy as np + + >>> a = np.arange(3) + >>> b = np.arange(8).reshape(2,4) + >>> it = np.nditer([a, b, None], flags=['external_loop'], + ... op_axes=[[0, -1, -1], [-1, 0, 1], None]) + >>> with it: + ... for x, y, z in it: + ... z[...] = x*y + ... result = it.operands[2] # same as z + ... + >>> result + array([[[ 0, 0, 0, 0], + [ 0, 0, 0, 0]], + [[ 0, 1, 2, 3], + [ 4, 5, 6, 7]], + [[ 0, 2, 4, 6], + [ 8, 10, 12, 14]]]) Note that once the iterator is closed we can not access :func:`operands ` and must use a reference created inside the context manager. @@ -557,17 +593,19 @@ For a simple example, consider taking the sum of all elements in an array. .. admonition:: Example - >>> a = np.arange(24).reshape(2,3,4) - >>> b = np.array(0) - >>> with np.nditer([a, b], flags=['reduce_ok'], - ... op_flags=[['readonly'], ['readwrite']]) as it: - ... for x,y in it: - ... y[...] += x - ... - >>> b - array(276) - >>> np.sum(a) - 276 + >>> import numpy as np + + >>> a = np.arange(24).reshape(2,3,4) + >>> b = np.array(0) + >>> with np.nditer([a, b], flags=['reduce_ok'], + ... op_flags=[['readonly'], ['readwrite']]) as it: + ... for x,y in it: + ... y[...] += x + ... + >>> b + array(276) + >>> np.sum(a) + 276 Things are a little bit more tricky when combining reduction and allocated operands. Before iteration is started, any reduction operand must be @@ -576,22 +614,24 @@ sums along the last axis of `a`. .. admonition:: Example - >>> a = np.arange(24).reshape(2,3,4) - >>> it = np.nditer([a, None], flags=['reduce_ok'], - ... op_flags=[['readonly'], ['readwrite', 'allocate']], - ... op_axes=[None, [0,1,-1]]) - >>> with it: - ... it.operands[1][...] = 0 - ... for x, y in it: - ... y[...] += x - ... result = it.operands[1] - ... 
- >>> result - array([[ 6, 22, 38], - [54, 70, 86]]) - >>> np.sum(a, axis=2) - array([[ 6, 22, 38], - [54, 70, 86]]) + >>> import numpy as np + + >>> a = np.arange(24).reshape(2,3,4) + >>> it = np.nditer([a, None], flags=['reduce_ok'], + ... op_flags=[['readonly'], ['readwrite', 'allocate']], + ... op_axes=[None, [0,1,-1]]) + >>> with it: + ... it.operands[1][...] = 0 + ... for x, y in it: + ... y[...] += x + ... result = it.operands[1] + ... + >>> result + array([[ 6, 22, 38], + [54, 70, 86]]) + >>> np.sum(a, axis=2) + array([[ 6, 22, 38], + [54, 70, 86]]) To do buffered reduction requires yet another adjustment during the setup. Normally the iterator construction involves copying the first @@ -610,21 +650,23 @@ buffering. .. admonition:: Example - >>> a = np.arange(24).reshape(2,3,4) - >>> it = np.nditer([a, None], flags=['reduce_ok', - ... 'buffered', 'delay_bufalloc'], - ... op_flags=[['readonly'], ['readwrite', 'allocate']], - ... op_axes=[None, [0,1,-1]]) - >>> with it: - ... it.operands[1][...] = 0 - ... it.reset() - ... for x, y in it: - ... y[...] += x - ... result = it.operands[1] - ... - >>> result - array([[ 6, 22, 38], - [54, 70, 86]]) + >>> import numpy as np + + >>> a = np.arange(24).reshape(2,3,4) + >>> it = np.nditer([a, None], flags=['reduce_ok', + ... 'buffered', 'delay_bufalloc'], + ... op_flags=[['readonly'], ['readwrite', 'allocate']], + ... op_axes=[None, [0,1,-1]]) + >>> with it: + ... it.operands[1][...] = 0 + ... it.reset() + ... for x, y in it: + ... y[...] += x + ... result = it.operands[1] + ... + >>> result + array([[ 6, 22, 38], + [54, 70, 86]]) .. for doctests Include Cython section separately. Those tests are skipped entirely via an diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index 11b3bdc16c6c..e8588ef92b4f 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -191,29 +191,31 @@ Inexact types This means that variables with equal binary values but whose datatypes are of different precisions may display differently:: - >>> f16 = np.float16("0.1") - >>> f32 = np.float32(f16) - >>> f64 = np.float64(f32) - >>> f16 == f32 == f64 - True - >>> f16, f32, f64 - (0.1, 0.099975586, 0.0999755859375) - - Note that none of these floats hold the exact value :math:`\frac{1}{10}`; - ``f16`` prints as ``0.1`` because it is as close to that value as possible, - whereas the other types do not as they have more precision and therefore have - closer values. - - Conversely, floating-point scalars of different precisions which approximate - the same decimal value may compare unequal despite printing identically: - - >>> f16 = np.float16("0.1") - >>> f32 = np.float32("0.1") - >>> f64 = np.float64("0.1") - >>> f16 == f32 == f64 - False - >>> f16, f32, f64 - (0.1, 0.1, 0.1) + >>> import numpy as np + + >>> f16 = np.float16("0.1") + >>> f32 = np.float32(f16) + >>> f64 = np.float64(f32) + >>> f16 == f32 == f64 + True + >>> f16, f32, f64 + (0.1, 0.099975586, 0.0999755859375) + + Note that none of these floats hold the exact value :math:`\frac{1}{10}`; + ``f16`` prints as ``0.1`` because it is as close to that value as possible, + whereas the other types do not as they have more precision and therefore have + closer values. 
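Widening the half-precision scalar to a plain Python float exposes the exact stored value behind the short repr; a minimal sketch (the value shown is what round-to-nearest half precision stores for 0.1, so it should reproduce exactly):

    >>> import numpy as np
    >>> f16 = np.float16("0.1")
    >>> float(f16)   # exact widening; the same value np.float64(f16) holds
    0.0999755859375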
+ + Conversely, floating-point scalars of different precisions which approximate + the same decimal value may compare unequal despite printing identically: + + >>> f16 = np.float16("0.1") + >>> f32 = np.float32("0.1") + >>> f64 = np.float64("0.1") + >>> f16 == f32 == f64 + False + >>> f16, f32, f64 + (0.1, 0.1, 0.1) Floating-point types ~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/reference/constants.rst b/doc/source/reference/constants.rst index b09cd9bbb21d..71ce0051bf13 100644 --- a/doc/source/reference/constants.rst +++ b/doc/source/reference/constants.rst @@ -62,6 +62,7 @@ NumPy includes several constants: .. rubric:: Examples + >>> import numpy as np >>> np.inf inf >>> np.array([1]) / 0. @@ -90,6 +91,7 @@ NumPy includes several constants: .. rubric:: Examples + >>> import numpy as np >>> np.nan nan >>> np.log(-1) @@ -104,6 +106,7 @@ NumPy includes several constants: .. rubric:: Examples + >>> import numpy as np >>> np.newaxis is None True >>> x = np.arange(3) @@ -119,16 +122,16 @@ NumPy includes several constants: [[2]]]) >>> x[:, np.newaxis] * x array([[0, 0, 0], - [0, 1, 2], - [0, 2, 4]]) + [0, 1, 2], + [0, 2, 4]]) Outer product, same as ``outer(x, y)``: >>> y = np.arange(3, 6) >>> x[:, np.newaxis] * y array([[ 0, 0, 0], - [ 3, 4, 5], - [ 6, 8, 10]]) + [ 3, 4, 5], + [ 6, 8, 10]]) ``x[np.newaxis, :]`` is equivalent to ``x[np.newaxis]`` and ``x[None]``: diff --git a/doc/source/reference/maskedarray.baseclass.rst b/doc/source/reference/maskedarray.baseclass.rst index 7121914b93e2..01ac67f42704 100644 --- a/doc/source/reference/maskedarray.baseclass.rst +++ b/doc/source/reference/maskedarray.baseclass.rst @@ -18,6 +18,8 @@ defines several constants. specific entry of a masked array is masked, or to mask one or several entries of a masked array:: + >>> import numpy as np + >>> x = ma.array([1, 2, 3], mask=[0, 1, 0]) >>> x[1] is ma.masked True diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst index 161ce14b76d2..3324269ee7aa 100644 --- a/doc/source/reference/maskedarray.generic.rst +++ b/doc/source/reference/maskedarray.generic.rst @@ -35,19 +35,19 @@ masked (invalid). The package ensures that masked entries are not used in computations. -As an illustration, let's consider the following dataset:: + As an illustration, let's consider the following dataset: >>> import numpy as np >>> import numpy.ma as ma >>> x = np.array([1, 2, 3, -1, 5]) -We wish to mark the fourth entry as invalid. The easiest is to create a masked -array:: + We wish to mark the fourth entry as invalid. The easiest is to create a masked + array:: >>> mx = ma.masked_array(x, mask=[0, 0, 0, 1, 0]) -We can now compute the mean of the dataset, without taking the invalid data -into account:: + We can now compute the mean of the dataset, without taking the invalid data + into account: >>> mx.mean() 2.75 @@ -62,17 +62,17 @@ class, which is a subclass of :class:`numpy.ndarray`. The class, its attributes and methods are described in more details in the :ref:`MaskedArray class ` section. 
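The masked mean above is simply the ordinary mean over the unmasked entries; a small sketch verifying that equivalence by hand:

    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> x = np.array([1, 2, 3, -1, 5])
    >>> mx = ma.masked_array(x, mask=[0, 0, 0, 1, 0])
    >>> mx.mean()            # the masked -1 is ignored
    2.75
    >>> x[~mx.mask].mean()   # the same reduction written out explicitly
    2.75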
-The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: :: +The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: >>> import numpy as np >>> import numpy.ma as ma -To create an array with the second element invalid, we would do:: + To create an array with the second element invalid, we would do:: >>> y = ma.array([1, 2, 3], mask = [0, 1, 0]) -To create a masked array where all values close to 1.e20 are invalid, we would -do:: + To create a masked array where all values close to 1.e20 are invalid, we would + do: >>> z = ma.masked_values([1.0, 1.e20, 3.0, 4.0], 1.e20) @@ -108,17 +108,18 @@ There are several ways to construct a masked array. mask of the view is set to :attr:`nomask` if the array has no named fields, or an array of boolean with the same structure as the array otherwise. - >>> x = np.array([1, 2, 3]) - >>> x.view(ma.MaskedArray) - masked_array(data=[1, 2, 3], - mask=False, - fill_value=999999) - >>> x = np.array([(1, 1.), (2, 2.)], dtype=[('a',int), ('b', float)]) - >>> x.view(ma.MaskedArray) - masked_array(data=[(1, 1.0), (2, 2.0)], - mask=[(False, False), (False, False)], - fill_value=(999999, 1e+20), - dtype=[('a', '>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> x.view(ma.MaskedArray) + masked_array(data=[1, 2, 3], + mask=False, + fill_value=999999) + >>> x = np.array([(1, 1.), (2, 2.)], dtype=[('a',int), ('b', float)]) + >>> x.view(ma.MaskedArray) + masked_array(data=[(1, 1.0), (2, 2.0)], + mask=[(False, False), (False, False)], + fill_value=(999999, 1e+20), + dtype=[('a', '>> import numpy as np >>> x = ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]]) >>> x[~x.mask] masked_array(data=[1, 4], - mask=[False, False], - fill_value=999999) + mask=[False, False], + fill_value=999999) -Another way to retrieve the valid data is to use the :meth:`compressed` -method, which returns a one-dimensional :class:`~numpy.ndarray` (or one of its -subclasses, depending on the value of the :attr:`~MaskedArray.baseclass` -attribute):: + Another way to retrieve the valid data is to use the :meth:`compressed` + method, which returns a one-dimensional :class:`~numpy.ndarray` (or one of its + subclasses, depending on the value of the :attr:`~MaskedArray.baseclass` + attribute): >>> x.compressed() array([1, 4]) -Note that the output of :meth:`compressed` is always 1D. + Note that the output of :meth:`compressed` is always 1D. @@ -218,7 +220,7 @@ Masking an entry ~~~~~~~~~~~~~~~~ The recommended way to mark one or several specific entries of a masked array -as invalid is to assign the special value :attr:`masked` to them:: +as invalid is to assign the special value :attr:`masked` to them: >>> x = ma.array([1, 2, 3]) >>> x[0] = ma.masked @@ -257,8 +259,9 @@ but this usage is discouraged. 
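Assigning :attr:`masked` only flips the mask; the underlying value stays in the ``data`` buffer, which is what lets it reappear if the entry is later unmasked. A quick sketch:

    >>> import numpy.ma as ma
    >>> x = ma.array([1, 2, 3])
    >>> x[0] = ma.masked
    >>> x.data   # the original value is still stored
    array([1, 2, 3])
    >>> x.mask
    array([ True, False, False])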
All the entries of an array can be masked at once by assigning ``True`` to the -mask:: +mask: + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x.mask = True >>> x @@ -267,8 +270,8 @@ mask:: fill_value=999999, dtype=int64) -Finally, specific entries can be masked and/or unmasked by assigning to the -mask a sequence of booleans:: + Finally, specific entries can be masked and/or unmasked by assigning to the + mask a sequence of booleans: >>> x = ma.array([1, 2, 3]) >>> x.mask = [0, 1, 0] @@ -281,8 +284,9 @@ Unmasking an entry ~~~~~~~~~~~~~~~~~~ To unmask one or several specific entries, we can just assign one or several -new valid values to them:: +new valid values to them: + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x masked_array(data=[1, 2, --], @@ -300,37 +304,40 @@ new valid values to them:: attribute. This feature was introduced to prevent overwriting the mask. To force the unmasking of an entry where the array has a hard mask, the mask must first to be softened using the :meth:`soften_mask` method - before the allocation. It can be re-hardened with :meth:`harden_mask`:: - - >>> x = ma.array([1, 2, 3], mask=[0, 0, 1], hard_mask=True) - >>> x - masked_array(data=[1, 2, --], - mask=[False, False, True], - fill_value=999999) - >>> x[-1] = 5 - >>> x - masked_array(data=[1, 2, --], - mask=[False, False, True], - fill_value=999999) - >>> x.soften_mask() - masked_array(data=[1, 2, --], - mask=[False, False, True], - fill_value=999999) - >>> x[-1] = 5 - >>> x - masked_array(data=[1, 2, 5], - mask=[False, False, False], - fill_value=999999) - >>> x.harden_mask() - masked_array(data=[1, 2, 5], - mask=[False, False, False], - fill_value=999999) + before the allocation. It can be re-hardened with :meth:`harden_mask` as + follows: + + >>> import numpy.ma as ma + >>> x = ma.array([1, 2, 3], mask=[0, 0, 1], hard_mask=True) + >>> x + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> x[-1] = 5 + >>> x + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> x.soften_mask() + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> x[-1] = 5 + >>> x + masked_array(data=[1, 2, 5], + mask=[False, False, False], + fill_value=999999) + >>> x.harden_mask() + masked_array(data=[1, 2, 5], + mask=[False, False, False], + fill_value=999999) To unmask all masked entries of a masked array (provided the mask isn't a hard mask), the simplest solution is to assign the constant :attr:`nomask` to the -mask:: +mask: + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x masked_array(data=[1, 2, --], @@ -352,8 +359,9 @@ its mechanisms for indexing and slicing. When accessing a single entry of a masked array with no named fields, the output is either a scalar (if the corresponding entry of the mask is ``False``) or the special value :attr:`masked` (if the corresponding entry of -the mask is ``True``):: +the mask is ``True``): + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x[0] 1 @@ -367,6 +375,7 @@ If the masked array has named fields, accessing a single entry returns a array with the same dtype as the initial array if at least one of the fields is masked. + >>> import numpy.ma as ma >>> y = ma.masked_array([(1,2), (3, 4)], ... mask=[(0, 0), (0, 1)], ... 
dtype=[('a', int), ('b', int)]) @@ -382,6 +391,7 @@ mask is either :attr:`nomask` (if there was no invalid entries in the original array) or a view of the corresponding slice of the original mask. The view is required to ensure propagation of any modification of the mask to the original. + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3, 4, 5], mask=[0, 1, 0, 0, 1]) >>> mx = x[:3] >>> mx @@ -398,6 +408,7 @@ required to ensure propagation of any modification of the mask to the original. >>> x.data array([ 1, -1, 3, 4, 5]) + Accessing a field of a masked array with structured datatype returns a :class:`MaskedArray`. @@ -417,8 +428,9 @@ meaning that the corresponding :attr:`~MaskedArray.data` entries The :mod:`numpy.ma` module comes with a specific implementation of most ufuncs. Unary and binary functions that have a validity domain (such as :func:`~numpy.log` or :func:`~numpy.divide`) return the :data:`masked` -constant whenever the input is masked or falls outside the validity domain:: +constant whenever the input is masked or falls outside the validity domain: + >>> import numpy.ma as ma >>> ma.log([-1, 0, 1, 2]) masked_array(data=[--, --, 0.0, 0.6931471805599453], mask=[ True, True, False, False], @@ -430,8 +442,9 @@ result of a binary ufunc is masked wherever any of the input is masked. If the ufunc also returns the optional context output (a 3-element tuple containing the name of the ufunc, its arguments and its domain), the context is processed and entries of the output masked array are masked wherever the corresponding -input fall outside the validity domain:: +input fall outside the validity domain: + >>> import numpy.ma as ma >>> x = ma.array([-1, 1, 0, 2, 3], mask=[0, 0, 0, 0, 1]) >>> np.log(x) masked_array(data=[--, 0.0, --, 0.6931471805599453, --], @@ -447,7 +460,7 @@ Data with a given value representing missing data Let's consider a list of elements, ``x``, where values of -9999. represent missing data. We wish to compute the average value of the data and the vector -of anomalies (deviations from the average):: +of anomalies (deviations from the average): >>> import numpy.ma as ma >>> x = [0.,1.,-9999.,3.,4.] @@ -466,6 +479,8 @@ Filling in the missing data Suppose now that we wish to print that same data, but with the missing values replaced by the average value. + >>> import numpy.ma as ma + >>> mx = ma.masked_values (x, -9999.) >>> print(mx.filled(mx.mean())) [0. 1. 2. 3. 4.] @@ -492,8 +507,10 @@ Ignoring extreme values Let's consider an array ``d`` of floats between 0 and 1. We wish to compute the average of the values of ``d`` while ignoring any data outside -the range ``[0.2, 0.9]``:: +the range ``[0.2, 0.9]``: + >>> import numpy as np + >>> import numpy.ma as ma >>> d = np.linspace(0, 1, 20) >>> print(d.mean() - ma.masked_outside(d, 0.2, 0.9).mean()) -0.05263157894736836 diff --git a/doc/source/reference/random/generator.rst b/doc/source/reference/random/generator.rst index eaa29feae57e..088d159c74f5 100644 --- a/doc/source/reference/random/generator.rst +++ b/doc/source/reference/random/generator.rst @@ -72,6 +72,7 @@ By default, `Generator.permuted` returns a copy. To operate in-place with `Generator.permuted`, pass the same array as the first argument *and* as the value of the ``out`` parameter. For example, + >>> import numpy as np >>> rng = np.random.default_rng() >>> x = np.arange(0, 15).reshape(3, 5) >>> x #doctest: +SKIP @@ -84,12 +85,12 @@ the value of the ``out`` parameter. 
For example, [ 6, 7, 8, 9, 5], [10, 14, 11, 13, 12]]) -Note that when ``out`` is given, the return value is ``out``: + Note that when ``out`` is given, the return value is ``out``: >>> y is x True -.. _generator-handling-axis-parameter: +.. _generator-handling-axis-parameter: Handling the ``axis`` parameter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -100,6 +101,7 @@ which dimension of the input array to use as the sequence. In the case of a two-dimensional array, ``axis=0`` will, in effect, rearrange the rows of the array, and ``axis=1`` will rearrange the columns. For example + >>> import numpy as np >>> rng = np.random.default_rng() >>> x = np.arange(0, 15).reshape(3, 5) >>> x @@ -119,6 +121,8 @@ how `numpy.sort` treats it. Each slice along the given axis is shuffled independently of the others. Compare the following example of the use of `Generator.permuted` to the above example of `Generator.permutation`: + >>> import numpy as np + >>> rng = np.random.default_rng() >>> rng.permuted(x, axis=1) #doctest: +SKIP array([[ 1, 0, 2, 4, 3], # random [ 5, 7, 6, 9, 8], @@ -132,8 +136,8 @@ Shuffling non-NumPy sequences ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `Generator.shuffle` works on non-NumPy sequences. That is, if it is given a sequence that is not a NumPy array, it shuffles that sequence in-place. -For example, + >>> import numpy as np >>> rng = np.random.default_rng() >>> a = ['A', 'B', 'C', 'D', 'E'] >>> rng.shuffle(a) # shuffle the list in-place diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index a2f508c58bbf..84940fca3171 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -18,18 +18,22 @@ probability distributions. In general, users will create a `Generator` instance with `default_rng` and call the various methods on it to obtain samples from different distributions. -:: - >>> import numpy as np >>> rng = np.random.default_rng() - # Generate one random float uniformly distributed over the range [0, 1) + + Generate one random float uniformly distributed over the range [0, 1) + >>> rng.random() #doctest: +SKIP 0.06369197489564249 # may vary - # Generate an array of 10 numbers according to a unit Gaussian distribution. + + Generate an array of 10 numbers according to a unit Gaussian distribution + >>> rng.standard_normal(10) #doctest: +SKIP array([-0.31018314, -1.8922078 , -0.3628523 , -0.63526532, 0.43181166, # may vary 0.51640373, 1.25693945, 0.07779185, 0.84090247, -2.13406828]) - # Generate an array of 5 integers uniformly over the range [0, 10). + + Generate an array of 5 integers uniformly over the range [0, 10) + >>> rng.integers(low=0, high=10, size=5) #doctest: +SKIP array([8, 7, 6, 2, 0]) # may vary @@ -40,14 +44,13 @@ generate different numbers each time. The pseudo-random sequences will be independent for all practical purposes, at least those purposes for which our pseudo-randomness was good for in the first place. -:: - - >>> rng1 = np.random.default_rng() - >>> rng1.random() #doctest: +SKIP - 0.6596288841243357 # may vary - >>> rng2 = np.random.default_rng() - >>> rng2.random() #doctest: +SKIP - 0.11885628817151628 # may vary + >>> import numpy as np + >>> rng1 = np.random.default_rng() + >>> rng1.random() #doctest: +SKIP + 0.6596288841243357 # may vary + >>> rng2 = np.random.default_rng() + >>> rng2.random() #doctest: +SKIP + 0.11885628817151628 # may vary .. warning:: @@ -66,18 +69,17 @@ intentionally *trying* to reproduce their result. 
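The complement of stream independence is seed-based reproducibility: two generators constructed from the same seed yield identical streams. A minimal sketch with an arbitrary illustrative seed (any fixed integer behaves the same way):

    >>> import numpy as np
    >>> a = np.random.default_rng(12345).random(3)
    >>> b = np.random.default_rng(12345).random(3)
    >>> bool((a == b).all())   # same seed, same stream
    True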
A convenient way to get such a seed number is to use :py:func:`secrets.randbits` to get an arbitrary 128-bit integer. -:: - - >>> import secrets - >>> import numpy as np - >>> secrets.randbits(128) #doctest: +SKIP - 122807528840384100672342137672332424406 # may vary - >>> rng1 = np.random.default_rng(122807528840384100672342137672332424406) - >>> rng1.random() - 0.5363922081269535 - >>> rng2 = np.random.default_rng(122807528840384100672342137672332424406) - >>> rng2.random() - 0.5363922081269535 + >>> import numpy as np + >>> import secrets + >>> import numpy as np + >>> secrets.randbits(128) #doctest: +SKIP + 122807528840384100672342137672332424406 # may vary + >>> rng1 = np.random.default_rng(122807528840384100672342137672332424406) + >>> rng1.random() + 0.5363922081269535 + >>> rng2 = np.random.default_rng(122807528840384100672342137672332424406) + >>> rng2.random() + 0.5363922081269535 See the documentation on `default_rng` and `SeedSequence` for more advanced options for controlling the seed in specialized scenarios. diff --git a/doc/source/reference/routines.char.rst b/doc/source/reference/routines.char.rst index b62294b9a191..7a8728f2d727 100644 --- a/doc/source/reference/routines.char.rst +++ b/doc/source/reference/routines.char.rst @@ -16,10 +16,11 @@ Legacy fixed-width string functionality The `numpy.char` module provides a set of vectorized string operations for arrays of type `numpy.str_` or `numpy.bytes_`. For example - >>> np.char.capitalize(["python", "numpy"]) - array(['Python', 'Numpy'], dtype='>> np.char.add(["num", "doc"], ["py", "umentation"]) - array(['numpy', 'documentation'], dtype='>> import numpy as np + >>> np.char.capitalize(["python", "numpy"]) + array(['Python', 'Numpy'], dtype='>> np.char.add(["num", "doc"], ["py", "umentation"]) + array(['numpy', 'documentation'], dtype='>> import numpy as np >>> p1d = np.poly1d([1, 2, 3]) >>> p = np.polynomial.Polynomial(p1d.coef[::-1]) -In addition to the ``coef`` attribute, polynomials from the polynomial -package also have ``domain`` and ``window`` attributes. -These attributes are most relevant when fitting -polynomials to data, though it should be noted that polynomials with -different ``domain`` and ``window`` attributes are not considered equal, and -can't be mixed in arithmetic:: + In addition to the ``coef`` attribute, polynomials from the polynomial + package also have ``domain`` and ``window`` attributes. + These attributes are most relevant when fitting + polynomials to data, though it should be noted that polynomials with + different ``domain`` and ``window`` attributes are not considered equal, and + can't be mixed in arithmetic: >>> p1 = np.polynomial.Polynomial([1, 2, 3]) >>> p1 diff --git a/doc/source/reference/routines.rec.rst b/doc/source/reference/routines.rec.rst index 21700332418b..aa3a715f47a9 100644 --- a/doc/source/reference/routines.rec.rst +++ b/doc/source/reference/routines.rec.rst @@ -11,17 +11,18 @@ Record arrays expose the fields of structured arrays as properties. Most commonly, ndarrays contain elements of a single type, e.g. floats, integers, bools etc. However, it is possible for elements to be combinations -of these using structured types, such as:: +of these using structured types, such as: - >>> a = np.array([(1, 2.0), (1, 2.0)], + >>> import numpy as np + >>> a = np.array([(1, 2.0), (1, 2.0)], ... 
dtype=[('x', np.int64), ('y', np.float64)]) >>> a array([(1, 2.), (1, 2.)], dtype=[('x', '>> a['x'] array([1, 1]) @@ -29,13 +30,11 @@ one would a dictionary:: >>> a['y'] array([2., 2.]) -Record arrays allow us to access fields as properties:: + Record arrays allow us to access fields as properties: >>> ar = np.rec.array(a) - >>> ar.x array([1, 1]) - >>> ar.y array([2., 2.]) @@ -55,4 +54,3 @@ Functions Also, the `numpy.recarray` class and the `numpy.record` scalar dtype are present in this namespace. - From 811830b955a617ff5dddc4df5f3fdd85e94aca31 Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Sat, 29 Jun 2024 00:07:17 +0530 Subject: [PATCH 669/980] DOC: Make new imports succeed text in examples [skip azp] [skip cirrus] --- doc/source/reference/arrays.dtypes.rst | 24 ++++++++++++------------ doc/source/reference/arrays.ndarray.rst | 10 +++++----- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index eda59690e312..8aa7170df065 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -384,11 +384,11 @@ Type strings .. admonition:: Example - >>> import numpy as np - Data-type with fields ``big`` (big-endian 32-bit integer) and ``little`` (little-endian 32-bit integer): + >>> import numpy as np + >>> dt = np.dtype([('big', '>i4'), ('little', '>> import numpy as np - Data type with fields ``r``, ``g``, ``b``, ``a``, each being an 8-bit unsigned integer: + >>> import numpy as np + >>> dt = np.dtype({'names': ['r','g','b','a'], ... 'formats': [np.uint8, np.uint8, np.uint8, np.uint8]}) @@ -456,12 +456,12 @@ Type strings .. admonition:: Example - >>> import numpy as np - Data type containing field ``col1`` (10-character string at byte position 0), ``col2`` (32-bit float at byte position 10), and ``col3`` (integers at byte position 14): + >>> import numpy as np + >>> dt = np.dtype({'col1': ('U10', 0), 'col2': (np.float32, 10), ... 'col3': (int, 14)}) @@ -481,11 +481,11 @@ Type strings .. admonition:: Example - >>> import numpy as np - 32-bit integer, whose first two bytes are interpreted as an integer via field ``real``, and the following two bytes via field ``imag``. + >>> import numpy as np + >>> dt = np.dtype((np.int32,{'real':(np.int16, 0),'imag':(np.int16, 2)})) 32-bit integer, which is interpreted as consisting of a sub-array @@ -519,11 +519,11 @@ This equivalence can only be handled through ``==``, not through ``is``. .. admonition:: Example - >>> import numpy as np - A :class:`dtype` object is equal to all data type specifications that are equivalent to it. + >>> import numpy as np + >>> a = np.array([1, 2], dtype=float) >>> a.dtype == np.dtype(np.float64) True @@ -540,10 +540,10 @@ Second, there is no guarantee that data type objects are singletons. .. admonition:: Example - >>> import numpy as np - Do not use ``is`` because data type objects may or may not be singletons. + >>> import numpy as np + >>> np.dtype(float) is np.dtype(float) True >>> np.dtype([('a', float)]) is np.dtype([('a', float)]) diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index a403c9528800..d03ebde361a2 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -32,11 +32,11 @@ objects implementing the :class:`memoryview` or :ref:`array .. 
admonition:: Example - >>> import numpy as np - A 2-dimensional array of size 2 x 3, composed of 4-byte integer elements: + >>> import numpy as np + >>> x = np.array([[1, 2, 3], [4, 5, 6]], np.int32) >>> type(x) @@ -362,10 +362,10 @@ Many of these methods take an argument named *axis*. In such cases, .. admonition:: Example of the *axis* argument - >>> import numpy as np - A 3-dimensional array of size 3 x 3 x 3, summed over each of its - three axes + three axes: + + >>> import numpy as np >>> x = np.arange(27).reshape((3,3,3)) >>> x From dba775253d6b82d8e565e272ee1d83267264f2b2 Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Sat, 29 Jun 2024 00:19:01 +0530 Subject: [PATCH 670/980] DOC: Define ndarray object `a` for "Flat iteration" example --- doc/source/reference/arrays.classes.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 2cfe975ad3b5..3b2d0c4b2a02 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -634,6 +634,7 @@ an iterator that will cycle over the entire array in C-style contiguous order. >>> import numpy as np + >>> a = np.arange(24).reshape(3,2,4) + 10 >>> for i, val in enumerate(a.flat): ... if i%5 == 0: print(i, val) 0 10 From c70c1b2fdc89214ecafb9ea27c9e36849d73551e Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Fri, 28 Jun 2024 19:43:41 +0200 Subject: [PATCH 671/980] CI,MAINT: Bump gfortran version [wheel build] Co-authored-by: ngoldbaum --- .github/workflows/wheels.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 31cac63eafc7..f8a90493c6d6 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -131,9 +131,10 @@ jobs: if: matrix.buildplat[0] == 'macos-13' || matrix.buildplat[0] == 'macos-14' run: | # Needed due to https://github.com/actions/runner-images/issues/3371 - echo "FC=gfortran-10" >> "$GITHUB_ENV" - echo "F77=gfortran-10" >> "$GITHUB_ENV" - echo "F90=gfortran-10" >> "$GITHUB_ENV" + # Supported versions: https://github.com/actions/runner-images/blob/main/images/macos/macos-14-arm64-Readme.md + echo "FC=gfortran-13" >> "$GITHUB_ENV" + echo "F77=gfortran-13" >> "$GITHUB_ENV" + echo "F90=gfortran-13" >> "$GITHUB_ENV" if [[ ${{ matrix.buildplat[2] }} == 'accelerate' ]]; then # macosx_arm64 and macosx_x86_64 with accelerate # only target Sonoma onwards From e0251206feca52d12a8e77edf95d6e328a7c0f7e Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 29 Jun 2024 01:07:53 +0200 Subject: [PATCH 672/980] CI,BUG: Fix meson gfortran locator [wheel build] --- .github/workflows/free-threaded-wheels.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/free-threaded-wheels.yml b/.github/workflows/free-threaded-wheels.yml index 231805c50eae..739e95d35117 100644 --- a/.github/workflows/free-threaded-wheels.yml +++ b/.github/workflows/free-threaded-wheels.yml @@ -118,6 +118,11 @@ jobs: - name: Setup macOS if: matrix.buildplat[0] == 'macos-13' || matrix.buildplat[0] == 'macos-14' run: | + # Needed due to https://github.com/actions/runner-images/issues/3371 + # Supported versions: https://github.com/actions/runner-images/blob/main/images/macos/macos-14-arm64-Readme.md + echo "FC=gfortran-13" >> "$GITHUB_ENV" + echo "F77=gfortran-13" >> "$GITHUB_ENV" + echo "F90=gfortran-13" >> "$GITHUB_ENV" if [[ ${{ matrix.buildplat[2] }} == 'accelerate' 
]]; then # macosx_arm64 and macosx_x86_64 with accelerate # only target Sonoma onwards From 0c36d5592dfb6998a2bbd7d80a9dae6f3fa2ffd7 Mon Sep 17 00:00:00 2001 From: vahidmech Date: Mon, 24 Jun 2024 20:51:22 -0500 Subject: [PATCH 673/980] BUG: Ensure output order follows input in numpy.fft closes #26777 --- numpy/fft/_pocketfft.py | 6 +++--- numpy/fft/tests/test_pocketfft.py | 10 ++++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 5972a346de20..d91b92c63f4b 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -34,7 +34,7 @@ import warnings from numpy.lib.array_utils import normalize_axis_index -from numpy._core import (asarray, empty, zeros, swapaxes, result_type, +from numpy._core import (asarray, empty_like, result_type, conjugate, take, sqrt, reciprocal) from . import _pocketfft_umath as pfu from numpy._core import overrides @@ -85,8 +85,8 @@ def _raw_fft(a, n, axis, is_real, is_forward, norm, out=None): out_dtype = real_dtype else: # Others, complex output. out_dtype = result_type(a.dtype, 1j) - out = empty(a.shape[:axis] + (n_out,) + a.shape[axis+1:], - dtype=out_dtype) + out = empty_like(a, shape=a.shape[:axis] + (n_out,) + a.shape[axis+1:], + dtype=out_dtype) elif ((shape := getattr(out, "shape", None)) is not None and (len(shape) != a.ndim or shape[axis] != n_out)): raise ValueError("output array has wrong shape.") diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index f58ed0cecb39..ca60427a53ea 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -497,6 +497,16 @@ def test_fft_with_order(dtype, order, fft): raise ValueError() +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("n", [None, 7, 12]) +def test_fft_output_order(order, n): + rng = np.random.RandomState(42) + x = rng.rand(10) + x = np.asarray(x, dtype=np.complex64, order=order) + res = np.fft.fft(x, n=n) + assert res.flags.c_contiguous == x.flags.c_contiguous + assert res.flags.f_contiguous == x.flags.f_contiguous + @pytest.mark.skipif(IS_WASM, reason="Cannot start thread") class TestFFTThreadSafe: threads = 16 From 9b06b21d6d12595c4c3e2fbb0f2aa0d1306abe56 Mon Sep 17 00:00:00 2001 From: Jakob Unfried Date: Fri, 28 Jun 2024 11:33:25 +0200 Subject: [PATCH 674/980] DOC: update notes on sign for complex numbers Update the notes section to reflect the changed definition for complex numbers. --- numpy/_core/code_generators/ufunc_docstrings.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index c7bf82fb2a19..ce1ea904564c 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -3619,8 +3619,10 @@ def add_newdoc(place, name, doc): Notes ----- There is more than one definition of sign in common use for complex - numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}` - which is different from a common alternative, :math:`x/|x|`. + numbers. The definition used here, :math:`x/|x|`, is the more common + and useful one, but is different from the one used in numpy prior to + version 2.0, :math:`x/\\sqrt{x*x}`, which is equivalent to + ``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``. 
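A small sketch contrasting the two conventions on a concrete value (``3 + 4j`` is an arbitrary sample; NumPy >= 2.0 semantics are assumed for ``np.sign``):

    >>> import numpy as np
    >>> z = np.complex128(3 + 4j)
    >>> complex(np.sign(z))          # x/|x|: unit magnitude, same phase
    (0.6+0.8j)
    >>> complex(z / np.sqrt(z * z))  # the pre-2.0 convention
    (1+0j)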
Examples -------- From b6fcc192587977d9a447d9f72ae22ff2e3785ee5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Mon, 17 Jun 2024 16:27:32 +0200 Subject: [PATCH 675/980] Add Array API 2023.12 version support --- .github/workflows/linux.yml | 2 +- .../upcoming_changes/26724.new_feature.rst | 7 + doc/source/reference/routines.math.rst | 2 + numpy/__init__.py | 36 +-- numpy/__init__.pyi | 2 + numpy/_core/_methods.py | 4 +- numpy/_core/fromnumeric.py | 234 +++++++++++++++++- numpy/_core/fromnumeric.pyi | 154 ++++++++++++ numpy/_core/numeric.py | 14 +- numpy/_core/numeric.pyi | 2 + numpy/_core/src/multiarray/methods.c | 3 +- numpy/_core/tests/test_numeric.py | 27 +- numpy/_core/tests/test_regression.py | 6 +- numpy/lib/tests/test_function_base.py | 53 +++- numpy/typing/tests/data/pass/fromnumeric.py | 12 + .../typing/tests/data/reveal/fromnumeric.pyi | 20 ++ requirements/test_requirements.txt | 2 +- tools/ci/array-api-skips.txt | 6 +- 18 files changed, 536 insertions(+), 50 deletions(-) create mode 100644 doc/release/upcoming_changes/26724.new_feature.rst diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 1ceeb61514bb..15c52f662d83 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -231,7 +231,7 @@ jobs: uses: actions/checkout@v4 with: repository: data-apis/array-api-tests - ref: '3cf8ef654c456d9fd1633d64e67b4470465940e9' # Latest commit as of 2024-04-09 + ref: '809a1984414cfc0bca68a823aeaeba7df3900d17' # Latest commit as of 2024-06-26 submodules: 'true' path: 'array-api-tests' - name: Set up Python diff --git a/doc/release/upcoming_changes/26724.new_feature.rst b/doc/release/upcoming_changes/26724.new_feature.rst new file mode 100644 index 000000000000..3c6a830728a4 --- /dev/null +++ b/doc/release/upcoming_changes/26724.new_feature.rst @@ -0,0 +1,7 @@ +* `numpy.cumulative_sum` and `numpy.cumulative_prod` were added as Array API + compatible alternatives for `numpy.cumsum` and `numpy.cumprod`. The new functions + can include a fixed initial (zeros for ``sum`` and ones for ``prod``) in the result. +* `numpy.clip` now supports ``max`` and ``min`` keyword arguments which are meant + to replace ``a_min`` and ``a_max``. Also, for ``np.clip(a)`` or ``np.clip(a, None, None)`` + a copy of the input array will be returned instead of raising an error. +* `numpy.astype` now supports ``device`` argument. 
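A quick sketch of the first two bullets in action, with behaviour as specified by this changeset:

    >>> import numpy as np
    >>> a = np.array([1, 2, 3])
    >>> np.cumulative_sum(a, include_initial=True)  # leading zero, length n + 1
    array([0, 1, 3, 6])
    >>> np.clip(a, min=2, max=None)                 # new keyword spelling
    array([2, 2, 3])
    >>> np.clip(a)                                  # no bounds: a copy of the input
    array([1, 2, 3])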
diff --git a/doc/source/reference/routines.math.rst b/doc/source/reference/routines.math.rst index 59310f0a714f..fb08f59fa266 100644 --- a/doc/source/reference/routines.math.rst +++ b/doc/source/reference/routines.math.rst @@ -63,6 +63,8 @@ Sums, products, differences sum nanprod nansum + cumulative_sum + cumulative_prod cumprod cumsum nancumprod diff --git a/numpy/__init__.py b/numpy/__init__.py index f92216a27514..0673f8d1dd71 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -134,23 +134,23 @@ can_cast, cbrt, cdouble, ceil, character, choose, clip, clongdouble, complex128, complex64, complexfloating, compress, concat, concatenate, conj, conjugate, convolve, copysign, copyto, correlate, cos, cosh, - count_nonzero, cross, csingle, cumprod, cumsum, - datetime64, datetime_as_string, datetime_data, deg2rad, degrees, - diagonal, divide, divmod, dot, double, dtype, e, einsum, einsum_path, - empty, empty_like, equal, errstate, euler_gamma, exp, exp2, expm1, - fabs, finfo, flatiter, flatnonzero, flexible, float16, float32, - float64, float_power, floating, floor, floor_divide, fmax, fmin, fmod, - format_float_positional, format_float_scientific, frexp, from_dlpack, - frombuffer, fromfile, fromfunction, fromiter, frompyfunc, fromstring, - full, full_like, gcd, generic, geomspace, get_printoptions, - getbufsize, geterr, geterrcall, greater, greater_equal, half, - heaviside, hstack, hypot, identity, iinfo, iinfo, indices, inexact, - inf, inner, int16, int32, int64, int8, int_, intc, integer, intp, - invert, is_busday, isclose, isdtype, isfinite, isfortran, isinf, - isnan, isnat, isscalar, issubdtype, lcm, ldexp, left_shift, less, - less_equal, lexsort, linspace, little_endian, log, log10, log1p, log2, - logaddexp, logaddexp2, logical_and, logical_not, logical_or, - logical_xor, logspace, long, longdouble, longlong, matmul, + count_nonzero, cross, csingle, cumprod, cumsum, cumulative_prod, + cumulative_sum, datetime64, datetime_as_string, datetime_data, + deg2rad, degrees, diagonal, divide, divmod, dot, double, dtype, e, + einsum, einsum_path, empty, empty_like, equal, errstate, euler_gamma, + exp, exp2, expm1, fabs, finfo, flatiter, flatnonzero, flexible, + float16, float32, float64, float_power, floating, floor, floor_divide, + fmax, fmin, fmod, format_float_positional, format_float_scientific, + frexp, from_dlpack, frombuffer, fromfile, fromfunction, fromiter, + frompyfunc, fromstring, full, full_like, gcd, generic, geomspace, + get_printoptions, getbufsize, geterr, geterrcall, greater, + greater_equal, half, heaviside, hstack, hypot, identity, iinfo, iinfo, + indices, inexact, inf, inner, int16, int32, int64, int8, int_, intc, + integer, intp, invert, is_busday, isclose, isdtype, isfinite, + isfortran, isinf, isnan, isnat, isscalar, issubdtype, lcm, ldexp, + left_shift, less, less_equal, lexsort, linspace, little_endian, log, + log10, log1p, log2, logaddexp, logaddexp2, logical_and, logical_not, + logical_or, logical_xor, logspace, long, longdouble, longlong, matmul, matrix_transpose, max, maximum, may_share_memory, mean, memmap, min, min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, ndarray, ndim, nditer, negative, nested_iters, newaxis, nextafter, nonzero, @@ -290,7 +290,7 @@ # import with `from numpy import *`. 
 __future_scalars__ = {"str", "bytes", "object"}
 
-    __array_api_version__ = "2022.12"
+    __array_api_version__ = "2023.12"
 
     from ._array_api_info import __array_namespace_info__
 
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index f4963f51a5e6..54073f9d3053 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -260,6 +260,7 @@ from numpy._core.fromnumeric import (
     all as all,
     any as any,
     cumsum as cumsum,
+    cumulative_sum as cumulative_sum,
     ptp as ptp,
     max as max,
     min as min,
@@ -267,6 +268,7 @@ from numpy._core.fromnumeric import (
     amin as amin,
     prod as prod,
     cumprod as cumprod,
+    cumulative_prod as cumulative_prod,
     ndim as ndim,
     size as size,
     around as around,
diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py
index f214ff957370..d31e2eef41fd 100644
--- a/numpy/_core/_methods.py
+++ b/numpy/_core/_methods.py
@@ -98,8 +98,8 @@ def _count_reduce_items(arr, axis, keepdims=False, where=True):
 
 def _clip(a, min=None, max=None, out=None, **kwargs):
     if min is None and max is None:
-        raise ValueError("One of max or min must be given")
-
+        # return identity
+        return um.positive(a, out=out, **kwargs)
     if min is None:
         return um.minimum(a, max, out=out, **kwargs)
     elif max is None:
diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py
index 57602293ad80..204dcf86539c 100644
--- a/numpy/_core/fromnumeric.py
+++ b/numpy/_core/fromnumeric.py
@@ -21,8 +21,8 @@
 __all__ = [
     'all', 'amax', 'amin', 'any', 'argmax',
     'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
-    'compress', 'cumprod', 'cumsum', 'diagonal', 'mean',
-    'max', 'min', 'matrix_transpose',
+    'compress', 'cumprod', 'cumsum', 'cumulative_prod', 'cumulative_sum',
+    'diagonal', 'mean', 'max', 'min', 'matrix_transpose',
     'ndim', 'nonzero', 'partition', 'prod', 'ptp', 'put',
     'ravel', 'repeat', 'reshape', 'resize', 'round',
     'searchsorted', 'shape', 'size', 'sort', 'squeeze',
@@ -2210,12 +2210,14 @@ def compress(condition, a, axis=None, out=None):
     return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
 
 
-def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs):
-    return (a, a_min, a_max)
+def _clip_dispatcher(a, a_min=None, a_max=None, out=None, *, min=None,
+                     max=None, **kwargs):
+    return (a, a_min, a_max, out, min, max)
 
 
 @array_function_dispatch(_clip_dispatcher)
-def clip(a, a_min, a_max, out=None, **kwargs):
+def clip(a, a_min=np._NoValue, a_max=np._NoValue, out=None, *,
+         min=np._NoValue, max=np._NoValue, **kwargs):
     """
     Clip (limit) the values in an array.
 
@@ -2234,12 +2236,19 @@ def clip(a, a_min, a_max, out=None, **kwargs):
         Array containing elements to clip.
     a_min, a_max : array_like or None
         Minimum and maximum value. If ``None``, clipping is not performed on
-        the corresponding edge. Only one of `a_min` and `a_max` may be
-        ``None``. Both are broadcast against `a`.
+        the corresponding edge. If both ``a_min`` and ``a_max`` are ``None``,
+        the elements of the returned array stay the same. Both are broadcast
+        against ``a``.
     out : ndarray, optional
         The results will be placed in this array. It may be the input
         array for in-place clipping. `out` must be of the right shape
         to hold the output. Its type is preserved.
+    min, max : array_like or None
+        Array API compatible alternatives for the ``a_min`` and ``a_max``
+        arguments. Only one of the two pairs, either ``a_min`` and ``a_max``
+        or ``min`` and ``max``, can be passed at a time. Default: ``None``.
+
+        .. versionadded:: 2.1.0
     **kwargs
         For other keyword-only arguments, see the
         :ref:`ufunc docs <ufuncs.kwargs>`.
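Taken together, the new defaults sketched above mean that ``np.clip`` with no
bounds is now an identity copy, and that mixing the old and new spellings is
rejected. A small sketch of the resulting behavior, following the
implementation and tests in this patch:

    >>> import numpy as np
    >>> arr = np.arange(3)
    >>> np.clip(arr)
    array([0, 1, 2])
    >>> np.clip(arr, 2, 3, max=3)
    Traceback (most recent call last):
        ...
    ValueError: Passing `min` or `max` keyword argument when `a_min` and `a_max` are provided is forbidden.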
@@ -2283,6 +2292,19 @@ def clip(a, a_min, a_max, out=None, **kwargs): array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) """ + if a_min is np._NoValue and a_max is np._NoValue: + a_min = None if min is np._NoValue else min + a_max = None if max is np._NoValue else max + elif a_min is np._NoValue: + raise TypeError("clip() missing 1 required positional " + "argument: 'a_min'") + elif a_max is np._NoValue: + raise TypeError("clip() missing 1 required positional " + "argument: 'a_max'") + elif min is not np._NoValue or max is not np._NoValue: + raise ValueError("Passing `min` or `max` keyword argument when " + "`a_min` and `a_max` are provided is forbidden.") + return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs) @@ -2643,6 +2665,202 @@ def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): keepdims=keepdims, where=where) +def _cumulative_func(x, func, axis, dtype, out, include_initial): + x = np.atleast_1d(x) + x_ndim = x.ndim + if axis is None: + if x_ndim >= 2: + raise ValueError("For arrays which have more than one dimension " + "``axis`` argument is required.") + axis = 0 + + if out is not None and include_initial: + item = [slice(None)] * x_ndim + item[axis] = slice(1, None) + func.accumulate(x, axis=axis, dtype=dtype, out=out[tuple(item)]) + item[axis] = 0 + out[tuple(item)] = func.identity + return out + + res = func.accumulate(x, axis=axis, dtype=dtype, out=out) + if include_initial: + initial_shape = list(x.shape) + initial_shape[axis] = 1 + res = np.concat( + [np.full_like(res, func.identity, shape=initial_shape), res], + axis=axis, + ) + + return res + + +def _cumulative_prod_dispatcher(x, /, *, axis=None, dtype=None, out=None, + include_initial=None): + return (x, out) + + +@array_function_dispatch(_cumulative_prod_dispatcher) +def cumulative_prod(x, /, *, axis=None, dtype=None, out=None, + include_initial=False): + """ + Return the cumulative product of elements along a given axis. + + This function is an Array API compatible alternative to `numpy.cumprod`. + + Parameters + ---------- + x : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. The default + (None) is only allowed for one-dimensional arrays. For arrays + with more than one dimension ``axis`` is required. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If ``dtype`` is not specified, it + defaults to the dtype of ``x``, unless ``x`` has an integer dtype + with a precision less than that of the default platform integer. + In that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + include_initial : bool, optional + Boolean indicating whether to include the initial value (ones) as + the first value in the output. With ``include_initial=True`` + the shape of the output is different than the shape of the input. + Default: ``False``. + + Returns + ------- + cumulative_prod_along_axis : ndarray + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. The + result has the same shape as ``x`` if ``include_initial=False``. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. 
+
+    Examples
+    --------
+    >>> a = np.array([1, 2, 3])
+    >>> np.cumulative_prod(a) # intermediate results 1, 1*2
+    ...                       # total product 1*2*3 = 6
+    array([1, 2, 6])
+    >>> a = np.array([1, 2, 3, 4, 5, 6])
+    >>> np.cumulative_prod(a, dtype=float) # specify type of output
+    array([  1.,   2.,   6.,  24., 120., 720.])
+
+    The cumulative product for each column (i.e., over the rows) of ``b``:
+
+    >>> b = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> np.cumulative_prod(b, axis=0)
+    array([[ 1,  2,  3],
+           [ 4, 10, 18]])
+
+    The cumulative product for each row (i.e. over the columns) of ``b``:
+
+    >>> np.cumulative_prod(b, axis=1)
+    array([[  1,   2,   6],
+           [  4,  20, 120]])
+
+    """
+    return _cumulative_func(x, um.multiply, axis, dtype, out, include_initial)
+
+
+def _cumulative_sum_dispatcher(x, /, *, axis=None, dtype=None, out=None,
+                               include_initial=None):
+    return (x, out)
+
+
+@array_function_dispatch(_cumulative_sum_dispatcher)
+def cumulative_sum(x, /, *, axis=None, dtype=None, out=None,
+                   include_initial=False):
+    """
+    Return the cumulative sum of the elements along a given axis.
+
+    This function is an Array API compatible alternative to `numpy.cumsum`.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    axis : int, optional
+        Axis along which the cumulative sum is computed. The default
+        (None) is only allowed for one-dimensional arrays. For arrays
+        with more than one dimension ``axis`` is required.
+    dtype : dtype, optional
+        Type of the returned array and of the accumulator in which the
+        elements are summed. If ``dtype`` is not specified, it defaults
+        to the dtype of ``x``, unless ``x`` has an integer dtype with
+        a precision less than that of the default platform integer.
+        In that case, the default platform integer is used.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output
+        but the type will be cast if necessary. See :ref:`ufuncs-output-type`
+        for more details.
+    include_initial : bool, optional
+        Boolean indicating whether to include the initial value (zeros) as
+        the first value in the output. With ``include_initial=True``
+        the shape of the output is different than the shape of the input.
+        Default: ``False``.
+
+    Returns
+    -------
+    cumulative_sum_along_axis : ndarray
+        A new array holding the result is returned unless ``out`` is
+        specified, in which case a reference to ``out`` is returned. The
+        result has the same shape as ``x`` if ``include_initial=False``.
+
+    See Also
+    --------
+    sum : Sum array elements.
+    trapezoid : Integration of array values using composite trapezoidal rule.
+    diff : Calculate the n-th discrete difference along given axis.
+
+    Notes
+    -----
+    Arithmetic is modular when using integer types, and no error is
+    raised on overflow.
+
+    ``cumulative_sum(a)[-1]`` may not be equal to ``sum(a)`` for
+    floating-point values since ``sum`` may use a pairwise summation routine,
+    reducing the roundoff-error. See `sum` for more information.
+ + Examples + -------- + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> a + array([1, 2, 3, 4, 5, 6]) + >>> np.cumulative_sum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumulative_sum(a, dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> b = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumulative_sum(b,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumulative_sum(b,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + ``cumulative_sum(c)[-1]`` may not be equal to ``sum(c)`` + + >>> c = np.array([1, 2e-9, 3e-9] * 1000000) + >>> np.cumulative_sum(c)[-1] + 1000000.0050045159 + >>> c.sum() + 1000000.0050000029 + + """ + return _cumulative_func(x, um.add, axis, dtype, out, include_initial) + + def _cumsum_dispatcher(a, axis=None, dtype=None, out=None): return (a, out) @@ -2681,6 +2899,7 @@ def cumsum(a, axis=None, dtype=None, out=None): See Also -------- + cumulative_sum : Array API compatible alternative for ``cumsum``. sum : Sum array elements. trapezoid : Integration of array values using composite trapezoidal rule. diff : Calculate the n-th discrete difference along given axis. @@ -3269,6 +3488,7 @@ def cumprod(a, axis=None, dtype=None, out=None): See Also -------- + cumulative_prod : Array API compatible alternative for ``cumprod``. :ref:`ufuncs-output-type` Notes diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index cde666f6f37d..0d4e30ce8101 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -399,6 +399,8 @@ def clip( a_max: None | ArrayLike, out: None = ..., *, + min: None | ArrayLike = ..., + max: None | ArrayLike = ..., dtype: None = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., @@ -413,6 +415,8 @@ def clip( a_max: None | ArrayLike, out: None = ..., *, + min: None | ArrayLike = ..., + max: None | ArrayLike = ..., dtype: None = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., @@ -427,6 +431,8 @@ def clip( a_max: None | ArrayLike, out: None = ..., *, + min: None | ArrayLike = ..., + max: None | ArrayLike = ..., dtype: None = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., @@ -441,6 +447,8 @@ def clip( a_max: None | ArrayLike, out: None = ..., *, + min: None | ArrayLike = ..., + max: None | ArrayLike = ..., dtype: None = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., @@ -455,6 +463,8 @@ def clip( a_max: None | ArrayLike, out: _ArrayType = ..., *, + min: None | ArrayLike = ..., + max: None | ArrayLike = ..., dtype: DTypeLike, where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., @@ -469,6 +479,8 @@ def clip( a_max: None | ArrayLike, out: _ArrayType, *, + min: None | ArrayLike = ..., + max: None | ArrayLike = ..., dtype: DTypeLike = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., @@ -600,6 +612,57 @@ def cumsum( out: _ArrayType = ..., ) -> _ArrayType: ... +@overload +def cumulative_sum( + x: _ArrayLike[_SCT], + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[Any]: ... 
+@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: None | SupportsIndex = ..., + dtype: _DTypeLike[_SCT] = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[Any]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _ArrayType = ..., + include_initial: bool = ..., +) -> _ArrayType: ... + @overload def ptp( a: _ArrayLike[_SCT], @@ -840,6 +903,97 @@ def cumprod( out: _ArrayType = ..., ) -> _ArrayType: ... +@overload +def cumulative_prod( + x: _ArrayLikeBool_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[int_]: ... +@overload +def cumulative_prod( + x: _ArrayLikeUInt_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[uint64]: ... +@overload +def cumulative_prod( + x: _ArrayLikeInt_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[int64]: ... +@overload +def cumulative_prod( + x: _ArrayLikeFloat_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[floating[Any]]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def cumulative_prod( + x: _ArrayLikeObject_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[object_]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: _DTypeLike[_SCT] = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[Any]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _ArrayType = ..., + include_initial: bool = ..., +) -> _ArrayType: ... + def ndim(a: ArrayLike) -> int: ... def size(a: ArrayLike, axis: None | int = ...) -> int: ... diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 6c3d880a8656..20a0d3418fe3 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -2575,12 +2575,12 @@ def array_equiv(a1, a2): return builtins.bool((a1 == a2).all()) -def _astype_dispatcher(x, dtype, /, *, copy=None): +def _astype_dispatcher(x, dtype, /, *, copy=None, device=None): return (x, dtype) @array_function_dispatch(_astype_dispatcher) -def astype(x, dtype, /, *, copy = True): +def astype(x, dtype, /, *, copy=True, device=None): """ Copies an array to a specified data type. @@ -2601,6 +2601,11 @@ def astype(x, dtype, /, *, copy = True): matches the data type of the input array, the input array must be returned; otherwise, a newly allocated array must be returned. Defaults to ``True``. 
+ device : str, optional + The device on which to place the returned array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.1.0 Returns ------- @@ -2630,6 +2635,11 @@ def astype(x, dtype, /, *, copy = True): raise TypeError( f"Input should be a NumPy array. It is a {type(x)} instead." ) + if device is not None and device != "cpu": + raise ValueError( + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' + ) return x.astype(dtype, copy=copy) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index d20cc13e49eb..f25c6258f2d0 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -696,10 +696,12 @@ def astype( x: NDArray[Any], dtype: _DTypeLike[_SCT], copy: bool = ..., + device: None | L["cpu"] = ..., ) -> NDArray[_SCT]: ... @overload def astype( x: NDArray[Any], dtype: DTypeLike, copy: bool = ..., + device: None | L["cpu"] = ..., ) -> NDArray[Any]: ... diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index dd40fc4e2f3d..669d5e575c7a 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -2825,7 +2825,8 @@ array_array_namespace(PyArrayObject *self, PyObject *args, PyObject *kwds) "but received: %S.", array_api_version); return NULL; } else if (PyUnicode_CompareWithASCIIString(array_api_version, "2021.12") != 0 && - PyUnicode_CompareWithASCIIString(array_api_version, "2022.12") != 0) + PyUnicode_CompareWithASCIIString(array_api_version, "2022.12") != 0 && + PyUnicode_CompareWithASCIIString(array_api_version, "2023.12") != 0) { PyErr_Format(PyExc_ValueError, "Version \"%U\" of the Array API Standard is not supported.", diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index ce6d721e2ea9..c13b04382728 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -2740,9 +2740,9 @@ def test_object_clip(self): assert actual.tolist() == expected.tolist() def test_clip_all_none(self): - a = np.arange(10, dtype=object) - with assert_raises_regex(ValueError, 'max or min'): - np.clip(a, None, None) + arr = np.arange(10, dtype=object) + assert_equal(np.clip(arr, None, None), arr) + assert_equal(np.clip(arr), arr) def test_clip_invalid_casting(self): a = np.arange(10, dtype=object) @@ -2859,6 +2859,27 @@ def test_clip_property(self, data, arr): assert result.dtype == t assert_array_equal(result, expected) + def test_clip_min_max_args(self): + arr = np.arange(5) + + assert_array_equal(np.clip(arr), arr) + assert_array_equal(np.clip(arr, min=2, max=3), np.clip(arr, 2, 3)) + assert_array_equal(np.clip(arr, min=None, max=2), + np.clip(arr, None, 2)) + + with assert_raises_regex(TypeError, "missing 1 required positional " + "argument: 'a_max'"): + np.clip(arr, 2) + with assert_raises_regex(TypeError, "missing 1 required positional " + "argument: 'a_min'"): + np.clip(arr, a_max=2) + msg = ("Passing `min` or `max` keyword argument when `a_min` and " + "`a_max` are provided is forbidden.") + with assert_raises_regex(ValueError, msg): + np.clip(arr, 2, 3, max=3) + with assert_raises_regex(ValueError, msg): + np.clip(arr, 2, 3, min=2) + class TestAllclose: rtol = 1e-5 diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 826415f9ce00..6e14909fc853 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -2567,15 +2567,17 @@ def test__array_namespace__(self): assert xp is np xp = 
arr.__array_namespace__(api_version="2022.12") assert xp is np + xp = arr.__array_namespace__(api_version="2023.12") + assert xp is np xp = arr.__array_namespace__(api_version=None) assert xp is np with pytest.raises( ValueError, - match="Version \"2023.12\" of the Array API Standard " + match="Version \"2024.12\" of the Array API Standard " "is not supported." ): - arr.__array_namespace__(api_version="2023.12") + arr.__array_namespace__(api_version="2024.12") with pytest.raises( ValueError, diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index b3cffa2703d5..2a25805af9f4 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -695,7 +695,8 @@ def test_basic(self): class TestCumsum: - def test_basic(self): + @pytest.mark.parametrize("cumsum", [np.cumsum, np.cumulative_sum]) + def test_basic(self, cumsum): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32, @@ -705,15 +706,15 @@ def test_basic(self): a2 = np.array(ba2, ctype) tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype) - assert_array_equal(np.cumsum(a, axis=0), tgt) + assert_array_equal(cumsum(a, axis=0), tgt) tgt = np.array( [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype) - assert_array_equal(np.cumsum(a2, axis=0), tgt) + assert_array_equal(cumsum(a2, axis=0), tgt) tgt = np.array( [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype) - assert_array_equal(np.cumsum(a2, axis=1), tgt) + assert_array_equal(cumsum(a2, axis=1), tgt) class TestProd: @@ -738,7 +739,8 @@ def test_basic(self): class TestCumprod: - def test_basic(self): + @pytest.mark.parametrize("cumprod", [np.cumprod, np.cumulative_prod]) + def test_basic(self, cumprod): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int16, np.uint16, np.int32, np.uint32, @@ -746,23 +748,52 @@ def test_basic(self): a = np.array(ba, ctype) a2 = np.array(ba2, ctype) if ctype in ['1', 'b']: - assert_raises(ArithmeticError, np.cumprod, a) - assert_raises(ArithmeticError, np.cumprod, a2, 1) - assert_raises(ArithmeticError, np.cumprod, a) + assert_raises(ArithmeticError, cumprod, a) + assert_raises(ArithmeticError, cumprod, a2, 1) + assert_raises(ArithmeticError, cumprod, a) else: - assert_array_equal(np.cumprod(a, axis=-1), + assert_array_equal(cumprod(a, axis=-1), np.array([1, 2, 20, 220, 1320, 6600, 26400], ctype)) - assert_array_equal(np.cumprod(a2, axis=0), + assert_array_equal(cumprod(a2, axis=0), np.array([[1, 2, 3, 4], [5, 12, 21, 36], [50, 36, 84, 180]], ctype)) - assert_array_equal(np.cumprod(a2, axis=-1), + assert_array_equal(cumprod(a2, axis=-1), np.array([[1, 2, 6, 24], [5, 30, 210, 1890], [10, 30, 120, 600]], ctype)) +def test_cumulative_include_initial(): + arr = np.arange(8).reshape((2, 2, 2)) + + expected = np.array([ + [[0, 0], [0, 1], [2, 4]], [[0, 0], [4, 5], [10, 12]] + ]) + assert_array_equal( + np.cumulative_sum(arr, axis=1, include_initial=True), expected + ) + + expected = np.array([ + [[1, 0, 0], [1, 2, 6]], [[1, 4, 20], [1, 6, 42]] + ]) + assert_array_equal( + np.cumulative_prod(arr, axis=2, include_initial=True), expected + ) + + out = np.zeros((3, 2), dtype=np.float64) + expected = np.array([[0, 0], [1, 2], [4, 6]], dtype=np.float64) + arr = np.arange(1, 5).reshape((2, 2)) + np.cumulative_sum(arr, axis=0, out=out, include_initial=True) + assert_array_equal(out, expected) + + expected = np.array([1, 2, 4]) + assert_array_equal( + 
np.cumulative_prod(np.array([2, 2]), include_initial=True), expected + ) + + class TestDiff: def test_basic(self): diff --git a/numpy/typing/tests/data/pass/fromnumeric.py b/numpy/typing/tests/data/pass/fromnumeric.py index 3d7ef2938e20..7cc2bcfd8b50 100644 --- a/numpy/typing/tests/data/pass/fromnumeric.py +++ b/numpy/typing/tests/data/pass/fromnumeric.py @@ -159,6 +159,12 @@ np.cumsum(A) np.cumsum(B) +np.cumulative_sum(a) +np.cumulative_sum(b) +np.cumulative_sum(c) +np.cumulative_sum(A, axis=0) +np.cumulative_sum(B, axis=0) + np.ptp(b) np.ptp(c) np.ptp(B) @@ -205,6 +211,12 @@ np.cumprod(A) np.cumprod(B) +np.cumulative_prod(a) +np.cumulative_prod(b) +np.cumulative_prod(c) +np.cumulative_prod(A, axis=0) +np.cumulative_prod(B, axis=0) + np.ndim(a) np.ndim(b) np.ndim(c) diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 7fa2260bc312..94b3f5e5496d 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -193,6 +193,15 @@ assert_type(np.cumsum(f, dtype=float), npt.NDArray[Any]) assert_type(np.cumsum(f, dtype=np.float64), npt.NDArray[np.float64]) assert_type(np.cumsum(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.cumulative_sum(b), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(f4), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f), npt.NDArray[Any]) +assert_type(np.cumulative_sum(AR_b), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(AR_f4), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f, dtype=float), npt.NDArray[Any]) +assert_type(np.cumulative_sum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumulative_sum(AR_f4, out=AR_subclass), NDArraySubclass) + assert_type(np.ptp(b), np.bool) assert_type(np.ptp(f4), np.float32) assert_type(np.ptp(f), Any) @@ -249,6 +258,17 @@ assert_type(np.cumprod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) assert_type(np.cumprod(AR_f4, dtype=float), npt.NDArray[Any]) assert_type(np.cumprod(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.cumulative_prod(AR_b), npt.NDArray[np.int_]) +assert_type(np.cumulative_prod(AR_u8), npt.NDArray[np.uint64]) +assert_type(np.cumulative_prod(AR_i8), npt.NDArray[np.int64]) +assert_type(np.cumulative_prod(AR_f4), npt.NDArray[np.floating[Any]]) +assert_type(np.cumulative_prod(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.cumulative_prod(AR_O), npt.NDArray[np.object_]) +assert_type(np.cumulative_prod(AR_f4, axis=0), npt.NDArray[np.floating[Any]]) +assert_type(np.cumulative_prod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumulative_prod(AR_f4, dtype=float), npt.NDArray[Any]) +assert_type(np.cumulative_prod(AR_f4, out=AR_subclass), NDArraySubclass) + assert_type(np.ndim(b), int) assert_type(np.ndim(f4), int) assert_type(np.ndim(f), int) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 4e53f86d355c..ec7827b7e50e 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -3,7 +3,7 @@ wheel==0.38.1 #setuptools==65.5.1 ; python_version < '3.12' #setuptools ; python_version >= '3.12' setuptools -hypothesis==6.81.1 +hypothesis==6.104.1 pytest==7.4.0 pytz==2023.3.post1 pytest-cov==4.1.0 diff --git a/tools/ci/array-api-skips.txt b/tools/ci/array-api-skips.txt index 74c4b49c5dfc..5b7324ae753b 100644 --- a/tools/ci/array-api-skips.txt +++ b/tools/ci/array-api-skips.txt @@ -1,8 +1,7 @@ # finfo return type misalignment 
 array_api_tests/test_data_type_functions.py::test_finfo[float32]
 
-# out.dtype=float32, but should be int16
-# dtype('float16') not found
+# for int inputs out.dtype=float32, but should be int
 array_api_tests/test_operators_and_elementwise_functions.py::test_ceil
 array_api_tests/test_operators_and_elementwise_functions.py::test_floor
 array_api_tests/test_operators_and_elementwise_functions.py::test_trunc
@@ -10,6 +9,9 @@ array_api_tests/test_operators_and_elementwise_functions.py::test_trunc
 # 'shape' arg is present. 'newshape' is retained for backward compat.
 array_api_tests/test_signatures.py::test_func_signature[reshape]
 
+# 'min/max' args are present. 'a_min/a_max' are retained for backward compat.
+array_api_tests/test_signatures.py::test_func_signature[clip]
+
 # missing 'descending' keyword arguments
 array_api_tests/test_signatures.py::test_func_signature[argsort]
 array_api_tests/test_signatures.py::test_func_signature[sort]

From 05a3b829b98355ad2e5f358f9e86daea22e28a07 Mon Sep 17 00:00:00 2001
From: Jules <57632293+JuliaPoo@users.noreply.github.com>
Date: Mon, 1 Jul 2024 14:37:14 +0800
Subject: [PATCH 676/980] Update numpy/_core/tests/test_multiarray.py

Co-authored-by: Matti Picus
---
 numpy/_core/tests/test_multiarray.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py
index 5a68a846a8f2..b6ae7cd7cbfc 100644
--- a/numpy/_core/tests/test_multiarray.py
+++ b/numpy/_core/tests/test_multiarray.py
@@ -10217,7 +10217,7 @@ def test_partition_fp(N, dtype):
     arr[1] = np.inf
     o1 = np.partition(arr, -2, kind='introselect')
     o2 = arr[np.argpartition(arr, -2, kind='introselect')]
-    for out in [o1,o2]:
+    for out in [o1, o2]:
         assert_(np.isnan(out[-1]))
         assert_equal(out[-2], np.inf)

From 456c4a91c38101b4aeb47f9c534bde350be714b4 Mon Sep 17 00:00:00 2001
From: Jules
Date: Mon, 1 Jul 2024 14:41:57 +0800
Subject: [PATCH 677/980] DOC: Clean docs for argpartition

---
 numpy/_core/fromnumeric.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py
index 1debcaa8295e..704417bf71a7 100644
--- a/numpy/_core/fromnumeric.py
+++ b/numpy/_core/fromnumeric.py
@@ -925,9 +925,9 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
     is unstable, and hence the returned indices are not guaranteed
     to be the earliest/latest occurrence of the element.
 
-    The sort order of ``np.nan`` is bigger than ``np.inf``.
-
-    See `partition` for notes on the different selection algorithms.
+    `argpartition` works for real/complex inputs with nan values;
+    see `partition` for notes on the enhanced sort order and
+    different selection algorithms.
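A small illustration of the ordering this note refers to (NaN sorts above
``inf`` in NumPy's partition order; a sketch, not part of the docstring):

    >>> import numpy as np
    >>> a = np.array([3.0, np.nan, 1.0, np.inf])
    >>> print(a[np.argpartition(a, -1)[-1]])
    nan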
Examples -------- From a676539536923bb84851b1adeb6396a2a3b35672 Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Mon, 1 Jul 2024 17:09:47 +0530 Subject: [PATCH 678/980] DOC: Reorder stubs for a few polynomial docstrings [skip azp] [skip cirrus] --- numpy/lib/_polynomial_impl.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index 784e6443b9c3..3c0614abf851 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -101,10 +101,10 @@ def poly(seq_of_zeros): Examples -------- - >>> import numpy as np - Given a sequence of a polynomial's zeros: + >>> import numpy as np + >>> np.poly((0, 0, 0)) # Multiple root example array([1., 0., 0., 0.]) @@ -298,10 +298,10 @@ def polyint(p, m=1, k=None): Examples -------- - >>> import numpy as np - The defining property of the antiderivative: + >>> import numpy as np + >>> p = np.poly1d([1,1,1]) >>> P = np.polyint(p) >>> P @@ -395,10 +395,10 @@ def polyder(p, m=1): Examples -------- - >>> import numpy as np - The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: + >>> import numpy as np + >>> p = np.poly1d([1,1,1,1]) >>> p2 = np.polyder(p) >>> p2 @@ -883,10 +883,10 @@ def polysub(a1, a2): Examples -------- - >>> import numpy as np - .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) + >>> import numpy as np + >>> np.polysub([2, 10, -2], [3, 10, -4]) array([-1, 0, 2]) @@ -1020,10 +1020,9 @@ def polydiv(u, v): Examples -------- - >>> import numpy as np - .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 + >>> import numpy as np >>> x = np.array([3.0, 5.0, 2.0]) >>> y = np.array([2.0, 1.0]) >>> np.polydiv(x, y) @@ -1111,10 +1110,10 @@ class poly1d: Examples -------- - >>> import numpy as np - Construct the polynomial :math:`x^2 + 2x + 3`: + >>> import numpy as np + >>> p = np.poly1d([1, 2, 3]) >>> print(np.poly1d(p)) 2 From b8532781412138501246512bf685eaded4824985 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 2 Jul 2024 18:51:53 +0300 Subject: [PATCH 679/980] DOC: remove hack to override _add_newdocs_scalars (#26826) --- doc/source/conf.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 63c3d7aacd5d..fef7963539f6 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -41,10 +41,6 @@ class PyTypeObject(ctypes.Structure): ('tp_name', ctypes.c_char_p), ] - # prevent numpy attaching docstrings to the scalar types - assert 'numpy._core._add_newdocs_scalars' not in sys.modules - sys.modules['numpy._core._add_newdocs_scalars'] = object() - import numpy # change the __name__ of the scalar types @@ -58,9 +54,6 @@ class PyTypeObject(ctypes.Structure): c_typ = PyTypeObject.from_address(id(typ)) c_typ.tp_name = _name_cache[typ] = b"numpy." + name.encode('utf8') - # now generate the docstrings as usual - del sys.modules['numpy._core._add_newdocs_scalars'] - import numpy._core._add_newdocs_scalars replace_scalar_type_names() From 4389fa70333d8f67f01acac26e5c9c40268badd2 Mon Sep 17 00:00:00 2001 From: otieno-juma Date: Wed, 12 Jun 2024 14:28:14 -0500 Subject: [PATCH 680/980] DOC: AI generated examples for ma.correlate. I adapted the AI generated examples. I removed the default parameters. First example shows how ma.correlate adds a mask to regular arrays. Second example shows basic usage with defaults. Third example introduces mixed arrays and other options. 
[skip actions] [skip azp] [skip cirrus]
---
 numpy/ma/core.py | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 453c63614d2e..1be93e65a9e9 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -8144,6 +8144,37 @@ def correlate(a, v, mode='valid', propagate_mask=True):
     See Also
     --------
     numpy.correlate : Equivalent function in the top-level NumPy module.
+
+    Examples
+    --------
+    Basic correlation:
+
+    >>> a = np.ma.array([1, 2, 3])
+    >>> v = np.ma.array([0, 1, 0])
+    >>> np.ma.correlate(a, v, mode='valid')
+    masked_array(data=[2],
+                 mask=[False],
+           fill_value=999999)
+
+    Correlation with masked elements:
+
+    >>> a = np.ma.array([1, 2, 3], mask=[False, True, False])
+    >>> v = np.ma.array([0, 1, 0])
+    >>> np.ma.correlate(a, v, mode='valid', propagate_mask=True)
+    masked_array(data=[--],
+                 mask=[ True],
+           fill_value=999999,
+                dtype=int64)
+
+    Correlation with a different mode:
+
+    >>> a = np.ma.array([1, 2, 3])
+    >>> v = np.ma.array([0, 1, 0])
+    >>> np.ma.correlate(a, v, mode='full')
+    masked_array(data=[0, 1, 2, 3, 0],
+                 mask=[False, False, False, False, False],
+           fill_value=999999)
+
     """
     return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)

From 3220f9cd6b31cd0008e038f4e78e310ef4f34a93 Mon Sep 17 00:00:00 2001
From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com>
Date: Tue, 2 Jul 2024 22:04:44 +0530
Subject: [PATCH 681/980] DOC: Make "Copies and views" self-contained

[skip azp] [skip cirrus]
---
 doc/source/user/basics.copies.rst | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/doc/source/user/basics.copies.rst b/doc/source/user/basics.copies.rst
index 482cbc189ec8..3148fbf2d27f 100644
--- a/doc/source/user/basics.copies.rst
+++ b/doc/source/user/basics.copies.rst
@@ -50,6 +50,7 @@ Views are created when elements can be addressed with offsets and strides in
 the original array. Hence, basic indexing always creates views.
 For example::
 
+    >>> import numpy as np
     >>> x = np.arange(10)
     >>> x
     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
@@ -60,13 +61,14 @@ For example::
     >>> x
     array([ 0, 10, 11,  3,  4,  5,  6,  7,  8,  9])
     >>> y
-    array([10, 11]) 
+    array([10, 11])
 
 Here, ``y`` gets changed when ``x`` is changed because it is a view.
 
 :ref:`advanced-indexing`, on the other hand, always creates copies.
 For example::
 
+    >>> import numpy as np
     >>> x = np.arange(9).reshape(3, 3)
     >>> x
     array([[0, 1, 2],
            [3, 4, 5],
            [6, 7, 8]])
     >>> y = x[[1, 2]]
     >>> y
     array([[3, 4, 5],
            [6, 7, 8]])
     >>> y.base is None
     True
 
-Here, ``y`` is a copy, as signified by the :attr:`base <.ndarray.base>`
-attribute. We can also confirm this by assigning new values to ``x[[1, 2]]``
-which in turn will not affect ``y`` at all::
+ Here, ``y`` is a copy, as signified by the :attr:`base <.ndarray.base>`
+ attribute. We can also confirm this by assigning new values to ``x[[1, 2]]``
+ which in turn will not affect ``y`` at all::
 
     >>> x[[1, 2]] = [[10, 11, 12], [13, 14, 15]]
     >>> x
     array([[ 0,  1,  2],
            [10, 11, 12],
            [13, 14, 15]])
     >>> y
     array([[3, 4, 5],
            [6, 7, 8]])
 
 It must be noted here that during the assignment of ``x[[1, 2]]`` no view
-or copy is created as the assignment happens in-place. 
+or copy is created as the assignment happens in-place.
 
 Other operations
@@ -107,6 +109,7 @@ the reshaping cannot be done by modifying strides and requires a copy.
 In these cases, we can raise an error by assigning the new shape to the
 shape attribute of the array. For example::
For example:: + >>> import numpy as np >>> x = np.ones((2, 3)) >>> y = x.T # makes the array non-contiguous >>> y @@ -132,6 +135,7 @@ The :attr:`base <.ndarray.base>` attribute of the ndarray makes it easy to tell if an array is a view or a copy. The base attribute of a view returns the original array while it returns ``None`` for a copy. + >>> import numpy as np >>> x = np.arange(9) >>> x array([0, 1, 2, 3, 4, 5, 6, 7, 8]) From 3a4f8c70b8c2238929bcefc40c3995bd73b9e75c Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 2 Jul 2024 22:06:33 +0530 Subject: [PATCH 682/980] DOC: Make "Array creation" self-contained [skip azp] [skip cirrus] --- doc/source/user/basics.creation.rst | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst index c9773dc0fcd0..6c09adfdff54 100644 --- a/doc/source/user/basics.creation.rst +++ b/doc/source/user/basics.creation.rst @@ -35,6 +35,7 @@ respectively. Lists and tuples can define ndarray creation: :: + >>> import numpy as np >>> a1D = np.array([1, 2, 3, 4]) >>> a2D = np.array([[1, 2], [3, 4]]) >>> a3D = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) @@ -47,6 +48,7 @@ are handled in C/C++ functions. When values do not fit and you are using a ``dtype``, NumPy may raise an error:: + >>> import numpy as np >>> np.array([127, 128, 129], dtype=np.int8) Traceback (most recent call last): ... @@ -56,8 +58,9 @@ An 8-bit signed integer represents integers from -128 to 127. Assigning the ``int8`` array to integers outside of this range results in overflow. This feature can often be misunderstood. If you perform calculations with mismatching ``dtypes``, you can get unwanted -results, for example:: +results, for example:: + >>> import numpy as np >>> a = np.array([2, 3, 4], dtype=np.uint32) >>> b = np.array([5, 6, 7], dtype=np.uint32) >>> c_unsigned32 = a - b @@ -72,7 +75,7 @@ Notice when you perform operations with two arrays of the same perform operations with different ``dtype``, NumPy will assign a new type that satisfies all of the array elements involved in the computation, here ``uint32`` and ``int32`` can both be represented in -as ``int64``. +as ``int64``. The default NumPy behavior is to create arrays in either 32 or 64-bit signed integers (platform dependent and matches C ``long`` size) or double precision @@ -107,6 +110,7 @@ The 1D array creation functions e.g. :func:`numpy.linspace` and Check the documentation for complete information and examples. A few examples are shown:: + >>> import numpy as np >>> np.arange(10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> np.arange(2, 10, dtype=float) @@ -124,6 +128,7 @@ the ``stop`` value is sometimes included. spaced equally between the specified beginning and end values. For example: :: + >>> import numpy as np >>> np.linspace(1., 4., 6) array([1. , 1.6, 2.2, 2.8, 3.4, 4. ]) @@ -140,6 +145,7 @@ define properties of special matrices represented as 2D arrays. ``np.eye(n, m)`` defines a 2D identity matrix. The elements where i=j (row index and column index are equal) are 1 and the rest are 0, as such:: + >>> import numpy as np >>> np.eye(3) array([[1., 0., 0.], [0., 1., 0.], @@ -154,6 +160,7 @@ the diagonal *or* if given a 2D array returns a 1D array that is only the diagonal elements. 
The two array creation functions can be helpful while doing linear algebra, as such:: + >>> import numpy as np >>> np.diag([1, 2, 3]) array([[1, 0, 0], [0, 2, 0], @@ -172,7 +179,8 @@ of the Vandermonde matrix is a decreasing power of the input 1D array or list or tuple, ``x`` where the highest polynomial order is ``n-1``. This array creation routine is helpful in generating linear least squares models, as such:: - + + >>> import numpy as np >>> np.vander(np.linspace(0, 2, 5), 2) array([[0. , 1. ], [0.5, 1. ], @@ -202,6 +210,7 @@ and length along that dimension in a tuple or list. :func:`numpy.zeros` will create an array filled with 0 values with the specified shape. The default dtype is ``float64``:: + >>> import numpy as np >>> np.zeros((2, 3)) array([[0., 0., 0.], [0., 0., 0.]]) @@ -217,6 +226,7 @@ specified shape. The default dtype is ``float64``:: :func:`numpy.ones` will create an array filled with 1 values. It is identical to ``zeros`` in all other respects as such:: + >>> import numpy as np >>> np.ones((2, 3)) array([[1., 1., 1.], [1., 1., 1.]]) @@ -236,6 +246,7 @@ library. Below, two arrays are created with shapes (2,3) and (2,3,2), respectively. The seed is set to 42 so you can reproduce these pseudorandom numbers:: + >>> import numpy as np >>> from numpy.random import default_rng >>> default_rng(42).random((2,3)) array([[0.77395605, 0.43887844, 0.85859792], @@ -250,8 +261,9 @@ pseudorandom numbers:: :func:`numpy.indices` will create a set of arrays (stacked as a one-higher dimensioned array), one per dimension with each representing variation in that -dimension: :: +dimension:: + >>> import numpy as np >>> np.indices((3,3)) array([[[0, 0, 0], [1, 1, 1], @@ -272,6 +284,7 @@ elements to a new variable, you have to explicitly :func:`numpy.copy` the array, otherwise the variable is a view into the original array. Consider the following example:: + >>> import numpy as np >>> a = np.array([1, 2, 3, 4, 5, 6]) >>> b = a[:2] >>> b += 1 @@ -283,6 +296,7 @@ In this example, you did not create a new array. You created a variable, would get the same result by adding 1 to ``a[:2]``. If you want to create a *new* array, use the :func:`numpy.copy` array creation routine as such:: + >>> import numpy as np >>> a = np.array([1, 2, 3, 4]) >>> b = a[:2].copy() >>> b += 1 @@ -296,6 +310,7 @@ There are a number of routines to join existing arrays e.g. :func:`numpy.vstack` :func:`numpy.hstack`, and :func:`numpy.block`. Here is an example of joining four 2-by-2 arrays into a 4-by-4 array using ``block``:: + >>> import numpy as np >>> A = np.ones((2, 2)) >>> B = np.eye(2, 2) >>> C = np.zeros((2, 2)) @@ -354,6 +369,7 @@ and :func:`numpy.genfromtxt`. 
These functions have more involved use cases in Importing ``simple.csv`` is accomplished using :func:`numpy.loadtxt`:: + >>> import numpy as np >>> np.loadtxt('simple.csv', delimiter = ',', skiprows = 1) # doctest: +SKIP array([[0., 0.], [1., 1.], From 065a4a44d52ba43a2eaea6464bdc22ace51da3ad Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 2 Jul 2024 22:09:26 +0530 Subject: [PATCH 683/980] DOC: Make "Broadcasting" self-contained [skip azp] [skip cirrus] --- doc/source/user/basics.broadcasting.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst index a753767655c7..2b03817bba91 100644 --- a/doc/source/user/basics.broadcasting.rst +++ b/doc/source/user/basics.broadcasting.rst @@ -23,6 +23,7 @@ NumPy operations are usually done on pairs of arrays on an element-by-element basis. In the simplest case, the two arrays must have exactly the same shape, as in the following example: + >>> import numpy as np >>> a = np.array([1.0, 2.0, 3.0]) >>> b = np.array([2.0, 2.0, 2.0]) >>> a * b @@ -32,6 +33,7 @@ NumPy's broadcasting rule relaxes this constraint when the arrays' shapes meet certain constraints. The simplest broadcasting example occurs when an array and a scalar value are combined in an operation: +>>> import numpy as np >>> a = np.array([1.0, 2.0, 3.0]) >>> b = 2.0 >>> a * b @@ -162,6 +164,7 @@ Here are examples of shapes that do not broadcast:: An example of broadcasting when a 1-d array is added to a 2-d array:: + >>> import numpy as np >>> a = np.array([[ 0.0, 0.0, 0.0], ... [10.0, 10.0, 10.0], ... [20.0, 20.0, 20.0], @@ -209,6 +212,7 @@ Broadcasting provides a convenient way of taking the outer product (or any other outer operation) of two arrays. The following example shows an outer addition operation of two 1-d arrays:: + >>> import numpy as np >>> a = np.array([0.0, 10.0, 20.0, 30.0]) >>> b = np.array([1.0, 2.0, 3.0]) >>> a[:, np.newaxis] + b From 2157a4cee60dead15747eefd900f331eaeaa6cbd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 17:46:45 +0000 Subject: [PATCH 684/980] MAINT: Bump pypa/cibuildwheel from 2.19.1 to 2.19.2 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.19.1 to 2.19.2. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/932529cab190fafca8c735a551657247fa8f8eaf...7e5a838a63ac8128d71ab2dfd99e4634dd1bca09) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/free-threaded-wheels.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/free-threaded-wheels.yml b/.github/workflows/free-threaded-wheels.yml index 739e95d35117..bbcdd8dac6f2 100644 --- a/.github/workflows/free-threaded-wheels.yml +++ b/.github/workflows/free-threaded-wheels.yml @@ -142,7 +142,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@932529cab190fafca8c735a551657247fa8f8eaf # v2.19.1 + uses: pypa/cibuildwheel@7e5a838a63ac8128d71ab2dfd99e4634dd1bca09 # v2.19.2 env: CIBW_PRERELEASE_PYTHONS: True CIBW_FREE_THREADED_SUPPORT: True diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f8a90493c6d6..353fd04ecd23 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -154,7 +154,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@932529cab190fafca8c735a551657247fa8f8eaf # v2.19.1 + uses: pypa/cibuildwheel@7e5a838a63ac8128d71ab2dfd99e4634dd1bca09 # v2.19.2 env: CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 30301ed484be5a22a0b2e756ea09afc32159cf38 Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 2 Jul 2024 23:18:49 +0530 Subject: [PATCH 685/980] DOC: Fix indentation for "Inexact types" example --- doc/source/reference/arrays.scalars.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index e8588ef92b4f..c80e3f932377 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -189,7 +189,7 @@ Inexact types `format_float_positional` and `format_float_scientific`. This means that variables with equal binary values but whose datatypes are of - different precisions may display differently:: + different precisions may display differently: >>> import numpy as np From 25f9a99e2db092356b8392d48c92004949efdda6 Mon Sep 17 00:00:00 2001 From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> Date: Tue, 2 Jul 2024 23:26:31 +0530 Subject: [PATCH 686/980] DOC: Revert `default_rng` example text indentation [skip azp] [skip cirrus] --- doc/source/reference/random/index.rst | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index 84940fca3171..976a03a9a449 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -20,20 +20,14 @@ different distributions. 
     >>> import numpy as np
     >>> rng = np.random.default_rng()
-
-   Generate one random float uniformly distributed over the range [0, 1)
-
+    # Generate one random float uniformly distributed over the range [0, 1)
     >>> rng.random()  #doctest: +SKIP
     0.06369197489564249 # may vary
-
-   Generate an array of 10 numbers according to a unit Gaussian distribution
-
+    # Generate an array of 10 numbers according to a unit Gaussian distribution
     >>> rng.standard_normal(10)  #doctest: +SKIP
     array([-0.31018314, -1.8922078 , -0.3628523 , -0.63526532,  0.43181166, # may vary
            0.51640373,  1.25693945,  0.07779185,  0.84090247, -2.13406828])
-
-   Generate an array of 5 integers uniformly over the range [0, 10)
-
+    # Generate an array of 5 integers uniformly over the range [0, 10)
     >>> rng.integers(low=0, high=10, size=5)  #doctest: +SKIP
     array([8, 7, 6, 2, 0])  # may vary

From c2cc9ac34fdbbadd7f7d6c540f83f211f29b3faa Mon Sep 17 00:00:00 2001
From: Anne Gunn
Date: Thu, 27 Jun 2024 10:36:28 -0600
Subject: [PATCH 687/980] DOC: Change selected hardlinks to NEPs to intersphinx
 mappings

This commit changes links to NEPs from explicit https urls to
intersphinx :ref:s. Selected files are 'actual doc files' (my phrase),
not release notes, change logs, or the NEPs themselves. These changes
are a first installment towards addressing #26707.

[skip actions] [skip azp] [skip cirrus]
---
 doc/source/dev/development_environment.rst | 4 ++--
 doc/source/dev/howto-docs.rst              | 2 +-
 doc/source/numpy_2_0_migration_guide.rst   | 2 +-
 doc/source/reference/array_api.rst         | 8 ++++----
 doc/source/reference/c-api/strings.rst     | 2 +-
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst
index b1cc7d96ffe2..7af4f62c064b 100644
--- a/doc/source/dev/development_environment.rst
+++ b/doc/source/dev/development_environment.rst
@@ -205,7 +205,7 @@ since the linter runs as part of the CI pipeline.
 For more details on Style Guidelines:
 
 - `Python Style Guide`_
-- `C Style Guide`_
+- :ref:`NEP45`
 
 Rebuilding & cleaning the workspace
 -----------------------------------
@@ -327,7 +327,7 @@ typically packaged as ``python-dbg``) is highly recommended.
 .. _Waf: https://code.google.com/p/waf/
 .. _`match test names using python operators`: https://docs.pytest.org/en/latest/usage.html#specifying-tests-selecting-tests
 .. _`Python Style Guide`: https://www.python.org/dev/peps/pep-0008/
-.. _`C Style Guide`: https://numpy.org/neps/nep-0045-c_style_guide.html
+.. :ref:`NEP45`
 
 Understanding the code & getting started
 ----------------------------------------
diff --git a/doc/source/dev/howto-docs.rst b/doc/source/dev/howto-docs.rst
index 097456fad0b4..1eea77041740 100644
--- a/doc/source/dev/howto-docs.rst
+++ b/doc/source/dev/howto-docs.rst
@@ -79,7 +79,7 @@ ideas and feedback. If you want to alert us to a gap,
 
 If you're looking for subjects, our formal roadmap for documentation is a
 *NumPy Enhancement Proposal (NEP)*,
-`NEP 44 - Restructuring the NumPy Documentation `__.
+:ref:`NEP44`.
 It identifies areas where our docs need help and lists several additions
 we'd like to see, including :ref:`Jupyter notebooks `.
diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst
index 42fec6f4e4b0..1b588f012d0e 100644
--- a/doc/source/numpy_2_0_migration_guide.rst
+++ b/doc/source/numpy_2_0_migration_guide.rst
@@ -231,7 +231,7 @@ private.
 Please see the tables below for guidance on migration.
For most changes this means replacing it with a backwards compatible alternative. -Please refer to `NEP 52 `_ for more details. +Please refer to :ref:`NEP52` for more details. Main namespace -------------- diff --git a/doc/source/reference/array_api.rst b/doc/source/reference/array_api.rst index 6f130efc8ca8..c11d8e5eb9d2 100644 --- a/doc/source/reference/array_api.rst +++ b/doc/source/reference/array_api.rst @@ -13,7 +13,7 @@ NumPy aims to implement support for the `2023.12 version `__ and future versions of the standard - assuming that those future versions can be upgraded to given NumPy's -`backwards compatibility policy `__. +:ref:`backwards compatibility policy `. For usage guidelines for downstream libraries and end users who want to write code that will work with both NumPy and other array libraries, we refer to the @@ -33,8 +33,8 @@ rather than anything NumPy-specific, the `array-api-strict standard, via a separate ``numpy.array_api`` submodule. This module was marked as experimental (it emitted a warning on import) and removed in NumPy 2.0 because full support was included in the main namespace. - `NEP 47 `__ and - `NEP 56 `__ + :ref:`NEP 47 ` and + :ref:`NEP 56 ` describe the motivation and scope for implementing the array API standard in NumPy. @@ -57,7 +57,7 @@ an entry point. .. rubric:: Footnotes .. [1] With a few very minor exceptions, as documented in - `NEP 56 `__. + :ref:`NEP 56 `. The ``sum``, ``prod`` and ``trace`` behavior adheres to the 2023.12 version instead, as do function signatures; the only known incompatibility that may remain is that the standard forbids unsafe casts for in-place operators diff --git a/doc/source/reference/c-api/strings.rst b/doc/source/reference/c-api/strings.rst index 43d280d14e09..2e7dc34a337f 100644 --- a/doc/source/reference/c-api/strings.rst +++ b/doc/source/reference/c-api/strings.rst @@ -6,7 +6,7 @@ NpyString API .. versionadded:: 2.0 This API allows access to the UTF-8 string data stored in NumPy StringDType -arrays. See `NEP-55 `_ for +arrays. See :ref:`NEP-55 ` for more in-depth details into the design of StringDType. Examples From a2187452e10dea2addbbb4a7d082959b2a6de98e Mon Sep 17 00:00:00 2001 From: Anne Gunn Date: Mon, 1 Jul 2024 11:43:57 -0600 Subject: [PATCH 688/980] DOC: Remove redundant reference definition As pointed out by @melissawm, the NEP45 reference is predefined and does not need to have a line in the reference definition section at the bottom of the document. This correction is the next installment towards addressing #26707. [skip actions] [skip azp] [skip cirrus] --- doc/source/dev/development_environment.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index 7af4f62c064b..2c811568d90c 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -327,7 +327,6 @@ typically packaged as ``python-dbg``) is highly recommended. .. _Waf: https://code.google.com/p/waf/ .. _`match test names using python operators`: https://docs.pytest.org/en/latest/usage.html#specifying-tests-selecting-tests .. _`Python Style Guide`: https://www.python.org/dev/peps/pep-0008/ -.. 
:ref:`NEP45` Understanding the code & getting started ---------------------------------------- From f87d1915bc17c25edd6123d38f4b1adb91a64b7c Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Tue, 2 Jul 2024 22:52:23 +0300 Subject: [PATCH 689/980] DOC: fix a typo Co-authored-by: Matti Picus --- doc/source/dev/development_environment.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index 45992b17d123..08eb86cf7e39 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -117,7 +117,7 @@ argument to pytest:: To run "doctests" -- to check that the code examples in the documentation is correct -- use the `check-docs` spin command. It relies on the `scipy-docs` package, which provides several additional features on top of the standard library ``doctest`` -package. Install ``scipy-doctest`` and run on of:: +package. Install ``scipy-doctest`` and run one of:: $ spin check-docs -v $ spin check-docs numpy/linalg From 4e88577e7c71b7308ac3a8588bed2c13138011b0 Mon Sep 17 00:00:00 2001 From: Jules <57632293+JuliaPoo@users.noreply.github.com> Date: Wed, 3 Jul 2024 17:05:32 +0800 Subject: [PATCH 690/980] BENCH: Missing ufunc in benchmarks (#26841) * BENCH: ufunc account for alias and missing unfunc benchmarks now raise an exception --- benchmarks/benchmarks/bench_ufunc.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index ca96d8c22775..b7f711b9c58a 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -21,13 +21,23 @@ 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', - 'true_divide', 'trunc'] + 'true_divide', 'trunc', 'vecdot'] arrayfuncdisp = ['real', 'round'] +for name in ufuncs: + f = getattr(np, name, None) + if not isinstance(f, np.ufunc): + raise ValueError(f"Bench target `np.{name}` is not a ufunc") -for name in dir(np): - if isinstance(getattr(np, name, None), np.ufunc) and name not in ufuncs: - print("Missing ufunc %r" % (name,)) +all_ufuncs = (getattr(np, name, None) for name in dir(np)) +all_ufuncs = set(filter(lambda f: isinstance(f, np.ufunc), all_ufuncs)) +bench_ufuncs = set((getattr(np, name, None) for name in ufuncs)) + +missing_ufuncs = all_ufuncs - bench_ufuncs +if len(missing_ufuncs) > 0: + missing_ufunc_names = [f.__name__ for f in missing_ufuncs] + raise NotImplementedError( + "Missing benchmarks for ufuncs %r" % missing_ufunc_names) class ArrayFunctionDispatcher(Benchmark): From e8f7d3471c23be4fccebaaf3c33b0d5494800070 Mon Sep 17 00:00:00 2001 From: mattip Date: Wed, 3 Jul 2024 14:47:45 +0300 Subject: [PATCH 691/980] BUILD: clean out py2 stuff from npy_3kcompat.h --- doc/Py3K.rst | 903 ------------------ doc/source/f2py/code/var.pyf | 2 +- numpy/_core/include/numpy/npy_3kcompat.h | 291 +----- numpy/_core/src/multiarray/array_method.c | 1 + numpy/_core/src/multiarray/multiarraymodule.c | 2 +- numpy/f2py/cfuncs.py | 4 +- 6 files changed, 20 insertions(+), 1183 deletions(-) delete mode 100644 doc/Py3K.rst diff --git a/doc/Py3K.rst b/doc/Py3K.rst deleted file mode 100644 index 3f312f7ec53a..000000000000 --- a/doc/Py3K.rst +++ /dev/null @@ -1,903 +0,0 @@ -.. 
-*-rst-*- - -********************************************* -Developer notes on the transition to Python 3 -********************************************* - -:date: 2010-07-11 -:author: Charles R. Harris -:author: Pauli Virtanen - -General -======= - -NumPy has now been ported to Python 3. - -Some glitches may still be present; however, we are not aware of any -significant ones, the test suite passes. - - -Resources ---------- - -Information on porting to 3K: - -- https://wiki.python.org/moin/cporting -- https://wiki.python.org/moin/PortingExtensionModulesToPy3k - - -Prerequisites -------------- - -The Nose test framework has currently (Nov 2009) no released Python 3 -compatible version. Its 3K SVN branch, however, works quite well: - -- http://python-nose.googlecode.com/svn/branches/py3k - - -Known semantic changes on Py2 -============================= - -As a side effect, the Py3 adaptation has caused the following semantic -changes that are visible on Py2. - -* Objects (except bytes and str) that implement the PEP 3118 array interface - will behave as ndarrays in `array(...)` and `asarray(...)`; the same way - as if they had ``__array_interface__`` defined. - -* Otherwise, there are no known semantic changes. - - -Known semantic changes on Py3 -============================= - -The following semantic changes have been made on Py3: - -* Division: integer division is by default true_divide, also for arrays. - -* Dtype field names are Unicode. - -* Only unicode dtype field titles are included in fields dict. - -* :pep:`3118` buffer objects will behave differently from Py2 buffer objects - when used as an argument to `array(...)`, `asarray(...)`. - - In Py2, they would cast to an object array. - - In Py3, they cast similarly as objects having an - ``__array_interface__`` attribute, ie., they behave as if they were - an ndarray view on the data. - - - -Python code -=========== - - -2to3 in setup.py ----------------- - -Currently, setup.py calls 2to3 automatically to convert Python sources -to Python 3 ones, and stores the results under:: - - build/py3k - -Only changed files will be re-converted when setup.py is called a second -time, making development much faster. - -Currently, this seems to handle all of the necessary Python code -conversion. - -Not all of the 2to3 transformations are appropriate for all files. -Especially, 2to3 seems to be quite trigger-happy in replacing e.g. -``unicode`` by ``str`` which causes problems in ``defchararray.py``. -For files that need special handling, add entries to -``tools/py3tool.py``. - - - -numpy.compat.py3k ------------------ - -There are some utility functions needed for 3K compatibility in -``numpy.compat.py3k`` -- they can be imported from ``numpy.compat``: - -- bytes, unicode: bytes and unicode constructors -- asbytes: convert string to bytes (no-op on Py2) -- asbytes_nested: convert strings in lists to Bytes -- asunicode: convert string to unicode -- asunicode_nested: convert strings in lists to Unicode -- asstr: convert item to the str type -- getexception: get current exception (see below) -- isfileobj: detect Python file objects -- strchar: character for Unicode (Py3) or Strings (Py2) -- open_latin1: open file in the latin1 text mode - -More can be added as needed. - - -numpy.f2py ----------- - -F2py is ported to Py3. - - -Bytes vs. strings ------------------ - -At many points in NumPy, bytes literals are needed. These can be created via -numpy.compat.asbytes and asbytes_nested. 
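
A rough sketch of how the helpers listed above behave in practice. This
illustration assumes a NumPy version that still ships ``numpy.compat``
(the module was itself removed again in the NumPy 2.0 cleanup); in modern
code, use ``bytes``/``str`` with explicit ``encode()``/``decode()`` instead::

    from numpy.compat import asbytes, asunicode, asbytes_nested

    assert asbytes('abc') == b'abc'       # str -> bytes (no-op on Py2)
    assert asunicode(b'abc') == 'abc'     # bytes -> str
    # asbytes_nested walks nested lists, converting every string it finds
    assert asbytes_nested(['a', ['b', 'c']]) == [b'a', [b'b', b'c']]
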
- - -Exception syntax ----------------- - -Syntax change: "except FooException, bar:" -> "except FooException as bar:" - -This is taken care by 2to3, however. - - -Relative imports ----------------- - -The new relative import syntax, - - from . import foo - -is not available on Py2.4, so we can't simply use it. - -Using absolute imports everywhere is probably OK, if they just happen -to work. - -2to3, however, converts the old syntax to new syntax, so as long as we -use the converter, it takes care of most parts. - - -Print ------ - -The Print statement changed to a builtin function in Py3. - -Also this is taken care of by 2to3. - -``types`` module ----------------- - -The following items were removed from `types` module in Py3: - -- StringType (Py3: `bytes` is equivalent, to some degree) -- InstanceType (Py3: ???) -- IntType (Py3: no equivalent) -- LongType (Py3: equivalent `long`) -- FloatType (Py3: equivalent `float`) -- BooleanType (Py3: equivalent `bool`) -- ComplexType (Py3: equivalent `complex`) -- UnicodeType (Py3: equivalent `str`) -- BufferType (Py3: more-or-less equivalent `memoryview`) - -In ``numerictypes.py``, the "common" types were replaced by their -plain equivalents, and `IntType` was dropped. - - -numpy._core.numerictypes ------------------------ - -In numerictypes, types on Python 3 were changed so that: - -=========== ============ -Scalar type Value -=========== ============ -str_ This is the basic Unicode string type on Py3 -bytes_ This is the basic Byte-string type on Py3 -string_ bytes_ alias -unicode_ str_ alias -=========== ============ - - -numpy.loadtxt et al -------------------- - -These routines are difficult to duck-type to read both Unicode and -Bytes input. - -I assumed they are meant for reading Bytes streams -- this is probably -the far more common use case with scientific data. - - -Cyclic imports --------------- - -Python 3 is less forgiving about cyclic imports than Python 2. Cycles -need to be broken to have the same code work both on Python 2 and 3. - - -C code -====== - - -NPY_PY3K --------- - -A #define in config.h, defined when building for Py3. - -.. todo:: - - Currently, this is generated as a part of the config. - Is this sensible (we could also use Py_VERSION_HEX)? - - -private/npy_3kcompat.h ----------------------- - -Convenience macros for Python 3 support: - -- PyInt -> PyLong on Py3 -- PyString -> PyBytes on Py3 -- PyUString -> PyUnicode on Py3 and PyString on Py2 -- PyBytes on Py2 -- PyUnicode_ConcatAndDel, PyUnicode_Concat2 -- Py_SIZE et al., for older Python versions -- npy_PyFile_Dup, etc. to get FILE* from Py3 file objects -- PyObject_Cmp, convenience comparison function on Py3 -- NpyCapsule_* helpers: PyCObject - -Any new ones that need to be added should be added in this file. - -.. todo:: - - Remove PyString_* eventually -- having a call to one of these in NumPy - sources is a sign of an error... - - -ob_type, ob_size ----------------- - -These use Py_SIZE, etc. macros now. The macros are also defined in -npy_3kcompat.h for the Python versions that don't have them natively. - - -Py_TPFLAGS_CHECKTYPES ---------------------- - -Python 3 no longer supports type coercion in arithmetic. - -Py_TPFLAGS_CHECKTYPES is now on by default, and so the C-level -interface, ``nb_*`` methods, still unconditionally receive whatever -types as their two arguments. 
- -However, this will affect Python-level code: previously if you -inherited from a Py_TPFLAGS_CHECKTYPES enabled class that implemented -a ``__mul__`` method, the same ``__mul__`` method would still be -called also as when a ``__rmul__`` was required, but with swapped -arguments (see Python/Objects/typeobject.c:wrap_binaryfunc_r). -However, on Python 3, arguments are swapped only if both are of same -(sub-)type, and otherwise things fail. - -This means that ``ndarray``-derived subclasses must now implement all -relevant ``__r*__`` methods, since they cannot any more automatically -fall back to ndarray code. - - -PyNumberMethods ---------------- - -The structures have been converted to the new format: - -- number.c -- scalartypes.c.src -- scalarmathmodule.c.src - -The slots np_divide, np_long, np_oct, np_hex, and np_inplace_divide -have gone away. The slot np_int is what np_long used to be, tp_divide -is now tp_floor_divide, and np_inplace_divide is now -np_inplace_floor_divide. - -These have simply been #ifdef'd out on Py3. - -The Py2/Py3 compatible structure definition looks like:: - - static PyNumberMethods @name@_as_number = { - (binaryfunc)0, /*nb_add*/ - (binaryfunc)0, /*nb_subtract*/ - (binaryfunc)0, /*nb_multiply*/ - #if defined(NPY_PY3K) - #else - (binaryfunc)0, /*nb_divide*/ - #endif - (binaryfunc)0, /*nb_remainder*/ - (binaryfunc)0, /*nb_divmod*/ - (ternaryfunc)0, /*nb_power*/ - (unaryfunc)0, - (unaryfunc)0, /*nb_pos*/ - (unaryfunc)0, /*nb_abs*/ - #if defined(NPY_PY3K) - (inquiry)0, /*nb_bool*/ - #else - (inquiry)0, /*nb_nonzero*/ - #endif - (unaryfunc)0, /*nb_invert*/ - (binaryfunc)0, /*nb_lshift*/ - (binaryfunc)0, /*nb_rshift*/ - (binaryfunc)0, /*nb_and*/ - (binaryfunc)0, /*nb_xor*/ - (binaryfunc)0, /*nb_or*/ - #if defined(NPY_PY3K) - #else - 0, /*nb_coerce*/ - #endif - (unaryfunc)0, /*nb_int*/ - #if defined(NPY_PY3K) - (unaryfunc)0, /*nb_reserved*/ - #else - (unaryfunc)0, /*nb_long*/ - #endif - (unaryfunc)0, /*nb_float*/ - #if defined(NPY_PY3K) - #else - (unaryfunc)0, /*nb_oct*/ - (unaryfunc)0, /*nb_hex*/ - #endif - 0, /*inplace_add*/ - 0, /*inplace_subtract*/ - 0, /*inplace_multiply*/ - #if defined(NPY_PY3K) - #else - 0, /*inplace_divide*/ - #endif - 0, /*inplace_remainder*/ - 0, /*inplace_power*/ - 0, /*inplace_lshift*/ - 0, /*inplace_rshift*/ - 0, /*inplace_and*/ - 0, /*inplace_xor*/ - 0, /*inplace_or*/ - (binaryfunc)0, /*nb_floor_divide*/ - (binaryfunc)0, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ - (unaryfunc)NULL, /*nb_index*/ - }; - - - -PyBuffer (provider) -------------------- - -PyBuffer usage is widely spread in multiarray: - -1) The void scalar makes use of buffers -2) Multiarray has methods for creating buffers etc. explicitly -3) Arrays can be created from buffers etc. -4) The .data attribute of an array is a buffer - -Py3 introduces the PEP 3118 buffer protocol as the *only* protocol, -so we must implement it. - -The exporter parts of the PEP 3118 buffer protocol are currently -implemented in ``buffer.c`` for arrays, and in ``scalartypes.c.src`` -for generic array scalars. The generic array scalar exporter, however, -doesn't currently produce format strings, which needs to be fixed. - -Also some code also stops working when ``bf_releasebuffer`` is -defined. Most importantly, ``PyArg_ParseTuple("s#", ...)`` refuses to -return a buffer if ``bf_releasebuffer`` is present. For this reason, -the buffer interface for arrays is implemented currently *without* -defining ``bf_releasebuffer`` at all. 
This forces us to go through -some additional work. - -There are a couple of places that need further attention: - -- VOID_getitem - - In some cases, this returns a buffer object on Python 2. On Python 3, - there is no stand-alone buffer object, so we return a byte array instead. - -The Py2/Py3 compatible PyBufferMethods definition looks like:: - - NPY_NO_EXPORT PyBufferProcs array_as_buffer = { - #if !defined(NPY_PY3K) - #if PY_VERSION_HEX >= 0x02050000 - (readbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ - (writebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (segcountproc)array_getsegcount, /*bf_getsegcount*/ - (charbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ - #else - (getreadbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ - (getwritebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (getsegcountproc)array_getsegcount, /*bf_getsegcount*/ - (getcharbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x02060000 - (getbufferproc)array_getbuffer, /*bf_getbuffer*/ - (releasebufferproc)array_releasebuffer, /*bf_releasebuffer*/ - #endif - }; - -.. todo:: - - Produce PEP 3118 format strings for array scalar objects. - -.. todo:: - - There's stuff to clean up in numarray/_capi.c - - -PyBuffer (consumer) -------------------- - -There are two places in which we may want to be able to consume buffer -objects and cast them to ndarrays: - -1) `multiarray.frombuffer`, ie., ``PyArray_FromAny`` - - The frombuffer returns only arrays of a fixed dtype. It does not - make sense to support PEP 3118 at this location, since not much - would be gained from that -- the backward compatibility functions - using the old array interface still work. - - So no changes needed here. - -2) `multiarray.array`, ie., ``PyArray_FromAny`` - - In general, we would like to handle :pep:`3118` buffers in the same way - as ``__array_interface__`` objects. Hence, we want to be able to cast - them to arrays already in ``PyArray_FromAny``. - - Hence, ``PyArray_FromAny`` needs additions. - -There are a few caveats in allowing :pep:`3118` buffers in -``PyArray_FromAny``: - -a) `bytes` (and `str` on Py2) objects offer a buffer interface that - specifies them as 1-D array of bytes. - - Previously ``PyArray_FromAny`` has cast these to 'S#' dtypes. We - don't want to change this, since will cause problems in many places. - - We do, however, want to allow other objects that provide 1-D byte arrays - to be cast to 1-D ndarrays and not 'S#' arrays -- for instance, 'S#' - arrays tend to strip trailing NUL characters. - -So what is done in ``PyArray_FromAny`` currently is that: - -- Presence of :pep:`3118` buffer interface is checked before checking - for array interface. If it is present *and* the object is not - `bytes` object, then it is used for creating a view on the buffer. - -- We also check in ``discover_depth`` and ``_array_find_type`` for the - 3118 buffers, so that:: - - array([some_3118_object]) - - will treat the object similarly as it would handle an `ndarray`. - - However, again, bytes (and unicode) have priority and will not be - handled as buffer objects. - -This amounts to possible semantic changes: - -- ``array(buffer)`` will no longer create an object array - ``array([buffer], dtype='O')``, but will instead expand to a view - on the buffer. - -.. todo:: - - Take a second look at places that used PyBuffer_FromMemory and - PyBuffer_FromReadWriteMemory -- what can be done with these? - -.. 
todo:: - - There's some buffer code in numarray/_capi.c that needs to be addressed. - - -PyBuffer (object) ------------------ - -Since there is a native buffer object in Py3, the `memoryview`, the -`newbuffer` and `getbuffer` functions are removed from `multiarray` in -Py3: their functionality is taken over by the new `memoryview` object. - - -PyString --------- - -There is no PyString in Py3, everything is either Bytes or Unicode. -Unicode is also preferred in many places, e.g., in __dict__. - -There are two issues related to the str/bytes change: - -1) Return values etc. should prefer unicode -2) The 'S' dtype - -This entry discusses return values etc. only, the 'S' dtype is a -separate topic. - -All uses of PyString in NumPy should be changed to one of - -- PyBytes: one-byte character strings in Py2 and Py3 -- PyUString (defined in npy_3kconfig.h): PyString in Py2, PyUnicode in Py3 -- PyUnicode: UCS in Py2 and Py3 - -In many cases the conversion only entails replacing PyString with -PyUString. - -PyString is currently defined to PyBytes in npy_3kcompat.h, for making -things to build. This definition will be removed when Py3 support is -finished. - -Where ``*_AsStringAndSize`` is used, more care needs to be taken, as -encoding Unicode to Bytes may needed. If this cannot be avoided, the -encoding should be ASCII, unless there is a very strong reason to do -otherwise. Especially, I don't believe we should silently fall back to -UTF-8 -- raising an exception may be a better choice. - -Exceptions should use PyUnicode_AsUnicodeEscape -- this should result -to an ASCII-clean string that is appropriate for the exception -message. - -Some specific decisions that have been made so far: - -* descriptor.c: dtype field names are UString - - At some places in NumPy code, there are some guards for Unicode field - names. However, the dtype constructor accepts only strings as field names, - so we should assume field names are *always* UString. - -* descriptor.c: field titles can be arbitrary objects. - If they are UString (or, on Py2, Bytes or Unicode), insert to fields dict. - -* descriptor.c: dtype strings are Unicode. - -* descriptor.c: datetime tuple contains Bytes only. - -* repr() and str() should return UString - -* comparison between Unicode and Bytes is not defined in Py3 - -* Type codes in numerictypes.typeInfo dict are Unicode - -* Func name in errobj is Bytes (should be forced to ASCII) - -.. todo:: - - tp_doc -- it's a char* pointer, but what is the encoding? - Check esp. lib/src/_compiled_base - - Currently, UTF-8 is assumed. - -.. todo:: - - ufunc names -- again, what's the encoding? - -.. todo:: - - Cleanup to do later on: Replace all occurrences of PyString by - PyBytes, PyUnicode, or PyUString. - -.. todo:: - - Revise errobj decision? - -.. todo:: - - Check that non-UString field names are not accepted anywhere. - - -PyUnicode ---------- - -PyUnicode in Py3 is pretty much as it was in Py2, except that it is -now the only "real" string type. - -In Py3, Unicode and Bytes are not comparable, ie., 'a' != b'a'. NumPy -comparison routines were handled to act in the same way, leaving -comparison between Unicode and Bytes undefined. - -.. todo:: - - Check that indeed all comparison routines were changed. - - -Fate of the 'S' dtype ---------------------- - -On Python 3, the 'S' dtype will still be Bytes. - -However,:: - - str, str_ == unicode_ - - -PyInt ------ - -There is no limited-range integer type any more in Py3. It makes no -sense to inherit NumPy ints from Py3 ints. 
- -Currently, the following is done: - -1) NumPy's integer types no longer inherit from Python integer. -2) int is taken dtype-equivalent to NPY_LONG -3) ints are converted to NPY_LONG - -PyInt methods are currently replaced by PyLong, via macros in npy_3kcompat.h. - -Dtype decision rules were changed accordingly, so that NumPy understands -Py3 int translate to NPY_LONG as far as dtypes are concerned. - -array([1]).dtype will be the default NPY_LONG integer. - -.. todo:: - - Not inheriting from `int` on Python 3 makes the following not work: - ``np.intp("0xff", 16)`` -- because the NumPy type does not take - the second argument. This could perhaps be fixed... - - -Divide ------- - -The Divide operation is no more. - -Calls to PyNumber_Divide were replaced by FloorDivide or TrueDivide, -as appropriate. - -The PyNumberMethods entry is #ifdef'd out on Py3, see above. - - -tp_compare, PyObject_Compare ----------------------------- - -The compare method has vanished, and is replaced with richcompare. -We just #ifdef the compare methods out on Py3. - -New richcompare methods were implemented for: - -* flagsobject.c - -On the consumer side, we have a convenience wrapper in npy_3kcompat.h -providing PyObject_Cmp also on Py3. - - -Pickling --------- - -The ndarray and dtype __setstate__ were modified to be -backward-compatible with Py3: they need to accept a Unicode endian -character, and Unicode data since that's what Py2 str is unpickled to -in Py3. - -An encoding assumption is required for backward compatibility: the user -must do - - loads(f, encoding='latin1') - -to successfully read pickles created by Py2. - -.. todo:: - - Forward compatibility? Is it even possible? - For sure, we are not knowingly going to store data in PyUnicode, - so probably the only way for forward compatibility is to implement - a custom Unpickler for Py2? - -.. todo:: - - If forward compatibility is not possible, aim to store also the endian - character as Bytes... - - -Module initialization ---------------------- - -The module initialization API changed in Python 3.1. - -Most NumPy modules are now converted. - - -PyTypeObject ------------- - -The PyTypeObject of py3k is binary compatible with the py2k version and the -old initializers should work. However, there are several considerations to -keep in mind. - -1) Because the first three slots are now part of a struct some compilers issue - warnings if they are initialized in the old way. - -2) The compare slot has been made reserved in order to preserve binary - compatibility while the tp_compare function went away. The tp_richcompare - function has replaced it and we need to use that slot instead. This will - likely require modifications in the searchsorted functions and generic sorts - that currently use the compare function. - -3) The previous numpy practice of initializing the COUNT_ALLOCS slots was - bogus. They are not supposed to be explicitly initialized and were out of - place in any case because an extra base slot was added in python 2.6. - -Because of these facts it is better to use #ifdefs to bring the old -initializers up to py3k snuff rather than just fill the tp_richcompare -slot. They also serve to mark the places where changes have been -made. Note that explicit initialization can stop once none of the -remaining entries are non-zero, because zero is the default value that -variables with non-local linkage receive. 
- -The Py2/Py3 compatible TypeObject definition looks like:: - - NPY_NO_EXPORT PyTypeObject Foo_Type = { - #if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(0,0) - #else - PyObject_HEAD_INIT(0) - 0, /* ob_size */ - #endif - "numpy.foo" /* tp_name */ - 0, /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - #if defined(NPY_PY3K) - (void *)0, /* tp_reserved */ - #else - 0, /* tp_compare */ - #endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - 0 /* tp_version_tag (2.6) */ - }; - - - -PySequenceMethods ------------------ - -Types with tp_as_sequence defined - -* multiarray/descriptor.c -* multiarray/scalartypes.c.src -* multiarray/arrayobject.c - -PySequenceMethods in py3k are binary compatible with py2k, but some of the -slots have gone away. I suspect this means some functions need redefining so -the semantics of the slots needs to be checked:: - - PySequenceMethods foo_sequence_methods = { - (lenfunc)0, /* sq_length */ - (binaryfunc)0, /* sq_concat */ - (ssizeargfunc)0, /* sq_repeat */ - (ssizeargfunc)0, /* sq_item */ - (void *)0, /* nee sq_slice */ - (ssizeobjargproc)0, /* sq_ass_item */ - (void *)0, /* nee sq_ass_slice */ - (objobjproc)0, /* sq_contains */ - (binaryfunc)0, /* sq_inplace_concat */ - (ssizeargfunc)0 /* sq_inplace_repeat */ - }; - - -PyMappingMethods ----------------- - -Types with tp_as_mapping defined - -* multiarray/descriptor.c -* multiarray/iterators.c -* multiarray/scalartypes.c.src -* multiarray/flagsobject.c -* multiarray/arrayobject.c - -PyMappingMethods in py3k look to be the same as in py2k. The semantics -of the slots needs to be checked:: - - PyMappingMethods foo_mapping_methods = { - (lenfunc)0, /* mp_length */ - (binaryfunc)0, /* mp_subscript */ - (objobjargproc)0 /* mp_ass_subscript */ - }; - - -PyFile ------- - -Many of the PyFile items have disappeared: - -1) PyFile_Type -2) PyFile_AsFile -3) PyFile_FromString - -Most importantly, in Py3 there is no way to extract a FILE* pointer -from the Python file object. There are, however, new PyFile_* functions -for writing and reading data from the file. - -Compatibility wrappers that return a dup-ed `fdopen` file pointer are -in private/npy_3kcompat.h. This causes more flushing to be necessary, -but it appears there is no alternative solution. The FILE pointer so -obtained must be closed with fclose after use. - -.. todo:: - - Should probably be done much later on... - - Adapt all NumPy I/O to use the PyFile_* methods or the low-level - IO routines. In any case, it's unlikely that C stdio can be used any more. - - Perhaps using PyFile_* makes numpy.tofile e.g. to a gzip to work? - - -READONLY --------- - -The RO alias for READONLY is no more. 
These were replaced, as READONLY is present also on Py2.


PyOS
----

Deprecations:

1) PyOS_ascii_strtod -> PyOS_double_from_string;
   curiously enough, PyOS_ascii_strtod is not only deprecated but also
   causes segfaults


PyInstance
----------

There are some checks for PyInstance in ``common.c`` and ``ctors.c``.

Currently, ``PyInstance_Check`` is just #ifdef'd out for Py3. This is,
possibly, not the correct thing to do.

.. todo::

   Do the right thing for PyInstance checks.


PyCObject / PyCapsule
---------------------

The PyCObject API is removed in Python 3.2, so we need to rewrite it
using PyCapsule.

NumPy was changed to use the Capsule API, using NpyCapsule* wrappers.
diff --git a/doc/source/f2py/code/var.pyf b/doc/source/f2py/code/var.pyf
index 8275ff3afe21..b7c080682a62 100644
--- a/doc/source/f2py/code/var.pyf
+++ b/doc/source/f2py/code/var.pyf
@@ -5,7 +5,7 @@ python module var
 '''
   interface
     usercode '''
-     PyDict_SetItemString(d,"BAR",PyInt_FromLong(BAR));
+     PyDict_SetItemString(d,"BAR",PyLong_FromLong(BAR));
 '''
   end interface
 end python module
diff --git a/numpy/_core/include/numpy/npy_3kcompat.h b/numpy/_core/include/numpy/npy_3kcompat.h
index 62fde943aacc..8688393beb57 100644
--- a/numpy/_core/include/numpy/npy_3kcompat.h
+++ b/numpy/_core/include/numpy/npy_3kcompat.h
@@ -5,8 +5,7 @@
  * hence the "3k" naming.
  *
  * If you want to use this for your own projects, it's recommended to make a
- * copy of it. Although the stuff below is unlikely to change, we don't provide
- * strong backwards compatibility guarantees at the moment.
+ * copy of it. We don't provide backwards compatibility guarantees.
  */
 
 #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
@@ -15,158 +14,12 @@
 
 #include <Python.h>
 #include <stdio.h>
 
-#ifndef NPY_PY3K
-#define NPY_PY3K 1
-#endif
-
-#include "numpy/npy_common.h"
-#include "numpy/ndarrayobject.h"
+#include "npy_common.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-/*
- * PyInt -> PyLong
- */
-
-
-/*
- * This is a renamed copy of the Python non-limited API function _PyLong_AsInt. It is
- * included here because it is missing from the PyPy API. It completes the PyLong_As*
- * group of functions and can be useful in replacing PyInt_Check.
- */
-static inline int
-Npy__PyLong_AsInt(PyObject *obj)
-{
-    int overflow;
-    long result = PyLong_AsLongAndOverflow(obj, &overflow);
-
-    /* INT_MAX and INT_MIN are defined in Python.h */
-    if (overflow || result > INT_MAX || result < INT_MIN) {
-        /* XXX: could be cute and give a different
-           message for overflow == -1 */
-        PyErr_SetString(PyExc_OverflowError,
-                "Python int too large to convert to C int");
-        return -1;
-    }
-    return (int)result;
-}
-
-
-#if defined(NPY_PY3K)
-/* Return True only if the long fits in a C long */
-static inline int PyInt_Check(PyObject *op) {
-    int overflow = 0;
-    if (!PyLong_Check(op)) {
-        return 0;
-    }
-    PyLong_AsLongAndOverflow(op, &overflow);
-    return (overflow == 0);
-}
-
-
-#define PyInt_FromLong PyLong_FromLong
-#define PyInt_AsLong PyLong_AsLong
-#define PyInt_AS_LONG PyLong_AsLong
-#define PyInt_AsSsize_t PyLong_AsSsize_t
-#define PyNumber_Int PyNumber_Long
-
-/* NOTE:
- *
- * Since the PyLong type is very different from the fixed-range PyInt,
- * we don't define PyInt_Type -> PyLong_Type.
- */ -#endif /* NPY_PY3K */ - -/* Py3 changes PySlice_GetIndicesEx' first argument's type to PyObject* */ -#ifdef NPY_PY3K -# define NpySlice_GetIndicesEx PySlice_GetIndicesEx -#else -# define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \ - PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength) -#endif - -#if PY_VERSION_HEX < 0x030900a4 - /* Introduced in https://github.com/python/cpython/commit/d2ec81a8c99796b51fb8c49b77a7fe369863226f */ - #define Py_SET_TYPE(obj, type) ((Py_TYPE(obj) = (type)), (void)0) - /* Introduced in https://github.com/python/cpython/commit/b10dc3e7a11fcdb97e285882eba6da92594f90f9 */ - #define Py_SET_SIZE(obj, size) ((Py_SIZE(obj) = (size)), (void)0) - /* Introduced in https://github.com/python/cpython/commit/c86a11221df7e37da389f9c6ce6e47ea22dc44ff */ - #define Py_SET_REFCNT(obj, refcnt) ((Py_REFCNT(obj) = (refcnt)), (void)0) -#endif - - -#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x) - -/* - * PyString -> PyBytes - */ - -#if defined(NPY_PY3K) - -#define PyString_Type PyBytes_Type -#define PyString_Check PyBytes_Check -#define PyStringObject PyBytesObject -#define PyString_FromString PyBytes_FromString -#define PyString_FromStringAndSize PyBytes_FromStringAndSize -#define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_AsStringAndSize PyBytes_AsStringAndSize -#define PyString_FromFormat PyBytes_FromFormat -#define PyString_Concat PyBytes_Concat -#define PyString_ConcatAndDel PyBytes_ConcatAndDel -#define PyString_AsString PyBytes_AsString -#define PyString_GET_SIZE PyBytes_GET_SIZE -#define PyString_Size PyBytes_Size - -#define PyUString_Type PyUnicode_Type -#define PyUString_Check PyUnicode_Check -#define PyUStringObject PyUnicodeObject -#define PyUString_FromString PyUnicode_FromString -#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize -#define PyUString_FromFormat PyUnicode_FromFormat -#define PyUString_Concat PyUnicode_Concat2 -#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel -#define PyUString_GET_SIZE PyUnicode_GET_SIZE -#define PyUString_Size PyUnicode_Size -#define PyUString_InternFromString PyUnicode_InternFromString -#define PyUString_Format PyUnicode_Format - -#define PyBaseString_Check(obj) (PyUnicode_Check(obj)) - -#else - -#define PyBytes_Type PyString_Type -#define PyBytes_Check PyString_Check -#define PyBytesObject PyStringObject -#define PyBytes_FromString PyString_FromString -#define PyBytes_FromStringAndSize PyString_FromStringAndSize -#define PyBytes_AS_STRING PyString_AS_STRING -#define PyBytes_AsStringAndSize PyString_AsStringAndSize -#define PyBytes_FromFormat PyString_FromFormat -#define PyBytes_Concat PyString_Concat -#define PyBytes_ConcatAndDel PyString_ConcatAndDel -#define PyBytes_AsString PyString_AsString -#define PyBytes_GET_SIZE PyString_GET_SIZE -#define PyBytes_Size PyString_Size - -#define PyUString_Type PyString_Type -#define PyUString_Check PyString_Check -#define PyUStringObject PyStringObject -#define PyUString_FromString PyString_FromString -#define PyUString_FromStringAndSize PyString_FromStringAndSize -#define PyUString_FromFormat PyString_FromFormat -#define PyUString_Concat PyString_Concat -#define PyUString_ConcatAndDel PyString_ConcatAndDel -#define PyUString_GET_SIZE PyString_GET_SIZE -#define PyUString_Size PyString_Size -#define PyUString_InternFromString PyString_InternFromString -#define PyUString_Format PyString_Format - -#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj)) - -#endif /* NPY_PY3K */ - /* * Macros 
to protect CRT calls against instant termination when passed an * invalid parameter (https://bugs.python.org/issue23524). @@ -188,19 +41,6 @@ extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler; #endif /* _MSC_VER >= 1900 */ -static inline void -PyUnicode_ConcatAndDel(PyObject **left, PyObject *right) -{ - Py_SETREF(*left, PyUnicode_Concat(*left, right)); - Py_DECREF(right); -} - -static inline void -PyUnicode_Concat2(PyObject **left, PyObject *right) -{ - Py_SETREF(*left, PyUnicode_Concat(*left, right)); -} - /* * PyFile_* compatibility */ @@ -217,13 +57,6 @@ npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) npy_off_t pos; FILE *handle; - /* For Python 2 PyFileObject, use PyFile_AsFile */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return PyFile_AsFile(file); - } -#endif - /* Flush first to ensure things end up in the file in the correct order */ ret = PyObject_CallMethod(file, "flush", ""); if (ret == NULL) { @@ -335,13 +168,6 @@ npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) PyObject *ret, *io, *io_raw; npy_off_t position; - /* For Python 2 PyFileObject, do nothing */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return 0; - } -#endif - position = npy_ftell(handle); /* Close the FILE* handle */ @@ -395,24 +221,6 @@ npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) return 0; } -static inline int -npy_PyFile_Check(PyObject *file) -{ - int fd; - /* For Python 2, check if it is a PyFileObject */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return 1; - } -#endif - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - PyErr_Clear(); - return 0; - } - return 1; -} - static inline PyObject* npy_PyFile_OpenFile(PyObject *filename, const char *mode) { @@ -438,39 +246,11 @@ npy_PyFile_CloseFile(PyObject *file) } -/* This is a copy of _PyErr_ChainExceptions - */ -static inline void -npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) -{ - if (exc == NULL) - return; - - if (PyErr_Occurred()) { - /* only py3 supports this anyway */ - #ifdef NPY_PY3K - PyObject *exc2, *val2, *tb2; - PyErr_Fetch(&exc2, &val2, &tb2); - PyErr_NormalizeException(&exc, &val, &tb); - if (tb != NULL) { - PyException_SetTraceback(val, tb); - Py_DECREF(tb); - } - Py_DECREF(exc); - PyErr_NormalizeException(&exc2, &val2, &tb2); - PyException_SetContext(val2, val); - PyErr_Restore(exc2, val2, tb2); - #endif - } - else { - PyErr_Restore(exc, val, tb); - } -} +#define npy_PyErr_ChainExceptions _PyErr_ChainExceptions /* This is a copy of _PyErr_ChainExceptions, with: - * - a minimal implementation for python 2 - * - __cause__ used instead of __context__ + * __cause__ used instead of __context__ */ static inline void npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) @@ -479,64 +259,23 @@ npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) return; if (PyErr_Occurred()) { - /* only py3 supports this anyway */ - #ifdef NPY_PY3K - PyObject *exc2, *val2, *tb2; - PyErr_Fetch(&exc2, &val2, &tb2); - PyErr_NormalizeException(&exc, &val, &tb); - if (tb != NULL) { - PyException_SetTraceback(val, tb); - Py_DECREF(tb); - } - Py_DECREF(exc); - PyErr_NormalizeException(&exc2, &val2, &tb2); - PyException_SetCause(val2, val); - PyErr_Restore(exc2, val2, tb2); - #endif + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + 
PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetCause(val2, val); + PyErr_Restore(exc2, val2, tb2); } else { PyErr_Restore(exc, val, tb); } } -/* - * PyObject_Cmp - */ -#if defined(NPY_PY3K) -static inline int -PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) -{ - int v; - v = PyObject_RichCompareBool(i1, i2, Py_LT); - if (v == 1) { - *cmp = -1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_GT); - if (v == 1) { - *cmp = 1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_EQ); - if (v == 1) { - *cmp = 0; - return 1; - } - else { - *cmp = 0; - return -1; - } -} -#endif - /* * PyCObject functions adapted to PyCapsules. * diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index ac8a73aea005..f09e560b0607 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -31,6 +31,7 @@ #define _MULTIARRAYMODULE #include +#include #include "arrayobject.h" #include "array_coercion.h" #include "array_method.h" diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index c099b63f4633..e8bc75c1e359 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4702,7 +4702,7 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict)) DUAL_INHERIT(CDouble, Complex, ComplexFloating); SINGLE_INHERIT(CLongDouble, ComplexFloating); - DUAL_INHERIT2(String, String, Character); + DUAL_INHERIT2(String, Bytes, Character); DUAL_INHERIT2(Unicode, Unicode, Character); SINGLE_INHERIT(Void, Flexible); diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 4328a6e5004c..4f966e58e6af 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -878,13 +878,13 @@ PyObject* tmp = NULL; if (PyLong_Check(obj)) { - *v = Npy__PyLong_AsInt(obj); + *v = _PyLong_AsInt(obj); return !(*v == -1 && PyErr_Occurred()); } tmp = PyNumber_Long(obj); if (tmp) { - *v = Npy__PyLong_AsInt(tmp); + *v = _PyLong_AsInt(tmp); Py_DECREF(tmp); return !(*v == -1 && PyErr_Occurred()); } From 1cc89e76cfbe8887727639d906b1339d87ca79a3 Mon Sep 17 00:00:00 2001 From: mattip Date: Wed, 3 Jul 2024 15:00:40 +0300 Subject: [PATCH 692/980] DOC: add a release note --- doc/release/upcoming_changes/26842.c_api.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/26842.c_api.rst diff --git a/doc/release/upcoming_changes/26842.c_api.rst b/doc/release/upcoming_changes/26842.c_api.rst new file mode 100644 index 000000000000..7e50dd385006 --- /dev/null +++ b/doc/release/upcoming_changes/26842.c_api.rst @@ -0,0 +1,5 @@ +Many shims removed from npy_3kcompat.h +-------------------------------------- +Many of the old shims and helper functions were removed from +``npy_3kcompat.h``. If you find yourself in need of these, vendor the previous +version of the file into your codebase. 
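
The two chaining helpers in ``npy_3kcompat.h`` mirror Python-level exception
chaining: ``npy_PyErr_ChainExceptions`` sets the inner exception as
``__context__`` (implicit chaining), while ``npy_PyErr_ChainExceptionsCause``
sets it as ``__cause__``, the way ``raise new from old`` does. A minimal
pure-Python sketch of that distinction (illustrative only; nothing here is
NumPy-specific)::

    try:
        try:
            raise ValueError("original error")
        except ValueError as original:
            # Explicit chaining: sets __cause__ (and __context__ as a side
            # effect) -- the analogue of npy_PyErr_ChainExceptionsCause.
            raise TypeError("follow-up error") from original
    except TypeError as exc:
        assert isinstance(exc.__cause__, ValueError)
        # A plain `raise` inside the except block would set only
        # __context__ -- the analogue of npy_PyErr_ChainExceptions.
        assert exc.__context__ is exc.__cause__
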
From 64cffe16c5072fff7ccc19c3d9d7ba4d80de6db4 Mon Sep 17 00:00:00 2001
From: mattip
Date: Wed, 3 Jul 2024 15:35:55 +0300
Subject: [PATCH 693/980] MAINT: restore npy_PyErr_ChainExceptions

---
 numpy/_core/include/numpy/npy_3kcompat.h | 27 ++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/numpy/_core/include/numpy/npy_3kcompat.h b/numpy/_core/include/numpy/npy_3kcompat.h
index 8688393beb57..4eb4f2d88d26 100644
--- a/numpy/_core/include/numpy/npy_3kcompat.h
+++ b/numpy/_core/include/numpy/npy_3kcompat.h
@@ -245,9 +245,32 @@ npy_PyFile_CloseFile(PyObject *file)
     return 0;
 }
 
+/* This is a copy of _PyErr_ChainExceptions, which
+ * is no longer exported from Python 3.12
+ */
+static inline void
+npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)
+{
+    if (exc == NULL)
+        return;
 
-#define npy_PyErr_ChainExceptions _PyErr_ChainExceptions
-
+    if (PyErr_Occurred()) {
+        PyObject *exc2, *val2, *tb2;
+        PyErr_Fetch(&exc2, &val2, &tb2);
+        PyErr_NormalizeException(&exc, &val, &tb);
+        if (tb != NULL) {
+            PyException_SetTraceback(val, tb);
+            Py_DECREF(tb);
+        }
+        Py_DECREF(exc);
+        PyErr_NormalizeException(&exc2, &val2, &tb2);
+        PyException_SetContext(val2, val);
+        PyErr_Restore(exc2, val2, tb2);
+    }
+    else {
+        PyErr_Restore(exc, val, tb);
+    }
+}
 
 /* This is a copy of _PyErr_ChainExceptions, with:
  * __cause__ used instead of __context__

From 5954b85950638a122bdc0662720627e1a35937e5 Mon Sep 17 00:00:00 2001
From: mattip
Date: Wed, 3 Jul 2024 17:07:15 +0300
Subject: [PATCH 694/980] MAINT: restore Npy__PyLong_AsInt

---
 numpy/_core/include/numpy/npy_3kcompat.h | 27 +++++++++++++++++++-----
 numpy/f2py/cfuncs.py                     |  4 ++--
 2 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/numpy/_core/include/numpy/npy_3kcompat.h b/numpy/_core/include/numpy/npy_3kcompat.h
index 4eb4f2d88d26..c2bf74faf09d 100644
--- a/numpy/_core/include/numpy/npy_3kcompat.h
+++ b/numpy/_core/include/numpy/npy_3kcompat.h
@@ -20,14 +20,32 @@
 extern "C" {
 #endif
 
-/*
- * Macros to protect CRT calls against instant termination when passed an
- * invalid parameter (https://bugs.python.org/issue23524).
- */
+/* Python 3.13 removes _PyLong_AsInt */
+static inline int
+Npy__PyLong_AsInt(PyObject *obj)
+{
+    int overflow;
+    long result = PyLong_AsLongAndOverflow(obj, &overflow);
+
+    /* INT_MAX and INT_MIN are defined in Python.h */
+    if (overflow || result > INT_MAX || result < INT_MIN) {
+        /* XXX: could be cute and give a different
+           message for overflow == -1 */
+        PyErr_SetString(PyExc_OverflowError,
+                "Python int too large to convert to C int");
+        return -1;
+    }
+    return (int)result;
+}
+
 #if defined _MSC_VER && _MSC_VER >= 1900
 
 #include <stdlib.h>
 
+/*
+ * Macros to protect CRT calls against instant termination when passed an
+ * invalid parameter (https://bugs.python.org/issue23524).
+ */ extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler; #define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \ _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler); @@ -40,7 +58,6 @@ extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler; #endif /* _MSC_VER >= 1900 */ - /* * PyFile_* compatibility */ diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 4f966e58e6af..4328a6e5004c 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -878,13 +878,13 @@ PyObject* tmp = NULL; if (PyLong_Check(obj)) { - *v = _PyLong_AsInt(obj); + *v = Npy__PyLong_AsInt(obj); return !(*v == -1 && PyErr_Occurred()); } tmp = PyNumber_Long(obj); if (tmp) { - *v = _PyLong_AsInt(tmp); + *v = Npy__PyLong_AsInt(tmp); Py_DECREF(tmp); return !(*v == -1 && PyErr_Occurred()); } From 0acdad6c8b7808e0cc6ce19523e2150b3fb72b27 Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Wed, 3 Jul 2024 10:38:52 -0600 Subject: [PATCH 695/980] BUG: fancy indexing copy (#26558) * BUG: fancy indexing copy * Fixes gh-26542 * For these very simple advanced indexing cases, if the `result` and `self` arrays share the same data pointers, use a copy of `result` to assign values to `self`, otherwise the outcome of the fancy indexing suffers from mutation of `result` before the assignments are complete. * MAINT, BUG: PR 26558 revisions * Avoid leaking the old `tmp_arr`, based on reviewer feedback. * Add a test for a similar 2D case that fails, then hoist `solve_may_share_memory()` check farther up in the control flow such that the test passes. * Add a reviewer-requested test for index overlap. * BUG: PR 26558 revisions * The usage of `solve_may_share_memory` in the above PR wasn't quite right since it ignored the case of failing to solve the overlap problem. This has been revised according to reviewer feedback. Co-authored-by: Sebastian Berg --------- Co-authored-by: Sebastian Berg --- numpy/_core/src/multiarray/mapping.c | 4 ++++ numpy/_core/tests/test_indexing.py | 22 ++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 1861241a040e..e329a7a6758c 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1960,6 +1960,10 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) tmp_arr = (PyArrayObject *)op; } + if (tmp_arr && solve_may_share_memory(self, tmp_arr, 1) != 0) { + Py_SETREF(tmp_arr, (PyArrayObject *)PyArray_NewCopy(tmp_arr, NPY_ANYORDER)); + } + /* * Special case for very simple 1-d fancy indexing, which however * is quite common. 
This saves not only a lot of setup time in the diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index bea1c1017fb2..9611f75221d2 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -133,6 +133,28 @@ def test_empty_fancy_index(self): b = np.array([]) assert_raises(IndexError, a.__getitem__, b) + def test_gh_26542(self): + a = np.array([0, 1, 2]) + idx = np.array([2, 1, 0]) + a[idx] = a + expected = np.array([2, 1, 0]) + assert_equal(a, expected) + + def test_gh_26542_2d(self): + a = np.array([[0, 1, 2]]) + idx_row = np.zeros(3, dtype=int) + idx_col = np.array([2, 1, 0]) + a[idx_row, idx_col] = a + expected = np.array([[2, 1, 0]]) + assert_equal(a, expected) + + def test_gh_26542_index_overlap(self): + arr = np.arange(100) + expected_vals = np.copy(arr[:-10]) + arr[10:] = arr[:-10] + actual_vals = arr[10:] + assert_equal(actual_vals, expected_vals) + def test_ellipsis_index(self): a = np.array([[1, 2, 3], [4, 5, 6], From 6c915672c772b4753641572add2d846cbd7f0d67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Thu, 20 Jun 2024 16:37:51 +0200 Subject: [PATCH 696/980] ENH: Support integer & bool dtype inputs in rounding functions. --- doc/release/upcoming_changes/26766.change.rst | 2 ++ numpy/_core/code_generators/generate_umath.py | 3 +++ numpy/_core/src/umath/loops.h.src | 8 +++++++- numpy/_core/src/umath/loops_autovec.dispatch.c.src | 11 +++++++++++ numpy/_core/tests/test_umath.py | 9 +++++++++ tools/ci/array-api-skips.txt | 5 ----- 6 files changed, 32 insertions(+), 6 deletions(-) create mode 100644 doc/release/upcoming_changes/26766.change.rst diff --git a/doc/release/upcoming_changes/26766.change.rst b/doc/release/upcoming_changes/26766.change.rst new file mode 100644 index 000000000000..923dbe816dd1 --- /dev/null +++ b/doc/release/upcoming_changes/26766.change.rst @@ -0,0 +1,2 @@ +* `numpy.floor`, `numpy.ceil`, and `numpy.trunc` now won't perform casting + to a floating dtype for integer and boolean dtype input arrays. 
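
A minimal sketch of the behavior the release note above describes; the
dtypes used here are arbitrary examples, and any integer or boolean dtype
is handled the same way by the loops added below::

    import numpy as np

    a = np.arange(-2, 3, dtype=np.int64)
    assert np.floor(a).dtype == np.int64     # previously cast to float64
    assert np.ceil(a).dtype == np.int64
    assert np.trunc(np.array([True, False])).dtype == np.bool_
    # floating-point inputs keep rounding exactly as before
    assert np.floor(np.array([1.5], dtype=np.float32))[0] == 1.0
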
diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index 06871a44b37f..6ec19f12e067 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -953,6 +953,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.ceil'), None, + TD(bints), TD('e', f='ceil', astype={'e': 'f'}), TD(inexactvec, dispatch=[('loops_unary_fp', 'fd')]), TD('fdg', f='ceil'), @@ -962,6 +963,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.trunc'), None, + TD(bints), TD('e', f='trunc', astype={'e': 'f'}), TD(inexactvec, dispatch=[('loops_unary_fp', 'fd')]), TD('fdg', f='trunc'), @@ -978,6 +980,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.floor'), None, + TD(bints), TD('e', f='floor', astype={'e': 'f'}), TD(inexactvec, dispatch=[('loops_unary_fp', 'fd')]), TD('fdg', f='floor'), diff --git a/numpy/_core/src/umath/loops.h.src b/numpy/_core/src/umath/loops.h.src index 3cb689818ec8..f775bc22b8a8 100644 --- a/numpy/_core/src/umath/loops.h.src +++ b/numpy/_core/src/umath/loops.h.src @@ -77,7 +77,7 @@ BOOL_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void #include "loops_autovec.dispatch.h" #endif /**begin repeat - * #kind = isnan, isinf, isfinite# + * #kind = isnan, isinf, isfinite, floor, ceil, trunc# */ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void BOOL_@kind@, (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))) @@ -179,6 +179,12 @@ NPY_NO_EXPORT void NPY_NO_EXPORT void @S@@TYPE@_positive(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +/**begin repeat2 + * #kind = floor, ceil, trunc# + */ +#define @S@@TYPE@_@kind@ @S@@TYPE@_positive +/**end repeat2**/ + /**begin repeat2 * Arithmetic * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor, diff --git a/numpy/_core/src/umath/loops_autovec.dispatch.c.src b/numpy/_core/src/umath/loops_autovec.dispatch.c.src index 6ccafe577c72..e93e851d6b7a 100644 --- a/numpy/_core/src/umath/loops_autovec.dispatch.c.src +++ b/numpy/_core/src/umath/loops_autovec.dispatch.c.src @@ -264,6 +264,17 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_@kind@) } /**end repeat**/ +/**begin repeat + * Identity + * #kind = floor, ceil, trunc# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_@kind@) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP_FAST(npy_bool, npy_bool, *out = in); +} +/**end repeat**/ + /* ***************************************************************************** ** HALF-FLOAT LOOPS ** diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index df8ec07dc3f5..548411da7f48 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4174,6 +4174,15 @@ def test_fraction(self): assert_equal(np.ceil(f), -1) assert_equal(np.trunc(f), -1) + @pytest.mark.parametrize('func', [np.floor, np.ceil, np.trunc]) + @pytest.mark.parametrize('dtype', [np.bool, np.float64, np.float32, + np.int64, np.uint32]) + def test_output_dtype(self, func, dtype): + arr = np.array([-2, 0, 4, 8]).astype(dtype) + result = func(arr) + assert_equal(arr, result) + assert result.dtype == dtype + class TestComplexFunctions: funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, diff --git a/tools/ci/array-api-skips.txt b/tools/ci/array-api-skips.txt index 5b7324ae753b..2d618ee05d45 100644 --- 
a/tools/ci/array-api-skips.txt +++ b/tools/ci/array-api-skips.txt @@ -1,11 +1,6 @@ # finfo return type misalignment array_api_tests/test_data_type_functions.py::test_finfo[float32] -# for int inputs out.dtype=float32, but should be int -array_api_tests/test_operators_and_elementwise_functions.py::test_ceil -array_api_tests/test_operators_and_elementwise_functions.py::test_floor -array_api_tests/test_operators_and_elementwise_functions.py::test_trunc - # 'shape' arg is present. 'newshape' is retained for backward compat. array_api_tests/test_signatures.py::test_func_signature[reshape] From ff66650684cd9175e1eb03535cdd5db34901ae0c Mon Sep 17 00:00:00 2001 From: Rostan Date: Thu, 4 Jul 2024 16:16:02 +0200 Subject: [PATCH 697/980] BUG: Mismatched allocation domains in `PyArray_FillWithScalar` (#26849) When `PyArray_FillWithScalar` needs to heap-allocate a buffer, the deallocating function does not use the same domain as the allocating function. Such API violations are detected when debug hooks are enabled. --- numpy/_core/src/multiarray/convert.c | 4 ++-- numpy/_core/tests/test_multiarray.py | 22 +++++++++++++++++----- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index aad40cab9593..57a76cd5f9bd 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -408,9 +408,9 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) char *value = (char *)value_buffer_stack; PyArray_Descr *descr = PyArray_DESCR(arr); - if ((size_t)descr->elsize > sizeof(value_buffer_stack)) { + if (PyDataType_ELSIZE(descr) > sizeof(value_buffer_stack)) { /* We need a large temporary buffer... */ - value_buffer_heap = PyObject_Calloc(1, descr->elsize); + value_buffer_heap = PyMem_Calloc(1, PyDataType_ELSIZE(descr)); if (value_buffer_heap == NULL) { PyErr_NoMemory(); return -1; diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 0b75b275a6b2..85534fa4dd2f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -425,6 +425,18 @@ def test_fill_readonly(self): with pytest.raises(ValueError, match=".*read-only"): a.fill(0) + def test_fill_subarrays(self): + # NOTE: + # This is also a regression test for a crash with PYTHONMALLOC=debug + + dtype = np.dtype("2 Date: Thu, 4 Jul 2024 16:21:51 +0200 Subject: [PATCH 698/980] DOC: Apply Mattis suggestions Co-authored-by: Matti Picus --- doc/source/user/troubleshooting-importerror.rst | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst index 807feb119367..d1b83d388aac 100644 --- a/doc/source/user/troubleshooting-importerror.rst +++ b/doc/source/user/troubleshooting-importerror.rst @@ -185,11 +185,11 @@ There can be various reason for the incompatibility: * You have recently upgraded NumPy, most likely to NumPy 2, and the other module now also needs to be upgraded. (NumPy 2 was released in June 2024.) -* Especially if you have version constraints on some packages, ``pip`` may - have found incompatible versions when installing. +* You have version constraints and ``pip`` may + have installed a combination of incompatible packages. -* Manual forced versions or setup steps, such as copying a compiled extension - to another computer with a different NumPy version. 
+* You have compiled locally or have copied a compiled extension from + elsewhere (which is, in general, a bad idea). The best solution will usually be to upgrade the failing package: @@ -216,7 +216,6 @@ not yet exist or cannot be installed for other reasons. In that case: * Add additional version pins to the failing package to help ``pip`` resolve compatible versions of NumPy and the package. -* Investigate how the packages got installed and why incompatibilities arose. Segfaults or crashes From 840aefea82f2c210013e94abd485d36da9823f64 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 5 Jul 2024 01:42:32 +0200 Subject: [PATCH 699/980] TYP: Annotate type aliases as `typing.TypeAlias` --- numpy/__init__.pyi | 101 ++++++++++++++++++----------------- numpy/_typing/_array_like.py | 42 +++++++-------- numpy/_typing/_dtype_like.py | 33 ++++++------ numpy/_typing/_scalars.py | 26 +++++---- numpy/_typing/_shape.py | 6 +-- numpy/linalg/_linalg.pyi | 5 +- 6 files changed, 110 insertions(+), 103 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2e08ddbb1791..23cef0725a85 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -195,6 +195,7 @@ from typing import ( Final, final, ClassVar, + TypeAlias ) # Ensures that the stubs are picked up @@ -641,7 +642,7 @@ def show_config() -> None: ... _NdArraySubClass = TypeVar("_NdArraySubClass", bound=NDArray[Any]) _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) -_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I", "little", "big", "native"] +_ByteOrder: TypeAlias = L["S", "<", ">", "=", "|", "L", "B", "N", "I", "little", "big", "native"] @final class dtype(Generic[_DTypeScalar_co]): @@ -894,7 +895,7 @@ class dtype(Generic[_DTypeScalar_co]): @property def type(self) -> type[_DTypeScalar_co]: ... -_ArrayLikeInt = ( +_ArrayLikeInt: TypeAlias = ( int | integer[Any] | Sequence[int | integer[Any]] @@ -941,14 +942,14 @@ class flatiter(Generic[_NdArraySubClass]): @overload def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... 
-_OrderKACF = L[None, "K", "A", "C", "F"] -_OrderACF = L[None, "A", "C", "F"] -_OrderCF = L[None, "C", "F"] +_OrderKACF: TypeAlias = L[None, "K", "A", "C", "F"] +_OrderACF: TypeAlias = L[None, "A", "C", "F"] +_OrderCF: TypeAlias = L[None, "C", "F"] -_ModeKind = L["raise", "wrap", "clip"] -_PartitionKind = L["introselect"] -_SortKind = L["quicksort", "mergesort", "heapsort", "stable"] -_SortSide = L["left", "right"] +_ModeKind: TypeAlias = L["raise", "wrap", "clip"] +_PartitionKind: TypeAlias = L["introselect"] +_SortKind: TypeAlias = L["quicksort", "mergesort", "heapsort", "stable"] +_SortSide: TypeAlias = L["left", "right"] _ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon) @@ -1391,7 +1392,7 @@ _NumberType = TypeVar("_NumberType", bound=number[Any]) if sys.version_info >= (3, 12): from collections.abc import Buffer as _SupportsBuffer else: - _SupportsBuffer = ( + _SupportsBuffer: TypeAlias = ( bytes | bytearray | memoryview @@ -1404,22 +1405,22 @@ else: _T = TypeVar("_T") _T_co = TypeVar("_T_co", covariant=True) _T_contra = TypeVar("_T_contra", contravariant=True) -_2Tuple = tuple[_T, _T] -_CastingKind = L["no", "equiv", "safe", "same_kind", "unsafe"] +_2Tuple: TypeAlias = tuple[_T, _T] +_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "unsafe"] -_ArrayUInt_co = NDArray[np.bool | unsignedinteger[Any]] -_ArrayInt_co = NDArray[np.bool | integer[Any]] -_ArrayFloat_co = NDArray[np.bool | integer[Any] | floating[Any]] -_ArrayComplex_co = NDArray[np.bool | integer[Any] | floating[Any] | complexfloating[Any, Any]] -_ArrayNumber_co = NDArray[np.bool | number[Any]] -_ArrayTD64_co = NDArray[np.bool | integer[Any] | timedelta64] +_ArrayUInt_co: TypeAlias = NDArray[np.bool | unsignedinteger[Any]] +_ArrayInt_co: TypeAlias = NDArray[np.bool | integer[Any]] +_ArrayFloat_co: TypeAlias = NDArray[np.bool | integer[Any] | floating[Any]] +_ArrayComplex_co: TypeAlias = NDArray[np.bool | integer[Any] | floating[Any] | complexfloating[Any, Any]] +_ArrayNumber_co: TypeAlias = NDArray[np.bool | number[Any]] +_ArrayTD64_co: TypeAlias = NDArray[np.bool | integer[Any] | timedelta64] # Introduce an alias for `dtype` to avoid naming conflicts. -_dtype = dtype +_dtype: TypeAlias = dtype # `builtins.PyCapsule` unfortunately lacks annotations as of the moment; # use `Any` as a stopgap measure -_PyCapsule = Any +_PyCapsule: TypeAlias = Any class _SupportsItem(Protocol[_T_co]): def item(self, args: Any, /) -> _T_co: ... @@ -2837,7 +2838,7 @@ class bool(generic): __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] -bool_ = bool +bool_: TypeAlias = bool class object_(generic): def __init__(self, value: object = ..., /) -> None: ... 
@@ -2893,9 +2894,9 @@ class datetime64(generic): __gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] __ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] -_IntValue = SupportsInt | _CharLike_co | SupportsIndex -_FloatValue = None | _CharLike_co | SupportsFloat | SupportsIndex -_ComplexValue = ( +_IntValue: TypeAlias = SupportsInt | _CharLike_co | SupportsIndex +_FloatValue: TypeAlias = None | _CharLike_co | SupportsFloat | SupportsIndex +_ComplexValue: TypeAlias = ( None | _CharLike_co | SupportsFloat @@ -3049,18 +3050,18 @@ class unsignedinteger(integer[_NBit1]): __divmod__: _UnsignedIntDivMod[_NBit1] __rdivmod__: _UnsignedIntDivMod[_NBit1] -uint8 = unsignedinteger[_8Bit] -uint16 = unsignedinteger[_16Bit] -uint32 = unsignedinteger[_32Bit] -uint64 = unsignedinteger[_64Bit] +uint8: TypeAlias = unsignedinteger[_8Bit] +uint16: TypeAlias = unsignedinteger[_16Bit] +uint32: TypeAlias = unsignedinteger[_32Bit] +uint64: TypeAlias = unsignedinteger[_64Bit] -ubyte = unsignedinteger[_NBitByte] -ushort = unsignedinteger[_NBitShort] -uintc = unsignedinteger[_NBitIntC] -uintp = unsignedinteger[_NBitIntP] -uint = uintp -ulong = unsignedinteger[_NBitLong] -ulonglong = unsignedinteger[_NBitLongLong] +ubyte: TypeAlias = unsignedinteger[_NBitByte] +ushort: TypeAlias = unsignedinteger[_NBitShort] +uintc: TypeAlias = unsignedinteger[_NBitIntC] +uintp: TypeAlias = unsignedinteger[_NBitIntP] +uint: TypeAlias = uintp +ulong: TypeAlias = unsignedinteger[_NBitLong] +ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] class inexact(number[_NBit1]): # type: ignore def __getnewargs__(self: inexact[_64Bit]) -> tuple[float, ...]: ... @@ -3106,14 +3107,14 @@ class floating(inexact[_NBit1]): __divmod__: _FloatDivMod[_NBit1] __rdivmod__: _FloatDivMod[_NBit1] -float16 = floating[_16Bit] -float32 = floating[_32Bit] -float64 = floating[_64Bit] +float16: TypeAlias = floating[_16Bit] +float32: TypeAlias = floating[_32Bit] +float64: TypeAlias = floating[_64Bit] -half = floating[_NBitHalf] -single = floating[_NBitSingle] -double = floating[_NBitDouble] -longdouble = floating[_NBitLongDouble] +half: TypeAlias = floating[_NBitHalf] +single: TypeAlias = floating[_NBitSingle] +double: TypeAlias = floating[_NBitDouble] +longdouble: TypeAlias = floating[_NBitLongDouble] # The main reason for `complexfloating` having two typevars is cosmetic. # It is used to clarify why `complex128`s precision is `_64Bit`, the latter @@ -3144,12 +3145,12 @@ class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): __pow__: _ComplexOp[_NBit1] __rpow__: _ComplexOp[_NBit1] -complex64 = complexfloating[_32Bit, _32Bit] -complex128 = complexfloating[_64Bit, _64Bit] +complex64: TypeAlias = complexfloating[_32Bit, _32Bit] +complex128: TypeAlias = complexfloating[_64Bit, _64Bit] -csingle = complexfloating[_NBitSingle, _NBitSingle] -cdouble = complexfloating[_NBitDouble, _NBitDouble] -clongdouble = complexfloating[_NBitLongDouble, _NBitLongDouble] +csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] +cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] +clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble] class flexible(generic): ... # type: ignore @@ -3528,7 +3529,7 @@ class iinfo(Generic[_IntType]): @overload def __new__(cls, dtype: str) -> iinfo[Any]: ... 
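# A hedged sketch of what the scalar aliases above imply for users: since
# `float32`, `float64`, etc. are spelled as `floating[...]` in the stubs, a
# single `np.floating[Any]` annotation accepts every precision. The `halve`
# helper below is hypothetical, for illustration only.
from typing import Any
import numpy as np

def halve(x: np.floating[Any]) -> np.float64:
    return np.float64(x) / 2.0

halve(np.float32(3.0))     # accepted: float32 is floating[_32Bit]
halve(np.longdouble(3.0))  # accepted: longdouble is floating[_NBitLongDouble]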
-_NDIterFlagsKind = L[ +_NDIterFlagsKind: TypeAlias = L[ "buffered", "c_index", "copy_if_overlap", @@ -3544,7 +3545,7 @@ _NDIterFlagsKind = L[ "zerosize_ok", ] -_NDIterOpFlagsKind = L[ +_NDIterOpFlagsKind: TypeAlias = L[ "aligned", "allocate", "arraymask", @@ -3635,7 +3636,7 @@ class nditer: @property def value(self) -> tuple[NDArray[Any], ...]: ... -_MemMapModeKind = L[ +_MemMapModeKind: TypeAlias = L[ "readonly", "r", "copyonwrite", "c", "readwrite", "r+", diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 33255693806e..79e13a8f5243 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -2,7 +2,7 @@ import sys from collections.abc import Collection, Callable, Sequence -from typing import Any, Protocol, Union, TypeVar, runtime_checkable +from typing import Any, Protocol, Union, TypeAlias, TypeVar, runtime_checkable import numpy as np from numpy import ( @@ -29,7 +29,7 @@ _DType = TypeVar("_DType", bound=dtype[Any]) _DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) -NDArray = ndarray[Any, dtype[_ScalarType_co]] +NDArray: TypeAlias = ndarray[Any, dtype[_ScalarType_co]] # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned @@ -54,7 +54,7 @@ def __array_function__( # TODO: Wait until mypy supports recursive objects in combination with typevars -_FiniteNestedSequence = Union[ +_FiniteNestedSequence: TypeAlias = Union[ _T, Sequence[_T], Sequence[Sequence[_T]], @@ -63,7 +63,7 @@ def __array_function__( ] # A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` -_ArrayLike = Union[ +_ArrayLike: TypeAlias = Union[ _SupportsArray[dtype[_ScalarType]], _NestedSequence[_SupportsArray[dtype[_ScalarType]]], ] @@ -71,7 +71,7 @@ def __array_function__( # A union representing array-like objects; consists of two typevars: # One representing types that can be parametrized w.r.t. 
`np.dtype` # and another one for the rest -_DualArrayLike = Union[ +_DualArrayLike: TypeAlias = Union[ _SupportsArray[_DType], _NestedSequence[_SupportsArray[_DType]], _T, @@ -81,35 +81,35 @@ def __array_function__( if sys.version_info >= (3, 12): from collections.abc import Buffer - ArrayLike = Buffer | _DualArrayLike[ + ArrayLike: TypeAlias = Buffer | _DualArrayLike[ dtype[Any], Union[bool, int, float, complex, str, bytes], ] else: - ArrayLike = _DualArrayLike[ + ArrayLike: TypeAlias = _DualArrayLike[ dtype[Any], Union[bool, int, float, complex, str, bytes], ] # `ArrayLike_co`: array-like objects that can be coerced into `X` # given the casting rules `same_kind` -_ArrayLikeBool_co = _DualArrayLike[ +_ArrayLikeBool_co: TypeAlias = _DualArrayLike[ dtype[np.bool], bool, ] -_ArrayLikeUInt_co = _DualArrayLike[ +_ArrayLikeUInt_co: TypeAlias = _DualArrayLike[ dtype[Union[np.bool, unsignedinteger[Any]]], bool, ] -_ArrayLikeInt_co = _DualArrayLike[ +_ArrayLikeInt_co: TypeAlias = _DualArrayLike[ dtype[Union[np.bool, integer[Any]]], Union[bool, int], ] -_ArrayLikeFloat_co = _DualArrayLike[ +_ArrayLikeFloat_co: TypeAlias = _DualArrayLike[ dtype[Union[np.bool, integer[Any], floating[Any]]], Union[bool, int, float], ] -_ArrayLikeComplex_co = _DualArrayLike[ +_ArrayLikeComplex_co: TypeAlias = _DualArrayLike[ dtype[Union[ np.bool, integer[Any], @@ -118,37 +118,37 @@ def __array_function__( ]], Union[bool, int, float, complex], ] -_ArrayLikeNumber_co = _DualArrayLike[ +_ArrayLikeNumber_co: TypeAlias = _DualArrayLike[ dtype[Union[np.bool, number[Any]]], Union[bool, int, float, complex], ] -_ArrayLikeTD64_co = _DualArrayLike[ +_ArrayLikeTD64_co: TypeAlias = _DualArrayLike[ dtype[Union[np.bool, integer[Any], timedelta64]], Union[bool, int], ] -_ArrayLikeDT64_co = Union[ +_ArrayLikeDT64_co: TypeAlias = Union[ _SupportsArray[dtype[datetime64]], _NestedSequence[_SupportsArray[dtype[datetime64]]], ] -_ArrayLikeObject_co = Union[ +_ArrayLikeObject_co: TypeAlias = Union[ _SupportsArray[dtype[object_]], _NestedSequence[_SupportsArray[dtype[object_]]], ] -_ArrayLikeVoid_co = Union[ +_ArrayLikeVoid_co: TypeAlias = Union[ _SupportsArray[dtype[void]], _NestedSequence[_SupportsArray[dtype[void]]], ] -_ArrayLikeStr_co = _DualArrayLike[ +_ArrayLikeStr_co: TypeAlias = _DualArrayLike[ dtype[str_], str, ] -_ArrayLikeBytes_co = _DualArrayLike[ +_ArrayLikeBytes_co: TypeAlias = _DualArrayLike[ dtype[bytes_], bytes, ] -_ArrayLikeInt = _DualArrayLike[ +_ArrayLikeInt: TypeAlias = _DualArrayLike[ dtype[integer[Any]], int, ] @@ -161,7 +161,7 @@ class _UnknownType: ... -_ArrayLikeUnknown = _DualArrayLike[ +_ArrayLikeUnknown: TypeAlias = _DualArrayLike[ dtype[_UnknownType], _UnknownType, ] diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index 73a5f7d7b5a7..217f92623984 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -3,6 +3,7 @@ Any, Sequence, Union, + TypeAlias, TypeVar, Protocol, TypedDict, @@ -60,7 +61,7 @@ _SCT = TypeVar("_SCT", bound=np.generic) _DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype[Any]) -_DTypeLikeNested = Any # TODO: wait for support for recursive types +_DTypeLikeNested: TypeAlias = Any # TODO: wait for support for recursive types # Mandatory keys @@ -87,7 +88,7 @@ def dtype(self) -> _DType_co: ... # A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic` -_DTypeLike = Union[ +_DTypeLike: TypeAlias = Union[ np.dtype[_SCT], type[_SCT], _SupportsDType[np.dtype[_SCT]], @@ -95,7 +96,7 @@ def dtype(self) -> _DType_co: ... 
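# A small sketch of the `_SupportsDType` protocol these aliases rely on
# (`HasF64DType` is a hypothetical class, not a NumPy one): any object
# exposing a `dtype` property counts as dtype-like, which is how arrays and
# scalars can stand in for their own dtype in annotations.
import numpy as np

class HasF64DType:
    @property
    def dtype(self) -> np.dtype[np.float64]:
        return np.dtype(np.float64)

# The runtime constructor also falls back to a `dtype` attribute, so this
# should print dtype('float64'):
print(np.dtype(HasF64DType()))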
# Would create a dtype[np.void] -_VoidDTypeLike = Union[ +_VoidDTypeLike: TypeAlias = Union[ # (flexible_dtype, itemsize) tuple[_DTypeLikeNested, int], # (fixed_dtype, shape) @@ -115,7 +116,7 @@ def dtype(self) -> _DType_co: ... # Anything that can be coerced into numpy.dtype. # Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -DTypeLike = Union[ +DTypeLike: TypeAlias = Union[ np.dtype[Any], # default data type (float64) None, @@ -137,14 +138,14 @@ def dtype(self) -> _DType_co: ... # Aliases for commonly used dtype-like objects. # Note that the precision of `np.number` subclasses is ignored herein. -_DTypeLikeBool = Union[ +_DTypeLikeBool: TypeAlias = Union[ type[bool], type[np.bool], np.dtype[np.bool], _SupportsDType[np.dtype[np.bool]], _BoolCodes, ] -_DTypeLikeUInt = Union[ +_DTypeLikeUInt: TypeAlias = Union[ type[np.unsignedinteger], np.dtype[np.unsignedinteger], _SupportsDType[np.dtype[np.unsignedinteger]], @@ -160,7 +161,7 @@ def dtype(self) -> _DType_co: ... _UIntPCodes, _UIntCodes, ] -_DTypeLikeInt = Union[ +_DTypeLikeInt: TypeAlias = Union[ type[int], type[np.signedinteger], np.dtype[np.signedinteger], @@ -177,7 +178,7 @@ def dtype(self) -> _DType_co: ... _IntPCodes, _IntCodes, ] -_DTypeLikeFloat = Union[ +_DTypeLikeFloat: TypeAlias = Union[ type[float], type[np.floating], np.dtype[np.floating], @@ -190,7 +191,7 @@ def dtype(self) -> _DType_co: ... _DoubleCodes, _LongDoubleCodes, ] -_DTypeLikeComplex = Union[ +_DTypeLikeComplex: TypeAlias = Union[ type[complex], type[np.complexfloating], np.dtype[np.complexfloating], @@ -201,47 +202,47 @@ def dtype(self) -> _DType_co: ... _CDoubleCodes, _CLongDoubleCodes, ] -_DTypeLikeDT64 = Union[ +_DTypeLikeDT64: TypeAlias = Union[ type[np.timedelta64], np.dtype[np.timedelta64], _SupportsDType[np.dtype[np.timedelta64]], _TD64Codes, ] -_DTypeLikeTD64 = Union[ +_DTypeLikeTD64: TypeAlias = Union[ type[np.datetime64], np.dtype[np.datetime64], _SupportsDType[np.dtype[np.datetime64]], _DT64Codes, ] -_DTypeLikeStr = Union[ +_DTypeLikeStr: TypeAlias = Union[ type[str], type[np.str_], np.dtype[np.str_], _SupportsDType[np.dtype[np.str_]], _StrCodes, ] -_DTypeLikeBytes = Union[ +_DTypeLikeBytes: TypeAlias = Union[ type[bytes], type[np.bytes_], np.dtype[np.bytes_], _SupportsDType[np.dtype[np.bytes_]], _BytesCodes, ] -_DTypeLikeVoid = Union[ +_DTypeLikeVoid: TypeAlias = Union[ type[np.void], np.dtype[np.void], _SupportsDType[np.dtype[np.void]], _VoidCodes, _VoidDTypeLike, ] -_DTypeLikeObject = Union[ +_DTypeLikeObject: TypeAlias = Union[ type, np.dtype[np.object_], _SupportsDType[np.dtype[np.object_]], _ObjectCodes, ] -_DTypeLikeComplex_co = Union[ +_DTypeLikeComplex_co: TypeAlias = Union[ _DTypeLikeBool, _DTypeLikeUInt, _DTypeLikeInt, diff --git a/numpy/_typing/_scalars.py b/numpy/_typing/_scalars.py index b9274e867c83..9d3f848ff110 100644 --- a/numpy/_typing/_scalars.py +++ b/numpy/_typing/_scalars.py @@ -1,23 +1,27 @@ -from typing import Union, Any +from typing import Any, TypeAlias, Union import numpy as np # NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and # `np.bytes_` are already subclasses of their builtin counterpart -_CharLike_co = Union[str, bytes] +_CharLike_co: TypeAlias = Union[str, bytes] # The 6 `Like_co` type-aliases below represent all scalars that can be # coerced into `` (with the casting rule `same_kind`) -_BoolLike_co = Union[bool, np.bool] -_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger[Any]] -_IntLike_co = Union[_BoolLike_co, int, np.integer[Any]] -_FloatLike_co = Union[_IntLike_co, 
float, np.floating[Any]] -_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating[Any, Any]] -_TD64Like_co = Union[_IntLike_co, np.timedelta64] +_BoolLike_co: TypeAlias = Union[bool, np.bool] +_UIntLike_co: TypeAlias = Union[_BoolLike_co, np.unsignedinteger[Any]] +_IntLike_co: TypeAlias = Union[_BoolLike_co, int, np.integer[Any]] +_FloatLike_co: TypeAlias = Union[_IntLike_co, float, np.floating[Any]] +_ComplexLike_co: TypeAlias = Union[ + _FloatLike_co, + complex, + np.complexfloating[Any, Any], +] +_TD64Like_co: TypeAlias = Union[_IntLike_co, np.timedelta64] -_NumberLike_co = Union[int, float, complex, np.number[Any], np.bool] -_ScalarLike_co = Union[ +_NumberLike_co: TypeAlias = Union[int, float, complex, np.number[Any], np.bool] +_ScalarLike_co: TypeAlias = Union[ int, float, complex, @@ -27,4 +31,4 @@ ] # `_VoidLike_co` is technically not a scalar, but it's close enough -_VoidLike_co = Union[tuple[Any, ...], np.void] +_VoidLike_co: TypeAlias = Union[tuple[Any, ...], np.void] diff --git a/numpy/_typing/_shape.py b/numpy/_typing/_shape.py index 4f1204e47c6a..2b854d65153a 100644 --- a/numpy/_typing/_shape.py +++ b/numpy/_typing/_shape.py @@ -1,7 +1,7 @@ from collections.abc import Sequence -from typing import Union, SupportsIndex +from typing import SupportsIndex, TypeAlias -_Shape = tuple[int, ...] +_Shape: TypeAlias = tuple[int, ...] # Anything that can be coerced to a shape tuple -_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]] +_ShapeLike: TypeAlias = SupportsIndex | Sequence[SupportsIndex] diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index e9f00e226a94..0d431794b74d 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -2,6 +2,7 @@ from collections.abc import Iterable from typing import ( Literal as L, overload, + TypeAlias, TypeVar, Any, SupportsIndex, @@ -45,8 +46,8 @@ _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) _SCT = TypeVar("_SCT", bound=generic, covariant=True) _SCT2 = TypeVar("_SCT2", bound=generic, covariant=True) -_2Tuple = tuple[_T, _T] -_ModeKind = L["reduced", "complete", "r", "raw"] +_2Tuple: TypeAlias = tuple[_T, _T] +_ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] __all__: list[str] From 2386a2dffe380207aa2d812c65bb888e380b0191 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 5 Jul 2024 17:41:25 +0000 Subject: [PATCH 700/980] MAINT: Bump actions/upload-artifact from 4.3.3 to 4.3.4 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.3 to 4.3.4. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/65462800fd760344b1a7b4382951275a0abb4808...0b2256b8c012f0828dc542b3febcab082c67f72b) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/cygwin.yml | 2 +- .github/workflows/free-threaded-wheels.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index ce43d807f8f0..e17a0d95d0ac 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -62,7 +62,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/free-threaded-wheels.yml b/.github/workflows/free-threaded-wheels.yml index bbcdd8dac6f2..5d00fed8b749 100644 --- a/.github/workflows/free-threaded-wheels.yml +++ b/.github/workflows/free-threaded-wheels.yml @@ -151,7 +151,7 @@ jobs: # cibw_before_build.sh when a released cython can build numpy CIBW_BUILD_FRONTEND: "pip; args: --no-build-isolation" - - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 40f9208f09a2..8d34e207bc55 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. 
- name: "Upload artifact" - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 353fd04ecd23..a2ab0f16c5c1 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -159,7 +159,7 @@ jobs: CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl @@ -240,7 +240,7 @@ jobs: python -mpip install twine twine check dist/* - - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 with: name: sdist path: ./dist/* From bd7e8495f5a99542e14f5c596931946499a4929b Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 6 Jul 2024 05:19:35 +0200 Subject: [PATCH 701/980] TYP: improved `numpy._array_api_info` typing --- numpy/_array_api_info.pyi | 233 +++++++++++++++--- .../tests/data/reveal/array_api_info.pyi | 78 +++++- 2 files changed, 260 insertions(+), 51 deletions(-) diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index f86aeb63fd2b..52b98fc0039b 100644 --- a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -1,62 +1,213 @@ -from typing import TypedDict, Optional, Union, Tuple, List -from numpy._typing import DtypeLike +import sys +from typing import ( + TYPE_CHECKING, + ClassVar, + Literal, + TypeAlias, + TypedDict, + TypeVar, + final, + overload, +) + +import numpy as np + +if sys.version_info >= (3, 11): + from typing import Never +elif TYPE_CHECKING: + from typing_extensions import Never +else: + # `NoReturn` and `Never` are equivalent (but not equal) for type-checkers, + # but are used in different places by convention + from typing import NoReturn as Never + +_Device: TypeAlias = Literal["cpu"] +_DeviceLike: TypeAlias = None | _Device -Capabilities = TypedDict( - "Capabilities", +_Capabilities = TypedDict( + "_Capabilities", { - "boolean indexing": bool, - "data-dependent shapes": bool, + "boolean indexing": Literal[True], + "data-dependent shapes": Literal[True], }, ) -DefaultDataTypes = TypedDict( - "DefaultDataTypes", +_DefaultDTypes = TypedDict( + "_DefaultDTypes", { - "real floating": DtypeLike, - "complex floating": DtypeLike, - "integral": DtypeLike, - "indexing": DtypeLike, + "real floating": np.dtype[np.float64], + "complex floating": np.dtype[np.complex128], + "integral": np.dtype[np.intp], + "indexing": np.dtype[np.intp], }, ) -DataTypes = TypedDict( - "DataTypes", - { - "bool": DtypeLike, - "float32": DtypeLike, - "float64": DtypeLike, - "complex64": DtypeLike, - "complex128": DtypeLike, - "int8": DtypeLike, - "int16": DtypeLike, - "int32": DtypeLike, - "int64": DtypeLike, - "uint8": DtypeLike, - "uint16": DtypeLike, - "uint32": DtypeLike, - "uint64": DtypeLike, - }, - total=False, + +_KindBool: TypeAlias = Literal["bool"] +_KindInt: TypeAlias = Literal["signed integer"] +_KindUInt: TypeAlias = Literal["unsigned integer"] +_KindInteger: TypeAlias = Literal["integral"] +_KindFloat: TypeAlias = Literal["real floating"] +_KindComplex: TypeAlias = Literal["complex floating"] +_KindNumber: TypeAlias 
= Literal["numeric"] +_Kind: TypeAlias = ( + _KindBool + | _KindInt + | _KindUInt + | _KindInteger + | _KindFloat + | _KindComplex + | _KindNumber ) -class __array_namespace_info__: - __module__: str - def capabilities(self) -> Capabilities: ... +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_Permute1: TypeAlias = _T1 | tuple[_T1] +_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1] +_Permute3: TypeAlias = ( + tuple[_T1, _T2, _T3] | tuple[_T1, _T3, _T2] + | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1] + | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1] +) + +class _DTypesBool(TypedDict): + bool: np.dtype[np.bool] + +class _DTypesInt(TypedDict): + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + +class _DTypesUInt(TypedDict): + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + +class _DTypesInteger(_DTypesInt, _DTypesUInt): + ... - def default_device(self) -> str: ... +class _DTypesFloat(TypedDict): + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] +class _DTypesComplex(TypedDict): + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] + +class _DTypesNumber(_DTypesInteger, _DTypesFloat, _DTypesComplex): + ... + +class _DTypes(_DTypesBool, _DTypesNumber): + ... + +class _DTypesUnion(TypedDict, total=False): + bool: np.dtype[np.bool] + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] + +_EmptyDict: TypeAlias = dict[Never, Never] + + +@final +class __array_namespace_info__: + __module__: ClassVar[Literal['numpy']] + + def capabilities(self) -> _Capabilities: ... + def default_device(self) -> _Device: ... def default_dtypes( self, *, - device: Optional[str] = None, - ) -> DefaultDataTypes: ... + device: _DeviceLike = ..., + ) -> _DefaultDTypes: ... + def devices(self) -> list[_Device]: ... + @overload def dtypes( self, *, - device: Optional[str] = None, - kind: Optional[Union[str, Tuple[str, ...]]] = None, - ) -> DataTypes: ... - - def devices(self) -> List[str]: ... + device: _DeviceLike = ..., + kind: None = ..., + ) -> _DTypes: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindBool], + ) -> _DTypesBool: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindInt], + ) -> _DTypesInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindUInt], + ) -> _DTypesUInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindFloat], + ) -> _DTypesFloat: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindComplex], + ) -> _DTypesComplex: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: ( + _Permute1[_KindInteger] + | _Permute2[_KindInt, _KindUInt] + ), + ) -> _DTypesInteger: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: ( + _Permute1[_KindNumber] + | _Permute3[_KindInteger, _KindFloat, _KindComplex] + ), + ) -> _DTypesNumber: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: tuple[()], + ) -> _EmptyDict: ... 
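# A hedged aside on the `_Permute1/2/3` helpers driving these overloads:
# they enumerate every ordering of the `kind` tuple, so that e.g. both
#
#     info.dtypes(kind=("signed integer", "unsigned integer"))
#     info.dtypes(kind=("unsigned integer", "signed integer"))
#
# resolve to `_DTypesInteger`. A bare `tuple[A, B]` would match only one
# ordering, since a checker cannot normalize literal tuple order itself.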
+ @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: tuple[_Kind, ...], + ) -> _DTypesUnion: ... diff --git a/numpy/typing/tests/data/reveal/array_api_info.pyi b/numpy/typing/tests/data/reveal/array_api_info.pyi index 056547681366..b7dd2b934aec 100644 --- a/numpy/typing/tests/data/reveal/array_api_info.pyi +++ b/numpy/typing/tests/data/reveal/array_api_info.pyi @@ -1,18 +1,76 @@ import sys -from typing import List +from typing import Literal import numpy as np if sys.version_info >= (3, 11): - from typing import assert_type + from typing import Never, assert_type else: - from typing_extensions import assert_type + from typing_extensions import Never, assert_type -array_namespace_info = np.__array_namespace_info__() +info = np.__array_namespace_info__() -assert_type(array_namespace_info.__module__, str) -assert_type(array_namespace_info.capabilities(), np._array_api_info.Capabilities) -assert_type(array_namespace_info.default_device(), str) -assert_type(array_namespace_info.default_dtypes(), np._array_api_info.DefaultDataTypes) -assert_type(array_namespace_info.dtypes(), np._array_api_info.DataTypes) -assert_type(array_namespace_info.devices(), List[str]) +assert_type(info.__module__, Literal["numpy"]) + +assert_type(info.default_device(), Literal["cpu"]) +assert_type(info.devices()[0], Literal["cpu"]) +assert_type(info.devices()[-1], Literal["cpu"]) + +assert_type(info.capabilities()["boolean indexing"], Literal[True]) +assert_type(info.capabilities()["data-dependent shapes"], Literal[True]) + +assert_type(info.default_dtypes()["real floating"], np.dtype[np.float64]) +assert_type(info.default_dtypes()["complex floating"], np.dtype[np.complex128]) +assert_type(info.default_dtypes()["integral"], np.dtype[np.int_]) +assert_type(info.default_dtypes()["indexing"], np.dtype[np.intp]) + +assert_type(info.dtypes()["bool"], np.dtype[np.bool]) +assert_type(info.dtypes()["int8"], np.dtype[np.int8]) +assert_type(info.dtypes()["uint8"], np.dtype[np.uint8]) +assert_type(info.dtypes()["float32"], np.dtype[np.float32]) +assert_type(info.dtypes()["complex64"], np.dtype[np.complex64]) + +assert_type(info.dtypes(kind="bool")["bool"], np.dtype[np.bool]) +assert_type(info.dtypes(kind="signed integer")["int64"], np.dtype[np.int64]) +assert_type(info.dtypes(kind="unsigned integer")["uint64"], np.dtype[np.uint64]) +assert_type(info.dtypes(kind="integral")["int32"], np.dtype[np.int32]) +assert_type(info.dtypes(kind="integral")["uint32"], np.dtype[np.uint32]) +assert_type(info.dtypes(kind="real floating")["float64"], np.dtype[np.float64]) +assert_type(info.dtypes(kind="complex floating")["complex128"], np.dtype[np.complex128]) +assert_type(info.dtypes(kind="numeric")["int16"], np.dtype[np.int16]) +assert_type(info.dtypes(kind="numeric")["uint16"], np.dtype[np.uint16]) +assert_type(info.dtypes(kind="numeric")["float64"], np.dtype[np.float64]) +assert_type(info.dtypes(kind="numeric")["complex128"], np.dtype[np.complex128]) + +assert_type(info.dtypes(kind=()), dict[Never, Never]) + +assert_type(info.dtypes(kind=("bool",))["bool"], np.dtype[np.bool]) +assert_type(info.dtypes(kind=("signed integer",))["int64"], np.dtype[np.int64]) +assert_type(info.dtypes(kind=("integral",))["uint32"], np.dtype[np.uint32]) +assert_type(info.dtypes(kind=("complex floating",))["complex128"], np.dtype[np.complex128]) +assert_type(info.dtypes(kind=("numeric",))["float64"], np.dtype[np.float64]) + +assert_type( + info.dtypes(kind=("signed integer", "unsigned integer"))["int8"], + np.dtype[np.int8], +) +assert_type( + 
info.dtypes(kind=("signed integer", "unsigned integer"))["uint8"], + np.dtype[np.uint8], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["int16"], + np.dtype[np.int16], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["uint16"], + np.dtype[np.uint16], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["float32"], + np.dtype[np.float32], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["complex64"], + np.dtype[np.complex64], +) From e94bd89fe72967572cea414253950f4ae43abf23 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sat, 6 Jul 2024 11:12:25 +0200 Subject: [PATCH 702/980] BUG: apply ruff/flake8-implicit-str-concat rule ISC001 ISC001 Implicitly concatenated string literals on one line --- benchmarks/benchmarks/bench_ufunc_strides.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index 183c7c4fb75e..1c7eb0a68e2c 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -8,7 +8,7 @@ class _AbstractBinary(Benchmark): params = [] - param_names = ['ufunc', 'stride_in0', 'stride_in1' 'stride_out', 'dtype'] + param_names = ['ufunc', 'stride_in0', 'stride_in1', 'stride_out', 'dtype'] timeout = 10 arrlen = 10000 data_finite = True From a4596d7446121d281a404d6af7a47bec43f6b440 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 18 Jun 2024 16:54:03 +0200 Subject: [PATCH 703/980] BUG,MAINT: Move `allow_legacy_promotion` check to later Moving it to later makes it much simpler to consider also the signature in the decision, which is necessary to get things right. In practice, it might also be possible to just reject it later, but this seemed actually simpler. --- numpy/_core/src/multiarray/abstractdtypes.h | 2 ++ numpy/_core/src/umath/dispatching.c | 16 +++++++++-- numpy/_core/src/umath/dispatching.h | 1 - numpy/_core/src/umath/ufunc_object.c | 32 ++++++--------------- 4 files changed, 24 insertions(+), 27 deletions(-) diff --git a/numpy/_core/src/multiarray/abstractdtypes.h b/numpy/_core/src/multiarray/abstractdtypes.h index 4533e99b635f..b4cf1a13f673 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.h +++ b/numpy/_core/src/multiarray/abstractdtypes.h @@ -47,6 +47,8 @@ npy_mark_tmp_array_if_pyscalar( * a custom DType registered, and then we should use that. * Further, `np.float64` is a double subclass, so must reject it. */ + // TODO,NOTE: This function should be changed to do exact long checks + // For NumPy 2.1! 
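    /*
     * Hedged sketch of the exact-check TODO above (illustrative only, not
     * part of this patch): `PyLong_Check` also matches `int` subclasses,
     * whereas
     *
     *     if (PyLong_CheckExact(obj) && ...) { ... }
     *
     * would tag only true Python ints as weak. `PyLong_CheckExact` is the
     * CPython API that rejects subclasses.
     */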
if (PyLong_Check(obj) && (PyArray_ISINTEGER(arr) || PyArray_ISOBJECT(arr))) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_INT; diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index a3b9e7584434..feef130b22ec 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -47,6 +47,7 @@ #include "common.h" #include "npy_pycompat.h" +#include "arrayobject.h" #include "dispatching.h" #include "dtypemeta.h" #include "npy_hashtable.h" @@ -935,11 +936,11 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], npy_bool force_legacy_promotion, - npy_bool allow_legacy_promotion, npy_bool promoting_pyscalars, npy_bool ensure_reduce_compatible) { int nin = ufunc->nin, nargs = ufunc->nargs; + npy_bool allow_legacy_promotion = NPY_TRUE; /* * Get the actual DTypes we operate with by setting op_dtypes[i] from @@ -964,11 +965,20 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, */ Py_CLEAR(op_dtypes[i]); } + /* + * If the op_dtype ends up being a non-legacy one, then we cannot use + * legacy promotion (unless this is a python scalar). + */ + if (op_dtypes[i] != NULL && !NPY_DT_is_legacy(op_dtypes[i]) && ( + signature[i] != NULL || // signature cannot be a pyscalar + !(PyArray_FLAGS(ops[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL))) { + allow_legacy_promotion = NPY_FALSE; + } } int current_promotion_state = get_npy_promotion_state(); - if (force_legacy_promotion + if (force_legacy_promotion && allow_legacy_promotion && current_promotion_state == NPY_USE_LEGACY_PROMOTION && (ufunc->ntypes != 0 || ufunc->userloops != NULL)) { /* @@ -1032,7 +1042,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, Py_INCREF(signature[0]); return promote_and_get_ufuncimpl(ufunc, ops, signature, op_dtypes, - force_legacy_promotion, allow_legacy_promotion, + force_legacy_promotion, promoting_pyscalars, NPY_FALSE); } diff --git a/numpy/_core/src/umath/dispatching.h b/numpy/_core/src/umath/dispatching.h index c711a66688c6..9bb5fbd9b013 100644 --- a/numpy/_core/src/umath/dispatching.h +++ b/numpy/_core/src/umath/dispatching.h @@ -22,7 +22,6 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], npy_bool force_legacy_promotion, - npy_bool allow_legacy_promotion, npy_bool promote_pyscalars, npy_bool ensure_reduce_compatible); diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index f6ad33f68016..3faa67d80203 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -606,7 +606,7 @@ static int convert_ufunc_arguments(PyUFuncObject *ufunc, ufunc_full_args full_args, PyArrayObject *out_op[], PyArray_DTypeMeta *out_op_DTypes[], - npy_bool *force_legacy_promotion, npy_bool *allow_legacy_promotion, + npy_bool *force_legacy_promotion, npy_bool *promoting_pyscalars, PyObject *order_obj, NPY_ORDER *out_order, PyObject *casting_obj, NPY_CASTING *out_casting, @@ -622,7 +622,6 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, /* Convert and fill in input arguments */ npy_bool all_scalar = NPY_TRUE; npy_bool any_scalar = NPY_FALSE; - *allow_legacy_promotion = NPY_TRUE; *force_legacy_promotion = NPY_FALSE; *promoting_pyscalars = NPY_FALSE; for (int i = 0; i < nin; i++) { @@ -657,11 +656,6 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, break; } - if (!NPY_DT_is_legacy(out_op_DTypes[i])) { - *allow_legacy_promotion = NPY_FALSE; - // TODO: A subclass of int, float, complex could 
reach here and - // it should not be flagged as "weak" if it does. - } if (PyArray_NDIM(out_op[i]) == 0) { any_scalar = NPY_TRUE; } @@ -707,7 +701,7 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, *promoting_pyscalars = NPY_TRUE; } } - if (*allow_legacy_promotion && (!all_scalar && any_scalar)) { + if ((!all_scalar && any_scalar)) { *force_legacy_promotion = should_use_min_scalar(nin, out_op, 0, NULL); } @@ -2351,8 +2345,7 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, } PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc, - ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE, - NPY_FALSE, NPY_TRUE); + ops, signature, operation_DTypes, NPY_FALSE, NPY_FALSE, NPY_TRUE); if (evil_ndim_mutating_hack) { ((PyArrayObject_fields *)out)->nd = 0; } @@ -4433,13 +4426,12 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, npy_bool subok = NPY_TRUE; int keepdims = -1; /* We need to know if it was passed */ npy_bool force_legacy_promotion; - npy_bool allow_legacy_promotion; npy_bool promoting_pyscalars; if (convert_ufunc_arguments(ufunc, /* extract operand related information: */ full_args, operands, operand_DTypes, - &force_legacy_promotion, &allow_legacy_promotion, + &force_legacy_promotion, &promoting_pyscalars, /* extract general information: */ order_obj, &order, @@ -4460,7 +4452,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, */ PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc, operands, signature, - operand_DTypes, force_legacy_promotion, allow_legacy_promotion, + operand_DTypes, force_legacy_promotion, promoting_pyscalars, NPY_FALSE); if (ufuncimpl == NULL) { goto fail; @@ -5790,19 +5782,17 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) operand_DTypes[0] = NPY_DTYPE(PyArray_DESCR(op1_array)); Py_INCREF(operand_DTypes[0]); int force_legacy_promotion = 0; - int allow_legacy_promotion = NPY_DT_is_legacy(operand_DTypes[0]); if (op2_array != NULL) { tmp_operands[1] = op2_array; operand_DTypes[1] = NPY_DTYPE(PyArray_DESCR(op2_array)); Py_INCREF(operand_DTypes[1]); - allow_legacy_promotion &= NPY_DT_is_legacy(operand_DTypes[1]); tmp_operands[2] = tmp_operands[0]; operand_DTypes[2] = operand_DTypes[0]; Py_INCREF(operand_DTypes[2]); - if (allow_legacy_promotion && ((PyArray_NDIM(op1_array) == 0) - != (PyArray_NDIM(op2_array) == 0))) { + if ((PyArray_NDIM(op1_array) == 0) + != (PyArray_NDIM(op2_array) == 0)) { /* both are legacy and only one is 0-D: force legacy */ force_legacy_promotion = should_use_min_scalar(2, tmp_operands, 0, NULL); } @@ -5816,7 +5806,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) ufuncimpl = promote_and_get_ufuncimpl(ufunc, tmp_operands, signature, operand_DTypes, force_legacy_promotion, - allow_legacy_promotion, NPY_FALSE, NPY_FALSE); + NPY_FALSE, NPY_FALSE); if (ufuncimpl == NULL) { for (int i = 0; i < 3; i++) { Py_XDECREF(signature[i]); @@ -6058,7 +6048,6 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); npy_bool promoting_pyscalars = NPY_FALSE; - npy_bool allow_legacy_promotion = NPY_TRUE; if (_get_fixed_signature(ufunc, NULL, signature_obj, signature) < 0) { goto finish; @@ -6091,9 +6080,6 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, } DTypes[i] = NPY_DTYPE(descr); Py_INCREF(DTypes[i]); - if (!NPY_DT_is_legacy(DTypes[i])) { - allow_legacy_promotion = NPY_FALSE; - } } /* Explicitly allow int, float, and complex for the "weak" types. 
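 * (Illustrative hedged aside: this weak-type path is what lets a call like
 * `np.add.resolve_dtypes((np.float32, float, None))` resolve the bare
 * `float` against the float32 operand rather than forcing float64.)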
*/ else if (descr_obj == (PyObject *)&PyLong_Type) { @@ -6149,7 +6135,7 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, if (!reduction) { ufuncimpl = promote_and_get_ufuncimpl(ufunc, dummy_arrays, signature, DTypes, NPY_FALSE, - allow_legacy_promotion, promoting_pyscalars, NPY_FALSE); + promoting_pyscalars, NPY_FALSE); if (ufuncimpl == NULL) { goto finish; } From 0b723bc3b8f3c8cbd6cee81ec76ac9597752c1e0 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 18 Jun 2024 16:55:12 +0200 Subject: [PATCH 704/980] BUG: Fix string addition promoter to work with `dtype=` Also makes it reject forced unicode selection, since that doens't work. --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 30 ++++++++++++++++++++ numpy/_core/tests/test_stringdtype.py | 16 +++++++++++ 2 files changed, 46 insertions(+) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 3135f9cbf9c0..173b85d6fe3f 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1028,6 +1028,25 @@ all_strings_promoter(PyObject *NPY_UNUSED(ufunc), PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { + if (op_dtypes[0] != &PyArray_StringDType && + op_dtypes[1] != &PyArray_StringDType && + op_dtypes[2] != &PyArray_StringDType) { + /* + * This promoter was triggered with only unicode arguments, so use + * unicode. This can happen due to `dtype=` support which sets the + * output DType/signature. + */ + new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_UnicodeDType); + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_UnicodeDType); + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_UnicodeDType); + return 0; + } + if (signature[0] == &PyArray_UnicodeDType && + signature[1] == &PyArray_UnicodeDType && + signature[2] == &PyArray_UnicodeDType) { + /* Unicode forced, but didn't override a string input: invalid */ + return -1; + } new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_StringDType); new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_StringDType); @@ -2532,6 +2551,17 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } + PyArray_DTypeMeta *out_strings_promoter_dtypes[] = { + &PyArray_UnicodeDType, + &PyArray_UnicodeDType, + &PyArray_StringDType, + }; + + if (add_promoter(umath, "add", out_strings_promoter_dtypes, 3, + all_strings_promoter) < 0) { + return -1; + } + INIT_MULTIPLY(Int64, int64); INIT_MULTIPLY(UInt64, uint64); diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 228b5e949cfd..b15ef9e6dd7a 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -828,6 +828,22 @@ def test_add_promoter(string_list): assert_array_equal(op + arr, lresult) assert_array_equal(arr + op, rresult) + # The promoter should be able to handle things if users pass `dtype=` + res = np.add("hello", string_list, dtype=StringDType) + assert res.dtype == StringDType() + + # The promoter should not kick in if users override the input, + # which means arr is cast, this fails because of the unknown length. 
+ with pytest.raises(TypeError, match="cannot cast dtype"): + np.add(arr, "add", signature=("U", "U", None), casting="unsafe") + + # But it must simply reject the following: + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add(arr, "add", signature=(None, "U", None)) + + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add("a", "b", signature=("U", "U", StringDType)) + def test_add_promoter_reduce(): # Exact TypeError could change, but ensure StringDtype doesn't match From 634736935a689dc9237ccfa9f9e14a4ca3191107 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 18 Jun 2024 17:14:07 +0200 Subject: [PATCH 705/980] TST: Add regression test for bad promotion error/path --- numpy/_core/tests/test_stringdtype.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index b15ef9e6dd7a..10367d1cd88d 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -845,6 +845,15 @@ def test_add_promoter(string_list): np.add("a", "b", signature=("U", "U", StringDType)) +def test_add_no_legacy_promote_with_signature(): + # Possibly misplaced, but useful to test with string DType. We check that + # if there is clearly no loop found, a stray `dtype=` doesn't break things + # Regression test for the bad error in gh-26735 + # (If legacy promotion is gone, this can be deleted...) + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add("3", 6, dtype=StringDType) + + def test_add_promoter_reduce(): # Exact TypeError could change, but ensure StringDtype doesn't match with pytest.raises(TypeError, match="the resolved dtypes are not"): From 817c9e4c775033c8c80027f11819c25380ad7ebd Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 18 Jun 2024 20:56:31 +0200 Subject: [PATCH 706/980] STY: Apply style suggestion from review Co-authored-by: Nathan Goldbaum --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 173b85d6fe3f..ed9f62077589 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1028,9 +1028,9 @@ all_strings_promoter(PyObject *NPY_UNUSED(ufunc), PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { - if (op_dtypes[0] != &PyArray_StringDType && - op_dtypes[1] != &PyArray_StringDType && - op_dtypes[2] != &PyArray_StringDType) { + if ((op_dtypes[0] != &PyArray_StringDType && + op_dtypes[1] != &PyArray_StringDType && + op_dtypes[2] != &PyArray_StringDType)) { /* * This promoter was triggered with only unicode arguments, so use * unicode. 
This can happen due to `dtype=` support which sets the @@ -1041,9 +1041,9 @@ all_strings_promoter(PyObject *NPY_UNUSED(ufunc), new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_UnicodeDType); return 0; } - if (signature[0] == &PyArray_UnicodeDType && - signature[1] == &PyArray_UnicodeDType && - signature[2] == &PyArray_UnicodeDType) { + if ((signature[0] == &PyArray_UnicodeDType && + signature[1] == &PyArray_UnicodeDType && + signature[2] == &PyArray_UnicodeDType)) { /* Unicode forced, but didn't override a string input: invalid */ return -1; } From 1d1c0c045adacfd7872c9108ae000fba7920d8de Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 4 Jul 2024 15:30:18 +0200 Subject: [PATCH 707/980] STY: Address review comments --- numpy/_core/src/umath/dispatching.c | 14 +++++++------- numpy/_core/src/umath/ufunc_object.c | 8 ++++---- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index feef130b22ec..110e2f40ab32 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -65,7 +65,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], - npy_bool allow_legacy_promotion); + npy_bool legacy_promotion_is_possible); /** @@ -760,7 +760,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], - npy_bool allow_legacy_promotion) + npy_bool legacy_promotion_is_possible) { /* * Fetch the dispatching info which consists of the implementation and @@ -829,7 +829,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * However, we need to give the legacy implementation a chance here. * (it will modify `op_dtypes`). 
*/ - if (!allow_legacy_promotion || ufunc->type_resolver == NULL || + if (!legacy_promotion_is_possible || ufunc->type_resolver == NULL || (ufunc->ntypes == 0 && ufunc->userloops == NULL)) { /* Already tried or not a "legacy" ufunc (no loop found, return) */ return NULL; @@ -940,7 +940,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, npy_bool ensure_reduce_compatible) { int nin = ufunc->nin, nargs = ufunc->nargs; - npy_bool allow_legacy_promotion = NPY_TRUE; + npy_bool legacy_promotion_is_possible = NPY_TRUE; /* * Get the actual DTypes we operate with by setting op_dtypes[i] from @@ -972,13 +972,13 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, if (op_dtypes[i] != NULL && !NPY_DT_is_legacy(op_dtypes[i]) && ( signature[i] != NULL || // signature cannot be a pyscalar !(PyArray_FLAGS(ops[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL))) { - allow_legacy_promotion = NPY_FALSE; + legacy_promotion_is_possible = NPY_FALSE; } } int current_promotion_state = get_npy_promotion_state(); - if (force_legacy_promotion && allow_legacy_promotion + if (force_legacy_promotion && legacy_promotion_is_possible && current_promotion_state == NPY_USE_LEGACY_PROMOTION && (ufunc->ntypes != 0 || ufunc->userloops != NULL)) { /* @@ -996,7 +996,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, /* Pause warnings and always use "new" path */ set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, - ops, signature, op_dtypes, allow_legacy_promotion); + ops, signature, op_dtypes, legacy_promotion_is_possible); set_npy_promotion_state(current_promotion_state); if (info == NULL) { diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 3faa67d80203..a0acaf6573ed 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -5792,10 +5792,10 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) Py_INCREF(operand_DTypes[2]); if ((PyArray_NDIM(op1_array) == 0) - != (PyArray_NDIM(op2_array) == 0)) { - /* both are legacy and only one is 0-D: force legacy */ - force_legacy_promotion = should_use_min_scalar(2, tmp_operands, 0, NULL); - } + != (PyArray_NDIM(op2_array) == 0)) { + /* both are legacy and only one is 0-D: force legacy */ + force_legacy_promotion = should_use_min_scalar(2, tmp_operands, 0, NULL); + } } else { tmp_operands[1] = tmp_operands[0]; From a153fb2e65d4ef4f5b8e8c3491a98b300114693d Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sat, 6 Jul 2024 11:35:30 +0200 Subject: [PATCH 708/980] BUG: Make Polynomial evaluation adhere to nep 50 (#26550) * Make sure evaluation of a Polynomial respects nep50 for scalar input: ``` import numpy as np p=np.polynomial.Polynomial(np.array([1, 2], dtype=np.float32)) print(type(p(2))) # np.float64 # correct, since the domain argument is the default which maps to [-1., 1.] w=np.array([-1,1], dtype=np.float32) p=np.polynomial.Polynomial(np.array([1, 2], dtype=np.float32), domain=w, window=w) print(type(p(2))) # np.float32 (was float64 on main) ``` * Update documentation of the various polynomial classes for the updated domain and window (was changed in #24568) * Not addressed: ``` import numpy as np arr = np.polydiv(1, np.float32(1)) arr.dtype # float64 ``` The input here are polynomial coefficients, which are really an array and not a scalar. So the output type seems correct, even though `1` looks like a scalar input. 
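For reference, a short hedged sketch (not part of the patch itself) of the
new scalar fast path in `polyutils.mapdomain`:

```
import numpy as np
from numpy.polynomial import polyutils

# Plain scalars now skip np.asanyarray, so a scalar input comes back as a
# scalar (not a 0-d array) and NEP 50 promotion keeps its type:
x = np.float32(0.5)
y = polyutils.mapdomain(x, (-1.0, 1.0), (0.0, 1.0))
print(type(y))  # np.float32: the input scalar's type is preserved
```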
--- benchmarks/benchmarks/bench_polynomial.py | 29 +++++++++++++++++++++++ numpy/polynomial/chebyshev.py | 4 ++-- numpy/polynomial/hermite.py | 4 ++-- numpy/polynomial/hermite_e.py | 4 ++-- numpy/polynomial/laguerre.py | 4 ++-- numpy/polynomial/legendre.py | 4 ++-- numpy/polynomial/polynomial.py | 4 ++-- numpy/polynomial/polyutils.py | 3 ++- numpy/polynomial/tests/test_polynomial.py | 9 +++++++ 9 files changed, 52 insertions(+), 13 deletions(-) create mode 100644 benchmarks/benchmarks/bench_polynomial.py diff --git a/benchmarks/benchmarks/bench_polynomial.py b/benchmarks/benchmarks/bench_polynomial.py new file mode 100644 index 000000000000..ab2e95b7d1ab --- /dev/null +++ b/benchmarks/benchmarks/bench_polynomial.py @@ -0,0 +1,29 @@ +from .common import Benchmark + +import numpy as np + + +class Polynomial(Benchmark): + + def setup(self): + self.polynomial_degree2 = np.polynomial.Polynomial(np.array([1, 2])) + self.array3 = np.linspace(0, 1, 3) + self.array1000 = np.linspace(0, 1, 10_000) + self.float64 = np.float64(1.0) + + def time_polynomial_evaluation_scalar(self): + self.polynomial_degree2(self.float64) + + def time_polynomial_evaluation_python_float(self): + self.polynomial_degree2(1.0) + + def time_polynomial_evaluation_array_3(self): + self.polynomial_degree2(self.array3) + + def time_polynomial_evaluation_array_1000(self): + self.polynomial_degree2(self.array1000) + + def time_polynomial_addition(self): + _ = self.polynomial_degree2 + self.polynomial_degree2 + + diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index e7ac1404d343..66fe7d60c040 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -2011,9 +2011,9 @@ class Chebyshev(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. + Window, see `domain` for its use. The default value is [-1., 1.]. .. versionadded:: 1.6.0 symbol : str, optional diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 58d18cb0d88c..3c995909c742 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -1756,9 +1756,9 @@ class Hermite(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. + Window, see `domain` for its use. The default value is [-1., 1.]. .. versionadded:: 1.6.0 symbol : str, optional diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index e7fe1233cd14..0dc090bc3e3a 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -1667,9 +1667,9 @@ class HermiteE(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. + Window, see `domain` for its use. The default value is [-1., 1.]. .. 
versionadded:: 1.6.0 symbol : str, optional diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index b0de7d9bce35..c3df02b6d99b 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -1688,9 +1688,9 @@ class Laguerre(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [0, 1]. + The default value is [0., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [0, 1]. + Window, see `domain` for its use. The default value is [0., 1.]. .. versionadded:: 1.6.0 symbol : str, optional diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index ded9e7821891..d54a852c5a80 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -1632,9 +1632,9 @@ class Legendre(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. + Window, see `domain` for its use. The default value is [-1., 1.]. .. versionadded:: 1.6.0 symbol : str, optional diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 7b78005fa396..3c1b09b43237 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1601,9 +1601,9 @@ class Polynomial(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. + Window, see `domain` for its use. The default value is [-1., 1.]. .. 
versionadded:: 1.6.0 symbol : str, optional diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index 54ffe5937e8c..505f6f4aedd2 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -346,7 +346,8 @@ def mapdomain(x, old, new): array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary """ - x = np.asanyarray(x) + if type(x) not in (int, float, complex) and not isinstance(x, np.generic): + x = np.asanyarray(x) off, scl = mapparms(old, new) return off + scl*x diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index b761668a3b82..a0be94c3a6a0 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -627,3 +627,12 @@ def test_polyline(self): def test_polyline_zero(self): assert_equal(poly.polyline(3, 0), [3]) + + def test_result_type(self): + w = np.array([-1, 1], dtype=np.float32) + p = np.polynomial.Polynomial(w, domain=w, window=w) + v = p(2) + assert_equal(v.dtype, np.float32) + + arr = np.polydiv(1, np.float32(1)) + assert_equal(arr[0].dtype, np.float64) From 173f614fe5aabee4f1f4e91bdb0dfede5c214c12 Mon Sep 17 00:00:00 2001 From: Yuki K Date: Sat, 6 Jul 2024 13:07:51 +0000 Subject: [PATCH 709/980] DOC: Fix small incorrect markup [skip cirrus] [skip azp] [skip actions] --- doc/source/reference/c-api/iterator.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/c-api/iterator.rst b/doc/source/reference/c-api/iterator.rst index 71bf44f4b239..50fbec96392a 100644 --- a/doc/source/reference/c-api/iterator.rst +++ b/doc/source/reference/c-api/iterator.rst @@ -715,7 +715,7 @@ Construction and destruction may not be repeated. The following example is how normal broadcasting applies to a 3-D array, a 2-D array, a 1-D array and a scalar. - **Note**: Before NumPy 1.8 ``oa_ndim == 0` was used for signalling + **Note**: Before NumPy 1.8 ``oa_ndim == 0`` was used for signalling that ``op_axes`` and ``itershape`` are unused. This is deprecated and should be replaced with -1. Better backward compatibility may be achieved by using :c:func:`NpyIter_MultiNew` for this case. From 34d1704ce12316f436178c6903b46e293bbd1f21 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sat, 6 Jul 2024 15:31:02 +0200 Subject: [PATCH 710/980] DOC: fix a couple typos found by codespell in NEPs --- doc/neps/nep-0055-string_dtype.rst | 2 +- doc/neps/nep-0056-array-api-main-namespace.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst index 2e3f3cbf03c4..7e29e1425e8c 100644 --- a/doc/neps/nep-0055-string_dtype.rst +++ b/doc/neps/nep-0055-string_dtype.rst @@ -990,7 +990,7 @@ in the array buffer as a short string. No matter where it is stored, once a string is initialized it is marked with the ``NPY_STRING_INITIALIZED`` flag. This lets us clearly distinguish between an -unitialized empty string and a string that has been mutated into the empty +uninitialized empty string and a string that has been mutated into the empty string. 
The size of the allocation is stored in the arena to allow reuse of the arena diff --git a/doc/neps/nep-0056-array-api-main-namespace.rst b/doc/neps/nep-0056-array-api-main-namespace.rst index 5fb8ad250a81..41e070444e81 100644 --- a/doc/neps/nep-0056-array-api-main-namespace.rst +++ b/doc/neps/nep-0056-array-api-main-namespace.rst @@ -302,7 +302,7 @@ three types of behavior rather than two - ``copy=None`` means "copy if needed". an exception because they use* ``copy=False`` *explicitly in their copy but a copy was previously made anyway, they have to inspect their code and determine whether the intent of the code was the old or the new semantics (both seem -rougly equally likely), and adapt the code as appropriate. We expect most cases +roughly equally likely), and adapt the code as appropriate. We expect most cases to be* ``np.array(..., copy=False)``, *because until a few years ago that had lower overhead than* ``np.asarray(...)``. *This was solved though, and* ``np.asarray(...)`` *is idiomatic NumPy usage.* From c7a9216878ce171572c7ca3857086d9d1338b851 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sat, 6 Jul 2024 15:36:09 +0200 Subject: [PATCH 711/980] DOC: fix typos found by codespell in documentation --- doc/Makefile | 2 +- doc/source/numpy_2_0_migration_guide.rst | 2 +- doc/source/reference/array_api.rst | 2 +- doc/source/reference/c-api/array.rst | 4 ++-- doc/source/release/2.0.0-notes.rst | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/Makefile b/doc/Makefile index eccd40b1adef..57d063e9c936 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -17,7 +17,7 @@ PAPER ?= DOXYGEN ?= doxygen # For merging a documentation archive into a git checkout of numpy/doc # Turn a tag like v1.18.0 into 1.18 -# Use sed -n -e 's/patttern/match/p' to return a blank value if no match +# Use sed -n -e 's/pattern/match/p' to return a blank value if no match TAG ?= $(shell git describe --tag | sed -n -e's,v\([1-9]\.[0-9]*\)\.[0-9].*,\1,p') FILES= diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 1b588f012d0e..665da641f237 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -213,7 +213,7 @@ have been added for setting the real or imaginary part. The underlying type remains a struct under C++ (all of the above still remains valid). -This has implications for Cython. It is recommened to always use the native +This has implications for Cython. It is recommended to always use the native typedefs ``cfloat_t``, ``cdouble_t``, ``clongdouble_t`` rather than the NumPy types ``npy_cfloat``, etc, unless you have to interface with C code written using the NumPy types. You can still write cython code using the ``c.real`` and diff --git a/doc/source/reference/array_api.rst b/doc/source/reference/array_api.rst index c11d8e5eb9d2..69b51215e555 100644 --- a/doc/source/reference/array_api.rst +++ b/doc/source/reference/array_api.rst @@ -18,7 +18,7 @@ upgraded to given NumPy's For usage guidelines for downstream libraries and end users who want to write code that will work with both NumPy and other array libraries, we refer to the documentation of the array API standard itself and to code and -developer-focused documention in SciPy and scikit-learn. +developer-focused documentation in SciPy and scikit-learn. 
Note that in order to use standard-complaint code with older NumPy versions (< 2.0), the `array-api-compat diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 167bdb7d49ac..698e7586c52f 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -823,7 +823,7 @@ cannot not be accessed directly. .. c:function:: PyArray_ArrayDescr *PyDataType_SUBARRAY(PyArray_Descr *descr) - Information about a subarray dtype eqivalent to the Python `np.dtype.base` + Information about a subarray dtype equivalent to the Python `np.dtype.base` and `np.dtype.shape`. If this is non- ``NULL``, then this data-type descriptor is a @@ -3975,7 +3975,7 @@ the C-API is needed then some additional steps must be taken. behavior as NumPy 1.x. .. note:: - Windows never had shared visbility although you can use this macro + Windows never had shared visibility although you can use this macro to achieve it. We generally discourage sharing beyond shared boundary lines since importing the array API includes NumPy version checks. diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst index e711b130f813..9ea3e55fd504 100644 --- a/doc/source/release/2.0.0-notes.rst +++ b/doc/source/release/2.0.0-notes.rst @@ -653,7 +653,7 @@ The ``metadata`` field is kept, but the macro version should also be preferred. Descriptor ``elsize`` and ``alignment`` access ---------------------------------------------- -Unless compiling only with NumPy 2 support, the ``elsize`` and ``aligment`` +Unless compiling only with NumPy 2 support, the ``elsize`` and ``alignment`` fields must now be accessed via ``PyDataType_ELSIZE``, ``PyDataType_SET_ELSIZE``, and ``PyDataType_ALIGNMENT``. In cases where the descriptor is attached to an array, we advise From d7f8f1618b1910d636c9018580ab9d027f3aa83c Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sat, 6 Jul 2024 15:47:48 +0200 Subject: [PATCH 712/980] MAINT: fix typos found by codespell --- numpy/_core/include/numpy/ndarraytypes.h | 2 +- numpy/_core/include/numpy/npy_2_compat.h | 2 +- numpy/_core/src/_simd/_simd_vector.inc | 2 +- numpy/_core/src/multiarray/array_coercion.c | 2 +- numpy/_core/src/multiarray/common_dtype.c | 2 +- numpy/_core/src/multiarray/dtypemeta.c | 2 +- numpy/_core/src/multiarray/multiarraymodule.h | 2 +- numpy/_core/src/umath/special_integer_comparisons.cpp | 2 +- numpy/_core/strings.py | 2 +- numpy/_core/tests/test_dtype.py | 2 +- numpy/_core/tests/test_function_base.py | 2 +- numpy/_core/tests/test_numerictypes.py | 2 +- numpy/_core/tests/test_umath.py | 2 +- numpy/distutils/misc_util.py | 2 +- numpy/lib/_polynomial_impl.py | 2 +- numpy/linalg/_linalg.py | 2 +- tools/wheels/cibw_test_command.sh | 2 +- 17 files changed, 17 insertions(+), 17 deletions(-) diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 95821b0baff2..573f26938d87 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -1302,7 +1302,7 @@ typedef struct { PyArrayIterObject *iters[64]; #elif defined(__cplusplus) /* - * C++ doesn't stricly support flexible members and gives compilers + * C++ doesn't strictly support flexible members and gives compilers * warnings (pedantic only), so we lie. We can't make it 64 because * then Cython is unhappy (larger struct at runtime is OK smaller not). 
*/ diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h index e5499d04bd2b..80bb4088c812 100644 --- a/numpy/_core/include/numpy/npy_2_compat.h +++ b/numpy/_core/include/numpy/npy_2_compat.h @@ -53,7 +53,7 @@ #if NPY_ABI_VERSION < 0x02000000 /* * Define 2.0 feature version as it is needed below to decide whether we - * compile for both 1.x and 2.x (defining it gaurantees 1.x only). + * compile for both 1.x and 2.x (defining it guarantees 1.x only). */ #define NPY_2_0_API_VERSION 0x00000012 /* diff --git a/numpy/_core/src/_simd/_simd_vector.inc b/numpy/_core/src/_simd/_simd_vector.inc index 3d0c15375074..4911402bc568 100644 --- a/numpy/_core/src/_simd/_simd_vector.inc +++ b/numpy/_core/src/_simd/_simd_vector.inc @@ -92,7 +92,7 @@ static PyTypeObject PySIMDVectorType = { * miss-align load variable of 256/512-bit vector from non-aligned * 256/512-bit stack pointer. * - * check the following links for more clearification: + * check the following links for more clarification: * https://github.com/numpy/numpy/pull/18330#issuecomment-821539919 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49001 */ diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index 51aa874bf934..69da09875bfb 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -1185,7 +1185,7 @@ PyArray_DiscoverDTypeAndShape_Recursive( } /* - * For a sequence we need to make a copy of the final aggreate anyway. + * For a sequence we need to make a copy of the final aggregate anyway. * There's no need to pass explicit `copy=True`, so we switch * to `copy=None` (copy if needed). */ diff --git a/numpy/_core/src/multiarray/common_dtype.c b/numpy/_core/src/multiarray/common_dtype.c index beba6acef149..a65aba060a55 100644 --- a/numpy/_core/src/multiarray/common_dtype.c +++ b/numpy/_core/src/multiarray/common_dtype.c @@ -132,7 +132,7 @@ reduce_dtypes_to_most_knowledgeable( } if (res == (PyArray_DTypeMeta *)Py_NotImplemented) { - /* guess at other being more "knowledgable" */ + /* guess at other being more "knowledgeable" */ PyArray_DTypeMeta *tmp = dtypes[low]; dtypes[low] = dtypes[high]; dtypes[high] = tmp; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 87a69d8348c1..a8ba51fa6e06 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -1403,7 +1403,7 @@ PyArray_DTypeMeta *_Void_dtype = NULL; * This function is exposed with an underscore "privately" because the * public version is a static inline function which only calls the function * on 2.x but directly accesses the `descr` struct on 1.x. - * Once 1.x backwards compatibility is gone, it shoudl be exported without + * Once 1.x backwards compatibility is gone, it should be exported without * the underscore directly. * Internally, we define a private inline function `PyDataType_GetArrFuncs` * for convenience as we are allowed to access the `DType` slots directly. 
diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 218dc601613a..25dc0b54f1e3 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -23,7 +23,7 @@ typedef struct npy_thread_unsafe_state_struct { * PyObject *value; * } * - * so the initialization is thread-safe and the only possibile lock + * so the initialization is thread-safe and the only possible lock * contention happens before the cache is initialized, not on every single * read. */ diff --git a/numpy/_core/src/umath/special_integer_comparisons.cpp b/numpy/_core/src/umath/special_integer_comparisons.cpp index 05026be96e67..06babeeda0a8 100644 --- a/numpy/_core/src/umath/special_integer_comparisons.cpp +++ b/numpy/_core/src/umath/special_integer_comparisons.cpp @@ -293,7 +293,7 @@ get_loop(PyArrayMethod_Context *context, /* - * Machinery to add the python integer to NumPy intger comparsisons as well + * Machinery to add the python integer to NumPy integer comparsisons as well * as a special promotion to special case Python int with Python int * comparisons. */ diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 83034705f525..ebdbdb402d98 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -63,7 +63,7 @@ # _vec_string - Will probably not become ufuncs "mod", "decode", "encode", "translate", - # Removed from namespace until behavior has been crystalized + # Removed from namespace until behavior has been crystallized # "join", "split", "rsplit", "splitlines", ] diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 73e02a84e2e8..869183956f78 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1514,7 +1514,7 @@ def test_python_integer_promotion(self, val): @np._no_nep50_warning() def test_float_int_pyscalar_promote_rational( self, weak_promotion, other, expected): - # Note that rationals are a bit akward as they promote with float64 + # Note that rationals are a bit awkward as they promote with float64 # or default ints, but not float16 or uint8/int8 (which looks # inconsistent here). The new promotion fixes this (partially?) 
if not weak_promotion and type(other) == float: diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index bebc7c52e9df..333943212646 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -223,7 +223,7 @@ def test_complex(self): assert_allclose(y, [-5, 3j]) def test_complex_shortest_path(self): - # test the shortest logorithmic spiral is used, see gh-25644 + # test the shortest logarithmic spiral is used, see gh-25644 x = 1.2 + 3.4j y = np.exp(1j*(np.pi-.1)) * x z = np.geomspace(x, y, 5) diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index f09622e422a1..502f1849e69a 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -476,7 +476,7 @@ def test_isdtype_invalid_args(self): np.isdtype(np.int64, "int64") def test_sctypes_complete(self): - # issue 26439: int32/intc were masking eachother on 32-bit builds + # issue 26439: int32/intc were masking each other on 32-bit builds assert np.int32 in sctypes['int'] assert np.intc in sctypes['int'] assert np.int64 in sctypes['int'] diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index df8ec07dc3f5..0941a522ab64 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -2961,7 +2961,7 @@ def test_lower_align(self): def test_reduce_reorder(self): # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus - # and put it before the call to an intrisic function that causes + # and put it before the call to an intrinsic function that causes # invalid status to be set. Also make sure warnings are not emitted for n in (2, 4, 8, 16, 32): for dt in (np.float32, np.float16, np.complex64): diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index 776eb8d3928b..4495dfb56d39 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -1420,7 +1420,7 @@ def paths(self,*paths,**kws): """Apply glob to paths and prepend local_path if needed. Applies glob.glob(...) to each path in the sequence (if needed) and - pre-pends the local_path if needed. Because this is called on all + prepends the local_path if needed. Because this is called on all source lists, this allows wildcard characters to be specified in lists of sources for extension modules and libraries and scripts and allows path-names be relative to the source directory. diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index 63c12f438240..2f3a09dd6724 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -675,7 +675,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): raise ValueError("the number of data points must exceed order " "to scale the covariance matrix") # note, this used to be: fac = resids / (len(x) - order - 2.0) - # it was deciced that the "- 2" (originally justified by "Bayesian + # it was decided that the "- 2" (originally justified by "Bayesian # uncertainty analysis") is not what the user expects # (see gh-11196 and gh-11197) fac = resids / (len(x) - order) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 5a6e65c4ce1f..46ad8b5db374 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -3032,7 +3032,7 @@ def _multi_dot_three(A, B, C, out=None): def _multi_dot_matrix_chain_order(arrays, return_costs=False): """ - Return a np.array that encodes the optimal order of mutiplications. 
+ Return a np.array that encodes the optimal order of multiplications. The optimal order array is then used by `_multi_dot()` to do the multiplication. diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index e0962d4b36e5..9a462de2a684 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -1,5 +1,5 @@ # This script is used by .github/workflows/wheels.yml to run the full test -# suite, checks for lincense inclusion and that the openblas version is correct. +# suite, checks for license inclusion and that the openblas version is correct. set -xe PROJECT_DIR="$1" From 549e3d6436a962d8ab8d45eb132e5b00a83154ad Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 4 Jul 2024 14:27:46 +0200 Subject: [PATCH 713/980] MAINT: apply ruff/flake8-simplify rule SIM115 SIM115 Use context handler for opening files --- tools/c_coverage/c_coverage_report.py | 65 +++++++++++++-------------- 1 file changed, 31 insertions(+), 34 deletions(-) diff --git a/tools/c_coverage/c_coverage_report.py b/tools/c_coverage/c_coverage_report.py index bd3eeaee9776..2e5a4c270376 100755 --- a/tools/c_coverage/c_coverage_report.py +++ b/tools/c_coverage/c_coverage_report.py @@ -54,25 +54,23 @@ def mark_line(self, lineno, as_func=None): line.add(as_func) def write_text(self, fd): - source = open(self.path, "r") - for i, line in enumerate(source): - if i + 1 in self.lines: - fd.write("> ") - else: - fd.write("! ") - fd.write(line) - source.close() + with open(self.path, "r") as source: + for i, line in enumerate(source): + if i + 1 in self.lines: + fd.write("> ") + else: + fd.write("! ") + fd.write(line) def write_html(self, fd): - source = open(self.path, 'r') - code = source.read() - lexer = CLexer() - formatter = FunctionHtmlFormatter( - self.lines, - full=True, - linenos='inline') - fd.write(highlight(code, lexer, formatter)) - source.close() + with open(self.path, 'r') as source: + code = source.read() + lexer = CLexer() + formatter = FunctionHtmlFormatter( + self.lines, + full=True, + linenos='inline') + fd.write(highlight(code, lexer, formatter)) class SourceFiles: @@ -95,24 +93,24 @@ def clean_path(self, path): def write_text(self, root): for path, source in self.files.items(): - fd = open(os.path.join(root, self.clean_path(path)), "w") - source.write_text(fd) - fd.close() + with open(os.path.join(root, self.clean_path(path)), "w") as fd: + source.write_text(fd) def write_html(self, root): for path, source in self.files.items(): - fd = open(os.path.join(root, self.clean_path(path) + ".html"), "w") - source.write_html(fd) - fd.close() + with open( + os.path.join(root, self.clean_path(path) + ".html"), "w" + ) as fd: + source.write_html(fd) - fd = open(os.path.join(root, 'index.html'), 'w') - fd.write("") - paths = sorted(self.files.keys()) - for path in paths: - fd.write('
<p><a href="%s.html">%s</a></p>
' % - (self.clean_path(path), escape(path[len(self.prefix):]))) - fd.write("</body></html>") - fd.close() + with open(os.path.join(root, 'index.html'), 'w') as fd: + fd.write("<html><body>") + paths = sorted(self.files.keys()) + for path in paths: + fd.write('
<p><a href="%s.html">%s</a></p>
' % + (self.clean_path(path), + escape(path[len(self.prefix):]))) + fd.write("") def collect_stats(files, fd, pattern): @@ -164,9 +162,8 @@ def collect_stats(files, fd, pattern): files = SourceFiles() for log_file in args.callgrind_file: - log_fd = open(log_file, 'r') - collect_stats(files, log_fd, args.pattern) - log_fd.close() + with open(log_file, 'r') as log_fd: + collect_stats(files, log_fd, args.pattern) if not os.path.exists(args.directory): os.makedirs(args.directory) From 286606ca9e48baac7343ee0ba41fa7cdf5613134 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 4 Jul 2024 15:23:31 +0200 Subject: [PATCH 714/980] MAINT: apply ruff/flake8-simplify rule SIM103 SIM103 Return the condition `...` directly --- numpy/_core/_dtype.py | 4 +--- numpy/_core/numerictypes.py | 6 ++---- numpy/f2py/tests/util.py | 5 +---- numpy/tests/test_public_api.py | 3 +-- 4 files changed, 5 insertions(+), 13 deletions(-) diff --git a/numpy/_core/_dtype.py b/numpy/_core/_dtype.py index 328a0e3959f3..ee9b96590263 100644 --- a/numpy/_core/_dtype.py +++ b/numpy/_core/_dtype.py @@ -277,9 +277,7 @@ def _is_packed(dtype): if align: total_offset = _aligned_offset(total_offset, max_alignment) - if total_offset != dtype.itemsize: - return False - return True + return total_offset == dtype.itemsize def _struct_list_str(dtype): diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index ac52cff49db2..9530b168d859 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -223,12 +223,10 @@ def issctype(rep): return False try: res = obj2sctype(rep) - if res and res != object_: - return True - return False except Exception: return False - + else: + return res and res != object_ @set_module('numpy') def obj2sctype(rep, default=None): diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index c2258791e6d9..c9c2201962b5 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -221,10 +221,7 @@ def check_language(lang, code_snippet=None): stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) - if runmeson.returncode == 0: - return True - else: - return False + return runmeson.returncode == 0 finally: shutil.rmtree(tmpdir) return False diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 618223705937..eb96560b9c9a 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -614,8 +614,7 @@ def test_functions_single_location(): # else check if we got a function-like object elif ( inspect.isfunction(member) or - isinstance(member, dispatched_function) or - isinstance(member, np.ufunc) + isinstance(member, (dispatched_function, np.ufunc)) ): if member in visited_functions: From 961d0b6f5ce44e25abe3c5e3a8b6ad7e62f77064 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 4 Jul 2024 15:33:10 +0200 Subject: [PATCH 715/980] MAINT: apply ruff/flake8-simplify rule SIM113 SIM113 Use `enumerate()` for index variable in `for` loop --- numpy/_core/code_generators/generate_umath.py | 5 +---- numpy/matrixlib/defmatrix.py | 4 +--- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index 06871a44b37f..72cde8af63c5 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1376,10 +1376,9 @@ def make_arrays(funcdict): funclist = [] datalist = [] 
siglist = [] - k = 0 sub = 0 - for t in uf.type_descriptions: + for k, t in enumerate(uf.type_descriptions): cfunc_alias = t.cfunc_alias if t.cfunc_alias else name cfunc_fname = None if t.func_data is FullTypeDescr: @@ -1439,8 +1438,6 @@ def make_arrays(funcdict): for x in t.in_ + t.out: siglist.append('NPY_%s' % (english_upper(chartoname[x]),)) - k += 1 - if funclist or siglist or datalist: funcnames = ', '.join(funclist) signames = ', '.join(siglist) diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 866f867c8eaa..6feda68025a6 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -18,8 +18,7 @@ def _convert_from_string(data): rows = data.split(';') newdata = [] - count = 0 - for row in rows: + for count, row in enumerate(rows): trow = row.split(',') newrow = [] for col in trow: @@ -29,7 +28,6 @@ def _convert_from_string(data): Ncols = len(newrow) elif len(newrow) != Ncols: raise ValueError("Rows not the same size.") - count += 1 newdata.append(newrow) return newdata From 4c3592db336fb922d9d79157ea67628284d524a7 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 4 Jul 2024 15:35:04 +0200 Subject: [PATCH 716/980] MAINT: apply ruff/flake8-simplify rule SIM910 SIM910 Use `.get(...)` instead of `.get(..., None)` --- numpy/lib/_type_check_impl.py | 2 +- numpy/ma/core.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 2e4ef4e6954a..3dab464f5023 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -689,7 +689,7 @@ def common_type(*arrays): if issubclass(t, _nx.integer): p = 2 # array_precision[_nx.double] else: - p = array_precision.get(t, None) + p = array_precision.get(t) if p is None: raise TypeError("can't get common type for non-numeric array") precision = max(precision, p) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 453c63614d2e..859806238ff7 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -3117,7 +3117,7 @@ def __array_wrap__(self, obj, context=None, return_scalar=False): input_args = args[:func.nin] m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) # Get the domain mask - domain = ufunc_domain.get(func, None) + domain = ufunc_domain.get(func) if domain is not None: # Take the domain, and make sure it's a ndarray with np.errstate(divide='ignore', invalid='ignore'): From 1557a3005fdfcb568742c8341c9b0512db5dd41c Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 4 Jul 2024 15:37:40 +0200 Subject: [PATCH 717/980] MAINT: apply ruff/flake8-simplify rule SIM110 SIM110 Use `return all(...)` instead of `for` loop SIM110 Use `return any(...)` instead of `for` loop --- numpy/_core/tests/test_cpu_features.py | 5 +---- numpy/distutils/extension.py | 10 ++-------- numpy/distutils/misc_util.py | 15 +++------------ numpy/f2py/tests/test_array_from_pyobj.py | 5 +---- numpy/lib/_datasource.py | 5 +---- numpy/lib/_iotools.py | 5 +---- 6 files changed, 9 insertions(+), 36 deletions(-) diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index f4a85c54ca6a..35d81005cfc1 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -85,10 +85,7 @@ def cpu_have(self, feature_name): map_names = self.features_map.get(feature_name, feature_name) if isinstance(map_names, str): return map_names in 
self.features_flags - for f in map_names: - if f in self.features_flags: - return True - return False + return any(f in self.features_flags for f in map_names) def load_flags_cpuinfo(self, magic_key): self.features_flags = self.get_cpuinfo_item(magic_key) diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py index 3ede013e0f3c..06e6441e65df 100644 --- a/numpy/distutils/extension.py +++ b/numpy/distutils/extension.py @@ -93,15 +93,9 @@ def __init__( return def has_cxx_sources(self): - for source in self.sources: - if cxx_ext_re(str(source)): - return True - return False + return any(cxx_ext_re(str(source)) for source in self.sources) def has_f2py_sources(self): - for source in self.sources: - if fortran_pyf_ext_re(source): - return True - return False + return any(fortran_pyf_ext_re(source) for source in self.sources) # class Extension diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index 776eb8d3928b..cbf889d2daf1 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -489,10 +489,7 @@ def is_string(s): def all_strings(lst): """Return True if all items in lst are string objects. """ - for item in lst: - if not is_string(item): - return False - return True + return all(is_string(item) for item in lst) def is_sequence(seq): if is_string(seq): @@ -527,17 +524,11 @@ def get_language(sources): def has_f_sources(sources): """Return True if sources contains Fortran files """ - for source in sources: - if fortran_ext_match(source): - return True - return False + return any(fortran_ext_match(source) for source in sources) def has_cxx_sources(sources): """Return True if sources contains C++ files """ - for source in sources: - if cxx_ext_match(source): - return True - return False + return any(cxx_ext_match(source) for source in sources) def filter_sources(sources): """Return four lists of filenames containing diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index d5ae235e7d82..c10fe75a04cf 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -90,10 +90,7 @@ def __repr__(self): return "Intent(%r)" % (self.intent_list) def is_intent(self, *names): - for name in names: - if name not in self.intent_list: - return False - return True + return all(name in self.intent_list for name in names) def is_intent_exact(self, *names): return len(self.intent_list) == len(names) and self.is_intent(*names) diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 9b455513ac89..2ee277f0d421 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -271,10 +271,7 @@ def _iswritemode(self, mode): # Currently only used to test the bz2 files. _writemodes = ("w", "+") - for c in mode: - if c in _writemodes: - return True - return False + return any(c in _writemodes for c in mode) def _splitzipext(self, filename): """Split zip extension from filename and return filename. 
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index a38b0017ee5d..93dceaef9dd3 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -77,10 +77,7 @@ def has_nested_fields(ndtype): False """ - for name in ndtype.names or (): - if ndtype[name].names is not None: - return True - return False + return any(ndtype[name].names is not None for name in ndtype.names or ()) def flatten_dtype(ndtype, flatten_base=False): From 811148581e801c8ae8e9201965226ba7294300e2 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 4 Jul 2024 15:39:50 +0200 Subject: [PATCH 718/980] MAINT: apply ruff/flake8-simplify rule SIM210 SIM210 Remove unnecessary `True if ... else False` --- numpy/lib/_arraypad_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_arraypad_impl.py b/numpy/lib/_arraypad_impl.py index af6c4da4c3b7..1d900a0dc376 100644 --- a/numpy/lib/_arraypad_impl.py +++ b/numpy/lib/_arraypad_impl.py @@ -860,7 +860,7 @@ def pad(array, pad_width, mode='constant', **kwargs): elif mode in {"reflect", "symmetric"}: method = kwargs.get("reflect_type", "even") - include_edge = True if mode == "symmetric" else False + include_edge = mode == "symmetric" for axis, (left_index, right_index) in zip(axes, pad_width): if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): # Extending singleton dimension for 'reflect' is legacy From 1d56d38359463c6d3b56f5f1a4eecc9b7a6d30c4 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sat, 6 Jul 2024 16:42:07 +0200 Subject: [PATCH 719/980] MAINT: apply ruff/pyupgrade rule UP004 UP004 Class inherits from `object` --- numpy/_core/tests/test_multiarray.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 85534fa4dd2f..54a10c4a30e3 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -6886,7 +6886,7 @@ def test_huge_vectordot(self, dtype): def test_dtype_discovery_fails(self): # See gh-14247, error checking was missing for failed dtype discovery - class BadObject(object): + class BadObject: def __array__(self, dtype=None, copy=None): raise TypeError("just this tiny mint leaf") @@ -8356,7 +8356,7 @@ def test_no_suboffsets(self): np.frombuffer(buffer) -class TestArrayCreationCopyArgument(object): +class TestArrayCreationCopyArgument: class RaiseOnBool: From 3d54568770bbcf2a69a3e100b88c2947869a9d18 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sat, 6 Jul 2024 16:44:07 +0200 Subject: [PATCH 720/980] MAINT: apply ruff/pyupgrade rule UP022 UP022 Prefer `capture_output` over sending `stdout` and `stderr` to `PIPE` --- numpy/f2py/tests/util.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index c2258791e6d9..6ee3abd81aa7 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -218,8 +218,7 @@ def check_language(lang, code_snippet=None): ["meson", "setup", "btmp"], check=False, cwd=tmpdir, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + capture_output=True, ) if runmeson.returncode == 0: return True From 6155ebc65d09d000a55329bc0041d2b652bd9211 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sat, 6 Jul 2024 16:45:18 +0200 Subject: [PATCH 
721/980] MAINT: apply ruff/pyupgrade rule UP034 UP034 Avoid extraneous parentheses --- benchmarks/benchmarks/bench_ufunc.py | 4 ++-- numpy/_core/tests/test_indexing.py | 14 +++++++------- numpy/_core/tests/test_multiarray.py | 6 +++--- numpy/_core/tests/test_records.py | 6 +++--- numpy/_core/tests/test_regression.py | 4 ++-- numpy/_core/tests/test_shape_base.py | 6 +++--- numpy/_core/tests/test_stringdtype.py | 4 ++-- numpy/f2py/auxfuncs.py | 4 ++-- numpy/f2py/crackfortran.py | 10 +++++----- numpy/f2py/symbolic.py | 4 ++-- numpy/lib/tests/test_loadtxt.py | 12 ++++++------ numpy/lib/tests/test_nanfunctions.py | 2 +- numpy/lib/tests/test_shape_base.py | 4 ++-- numpy/ma/tests/test_mrecords.py | 2 +- numpy/polynomial/polyutils.py | 2 +- numpy/random/tests/test_smoke.py | 6 +++--- 16 files changed, 45 insertions(+), 45 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index b7f711b9c58a..3545d939cf36 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -31,7 +31,7 @@ all_ufuncs = (getattr(np, name, None) for name in dir(np)) all_ufuncs = set(filter(lambda f: isinstance(f, np.ufunc), all_ufuncs)) -bench_ufuncs = set((getattr(np, name, None) for name in ufuncs)) +bench_ufuncs = set(getattr(np, name, None) for name in ufuncs) missing_ufuncs = all_ufuncs - bench_ufuncs if len(missing_ufuncs) > 0: @@ -497,7 +497,7 @@ def time_floor_divide_int(self, dtype, size): class Scalar(Benchmark): def setup(self): self.x = np.asarray(1.0) - self.y = np.asarray((1.0 + 1j)) + self.y = np.asarray(1.0 + 1j) self.z = complex(1.0, 1.0) def time_add_scalar(self): diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 9611f75221d2..686caf9c7822 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -983,7 +983,7 @@ def _get_multi_index(self, arr, indices): elif indx is None: # this is like taking a slice with one element from a new axis: indices.append(['n', np.array([0], dtype=np.intp)]) - arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:])) + arr = arr.reshape(arr.shape[:ax] + (1,) + arr.shape[ax:]) continue if isinstance(indx, np.ndarray) and indx.dtype == bool: if indx.shape != arr.shape[ax:ax+indx.ndim]: @@ -998,9 +998,9 @@ def _get_multi_index(self, arr, indices): flat_indx = np.array([0]*indx.sum(), dtype=np.intp) # concatenate axis into a single one: if indx.ndim != 0: - arr = arr.reshape((arr.shape[:ax] + arr = arr.reshape(arr.shape[:ax] + (np.prod(arr.shape[ax:ax+indx.ndim]),) - + arr.shape[ax+indx.ndim:])) + + arr.shape[ax+indx.ndim:]) indx = flat_indx else: # This could be changed, a 0-d boolean index can @@ -1067,9 +1067,9 @@ def _get_multi_index(self, arr, indices): # First of all, reshape arr to combine fancy axes into one: orig_shape = arr.shape orig_slice = orig_shape[ax:ax + len(indx[1:])] - arr = arr.reshape((arr.shape[:ax] + arr = arr.reshape(arr.shape[:ax] + (np.prod(orig_slice).astype(int),) - + arr.shape[ax + len(indx[1:]):])) + + arr.shape[ax + len(indx[1:]):]) # Check if broadcasting works res = np.broadcast(*indx[1:]) @@ -1103,9 +1103,9 @@ def _get_multi_index(self, arr, indices): raise ValueError arr = arr.take(mi.ravel(), axis=ax) try: - arr = arr.reshape((arr.shape[:ax] + arr = arr.reshape(arr.shape[:ax] + mi.shape - + arr.shape[ax+1:])) + + arr.shape[ax+1:]) except ValueError: # too many dimensions, probably raise IndexError diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py 
index 54a10c4a30e3..913055f8717c 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -6480,11 +6480,11 @@ def test_std_where(self): [True], [False]]) _cases = [ - (0, True, 7.07106781*np.ones((5))), - (1, True, 1.41421356*np.ones((5))), + (0, True, 7.07106781*np.ones(5)), + (1, True, 1.41421356*np.ones(5)), (0, whf, np.array([4.0824829 , 8.16496581, 5., 7.39509973, 8.49836586])), - (0, whp, 2.5*np.ones((5))) + (0, whp, 2.5*np.ones(5)) ] for _ax, _wh, _res in _cases: assert_allclose(a.std(axis=_ax, where=_wh), _res) diff --git a/numpy/_core/tests/test_records.py b/numpy/_core/tests/test_records.py index 975bb322f87c..151fa4e68727 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -114,9 +114,9 @@ def test_recarray_from_obj(self): mine = np.rec.fromarrays([a, b, c], names='date,data1,data2') for i in range(len(a)): - assert_((mine.date[i] == list(range(1, 10)))) - assert_((mine.data1[i] == 0.0)) - assert_((mine.data2[i] == 0.0)) + assert_(mine.date[i] == list(range(1, 10))) + assert_(mine.data1[i] == 0.0) + assert_(mine.data2[i] == 0.0) def test_recarray_repr(self): a = np.array([(1, 0.1), (2, 0.2)], diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 6e14909fc853..02726d6a108c 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -1071,8 +1071,8 @@ def test_astype_copy(self): with open(filename, 'rb') as f: xp = pickle.load(f, encoding='latin1') xpd = xp.astype(np.float64) - assert_((xp.__array_interface__['data'][0] != - xpd.__array_interface__['data'][0])) + assert_(xp.__array_interface__['data'][0] != + xpd.__array_interface__['data'][0]) def test_compress_small_type(self): # Ticket #789, changeset 5217. 
diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 610c693d3d10..a885cb64a661 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -154,7 +154,7 @@ def test_2D_array(self): def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): - hstack((np.arange(3) for _ in range(2))) + hstack(np.arange(3) for _ in range(2)) with pytest.raises(TypeError, match="arrays to stack must be"): hstack(map(lambda x: x, np.ones((3, 2)))) @@ -209,7 +209,7 @@ def test_2D_array2(self): def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): - vstack((np.arange(3) for _ in range(2))) + vstack(np.arange(3) for _ in range(2)) def test_casting_and_dtype(self): a = np.array([1, 2, 3]) @@ -477,7 +477,7 @@ def test_stack(): # do not accept generators with pytest.raises(TypeError, match="arrays to stack must be"): - stack((x for x in range(3))) + stack(x for x in range(3)) #casting and dtype test a = np.array([1, 2, 3]) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 10367d1cd88d..9ff3224947d9 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -232,12 +232,12 @@ def test_self_casts(dtype, dtype2, strings): if hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"): na1 = dtype.na_object na2 = dtype2.na_object - if ((na1 is not na2 and + if (na1 is not na2 and # check for pd_NA first because bool(pd_NA) is an error ((na1 is pd_NA or na2 is pd_NA) or # the second check is a NaN check, spelled this way # to avoid errors from math.isnan and np.isnan - (na1 != na2 and not (na1 != na1 and na2 != na2))))): + (na1 != na2 and not (na1 != na1 and na2 != na2)))): with pytest.raises(TypeError): arr[:-1] == newarr[:-1] return diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 13a1074b447e..5acf770b8e74 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -701,9 +701,9 @@ def getcallprotoargument(rout, cb_map={}): else: if not isattr_value(var): ctype = ctype + '*' - if ((isstring(var) + if (isstring(var) or isarrayofstrings(var) # obsolete? - or isstringarray(var))): + or isstringarray(var)): arg_types2.append('size_t') arg_types.append(ctype) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 2c6fa83889ca..980dee356d96 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -806,7 +806,7 @@ def crackline(line, reset=0): raise Exception('crackline: groupcounter(=%s) is nonpositive. ' 'Check the blocks.' 
% (groupcounter)) - m1 = beginpattern[0].match((line)) + m1 = beginpattern[0].match(line) if (m1) and (not m1.group('this') == groupname[groupcounter]): raise Exception('crackline: End group %s does not match with ' 'previous Begin group %s\n\t%s' % @@ -2735,8 +2735,8 @@ def analyzevars(block): d = param_parse(d, params) except (ValueError, IndexError, KeyError): outmess( - ('analyzevars: could not parse dimension for ' - f'variable {d!r}\n') + 'analyzevars: could not parse dimension for ' + f'variable {d!r}\n' ) dim_char = ':' if d == ':' else '*' @@ -2816,9 +2816,9 @@ def compute_deps(v, deps): compute_deps(v1, deps) all_deps = set() compute_deps(v, all_deps) - if ((v in n_deps + if (v in n_deps or '=' in vars[v] - or 'depend' in vars[v])): + or 'depend' in vars[v]): # Skip a variable that # - n depends on # - has user-defined initialization expression diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py index 67120d79a51e..6884a473b43b 100644 --- a/numpy/f2py/symbolic.py +++ b/numpy/f2py/symbolic.py @@ -1084,9 +1084,9 @@ def as_factors(obj): if coeff == 1: return Expr(Op.FACTORS, {term: 1}) return Expr(Op.FACTORS, {term: 1, Expr.number(coeff): 1}) - if ((obj.op is Op.APPLY + if (obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV - and not obj.data[2])): + and not obj.data[2]): return Expr(Op.FACTORS, {obj.data[1][0]: 1, obj.data[1][1]: -1}) return Expr(Op.FACTORS, {obj: 1}) raise OpError(f'cannot convert {type(obj)} to terms Expr') diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index 0b2f4042e66d..2678aa82e600 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -18,12 +18,12 @@ def test_scientific_notation(): """Test that both 'e' and 'E' are parsed correctly.""" data = StringIO( - ( + "1.0e-1,2.0E1,3.0\n" "4.0e-2,5.0E-1,6.0\n" "7.0e-3,8.0E1,9.0\n" "0.0e-4,1.0E-1,2.0" - ) + ) expected = np.array( [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]] @@ -46,14 +46,14 @@ def mixed_types_structured(): with the associated structured array. 
""" data = StringIO( - ( + "1000;2.4;alpha;-34\n" "2000;3.1;beta;29\n" "3500;9.9;gamma;120\n" "4090;8.1;delta;0\n" "5001;4.4;epsilon;-99\n" "6543;7.8;omega;-1\n" - ) + ) dtype = np.dtype( [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] @@ -597,14 +597,14 @@ def test_comment_multichar_error_with_quote(): def test_structured_dtype_with_quotes(): data = StringIO( - ( + "1000;2.4;'alpha';-34\n" "2000;3.1;'beta';29\n" "3500;9.9;'gamma';120\n" "4090;8.1;'delta';0\n" "5001;4.4;'epsilon';-99\n" "6543;7.8;'omega';-1\n" - ) + ) dtype = np.dtype( [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index d196b133005f..2a92cad2f315 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -651,7 +651,7 @@ def test_empty(self): tgt = mat res = f(mat, axis=1) assert_equal(res, tgt) - tgt = np.zeros((0)) + tgt = np.zeros(0) res = f(mat, axis=None) assert_equal(res, tgt) diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py index 2b03fdae39b5..13529e001354 100644 --- a/numpy/lib/tests/test_shape_base.py +++ b/numpy/lib/tests/test_shape_base.py @@ -514,7 +514,7 @@ def test_2D_arrays(self): def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): - column_stack((np.arange(3) for _ in range(2))) + column_stack(np.arange(3) for _ in range(2)) class TestDstack: @@ -551,7 +551,7 @@ def test_2D_array2(self): def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): - dstack((np.arange(3) for _ in range(2))) + dstack(np.arange(3) for _ in range(2)) # array_split has more comprehensive test of splitting. diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py index dc2c561b888c..a364268a344b 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -70,7 +70,7 @@ def test_get(self): assert_equal(mbase_last.recordmask, True) assert_equal(mbase_last._mask.item(), (True, True, True)) assert_equal(mbase_last['a'], mbase['a'][-1]) - assert_((mbase_last['a'] is masked)) + assert_(mbase_last['a'] is masked) # as slice .......... 
mbase_sl = mbase[:2] assert_(isinstance(mbase_sl, mrecarray)) diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index 505f6f4aedd2..9f68f38e5235 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -480,7 +480,7 @@ def _valnd(val_f, c, *args): """ args = [np.asanyarray(a) for a in args] shape0 = args[0].shape - if not all((a.shape == shape0 for a in args[1:])): + if not all(a.shape == shape0 for a in args[1:]): if len(args) == 3: raise ValueError('x, y, z are incompatible') elif len(args) == 2: diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py index 7e12561962a9..b402e87384d6 100644 --- a/numpy/random/tests/test_smoke.py +++ b/numpy/random/tests/test_smoke.py @@ -434,13 +434,13 @@ def test_dirichlet(self): def test_pickle(self): pick = pickle.dumps(self.rg) unpick = pickle.loads(pick) - assert_((type(self.rg) == type(unpick))) + assert_(type(self.rg) == type(unpick)) assert_(comp_state(self.rg.bit_generator.state, unpick.bit_generator.state)) pick = pickle.dumps(self.rg) unpick = pickle.loads(pick) - assert_((type(self.rg) == type(unpick))) + assert_(type(self.rg) == type(unpick)) assert_(comp_state(self.rg.bit_generator.state, unpick.bit_generator.state)) @@ -735,7 +735,7 @@ def test_numpy_state(self): self.rg.bit_generator.state = state state2 = self.rg.bit_generator.state assert_((state[1] == state2['state']['key']).all()) - assert_((state[2] == state2['state']['pos'])) + assert_(state[2] == state2['state']['pos']) class TestPhilox(RNG): From 5e05384fddd15b483779f0cc945c4e0b935e4d0b Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sat, 6 Jul 2024 16:48:42 +0200 Subject: [PATCH 722/980] MAINT: apply ruff/pyupgrade rule UP039 UP039 Unnecessary parentheses after class definition --- numpy/_core/tests/test_array_coercion.py | 2 +- numpy/_core/tests/test_multiarray.py | 8 ++++---- numpy/_core/tests/test_overrides.py | 4 ++-- numpy/f2py/tests/test_crackfortran.py | 6 +++--- numpy/lib/_version.py | 2 +- numpy/lib/tests/test_packbits.py | 2 +- numpy/lib/tests/test_regression.py | 2 +- numpy/ma/tests/test_core.py | 4 ++-- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index a88873fb7fc5..ee7b7c8d6685 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -38,7 +38,7 @@ def subclass(a): yield subclass - class _SequenceLike(): + class _SequenceLike: # Older NumPy versions, sometimes cared whether a protocol array was # also _SequenceLike. This shouldn't matter, but keep it for now # for __array__ and not the others. 
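As a quick aside illustrating the rule (a hypothetical snippet, not taken from
the diff): in Python 3 the empty parentheses after a class name are purely
redundant, and both spellings produce identical classes:

    class WithParens():       # flagged by UP039
        pass

    class WithoutParens:      # preferred spelling
        pass

    assert WithParens.__bases__ == WithoutParens.__bases__ == (object,)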
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 913055f8717c..33a89bcc9f1e 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -234,7 +234,7 @@ def test_readonly_flag_protocols(self, flag, flag_value, writeable): a = np.arange(10) setattr(a.flags, flag, flag_value) - class MyArr(): + class MyArr: __array_struct__ = a.__array_struct__ assert memoryview(a).readonly is not writeable @@ -7247,7 +7247,7 @@ def test_matmul_empty(self): def test_matmul_exception_multiply(self): # test that matmul fails if `__mul__` is missing - class add_not_multiply(): + class add_not_multiply: def __add__(self, other): return self a = np.full((3,3), add_not_multiply()) @@ -7256,7 +7256,7 @@ def __add__(self, other): def test_matmul_exception_add(self): # test that matmul fails if `__add__` is missing - class multiply_not_add(): + class multiply_not_add: def __mul__(self, other): return self a = np.full((3,3), multiply_not_add()) @@ -8677,7 +8677,7 @@ def test_multiarray_flags_not_writable_attribute_deletion(self): assert_raises(AttributeError, delattr, a, s) -class TestArrayInterface(): +class TestArrayInterface: class Foo: def __init__(self, value): self.value = value diff --git a/numpy/_core/tests/test_overrides.py b/numpy/_core/tests/test_overrides.py index 025cd001ff0a..1ac2277b5de7 100644 --- a/numpy/_core/tests/test_overrides.py +++ b/numpy/_core/tests/test_overrides.py @@ -540,7 +540,7 @@ def __array_function__(self, func, types, args, kwargs): class TestArrayLike: def setup_method(self): - class MyArray(): + class MyArray: def __init__(self, function=None): self.function = function @@ -554,7 +554,7 @@ def __array_function__(self, func, types, args, kwargs): self.MyArray = MyArray - class MyNoArrayFunctionArray(): + class MyNoArrayFunctionArray: def __init__(self, function=None): self.function = function diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index 1caa4147c2d7..4986cfbdc4c7 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -66,7 +66,7 @@ def test_nowrap_private_proceedures(self, tmp_path): pyf = crackfortran.crack2fortran(mod) assert 'bar' not in pyf -class TestModuleProcedure(): +class TestModuleProcedure: def test_moduleOperators(self, tmp_path): fpath = util.getpath("tests", "src", "crackfortran", "operators.f90") mod = crackfortran.crackfortran([str(fpath)]) @@ -347,14 +347,14 @@ def test_end_if_comment(self): assert False, f"'crackfortran.crackfortran' raised an exception {exc}" -class TestF77CommonBlockReader(): +class TestF77CommonBlockReader: def test_gh22648(self, tmp_path): fpath = util.getpath("tests", "src", "crackfortran", "gh22648.pyf") with contextlib.redirect_stdout(io.StringIO()) as stdout_f2py: mod = crackfortran.crackfortran([str(fpath)]) assert "Mismatch" not in stdout_f2py.getvalue() -class TestParamEval(): +class TestParamEval: # issue gh-11612, array parameter parsing def test_param_eval_nested(self): v = '(/3.14, 4./)' diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index bfac5f814501..7dec3243b883 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -11,7 +11,7 @@ __all__ = ['NumpyVersion'] -class NumpyVersion(): +class NumpyVersion: """Parse and compare numpy version strings. 
NumPy has the following versioning scheme (numbers given are examples; they diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py index 5b07f41c6260..a446156327cd 100644 --- a/numpy/lib/tests/test_packbits.py +++ b/numpy/lib/tests/test_packbits.py @@ -282,7 +282,7 @@ def test_unpackbits_large(): assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d) -class TestCount(): +class TestCount: x = np.array([ [1, 0, 1, 0, 0, 1, 0], [0, 1, 1, 1, 0, 0, 0], diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index 07b80904b917..5b777f5735e4 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -215,7 +215,7 @@ def test_nansum_with_boolean(self): def test_py3_compat(self): # gh-2561 # Test if the oldstyle class test is bypassed in python3 - class C(): + class C: """Old-style class in python2, normal class in python3""" pass diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 58d787226e84..970ae2875493 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -251,7 +251,7 @@ def test_creation_with_list_of_maskedarrays_no_bool_cast(self): # The above only failed due a long chain of oddity, try also with # an object array that cannot be converted to bool always: - class NotBool(): + class NotBool: def __bool__(self): raise ValueError("not a bool!") masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False]) @@ -4631,7 +4631,7 @@ def test_masked_invalid_pandas(self): # getdata() used to be bad for pandas series due to its _data # attribute. This test is a regression test mainly and may be # removed if getdata() is adjusted. - class Series(): + class Series: _data = "nonsense" def __array__(self, dtype=None, copy=None): From 58684de23345574bc13e634ac091144d71d31e57 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 6 Jul 2024 11:55:30 -0600 Subject: [PATCH 723/980] BUG: Make issctype always return bool. Fixes bug introduced in #26879. 
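A minimal sketch of the fixed behavior (illustrative only; it assumes the
private import path exercised by the tests below):

    import numpy as np
    from numpy._core.numerictypes import issctype

    assert issctype(np.int32) is True    # a scalar type
    assert issctype('S8') is False       # not a type or dtype object
    assert type(issctype(1.1)) is bool   # always a plain bool now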
--- numpy/_core/numerictypes.py | 7 +++++-- numpy/_core/tests/test_numerictypes.py | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index 9530b168d859..d736aecd5a35 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -223,10 +223,13 @@ def issctype(rep): return False try: res = obj2sctype(rep) + if res and res != object_: + return True + else: + return False except Exception: return False - else: - return res and res != object_ + @set_module('numpy') def obj2sctype(rep, default=None): diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index 502f1849e69a..db4509b9c28f 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -570,6 +570,7 @@ def test_issctype(rep, expected): # ensure proper identification of scalar # data-types by issctype() actual = issctype(rep) + assert type(actual) is bool assert_equal(actual, expected) From 79f1334121d94cb06f35081a27a033c37572a4f6 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sat, 6 Jul 2024 17:42:55 -0400 Subject: [PATCH 724/980] MAINT: Remove a redundant import from the generated __ufunc_api.h --- numpy/_core/code_generators/generate_ufunc_api.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/numpy/_core/code_generators/generate_ufunc_api.py b/numpy/_core/code_generators/generate_ufunc_api.py index 4bdbbdb9abac..ef34b95d9fb2 100644 --- a/numpy/_core/code_generators/generate_ufunc_api.py +++ b/numpy/_core/code_generators/generate_ufunc_api.py @@ -41,11 +41,7 @@ PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { PyErr_Clear(); - numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); - if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { - PyErr_Clear(); - numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); - } + numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); } if (numpy == NULL) { From d0d3cd2a4548820bc541725158eae2e383b0370f Mon Sep 17 00:00:00 2001 From: Anne Gunn Date: Mon, 8 Jul 2024 18:01:07 -0600 Subject: [PATCH 725/980] DOC: Change documentation copyright strings to use a dynamic end year This change addresses #26834 by changing the web page copyright strings for documentation and NEPs to have a dynamic end year which will always reflect the current year. [skip actions] [skip azp] [skip cirrus] --- doc/neps/conf.py | 4 +++- doc/source/conf.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/doc/neps/conf.py b/doc/neps/conf.py index 6cf97ddfe59f..ea8b5755d340 100644 --- a/doc/neps/conf.py +++ b/doc/neps/conf.py @@ -16,6 +16,7 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. # import os +from datetime import datetime # import sys # sys.path.insert(0, os.path.abspath('.')) @@ -48,7 +49,8 @@ # General information about the project. 
project = 'NumPy Enhancement Proposals' -copyright = '2017-2018, NumPy Developers' +year = datetime.now().year +copyright = f'2017-{year}, NumPy Developers' author = 'NumPy Developers' title = 'NumPy Enhancement Proposals Documentation' diff --git a/doc/source/conf.py b/doc/source/conf.py index fef7963539f6..2e54df1ac3c5 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -4,6 +4,7 @@ import importlib from docutils import nodes from docutils.parsers.rst import Directive +from datetime import datetime # Minimum version, enforced by sphinx needs_sphinx = '4.3' @@ -107,7 +108,8 @@ class PyTypeObject(ctypes.Structure): # General substitutions. project = 'NumPy' -copyright = '2008-2024, NumPy Developers' +year = datetime.now().year +copyright = f'2008-{year}, NumPy Developers' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. From 241a5300ab360b97125351f8def6dc1cd0beede3 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 9 Jul 2024 13:32:52 +0200 Subject: [PATCH 726/980] DOC: Add a note that one should free the proto struct Really, heap allocating should be very rare and I am not even sure it ever worked (in the error paths at least), but people should be able to work around it easily enough, so give a pointer. Close gh-26763 --- doc/source/reference/c-api/array.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 698e7586c52f..68fbb6ef3d66 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1240,6 +1240,11 @@ User-defined data types With these two changes, the code should compile and work on both 1.x and 2.x or later. + In the unlikely case that you are heap allocating the dtype struct you + should free it again on NumPy 2, since a copy is made. + The struct is not a valid Python object, so do not use ``Py_DECREF`` + on it. + Register a data-type as a new user-defined data type for arrays. The type must have most of its entries filled in. This is not always checked and errors can produce segfaults. In From 40836b6a216355cda5a8d622a516be12ff9f71de Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 9 Jul 2024 14:01:45 +0200 Subject: [PATCH 727/980] ENH: Allow use of clip with Python integers to always succeed This helps with NEP 50 behavior since the type is unchanged in that case, but the result is still clearly defined for clipping. This part could be pushed into the loops, but is more tricky to do there. It may make sense if it comes up as a problem with minimum/maximum. However, I think the situation there is slightly less surprising. (Does not preserve NEP 50 warnings/old behavior.) 
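A rough sketch of the resulting behavior (illustrative only, assuming a build
that includes this change): out-of-range Python int bounds are dropped rather
than raising OverflowError for integer arrays:

    import numpy as np

    x = np.arange(10, dtype=np.uint8)
    # -1 is below uint8's minimum and 300 is above its maximum, so both
    # bounds are dropped and a plain uint8 copy comes back.
    res = np.clip(x, -1, 300)
    assert res.dtype == np.uint8 and (res == x).all()
    # A representable bound still clips as usual.
    assert np.clip(x, None, 5).max() == 5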
Closes gh-26759 --- numpy/_core/_methods.py | 11 ++++++++++- numpy/_core/tests/test_multiarray.py | 26 +++++++++++++++++--------- numpy/_core/tests/test_numeric.py | 18 ++++++++++++++++++ 3 files changed, 45 insertions(+), 10 deletions(-) diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py index d31e2eef41fd..388854e664a5 100644 --- a/numpy/_core/_methods.py +++ b/numpy/_core/_methods.py @@ -8,6 +8,7 @@ import warnings from contextlib import nullcontext +import numpy as np from numpy._core import multiarray as mu from numpy._core import umath as um from numpy._core.multiarray import asanyarray @@ -97,10 +98,18 @@ def _count_reduce_items(arr, axis, keepdims=False, where=True): return items def _clip(a, min=None, max=None, out=None, **kwargs): + if a.dtype.kind in "iu": + # If min/max is a Python integer, deal with out-of-bound values here. + # (This enforces NEP 50 rules as no value based promotion is done.) + if type(min) is int and min <= np.iinfo(a.dtype).min: + min = None + if type(max) is int and max >= np.iinfo(a.dtype).max: + max = None + if min is None and max is None: # return identity return um.positive(a, out=out, **kwargs) - if min is None: + elif min is None: return um.minimum(a, max, out=out, **kwargs) elif max is None: return um.maximum(a, min, out=out, **kwargs) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 33a89bcc9f1e..93ac6a649b1b 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -5091,16 +5091,24 @@ def test_basic(self): 'uint', 1024, 10, 100, inplace=inplace) @pytest.mark.parametrize("inplace", [False, True]) - def test_int_range_error(self, inplace): - # E.g. clipping uint with negative integers fails to promote - # (changed with NEP 50 and may be adaptable) - # Similar to last check in `test_basic` + def test_int_out_of_range(self, inplace): + # Simple check for out-of-bound integers, also testing the in-place + # path. 
x = (np.random.random(1000) * 255).astype("uint8") - with pytest.raises(OverflowError): - x.clip(-1, 10, out=x if inplace else None) - - with pytest.raises(OverflowError): - x.clip(0, 256, out=x if inplace else None) + out = np.empty_like(x) + res = x.clip(-1, 300, out=out if inplace else None) + assert res is out or not inplace + assert (res == x).all() + + res = x.clip(-1, 50, out=out if inplace else None) + assert res is out or not inplace + assert (res <= 50).all() + assert (res[x <= 50] == x[x <= 50]).all() + + res = x.clip(100, 1000, out=out if inplace else None) + assert res is out or not inplace + assert (res >= 100).all() + assert (res[x >= 100] == x[x >= 100]).all() def test_record_array(self): rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index c13b04382728..a9694ebee736 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -2880,6 +2880,24 @@ def test_clip_min_max_args(self): with assert_raises_regex(ValueError, msg): np.clip(arr, 2, 3, min=2) + @pytest.mark.parametrize("dtype,min,max", [ + ("int32", -2**32-1, 2**32), + ("int32", -2**320, None), + ("int32", None, 2**300), + ("int32", -1000, 2**32), + ("int32", -2**32-1, 1000), + ("uint8", -1, 129), + ]) + def test_out_of_bound_pyints(self, dtype, min, max): + a = np.arange(10000).astype(dtype) + # Check min only + c = np.clip(a, min=min, max=max) + assert not np.may_share_memory(a, c) + assert c.dtype == a.dtype + if min is not None: + assert (c >= min).all() + if max is not None: + assert (c <= max).all() class TestAllclose: rtol = 1e-5 From e316e6042c68f5117c0fa5b1be26241653adc153 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Jul 2024 17:33:42 +0000 Subject: [PATCH 728/980] MAINT: Bump actions/setup-node from 4.0.2 to 4.0.3 Bumps [actions/setup-node](https://github.com/actions/setup-node) from 4.0.2 to 4.0.3. - [Release notes](https://github.com/actions/setup-node/releases) - [Commits](https://github.com/actions/setup-node/compare/60edb5dd545a775178f52524783378180af0d1f8...1e60f620b9541d16bece96c5465dc8ee9832be0b) --- updated-dependencies: - dependency-name: actions/setup-node dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index ad2f08a9348b..4eb61331095b 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -100,7 +100,7 @@ jobs: -Csetup-args="-Dlapack=none" - name: Set up Node.js - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: ${{ env.NODE_VERSION }} From e55b25c0eacbe5ec1de2e76a4efecf9b489c4700 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 13 May 2024 13:58:04 -0600 Subject: [PATCH 729/980] TST: add a test to check if the argparse cache is thread safe --- .../src/multiarray/_multiarray_tests.c.src | 25 +++++++++++++++++++ numpy/_core/tests/test_argparse.py | 21 +++++++++++++++- 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index fbd5fc445a2c..284f633b072b 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -42,6 +42,28 @@ argparse_example_function(PyObject *NPY_UNUSED(mod), Py_RETURN_NONE; } +/* + * Tests that argparse cache creation is thread-safe. *must* be called only + * by the python-level test_thread_safe_argparse_cache function, otherwise + * the cache might be created before the test to make sure cache creation is + * thread-safe runs + */ +static PyObject * +threaded_argparse_example_function(PyObject *NPY_UNUSED(mod), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + NPY_PREPARE_ARGPARSER; + int arg1; + PyObject *arg2; + if (npy_parse_arguments("thread_func", args, len_args, kwnames, + "$arg1", &PyArray_PythonPyIntFromInt, &arg1, + "$arg2", NULL, &arg2, + NULL, NULL, NULL) < 0) { + return NULL; + } + Py_RETURN_NONE; +} + /* test PyArray_IsPythonScalar, before including private py3 compat header */ static PyObject * IsPythonScalar(PyObject * dummy, PyObject *args) @@ -2205,6 +2227,9 @@ static PyMethodDef Multiarray_TestsMethods[] = { {"argparse_example_function", (PyCFunction)argparse_example_function, METH_KEYWORDS | METH_FASTCALL, NULL}, + {"threaded_argparse_example_function", + (PyCFunction)threaded_argparse_example_function, + METH_KEYWORDS | METH_FASTCALL, NULL}, {"IsPythonScalar", IsPythonScalar, METH_VARARGS, NULL}, diff --git a/numpy/_core/tests/test_argparse.py b/numpy/_core/tests/test_argparse.py index cddee72ea04c..ededced3b9fe 100644 --- a/numpy/_core/tests/test_argparse.py +++ b/numpy/_core/tests/test_argparse.py @@ -11,10 +11,29 @@ def func(arg1, /, arg2, *, arg3): return None """ +import threading + import pytest import numpy as np -from numpy._core._multiarray_tests import argparse_example_function as func +from numpy._core._multiarray_tests import ( + argparse_example_function as func, + threaded_argparse_example_function as thread_func, +) +from numpy.testing import IS_WASM + + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +def test_thread_safe_argparse_cache(): + b = threading.Barrier(8) + + def call_thread_func(): + b.wait() + thread_func(arg1=3, arg2=None) + + tasks = [threading.Thread(target=call_thread_func) for _ in range(8)] + [t.start() for t in tasks] + [t.join() for t in tasks] def test_invalid_integers(): From 6386423f6a88b35e0ed0c06effe8cd8c3dec82fb Mon Sep 17 
00:00:00 2001 From: Nathan Goldbaum Date: Wed, 15 May 2024 16:05:31 -0600 Subject: [PATCH 730/980] MNT: lock initializing the argparse cache --- numpy/_core/src/common/npy_argparse.c | 37 ++++++++++++---- numpy/_core/src/common/npy_argparse.h | 3 +- numpy/_core/src/common/npy_atomic.h | 42 +++++++++++++++++++ .../src/multiarray/_multiarray_tests.c.src | 3 ++ numpy/_core/src/umath/umathmodule.c | 5 +++ 5 files changed, 80 insertions(+), 10 deletions(-) create mode 100644 numpy/_core/src/common/npy_atomic.h diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index 2be17483ec28..38739b103da1 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ -7,11 +7,22 @@ #include "numpy/ndarraytypes.h" #include "numpy/npy_2_compat.h" #include "npy_argparse.h" - +#include "npy_atomic.h" #include "npy_import.h" #include "arrayfunction_override.h" +static PyThread_type_lock argparse_mutex; + +NPY_NO_EXPORT int +init_argparse_mutex(void) { + argparse_mutex = PyThread_allocate_lock(); + if (argparse_mutex == NULL) { + PyErr_NoMemory(); + return -1; + } + return 0; +} /** * Small wrapper converting to array just like CPython does. @@ -274,14 +285,22 @@ _npy_parse_arguments(const char *funcname, /* ... is NULL, NULL, NULL terminated: name, converter, value */ ...) { - if (NPY_UNLIKELY(cache->npositional == -1)) { - va_list va; - va_start(va, kwnames); - - int res = initialize_keywords(funcname, cache, va); - va_end(va); - if (res < 0) { - return -1; + if (NPY_UNLIKELY(!cache->initialized)) { + // only do a possibly slow atomic load if the cache isn't already initialized + if (!npy_atomic_load_uint8(&cache->initialized)) { + PyThread_acquire_lock(argparse_mutex, WAIT_LOCK); + if (!cache->initialized) { + va_list va; + va_start(va, kwnames); + int res = initialize_keywords(funcname, cache, va); + va_end(va); + if (res < 0) { + PyThread_release_lock(argparse_mutex); + return -1; + } + cache->initialized = 1; + } + PyThread_release_lock(argparse_mutex); } } diff --git a/numpy/_core/src/common/npy_argparse.h b/numpy/_core/src/common/npy_argparse.h index f4122103d22b..27cd52161de4 100644 --- a/numpy/_core/src/common/npy_argparse.h +++ b/numpy/_core/src/common/npy_argparse.h @@ -20,7 +20,6 @@ NPY_NO_EXPORT int PyArray_PythonPyIntFromInt(PyObject *obj, int *value); - #define _NPY_MAX_KWARGS 15 typedef struct { @@ -28,10 +27,12 @@ typedef struct { int nargs; int npositional_only; int nrequired; + npy_uint8 initialized; /* Null terminated list of keyword argument name strings */ PyObject *kw_strings[_NPY_MAX_KWARGS+1]; } _NpyArgParserCache; +NPY_NO_EXPORT int init_argparse_mutex(void); /* * The sole purpose of this macro is to hide the argument parsing cache. diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h new file mode 100644 index 000000000000..ba91dafc3a2f --- /dev/null +++ b/numpy/_core/src/common/npy_atomic.h @@ -0,0 +1,42 @@ +/* + * Provides wrappers around C11 standard library atomics and MSVC intrinsics + * to provide basic atomic load and store functionality. 
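The fast path in _npy_parse_arguments above is classic double-checked locking. A condensed, standalone sketch of the pattern (hypothetical names; assumes the atomic-load helper defined in this header):

    static npy_uint8 initialized;     /* zero-initialized static */
    static PyThread_type_lock mutex;  /* allocated once at module init */

    static int
    ensure_cache(void)
    {
        /* fast path: one atomic read, no lock, once initialization is done */
        if (!npy_atomic_load_uint8(&initialized)) {
            PyThread_acquire_lock(mutex, WAIT_LOCK);
            /* re-check under the lock: another thread may have won the race */
            if (!initialized) {
                /* ... populate the cache exactly once ... */
                initialized = 1;
            }
            PyThread_release_lock(mutex);
        }
        return 0;
    }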
This is based on + * code in CPython's pyatomic.h, pyatomic_std.h, and pyatomic_msc.h + */ + +#ifndef NUMPY_CORE_SRC_COMMON_NPY_ATOMIC_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_ATOMIC_H_ + +#include "numpy/npy_common.h" + +#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) +// TODO: support C++ atomics as well if this header is ever needed in C++ + #include + #include + #define STDC_ATOMICS +#elif _MSC_VER + #include + #define MSC_ATOMICS +#else + #error "no support for missing C11 atomics except with MSVC" +#endif + + +static inline npy_uint8 npy_atomic_load_uint8(const npy_uint8 *obj) { +#ifdef STDC_ATOMICS + return (npy_uint8)atomic_load((const _Atomic(uint8_t)*)obj); +#elif defined(MSC_ATOMICS) +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile npy_uint8 *)obj; +#elif defined(_M_ARM64) + return (npy_uint8)__ldar8((unsigned __int8 volatile *)obj); +#else +#error "Unsupported MSVC build configuration, neither x86 or ARM" +#endif +#endif +} + +#undef MSC_ATOMICS +#undef STDC_ATOMICS + +#endif // NUMPY_CORE_SRC_COMMON_NPY_NPY_ATOMIC_H_ diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 284f633b072b..5dd5057f9602 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -2432,6 +2432,9 @@ PyMODINIT_FUNC PyInit__multiarray_tests(void) return m; } import_array(); + if (init_argparse_mutex() < 0) { + return NULL; + } if (PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "cannot load _multiarray_tests module."); diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index 5402b17c399a..0c8fc4857ea7 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -22,6 +22,7 @@ #include "numpy/ufuncobject.h" #include "numpy/npy_3kcompat.h" #include "npy_pycompat.h" +#include "npy_argparse.h" #include "abstract.h" #include "numpy/npy_math.h" @@ -321,5 +322,9 @@ int initumath(PyObject *m) return -1; } + if (init_argparse_mutex() < 0) { + return -1; + } + return 0; } From 9a22d8473470e41bd68b201e660864609b9d970a Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 21 Jun 2024 15:20:20 -0600 Subject: [PATCH 731/980] MNT: convert runtime imports to use single-initialization --- numpy/_core/meson.build | 1 + numpy/_core/src/common/npy_ctypes.h | 10 +- numpy/_core/src/common/npy_import.c | 19 ++++ numpy/_core/src/common/npy_import.h | 104 ++++++++++++++++-- .../src/multiarray/arrayfunction_override.c | 10 +- numpy/_core/src/multiarray/convert_datatype.c | 7 +- numpy/_core/src/multiarray/descriptor.c | 8 +- numpy/_core/src/multiarray/dtypemeta.c | 15 ++- numpy/_core/src/multiarray/getset.c | 8 +- numpy/_core/src/multiarray/methods.c | 36 +++--- numpy/_core/src/multiarray/multiarraymodule.c | 13 ++- numpy/_core/src/multiarray/npy_static_data.c | 2 +- numpy/_core/src/multiarray/scalartypes.c.src | 9 +- numpy/_core/src/multiarray/strfuncs.c | 15 ++- .../_core/src/multiarray/stringdtype/dtype.c | 11 +- numpy/_core/src/umath/funcs.inc.src | 7 +- numpy/_core/src/umath/override.c | 9 +- numpy/_core/src/umath/ufunc_object.c | 21 ++-- numpy/_core/src/umath/ufunc_type_resolution.c | 3 +- 19 files changed, 204 insertions(+), 104 deletions(-) create mode 100644 numpy/_core/src/common/npy_import.c diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 78b60364bea5..90f29a0f6d7f 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1041,6 +1041,7 @@ 
src_multiarray_umath_common = [ 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', 'src/common/npy_hashtable.c', + 'src/common/npy_import.c', 'src/common/npy_longdouble.c', 'src/common/ucsnarrow.c', 'src/common/ufunc_override.c', diff --git a/numpy/_core/src/common/npy_ctypes.h b/numpy/_core/src/common/npy_ctypes.h index c72d2dff7fcb..7614a254a37a 100644 --- a/numpy/_core/src/common/npy_ctypes.h +++ b/numpy/_core/src/common/npy_ctypes.h @@ -21,14 +21,14 @@ npy_ctypes_check(PyTypeObject *obj) PyObject *ret_obj; int ret; - npy_cache_import("numpy._core._internal", "npy_ctypes_check", - &npy_thread_unsafe_state.npy_ctypes_check); - if (npy_thread_unsafe_state.npy_ctypes_check == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "npy_ctypes_check", + &npy_runtime_imports.npy_ctypes_check) == -1) { goto fail; } - ret_obj = PyObject_CallFunctionObjArgs(npy_thread_unsafe_state.npy_ctypes_check, - (PyObject *)obj, NULL); + ret_obj = PyObject_CallFunctionObjArgs( + npy_runtime_imports.npy_ctypes_check.obj, (PyObject *)obj, NULL); if (ret_obj == NULL) { goto fail; } diff --git a/numpy/_core/src/common/npy_import.c b/numpy/_core/src/common/npy_import.c new file mode 100644 index 000000000000..d220b840f7a9 --- /dev/null +++ b/numpy/_core/src/common/npy_import.c @@ -0,0 +1,19 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include "numpy/ndarraytypes.h" +#include "npy_import.h" +#include "npy_atomic.h" + + +NPY_VISIBILITY_HIDDEN npy_runtime_imports_struct npy_runtime_imports; + +NPY_NO_EXPORT int +init_import_mutex(void) { + npy_runtime_imports.import_mutex = PyThread_allocate_lock(); + if (npy_runtime_imports.import_mutex == NULL) { + PyErr_NoMemory(); + return -1; + } + return 0; +} diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 58b4ba0bc7e5..b06fdc5e4b72 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -3,7 +3,80 @@ #include -/*! \brief Fetch and cache Python function. +#include "numpy/npy_common.h" +#include "npy_atomic.h" + +/* + * Holds a cached PyObject where the cache is initialized via a + * runtime import. The cache is only filled once. + */ + +typedef struct npy_runtime_import { + npy_uint8 initialized; + PyObject *obj; +} npy_runtime_import; + +/* + * Cached references to objects obtained via an import. All of these are + * can be initialized at any time by npy_cache_import_runtime. 
+ */ +typedef struct npy_runtime_imports_struct { + PyThread_type_lock import_mutex; + npy_runtime_import _add_dtype_helper; + npy_runtime_import _all; + npy_runtime_import _amax; + npy_runtime_import _amin; + npy_runtime_import _any; + npy_runtime_import array_function_errmsg_formatter; + npy_runtime_import array_ufunc_errmsg_formatter; + npy_runtime_import _clip; + npy_runtime_import _commastring; + npy_runtime_import _convert_to_stringdtype_kwargs; + npy_runtime_import _default_array_repr; + npy_runtime_import _default_array_str; + npy_runtime_import _dump; + npy_runtime_import _dumps; + npy_runtime_import _getfield_is_safe; + npy_runtime_import internal_gcd_func; + npy_runtime_import _mean; + npy_runtime_import NO_NEP50_WARNING; + npy_runtime_import npy_ctypes_check; + npy_runtime_import numpy_matrix; + npy_runtime_import _prod; + npy_runtime_import _promote_fields; + npy_runtime_import _std; + npy_runtime_import _sum; + npy_runtime_import _ufunc_doc_signature_formatter; + npy_runtime_import _var; + npy_runtime_import _view_is_safe; + npy_runtime_import _void_scalar_to_string; +} npy_runtime_imports_struct; + +NPY_VISIBILITY_HIDDEN extern npy_runtime_imports_struct npy_runtime_imports; + +/*! \brief Import a Python object. + + * This function imports the Python function specified by + * \a module and \a function, increments its reference count, and returns + * the result. On error, returns NULL. + * + * @param module Absolute module name. + * @param attr module attribute to cache. + */ +static inline PyObject* +npy_import(const char *module, const char *attr) +{ + PyObject *ret = NULL; + PyObject *mod = PyImport_ImportModule(module); + + if (mod != NULL) { + ret = PyObject_GetAttrString(mod, attr); + Py_DECREF(mod); + } + return ret; +} + +/*! \brief Fetch and cache Python object at runtime. * * Import a Python function and cache it for use. The function checks if * cache is NULL, and if not NULL imports the Python function specified by @@ -16,17 +89,28 @@ * @param attr module attribute to cache. * @param cache Storage location for imported function. */ -static inline void -npy_cache_import(const char *module, const char *attr, PyObject **cache) -{ - if (NPY_UNLIKELY(*cache == NULL)) { - PyObject *mod = PyImport_ImportModule(module); - - if (mod != NULL) { - *cache = PyObject_GetAttrString(mod, attr); - Py_DECREF(mod); +static inline int +npy_cache_import_runtime(const char *module, const char *attr, npy_runtime_import *cache) { + if (cache->initialized) { + return 0; + } + else { + if (!npy_atomic_load_uint8(&cache->initialized)) { + PyThread_acquire_lock(npy_runtime_imports.import_mutex, WAIT_LOCK); + if (!cache->initialized) { + cache->obj = npy_import(module, attr); + cache->initialized = 1; + } + PyThread_release_lock(npy_runtime_imports.import_mutex); } } + if (cache->obj == NULL) { + return -1; + } + return 0; } +NPY_NO_EXPORT int +init_import_mutex(void); + #endif /* NUMPY_CORE_SRC_COMMON_NPY_IMPORT_H_ */ diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index aa3ab42433c7..f66191698b6a 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -232,12 +232,12 @@ static void set_no_matching_types_error(PyObject *public_api, PyObject *types) { /* No acceptable override found, raise TypeError. 
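A representative call site, following the pattern used throughout this patch (a sketch; _clip is one of the entries declared in the struct above and lives in numpy._core._methods, the actual call arguments are elided for brevity):

    if (npy_cache_import_runtime("numpy._core._methods", "_clip",
                                 &npy_runtime_imports._clip) == -1) {
        return NULL;  /* import failed; the Python error is already set */
    }
    /* the first call fills the cache; later calls reuse the cached object */
    PyObject *res = PyObject_CallFunctionObjArgs(
            npy_runtime_imports._clip.obj, arg, NULL);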
*/ - npy_cache_import("numpy._core._internal", - "array_function_errmsg_formatter", - &npy_thread_unsafe_state.array_function_errmsg_formatter); - if (npy_thread_unsafe_state.array_function_errmsg_formatter != NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", + "array_function_errmsg_formatter", + &npy_runtime_imports.array_function_errmsg_formatter) == 0) { PyObject *errmsg = PyObject_CallFunctionObjArgs( - npy_thread_unsafe_state.array_function_errmsg_formatter, + npy_runtime_imports.array_function_errmsg_formatter.obj, public_api, types, NULL); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index f029ad8a5986..dbc357128249 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -83,15 +83,14 @@ npy_give_promotion_warnings(void) { PyObject *val; - npy_cache_import( + if (npy_cache_import_runtime( "numpy._core._ufunc_config", "NO_NEP50_WARNING", - &npy_thread_unsafe_state.NO_NEP50_WARNING); - if (npy_thread_unsafe_state.NO_NEP50_WARNING == NULL) { + &npy_runtime_imports.NO_NEP50_WARNING) == -1) { PyErr_WriteUnraisable(NULL); return 1; } - if (PyContextVar_Get(npy_thread_unsafe_state.NO_NEP50_WARNING, + if (PyContextVar_Get(npy_runtime_imports.NO_NEP50_WARNING.obj, Py_False, &val) < 0) { /* Errors should not really happen, but if it does assume we warn. */ PyErr_WriteUnraisable(NULL); diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index b9d30c80a2f8..3b753e253728 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -726,12 +726,12 @@ _convert_from_commastring(PyObject *obj, int align) PyObject *parsed; PyArray_Descr *res; assert(PyUnicode_Check(obj)); - npy_cache_import("numpy._core._internal", "_commastring", - &npy_thread_unsafe_state._commastring); - if (npy_thread_unsafe_state._commastring == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_commastring", + &npy_runtime_imports._commastring) == -1) { return NULL; } - parsed = PyObject_CallOneArg(npy_thread_unsafe_state._commastring, obj); + parsed = PyObject_CallOneArg(npy_runtime_imports._commastring.obj, obj); if (parsed == NULL) { return NULL; } diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index a8ba51fa6e06..b9d6d0c08774 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -766,13 +766,13 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->names != NULL && descr2->names != NULL) { /* If both have fields promoting individual fields may be possible */ - npy_cache_import("numpy._core._internal", "_promote_fields", - &npy_thread_unsafe_state._promote_fields); - if (npy_thread_unsafe_state._promote_fields == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_promote_fields", + &npy_runtime_imports._promote_fields) == -1) { return NULL; } PyObject *result = PyObject_CallFunctionObjArgs( - npy_thread_unsafe_state._promote_fields, + npy_runtime_imports._promote_fields.obj, descr1, descr2, NULL); if (result == NULL) { return NULL; @@ -1240,14 +1240,13 @@ dtypemeta_wrap_legacy_descriptor( /* And it to the types submodule if it is a builtin dtype */ if (!PyTypeNum_ISUSERDEF(descr->type_num)) { - npy_cache_import("numpy.dtypes", "_add_dtype_helper", - 
&npy_thread_unsafe_state._add_dtype_helper); - if (npy_thread_unsafe_state._add_dtype_helper == NULL) { + if (npy_cache_import_runtime("numpy.dtypes", "_add_dtype_helper", + &npy_runtime_imports._add_dtype_helper) == -1) { return -1; } if (PyObject_CallFunction( - npy_thread_unsafe_state._add_dtype_helper, + npy_runtime_imports._add_dtype_helper.obj, "Os", (PyObject *)dtype_class, alias) == NULL) { return -1; } diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 092ac65bbbc3..fe18a9b4e5b7 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -388,13 +388,13 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(newtype)) { PyObject *safe; - npy_cache_import("numpy._core._internal", "_view_is_safe", - &npy_thread_unsafe_state._view_is_safe); - if (npy_thread_unsafe_state._view_is_safe == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_view_is_safe", + &npy_runtime_imports._view_is_safe) == -1) { goto fail; } - safe = PyObject_CallFunction(npy_thread_unsafe_state._view_is_safe, + safe = PyObject_CallFunction(npy_runtime_imports._view_is_safe.obj, "OO", PyArray_DESCR(self), newtype); if (safe == NULL) { goto fail; diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 669d5e575c7a..525bd2445c34 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -113,13 +113,11 @@ npy_forward_method( * be correct. */ #define NPY_FORWARD_NDARRAY_METHOD(name) \ - npy_cache_import( \ - "numpy._core._methods", #name, \ - &npy_thread_unsafe_state.name); \ - if (npy_thread_unsafe_state.name == NULL) { \ + if (npy_cache_import_runtime("numpy._core._methods", #name, \ + &npy_runtime_imports.name) == -1) { \ return NULL; \ } \ - return npy_forward_method(npy_thread_unsafe_state.name, \ + return npy_forward_method(npy_runtime_imports.name.obj, \ (PyObject *)self, args, len_args, kwnames) @@ -406,15 +404,15 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) /* check that we are not reinterpreting memory containing Objects. 
*/ if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(typed)) { - npy_cache_import("numpy._core._internal", "_getfield_is_safe", - &npy_thread_unsafe_state._getfield_is_safe); - if (npy_thread_unsafe_state._getfield_is_safe == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_getfield_is_safe", + &npy_runtime_imports._getfield_is_safe) == -1) { Py_DECREF(typed); return NULL; } /* only returns True or raises */ - safe = PyObject_CallFunction(npy_thread_unsafe_state._getfield_is_safe, + safe = PyObject_CallFunction(npy_runtime_imports._getfield_is_safe.obj, "OOi", PyArray_DESCR(self), typed, offset); if (safe == NULL) { @@ -2248,18 +2246,19 @@ NPY_NO_EXPORT int PyArray_Dump(PyObject *self, PyObject *file, int protocol) { PyObject *ret; - npy_cache_import("numpy._core._methods", "_dump", - &npy_thread_unsafe_state._dump); - if (npy_thread_unsafe_state._dump == NULL) { + if (npy_cache_import_runtime( + "numpy._core._methods", "_dump", + &npy_runtime_imports._dump) == -1) { return -1; } + if (protocol < 0) { ret = PyObject_CallFunction( - npy_thread_unsafe_state._dump, "OO", self, file); + npy_runtime_imports._dump.obj, "OO", self, file); } else { ret = PyObject_CallFunction( - npy_thread_unsafe_state._dump, "OOi", self, file, protocol); + npy_runtime_imports._dump.obj, "OOi", self, file, protocol); } if (ret == NULL) { return -1; @@ -2272,17 +2271,16 @@ PyArray_Dump(PyObject *self, PyObject *file, int protocol) NPY_NO_EXPORT PyObject * PyArray_Dumps(PyObject *self, int protocol) { - npy_cache_import("numpy._core._methods", "_dumps", - &npy_thread_unsafe_state._dumps); - if (npy_thread_unsafe_state._dumps == NULL) { + if (npy_cache_import_runtime("numpy._core._methods", "_dumps", + &npy_runtime_imports._dumps) == -1) { return NULL; } if (protocol < 0) { - return PyObject_CallFunction(npy_thread_unsafe_state._dumps, "O", self); + return PyObject_CallFunction(npy_runtime_imports._dumps.obj, "O", self); } else { return PyObject_CallFunction( - npy_thread_unsafe_state._dumps, "Oi", self, protocol); + npy_runtime_imports._dumps.obj, "Oi", self, protocol); } } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index e8bc75c1e359..d4b038827ef4 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4842,6 +4842,10 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } + if (init_import_mutex() < 0) { + goto err; + } + if (init_extobj() < 0) { goto err; } @@ -5067,14 +5071,15 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { * init_string_dtype() but that needs to happen after * the legacy dtypemeta classes are available. 
*/ - npy_cache_import("numpy.dtypes", "_add_dtype_helper", - &npy_thread_unsafe_state._add_dtype_helper); - if (npy_thread_unsafe_state._add_dtype_helper == NULL) { + + if (npy_cache_import_runtime( + "numpy.dtypes", "_add_dtype_helper", + &npy_runtime_imports._add_dtype_helper) == -1) { goto err; } if (PyObject_CallFunction( - npy_thread_unsafe_state._add_dtype_helper, + npy_runtime_imports._add_dtype_helper.obj, "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) { goto err; } diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 7f5e58dde21a..e8f554d40d9b 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -66,7 +66,7 @@ intern_strings(void) #define IMPORT_GLOBAL(base_path, name, object) \ assert(object == NULL); \ - npy_cache_import(base_path, name, &object); \ + object = npy_import(base_path, name); \ if (object == NULL) { \ return -1; \ } diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index a0517c247215..d4ce85dfd438 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -608,15 +608,14 @@ _void_to_hex(const char* argbuf, const Py_ssize_t arglen, static PyObject * _void_scalar_to_string(PyObject *obj, int repr) { - npy_cache_import("numpy._core.arrayprint", - "_void_scalar_to_string", - &npy_thread_unsafe_state._void_scalar_to_string); - if (npy_thread_unsafe_state._void_scalar_to_string == NULL) { + if (npy_cache_import_runtime( + "numpy._core.arrayprint", "_void_scalar_to_string", + &npy_runtime_imports._void_scalar_to_string) == -1) { return NULL; } PyObject *is_repr = repr ? Py_True : Py_False; return PyObject_CallFunctionObjArgs( - npy_thread_unsafe_state._void_scalar_to_string, obj, is_repr, NULL); + npy_runtime_imports._void_scalar_to_string.obj, obj, is_repr, NULL); } static PyObject * diff --git a/numpy/_core/src/multiarray/strfuncs.c b/numpy/_core/src/multiarray/strfuncs.c index 759c730c7cfa..54e1f0da3178 100644 --- a/numpy/_core/src/multiarray/strfuncs.c +++ b/numpy/_core/src/multiarray/strfuncs.c @@ -38,15 +38,14 @@ array_repr(PyArrayObject *self) * We need to do a delayed import here as initialization on module load * leads to circular import problems. */ - npy_cache_import("numpy._core.arrayprint", "_default_array_repr", - &npy_thread_unsafe_state._default_array_repr); - if (npy_thread_unsafe_state._default_array_repr == NULL) { + if (npy_cache_import_runtime("numpy._core.arrayprint", "_default_array_repr", + &npy_runtime_imports._default_array_repr) == -1) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__repr__"); return NULL; } return PyObject_CallFunctionObjArgs( - npy_thread_unsafe_state._default_array_repr, self, NULL); + npy_runtime_imports._default_array_repr.obj, self, NULL); } @@ -57,15 +56,15 @@ array_str(PyArrayObject *self) * We need to do a delayed import here as initialization on module load leads * to circular import problems. 
*/ - npy_cache_import("numpy._core.arrayprint", "_default_array_str", - &npy_thread_unsafe_state._default_array_str); - if (npy_thread_unsafe_state._default_array_str == NULL) { + if (npy_cache_import_runtime( + "numpy._core.arrayprint", "_default_array_str", + &npy_runtime_imports._default_array_str) == -1) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__str__"); return NULL; } return PyObject_CallFunctionObjArgs( - npy_thread_unsafe_state._default_array_str, self, NULL); + npy_runtime_imports._default_array_str.obj, self, NULL); } diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 038fa8159171..7b97d6ff9087 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -715,21 +715,20 @@ stringdtype_repr(PyArray_StringDTypeObject *self) static PyObject * stringdtype__reduce__(PyArray_StringDTypeObject *self, PyObject *NPY_UNUSED(args)) { - npy_cache_import("numpy._core._internal", "_convert_to_stringdtype_kwargs", - &npy_thread_unsafe_state._convert_to_stringdtype_kwargs); - - if (npy_thread_unsafe_state._convert_to_stringdtype_kwargs == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_convert_to_stringdtype_kwargs", + &npy_runtime_imports._convert_to_stringdtype_kwargs) == -1) { return NULL; } if (self->na_object != NULL) { return Py_BuildValue( - "O(iO)", npy_thread_unsafe_state._convert_to_stringdtype_kwargs, + "O(iO)", npy_runtime_imports._convert_to_stringdtype_kwargs.obj, self->coerce, self->na_object); } return Py_BuildValue( - "O(i)", npy_thread_unsafe_state._convert_to_stringdtype_kwargs, + "O(i)", npy_runtime_imports._convert_to_stringdtype_kwargs.obj, self->coerce); } diff --git a/numpy/_core/src/umath/funcs.inc.src b/numpy/_core/src/umath/funcs.inc.src index 3825bd869468..773efefe18f4 100644 --- a/numpy/_core/src/umath/funcs.inc.src +++ b/numpy/_core/src/umath/funcs.inc.src @@ -192,12 +192,11 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* otherwise, use our internal one, written in python */ { - npy_cache_import("numpy._core._internal", "_gcd", - &npy_thread_unsafe_state.internal_gcd_func); - if (npy_thread_unsafe_state.internal_gcd_func == NULL) { + if (npy_cache_import_runtime("numpy._core._internal", "_gcd", + &npy_runtime_imports.internal_gcd_func) == -1) { return NULL; } - gcd = PyObject_CallFunction(npy_thread_unsafe_state.internal_gcd_func, + gcd = PyObject_CallFunction(npy_runtime_imports.internal_gcd_func.obj, "OO", i1, i2); if (gcd == NULL) { return NULL; diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index 55cca0857229..d7a5020bb9e8 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -369,13 +369,14 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, /* All tuple items must be set before use */ Py_INCREF(Py_None); PyTuple_SET_ITEM(override_args, 0, Py_None); - npy_cache_import( + if (npy_cache_import_runtime( "numpy._core._internal", "array_ufunc_errmsg_formatter", - &npy_thread_unsafe_state.array_ufunc_errmsg_formatter); - assert(npy_thread_unsafe_state.array_ufunc_errmsg_formatter != NULL); + &npy_runtime_imports.array_ufunc_errmsg_formatter) == -1) { + goto fail; + } errmsg = PyObject_Call( - npy_thread_unsafe_state.array_ufunc_errmsg_formatter, + npy_runtime_imports.array_ufunc_errmsg_formatter.obj, override_args, normal_kwds); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); diff --git 
a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index a0acaf6573ed..85440e338836 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -5228,7 +5228,8 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) { PyArrayObject *ap1 = NULL; PyObject *tmp; - npy_cache_import("numpy", "matrix", &npy_thread_unsafe_state.numpy_matrix); + npy_cache_import_runtime("numpy", "matrix", + &npy_runtime_imports.numpy_matrix); const char *matrix_deprecation_msg = ( "%s.outer() was passed a numpy matrix as %s argument. " @@ -5239,7 +5240,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) tmp = PyTuple_GET_ITEM(args, 0); - if (PyObject_IsInstance(tmp, npy_thread_unsafe_state.numpy_matrix)) { + if (PyObject_IsInstance(tmp, npy_runtime_imports.numpy_matrix.obj)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "first") < 0) { @@ -5256,7 +5257,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) PyArrayObject *ap2 = NULL; tmp = PyTuple_GET_ITEM(args, 1); - if (PyObject_IsInstance(tmp, npy_thread_unsafe_state.numpy_matrix)) { + if (PyObject_IsInstance(tmp, npy_runtime_imports.numpy_matrix.obj)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "second") < 0) { @@ -6401,12 +6402,9 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) { PyObject *doc; - npy_cache_import( - "numpy._core._internal", - "_ufunc_doc_signature_formatter", - &npy_thread_unsafe_state._ufunc_doc_signature_formatter); - - if (npy_thread_unsafe_state._ufunc_doc_signature_formatter == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_ufunc_doc_signature_formatter", + &npy_runtime_imports._ufunc_doc_signature_formatter) == -1) { return NULL; } @@ -6415,8 +6413,9 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) * introspection on name and nin + nout to automate the first part * of it the doc string shouldn't need the calling convention */ - doc = PyObject_CallFunctionObjArgs(npy_thread_unsafe_state._ufunc_doc_signature_formatter, - (PyObject *)ufunc, NULL); + doc = PyObject_CallFunctionObjArgs( + npy_runtime_imports._ufunc_doc_signature_formatter.obj, + (PyObject *)ufunc, NULL); if (doc == NULL) { return NULL; } diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index b523bd0b4d83..cabcff3b9bef 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -35,10 +35,9 @@ #include "npy_config.h" #include "numpy/npy_common.h" -#include "npy_import.h" - #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" +#include "npy_import.h" #include "ufunc_type_resolution.h" #include "ufunc_object.h" #include "common.h" From 6ea18a4ea18e0a522a73cb86d11fbb630bdac0fa Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 27 Jun 2024 13:01:31 -0600 Subject: [PATCH 732/980] MAINT: re-add libatomic link test to meson.build --- numpy/meson.build | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/numpy/meson.build b/numpy/meson.build index 7e9ec5244cc9..dbfc5b48d81d 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -214,6 +214,51 @@ else lapack_dep = declare_dependency(dependencies: [lapack, blas_dep]) endif +# Determine whether it is necessary to link 
libatomic. This could be the +# case on 32-bit platforms when atomic operations are used on 64-bit +# types or on RISC-V using 8-bit atomics, so we explicitly check for +# both 64 bit and 8 bit operations. The check is adapted from SciPy, +# who copied it from Mesa. +null_dep = dependency('', required : false) +atomic_dep = null_dep +code_non_lockfree = ''' + #include + int main() { + struct { + uint64_t *u64v; + uint8_t *u8v; + } x; + x.u8v = 0; + x.u64v = 0; + uint64_t res1 = __atomic_load_n(x.u64v, __ATOMIC_ACQUIRE) & + __atomic_add_fetch(x.u64v, (uint64_t)1, __ATOMIC_ACQ_REL); + uint8_t res2 = __atomic_load_n(x.u8v, __ATOMIC_ACQUIRE) & + __atomic_add_fetch(x.u8v, (uint8_t)1, __ATOMIC_ACQ_REL); + return 0; + } +''' +if cc.get_id() != 'msvc' + if not cc.links( + code_non_lockfree, + name : 'Check atomic builtins without -latomic' + ) + atomic_dep = cc.find_library('atomic', required: false) + if atomic_dep.found() + # We're not sure that with `-latomic` things will work for all compilers, + # so verify and only keep libatomic as a dependency if this works. It is + # possible the build will fail later otherwise - unclear under what + # circumstances (compilers, runtimes, etc.) exactly and this may need to + # be extended when support is added for new CPUs + if not cc.links( + code_non_lockfree, + dependencies: atomic_dep, + name : 'Check atomic builtins with -latomic' + ) + atomic_dep = null_dep + endif + endif + endif +endif # Copy the main __init__.py|pxd files to the build dir (needed for Cython) __init__py = fs.copyfile('__init__.py') From 3c620cb38073bca55922da4110c9874a8f82044d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 27 Jun 2024 13:29:19 -0600 Subject: [PATCH 733/980] MAINT: add support for gcc builtin atomics --- numpy/_core/src/common/npy_atomic.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h index ba91dafc3a2f..20d771b3ee1c 100644 --- a/numpy/_core/src/common/npy_atomic.h +++ b/numpy/_core/src/common/npy_atomic.h @@ -17,8 +17,14 @@ #elif _MSC_VER #include #define MSC_ATOMICS +#elif defined(__GNUC__) && (__GNUC__ > 4) + #define GCC_ATOMICS +#elif defined(__clang__) + #if __has_builtin(__atomic_load) + #define GCC_ATOMICS + #endif #else - #error "no support for missing C11 atomics except with MSVC" + #error "no supported atomic implementation for this platform/compiler" #endif @@ -33,10 +39,13 @@ static inline npy_uint8 npy_atomic_load_uint8(const npy_uint8 *obj) { #else #error "Unsupported MSVC build configuration, neither x86 or ARM" #endif +#elif defined(GCC_ATOMICS) + return __atomic_load_n(obj, __ATOMIC_SEQ_CST); #endif } #undef MSC_ATOMICS #undef STDC_ATOMICS +#undef GCC_ATOMICS #endif // NUMPY_CORE_SRC_COMMON_NPY_NPY_ATOMIC_H_ From 2e2fa1d6e3ae60979dbb4d3de72d3149b876ccd0 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 4 Jul 2024 15:15:06 -0600 Subject: [PATCH 734/980] MAINT: rework to use double-checked locking and avoid new struct --- numpy/_core/src/common/npy_argparse.c | 25 +++--- numpy/_core/src/common/npy_atomic.h | 56 +++++++++++- numpy/_core/src/common/npy_ctypes.h | 3 +- numpy/_core/src/common/npy_import.h | 88 ++++++++----------- .../src/multiarray/arrayfunction_override.c | 2 +- numpy/_core/src/multiarray/convert_datatype.c | 2 +- numpy/_core/src/multiarray/descriptor.c | 2 +- numpy/_core/src/multiarray/dtypemeta.c | 4 +- numpy/_core/src/multiarray/getset.c | 2 +- numpy/_core/src/multiarray/methods.c | 12 +-- 
numpy/_core/src/multiarray/multiarraymodule.c | 2 +- numpy/_core/src/multiarray/scalartypes.c.src | 2 +- numpy/_core/src/multiarray/strfuncs.c | 4 +- .../_core/src/multiarray/stringdtype/dtype.c | 4 +- numpy/_core/src/umath/funcs.inc.src | 2 +- numpy/_core/src/umath/override.c | 2 +- numpy/_core/src/umath/ufunc_object.c | 6 +- numpy/meson.build | 26 +++--- 18 files changed, 137 insertions(+), 107 deletions(-) diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index 38739b103da1..eb1597c0ebb9 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ -285,23 +285,20 @@ _npy_parse_arguments(const char *funcname, /* ... is NULL, NULL, NULL terminated: name, converter, value */ ...) { - if (NPY_UNLIKELY(!cache->initialized)) { - // only do a possibly slow atomic load if the cache isn't already initialized + if (!npy_atomic_load_uint8(&cache->initialized)) { + PyThread_acquire_lock(argparse_mutex, WAIT_LOCK); if (!npy_atomic_load_uint8(&cache->initialized)) { - PyThread_acquire_lock(argparse_mutex, WAIT_LOCK); - if (!cache->initialized) { - va_list va; - va_start(va, kwnames); - int res = initialize_keywords(funcname, cache, va); - va_end(va); - if (res < 0) { - PyThread_release_lock(argparse_mutex); - return -1; - } - cache->initialized = 1; + va_list va; + va_start(va, kwnames); + int res = initialize_keywords(funcname, cache, va); + va_end(va); + if (res < 0) { + PyThread_release_lock(argparse_mutex); + return -1; } - PyThread_release_lock(argparse_mutex); + npy_atomic_store_uint8(&cache->initialized, 1); } + PyThread_release_lock(argparse_mutex); } if (NPY_UNLIKELY(len_args > cache->npositional)) { diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h index 20d771b3ee1c..75423940a719 100644 --- a/numpy/_core/src/common/npy_atomic.h +++ b/numpy/_core/src/common/npy_atomic.h @@ -17,6 +17,9 @@ #elif _MSC_VER #include #define MSC_ATOMICS + #if !defined(_M_X64) || !defined(_M_IX86) || !defined(_M_ARM64) + #error "Unsupported MSVC build configuration, neither x86 or ARM" + #endif #elif defined(__GNUC__) && (__GNUC__ > 4) #define GCC_ATOMICS #elif defined(__clang__) @@ -28,22 +31,67 @@ #endif -static inline npy_uint8 npy_atomic_load_uint8(const npy_uint8 *obj) { +static inline npy_uint8 +npy_atomic_load_uint8(const npy_uint8 *obj) { #ifdef STDC_ATOMICS return (npy_uint8)atomic_load((const _Atomic(uint8_t)*)obj); #elif defined(MSC_ATOMICS) #if defined(_M_X64) || defined(_M_IX86) return *(volatile npy_uint8 *)obj; -#elif defined(_M_ARM64) +#else // defined(_M_ARM64) return (npy_uint8)__ldar8((unsigned __int8 volatile *)obj); -#else -#error "Unsupported MSVC build configuration, neither x86 or ARM" #endif #elif defined(GCC_ATOMICS) return __atomic_load_n(obj, __ATOMIC_SEQ_CST); #endif } +static inline void* +npy_atomic_load_ptr(const void *obj) { +#ifdef STDC_ATOMICS + return atomic_load((const _Atomic(void *)*)obj); +#elif defined(MSC_ATOMICS) +#if SIZEOF_VOID_P == 8 +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile uint64_t *)obj; +#elif defined(_M_ARM64) + return (uint64_t)__ldar64((unsigned __int64 volatile *)obj); +#endif +#else +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile uint32_t *)obj; +#elif defined(_M_ARM64) + return (uint32_t)__ldar32((unsigned __int32 volatile *)obj); +#endif +#endif +#elif defined(GCC_ATOMICS) + return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_SEQ_CST); +#endif +} + +static inline void +npy_atomic_store_uint8(npy_uint8 
*obj, npy_uint8 value) { +#ifdef STDC_ATOMICS + atomic_store((_Atomic(uint8_t)*)obj, value); +#elif defined(MSC_ATOMICS) + _InterlockedExchange8((volatile char *)obj, (char)value); +#elif defined(GCC_ATOMICS) + __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); +#endif +} + +static inline void +npy_atomic_store_ptr(void *obj, void *value) +{ +#ifdef STDC_ATOMICS + atomic_store((_Atomic(void *)*)obj, value); +#elif defined(MSC_ATOMICS) + _InterlockedExchangePointer((void * volatile *)obj, (void *)value); +#elif defined(GCC_ATOMICS) + __atomic_store_n((void **)obj, value, __ATOMIC_SEQ_CST); +#endif +} + #undef MSC_ATOMICS #undef STDC_ATOMICS #undef GCC_ATOMICS diff --git a/numpy/_core/src/common/npy_ctypes.h b/numpy/_core/src/common/npy_ctypes.h index 7614a254a37a..78809732416c 100644 --- a/numpy/_core/src/common/npy_ctypes.h +++ b/numpy/_core/src/common/npy_ctypes.h @@ -21,6 +21,7 @@ npy_ctypes_check(PyTypeObject *obj) PyObject *ret_obj; int ret; + if (npy_cache_import_runtime( "numpy._core._internal", "npy_ctypes_check", &npy_runtime_imports.npy_ctypes_check) == -1) { @@ -28,7 +29,7 @@ npy_ctypes_check(PyTypeObject *obj) } ret_obj = PyObject_CallFunctionObjArgs( - npy_runtime_imports.npy_ctypes_check.obj, (PyObject *)obj, NULL); + npy_runtime_imports.npy_ctypes_check, (PyObject *)obj, NULL); if (ret_obj == NULL) { goto fail; } diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index b06fdc5e4b72..0262f7042dbf 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -6,50 +6,40 @@ #include "numpy/npy_common.h" #include "npy_atomic.h" -/* - * Holds a cached PyObject where the cache is initialized via a - * runtime import. The cache is only filled once. - */ - -typedef struct npy_runtime_import { - npy_uint8 initialized; - PyObject *obj; -} npy_runtime_import; - /* * Cached references to objects obtained via an import. All of these are * can be initialized at any time by npy_cache_import_runtime. 
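For reference, on a C11 toolchain the byte-sized wrappers above reduce to the standard atomics, roughly as follows (self-contained sketch; the real header adds the MSVC-intrinsic and GCC-builtin fallbacks):

    #include <stdatomic.h>
    #include <stdint.h>

    static uint8_t flag;

    uint8_t load_flag(void) {
        /* sequentially consistent load through an _Atomic view of the byte */
        return (uint8_t)atomic_load((const _Atomic(uint8_t) *)&flag);
    }

    void store_flag(uint8_t v) {
        atomic_store((_Atomic(uint8_t) *)&flag, v);
    }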
*/ typedef struct npy_runtime_imports_struct { PyThread_type_lock import_mutex; - npy_runtime_import _add_dtype_helper; - npy_runtime_import _all; - npy_runtime_import _amax; - npy_runtime_import _amin; - npy_runtime_import _any; - npy_runtime_import array_function_errmsg_formatter; - npy_runtime_import array_ufunc_errmsg_formatter; - npy_runtime_import _clip; - npy_runtime_import _commastring; - npy_runtime_import _convert_to_stringdtype_kwargs; - npy_runtime_import _default_array_repr; - npy_runtime_import _default_array_str; - npy_runtime_import _dump; - npy_runtime_import _dumps; - npy_runtime_import _getfield_is_safe; - npy_runtime_import internal_gcd_func; - npy_runtime_import _mean; - npy_runtime_import NO_NEP50_WARNING; - npy_runtime_import npy_ctypes_check; - npy_runtime_import numpy_matrix; - npy_runtime_import _prod; - npy_runtime_import _promote_fields; - npy_runtime_import _std; - npy_runtime_import _sum; - npy_runtime_import _ufunc_doc_signature_formatter; - npy_runtime_import _var; - npy_runtime_import _view_is_safe; - npy_runtime_import _void_scalar_to_string; + PyObject *_add_dtype_helper; + PyObject *_all; + PyObject *_amax; + PyObject *_amin; + PyObject *_any; + PyObject *array_function_errmsg_formatter; + PyObject *array_ufunc_errmsg_formatter; + PyObject *_clip; + PyObject *_commastring; + PyObject *_convert_to_stringdtype_kwargs; + PyObject *_default_array_repr; + PyObject *_default_array_str; + PyObject *_dump; + PyObject *_dumps; + PyObject *_getfield_is_safe; + PyObject *internal_gcd_func; + PyObject *_mean; + PyObject *NO_NEP50_WARNING; + PyObject *npy_ctypes_check; + PyObject *numpy_matrix; + PyObject *_prod; + PyObject *_promote_fields; + PyObject *_std; + PyObject *_sum; + PyObject *_ufunc_doc_signature_formatter; + PyObject *_var; + PyObject *_view_is_safe; + PyObject *_void_scalar_to_string; } npy_runtime_imports_struct; NPY_VISIBILITY_HIDDEN extern npy_runtime_imports_struct npy_runtime_imports; @@ -90,22 +80,16 @@ npy_import(const char *module, const char *attr) * @param cache Storage location for imported function. 
*/ static inline int -npy_cache_import_runtime(const char *module, const char *attr, npy_runtime_import *cache) { - if (cache->initialized) { - return 0; - } - else { - if (!npy_atomic_load_uint8(&cache->initialized)) { - PyThread_acquire_lock(npy_runtime_imports.import_mutex, WAIT_LOCK); - if (!cache->initialized) { - cache->obj = npy_import(module, attr); - cache->initialized = 1; +npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { + if (!npy_atomic_load_ptr(obj)) { + PyThread_acquire_lock(npy_runtime_imports.import_mutex, WAIT_LOCK); + if (!npy_atomic_load_ptr(obj)) { + npy_atomic_store_ptr(obj, npy_import(module, attr)); + if (obj == NULL) { + return -1; } - PyThread_release_lock(npy_runtime_imports.import_mutex); } - } - if (cache->obj == NULL) { - return -1; + PyThread_release_lock(npy_runtime_imports.import_mutex); } return 0; } diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index f66191698b6a..e4248ad29aba 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -237,7 +237,7 @@ set_no_matching_types_error(PyObject *public_api, PyObject *types) "array_function_errmsg_formatter", &npy_runtime_imports.array_function_errmsg_formatter) == 0) { PyObject *errmsg = PyObject_CallFunctionObjArgs( - npy_runtime_imports.array_function_errmsg_formatter.obj, + npy_runtime_imports.array_function_errmsg_formatter, public_api, types, NULL); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index dbc357128249..550d3e253868 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -90,7 +90,7 @@ npy_give_promotion_warnings(void) return 1; } - if (PyContextVar_Get(npy_runtime_imports.NO_NEP50_WARNING.obj, + if (PyContextVar_Get(npy_runtime_imports.NO_NEP50_WARNING, Py_False, &val) < 0) { /* Errors should not really happen, but if it does assume we warn. 
*/ PyErr_WriteUnraisable(NULL); diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 3b753e253728..a47a71d39196 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -731,7 +731,7 @@ _convert_from_commastring(PyObject *obj, int align) &npy_runtime_imports._commastring) == -1) { return NULL; } - parsed = PyObject_CallOneArg(npy_runtime_imports._commastring.obj, obj); + parsed = PyObject_CallOneArg(npy_runtime_imports._commastring, obj); if (parsed == NULL) { return NULL; } diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index b9d6d0c08774..f46e882ec2d1 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -772,7 +772,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return NULL; } PyObject *result = PyObject_CallFunctionObjArgs( - npy_runtime_imports._promote_fields.obj, + npy_runtime_imports._promote_fields, descr1, descr2, NULL); if (result == NULL) { return NULL; @@ -1246,7 +1246,7 @@ dtypemeta_wrap_legacy_descriptor( } if (PyObject_CallFunction( - npy_runtime_imports._add_dtype_helper.obj, + npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)dtype_class, alias) == NULL) { return -1; } diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index fe18a9b4e5b7..94028f71f964 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -394,7 +394,7 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) goto fail; } - safe = PyObject_CallFunction(npy_runtime_imports._view_is_safe.obj, + safe = PyObject_CallFunction(npy_runtime_imports._view_is_safe, "OO", PyArray_DESCR(self), newtype); if (safe == NULL) { goto fail; diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 525bd2445c34..70f487393842 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -117,7 +117,7 @@ npy_forward_method( &npy_runtime_imports.name) == -1) { \ return NULL; \ } \ - return npy_forward_method(npy_runtime_imports.name.obj, \ + return npy_forward_method(npy_runtime_imports.name, \ (PyObject *)self, args, len_args, kwnames) @@ -412,7 +412,7 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) } /* only returns True or raises */ - safe = PyObject_CallFunction(npy_runtime_imports._getfield_is_safe.obj, + safe = PyObject_CallFunction(npy_runtime_imports._getfield_is_safe, "OOi", PyArray_DESCR(self), typed, offset); if (safe == NULL) { @@ -2254,11 +2254,11 @@ PyArray_Dump(PyObject *self, PyObject *file, int protocol) if (protocol < 0) { ret = PyObject_CallFunction( - npy_runtime_imports._dump.obj, "OO", self, file); + npy_runtime_imports._dump, "OO", self, file); } else { ret = PyObject_CallFunction( - npy_runtime_imports._dump.obj, "OOi", self, file, protocol); + npy_runtime_imports._dump, "OOi", self, file, protocol); } if (ret == NULL) { return -1; @@ -2276,11 +2276,11 @@ PyArray_Dumps(PyObject *self, int protocol) return NULL; } if (protocol < 0) { - return PyObject_CallFunction(npy_runtime_imports._dumps.obj, "O", self); + return PyObject_CallFunction(npy_runtime_imports._dumps, "O", self); } else { return PyObject_CallFunction( - npy_runtime_imports._dumps.obj, "Oi", self, protocol); + npy_runtime_imports._dumps, "Oi", self, protocol); } } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c 
b/numpy/_core/src/multiarray/multiarraymodule.c index d4b038827ef4..ac0bdc8dafe2 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -5079,7 +5079,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { } if (PyObject_CallFunction( - npy_runtime_imports._add_dtype_helper.obj, + npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) { goto err; } diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index d4ce85dfd438..448e157ed2eb 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -615,7 +615,7 @@ _void_scalar_to_string(PyObject *obj, int repr) { } PyObject *is_repr = repr ? Py_True : Py_False; return PyObject_CallFunctionObjArgs( - npy_runtime_imports._void_scalar_to_string.obj, obj, is_repr, NULL); + npy_runtime_imports._void_scalar_to_string, obj, is_repr, NULL); } static PyObject * diff --git a/numpy/_core/src/multiarray/strfuncs.c b/numpy/_core/src/multiarray/strfuncs.c index 54e1f0da3178..efe5c8a4fdd8 100644 --- a/numpy/_core/src/multiarray/strfuncs.c +++ b/numpy/_core/src/multiarray/strfuncs.c @@ -45,7 +45,7 @@ array_repr(PyArrayObject *self) return NULL; } return PyObject_CallFunctionObjArgs( - npy_runtime_imports._default_array_repr.obj, self, NULL); + npy_runtime_imports._default_array_repr, self, NULL); } @@ -64,7 +64,7 @@ array_str(PyArrayObject *self) return NULL; } return PyObject_CallFunctionObjArgs( - npy_runtime_imports._default_array_str.obj, self, NULL); + npy_runtime_imports._default_array_str, self, NULL); } diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 7b97d6ff9087..81a846bf6d96 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -723,12 +723,12 @@ stringdtype__reduce__(PyArray_StringDTypeObject *self, PyObject *NPY_UNUSED(args if (self->na_object != NULL) { return Py_BuildValue( - "O(iO)", npy_runtime_imports._convert_to_stringdtype_kwargs.obj, + "O(iO)", npy_runtime_imports._convert_to_stringdtype_kwargs, self->coerce, self->na_object); } return Py_BuildValue( - "O(i)", npy_runtime_imports._convert_to_stringdtype_kwargs.obj, + "O(i)", npy_runtime_imports._convert_to_stringdtype_kwargs, self->coerce); } diff --git a/numpy/_core/src/umath/funcs.inc.src b/numpy/_core/src/umath/funcs.inc.src index 773efefe18f4..1075af97c9df 100644 --- a/numpy/_core/src/umath/funcs.inc.src +++ b/numpy/_core/src/umath/funcs.inc.src @@ -196,7 +196,7 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) &npy_runtime_imports.internal_gcd_func) == -1) { return NULL; } - gcd = PyObject_CallFunction(npy_runtime_imports.internal_gcd_func.obj, + gcd = PyObject_CallFunction(npy_runtime_imports.internal_gcd_func, "OO", i1, i2); if (gcd == NULL) { return NULL; diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index d7a5020bb9e8..139d9c7bdbbd 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -376,7 +376,7 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, goto fail; } errmsg = PyObject_Call( - npy_runtime_imports.array_ufunc_errmsg_formatter.obj, + npy_runtime_imports.array_ufunc_errmsg_formatter, override_args, normal_kwds); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 
85440e338836..3715866a2a83 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -5240,7 +5240,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) tmp = PyTuple_GET_ITEM(args, 0); - if (PyObject_IsInstance(tmp, npy_runtime_imports.numpy_matrix.obj)) { + if (PyObject_IsInstance(tmp, npy_runtime_imports.numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "first") < 0) { @@ -5257,7 +5257,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) PyArrayObject *ap2 = NULL; tmp = PyTuple_GET_ITEM(args, 1); - if (PyObject_IsInstance(tmp, npy_runtime_imports.numpy_matrix.obj)) { + if (PyObject_IsInstance(tmp, npy_runtime_imports.numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "second") < 0) { @@ -6414,7 +6414,7 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) * of it the doc string shouldn't need the calling convention */ doc = PyObject_CallFunctionObjArgs( - npy_runtime_imports._ufunc_doc_signature_formatter.obj, + npy_runtime_imports._ufunc_doc_signature_formatter, (PyObject *)ufunc, NULL); if (doc == NULL) { return NULL; diff --git a/numpy/meson.build b/numpy/meson.build index dbfc5b48d81d..032cdd5c6b60 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -214,28 +214,28 @@ else lapack_dep = declare_dependency(dependencies: [lapack, blas_dep]) endif -# Determine whether it is necessary to link libatomic. This could be the -# case on 32-bit platforms when atomic operations are used on 64-bit -# types or on RISC-V using 8-bit atomics, so we explicitly check for -# both 64 bit and 8 bit operations. The check is adapted from SciPy, -# who copied it from Mesa. +# Determine whether it is necessary to link libatomic with gcc. This +# could be the case on 32-bit platforms when atomic operations are used +# on 64-bit types or on RISC-V using 8-bit atomics, so we explicitly +# check for both 64 bit and 8 bit operations. The check is adapted from +# SciPy, who copied it from Mesa. 
null_dep = dependency('', required : false) atomic_dep = null_dep code_non_lockfree = ''' #include <stdint.h> int main() { struct { - uint64_t *u64v; - uint8_t *u8v; + void *p; + uint8_t u8v; } x; + x.p = NULL; x.u8v = 0; - x.u64v = 0; - uint64_t res1 = __atomic_load_n(x.u64v, __ATOMIC_ACQUIRE) & - __atomic_add_fetch(x.u64v, (uint64_t)1, __ATOMIC_ACQ_REL); - uint8_t res2 = __atomic_load_n(x.u8v, __ATOMIC_ACQUIRE) & - __atomic_add_fetch(x.u8v, (uint8_t)1, __ATOMIC_ACQ_REL); + uint8_t res = __atomic_load_n(&x.u8v, __ATOMIC_SEQ_CST); + __atomic_store_n(&x.u8v, 1, __ATOMIC_SEQ_CST); + void *p = __atomic_load_n(&x.p, __ATOMIC_SEQ_CST); + __atomic_store_n((void **)&x.p, NULL, __ATOMIC_SEQ_CST); return 0; - } + } ''' if cc.get_id() != 'msvc' if not cc.links( From b6af99a05485d2a4bcc8436e5bebcb93d80872ee Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 4 Jul 2024 15:29:50 -0600 Subject: [PATCH 735/980] BUG: fix windows build --- numpy/_core/src/common/npy_atomic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h index 75423940a719..b92d58d583c0 100644 --- a/numpy/_core/src/common/npy_atomic.h +++ b/numpy/_core/src/common/npy_atomic.h @@ -17,7 +17,7 @@ #elif _MSC_VER #include <intrin.h> #define MSC_ATOMICS - #if !defined(_M_X64) || !defined(_M_IX86) || !defined(_M_ARM64) + #if !defined(_M_X64) && !defined(_M_IX86) && !defined(_M_ARM64) #error "Unsupported MSVC build configuration, neither x86 or ARM" #endif #elif defined(__GNUC__) && (__GNUC__ > 4) From dabfe59827e89fc94636e6218cdf9024e0fa8c0d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 9 Jul 2024 12:38:13 -0600 Subject: [PATCH 736/980] MAINT: respond to review comments --- numpy/_core/src/common/npy_argparse.h | 2 +- numpy/_core/src/common/npy_import.h | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/common/npy_argparse.h b/numpy/_core/src/common/npy_argparse.h index 27cd52161de4..9f69da1307b5 100644 --- a/numpy/_core/src/common/npy_argparse.h +++ b/numpy/_core/src/common/npy_argparse.h @@ -38,7 +38,7 @@ NPY_NO_EXPORT int init_argparse_mutex(void); * The sole purpose of this macro is to hide the argument parsing cache. * Since this cache must be static, this also removes a source of error. */ -#define NPY_PREPARE_ARGPARSER static _NpyArgParserCache __argparse_cache = {-1} +#define NPY_PREPARE_ARGPARSER static _NpyArgParserCache __argparse_cache; /** * Macro to help with argument parsing. diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 0262f7042dbf..89b300159b61 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -82,14 +82,16 @@ npy_import(const char *module, const char *attr) static inline int npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { if (!npy_atomic_load_ptr(obj)) { + PyObject* value = npy_import(module, attr); + if (value == NULL) { + return -1; + } PyThread_acquire_lock(npy_runtime_imports.import_mutex, WAIT_LOCK); if (!npy_atomic_load_ptr(obj)) { - npy_atomic_store_ptr(obj, npy_import(module, attr)); - if (obj == NULL) { - return -1; - } + npy_atomic_store_ptr(obj, Py_NewRef(value)); } PyThread_release_lock(npy_runtime_imports.import_mutex); + Py_DECREF(value); } return 0; } From 3be40917906cfb2462524b7387fbfe9d111b1fb4 Mon Sep 17 00:00:00 2001 From: Anne Gunn Date: Tue, 9 Jul 2024 15:49:51 -0600 Subject: [PATCH 737/980] DOC: Change NEP hardlinks to intersphinx mappings.
This pull request changes most of the explicit URL links that point from one NEP to another into intersphinx mappings. I believe this will complete my work on the changes to address #26707, except as needed to address review comments. [skip actions] [skip azp] [skip cirrus] --- doc/neps/content.rst | 5 +---- doc/neps/nep-0018-array-function-protocol.rst | 6 +++--- doc/neps/nep-0030-duck-array-protocol.rst | 2 +- doc/neps/nep-0031-uarray.rst | 14 +++++++------- ...array-creation-dispatch-with-array-function.rst | 8 ++++---- doc/neps/nep-0036-fair-play.rst | 6 ++---- doc/neps/nep-0037-array-module.rst | 11 +++++------ doc/neps/nep-0054-simd-cpp-highway.rst | 3 +-- 8 files changed, 24 insertions(+), 31 deletions(-) diff --git a/doc/neps/content.rst b/doc/neps/content.rst index a188deae2ab2..a6e9dace9853 100644 --- a/doc/neps/content.rst +++ b/doc/neps/content.rst @@ -16,10 +16,7 @@ Roadmap Index The Scope of NumPy Current roadmap - Wishlist (opens new window) |wishlist_link| + Wish list -.. |wishlist_link| raw:: html - - WishList diff --git a/doc/neps/nep-0018-array-function-protocol.rst b/doc/neps/nep-0018-array-function-protocol.rst index a1682435272f..8eec748e3be1 100644 --- a/doc/neps/nep-0018-array-function-protocol.rst +++ b/doc/neps/nep-0018-array-function-protocol.rst @@ -141,7 +141,7 @@ The type of ``types`` is intentionally vague: instead for performance reasons. In any case, ``__array_function__`` implementations should not rely on the iteration order of ``types``, which would violate a well-defined "Type casting hierarchy" (as described in -`NEP-13 `_). +:ref:`NEP-13 <NEP13>`). Example for a project implementing the NumPy API ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -300,7 +300,7 @@ are valid then which has precedence? For the most part, the rules for dispatch with ``__array_function__`` match those for ``__array_ufunc__`` (see -`NEP-13 `_). +:ref:`NEP-13 <NEP13>`). In particular: - NumPy will gather implementations of ``__array_function__`` from all @@ -819,7 +819,7 @@ don't think it makes sense to do so now, because code generation involves tradeoffs and NumPy's experience with type annotations is still `quite limited `_. Even if NumPy was Python 3 only (which will happen -`sometime in 2019 `_), +:ref:`sometime in 2019 <NEP14>`), we aren't ready to annotate NumPy's codebase directly yet. Support for implementation-specific arguments diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst index bb58eaf4fa24..7fb8c9734900 100644 --- a/doc/neps/nep-0030-duck-array-protocol.rst +++ b/doc/neps/nep-0030-duck-array-protocol.rst @@ -176,7 +176,7 @@ Previous proposals and discussion --------------------------------- The duck typing protocol proposed here was described in a high level in -`NEP 22 `_. +:ref:`NEP 22 <NEP22>`. Additionally, longer discussions about the protocol and related proposals took place in diff --git a/doc/neps/nep-0031-uarray.rst b/doc/neps/nep-0031-uarray.rst index cf06d1109c11..cb906248fde6 100644 --- a/doc/neps/nep-0031-uarray.rst +++ b/doc/neps/nep-0031-uarray.rst @@ -319,7 +319,7 @@ It has been formally realized (at least in part) that a backend system is needed for this, in the `NumPy roadmap `_. For ``numpy.random``, it's still necessary to make the C-API fit the one -proposed in `NEP-19 `_. +proposed in :ref:`NEP-19 <NEP19>`. This is impossible for `mkl-random`, because then it would need to be rewritten to fit that framework.
The guarantees on stream compatibility will be the same as before, but if there's a backend that affects @@ -620,8 +620,8 @@ Discussion ---------- * ``uarray`` blogpost: https://labs.quansight.org/blog/2019/07/uarray-update-api-changes-overhead-and-comparison-to-__array_function__/ -* The discussion section of NEP-18: https://numpy.org/neps/nep-0018-array-function-protocol.html#discussion -* NEP-22: https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html +* The discussion section of :ref:`NEP18` +* :ref:`NEP22` * Dask issue #4462: https://github.com/dask/dask/issues/4462 * PR #13046: https://github.com/numpy/numpy/pull/13046 * Dask issue #4883: https://github.com/dask/dask/issues/4883 @@ -636,11 +636,11 @@ References and footnotes .. [1] uarray, A general dispatch mechanism for Python: https://uarray.readthedocs.io -.. [2] NEP 18 — A dispatch mechanism for NumPy’s high level array functions: https://numpy.org/neps/nep-0018-array-function-protocol.html +.. [2] :ref:`NEP18` -.. [3] NEP 22 — Duck typing for NumPy arrays – high level overview: https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html +.. [3] :ref:`NEP22` -.. [4] NEP 13 — A Mechanism for Overriding Ufuncs: https://numpy.org/neps/nep-0013-ufunc-overrides.html +.. [4] :ref:`NEP13` .. [5] Reply to Adding to the non-dispatched implementation of NumPy methods: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/5GUDMALWDIRHITG5YUOCV343J66QSX3U/#5GUDMALWDIRHITG5YUOCV343J66QSX3U .. [8] unumpy: NumPy, but implementation-independent: https://unumpy.readthedocs.io -.. [9] NEP 30 — Duck Typing for NumPy Arrays - Implementation: https://www.numpy.org/neps/nep-0030-duck-array-protocol.html +.. [9] :ref:`NEP30` .. [10] http://scipy.github.io/devdocs/fft.html#backend-control diff --git a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst index 63593277dd9a..09a376298245 100644 --- a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst +++ b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst @@ -430,17 +430,17 @@ Discussion References ---------- -.. [1] `NEP 18 - A dispatch mechanism for NumPy's high level array functions `_. +.. [1] :ref:`NEP18`. .. [2] `PEP 3102 — Keyword-Only Arguments `_. -.. [3] `NEP 30 — Duck Typing for NumPy Arrays - Implementation `_. +.. [3] :ref:`NEP30`. -.. [4] `NEP 31 — Context-local and global overrides of the NumPy API `_. +.. [4] :ref:`NEP31`. .. [5] `Array creation routines `_. -.. [6] `NEP 37 — A dispatch protocol for NumPy-like modules `_. +.. [6] :ref:`NEP37`. .. [7] `Implementation's pull request on GitHub `_ diff --git a/doc/neps/nep-0036-fair-play.rst b/doc/neps/nep-0036-fair-play.rst index 5d55c8aa25d5..022bf9435513 100644 --- a/doc/neps/nep-0036-fair-play.rst +++ b/doc/neps/nep-0036-fair-play.rst @@ -121,10 +121,8 @@ Fair play rules 4. *DO* use official mechanism to engage with the API. - Protocols such as `__array_ufunc__ - `__ and - `__array_function__ - `__ + Protocols such as :ref:`__array_ufunc__ <NEP13>` and + :ref:`__array_function__ <NEP18>` were designed to help external packages interact more easily with NumPy. E.g., the latter allows objects from foreign libraries to pass through NumPy.
We actively encourage using any of diff --git a/doc/neps/nep-0037-array-module.rst b/doc/neps/nep-0037-array-module.rst index 653141661421..7777cc73c2a6 100644 --- a/doc/neps/nep-0037-array-module.rst +++ b/doc/neps/nep-0037-array-module.rst @@ -29,7 +29,7 @@ expect will make it easier to adopt. Why ``__array_function__`` hasn't been enough --------------------------------------------- -There are two broad ways in which NEP-18 has fallen short of its goals: +There are two broad ways in which :ref:`NEP-18 <NEP18>` has fallen short of its goals: 1. **Backwards compatibility concerns**. `__array_function__` has significant implications for libraries that use it: @@ -64,7 +64,7 @@ There are two broad ways in which NEP-18 has fallen short of its goals: - **Array creation** routines (e.g., ``np.arange`` and those in ``np.random``) need some other mechanism for indicating what type of - arrays to create. `NEP 35 `_ + arrays to create. :ref:`NEP 35 <NEP35>` proposed adding optional ``like=`` arguments to functions without existing array arguments. However, we still lack any mechanism to override methods on objects, such as those needed by @@ -72,8 +72,7 @@ There are two broad ways in which NEP-18 has fallen short of its goals: - **Array conversion** can't reuse the existing coercion functions like ``np.asarray``, because ``np.asarray`` sometimes means "convert to an exact ``np.ndarray``" and other times means "convert to something _like_ - a NumPy array." This led to the `NEP 30 - `_ proposal for + a NumPy array." This led to the :ref:`NEP 30 <NEP30>` proposal for a separate ``np.duckarray`` function, but this still does not resolve how to cast one duck array into a type matching another duck array. @@ -144,8 +143,8 @@ we can simply pull out the appropriate submodule: noise = module.random.randn(*array.shape) return array + noise -We can also write the duck-array ``stack`` function from `NEP 30 -`_, without the need +We can also write the duck-array ``stack`` function from +:ref:`NEP 30 <NEP30>`, without the need for a new ``np.duckarray`` function: .. code:: python diff --git a/doc/neps/nep-0054-simd-cpp-highway.rst b/doc/neps/nep-0054-simd-cpp-highway.rst index f06de05ca036..53f1816c4428 100644 --- a/doc/neps/nep-0054-simd-cpp-highway.rst +++ b/doc/neps/nep-0054-simd-cpp-highway.rst @@ -17,7 +17,7 @@ Abstract We are moving the SIMD intrinsic framework, Universal Intrinsics, from C to C++. We have also moved to Meson as the build system. The Google Highway intrinsics project is proposing we use Highway instead of our Universal -Intrinsics as described in `NEP 38`_. This is a complex and multi-faceted +Intrinsics as described in :ref:`NEP 38 <NEP38>`. This is a complex and multi-faceted decision - this NEP is an attempt to describe the trade-offs involved and what would need to be done. @@ -350,7 +350,6 @@ References and Footnotes this NEP as an example) or licensed under the `Open Publication License`_. .. _Open Publication License: https://www.opencontent.org/openpub/ -.. _`NEP 38`: https://numpy.org/neps/nep-0038-SIMD-optimizations.html .. _`gh-20866`: https://github.com/numpy/numpy/pull/20866 .. _`gh-21057`: https://github.com/numpy/numpy/pull/21057 .. _`gh-23096`: https://github.com/numpy/numpy/pull/23096 From b22b27d39086fca055ec28c43b83ae059aaec118 Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Wed, 10 Jul 2024 13:43:23 +0200 Subject: [PATCH 738/980] Fix off-by-one error in the number of characters in strip Closes #26898.
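The user-visible effect of the strip fix is easiest to see from Python. A minimal sketch, assuming a NumPy 2.x build that includes this change (the inputs and expected outputs mirror the test cases added in the patch below):

```python
import numpy as np

# These inputs exercised the off-by-one (gh-26898); after the fix the
# results match Python's str.lstrip/rstrip/strip element-wise.
a = np.array(["ba", "ac", "baa", "bba"])
print(np.strings.lstrip(a, "b"))   # -> ['a' 'ac' 'aa' 'a']

b = np.array(["ab", "ac", "aab", "abb"])
print(np.strings.rstrip(b, "b"))   # -> ['a' 'ac' 'aa' 'a']

c = np.array(["bab", "ac", "baab", "bbabb"])
print(np.strings.strip(c, "b"))    # -> ['a' 'ac' 'aa' 'a']
```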
--- numpy/_core/src/umath/string_buffer.h | 4 +--- numpy/_core/tests/test_strings.py | 3 +++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 77f230cf9ad5..bc705f3e039b 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -1281,9 +1281,7 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT } num_bytes -= traverse_buf.num_bytes_next_character(); j--; - if (j > 0) { - traverse_buf--; - } + traverse_buf--; } } diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index f12b743f4daa..ec8f8f723e69 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -463,6 +463,7 @@ def test_endswith(self, a, suffix, start, end, out, dt): ("xyxzx", "x", "yxzx"), (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], ["helloxyzzy", "hello"]), + (["ba", "ac", "baa", "bba"], "b", ["a", "ac", "aa", "a"]), ]) def test_lstrip(self, a, chars, out, dt): a = np.array(a, dtype=dt) @@ -488,6 +489,7 @@ def test_lstrip(self, a, chars, out, dt): ("xyxzx", "x", "xyxz"), (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], ["xyzzyhello", "hello"]), + (["ab", "ac", "aab", "abb"], "b", ["a", "ac", "aa", "a"]), ]) def test_rstrip(self, a, chars, out, dt): a = np.array(a, dtype=dt) @@ -511,6 +513,7 @@ def test_rstrip(self, a, chars, out, dt): ("xyxzx", "x", "yxz"), (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], ["hello", "hello"]), + (["bab", "ac", "baab", "bbabb"], "b", ["a", "ac", "aa", "a"]), ]) def test_strip(self, a, chars, out, dt): a = np.array(a, dtype=dt) From 682cd040f0997ece83af51ffe0e3241242c7c1df Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 10 Jul 2024 16:50:32 +0200 Subject: [PATCH 739/980] BUG,ENH: Fix generic scalar infinite recursion issues This reorganizes the avoidance of infinite recursion to happen in the generic scalar fallback, rather than attempting to do so (badly) in the scalarmath, where it only applies to longdouble to begin with. This was also needed for the follow up to remove special handling of python scalar subclasses. 
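A sketch of the behavior this reorganization enables, based on the tests added later in this series (assuming a build that includes the change; before it, these operations could recurse infinitely or return NotImplemented for (c)longdouble):

```python
import operator
import numpy as np

# longdouble scalars now take the same generic deferral path as other
# scalar types, so array-like operands are coerced instead of recursing.
x = np.longdouble(3)
print(operator.add(x, [1, 2]))   # same result as 3 + np.array([1, 2])
print(operator.mul([1, 2], x))   # same result as np.array([1, 2]) * 3

# The `.item()` retry is only taken when item() yields a *different*
# Python type, which is what breaks the old infinite recursion.
print(type(np.float64(1.5).item()))     # <class 'float'> -> retry is safe
print(type(np.longdouble(1.5).item()))  # numpy.longdouble -> no retry
```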
--- numpy/_core/src/multiarray/npy_static_data.c | 1 + numpy/_core/src/multiarray/npy_static_data.h | 1 + numpy/_core/src/multiarray/scalartypes.c.src | 311 ++++++++++++++++--- numpy/_core/src/umath/scalarmath.c.src | 16 +- 4 files changed, 276 insertions(+), 53 deletions(-) diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 7f5e58dde21a..605eaa494dc4 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -41,6 +41,7 @@ intern_strings(void) INTERN_STRING(implementation, "_implementation"); INTERN_STRING(axis1, "axis1"); INTERN_STRING(axis2, "axis2"); + INTERN_STRING(item, "item"); INTERN_STRING(like, "like"); INTERN_STRING(numpy, "numpy"); INTERN_STRING(where, "where"); diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index c4d3ef4cdfee..0839b162b81a 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -23,6 +23,7 @@ typedef struct npy_interned_str_struct { PyObject *implementation; PyObject *axis1; PyObject *axis2; + PyObject *item; PyObject *like; PyObject *numpy; PyObject *where; diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index a0517c247215..4c1e27ed6d19 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -21,6 +21,7 @@ #include "ctors.h" #include "dtypemeta.h" #include "usertypes.h" +#include "number.h" #include "numpyos.h" #include "can_cast_table.h" #include "common.h" @@ -120,19 +121,6 @@ gentype_free(PyObject *v) } -static PyObject * -gentype_power(PyObject *m1, PyObject *m2, PyObject *modulo) -{ - if (modulo != Py_None) { - /* modular exponentiation is not implemented (gh-8804) */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - - BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_power, gentype_power); - return PyArray_Type.tp_as_number->nb_power(m1, m2, Py_None); -} - static PyObject * gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds, char *str) @@ -164,33 +152,194 @@ gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds, } } -static PyObject * -gentype_add(PyObject *m1, PyObject* m2) -{ - /* special case str.__radd__, which should not call array_add */ - if (PyBytes_Check(m1) || PyUnicode_Check(m1)) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; + +/* + * Helper function to deal with binary operator deferral. Must be passed a + * valid self (a generic scalar) and an other item. + * May fill self_op and/or other_op (but not both) with non-NULL values. + * + * Why this dance? When the other object is exactly a Python scalar, something + * awkward happens historically in NumPy. + * NumPy doesn't define a result, but the ufunc would cast to `astype(object)`, + * which is the same as `scalar.item()`. And that operation converts e.g. + * float32 or float64 to Python floats. + * It then retries. And because it is a builtin type now, the operation may + * succeed. + * + * This retrying pass only makes sense if the other object is a Python + * scalar (otherwise we fill in `other_op`, which can be used to call the + * ufunc). + * Additionally, if `self.item()` has the same type as `self` we would end up + * in an infinite recursion. + * + * So the result of this function means the following: + * - < 0 error return. + * - self_op is filled in: Retry the Python operator.
+ * - other_op is filled in: Use the array operator (goes into ufuncs) + * (This may be the original generic if it is one.) + * - neither is filled in: Return NotImplemented. + * + * It is not possible for both to be filled. If `other` is also a generic, + * it is returned. + */ +static inline int +find_binary_operation_path( + PyObject *self, PyObject *other, PyObject **self_op, PyObject **other_op) +{ + *other_op = NULL; + *self_op = NULL; + + if (PyArray_IsScalar(other, Generic) || + PyLong_CheckExact(other) || + PyFloat_CheckExact(other) || + PyComplex_CheckExact(other) || + PyBool_Check(other)) { + /* + * The other operand is ready for the operation already. Must pass + * on float/long/complex mainly for weak promotion (NEP 50). + */ + Py_INCREF(other); + *other_op = other; + return 0; } + + /* + * Now check `other`. We want to know whether it is an object scalar + * and the easiest way is by converting to an array here. + */ + int was_scalar; + PyArrayObject *arr = (PyArrayObject *)PyArray_FromAny_int( + other, NULL, NULL, 0, 0, 0, NULL, &was_scalar); + if (arr == NULL) { + return -1; + } + + if (!was_scalar || PyArray_DESCR(arr)->type_num != NPY_OBJECT) { + /* The array is OK for usage and we can simply forward it + * + * NOTE: Future NumPy may need to distinguish scalars here, one option + * could be marking the array. + */ + *other_op = (PyObject *)arr; + return 0; + } + Py_DECREF(arr); + + /* + * If we are here, we need to operate on Python scalars. In general + * that would just fail since NumPy doesn't know the other object! + * + * However, NumPy (historically) often makes this work magically because + * its object ufuncs end up casting to object with `.item()`, and that + * often returns a Python type (e.g. float for float32, float64)! + * Retrying then succeeds. So if (and only if) `self.item()` returns a new + * type, we can safely attempt the operation (again) with that. + */ + PyObject *self_item = PyObject_CallMethodNoArgs(self, npy_interned_str.item); + if (self_item == NULL) { + return -1; + } + if (Py_TYPE(self_item) != Py_TYPE(self)) { + /* self_item can be used to retry the operation */ + *self_op = self_item; + return 0; + } + /* The operation can't work and we will return NotImplemented */ + Py_DECREF(self_item); + return 0; +} + + +/* + * These are defined below as they require special handling; we still define + * a _gen version here. `power` is special as it has three arguments. + */ +static PyObject * +gentype_add(PyObject *m1, PyObject *m2); + +static PyObject * +gentype_multiply(PyObject *m1, PyObject *m2); + + /**begin repeat * - * #name = subtract, remainder, divmod, lshift, rshift, - * and, xor, or, floor_divide, true_divide# + * #name = add, multiply, subtract, remainder, divmod, + * lshift, rshift, and, xor, or, floor_divide, true_divide# + * #ufunc = add, multiply, subtract, remainder, divmod, + * left_shift, right_shift, bitwise_and, bitwise_xor, bitwise_or, + * floor_divide, true_divide# + * #func = Add, Multiply, Subtract, Remainder, Divmod, + * Lshift, Rshift, And, Xor, Or, FloorDivide, TrueDivide# + * #suff = _gen, _gen,,,,,,,,,,# */ +/* NOTE: We suffix the name for functions requiring special handling first.
*/ static PyObject * -gentype_@name@(PyObject *m1, PyObject *m2) +gentype_@name@@suff@(PyObject *m1, PyObject *m2) { BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_@name@, gentype_@name@); - return PyArray_Type.tp_as_number->nb_@name@(m1, m2); + + PyObject *self = NULL; + PyObject *other = NULL; + PyObject *self_op, *other_op; + + if (!PyArray_IsScalar(m2, Generic)) { + self = m1; + other = m2; + } + else { + self = m2; + other = m1; + } + if (find_binary_operation_path(self, other, &self_op, &other_op) < 0) { + return NULL; + } + if (self_op != NULL) { + PyObject *res; + if (self == m1) { + res = PyNumber_@func@(self_op, m2); + } + else { + res = PyNumber_@func@(m1, self_op); + } + Py_DECREF(self_op); + return res; + } + else if (other_op != NULL) { + /* Call the corresponding ufunc (with the array) */ + PyObject *res; + if (self == m1) { + res = PyArray_GenericBinaryFunction(m1, other_op, n_ops.@ufunc@); + } + else { + res = PyArray_GenericBinaryFunction(other_op, m2, n_ops.@ufunc@); + } + Py_DECREF(other_op); + return res; + } + else { + assert(other_op == NULL); + Py_RETURN_NOTIMPLEMENTED; + } } /**end repeat**/ -/* Get a nested slot, or NULL if absent */ +/* + * The following operators use the above, but require specialization. + */ + +static PyObject * +gentype_add(PyObject *m1, PyObject *m2) +{ + /* special case str.__radd__, which should not call array_add */ + if (PyBytes_Check(m1) || PyUnicode_Check(m1)) { + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; + } + + return gentype_add_gen(m1, m2); +} + +/* Get a nested slot, or NULL if absent (for multiply implementation) */ #define GET_NESTED_SLOT(type, group, slot) \ ((type)->group == NULL ? NULL : (type)->group->slot) @@ -219,11 +368,75 @@ gentype_multiply(PyObject *m1, PyObject *m2) Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } - /* All normal cases are handled by PyArray's multiply */ - BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_multiply, gentype_multiply); - return PyArray_Type.tp_as_number->nb_multiply(m1, m2); + + return gentype_multiply_gen(m1, m2); } + +/* + * NOTE: The three argument nature of power requires code duplication here. + */ +static PyObject * +gentype_power(PyObject *m1, PyObject *m2, PyObject *modulo) +{ + if (modulo != Py_None) { + /* modular exponentiation is not implemented (gh-8804) */ + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; + } + + BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_power, gentype_power); + + PyObject *self = NULL; + PyObject *other = NULL; + PyObject *self_op, *other_op; + + if (!PyArray_IsScalar(m2, Generic)) { + self = m1; + other = m2; + } + else { + self = m2; + other = m1; + } + if (find_binary_operation_path(self, other, &self_op, &other_op) < 0) { + return NULL; + } + if (self_op != NULL) { + PyObject *res; + if (self == m1) { + res = PyNumber_Power(self_op, m2, Py_None); + } + else { + res = PyNumber_Power(m1, self_op, Py_None); + } + Py_DECREF(self_op); + return res; + } + else if (other_op != NULL) { + /* Call the corresponding ufunc (with the array) + * NOTE: As of NumPy 2.0 there are inconsistencies in array_power + * calling it would fail a (niche) test because an array is + * returned in one of the fast-paths. 
+ * (once NumPy propagates 0-D arrays, this is irrelevant) + */ + PyObject *res; + if (self == m1) { + res = PyArray_GenericBinaryFunction(m1, other_op, n_ops.power); + } + else { + res = PyArray_GenericBinaryFunction(other_op, m2, n_ops.power); + } + Py_DECREF(other_op); + return res; + } + else { + assert(other_op == NULL); + Py_RETURN_NOTIMPLEMENTED; + } +} + + /**begin repeat * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, * LONG, ULONG, LONGLONG, ULONGLONG# @@ -1265,8 +1478,6 @@ static PyNumberMethods gentype_as_number = { static PyObject * gentype_richcompare(PyObject *self, PyObject *other, int cmp_op) { - PyObject *arr, *ret; - /* * If the other object is None, False is always right. This avoids * the array None comparison, at least until deprecation it is fixed. @@ -1287,17 +1498,35 @@ gentype_richcompare(PyObject *self, PyObject *other, int cmp_op) RICHCMP_GIVE_UP_IF_NEEDED(self, other); - arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) { + PyObject *self_op; + PyObject *other_op; + if (find_binary_operation_path(self, other, &self_op, &other_op) < 0) { return NULL; } - /* - * Call via PyObject_RichCompare to ensure that other.__eq__ - * has a chance to run when necessary - */ - ret = PyObject_RichCompare(arr, other, cmp_op); - Py_DECREF(arr); - return ret; + + /* We can always just call RichCompare again */ + if (other_op != NULL) { + /* If we use richcompare again, need to ensure that one op is array */ + self_op = PyArray_FromScalar(self, NULL); + if (self_op == NULL) { + Py_DECREF(other_op); + return NULL; + } + PyObject *res = PyObject_RichCompare(self_op, other_op, cmp_op); + Py_DECREF(self_op); + Py_DECREF(other_op); + return res; + } + else if (self_op != NULL) { + /* Try again, since other is an object scalar and this one mutated */ + PyObject *res = PyObject_RichCompare(self_op, other, cmp_op); + Py_DECREF(self_op); + return res; + } + else { + /* Comparison with arbitrary objects cannot be defined. */ + Py_RETURN_NOTIMPLEMENTED; + } } static PyObject * diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index b6bf6938d914..af21887a6099 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -798,8 +798,9 @@ typedef enum { */ CONVERT_PYSCALAR, /* - * Other object is an unknown scalar or array-like, we (typically) use + * Other object is an unknown scalar or array-like, we also use * the generic path, which normally ends up in the ufunc machinery. + * (So it ends up identical to PROMOTION_REQUIRED.) */ OTHER_IS_UNKNOWN_OBJECT, /* @@ -1262,12 +1263,9 @@ static PyObject * * also integers that are too large to convert to `long`), or * even a subclass of a NumPy scalar (currently). * - * Generally, we try dropping through to the array path here, - * but this can lead to infinite recursions for (c)longdouble. + * We drop through to the generic path here which checks for the + * (c)longdouble infinite recursion problem. 
*/ -#if defined(IS_longdouble) || defined(IS_clongdouble) - Py_RETURN_NOTIMPLEMENTED; -#endif case PROMOTION_REQUIRED: /* * Python scalar that is larger than the current one, or two @@ -1544,9 +1542,6 @@ static PyObject * case CONVERSION_SUCCESS: break; /* successfully extracted value we can proceed */ case OTHER_IS_UNKNOWN_OBJECT: -#if defined(IS_longdouble) || defined(IS_clongdouble) - Py_RETURN_NOTIMPLEMENTED; -#endif case PROMOTION_REQUIRED: return PyGenericArrType_Type.tp_as_number->nb_power(a, b, modulo); case CONVERT_PYSCALAR: @@ -1955,9 +1950,6 @@ static PyObject* case CONVERSION_SUCCESS: break; /* successfully extracted value we can proceed */ case OTHER_IS_UNKNOWN_OBJECT: -#if defined(IS_longdouble) || defined(IS_clongdouble) - Py_RETURN_NOTIMPLEMENTED; -#endif case PROMOTION_REQUIRED: return PyGenericArrType_Type.tp_richcompare(self, other, cmp_op); case CONVERT_PYSCALAR: From 799f5fbbac09290956807112761bc6e7d69c5f99 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 10 Jul 2024 17:02:29 +0200 Subject: [PATCH 740/980] MAINT: For now, do not use exact checks (this is a follow up) --- numpy/_core/src/multiarray/scalartypes.c.src | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 4c1e27ed6d19..cbd9e345717c 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -190,9 +190,9 @@ find_binary_operation_path( *self_op = NULL; if (PyArray_IsScalar(other, Generic) || - PyLong_CheckExact(other) || - PyFloat_CheckExact(other) || - PyComplex_CheckExact(other) || + PyLong_Check(other) || + PyFloat_Check(other) || + PyComplex_Check(other) || PyBool_Check(other)) { /* * The other operand is ready for the operation already. 
Must pass on From 56299a3c75353fb01145e27013ad61794a12d827 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 10 Jul 2024 17:02:43 +0200 Subject: [PATCH 741/980] TST: Also test xor and rational (segfault currently not just longdouble) --- numpy/_core/tests/test_scalarmath.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 3517023cb5f0..3912575bd1ac 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -12,6 +12,7 @@ import numpy as np from numpy.exceptions import ComplexWarning +from numpy._core._rational_tests import rational from numpy.testing import ( assert_, assert_equal, assert_raises, assert_almost_equal, assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data, @@ -31,7 +32,7 @@ reasonable_operators_for_scalars = [ operator.lt, operator.le, operator.eq, operator.ne, operator.ge, operator.gt, operator.add, operator.floordiv, operator.mod, - operator.mul, operator.pow, operator.sub, operator.truediv, + operator.mul, operator.pow, operator.sub, operator.truediv ] @@ -864,8 +865,8 @@ def recursionlimit(n): @given(sampled_from(objecty_things), - sampled_from(reasonable_operators_for_scalars), - sampled_from(types)) + sampled_from(reasonable_operators_for_scalars + [operator.xor]), + sampled_from(types + [rational])) def test_operator_object_left(o, op, type_): try: with recursionlimit(200): @@ -875,8 +876,8 @@ def test_operator_object_left(o, op, type_): @given(sampled_from(objecty_things), - sampled_from(reasonable_operators_for_scalars), - sampled_from(types)) + sampled_from(reasonable_operators_for_scalars + [operator.xor]), + sampled_from(types + [rational])) def test_operator_object_right(o, op, type_): try: with recursionlimit(200): From ecbb3a4cb6a31f2d84f036d10f7116b0747ff11d Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 10 Jul 2024 17:35:56 +0200 Subject: [PATCH 742/980] TST: Refine/add tests for paths that now work for longdouble --- numpy/_core/tests/test_scalarmath.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 3912575bd1ac..b6fb8e961a16 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -909,6 +909,9 @@ def test_longdouble_operators_with_obj(sctype, op): # # That would recurse infinitely. Other scalars return the python object # on cast, so this type of things works OK. + # + # As of NumPy 2.1, this has been consolidated into the np.generic binops + # and now checks `.item()`. That also allows the below path to work now. try: op(sctype(3), None) except TypeError: @@ -919,6 +922,15 @@ def test_longdouble_operators_with_obj(sctype, op): pass +@pytest.mark.parametrize("op", [operator.add, operator.pow, operator.sub]) +@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) +def test_longdouble_with_arrlike(sctype, op): + # As of NumPy 2.1, longdouble behaves like other types and can coerce + # e.g. lists. (Not necessarily better, but consistent.) 
+ assert_array_equal(op(sctype(3), [1, 2]), op(3, np.array([1, 2]))) + assert_array_equal(op([1, 2], sctype(3)), op(np.array([1, 2]), 3)) + + @pytest.mark.parametrize("op", reasonable_operators_for_scalars) @pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) @np.errstate(all="ignore") From 3e6cc4aea9fba6efd155d8e5ea805826c5a98fed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 17:38:51 +0000 Subject: [PATCH 743/980] MAINT: Bump actions/setup-python from 5.1.0 to 5.1.1 Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.1.0 to 5.1.1. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/82c7e631bb3cdc910f68e0081d67478d79c6982d...39cd14951b08e74b54015e9e001cdefcf80e669f) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/free-threaded-wheels.yml | 2 +- .github/workflows/linux.yml | 16 ++++++++-------- .github/workflows/linux_blas.yml | 12 ++++++------ .github/workflows/linux_compiler_sanitizers.yml | 2 +- .github/workflows/linux_simd.yml | 10 +++++----- .github/workflows/macos.yml | 2 +- .github/workflows/mypy.yml | 2 +- .github/workflows/wheels.yml | 4 ++-- .github/workflows/windows.yml | 4 ++-- 10 files changed, 28 insertions(+), 28 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 4eb61331095b..276592e1840f 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -64,7 +64,7 @@ jobs: - name: Set up Python ${{ env.PYTHON_VERSION }} id: setup-python - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: ${{ env.PYTHON_VERSION }} diff --git a/.github/workflows/free-threaded-wheels.yml b/.github/workflows/free-threaded-wheels.yml index 5d00fed8b749..3c5cc9294cef 100644 --- a/.github/workflows/free-threaded-wheels.yml +++ b/.github/workflows/free-threaded-wheels.yml @@ -111,7 +111,7 @@ jobs: if: runner.os == 'windows' # Used to push the built wheels - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: "3.x" diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 2245615ca1a3..c536336c7115 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -37,7 +37,7 @@ jobs: with: submodules: recursive fetch-depth: 0 - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.10' - name: Install linter requirements @@ -61,7 +61,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: ${{ matrix.version }} - uses: ./.github/meson_actions @@ -75,7 +75,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # 
v5.1.1 with: python-version: 'pypy3.10-v7.3.15' - name: Setup using scipy-openblas @@ -122,7 +122,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.10' - name: Install build and test dependencies from PyPI @@ -157,7 +157,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.10' - name: Install build and benchmarking dependencies @@ -194,7 +194,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' - name: Install gfortran and setup OpenBLAS (sdist build) @@ -236,7 +236,7 @@ jobs: submodules: 'true' path: 'array-api-tests' - name: Set up Python - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -264,7 +264,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' - name: Install build and test dependencies from PyPI diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 3b23072dccfa..b7ac412499b3 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -69,7 +69,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -196,7 +196,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -224,7 +224,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -284,7 +284,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -347,7 +347,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -383,7 +383,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' diff --git a/.github/workflows/linux_compiler_sanitizers.yml 
b/.github/workflows/linux_compiler_sanitizers.yml index d54dd1415950..efa8eb980730 100644 --- a/.github/workflows/linux_compiler_sanitizers.yml +++ b/.github/workflows/linux_compiler_sanitizers.yml @@ -30,7 +30,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: ${{ env.PYTHON_VERSION }} - name: Install dependencies diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index aa4fe75f14cf..a19ae38502ba 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -62,7 +62,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.10' - uses: ./.github/meson_actions @@ -79,7 +79,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.10' @@ -144,7 +144,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -158,7 +158,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -208,7 +208,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 9e622f2221d4..164a4c6710c2 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -119,7 +119,7 @@ jobs: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.10' diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index be9874a9f7eb..726e6b839051 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -54,7 +54,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: ${{ matrix.os_python[1] }} - name: Install dependencies diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index a2ab0f16c5c1..76168ef9e6c5 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -123,7 +123,7 @@ jobs: if: runner.os == 'windows' # Used to push the built wheels - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: "3.x" @@ -218,7 +218,7 @@ jobs: with: submodules: true # Used to push the built 
wheels - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: # Build sdist on lowest supported Python python-version: "3.10" diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index bf10ff006649..0ecf4be83628 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -31,7 +31,7 @@ jobs: fetch-tags: true - name: Setup Python - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -94,7 +94,7 @@ jobs: fetch-tags: true - name: Setup Python (32-bit) - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.10' architecture: 'x86' From 7cd9de700ed1590ba555bf39a17d17d49ba40398 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Mon, 8 Jul 2024 12:40:30 +0200 Subject: [PATCH 744/980] API: Add `device` and `to_device` to scalars --- numpy/__init__.pyi | 23 ++++-- numpy/_core/code_generators/genapi.py | 1 + numpy/_core/meson.build | 1 + .../_core/src/multiarray/array_api_standard.c | 78 +++++++++++++++++++ .../_core/src/multiarray/array_api_standard.h | 14 ++++ numpy/_core/src/multiarray/getset.c | 7 +- numpy/_core/src/multiarray/methods.c | 68 +--------------- numpy/_core/src/multiarray/scalartypes.c.src | 13 ++++ numpy/_core/tests/test_scalar_methods.py | 20 +++++ .../typing/tests/data/reveal/ndarray_misc.pyi | 12 +++ 10 files changed, 158 insertions(+), 79 deletions(-) create mode 100644 numpy/_core/src/multiarray/array_api_standard.c create mode 100644 numpy/_core/src/multiarray/array_api_standard.h diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 23cef0725a85..3028c1f4c0f5 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -966,6 +966,8 @@ class _ArrayOrScalarCommon: def itemsize(self) -> int: ... @property def nbytes(self) -> int: ... + @property + def device(self) -> L["cpu"]: ... def __bool__(self) -> builtins.bool: ... def __bytes__(self) -> bytes: ... def __str__(self) -> str: ... @@ -998,6 +1000,7 @@ class _ArrayOrScalarCommon: def __array_priority__(self) -> float: ... @property def __array_struct__(self) -> Any: ... # builtins.PyCapsule + def __array_namespace__(self, *, api_version: str | None = ...) -> Any: ... def __setstate__(self, state: tuple[ SupportsIndex, # version _ShapeLike, # Shape @@ -2578,12 +2581,18 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __dlpack_device__(self) -> tuple[int, L[0]]: ... - def __array_namespace__(self, *, api_version: str | None = ...) -> Any: ... - - def to_device(self, device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[Any]: ... - - @property - def device(self) -> L["cpu"]: ... + @overload + def to_device(self: NDArray[_UnknownType], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[Any]: ... + @overload + def to_device(self: NDArray[np.bool], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[np.bool]: ... + @overload + def to_device(self: NDArray[_NumberType], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[_NumberType]: ... + @overload + def to_device(self: NDArray[timedelta64], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[timedelta64]: ... 
+ @overload + def to_device(self: NDArray[object_], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[object_]: ... + @overload + def to_device(self: NDArray[Any], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[Any]: ... def bitwise_count( self, @@ -2637,6 +2646,8 @@ class generic(_ArrayOrScalarCommon): if sys.version_info >= (3, 12): def __buffer__(self, flags: int, /) -> memoryview: ... + def to_device(self: _ScalarType, device: L["cpu"], /, *, stream: None | int | Any = ...) -> _ScalarType: ... + @overload def astype( self, diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index 27e42bcb4c14..bf9c1d74f01b 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -41,6 +41,7 @@ def get_processor(): API_FILES = [join('multiarray', 'alloc.c'), join('multiarray', 'abstractdtypes.c'), join('multiarray', 'arrayfunction_override.c'), + join('multiarray', 'array_api_standard.c'), join('multiarray', 'array_assign_array.c'), join('multiarray', 'array_assign_scalar.c'), join('multiarray', 'array_coercion.c'), diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 78b60364bea5..b734bd40a7aa 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1063,6 +1063,7 @@ src_multiarray = multiarray_gen_headers + [ 'src/multiarray/array_coercion.c', 'src/multiarray/array_converter.c', 'src/multiarray/array_method.c', + 'src/multiarray/array_api_standard.c', 'src/multiarray/array_assign_scalar.c', 'src/multiarray/array_assign_array.c', 'src/multiarray/arrayfunction_override.c', diff --git a/numpy/_core/src/multiarray/array_api_standard.c b/numpy/_core/src/multiarray/array_api_standard.c new file mode 100644 index 000000000000..b473dc0823d9 --- /dev/null +++ b/numpy/_core/src/multiarray/array_api_standard.c @@ -0,0 +1,78 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include + + +NPY_NO_EXPORT PyObject * +array_device(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) +{ + return PyUnicode_FromString("cpu"); +} + +NPY_NO_EXPORT PyObject * +array_to_device(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"", "stream", NULL}; + char *device = ""; + PyObject *stream = Py_None; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|$O:to_device", kwlist, + &device, + &stream)) { + return NULL; + } + + if (stream != Py_None) { + PyErr_SetString(PyExc_ValueError, + "The stream argument in to_device() " + "is not supported"); + return NULL; + } + + if (strcmp(device, "cpu") != 0) { + PyErr_Format(PyExc_ValueError, + "Unsupported device: %s.", device); + return NULL; + } + + Py_INCREF(self); + return self; +} + +NPY_NO_EXPORT PyObject * +array_array_namespace(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"api_version", NULL}; + PyObject *array_api_version = Py_None; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "|$O:__array_namespace__", kwlist, + &array_api_version)) { + return NULL; + } + + if (array_api_version != Py_None) { + if (!PyUnicode_Check(array_api_version)) + { + PyErr_Format(PyExc_ValueError, + "Only None and strings are allowed as the Array API version, " + "but received: %S.", array_api_version); + return NULL; + } else if (PyUnicode_CompareWithASCIIString(array_api_version, "2021.12") != 0 && + PyUnicode_CompareWithASCIIString(array_api_version, "2022.12") != 0 && + PyUnicode_CompareWithASCIIString(array_api_version, "2023.12") != 0) + { + 
PyErr_Format(PyExc_ValueError, + "Version \"%U\" of the Array API Standard is not supported.", + array_api_version); + return NULL; + } + } + + PyObject *numpy_module = PyImport_ImportModule("numpy"); + if (numpy_module == NULL){ + return NULL; + } + + return numpy_module; +} diff --git a/numpy/_core/src/multiarray/array_api_standard.h b/numpy/_core/src/multiarray/array_api_standard.h new file mode 100644 index 000000000000..6776863701b8 --- /dev/null +++ b/numpy/_core/src/multiarray/array_api_standard.h @@ -0,0 +1,14 @@ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAY_API_STANDARD_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_ARRAY_API_STANDARD_H_ + + +NPY_NO_EXPORT PyObject * +array_device(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)); + +NPY_NO_EXPORT PyObject * +array_to_device(PyObject *self, PyObject *args, PyObject *kwds); + +NPY_NO_EXPORT PyObject * +array_array_namespace(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds); + +#endif /* NUMPY_CORE_SRC_MULTIARRAY_ARRAY_API_STANDARD_H_ */ diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 092ac65bbbc3..6457149c62c9 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -26,6 +26,7 @@ #include "npy_buffer.h" #include "shape.h" #include "multiarraymodule.h" +#include "array_api_standard.h" /******************* array attribute get and set routines ******************/ @@ -884,12 +885,6 @@ array_itemset(PyArrayObject *self, PyObject *args) return NULL; } -static PyObject * -array_device(PyArrayObject *self, void *NPY_UNUSED(ignored)) -{ - return PyUnicode_FromString("cpu"); -} - NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { {"ndim", (getter)array_ndim_get, diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 669d5e575c7a..ede662d11a5f 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -34,6 +34,7 @@ #include "methods.h" #include "alloc.h" +#include "array_api_standard.h" #include @@ -2806,73 +2807,6 @@ array_class_getitem(PyObject *cls, PyObject *args) return Py_GenericAlias(cls, args); } -static PyObject * -array_array_namespace(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"api_version", NULL}; - PyObject *array_api_version = Py_None; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|$O:__array_namespace__", kwlist, - &array_api_version)) { - return NULL; - } - - if (array_api_version != Py_None) { - if (!PyUnicode_Check(array_api_version)) - { - PyErr_Format(PyExc_ValueError, - "Only None and strings are allowed as the Array API version, " - "but received: %S.", array_api_version); - return NULL; - } else if (PyUnicode_CompareWithASCIIString(array_api_version, "2021.12") != 0 && - PyUnicode_CompareWithASCIIString(array_api_version, "2022.12") != 0 && - PyUnicode_CompareWithASCIIString(array_api_version, "2023.12") != 0) - { - PyErr_Format(PyExc_ValueError, - "Version \"%U\" of the Array API Standard is not supported.", - array_api_version); - return NULL; - } - } - - PyObject *numpy_module = PyImport_ImportModule("numpy"); - if (numpy_module == NULL){ - return NULL; - } - - return numpy_module; -} - -static PyObject * -array_to_device(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"", "stream", NULL}; - char *device = ""; - PyObject *stream = Py_None; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|$O:to_device", kwlist, - &device, - &stream)) { - return NULL; - } - - if (stream != Py_None) 
{ - PyErr_SetString(PyExc_ValueError, - "The stream argument in to_device() " - "is not supported"); - return NULL; - } - - if (strcmp(device, "cpu") != 0) { - PyErr_Format(PyExc_ValueError, - "Unsupported device: %s.", device); - return NULL; - } - - Py_INCREF(self); - return (PyObject *)self; -} - NPY_NO_EXPORT PyMethodDef array_methods[] = { /* for subtypes */ diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index a0517c247215..7d6adf243774 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -35,6 +35,7 @@ #include "npy_buffer.h" #include "npy_static_data.h" #include "multiarraymodule.h" +#include "array_api_standard.h" #include @@ -1678,6 +1679,9 @@ static PyGetSetDef gentype_getsets[] = { {"ptp", (getter)gentype_ptp, (setter)0, NULL, NULL}, + {"device", + (getter)array_device, + (setter)0, NULL, NULL}, {"__array_interface__", (getter)gentype_interface_get, NULL, @@ -2451,6 +2455,15 @@ static PyMethodDef gentype_methods[] = { {"setflags", (PyCFunction)gentype_setflags, METH_VARARGS | METH_KEYWORDS, NULL}, + + /* For Array API compatibility */ + {"__array_namespace__", + (PyCFunction)array_array_namespace, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"to_device", + (PyCFunction)array_to_device, + METH_VARARGS | METH_KEYWORDS, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ }; diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index b0bdd126e8a7..7b6e83554d81 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -203,3 +203,23 @@ def test_bit_count(self): assert np.uint64(a - 1).bit_count() == exp assert np.uint64(a ^ 63).bit_count() == 7 assert np.uint64((a - 1) ^ 510).bit_count() == exp - 8 + + +class TestDevice: + """ + Test scalar.device attribute and scalar.to_device() method. + """ + scalars = [np.bool(True), np.int64(1), np.uint64(1), np.float64(1.0), + np.complex128(1+1j)] + + @pytest.mark.parametrize("scalar", scalars) + def test_device(self, scalar): + assert scalar.device == "cpu" + + @pytest.mark.parametrize("scalar", scalars) + def test_to_device(self, scalar): + assert scalar.to_device("cpu") is scalar + + @pytest.mark.parametrize("scalar", scalars) + def test___array_namespace__(self, scalar): + assert scalar.__array_namespace__() is np diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 5f3526a72d45..b9a5fe433653 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -22,6 +22,7 @@ else: class SubClass(npt.NDArray[np.object_]): ... 
f8: np.float64
+i8: np.int64
B: SubClass
AR_f8: npt.NDArray[np.float64]
AR_i8: npt.NDArray[np.int64]
@@ -213,3 +214,14 @@ with open("test_file", "wb") as f:
 assert_type(AR_f8.__array_finalize__(None), None)
 assert_type(AR_f8.__array_finalize__(B), None)
 assert_type(AR_f8.__array_finalize__(AR_f8), None)
+
+assert_type(f8.device, Literal["cpu"])
+assert_type(AR_f8.device, Literal["cpu"])
+
+assert_type(f8.to_device("cpu"), np.float64)
+assert_type(i8.to_device("cpu"), np.int64)
+assert_type(AR_f8.to_device("cpu"), npt.NDArray[np.float64])
+assert_type(AR_i8.to_device("cpu"), npt.NDArray[np.int64])
+
+assert_type(f8.__array_namespace__(), Any)
+assert_type(AR_f8.__array_namespace__(), Any)

From 9d7c8cb0f62e2bbfc871e7e109fa1b9ff2cb0aae Mon Sep 17 00:00:00 2001
From: Azuk 443
Date: Wed, 10 Jul 2024 16:17:46 +0800
Subject: [PATCH 745/980] BUG: `np.loadtxt` always returns F_CONTIGUOUS ndarray
 if row size is too big

---
 numpy/_core/src/multiarray/textreading/rows.c |  6 ++++++
 numpy/_core/tests/test_multiarray.py          | 11 +++++++++++
 2 files changed, 17 insertions(+)

diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c
index 19c07b18fb51..8fe13d0d3532 100644
--- a/numpy/_core/src/multiarray/textreading/rows.c
+++ b/numpy/_core/src/multiarray/textreading/rows.c
@@ -480,6 +480,12 @@ read_rows(stream *s,
         ((PyArrayObject_fields *)data_array)->dimensions[0] = row_count;
     }

+    /*
+     * If row_size is too big, F_CONTIGUOUS is always set
+     * as the array was created for only one row of data.
+     * We just update the contiguous flags here.
+     */
+    PyArray_UpdateFlags(data_array, NPY_ARRAY_F_CONTIGUOUS);
     return data_array;

   error:
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py
index 33a89bcc9f1e..c757be9d80ef 100644
--- a/numpy/_core/tests/test_multiarray.py
+++ b/numpy/_core/tests/test_multiarray.py
@@ -266,6 +266,17 @@ def test_void_align(self):
         a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
         assert_(a.flags.aligned)

+    @pytest.mark.parametrize("row_size", [5, 1 << 16])
+    @pytest.mark.parametrize("row_count", [1, 5])
+    @pytest.mark.parametrize("ndmin", [0, 1, 2])
+    def test_xcontiguous_load_txt(self, row_size, row_count, ndmin):
+        s = io.StringIO('\n'.join(['1.0 ' * row_size] * row_count))
+        a = np.loadtxt(s, ndmin=ndmin)
+
+        assert a.flags.c_contiguous
+        x = [i for i in a.shape if i != 1]
+        assert a.flags.f_contiguous == (len(x) <= 1)
+

 class TestHash:
     # see #3793

From d7ce405df4b3e6055c3e13b167a04514f56cfdc5 Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos
 <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Wed, 10 Jul 2024 13:14:53 +0200
Subject: [PATCH 746/980] Apply ruff/flake8-bugbear rule B004

B004 Using `hasattr(x, "__call__")` to test if x is callable is unreliable.
Use `callable(x)` for consistent results.
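
As an illustration (not part of the patch; the class name is made up), a
minimal sketch of the failure mode B004 guards against: Python looks up
special methods on the type, so an instance can appear to have `__call__`
without actually being callable:

    class Fake:
        def __getattr__(self, name):
            # answers for any attribute name, including "__call__"
            return lambda: None

    obj = Fake()
    print(hasattr(obj, "__call__"))  # True
    print(callable(obj))             # False: type(obj) defines no __call__
    try:
        obj()
    except TypeError as exc:
        print(exc)                   # 'Fake' object is not callable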
--- numpy/lib/_iotools.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index 93dceaef9dd3..35cf656ffa9f 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -561,7 +561,7 @@ def upgrade_mapper(cls, func, default=None): >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) """ # Func is a single functions - if hasattr(func, '__call__'): + if callable(func): cls._mapper.insert(-1, (cls._getsubdtype(default), func, default)) return elif hasattr(func, '__iter__'): @@ -608,7 +608,7 @@ def __init__(self, dtype_or_func=None, default=None, missing_values=None, dtype = np.dtype(dtype_or_func) except TypeError: # dtype_or_func must be a function, then - if not hasattr(dtype_or_func, '__call__'): + if not callable(dtype_or_func): errmsg = ("The input argument `dtype` is neither a" " function nor a dtype (got '%s' instead)") raise TypeError(errmsg % type(dtype_or_func)) From a36036444d3f1359572d00641ea2db525a0f31fe Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 10 Jul 2024 13:21:29 +0200 Subject: [PATCH 747/980] Apply ruff/flake8-bugbear rule B005 B005 Using `.strip()` with multi-character strings is misleading --- numpy/f2py/crackfortran.py | 2 +- numpy/lib/_datasource.py | 2 +- numpy/lib/tests/test_loadtxt.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 980dee356d96..68ef46c05fc0 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -2539,7 +2539,7 @@ def get_parameters(vars, global_params={}): outmess(f'get_parameters[TODO]: ' f'implement evaluation of complex expression {v}\n') - dimspec = ([s.lstrip('dimension').strip() + dimspec = ([s.removeprefix('dimension').strip() for s in vars[n]['attrspec'] if s.startswith('dimension')] or [None])[0] diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 2ee277f0d421..e3d85b854941 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -420,7 +420,7 @@ def _sanitize_relative_path(self, path): last = path # Note: os.path.join treats '/' as os.sep on Windows path = path.lstrip(os.sep).lstrip('/') - path = path.lstrip(os.pardir).lstrip('..') + path = path.lstrip(os.pardir).removeprefix('..') drive, path = os.path.splitdrive(path) # for Windows return path diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index 2678aa82e600..aba00c4256ad 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -1004,7 +1004,7 @@ def test_str_dtype_unit_discovery_with_converter(): expected = np.array( ["spam-a-lot"] * 60000 + ["tis_but_a_scratch"], dtype="U17" ) - conv = lambda s: s.strip("XXX") + conv = lambda s: s.removeprefix("XXX") # file-like path txt = StringIO("\n".join(data)) From 3c13c428d760145cace280e0d3e606dc11db4d95 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 11 Jul 2024 11:32:42 -0700 Subject: [PATCH 748/980] MAINT: declare that NumPy's C extensions support running without the GIL --- numpy/_core/src/_simd/_simd.c | 6 ++++++ numpy/_core/src/multiarray/_multiarray_tests.c.src | 6 ++++++ numpy/_core/src/multiarray/multiarraymodule.c | 5 +++++ numpy/_core/src/umath/_operand_flag_tests.c | 5 +++++ numpy/_core/src/umath/_rational_tests.c | 5 +++++ numpy/_core/src/umath/_struct_ufunc_tests.c | 6 ++++++ numpy/_core/src/umath/_umath_tests.c.src | 6 ++++++ numpy/f2py/rules.py | 5 +++++ 
numpy/fft/_pocketfft_umath.cpp | 5 +++++ numpy/linalg/lapack_litemodule.c | 5 +++++ numpy/linalg/umath_linalg.cpp | 5 +++++ 11 files changed, 59 insertions(+) diff --git a/numpy/_core/src/_simd/_simd.c b/numpy/_core/src/_simd/_simd.c index 87ecc3e9f479..c1881dd86f0a 100644 --- a/numpy/_core/src/_simd/_simd.c +++ b/numpy/_core/src/_simd/_simd.c @@ -92,6 +92,12 @@ PyMODINIT_FUNC PyInit__simd(void) NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, ATTACH_MODULE, MAKE_MSVC_HAPPY) NPY__CPU_DISPATCH_BASELINE_CALL(ATTACH_BASELINE_MODULE, MAKE_MSVC_HAPPY) #endif + +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; err: Py_DECREF(m); diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index fbd5fc445a2c..bafa8445a14b 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -2411,6 +2411,12 @@ PyMODINIT_FUNC PyInit__multiarray_tests(void) PyErr_SetString(PyExc_RuntimeError, "cannot load _multiarray_tests module."); } + +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index e8bc75c1e359..a09bf9ee0434 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -5132,6 +5132,11 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; err: diff --git a/numpy/_core/src/umath/_operand_flag_tests.c b/numpy/_core/src/umath/_operand_flag_tests.c index 11b74af72d28..9747b7946512 100644 --- a/numpy/_core/src/umath/_operand_flag_tests.c +++ b/numpy/_core/src/umath/_operand_flag_tests.c @@ -77,6 +77,11 @@ PyMODINIT_FUNC PyInit__operand_flag_tests(void) ((PyUFuncObject*)ufunc)->iter_flags = NPY_ITER_REDUCE_OK; PyModule_AddObject(m, "inplace_add", (PyObject*)ufunc); +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; fail: diff --git a/numpy/_core/src/umath/_rational_tests.c b/numpy/_core/src/umath/_rational_tests.c index aa4250e4efc8..a95c89b373df 100644 --- a/numpy/_core/src/umath/_rational_tests.c +++ b/numpy/_core/src/umath/_rational_tests.c @@ -1355,6 +1355,11 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { GCD_LCM_UFUNC(gcd,NPY_INT64,"greatest common denominator of two integers"); GCD_LCM_UFUNC(lcm,NPY_INT64,"least common multiple of two integers"); +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; fail: diff --git a/numpy/_core/src/umath/_struct_ufunc_tests.c b/numpy/_core/src/umath/_struct_ufunc_tests.c index ee71c4698f79..90b7e147d50a 100644 --- a/numpy/_core/src/umath/_struct_ufunc_tests.c +++ b/numpy/_core/src/umath/_struct_ufunc_tests.c @@ -156,5 +156,11 @@ PyMODINIT_FUNC PyInit__struct_ufunc_tests(void) PyDict_SetItemString(d, "add_triplet", add_triplet); Py_DECREF(add_triplet); + +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } diff --git 
a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src
index a16a915c09d5..8b5c3b65a9a4 100644
--- a/numpy/_core/src/umath/_umath_tests.c.src
+++ b/numpy/_core/src/umath/_umath_tests.c.src
@@ -829,5 +829,11 @@ PyMODINIT_FUNC PyInit__umath_tests(void) {
                         "cannot load _umath_tests module.");
         return NULL;
     }
+
+#if Py_GIL_DISABLED
+    // signal this module supports running with the GIL disabled
+    PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED);
+#endif
+
     return m;
 }
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 009365e04761..ac21fd76cc34 100755
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -236,6 +236,11 @@
 #initcommonhooks#
 #interface_usercode#

+#if Py_GIL_DISABLED
+    // signal this module supports running with the GIL disabled
+    PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED);
+#endif
+
 #ifdef F2PY_REPORT_ATEXIT
     if (! PyErr_Occurred()) on_exit(f2py_report_on_exit,(void*)\"#modulename#\");
diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp
index 013db5f1d8d4..848888710d6c 100644
--- a/numpy/fft/_pocketfft_umath.cpp
+++ b/numpy/fft/_pocketfft_umath.cpp
@@ -419,5 +419,10 @@ PyMODINIT_FUNC PyInit__pocketfft_umath(void)
     return NULL;
   }

+#if Py_GIL_DISABLED
+  // signal this module supports running with the GIL disabled
+  PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED);
+#endif
+
   return m;
 }
diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c
index 766dfa9527b1..85590ba687ca 100644
--- a/numpy/linalg/lapack_litemodule.c
+++ b/numpy/linalg/lapack_litemodule.c
@@ -409,5 +409,10 @@ PyMODINIT_FUNC PyInit_lapack_lite(void)
     PyDict_SetItemString(d, "_ilp64", Py_False);
 #endif

+#if Py_GIL_DISABLED
+    // signal this module supports running with the GIL disabled
+    PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED);
+#endif
+
     return m;
 }
diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp
index 1cd5a005f89d..8c36ba43864d 100644
--- a/numpy/linalg/umath_linalg.cpp
+++ b/numpy/linalg/umath_linalg.cpp
@@ -4699,5 +4699,10 @@ PyMODINIT_FUNC PyInit__umath_linalg(void)
     PyDict_SetItemString(d, "_ilp64", Py_False);
 #endif

+#if Py_GIL_DISABLED
+    // signal this module supports running with the GIL disabled
+    PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED);
+#endif
+
     return m;
 }

From 6160c58aeddc56ea9f26c1c8aed5123f3ac447e9 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Thu, 11 Jul 2024 22:11:32 +0200
Subject: [PATCH 749/980] API: Partially revert unique with return_inverse

There was a good argument that it is not possible to reconstruct the
original array with `axis=None` without first reshaping, and changing the
result shape helped with it.

However, it was always possible to do it for other axis values by using
`np.take` rather than `np.take_along_axis`.
Changing it for all axis values is unnecessary to achieve reconstruction
because `np.take(arr, inverse, axis=axis)` already performed the job
except for `axis=None`.

Thus, this keeps the change for axis=None, but reverts gh-25553 for
numerical axes.
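
As an illustration (not part of the patch), a minimal sketch of the
reconstruction contract this keeps, mirroring the updated tests below:

    import numpy as np

    x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]])

    # Integer axis: the inverse is 1-D again, so np.take reconstructs x.
    uniq, inv = np.unique(x, return_inverse=True, axis=0)
    assert inv.ndim == 1
    assert np.array_equal(np.take(uniq, inv, axis=0), x)

    # axis=None keeps the 2.0.0 shape: the inverse matches x's shape, and
    # the same np.take(unique, inverse) call reconstructs x as well.
    uniq, inv = np.unique(x, return_inverse=True)
    assert inv.shape == x.shape
    assert np.array_equal(np.take(uniq, inv), x)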
---
 doc/source/release/2.0.0-notes.rst  |  6 ++++++
 numpy/lib/_arraysetops_impl.py      | 19 +++++++++++++------
 numpy/lib/tests/test_arraysetops.py | 15 ++++++---------
 3 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst
index 9ea3e55fd504..9d54513edb7c 100644
--- a/doc/source/release/2.0.0-notes.rst
+++ b/doc/source/release/2.0.0-notes.rst
@@ -1496,6 +1496,12 @@ the ``unique_inverse`` output is now shaped such that the input can be reconstru
 directly using ``np.take(unique, unique_inverse)`` when ``axis=None``, and
 ``np.take_along_axis(unique, unique_inverse, axis=axis)`` otherwise.

+.. note::
+    This change was reverted in 2.0.1 except for ``axis=None``. The correct
+    reconstruction is always ``np.take(unique, unique_inverse, axis=axis)``.
+    When 2.0.0 needs to be supported, add ``unique_inverse.reshape(-1)``
+    to the code.
+
 (`gh-25553 <https://github.com/numpy/numpy/pull/25553>`__,
 `gh-25570 <https://github.com/numpy/numpy/pull/25570>`__)

diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py
index 435904c95321..bd65e6be04c5 100644
--- a/numpy/lib/_arraysetops_impl.py
+++ b/numpy/lib/_arraysetops_impl.py
@@ -228,8 +228,13 @@ def unique(ar, return_index=False, return_inverse=False,
         .. versionchanged: 2.0
             For multi-dimensional inputs, ``unique_inverse`` is reshaped
             such that the input can be reconstructed using
-            ``np.take(unique, unique_inverse)`` when ``axis = None``, and
-            ``np.take_along_axis(unique, unique_inverse, axis=axis)`` otherwise.
+            ``np.take(unique, unique_inverse, axis=axis)``. The result is
+            no longer 1-dimensional when ``axis=None``.
+
+            Note that in NumPy 2.0.0 a higher dimensional array was also
+            returned when ``axis`` was not ``None``. This was reverted, but
+            ``inverse.reshape(-1)`` can be used to ensure compatibility with
+            both versions.

     Examples
     --------
@@ -282,7 +287,7 @@ def unique(ar, return_index=False, return_inverse=False,
     ar = np.asanyarray(ar)
     if axis is None:
         ret = _unique1d(ar, return_index, return_inverse, return_counts,
-                        equal_nan=equal_nan, inverse_shape=ar.shape)
+                        equal_nan=equal_nan, inverse_shape=ar.shape, axis=None)
         return _unpack_tuple(ret)

     # axis was specified and not None
@@ -328,13 +333,15 @@ def reshape_uniq(uniq):
     output = _unique1d(consolidated, return_index,
                        return_inverse, return_counts,
-                       equal_nan=equal_nan, inverse_shape=inverse_shape)
+                       equal_nan=equal_nan, inverse_shape=inverse_shape,
+                       axis=axis)
     output = (reshape_uniq(output[0]),) + output[1:]
     return _unpack_tuple(output)


 def _unique1d(ar, return_index=False, return_inverse=False,
-              return_counts=False, *, equal_nan=True, inverse_shape=None):
+              return_counts=False, *, equal_nan=True, inverse_shape=None,
+              axis=None):
     """
     Find the unique elements of an array, ignoring shape.
""" @@ -371,7 +378,7 @@ def _unique1d(ar, return_index=False, return_inverse=False, imask = np.cumsum(mask) - 1 inv_idx = np.empty(mask.shape, dtype=np.intp) inv_idx[perm] = imask - ret += (inv_idx.reshape(inverse_shape),) + ret += (inv_idx.reshape(inverse_shape) if axis is None else inv_idx,) if return_counts: idx = np.concatenate(np.nonzero(mask) + ([mask.size],)) ret += (np.diff(idx),) diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 33b43b57a381..37f96b02a6f0 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -828,11 +828,8 @@ def test_unique_1d_with_axis(self, axis): def test_unique_inverse_with_axis(self, axis): x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]]) uniq, inv = unique(x, return_inverse=True, axis=axis) - assert_equal(inv.ndim, x.ndim) - if axis is None: - assert_array_equal(x, np.take(uniq, inv)) - else: - assert_array_equal(x, np.take_along_axis(uniq, inv, axis=axis)) + assert_equal(inv.ndim, x.ndim if axis is None else 1) + assert_array_equal(x, np.take(uniq, inv, axis=axis)) def test_unique_axis_zeros(self): # issue 15559 @@ -844,7 +841,7 @@ def test_unique_axis_zeros(self): assert_equal(uniq.dtype, single_zero.dtype) assert_array_equal(uniq, np.empty(shape=(1, 0))) assert_array_equal(idx, np.array([0])) - assert_array_equal(inv, np.array([[0], [0]])) + assert_array_equal(inv, np.array([0, 0])) assert_array_equal(cnt, np.array([2])) # there's 0 elements of shape (2,) along axis 1 @@ -854,7 +851,7 @@ def test_unique_axis_zeros(self): assert_equal(uniq.dtype, single_zero.dtype) assert_array_equal(uniq, np.empty(shape=(2, 0))) assert_array_equal(idx, np.array([])) - assert_array_equal(inv, np.empty((1, 0))) + assert_array_equal(inv, np.array([])) assert_array_equal(cnt, np.array([])) # test a "complicated" shape @@ -923,7 +920,7 @@ def _run_axis_tests(self, dtype): msg = "Unique's return_index=True failed with axis=0" assert_array_equal(data[idx], uniq, msg) msg = "Unique's return_inverse=True failed with axis=0" - assert_array_equal(np.take_along_axis(uniq, inv, axis=0), data) + assert_array_equal(np.take(uniq, inv, axis=0), data) msg = "Unique's return_counts=True failed with axis=0" assert_array_equal(cnt, np.array([2, 2]), msg) @@ -932,7 +929,7 @@ def _run_axis_tests(self, dtype): msg = "Unique's return_index=True failed with axis=1" assert_array_equal(data[:, idx], uniq) msg = "Unique's return_inverse=True failed with axis=1" - assert_array_equal(np.take_along_axis(uniq, inv, axis=1), data) + assert_array_equal(np.take(uniq, inv, axis=1), data) msg = "Unique's return_counts=True failed with axis=1" assert_array_equal(cnt, np.array([2, 1, 1]), msg) From bdae7c2782b73f93e561925113b809693a73a7f5 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 11 Jul 2024 16:21:18 -0700 Subject: [PATCH 750/980] MAINT: filter the GIL-enabled RuntimeWarning in the f2py tests --- numpy/f2py/rules.py | 5 ----- numpy/f2py/tests/test_abstract_interface.py | 2 ++ numpy/f2py/tests/test_callback.py | 2 ++ 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index ac21fd76cc34..009365e04761 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -236,11 +236,6 @@ #initcommonhooks# #interface_usercode# -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); -#endif - #ifdef F2PY_REPORT_ATEXIT if (! 
PyErr_Occurred()) on_exit(f2py_report_on_exit,(void*)\"#modulename#\");
diff --git a/numpy/f2py/tests/test_abstract_interface.py b/numpy/f2py/tests/test_abstract_interface.py
index 2c6555aecea1..a35e041872cb 100644
--- a/numpy/f2py/tests/test_abstract_interface.py
+++ b/numpy/f2py/tests/test_abstract_interface.py
@@ -6,6 +6,8 @@
 from numpy.testing import IS_WASM


+@pytest.mark.filterwarnings("ignore:.*The global interpreter lock \(GIL\) "
+                            "has been enabled.*:RuntimeWarning")
 @pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
 @pytest.mark.slow
 class TestAbstractInterface(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 8bd6175a3eb9..00b0b33b024b 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -11,6 +11,8 @@
 from . import util


+@pytest.mark.filterwarnings("ignore:.*The global interpreter lock \(GIL\) "
+                            "has been enabled.*:RuntimeWarning")
 class TestF77Callback(util.F2PyTest):
     sources = [util.getpath("tests", "src", "callback", "foo.f")]

From afe45985f37ac34b5b9586e8c3462bc50b10b2ea Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Fri, 12 Jul 2024 09:00:02 +0200
Subject: [PATCH 751/980] BUG,MAINT: Fix utf-8 character stripping memory
 access

This fixes the memory access bug: the old if was there for a reason
(obviously...), but unfortunately only the sanitizer checks noticed that.

To make it clear, I also renamed/changed things a bit: making the indices
unsigned and using a stop range (one past the last character) rather than
the last-character index is just much clearer here IMO, since the range
can go to length 0.
I.e. the old code had to check for `j >= 0`, not `j > 0` (`j` being the
last character index), because `j` could go negative.

Fixes the sanitizer tests.

---
 numpy/_core/src/umath/string_buffer.h | 59 +++++++++++++++------------
 1 file changed, 33 insertions(+), 26 deletions(-)

diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h
index bc705f3e039b..7f2345d2838f 100644
--- a/numpy/_core/src/umath/string_buffer.h
+++ b/numpy/_core/src/umath/string_buffer.h
@@ -1149,49 +1149,54 @@ string_lrstrip_whitespace(Buffer<enc> buf, Buffer<enc> out, STRIPTYPE striptype)
         return 0;
     }

-    size_t i = 0;
+    size_t new_start = 0;

     size_t num_bytes = (buf.after - buf.buf);
     Buffer<enc> traverse_buf = Buffer<enc>(buf.buf, num_bytes);

     if (striptype != STRIPTYPE::RIGHTSTRIP) {
-        while (i < len) {
+        while (new_start < len) {
             if (!traverse_buf.first_character_isspace()) {
                 break;
             }
             num_bytes -= traverse_buf.num_bytes_next_character();
-            traverse_buf++;
-            i++;
+            new_start++;
+            traverse_buf++;  // may go one beyond buffer
        }
     }

-    npy_intp j = len - 1;  // Could also turn negative if we're stripping the whole string
+ if (new_stop > 0) { + traverse_buf--; + } } } - Buffer offset_buf = buf + i; + Buffer offset_buf = buf + new_start; if (enc == ENCODING::UTF8) { offset_buf.buffer_memcpy(out, num_bytes); return num_bytes; } - offset_buf.buffer_memcpy(out, j - i + 1); - out.buffer_fill_with_zeros_after_index(j - i + 1); - return j - i + 1; + offset_buf.buffer_memcpy(out, new_stop - new_start); + out.buffer_fill_with_zeros_after_index(new_stop - new_start); + return new_stop - new_start; } @@ -1218,13 +1223,13 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT return len1; } - size_t i = 0; + size_t new_start = 0; size_t num_bytes = (buf1.after - buf1.buf); Buffer traverse_buf = Buffer(buf1.buf, num_bytes); if (striptype != STRIPTYPE::RIGHTSTRIP) { - while (i < len1) { + for (; new_start < len1; traverse_buf++) { Py_ssize_t res; switch (enc) { case ENCODING::ASCII: @@ -1245,21 +1250,20 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT break; } num_bytes -= traverse_buf.num_bytes_next_character(); - traverse_buf++; - i++; + new_start++; } } - npy_intp j = len1 - 1; + size_t new_stop = len1; // New stop is a range (beyond last char) if (enc == ENCODING::UTF8) { traverse_buf = Buffer(buf1.after, 0) - 1; } else { - traverse_buf = buf1 + j; + traverse_buf = buf1 + (new_stop - 1); } if (striptype != STRIPTYPE::LEFTSTRIP) { - while (j >= static_cast(i)) { + while (new_stop > new_start) { Py_ssize_t res; switch (enc) { case ENCODING::ASCII: @@ -1280,19 +1284,22 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT break; } num_bytes -= traverse_buf.num_bytes_next_character(); - j--; - traverse_buf--; + new_stop--; + // Do not step to character -1: can't find it's start for utf-8. + if (new_stop > 0) { + traverse_buf--; + } } } - Buffer offset_buf = buf1 + i; + Buffer offset_buf = buf1 + new_start; if (enc == ENCODING::UTF8) { offset_buf.buffer_memcpy(out, num_bytes); return num_bytes; } - offset_buf.buffer_memcpy(out, j - i + 1); - out.buffer_fill_with_zeros_after_index(j - i + 1); - return j - i + 1; + offset_buf.buffer_memcpy(out, new_stop - new_start); + out.buffer_fill_with_zeros_after_index(new_stop - new_start); + return new_stop - new_start; } template From f79560c265e72a2cf93a470fa65aa2a7a17ea4ab Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 12 Jul 2024 09:34:21 -0700 Subject: [PATCH 752/980] BUG: Fix invalid escape sequence error --- numpy/f2py/tests/test_abstract_interface.py | 4 ++-- numpy/f2py/tests/test_callback.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/f2py/tests/test_abstract_interface.py b/numpy/f2py/tests/test_abstract_interface.py index a35e041872cb..238e9197d122 100644 --- a/numpy/f2py/tests/test_abstract_interface.py +++ b/numpy/f2py/tests/test_abstract_interface.py @@ -6,8 +6,8 @@ from numpy.testing import IS_WASM -@pytest.mark.filterwarnings("ignore:.*The global interpreter lock \(GIL\) " - "has been enabled.*:RuntimeWarning") +@pytest.mark.filterwarnings(r"ignore:.*The global interpreter lock \(GIL\) " + r"has been enabled.*:RuntimeWarning") @pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") @pytest.mark.slow class TestAbstractInterface(util.F2PyTest): diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 00b0b33b024b..f31407d5a0f6 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -11,8 +11,8 @@ from . 
import util -@pytest.mark.filterwarnings("ignore:.*The global interpreter lock \(GIL\) " - "has been enabled.*:RuntimeWarning") +@pytest.mark.filterwarnings(r"ignore:.*The global interpreter lock \(GIL\) " + r"has been enabled.*:RuntimeWarning") class TestF77Callback(util.F2PyTest): sources = [util.getpath("tests", "src", "callback", "foo.f")] From b16882ae9dda0a54f6ae504ac7b807f43c744a45 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 12 Jul 2024 18:55:26 +0200 Subject: [PATCH 753/980] TST: Apply test suggestion by Nathan for rlstrip fixes --- numpy/_core/tests/test_strings.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index ec8f8f723e69..d74f2388e55b 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -467,10 +467,12 @@ def test_endswith(self, a, suffix, start, end, out, dt): ]) def test_lstrip(self, a, chars, out, dt): a = np.array(a, dtype=dt) + out = np.array(out, dtype=dt) if chars is not None: chars = np.array(chars, dtype=dt) - out = np.array(out, dtype=dt) - assert_array_equal(np.strings.lstrip(a, chars), out) + assert_array_equal(np.strings.lstrip(a, chars), out) + else: + assert_array_equal(np.strings.lstrip(a), out) @pytest.mark.parametrize("a,chars,out", [ ("", None, ""), @@ -486,6 +488,7 @@ def test_lstrip(self, a, chars, out, dt): ("xyzzyhelloxyzzy", "xyz", "xyzzyhello"), ("hello", "xyz", "hello"), ("xyxz", "xyxz", ""), + (" ", None, ""), ("xyxzx", "x", "xyxz"), (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], ["xyzzyhello", "hello"]), @@ -493,10 +496,12 @@ def test_lstrip(self, a, chars, out, dt): ]) def test_rstrip(self, a, chars, out, dt): a = np.array(a, dtype=dt) + out = np.array(out, dtype=dt) if chars is not None: chars = np.array(chars, dtype=dt) - out = np.array(out, dtype=dt) - assert_array_equal(np.strings.rstrip(a, chars), out) + assert_array_equal(np.strings.rstrip(a, chars), out) + else: + assert_array_equal(np.strings.rstrip(a), out) @pytest.mark.parametrize("a,chars,out", [ ("", None, ""), From befcd31c48eafb84eafe386543b4dfcacaf3a5ed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Jul 2024 17:31:30 +0000 Subject: [PATCH 754/980] MAINT: Bump actions/dependency-review-action from 4.3.3 to 4.3.4 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.3.3 to 4.3.4. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/72eb03d02c7872a771aacd928f3123ac62ad6d3a...5a2ce3f5b92ee19cbb1541a4984c76d921601d7c) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 95a99d6dcf9b..461ef2b4253b 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -17,4 +17,4 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: 'Dependency Review' - uses: actions/dependency-review-action@72eb03d02c7872a771aacd928f3123ac62ad6d3a # v4.3.3 + uses: actions/dependency-review-action@5a2ce3f5b92ee19cbb1541a4984c76d921601d7c # v4.3.4 From 00073b16ac246198d2a6c934e2bb316132139f83 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Jul 2024 17:31:40 +0000 Subject: [PATCH 755/980] MAINT: Bump github/codeql-action from 3.25.11 to 3.25.12 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.11 to 3.25.12. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/b611370bb5703a7efb587f9d136a52ea24c5c38c...4fa2a7953630fd2f3fb380f21be14ede0169dd4f) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 981c5b3f54c2..c49f5114cce4 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 + uses: github/codeql-action/init@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 + uses: github/codeql-action/autobuild@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 + uses: github/codeql-action/analyze@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 8d34e207bc55..349941a1e13c 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # v2.1.27 + uses: github/codeql-action/upload-sarif@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v2.1.27 with: sarif_file: results.sarif From 7d77682b2f3f2d8eb9ff825d7a3f05fb6e7682a0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Jul 2024 04:44:58 +0200 Subject: [PATCH 756/980] TYP: Make the type parameter of `numpy.flatiter` covariant --- numpy/__init__.pyi | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 23cef0725a85..464b205c0298 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -641,6 +641,7 @@ test: PytestTester def show_config() -> None: ... _NdArraySubClass = TypeVar("_NdArraySubClass", bound=NDArray[Any]) +_NdArraySubClass_co = TypeVar("_NdArraySubClass_co", bound=NDArray[Any], covariant=True) _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) _ByteOrder: TypeAlias = L["S", "<", ">", "=", "|", "L", "B", "N", "I", "little", "big", "native"] @@ -906,15 +907,15 @@ _ArrayLikeInt: TypeAlias = ( _FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter[Any]) @final -class flatiter(Generic[_NdArraySubClass]): +class flatiter(Generic[_NdArraySubClass_co]): __hash__: ClassVar[None] @property - def base(self) -> _NdArraySubClass: ... + def base(self) -> _NdArraySubClass_co: ... @property def coords(self) -> _Shape: ... @property def index(self) -> int: ... - def copy(self) -> _NdArraySubClass: ... + def copy(self) -> _NdArraySubClass_co: ... def __iter__(self: _FlatIterSelf) -> _FlatIterSelf: ... def __next__(self: flatiter[NDArray[_ScalarType]]) -> _ScalarType: ... def __len__(self) -> int: ... @@ -927,7 +928,7 @@ class flatiter(Generic[_NdArraySubClass]): def __getitem__( self, key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis], - ) -> _NdArraySubClass: ... + ) -> _NdArraySubClass_co: ... # TODO: `__setitem__` operates via `unsafe` casting rules, and can # thus accept any type accepted by the relevant underlying `np.generic` # constructor. From 6abbf5a3cf26c8161e2034a3cd37db7c824edc25 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Jul 2024 05:11:36 +0200 Subject: [PATCH 757/980] TYP: positional-only binop parameters in `numpy.ndarray` --- numpy/__init__.pyi | 668 ++++++++++++++++++++++----------------------- 1 file changed, 334 insertions(+), 334 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 23cef0725a85..cfdce573f9ab 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1809,48 +1809,48 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): # strings, it will pass through the final overload otherwise @overload - def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload - def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... @overload - def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __lt__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... 
@overload - def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... @overload - def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload - def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... @overload - def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __le__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + def __le__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... @overload - def __le__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + def __le__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... # Unary ops @overload @@ -1891,500 +1891,500 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): # Binary ops @overload - def __matmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __matmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... 
# type: ignore[misc] @overload - def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... @overload - def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __matmul__(self: NDArray[object_], other: Any) -> Any: ... + def __matmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rmatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rmatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... @overload - def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __rmatmul__(self: NDArray[object_], other: Any) -> Any: ... + def __rmatmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
@overload - def __mod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __mod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + def __mod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ... @overload - def __mod__(self: NDArray[object_], other: Any) -> Any: ... + def __mod__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ... @overload - def __rmod__(self: NDArray[object_], other: Any) -> Any: ... + def __rmod__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... 
+ def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __divmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> _2Tuple[NDArray[Any]]: ... + def __divmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> _2Tuple[NDArray[Any]]: ... @overload def __divmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload - def __rdivmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> _2Tuple[NDArray[Any]]: ... + def __rdivmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> _2Tuple[NDArray[Any]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] + def __rdivmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload - def __add__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __add__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... 
     @overload
-    def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
     @overload
-    def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
     @overload
-    def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
+    def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
     @overload
-    def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+    def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
     @overload
-    def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...  # type: ignore[misc]
+    def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...  # type: ignore[misc]
     @overload
-    def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ...
+    def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ...
     @overload
-    def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+    def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ...
     @overload
-    def __add__(self: NDArray[object_], other: Any) -> Any: ...
+    def __add__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __add__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __radd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __radd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
     @overload
-    def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
     @overload
-    def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
+    def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
     @overload
-    def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+    def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
     @overload
-    def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...  # type: ignore[misc]
+    def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...  # type: ignore[misc]
     @overload
-    def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ...
+    def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ...
     @overload
-    def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+    def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ...
     @overload
-    def __radd__(self: NDArray[object_], other: Any) -> Any: ...
+    def __radd__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __sub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __sub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NoReturn: ...
+    def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ...
     @overload
-    def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
     @overload
-    def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
+    def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
     @overload
-    def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+    def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
     @overload
-    def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...  # type: ignore[misc]
+    def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...  # type: ignore[misc]
     @overload
-    def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+    def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ...
     @overload
-    def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ...
+    def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __sub__(self: NDArray[object_], other: Any) -> Any: ...
+    def __sub__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __rsub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __rsub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NoReturn: ...
+    def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ...
     @overload
-    def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
+    def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
     @overload
-    def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+    def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
     @overload
-    def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...  # type: ignore[misc]
+    def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...  # type: ignore[misc]
     @overload
-    def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ...  # type: ignore[misc]
+    def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ...  # type: ignore[misc]
     @overload
-    def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ...
+    def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __rsub__(self: NDArray[object_], other: Any) -> Any: ...
+    def __rsub__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __mul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __mul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
     @overload
-    def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
     @overload
-    def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
+    def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
     @overload
-    def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+    def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
     @overload
-    def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+    def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+    def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __mul__(self: NDArray[object_], other: Any) -> Any: ...
+    def __mul__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __rmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __rmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
     @overload
-    def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
+    def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
     @overload
-    def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+    def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
     @overload
-    def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+    def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+    def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __rmul__(self: NDArray[object_], other: Any) -> Any: ...
+    def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __floordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __floordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ...  # type: ignore[misc]
+    def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[misc]
     @overload
-    def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
     @overload
-    def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ...
+    def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[int64]: ...
     @overload
-    def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
+    def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ...
     @overload
-    def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+    def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __floordiv__(self: NDArray[object_], other: Any) -> Any: ...
+    def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __rfloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __rfloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ...  # type: ignore[misc]
+    def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[misc]
     @overload
-    def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ...
+    def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[int64]: ...
     @overload
-    def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co) -> NoReturn: ...
+    def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co, /) -> NoReturn: ...
     @overload
-    def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+    def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __rfloordiv__(self: NDArray[object_], other: Any) -> Any: ...
+    def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __pow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __pow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ...  # type: ignore[misc]
+    def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[misc]
     @overload
-    def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
     @overload
-    def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+    def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...
     @overload
-    def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+    def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
     @overload
-    def __pow__(self: NDArray[object_], other: Any) -> Any: ...
+    def __pow__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __rpow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __rpow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ...  # type: ignore[misc]
+    def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[misc]
     @overload
-    def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+    def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...
     @overload
-    def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+    def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
     @overload
-    def __rpow__(self: NDArray[object_], other: Any) -> Any: ...
+    def __rpow__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __truediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __truediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ...  # type: ignore[misc]
+    def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co, /) -> NDArray[float64]: ...  # type: ignore[misc]
     @overload
-    def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
     @overload
-    def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
+    def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
     @overload
-    def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+    def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
     @overload
-    def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ...
+    def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[float64]: ...
     @overload
-    def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
+    def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ...
     @overload
-    def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+    def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __truediv__(self: NDArray[object_], other: Any) -> Any: ...
+    def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __rtruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __rtruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ...  # type: ignore[misc]
+    def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co, /) -> NDArray[float64]: ...  # type: ignore[misc]
     @overload
-    def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
+    def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
     @overload
-    def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+    def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
     @overload
-    def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ...
+    def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[float64]: ...
     @overload
-    def __rtruediv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co) -> NoReturn: ...
+    def __rtruediv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co, /) -> NoReturn: ...
     @overload
-    def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+    def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __rtruediv__(self: NDArray[object_], other: Any) -> Any: ...
+    def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __lshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __lshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ...  # type: ignore[misc]
+    def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[misc]
     @overload
-    def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+    def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...
     @overload
-    def __lshift__(self: NDArray[object_], other: Any) -> Any: ...
+    def __lshift__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __rlshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __rlshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ...  # type: ignore[misc]
+    def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[misc]
     @overload
-    def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+    def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...
     @overload
-    def __rlshift__(self: NDArray[object_], other: Any) -> Any: ...
+    def __rlshift__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __rshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __rshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ...  # type: ignore[misc]
+    def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[misc]
     @overload
-    def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+    def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...
     @overload
-    def __rshift__(self: NDArray[object_], other: Any) -> Any: ...
+    def __rshift__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __rrshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __rrshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ...  # type: ignore[misc]
+    def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[misc]
     @overload
-    def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+    def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...
     @overload
-    def __rrshift__(self: NDArray[object_], other: Any) -> Any: ...
+    def __rrshift__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __and__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __and__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
     @overload
-    def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+    def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...
     @overload
-    def __and__(self: NDArray[object_], other: Any) -> Any: ...
+    def __and__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __and__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __rand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __rand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
     @overload
-    def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+    def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...
     @overload
-    def __rand__(self: NDArray[object_], other: Any) -> Any: ...
+    def __rand__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __xor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __xor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
     @overload
-    def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+    def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...
     @overload
-    def __xor__(self: NDArray[object_], other: Any) -> Any: ...
+    def __xor__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __rxor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __rxor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
     @overload
-    def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+    def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...
     @overload
-    def __rxor__(self: NDArray[object_], other: Any) -> Any: ...
+    def __rxor__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __or__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __or__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
     @overload
-    def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+    def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...
     @overload
-    def __or__(self: NDArray[object_], other: Any) -> Any: ...
+    def __or__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __or__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
     @overload
-    def __ror__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __ror__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
     @overload
-    def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
     @overload
-    def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+    def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...
     @overload
-    def __ror__(self: NDArray[object_], other: Any) -> Any: ...
+    def __ror__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
-    def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+    def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...

     # `np.generic` does not support inplace operations
@@ -2393,179 +2393,179 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
     # also accepts a signed integer for the right operand as long it is a 0D
     # object and its value is >= 0
     @overload
-    def __iadd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __iadd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...
+    def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __iadd__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+    def __iadd__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ...
     @overload
-    def __iadd__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+    def __iadd__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ...
     @overload
-    def __iadd__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+    def __iadd__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ...
     @overload
-    def __iadd__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+    def __iadd__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
     @overload
-    def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+    def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+    def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ...
     @overload
-    def __iadd__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __iadd__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...
     @overload
-    def __isub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __isub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+    def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ...
     @overload
-    def __isub__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+    def __isub__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ...
     @overload
-    def __isub__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+    def __isub__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ...
     @overload
-    def __isub__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+    def __isub__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
     @overload
-    def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+    def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+    def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ...
     @overload
-    def __isub__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __isub__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...
     @overload
-    def __imul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __imul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...
+    def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __imul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+    def __imul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ...
     @overload
-    def __imul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+    def __imul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ...
     @overload
-    def __imul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+    def __imul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ...
     @overload
-    def __imul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+    def __imul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
     @overload
-    def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+    def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __imul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __imul__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...
     @overload
-    def __itruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __itruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+    def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ...
     @overload
-    def __itruediv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+    def __itruediv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
     @overload
-    def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
+    def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ...
     @overload
-    def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ...
+    def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __itruediv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __itruediv__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...
     @overload
-    def __ifloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __ifloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+    def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ...
     @overload
-    def __ifloordiv__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+    def __ifloordiv__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ...
     @overload
-    def __ifloordiv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+    def __ifloordiv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ...
     @overload
-    def __ifloordiv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+    def __ifloordiv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
     @overload
-    def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
+    def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ...
     @overload
-    def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ...
+    def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co, /) -> NDArray[timedelta64]: ...
     @overload
-    def __ifloordiv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __ifloordiv__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...
     @overload
-    def __ipow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __ipow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+    def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ...
     @overload
-    def __ipow__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+    def __ipow__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ...
     @overload
-    def __ipow__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+    def __ipow__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ...
     @overload
-    def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+    def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
     @overload
-    def __ipow__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __ipow__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...
     @overload
-    def __imod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __imod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+    def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ...
     @overload
-    def __imod__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+    def __imod__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ...
     @overload
-    def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+    def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ...
     @overload
-    def __imod__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ...
+    def __imod__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ...
     @overload
-    def __imod__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __imod__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...
     @overload
-    def __ilshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __ilshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+    def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ...
     @overload
-    def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+    def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ...
     @overload
-    def __ilshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __ilshift__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...
     @overload
-    def __irshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __irshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+    def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ...
     @overload
-    def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+    def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ...
     @overload
-    def __irshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __irshift__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...
     @overload
-    def __iand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __iand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...
+    def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __iand__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+    def __iand__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ...
     @overload
-    def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+    def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ...
     @overload
-    def __iand__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __iand__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...
     @overload
-    def __ixor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __ixor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...
+    def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __ixor__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+    def __ixor__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ...
     @overload
-    def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+    def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ...
     @overload
-    def __ixor__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __ixor__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...
     @overload
-    def __ior__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __ior__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...
+    def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __ior__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+    def __ior__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ...
     @overload
-    def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+    def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ...
     @overload
-    def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __ior__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...
     @overload
-    def __imatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+    def __imatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
     @overload
-    def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...
+    def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __imatmul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+    def __imatmul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[_NBit1]]: ...
     @overload
-    def __imatmul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+    def __imatmul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ...
     @overload
-    def __imatmul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+    def __imatmul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ...
     @overload
-    def __imatmul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+    def __imatmul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
     @overload
-    def __imatmul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+    def __imatmul__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ...

     def __dlpack__(
         self: NDArray[number[Any]],
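A note on why the `/` markers in the hunks above are more than cosmetic: `ndarray`'s binary dunders are C-level slot wrappers, which only accept positional arguments at runtime, so the old stubs permitted keyword calls that fail when executed. A minimal doctest-style sketch of the runtime behaviour the new annotations mirror (illustrative only, not part of the patch):

>>> import numpy as np
>>> a = np.arange(3)
>>> a.__add__(1)                # positional: matches the slot wrapper
array([1, 2, 3])
>>> a.__add__(other=1)          # keyword: rejected at runtime
Traceback (most recent call last):
    ...
TypeError: ...

With `other` marked positional-only, a type checker now flags the keyword spelling instead of letting it crash at runtime.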
From a6f4968008374e590229d49451db304b44b15331 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Sat, 13 Jul 2024 05:12:58 +0200
Subject: [PATCH 758/980] TYP: positional-only binop parameters in `numpy.dtype`

---
 numpy/__init__.pyi | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index cfdce573f9ab..f2bec42215af 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -818,36 +818,36 @@ class dtype(Generic[_DTypeScalar_co]):
     def __class_getitem__(self, item: Any) -> GenericAlias: ...

     @overload
-    def __getitem__(self: dtype[void], key: list[builtins.str]) -> dtype[void]: ...
+    def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ...
     @overload
-    def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex) -> dtype[Any]: ...
+    def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype[Any]: ...

     # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes
     @overload
-    def __mul__(self: _DType, value: L[1]) -> _DType: ...
+    def __mul__(self: _DType, value: L[1], /) -> _DType: ...
     @overload
-    def __mul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ...
+    def __mul__(self: _FlexDType, value: SupportsIndex, /) -> _FlexDType: ...
     @overload
-    def __mul__(self, value: SupportsIndex) -> dtype[void]: ...
+    def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ...

     # NOTE: `__rmul__` seems to be broken when used in combination with
     # literals as of mypy 0.902. Set the return-type to `dtype[Any]` for
     # now for non-flexible dtypes.
     @overload
-    def __rmul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ...
+    def __rmul__(self: _FlexDType, value: SupportsIndex, /) -> _FlexDType: ...
     @overload
-    def __rmul__(self, value: SupportsIndex) -> dtype[Any]: ...
+    def __rmul__(self, value: SupportsIndex, /) -> dtype[Any]: ...

-    def __gt__(self, other: DTypeLike) -> builtins.bool: ...
-    def __ge__(self, other: DTypeLike) -> builtins.bool: ...
-    def __lt__(self, other: DTypeLike) -> builtins.bool: ...
-    def __le__(self, other: DTypeLike) -> builtins.bool: ...
+    def __gt__(self, other: DTypeLike, /) -> builtins.bool: ...
+    def __ge__(self, other: DTypeLike, /) -> builtins.bool: ...
+    def __lt__(self, other: DTypeLike, /) -> builtins.bool: ...
+    def __le__(self, other: DTypeLike, /) -> builtins.bool: ...

     # Explicitly defined `__eq__` and `__ne__` to get around mypy's
     # `strict_equality` option; even though their signatures are
     # identical to their `object`-based counterpart
-    def __eq__(self, other: Any) -> builtins.bool: ...
-    def __ne__(self, other: Any) -> builtins.bool: ...
+    def __eq__(self, other: Any, /) -> builtins.bool: ...
+    def __ne__(self, other: Any, /) -> builtins.bool: ...

     @property
     def alignment(self) -> int: ...

From 53cd7a0d5ace836ab0bd1f83463cc65f0de0b96a Mon Sep 17 00:00:00 2001
From: jorenham
Date: Sat, 13 Jul 2024 05:14:08 +0200
Subject: [PATCH 759/980] TYP: positional-only binop parameters in `numpy.flatiter`

---
 numpy/__init__.pyi | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index f2bec42215af..4a74df7c5be2 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -975,8 +975,8 @@ class _ArrayOrScalarCommon:

     # TODO: How to deal with the non-commutative nature of `==` and `!=`?
     # xref numpy/numpy#17368
-    def __eq__(self, other: Any) -> Any: ...
-    def __ne__(self, other: Any) -> Any: ...
+    def __eq__(self, other: Any, /) -> Any: ...
+    def __ne__(self, other: Any, /) -> Any: ...
     def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
     def dump(self, file: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsWrite[bytes]) -> None: ...
     def dumps(self) -> bytes: ...
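The trailing `/` used throughout these stubs is PEP 570 syntax for positional-only parameters; in a stub it makes the checker enforce exactly what a positional-only runtime signature would. A tiny self-contained sketch with a made-up `Spam` class (hypothetical, for illustration only):

>>> class Spam:
...     def __eq__(self, other, /):   # runtime equivalent of the stub signature
...         return NotImplemented
>>> s = Spam()
>>> s == s                  # operator dispatch always passes `other` positionally
True
>>> s.__eq__(other=s)       # the keyword spelling is a runtime error
Traceback (most recent call last):
    ...
TypeError: ...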
From 3f71d090ae6401303b44afbcd37e5a8ad91e2f95 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Sat, 13 Jul 2024 05:18:29 +0200
Subject: [PATCH 760/980] TYP: positional-only binop parameters in `numpy.generic` subtypes

---
 numpy/__init__.pyi | 71 +++++++++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 35 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 4a74df7c5be2..6e70e6d86159 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -2882,13 +2882,13 @@ class datetime64(generic):
         format: _CharLike_co | tuple[_CharLike_co, _IntLike_co],
         /,
     ) -> None: ...
-    def __add__(self, other: _TD64Like_co) -> datetime64: ...
-    def __radd__(self, other: _TD64Like_co) -> datetime64: ...
+    def __add__(self, other: _TD64Like_co, /) -> datetime64: ...
+    def __radd__(self, other: _TD64Like_co, /) -> datetime64: ...
     @overload
-    def __sub__(self, other: datetime64) -> timedelta64: ...
+    def __sub__(self, other: datetime64, /) -> timedelta64: ...
     @overload
-    def __sub__(self, other: _TD64Like_co) -> datetime64: ...
-    def __rsub__(self, other: datetime64) -> timedelta64: ...
+    def __sub__(self, other: _TD64Like_co, /) -> datetime64: ...
+    def __rsub__(self, other: datetime64, /) -> timedelta64: ...
     __lt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
     __le__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
     __gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
@@ -2911,9 +2911,9 @@ class integer(number[_NBit1]):  # type: ignore
     @property
     def denominator(self) -> L[1]: ...
     @overload
-    def __round__(self, ndigits: None = ...) -> int: ...
+    def __round__(self, ndigits: None = ..., /) -> int: ...
     @overload
-    def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ...
+    def __round__(self: _ScalarType, ndigits: SupportsIndex, /) -> _ScalarType: ...

     # NOTE: `__index__` is technically defined in the bottom-most
     # sub-classes (`int64`, `uint32`, etc)
@@ -2926,20 +2926,20 @@ def __index__(self) -> int: ...
     __truediv__: _IntTrueDiv[_NBit1]
     __rtruediv__: _IntTrueDiv[_NBit1]
-    def __mod__(self, value: _IntLike_co) -> integer[Any]: ...
-    def __rmod__(self, value: _IntLike_co) -> integer[Any]: ...
+    def __mod__(self, value: _IntLike_co, /) -> integer[Any]: ...
+    def __rmod__(self, value: _IntLike_co, /) -> integer[Any]: ...
     def __invert__(self: _IntType) -> _IntType: ...
     # Ensure that objects annotated as `integer` support bit-wise operations
-    def __lshift__(self, other: _IntLike_co) -> integer[Any]: ...
-    def __rlshift__(self, other: _IntLike_co) -> integer[Any]: ...
-    def __rshift__(self, other: _IntLike_co) -> integer[Any]: ...
-    def __rrshift__(self, other: _IntLike_co) -> integer[Any]: ...
-    def __and__(self, other: _IntLike_co) -> integer[Any]: ...
-    def __rand__(self, other: _IntLike_co) -> integer[Any]: ...
-    def __or__(self, other: _IntLike_co) -> integer[Any]: ...
-    def __ror__(self, other: _IntLike_co) -> integer[Any]: ...
-    def __xor__(self, other: _IntLike_co) -> integer[Any]: ...
-    def __rxor__(self, other: _IntLike_co) -> integer[Any]: ...
+    def __lshift__(self, other: _IntLike_co, /) -> integer[Any]: ...
+    def __rlshift__(self, other: _IntLike_co, /) -> integer[Any]: ...
+    def __rshift__(self, other: _IntLike_co, /) -> integer[Any]: ...
+    def __rrshift__(self, other: _IntLike_co, /) -> integer[Any]: ...
+    def __and__(self, other: _IntLike_co, /) -> integer[Any]: ...
+    def __rand__(self, other: _IntLike_co, /) -> integer[Any]: ...
+    def __or__(self, other: _IntLike_co, /) -> integer[Any]: ...
+    def __ror__(self, other: _IntLike_co, /) -> integer[Any]: ...
+    def __xor__(self, other: _IntLike_co, /) -> integer[Any]: ...
+    def __rxor__(self, other: _IntLike_co, /) -> integer[Any]: ...

 class signedinteger(integer[_NBit1]):
     def __init__(self, value: _IntValue = ..., /) -> None: ...
@@ -3003,20 +3003,20 @@ class timedelta64(generic):
     def __neg__(self: _ArraySelf) -> _ArraySelf: ...
     def __pos__(self: _ArraySelf) -> _ArraySelf: ...
     def __abs__(self: _ArraySelf) -> _ArraySelf: ...
-    def __add__(self, other: _TD64Like_co) -> timedelta64: ...
-    def __radd__(self, other: _TD64Like_co) -> timedelta64: ...
-    def __sub__(self, other: _TD64Like_co) -> timedelta64: ...
-    def __rsub__(self, other: _TD64Like_co) -> timedelta64: ...
-    def __mul__(self, other: _FloatLike_co) -> timedelta64: ...
-    def __rmul__(self, other: _FloatLike_co) -> timedelta64: ...
+ def __add__(self, other: _TD64Like_co, /) -> timedelta64: ... + def __radd__(self, other: _TD64Like_co, /) -> timedelta64: ... + def __sub__(self, other: _TD64Like_co, /) -> timedelta64: ... + def __rsub__(self, other: _TD64Like_co, /) -> timedelta64: ... + def __mul__(self, other: _FloatLike_co, /) -> timedelta64: ... + def __rmul__(self, other: _FloatLike_co, /) -> timedelta64: ... __truediv__: _TD64Div[float64] __floordiv__: _TD64Div[int64] - def __rtruediv__(self, other: timedelta64) -> float64: ... - def __rfloordiv__(self, other: timedelta64) -> int64: ... - def __mod__(self, other: timedelta64) -> timedelta64: ... - def __rmod__(self, other: timedelta64) -> timedelta64: ... - def __divmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ... - def __rdivmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ... + def __rtruediv__(self, other: timedelta64, /) -> float64: ... + def __rfloordiv__(self, other: timedelta64, /) -> int64: ... + def __mod__(self, other: timedelta64, /) -> timedelta64: ... + def __rmod__(self, other: timedelta64, /) -> timedelta64: ... + def __divmod__(self, other: timedelta64, /) -> tuple[int64, timedelta64]: ... + def __rdivmod__(self, other: timedelta64, /) -> tuple[int64, timedelta64]: ... __lt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] __le__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] __gt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] @@ -3087,9 +3087,9 @@ class floating(inexact[_NBit1]): def __getnewargs__(self: float64) -> tuple[float]: ... def __getformat__(self: float64, typestr: L["double", "float"], /) -> str: ... @overload - def __round__(self, ndigits: None = ...) -> int: ... + def __round__(self, ndigits: None = ..., /) -> int: ... @overload - def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... + def __round__(self: _ScalarType, ndigits: SupportsIndex, /) -> _ScalarType: ... __add__: _FloatOp[_NBit1] __radd__: _FloatOp[_NBit1] __sub__: _FloatOp[_NBit1] @@ -3170,13 +3170,14 @@ class void(flexible): self, val: ArrayLike, dtype: DTypeLike, offset: int = ... ) -> None: ... @overload - def __getitem__(self, key: str | SupportsIndex) -> Any: ... + def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... @overload - def __getitem__(self, key: list[str]) -> void: ... + def __getitem__(self, key: list[str], /) -> void: ... def __setitem__( self, key: str | list[str] | SupportsIndex, value: ArrayLike, + /, ) -> None: ... class character(flexible): # type: ignore From ecf2e078c12512289862b2febe6d37e75882c199 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Jul 2024 05:21:00 +0200 Subject: [PATCH 761/980] TYP: positional-only binop parameters in `numpy.poly1d` --- numpy/__init__.pyi | 74 +++++++++++++++++++++++++--------------------- 1 file changed, 41 insertions(+), 33 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6e70e6d86159..83c3e042f790 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3763,19 +3763,19 @@ class poly1d: def __len__(self) -> int: ... def __neg__(self) -> poly1d: ... def __pos__(self) -> poly1d: ... - def __mul__(self, other: ArrayLike) -> poly1d: ... - def __rmul__(self, other: ArrayLike) -> poly1d: ... - def __add__(self, other: ArrayLike) -> poly1d: ... - def __radd__(self, other: ArrayLike) -> poly1d: ... - def __pow__(self, val: _FloatLike_co) -> poly1d: ... # Integral floats are accepted - def __sub__(self, other: ArrayLike) -> poly1d: ... - def __rsub__(self, other: ArrayLike) -> poly1d: ... 
- def __div__(self, other: ArrayLike) -> poly1d: ... - def __truediv__(self, other: ArrayLike) -> poly1d: ... - def __rdiv__(self, other: ArrayLike) -> poly1d: ... - def __rtruediv__(self, other: ArrayLike) -> poly1d: ... - def __getitem__(self, val: int) -> Any: ... - def __setitem__(self, key: int, val: Any) -> None: ... + def __mul__(self, other: ArrayLike, /) -> poly1d: ... + def __rmul__(self, other: ArrayLike, /) -> poly1d: ... + def __add__(self, other: ArrayLike, /) -> poly1d: ... + def __radd__(self, other: ArrayLike, /) -> poly1d: ... + def __pow__(self, val: _FloatLike_co, /) -> poly1d: ... # Integral floats are accepted + def __sub__(self, other: ArrayLike, /) -> poly1d: ... + def __rsub__(self, other: ArrayLike, /) -> poly1d: ... + def __div__(self, other: ArrayLike, /) -> poly1d: ... + def __truediv__(self, other: ArrayLike, /) -> poly1d: ... + def __rdiv__(self, other: ArrayLike, /) -> poly1d: ... + def __rtruediv__(self, other: ArrayLike, /) -> poly1d: ... + def __getitem__(self, val: int, /) -> Any: ... + def __setitem__(self, key: int, val: Any, /) -> None: ... def __iter__(self) -> Iterator[Any]: ... def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ... def integ( @@ -3795,30 +3795,38 @@ class matrix(ndarray[_ShapeType, _DType_co]): def __array_finalize__(self, obj: object) -> None: ... @overload - def __getitem__(self, key: ( - SupportsIndex - | _ArrayLikeInt_co - | tuple[SupportsIndex | _ArrayLikeInt_co, ...] - )) -> Any: ... + def __getitem__( + self, + key: ( + SupportsIndex + | _ArrayLikeInt_co + | tuple[SupportsIndex | _ArrayLikeInt_co, ...] + ), + /, + ) -> Any: ... @overload - def __getitem__(self, key: ( - None - | slice - | ellipsis - | SupportsIndex - | _ArrayLikeInt_co - | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] - )) -> matrix[Any, _DType_co]: ... + def __getitem__( + self, + key: ( + None + | slice + | ellipsis + | SupportsIndex + | _ArrayLikeInt_co + | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + ), + /, + ) -> matrix[Any, _DType_co]: ... @overload - def __getitem__(self: NDArray[void], key: str) -> matrix[Any, dtype[Any]]: ... + def __getitem__(self: NDArray[void], key: str, /) -> matrix[Any, dtype[Any]]: ... @overload - def __getitem__(self: NDArray[void], key: list[str]) -> matrix[_ShapeType, dtype[void]]: ... + def __getitem__(self: NDArray[void], key: list[str], /) -> matrix[_ShapeType, dtype[void]]: ... - def __mul__(self, other: ArrayLike) -> matrix[Any, Any]: ... - def __rmul__(self, other: ArrayLike) -> matrix[Any, Any]: ... - def __imul__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ... - def __pow__(self, other: ArrayLike) -> matrix[Any, Any]: ... - def __ipow__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ... + def __mul__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... + def __rmul__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... + def __imul__(self, other: ArrayLike, /) -> matrix[_ShapeType, _DType_co]: ... + def __pow__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... + def __ipow__(self, other: ArrayLike, /) -> matrix[_ShapeType, _DType_co]: ... @overload def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... From c899a67b596c6c43e37772663d6702e4b57084a6 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 13 Jul 2024 16:38:15 +0200 Subject: [PATCH 762/980] BUG: Fix out-of-bound minimum offset for in1d table method We could always use ar2.dtype, but we want intp anyway, so that is nicer. 
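For the "table" kind the lookup index is built by subtracting ar2's
minimum, so that minimum has to be representable in the index dtype. A
quick sketch of the conversion that can fail (value chosen only to
illustrate, on a 64-bit build):

    >>> import numpy as np
    >>> np.array(2**63, dtype=np.intp)
    Traceback (most recent call last):
      ...
    OverflowError: Python integer 9223372036854775808 out of bounds for intp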
But if the minimum value doesn't fit intp, NumPy would complain (it may
even be guaranteed to work to force-cast, but...). So just try to use
intp, but if that doesn't work use ar2.dtype, which must work but means
we'll have to cast once more (even if that cast is probably just a view
in principle).

Closes gh-26922
---
 numpy/lib/_arraysetops_impl.py      | 19 ++++++++++++++++++-
 numpy/lib/tests/test_arraysetops.py | 16 ++++++++++++++++
 2 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py
index 435904c95321..8d88632e6266 100644
--- a/numpy/lib/_arraysetops_impl.py
+++ b/numpy/lib/_arraysetops_impl.py
@@ -915,8 +915,25 @@ def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
 
         # Mask out elements we know won't work
         basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)
+        in_range_ar1 = ar1[basic_mask]
+        if in_range_ar1.size == 0:
+            # Nothing more to do, since all values are out of range.
+            return outgoing_array
+
+        # Unfortunately, ar2_min can be out of range for `intp` even
+        # if the calculation result must fit in range (and be positive).
+        # In that case, use ar2.dtype which must work for all unmasked
+        # values.
+        try:
+            ar2_min = np.array(ar2_min, dtype=np.intp)
+            dtype = np.intp
+        except OverflowError:
+            dtype = ar2.dtype
+
+        out = np.empty_like(in_range_ar1, dtype=np.intp)
         outgoing_array[basic_mask] = isin_helper_ar[
-            np.subtract(ar1[basic_mask], ar2_min, dtype=np.intp)]
+            np.subtract(in_range_ar1, ar2_min, dtype=dtype,
+                        out=out, casting="unsafe")]
 
         return outgoing_array
     elif kind == 'table':  # not range_safe_from_overflow
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index 33b43b57a381..c8e592dc1452 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -440,6 +440,22 @@ def test_isin_mixed_dtype(self, dtype1, dtype2, kind):
         else:
             assert_array_equal(isin(ar1, ar2, kind=kind), expected)
 
+    @pytest.mark.parametrize("data", [
+        np.array([2**63, 2**63+1], dtype=np.uint64),
+        np.array([-2**62, -2**62-1], dtype=np.int64),
+    ])
+    @pytest.mark.parametrize("kind", [None, "sort", "table"])
+    def test_isin_mixed_huge_vals(self, kind, data):
+        """Test values outside intp range (negative ones if 32bit system)"""
+        query = data[1]
+        res = np.isin(data, query, kind=kind)
+        assert_array_equal(res, [False, True])
+        # Also check that nothing weird happens for values that can't
+        # possibly be in range.
+        data = data.astype(np.int32)  # clearly different values
+        res = np.isin(data, query, kind=kind)
+        assert_array_equal(res, [False, False])
+
     @pytest.mark.parametrize("kind", [None, "sort", "table"])
     def test_isin_mixed_boolean(self, kind):
         """Test that isin works as expected for bool/int input."""

From b17dc3e1ef12720fa72a12e407569b7a7ccaba34 Mon Sep 17 00:00:00 2001
From: "Patrick J. Roddy"
Date: Sat, 13 Jul 2024 18:20:39 +0100
Subject: [PATCH 763/980] BUG: Fix running full test command in docstring

`spin -m full` doesn't work by itself
---
 .spin/cmds.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.spin/cmds.py b/.spin/cmds.py
index 7c6c759c7ac6..0773578de913 100644
--- a/.spin/cmds.py
+++ b/.spin/cmds.py
@@ -246,7 +246,7 @@ def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs):
       spin test -- -k "geometric and not rgeometric"
 
     By default, spin will run `-m 'not slow'`. To run the full test suite, use
-    `spin -m full`
+    `spin test -m full`
 
     For more, see `pytest --help`.

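    (For instance, `spin test -- -m slow` runs only the tests marked slow;
    everything after the `--` separator is handed to pytest unchanged.)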
""" # noqa: E501 From d8451bcf8491bb0fd04bd8343c21539847feb8fd Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 14 Jul 2024 04:16:28 +0200 Subject: [PATCH 764/980] TYP: positional-only `__class_getitem__` parameters --- numpy/__init__.pyi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 83c3e042f790..6ba5cc9a19ba 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -815,7 +815,7 @@ class dtype(Generic[_DTypeScalar_co]): metadata: dict[builtins.str, Any] = ..., ) -> dtype[object_]: ... - def __class_getitem__(self, item: Any) -> GenericAlias: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ... @@ -1466,7 +1466,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): if sys.version_info >= (3, 12): def __buffer__(self, flags: int, /) -> memoryview: ... - def __class_getitem__(self, item: Any) -> GenericAlias: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload def __array__( @@ -2767,7 +2767,7 @@ class number(generic, Generic[_NBit1]): # type: ignore def real(self: _ArraySelf) -> _ArraySelf: ... @property def imag(self: _ArraySelf) -> _ArraySelf: ... - def __class_getitem__(self, item: Any) -> GenericAlias: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... def __int__(self) -> int: ... def __float__(self) -> float: ... def __complex__(self) -> complex: ... From 60dfa87a33d312dad80efba02ef38bba27367c5e Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 14 Jul 2024 22:05:24 +0200 Subject: [PATCH 765/980] TYP,BUG: Remove non-existant `numpy.__git_version__` in the stubs. --- numpy/__init__.pyi | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 23cef0725a85..45d9dab1e126 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -629,7 +629,6 @@ class _SupportsWrite(Protocol[_AnyStr_contra]): __all__: list[str] __dir__: list[str] __version__: str -__git_version__: str __array_api_version__: str test: PytestTester From dab253e2ee1d987189bbd5a24ec002a8e359dfdc Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 15 Jul 2024 17:23:09 +0200 Subject: [PATCH 766/980] TYP: Add missing typecodes in ``numpy._core.numerictypes.typecodes`` --- numpy/_core/numerictypes.pyi | 8 ++++---- numpy/typing/tests/data/reveal/numerictypes.pyi | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 52ab73012604..b177dc55a6b6 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -45,14 +45,14 @@ _SCT = TypeVar("_SCT", bound=generic) class _TypeCodes(TypedDict): Character: L['c'] - Integer: L['bhilqp'] - UnsignedInteger: L['BHILQP'] + Integer: L['bhilqnp'] + UnsignedInteger: L['BHILQNP'] Float: L['efdg'] Complex: L['FDG'] - AllInteger: L['bBhHiIlLqQpP'] + AllInteger: L['bBhHiIlLqQnNpP'] AllFloat: L['efdgFDG'] Datetime: L['Mm'] - All: L['?bhilqpBHILQPefdgFDGSUVOMm'] + All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] __all__: list[str] diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index 9f094ba72e3c..cf558ddc9718 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -52,4 +52,4 @@ assert_type(np.bool_, type[np.bool]) assert_type(np.typecodes["Character"], Literal["c"]) assert_type(np.typecodes["Complex"], 
Literal["FDG"]) -assert_type(np.typecodes["All"], Literal["?bhilqpBHILQPefdgFDGSUVOMm"]) +assert_type(np.typecodes["All"], Literal["?bhilqnpBHILQNPefdgFDGSUVOMm"]) From ca522d169c8c5ad3e119cd84ab460fef13bdd1ea Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 15 Jul 2024 18:47:23 +0200 Subject: [PATCH 767/980] TYP,BUG: Remove ``numpy.cast`` and ``numpy.disp`` from the typing stubs. (#26937) * TYP,BUG: Remove `numpy.cast` and `numpy.disp` in the stubs. Accessing these at runtime will raise an `AttributeError` since their removal in NumPy 2.0. But this wasn't reflected in the typing stubs. * TYP: Remove the deprecated ``numpy._function_base_impl.disp`` function from the typing stubs --- numpy/__init__.pyi | 2 -- numpy/lib/_function_base_impl.pyi | 7 ------- 2 files changed, 9 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index c491aaee73c1..8f15fc0bbe4f 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -389,7 +389,6 @@ from numpy._core.numeric import ( from numpy._core.numerictypes import ( isdtype as isdtype, issubdtype as issubdtype, - cast as cast, ScalarType as ScalarType, typecodes as typecodes, ) @@ -439,7 +438,6 @@ from numpy.lib._function_base_impl import ( angle as angle, unwrap as unwrap, sort_complex as sort_complex, - disp as disp, flip as flip, rot90 as rot90, extract as extract, diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 834ad633efb6..0678cfaf98f5 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -12,7 +12,6 @@ from typing import ( from numpy import ( vectorize as vectorize, - ufunc, generic, floating, complexfloating, @@ -313,12 +312,6 @@ def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ... def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ... -def disp( - mesg: object, - device: None | _SupportsWriteFlush = ..., - linefeed: bool = ..., -) -> None: ... 
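# NOTE: like `np.cast`, `disp` was removed at runtime in NumPy 2.0, so a
# call such as the following (shown only for illustration) now fails with
# an AttributeError rather than printing:
#
#     >>> np.disp("example")   # NumPy >= 2.0
#     AttributeError: ...
#
# Deleting the stub keeps type checkers in agreement with that behaviour.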
- @overload def cov( m: _ArrayLikeFloat_co, From 0858ae07b6941e40582aec4244b7b60fb826d6b6 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 15 Jul 2024 11:09:15 -0600 Subject: [PATCH 768/980] MAINT: add freethreading_compatible directive to cython build --- .github/workflows/linux.yml | 2 -- numpy/_core/tests/examples/cython/meson.build | 6 ++++++ numpy/_core/tests/examples/cython/setup.py | 10 +++++++++- numpy/meson.build | 2 +- numpy/random/_examples/cython/meson.build | 9 ++++++++- numpy/random/meson.build | 6 ++++++ tools/wheels/cibw_test_command.sh | 4 ---- 7 files changed, 30 insertions(+), 9 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index c536336c7115..5393f26b0fec 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -311,5 +311,3 @@ jobs: run: | pip install git+https://github.com/cython/cython - uses: ./.github/meson_actions - env: - PYTHON_GIL: 0 diff --git a/numpy/_core/tests/examples/cython/meson.build b/numpy/_core/tests/examples/cython/meson.build index c0ee5f89168c..8362c339ae73 100644 --- a/numpy/_core/tests/examples/cython/meson.build +++ b/numpy/_core/tests/examples/cython/meson.build @@ -10,6 +10,11 @@ if not cy.version().version_compare('>=3.0.6') error('tests requires Cython >= 3.0.6') endif +cython_args = [] +if cy.version().version_compare('>=3.1.0') + cython_args += ['-Xfreethreading_compatible=True'] +endif + npy_include_path = run_command(py, [ '-c', 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))' @@ -34,4 +39,5 @@ py.extension_module( '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION', ], include_directories: [npy_include_path], + cython_args: cython_args, ) diff --git a/numpy/_core/tests/examples/cython/setup.py b/numpy/_core/tests/examples/cython/setup.py index 97b7b4317ffa..1bf027700748 100644 --- a/numpy/_core/tests/examples/cython/setup.py +++ b/numpy/_core/tests/examples/cython/setup.py @@ -3,7 +3,9 @@ for testing. 
""" +import Cython import numpy as np +from numpy._utils import _pep440 from distutils.core import setup from Cython.Build import cythonize from setuptools.extension import Extension @@ -24,6 +26,12 @@ extensions = [checks] +compiler_directives = {} +if _pep440.parse(Cython.__version__) >= _pep440.parse("3.1.0a0"): + compiler_directives['freethreading_compatible'] = True + setup( - ext_modules=cythonize(extensions) + ext_modules=cythonize( + extensions, + compiler_directives=compiler_directives) ) diff --git a/numpy/meson.build b/numpy/meson.build index 032cdd5c6b60..84dffaa3d880 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -369,7 +369,7 @@ install_subdir('tests', install_dir: np_dir, install_tag: 'tests') compilers = { 'C': cc, 'CPP': cpp, - 'CYTHON': meson.get_compiler('cython') + 'CYTHON': cy, } machines = { diff --git a/numpy/random/_examples/cython/meson.build b/numpy/random/_examples/cython/meson.build index 1ad754c53691..9b3fedb81579 100644 --- a/numpy/random/_examples/cython/meson.build +++ b/numpy/random/_examples/cython/meson.build @@ -11,6 +11,11 @@ if not cy.version().version_compare('>=3.0.6') error('tests requires Cython >= 3.0.6') endif +base_cython_args = [] +if cy.version().version_compare('>=3.1.0a0') + base_cython_args += ['-Xfreethreading_compatible=True'] +endif + _numpy_abs = run_command(py3, ['-c', 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'], check: true).stdout().strip() @@ -27,6 +32,7 @@ py3.extension_module( install: false, include_directories: [npy_include_path], dependencies: [npyrandom_lib, npymath_lib], + cython_args: base_cython_args, ) py3.extension_module( 'extending', @@ -34,13 +40,14 @@ py3.extension_module( install: false, include_directories: [npy_include_path], dependencies: [npyrandom_lib, npymath_lib], + cython_args: base_cython_args, ) py3.extension_module( 'extending_cpp', 'extending_distributions.pyx', install: false, override_options : ['cython_language=cpp'], - cython_args: ['--module-name', 'extending_cpp'], + cython_args: base_cython_args + ['--module-name', 'extending_cpp'], include_directories: [npy_include_path], dependencies: [npyrandom_lib, npymath_lib], ) diff --git a/numpy/random/meson.build b/numpy/random/meson.build index 1c90fb5866f2..f2f2e0ac755c 100644 --- a/numpy/random/meson.build +++ b/numpy/random/meson.build @@ -52,6 +52,11 @@ if host_machine.system() == 'cygwin' c_args_random += ['-Wl,--export-all-symbols'] endif +cython_args = [] +if cy.version().version_compare('>=3.1.0') + cython_args += ['-Xfreethreading_compatible=True'] +endif + # name, sources, extra c_args, extra static libs to link random_pyx_sources = [ ['_bounded_integers', _bounded_integers_pyx, [], [npyrandom_lib, npymath_lib]], @@ -83,6 +88,7 @@ foreach gen: random_pyx_sources link_with: gen[3], install: true, subdir: 'numpy/random', + cython_args: cython_args, ) endforeach diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 9a462de2a684..649692bad2e7 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -34,10 +34,6 @@ if [[ $FREE_THREADED_BUILD == "True" ]]; then # with a released version of cython python -m pip uninstall -y cython python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython - # TODO: delete when importing numpy no longer enables the GIL - # setting to zero ensures the GIL is disabled while running the - # tests under free-threaded python - export PYTHON_GIL=0 fi # 
Run full tests with -n=auto. This makes pytest-xdist distribute tests across From a8a42f6115dd471a9feaf8a0c0c2453ab7e4ac95 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 15 Jul 2024 19:09:37 +0200 Subject: [PATCH 769/980] TYP,BUG: fix ``numpy.__dir__`` annotations --- numpy/__init__.pyi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8f15fc0bbe4f..37b6c965eaa7 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -625,7 +625,8 @@ class _SupportsWrite(Protocol[_AnyStr_contra]): def write(self, s: _AnyStr_contra, /) -> object: ... __all__: list[str] -__dir__: list[str] +def __dir__() -> Sequence[str]: ... + __version__: str __array_api_version__: str test: PytestTester From 363f9a422312a0b7f5081887715537a75ce60afb Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 5 Jul 2024 22:39:55 +0200 Subject: [PATCH 770/980] TYP: use `types.CapsuleType` on python>=3.13 --- numpy/__init__.pyi | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8f15fc0bbe4f..db9670d32c60 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1415,9 +1415,10 @@ _ArrayTD64_co: TypeAlias = NDArray[np.bool | integer[Any] | timedelta64] # Introduce an alias for `dtype` to avoid naming conflicts. _dtype: TypeAlias = dtype -# `builtins.PyCapsule` unfortunately lacks annotations as of the moment; -# use `Any` as a stopgap measure -_PyCapsule: TypeAlias = Any +if sys.version_info >= (3, 13): + from types import CapsuleType as _PyCapsule +else: + _PyCapsule: TypeAlias = Any class _SupportsItem(Protocol[_T_co]): def item(self, args: Any, /) -> _T_co: ... From b544f3c71df5ab10b4f9d3716e3b6160e86dfbd6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 14 Jul 2024 03:54:15 +0200 Subject: [PATCH 771/980] TYP,BUG: Fix `dtype` type alias specialization issue in `__init__.pyi` This was introduced in #26858 --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index db9670d32c60..033d1ee6dd2b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1413,7 +1413,7 @@ _ArrayNumber_co: TypeAlias = NDArray[np.bool | number[Any]] _ArrayTD64_co: TypeAlias = NDArray[np.bool | integer[Any] | timedelta64] # Introduce an alias for `dtype` to avoid naming conflicts. 
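# For illustration, stubs elsewhere subscript this alias much like the
# class itself, e.g. a hypothetical annotation `d: _dtype[np.float64]`;
# making the parametrization explicit below keeps that subscription
# equivalent to `dtype[np.float64]` for type checkers.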
-_dtype: TypeAlias = dtype +_dtype: TypeAlias = dtype[_ScalarType] if sys.version_info >= (3, 13): from types import CapsuleType as _PyCapsule From 1ee327c42efc90e54d576583dca9bd2f7c700875 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 15 Jul 2024 13:19:16 -0600 Subject: [PATCH 772/980] MAINT: re-add environment variable, add import test --- .github/workflows/linux.yml | 3 +++ tools/wheels/cibw_test_command.sh | 14 ++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 5393f26b0fec..497bb1ec7462 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -311,3 +311,6 @@ jobs: run: | pip install git+https://github.com/cython/cython - uses: ./.github/meson_actions + env: + # needed until f2py grows a config flag for GIL-disabled support + PYTHON_GIL: 0 diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 649692bad2e7..b967ec1e67f5 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -34,6 +34,20 @@ if [[ $FREE_THREADED_BUILD == "True" ]]; then # with a released version of cython python -m pip uninstall -y cython python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython + + # Manually check that importing NumPy does not re-enable the GIL. + # Afterwards, force the GIL to always be disabled so it does not get + # re-enabled during the tests. + # + # TODO: delete when f2py grows the ability to define extensions that declare + # they can run without the gil or when we can work around the fact the f2py + # tests import modules that don't declare gil-disabled support. + if [[ $(python -c "import numpy" 2>&1) == "*The global interpreter lock (GIL) has been enabled*" ]]; then + echo "Error: Importing NumPy re-enables the GIL in the free-threaded build" + exit 1 + fi + + export PYTHON_GIL=0 fi # Run full tests with -n=auto. This makes pytest-xdist distribute tests across From 9d1d254326a9ff650b50aa2cff28af2b25142a5f Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 15 Jul 2024 13:26:22 -0600 Subject: [PATCH 773/980] MAINT: verify importing numpy does not re-enable the GIL in CI --- .github/workflows/linux.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 497bb1ec7462..7363cf6d748f 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -310,7 +310,16 @@ jobs: - name: Install nightly Cython run: | pip install git+https://github.com/cython/cython + # Set PYTHON_GIL=0 to force GIL off during tests and then manually verify + # importing numpy does not enable the GIL. 
When f2py grows the ability to + # declare GIL-disabled support we can just run the tests without the + # environment variable - uses: ./.github/meson_actions env: - # needed until f2py grows a config flag for GIL-disabled support PYTHON_GIL: 0 + - name: Verify import does not re-enable GIL + run: | + if [[ $(python -c "import numpy" 2>&1) == "*The global interpreter lock (GIL) has been enabled*" ]]; then + echo "Error: Importing NumPy re-enables the GIL in the free-threaded build" + exit 1 + fi From 072bd3ce114f2db0643cc588d73cf3f8f2b9963e Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 15 Jul 2024 13:37:05 -0600 Subject: [PATCH 774/980] TST: install cython nightly using a wheel --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 7363cf6d748f..147b89353bce 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -309,7 +309,7 @@ jobs: # TODO: remove cython nightly install when cython does a release - name: Install nightly Cython run: | - pip install git+https://github.com/cython/cython + pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython # Set PYTHON_GIL=0 to force GIL off during tests and then manually verify # importing numpy does not enable the GIL. When f2py grows the ability to # declare GIL-disabled support we can just run the tests without the From 3a6dbaf15405dd6944f10082b53319bf89dad291 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 16 Jul 2024 00:09:17 +0200 Subject: [PATCH 775/980] TYP: Replace ``typing.Union`` with ``|`` in ``numpy._typing`` --- numpy/_typing/_array_like.py | 97 ++++++------- numpy/_typing/_dtype_like.py | 266 +++++++++++++++++------------------ numpy/_typing/_scalars.py | 37 ++--- 3 files changed, 196 insertions(+), 204 deletions(-) diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 79e13a8f5243..22ff5356f9d9 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -2,7 +2,7 @@ import sys from collections.abc import Collection, Callable, Sequence -from typing import Any, Protocol, Union, TypeAlias, TypeVar, runtime_checkable +from typing import Any, Protocol, TypeAlias, TypeVar, runtime_checkable import numpy as np from numpy import ( @@ -54,41 +54,41 @@ def __array_function__( # TODO: Wait until mypy supports recursive objects in combination with typevars -_FiniteNestedSequence: TypeAlias = Union[ - _T, - Sequence[_T], - Sequence[Sequence[_T]], - Sequence[Sequence[Sequence[_T]]], - Sequence[Sequence[Sequence[Sequence[_T]]]], -] +_FiniteNestedSequence: TypeAlias = ( + _T + | Sequence[_T] + | Sequence[Sequence[_T]] + | Sequence[Sequence[Sequence[_T]]] + | Sequence[Sequence[Sequence[Sequence[_T]]]] +) # A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` -_ArrayLike: TypeAlias = Union[ - _SupportsArray[dtype[_ScalarType]], - _NestedSequence[_SupportsArray[dtype[_ScalarType]]], -] +_ArrayLike: TypeAlias = ( + _SupportsArray[dtype[_ScalarType]] + | _NestedSequence[_SupportsArray[dtype[_ScalarType]]] +) # A union representing array-like objects; consists of two typevars: # One representing types that can be parametrized w.r.t. 
`np.dtype` # and another one for the rest -_DualArrayLike: TypeAlias = Union[ - _SupportsArray[_DType], - _NestedSequence[_SupportsArray[_DType]], - _T, - _NestedSequence[_T], -] +_DualArrayLike: TypeAlias = ( + _SupportsArray[_DType] + | _NestedSequence[_SupportsArray[_DType]] + | _T + | _NestedSequence[_T] +) if sys.version_info >= (3, 12): from collections.abc import Buffer ArrayLike: TypeAlias = Buffer | _DualArrayLike[ dtype[Any], - Union[bool, int, float, complex, str, bytes], + bool | int | float | complex | str | bytes, ] else: ArrayLike: TypeAlias = _DualArrayLike[ dtype[Any], - Union[bool, int, float, complex, str, bytes], + bool | int | float | complex | str | bytes, ] # `ArrayLike_co`: array-like objects that can be coerced into `X` @@ -98,47 +98,47 @@ def __array_function__( bool, ] _ArrayLikeUInt_co: TypeAlias = _DualArrayLike[ - dtype[Union[np.bool, unsignedinteger[Any]]], + dtype[np.bool] | dtype[unsignedinteger[Any]], bool, ] _ArrayLikeInt_co: TypeAlias = _DualArrayLike[ - dtype[Union[np.bool, integer[Any]]], - Union[bool, int], + dtype[np.bool] | dtype[integer[Any]], + bool | int, ] _ArrayLikeFloat_co: TypeAlias = _DualArrayLike[ - dtype[Union[np.bool, integer[Any], floating[Any]]], - Union[bool, int, float], + dtype[np.bool] | dtype[integer[Any]] | dtype[floating[Any]], + bool | int | float, ] _ArrayLikeComplex_co: TypeAlias = _DualArrayLike[ - dtype[Union[ - np.bool, - integer[Any], - floating[Any], - complexfloating[Any, Any], - ]], - Union[bool, int, float, complex], + ( + dtype[np.bool] + | dtype[integer[Any]] + | dtype[floating[Any]] + | dtype[complexfloating[Any, Any]] + ), + bool | int | float | complex, ] _ArrayLikeNumber_co: TypeAlias = _DualArrayLike[ - dtype[Union[np.bool, number[Any]]], - Union[bool, int, float, complex], + dtype[np.bool] | dtype[number[Any]], + bool | int | float | complex, ] _ArrayLikeTD64_co: TypeAlias = _DualArrayLike[ - dtype[Union[np.bool, integer[Any], timedelta64]], - Union[bool, int], -] -_ArrayLikeDT64_co: TypeAlias = Union[ - _SupportsArray[dtype[datetime64]], - _NestedSequence[_SupportsArray[dtype[datetime64]]], -] -_ArrayLikeObject_co: TypeAlias = Union[ - _SupportsArray[dtype[object_]], - _NestedSequence[_SupportsArray[dtype[object_]]], + dtype[np.bool] | dtype[integer[Any]] | dtype[timedelta64], + bool | int, ] +_ArrayLikeDT64_co: TypeAlias = ( + _SupportsArray[dtype[datetime64]] + | _NestedSequence[_SupportsArray[dtype[datetime64]]] +) +_ArrayLikeObject_co: TypeAlias = ( + _SupportsArray[dtype[object_]] + | _NestedSequence[_SupportsArray[dtype[object_]]] +) -_ArrayLikeVoid_co: TypeAlias = Union[ - _SupportsArray[dtype[void]], - _NestedSequence[_SupportsArray[dtype[void]]], -] +_ArrayLikeVoid_co: TypeAlias = ( + _SupportsArray[dtype[void]] + | _NestedSequence[_SupportsArray[dtype[void]]] +) _ArrayLikeStr_co: TypeAlias = _DualArrayLike[ dtype[str_], str, @@ -148,6 +148,7 @@ def __array_function__( bytes, ] +# NOTE: This includes `builtins.bool`, but not `numpy.bool`. _ArrayLikeInt: TypeAlias = _DualArrayLike[ dtype[integer[Any]], int, diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index 217f92623984..b68b5337219d 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -1,8 +1,6 @@ from collections.abc import Sequence from typing import ( Any, - Sequence, - Union, TypeAlias, TypeVar, Protocol, @@ -88,164 +86,164 @@ def dtype(self) -> _DType_co: ... # A subset of `npt.DTypeLike` that can be parametrized w.r.t. 
`np.generic` -_DTypeLike: TypeAlias = Union[ - np.dtype[_SCT], - type[_SCT], - _SupportsDType[np.dtype[_SCT]], -] +_DTypeLike: TypeAlias = ( + np.dtype[_SCT] + | type[_SCT] + | _SupportsDType[np.dtype[_SCT]] +) # Would create a dtype[np.void] -_VoidDTypeLike: TypeAlias = Union[ +_VoidDTypeLike: TypeAlias = ( # (flexible_dtype, itemsize) - tuple[_DTypeLikeNested, int], + tuple[_DTypeLikeNested, int] # (fixed_dtype, shape) - tuple[_DTypeLikeNested, _ShapeLike], + | tuple[_DTypeLikeNested, _ShapeLike] # [(field_name, field_dtype, field_shape), ...] # # The type here is quite broad because NumPy accepts quite a wide # range of inputs inside the list; see the tests for some # examples. - list[Any], + | list[Any] # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., # 'itemsize': ...} - _DTypeDict, + | _DTypeDict # (base_dtype, new_dtype) - tuple[_DTypeLikeNested, _DTypeLikeNested], -] + | tuple[_DTypeLikeNested, _DTypeLikeNested] +) # Anything that can be coerced into numpy.dtype. # Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -DTypeLike: TypeAlias = Union[ - np.dtype[Any], +DTypeLike: TypeAlias = ( + np.dtype[Any] # default data type (float64) - None, + | None # array-scalar types and generic types - type[Any], # NOTE: We're stuck with `type[Any]` due to object dtypes + | type[Any] # NOTE: We're stuck with `type[Any]` due to object dtypes # anything with a dtype attribute - _SupportsDType[np.dtype[Any]], + | _SupportsDType[np.dtype[Any]] # character codes, type strings or comma-separated fields, e.g., 'float64' - str, - _VoidDTypeLike, -] + | str + | _VoidDTypeLike +) # NOTE: while it is possible to provide the dtype as a dict of # dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`), # this syntax is officially discourged and -# therefore not included in the Union defining `DTypeLike`. +# therefore not included in the type-union defining `DTypeLike`. # # See https://github.com/numpy/numpy/issues/16891 for more details. # Aliases for commonly used dtype-like objects. # Note that the precision of `np.number` subclasses is ignored herein. 
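# A reminder of the equivalence applied below (illustrative alias, not part
# of the stubs): the PEP 604 spelling
#
#     _IntOrStr: TypeAlias = int | str
#
# means exactly the same to a type checker as `Union[int, str]`; only the
# surface syntax changes.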
-_DTypeLikeBool: TypeAlias = Union[ - type[bool], - type[np.bool], - np.dtype[np.bool], - _SupportsDType[np.dtype[np.bool]], - _BoolCodes, -] -_DTypeLikeUInt: TypeAlias = Union[ - type[np.unsignedinteger], - np.dtype[np.unsignedinteger], - _SupportsDType[np.dtype[np.unsignedinteger]], - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _UByteCodes, - _UShortCodes, - _UIntCCodes, - _LongCodes, - _ULongLongCodes, - _UIntPCodes, - _UIntCodes, -] -_DTypeLikeInt: TypeAlias = Union[ - type[int], - type[np.signedinteger], - np.dtype[np.signedinteger], - _SupportsDType[np.dtype[np.signedinteger]], - _Int8Codes, - _Int16Codes, - _Int32Codes, - _Int64Codes, - _ByteCodes, - _ShortCodes, - _IntCCodes, - _LongCodes, - _LongLongCodes, - _IntPCodes, - _IntCodes, -] -_DTypeLikeFloat: TypeAlias = Union[ - type[float], - type[np.floating], - np.dtype[np.floating], - _SupportsDType[np.dtype[np.floating]], - _Float16Codes, - _Float32Codes, - _Float64Codes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, - _LongDoubleCodes, -] -_DTypeLikeComplex: TypeAlias = Union[ - type[complex], - type[np.complexfloating], - np.dtype[np.complexfloating], - _SupportsDType[np.dtype[np.complexfloating]], - _Complex64Codes, - _Complex128Codes, - _CSingleCodes, - _CDoubleCodes, - _CLongDoubleCodes, -] -_DTypeLikeDT64: TypeAlias = Union[ - type[np.timedelta64], - np.dtype[np.timedelta64], - _SupportsDType[np.dtype[np.timedelta64]], - _TD64Codes, -] -_DTypeLikeTD64: TypeAlias = Union[ - type[np.datetime64], - np.dtype[np.datetime64], - _SupportsDType[np.dtype[np.datetime64]], - _DT64Codes, -] -_DTypeLikeStr: TypeAlias = Union[ - type[str], - type[np.str_], - np.dtype[np.str_], - _SupportsDType[np.dtype[np.str_]], - _StrCodes, -] -_DTypeLikeBytes: TypeAlias = Union[ - type[bytes], - type[np.bytes_], - np.dtype[np.bytes_], - _SupportsDType[np.dtype[np.bytes_]], - _BytesCodes, -] -_DTypeLikeVoid: TypeAlias = Union[ - type[np.void], - np.dtype[np.void], - _SupportsDType[np.dtype[np.void]], - _VoidCodes, - _VoidDTypeLike, -] -_DTypeLikeObject: TypeAlias = Union[ - type, - np.dtype[np.object_], - _SupportsDType[np.dtype[np.object_]], - _ObjectCodes, -] - -_DTypeLikeComplex_co: TypeAlias = Union[ - _DTypeLikeBool, - _DTypeLikeUInt, - _DTypeLikeInt, - _DTypeLikeFloat, - _DTypeLikeComplex, -] +_DTypeLikeBool: TypeAlias = ( + type[bool] + | type[np.bool] + | np.dtype[np.bool] + | _SupportsDType[np.dtype[np.bool]] + | _BoolCodes +) +_DTypeLikeUInt: TypeAlias = ( + type[np.unsignedinteger] + | np.dtype[np.unsignedinteger] + | _SupportsDType[np.dtype[np.unsignedinteger]] + | _UInt8Codes + | _UInt16Codes + | _UInt32Codes + | _UInt64Codes + | _UByteCodes + | _UShortCodes + | _UIntCCodes + | _LongCodes + | _ULongLongCodes + | _UIntPCodes + | _UIntCodes +) +_DTypeLikeInt: TypeAlias = ( + type[int] + | type[np.signedinteger] + | np.dtype[np.signedinteger] + | _SupportsDType[np.dtype[np.signedinteger]] + | _Int8Codes + | _Int16Codes + | _Int32Codes + | _Int64Codes + | _ByteCodes + | _ShortCodes + | _IntCCodes + | _LongCodes + | _LongLongCodes + | _IntPCodes + | _IntCodes +) +_DTypeLikeFloat: TypeAlias = ( + type[float] + | type[np.floating] + | np.dtype[np.floating] + | _SupportsDType[np.dtype[np.floating]] + | _Float16Codes + | _Float32Codes + | _Float64Codes + | _HalfCodes + | _SingleCodes + | _DoubleCodes + | _LongDoubleCodes +) +_DTypeLikeComplex: TypeAlias = ( + type[complex] + | type[np.complexfloating] + | np.dtype[np.complexfloating] + | _SupportsDType[np.dtype[np.complexfloating]] + | _Complex64Codes + | _Complex128Codes + | 
_CSingleCodes + | _CDoubleCodes + | _CLongDoubleCodes +) +_DTypeLikeDT64: TypeAlias = ( + type[np.timedelta64] + | np.dtype[np.timedelta64] + | _SupportsDType[np.dtype[np.timedelta64]] + | _TD64Codes +) +_DTypeLikeTD64: TypeAlias = ( + type[np.datetime64] + | np.dtype[np.datetime64] + | _SupportsDType[np.dtype[np.datetime64]] + | _DT64Codes +) +_DTypeLikeStr: TypeAlias = ( + type[str] + | type[np.str_] + | np.dtype[np.str_] + | _SupportsDType[np.dtype[np.str_]] + | _StrCodes +) +_DTypeLikeBytes: TypeAlias = ( + type[bytes] + | type[np.bytes_] + | np.dtype[np.bytes_] + | _SupportsDType[np.dtype[np.bytes_]] + | _BytesCodes +) +_DTypeLikeVoid: TypeAlias = ( + type[np.void] + | np.dtype[np.void] + | _SupportsDType[np.dtype[np.void]] + | _VoidCodes + | _VoidDTypeLike +) +_DTypeLikeObject: TypeAlias = ( + type + | np.dtype[np.object_] + | _SupportsDType[np.dtype[np.object_]] + | _ObjectCodes +) + +_DTypeLikeComplex_co: TypeAlias = ( + _DTypeLikeBool + | _DTypeLikeUInt + | _DTypeLikeInt + | _DTypeLikeFloat + | _DTypeLikeComplex +) diff --git a/numpy/_typing/_scalars.py b/numpy/_typing/_scalars.py index 9d3f848ff110..97316d0209ba 100644 --- a/numpy/_typing/_scalars.py +++ b/numpy/_typing/_scalars.py @@ -1,34 +1,27 @@ -from typing import Any, TypeAlias, Union +from typing import Any, TypeAlias import numpy as np # NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and # `np.bytes_` are already subclasses of their builtin counterpart -_CharLike_co: TypeAlias = Union[str, bytes] +_CharLike_co: TypeAlias = str | bytes # The 6 `Like_co` type-aliases below represent all scalars that can be # coerced into `` (with the casting rule `same_kind`) -_BoolLike_co: TypeAlias = Union[bool, np.bool] -_UIntLike_co: TypeAlias = Union[_BoolLike_co, np.unsignedinteger[Any]] -_IntLike_co: TypeAlias = Union[_BoolLike_co, int, np.integer[Any]] -_FloatLike_co: TypeAlias = Union[_IntLike_co, float, np.floating[Any]] -_ComplexLike_co: TypeAlias = Union[ - _FloatLike_co, - complex, - np.complexfloating[Any, Any], -] -_TD64Like_co: TypeAlias = Union[_IntLike_co, np.timedelta64] +_BoolLike_co: TypeAlias = bool | np.bool +_UIntLike_co: TypeAlias = np.unsignedinteger[Any] | _BoolLike_co +_IntLike_co: TypeAlias = int | np.integer[Any] | _BoolLike_co +_FloatLike_co: TypeAlias = float | np.floating[Any] | _IntLike_co +_ComplexLike_co: TypeAlias = ( + complex + | np.complexfloating[Any, Any] + | _FloatLike_co +) +_TD64Like_co: TypeAlias = np.timedelta64 | _IntLike_co -_NumberLike_co: TypeAlias = Union[int, float, complex, np.number[Any], np.bool] -_ScalarLike_co: TypeAlias = Union[ - int, - float, - complex, - str, - bytes, - np.generic, -] +_NumberLike_co: TypeAlias = int | float | complex | np.number[Any] | np.bool +_ScalarLike_co: TypeAlias = int | float | complex | str | bytes | np.generic # `_VoidLike_co` is technically not a scalar, but it's close enough -_VoidLike_co: TypeAlias = Union[tuple[Any, ...], np.void] +_VoidLike_co: TypeAlias = tuple[Any, ...] | np.void From 1632faa84f4c02196f334793f4af09526d63c6fd Mon Sep 17 00:00:00 2001 From: Sam Morley Date: Tue, 16 Jul 2024 02:23:52 +0100 Subject: [PATCH 776/980] Added tests for stripping unicode from a string array with StringDType. 
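The behaviour under test, in miniature (the example strings here are
illustrative; the parametrized cases in the diff below cover more
combinations):

    import numpy as np
    from numpy.dtypes import StringDType

    arr = np.array(["λλμμ"], dtype=StringDType())
    # must agree with str.rstrip, i.e. produce "λλ"
    assert np.strings.rstrip(arr, "μ")[0] == "λλμμ".rstrip("μ")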
--- numpy/_core/tests/test_stringdtype.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 9ff3224947d9..7cf4a96f40cc 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1673,3 +1673,30 @@ def test_arena_no_reuse_after_short(self): assert_array_equal(c, self.a) assert_array_equal(self.in_arena(c), False) assert_array_equal(self.is_on_heap(c), self.in_arena(self.a)) + + +STRIP_METHODS = ["lstrip", "rstrip", "strip"] + +@pytest.mark.parametrize("method", STRIP_METHODS) +@pytest.mark.parametrize( + "source,strip", + [ + ("λμ", "μ"), + ("λμ", "λ"), + ("λ"*5 + "μ"*2, "μ"), + ("λ" * 5 + "μ" * 2, "λ"), + ("λ" * 5 + "A" + "μ" * 2, "μλ"), + ("λμ" * 5, "μ"), + ("λμ" * 5, "λ"), + ] +) +def test_strip_functions_unicode(method, source, strip): + src_array = np.array([source], dtype=StringDType()) + + npy_func = getattr(np.strings, method) + py_func = getattr(str, method) + + expected = np.array([py_func(source, strip)], dtype=StringDType()) + actual = npy_func(src_array, strip) + + assert_array_equal(actual, expected) From 58c0d7464e2362b317b521ef2c33b2bf91418352 Mon Sep 17 00:00:00 2001 From: Sam Morley Date: Tue, 16 Jul 2024 02:29:45 +0100 Subject: [PATCH 777/980] Added fix for unicode characters not stripped when StringDType is used. --- numpy/_core/src/umath/string_buffer.h | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 7f2345d2838f..665c47bbf067 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -1231,14 +1231,24 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT if (striptype != STRIPTYPE::RIGHTSTRIP) { for (; new_start < len1; traverse_buf++) { Py_ssize_t res; + size_t current_point_bytes = traverse_buf.num_bytes_next_character(); switch (enc) { case ENCODING::ASCII: - case ENCODING::UTF8: { CheckedIndexer ind(buf2.buf, len2); res = findchar(ind, len2, *traverse_buf); break; } + case ENCODING::UTF8: + { + if (current_point_bytes == 1) { + CheckedIndexer ind(buf2.buf, len2); + res = findchar(ind, len2, *traverse_buf); + } else { + res = fastsearch(buf2.buf, buf2.after - buf2.buf,traverse_buf.buf, current_point_bytes, -1, FAST_SEARCH); + } + break; + } case ENCODING::UTF32: { CheckedIndexer ind((npy_ucs4 *)buf2.buf, len2); @@ -1264,15 +1274,25 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT if (striptype != STRIPTYPE::LEFTSTRIP) { while (new_stop > new_start) { + size_t current_point_bytes = traverse_buf.num_bytes_next_character(); Py_ssize_t res; switch (enc) { case ENCODING::ASCII: - case ENCODING::UTF8: { CheckedIndexer ind(buf2.buf, len2); res = findchar(ind, len2, *traverse_buf); break; } + case ENCODING::UTF8: + { + if (current_point_bytes == 1) { + CheckedIndexer ind(buf2.buf, len2); + res = findchar(ind, len2, *traverse_buf); + } else { + res = fastsearch(buf2.buf, buf2.after - buf2.buf, traverse_buf.buf, current_point_bytes, -1, FAST_RSEARCH); + } + break; + } case ENCODING::UTF32: { CheckedIndexer ind((npy_ucs4 *)buf2.buf, len2); @@ -1283,7 +1303,7 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT if (res < 0) { break; } - num_bytes -= traverse_buf.num_bytes_next_character(); + num_bytes -= current_point_bytes;; new_stop--; // Do not step to character -1: can't find it's start for utf-8. 
if (new_stop > 0) { From 06a0fdfe72d1ecf9c43767323f03182dcc94b2ae Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 17 Jul 2024 04:36:21 +0200 Subject: [PATCH 778/980] DOC: Issue template for static typing --- .github/ISSUE_TEMPLATE/typing.yml | 68 +++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/typing.yml diff --git a/.github/ISSUE_TEMPLATE/typing.yml b/.github/ISSUE_TEMPLATE/typing.yml new file mode 100644 index 000000000000..a35b339e4883 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/typing.yml @@ -0,0 +1,68 @@ +name: Static Typing +description: Report an issue with the NumPy typing hints. +title: "TYP: " +labels: [Static typing] + +body: +- type: markdown + attributes: + value: > + Thank you for taking the time to report this issue. + Please make sure that this issue hasn't already been reported before. + +- type: textarea + attributes: + label: "Describe the issue:" + validations: + required: true + +- type: textarea + attributes: + label: "Reproduce the code example:" + description: > + A short code example that reproduces the error in your type-checker. It + should be self-contained, i.e., can be run as-is via e.g. + `mypy myproblem.py` or `pyright myproblem.py`. + placeholder: | + import numpy as np + import numpy.typing as npt + << your code here >> + render: python + validations: + required: true + +- type: textarea + attributes: + label: "Error message:" + description: > + Please include all relevant error messages from your type-checker or IDE. + render: shell + +- type: textarea + attributes: + label: "Python and NumPy Versions:" + description: > + Output from `import sys, numpy; print(numpy.__version__); print(sys.version)`. + validations: + required: true + +- type: textarea + attributes: + label: "Type-checker version and settings:" + description: > + Please include the exact version of the type-checker you are using. + Popular (static) type checkers include Mypy, Pyright / Pylance, Pytype, + Pyre, PyCharm, etc. + Also include the full CLI command used to run the type-checker, and + all of the relevant configuration options. + validations: + required: true + +- type: textarea + attributes: + label: "Additional typing packages." + description: | + If you are using `typing-extensions` or typing-stub packages, please + list their versions here. + validations: + required: false From 7622b67bebc19437d2e6e6c7f748a557534c5d5c Mon Sep 17 00:00:00 2001 From: "Patrick J. Roddy" Date: Sat, 13 Jul 2024 11:55:29 -0700 Subject: [PATCH 779/980] BUG: fix f2py tests to work with v2 API This is a clear bug which CI unfortunately doesn't see because it just skips the f2py test if compilation fails (which it does here). The error is slightly more precise now, but not precise enough to avoid that issue. 
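For reference, `PyDataType_ALIGNMENT` is the NumPy 2 accessor for what
used to be read off the descriptor struct directly; the value it returns
is the same one exposed to Python as `dtype.alignment`, e.g.:

    >>> import numpy as np
    >>> np.dtype(np.float64).alignment  # typically 8 on 64-bit platforms
    8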
--- numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c | 2 +- numpy/f2py/tests/util.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index f3bffdc1c220..8c6ba1396924 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -115,7 +115,7 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, PyArray_DESCR(arr)->type, PyArray_TYPE(arr), PyArray_ITEMSIZE(arr), - PyArray_DESCR(arr)->alignment, + PyDataType_ALIGNMENT(arr), PyArray_FLAGS(arr), PyArray_ITEMSIZE(arr)); } diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 0b21663e3e3f..f07131e14af5 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -328,7 +328,7 @@ def build_meson(source_files, module_name=None, **kwargs): # compiler stack is on the CI try: backend.compile() - except: + except subprocess.CalledProcessError: pytest.skip("Failed to compile module") # Import the compiled module From 5093711eb0e509a63101a6778b9627fd0f8276eb Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 17 Jul 2024 16:21:45 +0200 Subject: [PATCH 780/980] MAINT: add a 'tests' install tag to the `numpy._core._simd` extension module This avoids installing `_simd.so` when the 'tests' install tag is omitted in order to strip the whole test suite. This is a significant saving in binary size especially on x86-64 (see issue 25737, about 10% of the installed size). Tested on macOS arm64 too, there `_simd.so` is 363 kb for a release build, or ~12% of the size of the main extension module (`_multiarray_umath.so`). --- numpy/_core/meson.build | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 90f29a0f6d7f..30c3d0ba48c8 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1278,6 +1278,7 @@ py.extension_module('_simd', link_with: [npymath_lib, _simd_mtargets.static_lib('_simd_mtargets')], install: true, subdir: 'numpy/_core', + install_tag: 'tests', ) python_sources = [ From 2b04765d674d23091c75c0409b92de73c021b2d3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 18 Jul 2024 03:55:26 +0200 Subject: [PATCH 781/980] TYP: ``Final`` constants in ``numpy.version`` and use ``LiteralString`` --- numpy/version.pyi | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/numpy/version.pyi b/numpy/version.pyi index 2c305466a7e0..1262189f2f38 100644 --- a/numpy/version.pyi +++ b/numpy/version.pyi @@ -1,7 +1,24 @@ -version: str -__version__: str -full_version: str +import sys +from typing import Final, TypeAlias -git_revision: str -release: bool -short_version: str +if sys.version_info >= (3, 11): + from typing import LiteralString +else: + LiteralString: TypeAlias = str + +__all__ = ( + '__version__', + 'full_version', + 'git_revision', + 'release', + 'short_version', + 'version', +) + +version: Final[LiteralString] +__version__: Final[LiteralString] +full_version: Final[LiteralString] + +git_revision: Final[LiteralString] +release: Final[bool] +short_version: Final[LiteralString] From d19263ac79fff17e52befa01b912a1b3d5ddfa91 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 18 Jul 2024 07:30:40 +0200 Subject: [PATCH 782/980] TYP: Replace ``typing.Optional[T]`` with ``T | None`` in the ``numpy.typing`` tests (#26954) Since Python 3.10 typing.Optional[T] has been deprecated in favour of the T | None syntax. 
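A minimal before/after of the spelling change (illustrative variables):

    from typing import Optional

    x: Optional[bool] = None   # pre-PEP 604 spelling
    y: bool | None = None      # equivalent modern spelling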
See https://typing.readthedocs.io/en/latest/spec/historical.html#union-and-optional . --- numpy/typing/tests/data/pass/arithmetic.py | 6 +++--- numpy/typing/tests/data/pass/ufunclike.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 496586821582..4ac4e957445c 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Optional +from typing import Any import numpy as np import pytest @@ -26,8 +26,8 @@ class Object: - def __array__(self, dtype: Optional[np.typing.DTypeLike] = None, - copy: Optional[bool] = None) -> np.ndarray[Any, np.dtype[np.object_]]: + def __array__(self, dtype: np.typing.DTypeLike = None, + copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]: ret = np.empty((), dtype=object) ret[()] = self return ret diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index 4baa0334a404..f993939ddba1 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -1,5 +1,5 @@ from __future__ import annotations -from typing import Any, Optional +from typing import Any import numpy as np @@ -13,8 +13,8 @@ def __floor__(self) -> Object: def __ge__(self, value: object) -> bool: return True - def __array__(self, dtype: Optional[np.typing.DTypeLike] = None, - copy: Optional[bool] = None) -> np.ndarray[Any, np.dtype[np.object_]]: + def __array__(self, dtype: np.typing.DTypeLike | None = None, + copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]: ret = np.empty((), dtype=object) ret[()] = self return ret From 19c0728b895f72d89faa090bd2832fd010a7f9e4 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 18 Jul 2024 10:28:55 +0200 Subject: [PATCH 783/980] TYP: Transparent `__array__` shape-type (#26927) This changes the ndarray.__array__ and flatiter.__array__ methods to return a ndarray with the same shape type. Due to technical limitations, flatiter will it only return the shape type of its underlying ndarray if it's 1-d (like tuple[int]). --- numpy/__init__.pyi | 13 +++++++++-- numpy/typing/tests/data/reveal/flatiter.pyi | 24 ++++++++++++++++++++- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index db9670d32c60..b27366aab28a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -901,6 +901,7 @@ _ArrayLikeInt: TypeAlias = ( ) _FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter[Any]) +_FlatShapeType = TypeVar("_FlatShapeType", bound=tuple[int]) @final class flatiter(Generic[_NdArraySubClass]): @@ -935,6 +936,10 @@ class flatiter(Generic[_NdArraySubClass]): value: Any, ) -> None: ... @overload + def __array__(self: flatiter[ndarray[_FlatShapeType, _DType]], dtype: None = ..., /) -> ndarray[_FlatShapeType, _DType]: ... + @overload + def __array__(self: flatiter[ndarray[_FlatShapeType, Any]], dtype: _DType, /) -> ndarray[_FlatShapeType, _DType]: ... + @overload def __array__(self: flatiter[ndarray[Any, _DType]], dtype: None = ..., /) -> ndarray[Any, _DType]: ... @overload def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... @@ -1469,11 +1474,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __array__( self, dtype: None = ..., /, *, copy: None | bool = ... - ) -> ndarray[Any, _DType_co]: ... 
+ ) -> ndarray[_ShapeType, _DType_co]: ... @overload def __array__( self, dtype: _DType, /, *, copy: None | bool = ... - ) -> ndarray[Any, _DType]: ... + ) -> ndarray[_ShapeType, _DType]: ... def __array_ufunc__( self, @@ -1704,11 +1709,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): axis: None | SupportsIndex = ..., ) -> ndarray[Any, _DType_co]: ... + # TODO: use `tuple[int]` as shape type once covariant (#26081) def flatten( self, order: _OrderKACF = ..., ) -> ndarray[Any, _DType_co]: ... + # TODO: use `tuple[int]` as shape type once covariant (#26081) def ravel( self, order: _OrderKACF = ..., @@ -2613,6 +2620,7 @@ _NBit2 = TypeVar("_NBit2", bound=NBitBase) class generic(_ArrayOrScalarCommon): @abstractmethod def __init__(self, *args: Any, **kwargs: Any) -> None: ... + # TODO: use `tuple[()]` as shape type once covariant (#26081) @overload def __array__(self: _ScalarType, dtype: None = ..., /) -> NDArray[_ScalarType]: ... @overload @@ -3740,6 +3748,7 @@ class poly1d: __hash__: ClassVar[None] # type: ignore + # TODO: use `tuple[int]` as shape type once covariant (#26081) @overload def __array__(self, t: None = ..., copy: None | bool = ...) -> NDArray[Any]: ... @overload diff --git a/numpy/typing/tests/data/reveal/flatiter.pyi b/numpy/typing/tests/data/reveal/flatiter.pyi index 84d3b03b7d37..efbe75cee26a 100644 --- a/numpy/typing/tests/data/reveal/flatiter.pyi +++ b/numpy/typing/tests/data/reveal/flatiter.pyi @@ -1,5 +1,5 @@ import sys -from typing import Any +from typing import Any, Literal, TypeAlias import numpy as np import numpy.typing as npt @@ -10,6 +10,10 @@ else: from typing_extensions import assert_type a: np.flatiter[npt.NDArray[np.str_]] +a_1d: np.flatiter[np.ndarray[tuple[int], np.dtype[np.bytes_]]] + +Size: TypeAlias = Literal[42] +a_1d_fixed: np.flatiter[np.ndarray[tuple[Size], np.dtype[np.object_]]] assert_type(a.base, npt.NDArray[np.str_]) assert_type(a.copy(), npt.NDArray[np.str_]) @@ -23,8 +27,26 @@ assert_type(a[...], npt.NDArray[np.str_]) assert_type(a[:], npt.NDArray[np.str_]) assert_type(a[(...,)], npt.NDArray[np.str_]) assert_type(a[(0,)], np.str_) + assert_type(a.__array__(), npt.NDArray[np.str_]) assert_type(a.__array__(np.dtype(np.float64)), npt.NDArray[np.float64]) +assert_type( + a_1d.__array__(), + np.ndarray[tuple[int], np.dtype[np.bytes_]], +) +assert_type( + a_1d.__array__(np.dtype(np.float64)), + np.ndarray[tuple[int], np.dtype[np.float64]], +) +assert_type( + a_1d_fixed.__array__(), + np.ndarray[tuple[Size], np.dtype[np.object_]], +) +assert_type( + a_1d_fixed.__array__(np.dtype(np.float64)), + np.ndarray[tuple[Size], np.dtype[np.float64]], +) + a[0] = "a" a[:5] = "a" a[...] 
= "a" From ea40b03de52fc7afa9f30d8696595dc93d04636d Mon Sep 17 00:00:00 2001 From: Moritz Schreiber <68053396+mosc9575@users.noreply.github.com> Date: Thu, 18 Jul 2024 19:14:46 +0200 Subject: [PATCH 784/980] add sphinx-copybutton --- doc/source/conf.py | 2 ++ environment.yml | 1 + requirements/doc_requirements.txt | 1 + 3 files changed, 4 insertions(+) diff --git a/doc/source/conf.py b/doc/source/conf.py index 2e54df1ac3c5..481201e8e5b6 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -87,6 +87,7 @@ class PyTypeObject(ctypes.Structure): 'IPython.sphinxext.ipython_console_highlighting', 'IPython.sphinxext.ipython_directive', 'sphinx.ext.mathjax', + 'sphinx_copybutton', 'sphinx_design', ] @@ -285,6 +286,7 @@ def setup(app): plot_html_show_formats = False plot_html_show_source_link = False +copybutton_prompt_text = ">>> " # ----------------------------------------------------------------------------- # LaTeX output # ----------------------------------------------------------------------------- diff --git a/environment.yml b/environment.yml index 7e347bccb6c9..86ee1058f440 100644 --- a/environment.yml +++ b/environment.yml @@ -29,6 +29,7 @@ dependencies: - mypy=1.10.0 # For building docs - sphinx>=4.5.0 + - sphinx-copybutton - sphinx-design - numpydoc=1.4.0 - ipython diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index 7dfb228c83f1..79de7a9f0802 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -2,6 +2,7 @@ sphinx==7.2.6 numpydoc==1.4 pydata-sphinx-theme>=0.15.2 +sphinx-copybutton sphinx-design scipy matplotlib From 0d8832e87df3e360b01a710600c8130ea382018a Mon Sep 17 00:00:00 2001 From: otieno-juma Date: Mon, 1 Jul 2024 06:21:39 -0500 Subject: [PATCH 785/980] DOC: AI-Gen examples ctypeslib.as_ctypes_types I used AI Llama 3 to help create these. @bmwoodruff and I reviewed them. [skip actions] [skip azp] [skip cirrus] --- numpy/ctypeslib.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index 8faf9415375b..5ab89ba7dd15 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -499,6 +499,22 @@ def as_ctypes_type(dtype): `ctypes.Structure`\ s - insert padding fields + Examples + -------- + Converting a simple dtype: + + >>> dt = np.dtype('i4') + >>> ctype = np.ctypeslib.as_ctypes_type(dt) + >>> ctype + + + Converting a structured dtype: + + >>> dt = np.dtype([('x', 'i4'), ('y', 'f4')]) + >>> ctype = np.ctypeslib.as_ctypes_type(dt) + >>> ctype + + """ return _ctype_from_dtype(_dtype(dtype)) From c6a787e7ed7aaac62daeefc7c83651f2825a23c4 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 18 Jul 2024 12:04:38 -0600 Subject: [PATCH 786/980] ENH: add support in f2py to declare gil-disabled support --- doc/source/f2py/usage.rst | 7 +++ numpy/conftest.py | 20 ++++++++ numpy/f2py/capi_maps.py | 2 + numpy/f2py/f2py2e.py | 20 +++++++- numpy/f2py/rules.py | 5 ++ .../tests/src/array_from_pyobj/wrapmodule.c | 9 +++- numpy/f2py/tests/test_abstract_interface.py | 2 - numpy/f2py/tests/test_callback.py | 2 - numpy/f2py/tests/test_f2py2e.py | 51 +++++++++++++++++-- numpy/f2py/tests/util.py | 6 ++- 10 files changed, 113 insertions(+), 11 deletions(-) diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index 859a2c38be5f..73aa6ab3fa5d 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -86,6 +86,13 @@ Here ```` may also contain signature files. 
Among other options ``--wrap-functions`` is default because it ensures maximum portability and compiler independence. +``--[no-]requires-gil`` + Create a module that declares it does or doesn't require the GIL. The default + is ``--requires-gil`` for backwards compatibility. Inspect the fortran + code you are wrapping for thread safety issues before passing + ``--no-requires-gil``, as ``f2py`` does not analyze fortran code for thread + safety issues. + ``--include-paths ":..."`` Search include files from given directories. diff --git a/numpy/conftest.py b/numpy/conftest.py index ad331dbb3bed..49da3f6798f7 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -2,6 +2,7 @@ Pytest configuration and fixtures for the Numpy test suite. """ import os +import sys import tempfile from contextlib import contextmanager import warnings @@ -11,6 +12,7 @@ import numpy from numpy._core._multiarray_tests import get_fpu_mode +from numpy.testing._private.utils import NOGIL_BUILD try: from scipy_doctest.conftest import dt_config @@ -72,12 +74,30 @@ def pytest_addoption(parser): "automatically.")) +gil_enabled_at_start = True +if NOGIL_BUILD: + gil_enabled_at_start = sys._is_gil_enabled() + + def pytest_sessionstart(session): available_mem = session.config.getoption('available_memory') if available_mem is not None: os.environ['NPY_AVAILABLE_MEM'] = available_mem +def pytest_terminal_summary(terminalreporter, exitstatus, config): + if NOGIL_BUILD and not gil_enabled_at_start and sys._is_gil_enabled(): + terminalreporter.ensure_newline() + terminalreporter.section("GIL re-enabled", sep="=", red=True, bold=True) + terminalreporter.line("The GIL was re-enabled at runtime during the tests.") + terminalreporter.line("This can happen with no test failures if the RuntimeWarning") + terminalreporter.line("raised by Python when this happens is filtered by a test.") + terminalreporter.line("") + terminalreporter.line("Please ensure all new C modules declare support for running") + terminalreporter.line("without the GIL. Any new tests that intentionally imports code") + terminalreporter.line("that re-enables the GIL should do so in a subprocess.") + pytest.exit("GIL re-enabled during tests", returncode=1) + #FIXME when yield tests are gone. @pytest.hookimpl() def pytest_itemcollected(item): diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index fa477a5b9aca..8a8939d7260a 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -689,6 +689,8 @@ def modsign2map(m): else: ret['interface_usercode'] = '' ret['pymethoddef'] = getpymethoddef(m) or '' + if 'gil_used' in m: + ret['gil_used'] = m['gil_used'] if 'coutput' in m: ret['coutput'] = m['coutput'] if 'f2py_wrapper_output' in m: diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index c6eac78b71f4..3011c822a85b 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -106,6 +106,13 @@ functions. --wrap-functions is default because it ensures maximum portability/compiler independence. + --[no-]requires-gil Create a module that declares it does or doesn't + require the GIL. The default is --requires-gil for + backward compatibility. Inspect the Fortran code you are + wrapping for thread safety issues before passing + --no-requires-gil, as ``f2py`` does not analyze fortran + code for thread safety issues. + --include-paths ::... Search include files from the given directories. 
@@ -204,6 +211,7 @@ def scaninputline(inputline): options = {'buildpath': buildpath, 'coutput': None, 'f2py_wrapper_output': None} + requires_gil = 1 for l in inputline: if l == '': pass @@ -261,6 +269,10 @@ def scaninputline(inputline): cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:] elif l == '--skip-empty-wrappers': emptygen = False + elif l == '--requires-gil': + requires_gil = 1 + elif l == '--no-requires-gil': + requires_gil = 0 elif l[0] == '-': errmess('Unknown option %s\n' % repr(l)) sys.exit() @@ -327,6 +339,7 @@ def scaninputline(inputline): options['wrapfuncs'] = wrapfuncs options['buildpath'] = buildpath options['include_paths'] = include_paths + options['requires_gil'] = requires_gil options.setdefault('f2cmap_file', None) return files, options @@ -364,6 +377,11 @@ def callcrackfortran(files, options): else: for mod in postlist: mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] + for mod in postlist: + if options["requires_gil"]: + mod['gil_used'] = 'Py_MOD_GIL_USED' + else: + mod['gil_used'] = 'Py_MOD_GIL_NOT_USED' return postlist @@ -615,7 +633,7 @@ def run_compile(): sysinfo_flags = [f[7:] for f in sysinfo_flags] _reg2 = re.compile( - r'--((no-|)(wrap-functions|lower)|debug-capi|quiet|skip-empty-wrappers)|-include') + r'--((no-|)(wrap-functions|lower|requires-gil)|debug-capi|quiet|skip-empty-wrappers)|-include') f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] f2py_flags2 = [] diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 009365e04761..7566e1ececeb 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -236,6 +236,11 @@ #initcommonhooks# #interface_usercode# +#if Py_GIL_DISABLED + // signal whether this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m , #gil_used#); +#endif + #ifdef F2PY_REPORT_ATEXIT if (! 
PyErr_Occurred()) on_exit(f2py_report_on_exit,(void*)\"#modulename#\"); diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index 8c6ba1396924..b66672a43e21 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -115,7 +115,7 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, PyArray_DESCR(arr)->type, PyArray_TYPE(arr), PyArray_ITEMSIZE(arr), - PyDataType_ALIGNMENT(arr), + PyDataType_ALIGNMENT(PyArray_DESCR(arr)), PyArray_FLAGS(arr), PyArray_ITEMSIZE(arr)); } @@ -214,7 +214,7 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT); ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); -#undef ADDCONST( +#undef ADDCONST if (PyErr_Occurred()) Py_FatalError("can't initialize module wrap"); @@ -223,6 +223,11 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); #endif +#if Py_GIL_DISABLED + // signal whether this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } #ifdef __cplusplus diff --git a/numpy/f2py/tests/test_abstract_interface.py b/numpy/f2py/tests/test_abstract_interface.py index 238e9197d122..2c6555aecea1 100644 --- a/numpy/f2py/tests/test_abstract_interface.py +++ b/numpy/f2py/tests/test_abstract_interface.py @@ -6,8 +6,6 @@ from numpy.testing import IS_WASM -@pytest.mark.filterwarnings(r"ignore:.*The global interpreter lock \(GIL\) " - r"has been enabled.*:RuntimeWarning") @pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") @pytest.mark.slow class TestAbstractInterface(util.F2PyTest): diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index f31407d5a0f6..8bd6175a3eb9 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -11,8 +11,6 @@ from . import util -@pytest.mark.filterwarnings(r"ignore:.*The global interpreter lock \(GIL\) " - r"has been enabled.*:RuntimeWarning") class TestF77Callback(util.F2PyTest): sources = [util.getpath("tests", "src", "callback", "foo.f")] diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 744049a2422d..3a9c8bdf6727 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -7,6 +7,7 @@ from . 
import util from numpy.f2py.f2py2e import main as f2pycli +from numpy.testing._private.utils import NOGIL_BUILD ######################### # CLI utils and classes # @@ -573,7 +574,7 @@ def test_debugcapi_bld(hello_world_f90, monkeypatch): with util.switchdir(ipath.parent): f2pycli() - cmd_run = shlex.split("python3 -c \"import blah; blah.hi()\"") + cmd_run = shlex.split(f"{sys.executable} -c \"import blah; blah.hi()\"") rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') eout = ' Hello World\n' eerr = textwrap.dedent("""\ @@ -742,16 +743,60 @@ def test_npdistop(hello_world_f90, monkeypatch): with util.switchdir(ipath.parent): f2pycli() - cmd_run = shlex.split("python -c \"import blah; blah.hi()\"") + cmd_run = shlex.split(f"{sys.executable} -c \"import blah; blah.hi()\"") rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') eout = ' Hello World\n' assert rout.stdout == eout +def test_requires_gil(hello_world_f90, monkeypatch): + """ + CLI :: --requires-gil + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --requires-gil'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + cmd = f"{sys.executable} -c \"import blah; blah.hi();" + if NOGIL_BUILD: + cmd += "import sys; assert sys._is_gil_enabled() is True\"" + else: + cmd += "\"" + cmd_run = shlex.split(cmd) + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + if NOGIL_BUILD: + assert "The global interpreter lock (GIL) has been enabled to load module 'blah'" in rout.stderr + assert rout.returncode == 0 + + +def test_no_requires_gil(hello_world_f90, monkeypatch): + """ + CLI :: --no-requires-gil + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --no-requires-gil'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + cmd = f"{sys.executable} -c \"import blah; blah.hi();" + if NOGIL_BUILD: + cmd += "import sys; assert sys._is_gil_enabled() is False\"" + else: + cmd += "\"" + cmd_run = shlex.split(cmd) + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + assert rout.stderr == "" + assert rout.returncode == 0 + + # Numpy distutils flags # TODO: These should be tested separately - def test_npd_fcompiler(): """ CLI :: -c --fcompiler diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index f07131e14af5..37d47ae41d4f 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -129,7 +129,11 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): # Prepare options if module_name is None: module_name = get_temp_module_name() - f2py_opts = ["-c", "-m", module_name] + options + f2py_sources + gil_options = [] + if '--requires-gil' not in options and '--no-requires-gil' not in options: + # default to disabling the GIL if unset in options + gil_options = ['--no-requires-gil'] + f2py_opts = ["-c", "-m", module_name] + options + gil_options + f2py_sources f2py_opts += ["--backend", "meson"] if skip: f2py_opts += ["skip:"] + skip From 0c7564dd4fbf1a251dbd64b76f7f9b0607179bec Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 18 Jul 2024 12:07:25 -0600 Subject: [PATCH 787/980] TST: do not set PYTHON_GIL in tests --- .github/workflows/linux.yml | 12 ------------ tools/wheels/cibw_test_command.sh | 10 ++-------- 2 files changed, 2 insertions(+), 20 deletions(-) diff --git a/.github/workflows/linux.yml 
b/.github/workflows/linux.yml index 147b89353bce..89f3fef0f6d4 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -310,16 +310,4 @@ jobs: - name: Install nightly Cython run: | pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython - # Set PYTHON_GIL=0 to force GIL off during tests and then manually verify - # importing numpy does not enable the GIL. When f2py grows the ability to - # declare GIL-disabled support we can just run the tests without the - # environment variable - uses: ./.github/meson_actions - env: - PYTHON_GIL: 0 - - name: Verify import does not re-enable GIL - run: | - if [[ $(python -c "import numpy" 2>&1) == "*The global interpreter lock (GIL) has been enabled*" ]]; then - echo "Error: Importing NumPy re-enables the GIL in the free-threaded build" - exit 1 - fi diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index b967ec1e67f5..73328e26dd15 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -36,18 +36,12 @@ if [[ $FREE_THREADED_BUILD == "True" ]]; then python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython # Manually check that importing NumPy does not re-enable the GIL. - # Afterwards, force the GIL to always be disabled so it does not get - # re-enabled during the tests. - # - # TODO: delete when f2py grows the ability to define extensions that declare - # they can run without the gil or when we can work around the fact the f2py - # tests import modules that don't declare gil-disabled support. + # In principle the tests should catch this but it seems harmless to leave it + # here as a final sanity check before uploading broken wheels if [[ $(python -c "import numpy" 2>&1) == "*The global interpreter lock (GIL) has been enabled*" ]]; then echo "Error: Importing NumPy re-enables the GIL in the free-threaded build" exit 1 fi - - export PYTHON_GIL=0 fi # Run full tests with -n=auto. This makes pytest-xdist distribute tests across From e4f868082d35294d8ec02645973b2025e908f316 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 18 Jul 2024 13:57:37 -0600 Subject: [PATCH 788/980] MAINT: fix linter --- numpy/conftest.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/numpy/conftest.py b/numpy/conftest.py index 49da3f6798f7..677537e206f0 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -87,15 +87,16 @@ def pytest_sessionstart(session): def pytest_terminal_summary(terminalreporter, exitstatus, config): if NOGIL_BUILD and not gil_enabled_at_start and sys._is_gil_enabled(): - terminalreporter.ensure_newline() - terminalreporter.section("GIL re-enabled", sep="=", red=True, bold=True) - terminalreporter.line("The GIL was re-enabled at runtime during the tests.") - terminalreporter.line("This can happen with no test failures if the RuntimeWarning") - terminalreporter.line("raised by Python when this happens is filtered by a test.") - terminalreporter.line("") - terminalreporter.line("Please ensure all new C modules declare support for running") - terminalreporter.line("without the GIL. 
Any new tests that intentionally imports code")
-        terminalreporter.line("that re-enables the GIL should do so in a subprocess.")
+        tr = terminalreporter
+        tr.ensure_newline()
+        tr.section("GIL re-enabled", sep="=", red=True, bold=True)
+        tr.line("The GIL was re-enabled at runtime during the tests.")
+        tr.line("This can happen with no test failures if the RuntimeWarning")
+        tr.line("raised by Python when this happens is filtered by a test.")
+        tr.line("")
+        tr.line("Please ensure all new C modules declare support for running")
+        tr.line("without the GIL. Any new tests that intentionally import ")
+        tr.line("code that re-enables the GIL should do so in a subprocess.")
         pytest.exit("GIL re-enabled during tests", returncode=1)
 
 #FIXME when yield tests are gone.

From 2b1daf05f0d0c801418412ed34efa382ca687306 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Thu, 18 Jul 2024 14:05:09 -0600
Subject: [PATCH 789/980] TST: run new test on Python 3.12 and newer to avoid deprecations

---
 numpy/f2py/tests/test_f2py2e.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py
index 3a9c8bdf6727..0fea94cd6ad4 100644
--- a/numpy/f2py/tests/test_f2py2e.py
+++ b/numpy/f2py/tests/test_f2py2e.py
@@ -749,6 +749,8 @@ def test_npdistop(hello_world_f90, monkeypatch):
     assert rout.stdout == eout
 
 
+@pytest.mark.skipif(sys.version_info <= (3, 12),
+                    reason='Python 3.12 or newer required')
 def test_requires_gil(hello_world_f90, monkeypatch):
     """
     CLI :: --requires-gil
@@ -772,6 +774,8 @@ def test_requires_gil(hello_world_f90, monkeypatch):
 
 
+@pytest.mark.skipif(sys.version_info <= (3, 12),
+                    reason='Python 3.12 or newer required')
 def test_no_requires_gil(hello_world_f90, monkeypatch):
     """
     CLI :: --no-requires-gil

From 623ecfa223606cbb19074e8e60d161401cfcb2a5 Mon Sep 17 00:00:00 2001
From: Yair Chuchem
Date: Tue, 16 Jul 2024 11:25:57 +0300
Subject: [PATCH 790/980] TST: Test for inconsistent results in complex squaring (#26940)

The test fails when compiling on clang with "-ffp-contract=off",
due to a combination of factors:

* Difference between SIMD and non-SIMD loops
* nomemoverlap falling back to non-SIMD loop due to off-by-one bug

---
 numpy/_core/tests/test_regression.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py
index 02726d6a108c..2636295a0020 100644
--- a/numpy/_core/tests/test_regression.py
+++ b/numpy/_core/tests/test_regression.py
@@ -2622,3 +2622,16 @@ def test_vectorize_fixed_width_string(self):
         f = str.casefold
         res = np.vectorize(f, otypes=[arr.dtype])(arr)
         assert res.dtype == "U30"
+
+    def test_repeated_square_consistency(self):
+        # gh-26940
+        buf = np.array([-5.171866611150749e-07 + 2.5618634555957426e-07j,
+                        0, 0, 0, 0, 0])
+        # Test buffer with regular and reverse strides
+        for in_vec in [buf[:3], buf[:3][::-1]]:
+            expected_res = np.square(in_vec)
+            # Output vector immediately follows input vector
+            # to reproduce off-by-one in nomemoverlap check.
+            for res in [buf[3:], buf[3:][::-1]]:
+                np.square(in_vec, out=res)
+                assert_equal(res, expected_res)

From 9d68e6e6335d3f3605dbdeedd2ade25609c7c995 Mon Sep 17 00:00:00 2001
From: Yair Chuchem
Date: Tue, 16 Jul 2024 11:30:09 +0300
Subject: [PATCH 791/980] BUG: Fix off-by-one error in nomemoverlap check.

The original check (since introduced in 085cdbe5e7755) returned overlap
for adjacent arrays.
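For illustration only (a minimal sketch, not part of the change): two views
that sit back to back in one buffer share no elements, yet the old
size-based bounds made them look overlapping:

    import numpy as np

    buf = np.zeros(6, dtype=np.complex128)
    in_vec = buf[:3]    # items 0..2
    out_vec = buf[3:]   # items 3..5, starting right where in_vec's storage ends
    # With ip_end computed as ip + ip_step * len, the end of in_vec equals
    # the start of out_vec, so the old check reported an overlap and the
    # fast SIMD path was skipped for this perfectly safe call:
    np.square(in_vec, out=out_vec)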
This fixes one of the factors causing test_repeated_square_consistency failure, so the test should now always pass. --- numpy/_core/src/umath/fast_loop_macros.h | 10 +++++----- numpy/_core/src/umath/loops_utils.h.src | 23 +++++++++++++---------- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/numpy/_core/src/umath/fast_loop_macros.h b/numpy/_core/src/umath/fast_loop_macros.h index b8c1926b2f7e..1e19bf19bfbf 100644 --- a/numpy/_core/src/umath/fast_loop_macros.h +++ b/numpy/_core/src/umath/fast_loop_macros.h @@ -341,15 +341,15 @@ abs_ptrdiff(char *a, char *b) ((labs(steps[0]) < MAX_STEP_SIZE) && \ (labs(steps[1]) < MAX_STEP_SIZE) && \ (labs(steps[2]) < MAX_STEP_SIZE) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \ - (nomemoverlap(args[1], steps[1] * dimensions[0], args[2], steps[2] * dimensions[0]))) + (nomemoverlap(args[0], steps[0], args[2], steps[2], dimensions[0])) && \ + (nomemoverlap(args[1], steps[1], args[2], steps[2], dimensions[0]))) #define IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP \ ((labs(steps[0]) < MAX_STEP_SIZE) && \ (labs(steps[1]) < MAX_STEP_SIZE) && \ (labs(steps[2]) < MAX_STEP_SIZE) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[1], steps[1] * dimensions[0]))) + (nomemoverlap(args[0], steps[0], args[2], steps[2], dimensions[0])) && \ + (nomemoverlap(args[0], steps[0], args[1], steps[1], dimensions[0]))) /* * 1) Output should be contiguous, can handle strided input data @@ -359,7 +359,7 @@ abs_ptrdiff(char *a, char *b) #define IS_OUTPUT_BLOCKABLE_UNARY(esizein, esizeout, vsize) \ ((steps[0] & (esizein-1)) == 0 && \ steps[1] == (esizeout) && llabs(steps[0]) < MAX_STEP_SIZE && \ - (nomemoverlap(args[1], steps[1] * dimensions[0], args[0], steps[0] * dimensions[0]))) + (nomemoverlap(args[1], steps[1], args[0], steps[0], dimensions[0]))) #define IS_BLOCKABLE_REDUCE(esize, vsize) \ (steps[1] == (esize) && abs_ptrdiff(args[1], args[0]) >= (vsize) && \ diff --git a/numpy/_core/src/umath/loops_utils.h.src b/numpy/_core/src/umath/loops_utils.h.src index 5640a1f0b646..828d16ee635c 100644 --- a/numpy/_core/src/umath/loops_utils.h.src +++ b/numpy/_core/src/umath/loops_utils.h.src @@ -16,28 +16,31 @@ #endif /* * nomemoverlap - returns false if two strided arrays have an overlapping - * region in memory. ip_size/op_size = size of the arrays which can be negative - * indicating negative steps. + * region in memory. */ NPY_FINLINE npy_bool -nomemoverlap(char *ip, npy_intp ip_size, char *op, npy_intp op_size) +nomemoverlap(char *ip, npy_intp ip_step, char *op, npy_intp op_step, npy_intp len) { + // Calculate inclusive ranges for offsets of items in arrays. + // The end pointer points to address of the last item. 
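+    // Using (len - 1) rather than len is the actual fix: with len items the
+    // last one starts at base + step * (len - 1), so arrays that merely
+    // touch end-to-start are no longer reported as overlapping.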
+    const npy_intp ip_offset = ip_step * (len - 1);
+    const npy_intp op_offset = op_step * (len - 1);
     char *ip_start, *ip_end, *op_start, *op_end;
-    if (ip_size < 0) {
-        ip_start = ip + ip_size;
+    if (ip_step < 0) {
+        ip_start = ip + ip_offset;
         ip_end = ip;
     }
     else {
         ip_start = ip;
-        ip_end = ip + ip_size;
+        ip_end = ip + ip_offset;
     }
 
-    if (op_size < 0) {
-        op_start = op + op_size;
+    if (op_step < 0) {
+        op_start = op + op_offset;
         op_end = op;
     }
     else {
         op_start = op;
-        op_end = op + op_size;
+        op_end = op + op_offset;
     }
 
     return (ip_start == op_start && op_end == ip_end) ||
            (ip_start > op_end) || (op_start > ip_end);
@@ -48,7 +51,7 @@ nomemoverlap(char *ip, npy_intp ip_size, char *op, npy_intp op_size)
 NPY_FINLINE npy_bool
 is_mem_overlap(const void *src, npy_intp src_step, const void *dst, npy_intp dst_step, npy_intp len)
 {
-    return !(nomemoverlap((char*)src, src_step*len, (char*)dst, dst_step*len));
+    return !(nomemoverlap((char*)src, src_step, (char*)dst, dst_step, len));
 }
 
 /*

From 4f68bdd7853c966ea9199047c4335887c17574e7 Mon Sep 17 00:00:00 2001
From: Moritz Schreiber <68053396+mosc9575@users.noreply.github.com>
Date: Fri, 19 Jul 2024 13:11:47 +0200
Subject: [PATCH 792/980] use copybutton_prompt as regular expression

---
 doc/source/conf.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/doc/source/conf.py b/doc/source/conf.py
index 481201e8e5b6..2019529cb53b 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -286,7 +286,9 @@ def setup(app):
 plot_html_show_formats = False
 plot_html_show_source_link = False
 
-copybutton_prompt_text = ">>> "
+# sphinx-copybutton configurations
+copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
+copybutton_prompt_is_regexp = True
 # -----------------------------------------------------------------------------
 # LaTeX output
 # -----------------------------------------------------------------------------

From 24fdf13094f8a31c59763776b5a5413e8fd6b7ac Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Fri, 19 Jul 2024 10:51:23 +0200
Subject: [PATCH 793/980] BUG: Add object cast to avoid warning with limited API

To be honest, I think we should just delete this from the public API.
But I thought I'll start with this, since at least in principle that
isn't a bug-release thing to backport.

I am not sure setting warnings to errors is wise (at least for Cython
and MSVC). OTOH, the Cython module currently truly does nothing except
include the headers (it doesn't even use the NumPy `.pxd` yet).
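(Background, as far as I can tell: with ``Py_LIMITED_API`` defined,
``Py_INCREF`` is a plain function taking ``PyObject *`` rather than a macro
that casts its argument, so calling it on a ``PyArray_DTypeMeta *`` without
the explicit cast produces an incompatible-pointer warning -- which the
``--werror`` build below would turn into a hard error.)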
--- numpy/_core/include/numpy/dtype_api.h | 2 +- .../examples/limited_api/limited_api_latest.c | 19 +++++++++++++++++++ .../tests/examples/limited_api/meson.build | 10 ++++++++++ numpy/_core/tests/test_limited_api.py | 11 +++++++---- 4 files changed, 37 insertions(+), 5 deletions(-) create mode 100644 numpy/_core/tests/examples/limited_api/limited_api_latest.c diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index c35577fbbcad..9dd3effa3a80 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -449,7 +449,7 @@ typedef PyArray_DTypeMeta *(PyArrayDTypeMeta_CommonDType)( static inline PyArray_DTypeMeta * NPY_DT_NewRef(PyArray_DTypeMeta *o) { - Py_INCREF(o); + Py_INCREF((PyObject *)o); return o; } diff --git a/numpy/_core/tests/examples/limited_api/limited_api_latest.c b/numpy/_core/tests/examples/limited_api/limited_api_latest.c new file mode 100644 index 000000000000..13668f2f0ebf --- /dev/null +++ b/numpy/_core/tests/examples/limited_api/limited_api_latest.c @@ -0,0 +1,19 @@ +#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 + # error "Py_LIMITED_API not defined to Python major+minor version" +#endif + +#include +#include +#include + +static PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "limited_api_latest" +}; + +PyMODINIT_FUNC PyInit_limited_api_latest(void) +{ + import_array(); + import_umath(); + return PyModule_Create(&moduledef); +} diff --git a/numpy/_core/tests/examples/limited_api/meson.build b/numpy/_core/tests/examples/limited_api/meson.build index a6d290304036..65287d8654f5 100644 --- a/numpy/_core/tests/examples/limited_api/meson.build +++ b/numpy/_core/tests/examples/limited_api/meson.build @@ -34,6 +34,16 @@ py.extension_module( limited_api: '3.6', ) +py.extension_module( + 'limited_api_latest', + 'limited_api_latest.c', + c_args: [ + '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', + ], + include_directories: [npy_include_path], + limited_api: py.language_version(), +) + py.extension_module( 'limited_api2', 'limited_api2.pyx', diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index 9b13208d81af..5a23b49171a0 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -47,16 +47,18 @@ def install_temp(tmpdir_factory): pytest.skip("No usable 'meson' found") if sys.platform == "win32": subprocess.check_call(["meson", "setup", + "--werror", "--buildtype=release", "--vsenv", str(srcdir)], cwd=build_dir, ) else: - subprocess.check_call(["meson", "setup", str(srcdir)], + subprocess.check_call(["meson", "setup", "--werror", str(srcdir)], cwd=build_dir ) try: - subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir) + subprocess.check_call( + ["meson", "compile", "-vv"], cwd=build_dir) except subprocess.CalledProcessError as p: print(f"{p.stdout=}") print(f"{p.stderr=}") @@ -84,5 +86,6 @@ def test_limited_api(install_temp): and building a cython extension with the limited API """ - import limited_api1 - import limited_api2 + import limited_api1 # Earliest (3.6) + import limited_api_latest # Latest version (current Python) + import limited_api2 # cython From a5b65191b41a98c5b58a3043efa34c64036ef2e4 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 19 Jul 2024 12:29:54 -0600 Subject: [PATCH 794/980] MAINT: rename requires-gil to freethreading-compatible --- doc/source/f2py/usage.rst | 10 +++++----- numpy/f2py/f2py2e.py | 19 ++++++++++--------- numpy/f2py/tests/test_f2py2e.py | 14 
+++++++-------
 numpy/f2py/tests/util.py        |  4 ++--
 4 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst
index 73aa6ab3fa5d..635455fdb58a 100644
--- a/doc/source/f2py/usage.rst
+++ b/doc/source/f2py/usage.rst
@@ -86,12 +86,12 @@
    Among other options ``--wrap-functions`` is default because it ensures
    maximum portability and compiler independence.
 
-``--[no-]requires-gil``
+``--[no-]freethreading-compatible``
   Create a module that declares it does or doesn't require the GIL. The default
-  is ``--requires-gil`` for backwards compatibility. Inspect the fortran
-  code you are wrapping for thread safety issues before passing
-  ``--no-requires-gil``, as ``f2py`` does not analyze fortran code for thread
-  safety issues.
+  is ``--no-freethreading-compatible`` for backwards compatibility. Inspect the
+  fortran code you are wrapping for thread safety issues before passing
+  ``--freethreading-compatible``, as ``f2py`` does not analyze fortran code for
+  thread safety issues.
 
 ``--include-paths "<path1>:<path2>..."``
   Search include files from given directories.

diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
index 3011c822a85b..ae6017d33ba8 100755
--- a/numpy/f2py/f2py2e.py
+++ b/numpy/f2py/f2py2e.py
@@ -106,12 +106,13 @@
                      functions. --wrap-functions is default because it ensures
                      maximum portability/compiler independence.
 
-  --[no-]requires-gil Create a module that declares it does or doesn't
-                     require the GIL. The default is --requires-gil for
-                     backward compatibility. Inspect the Fortran code you are
-                     wrapping for thread safety issues before passing
-                     --no-requires-gil, as ``f2py`` does not analyze fortran
-                     code for thread safety issues.
+  --[no-]freethreading-compatible Create a module that declares it does or
+                     doesn't require the GIL. The default is
+                     --no-freethreading-compatible for backward
+                     compatibility. Inspect the Fortran code you are wrapping for
+                     thread safety issues before passing
+                     --freethreading-compatible, as f2py does not analyze
+                     fortran code for thread safety issues.
 
   --include-paths <path1>:<path2>:...   Search include files from the given
                      directories.
@@ -269,9 +270,9 @@ def scaninputline(inputline):
             cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:]
         elif l == '--skip-empty-wrappers':
             emptygen = False
-        elif l == '--requires-gil':
+        elif l == '--no-freethreading-compatible':
             requires_gil = 1
-        elif l == '--no-requires-gil':
+        elif l == '--freethreading-compatible':
             requires_gil = 0
         elif l[0] == '-':
             errmess('Unknown option %s\n' % repr(l))
@@ -633,7 +634,7 @@ def run_compile():
     sysinfo_flags = [f[7:] for f in sysinfo_flags]
 
     _reg2 = re.compile(
-        r'--((no-|)(wrap-functions|lower|requires-gil)|debug-capi|quiet|skip-empty-wrappers)|-include')
+        r'--((no-|)(wrap-functions|lower|freethreading-compatible)|debug-capi|quiet|skip-empty-wrappers)|-include')
     f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)]
     sys.argv = [_m for _m in sys.argv if _m not in f2py_flags]
     f2py_flags2 = []

diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py
index 0fea94cd6ad4..ac6b2acf62e8 100644
--- a/numpy/f2py/tests/test_f2py2e.py
+++ b/numpy/f2py/tests/test_f2py2e.py
@@ -751,16 +751,16 @@ def test_npdistop(hello_world_f90, monkeypatch):
 
 @pytest.mark.skipif(sys.version_info <= (3, 12),
                     reason='Python 3.12 or newer required')
-def test_requires_gil(hello_world_f90, monkeypatch):
+def test_no_freethreading_compatible(hello_world_f90, monkeypatch):
     """
-    CLI :: --requires-gil
+    CLI :: --no-freethreading-compatible
     """
     ipath = Path(hello_world_f90)
-    monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --requires-gil'.split())
+    monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --no-freethreading-compatible'.split())
 
     with util.switchdir(ipath.parent):
         f2pycli()
-    cmd = f"{sys.executable} -c \"import blah; blah.hi();"
+    cmd = f"{sys.executable} -c \"import sys; print(sys._is_gil_enabled()); import blah; blah.hi();"
     if NOGIL_BUILD:
         cmd += "import sys; assert sys._is_gil_enabled() is True\""
     else:
@@ -776,12 +776,12 @@ def test_requires_gil(hello_world_f90, monkeypatch):
 
 @pytest.mark.skipif(sys.version_info <= (3, 12),
                     reason='Python 3.12 or newer required')
-def test_no_requires_gil(hello_world_f90, monkeypatch):
+def test_freethreading_compatible(hello_world_f90, monkeypatch):
     """
-    CLI :: --no-requires-gil
+    CLI :: --freethreading-compatible
     """
     ipath = Path(hello_world_f90)
-    monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --no-requires-gil'.split())
+    monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --freethreading-compatible'.split())
 
     with util.switchdir(ipath.parent):
         f2pycli()

diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index 37d47ae41d4f..0cbfbfe50a8c 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -130,9 +130,9 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None):
     if module_name is None:
         module_name = get_temp_module_name()
     gil_options = []
-    if '--requires-gil' not in options and '--no-requires-gil' not in options:
+    if '--freethreading-compatible' not in options and '--no-freethreading-compatible' not in options:
         # default to disabling the GIL if unset in options
-        gil_options = ['--no-requires-gil']
+        gil_options = ['--freethreading-compatible']
     f2py_opts = ["-c", "-m", module_name] + options + gil_options + f2py_sources
     f2py_opts += ["--backend", "meson"]
     if skip:

From 26277f5274c52e394ef8fc5a9203a15629e61524 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Fri, 19 Jul 2024 12:33:28 -0600
Subject: [PATCH 795/980] MAINT: add release note

---
 doc/release/upcoming_changes/26981.new_feature.rst | 9 +++++++++
 1 file
changed, 9 insertions(+) create mode 100644 doc/release/upcoming_changes/26981.new_feature.rst diff --git a/doc/release/upcoming_changes/26981.new_feature.rst b/doc/release/upcoming_changes/26981.new_feature.rst new file mode 100644 index 000000000000..f466faeb7590 --- /dev/null +++ b/doc/release/upcoming_changes/26981.new_feature.rst @@ -0,0 +1,9 @@ +``f2py`` can generate freethreading-compatible C extensions +----------------------------------------------------------- + +Pass ``--freethreading-compatible`` to the f2py CLI tool to produce a C +extension marked as compatible with the free threading CPython +interpreter. Doing so prevents the interpreter from re-enabling the GIL at +runtime when it imports the C extension. Note that ``f2py`` does not analyze +fortran code for thread safety, so you must verify that the wrapped fortran +code is thread safe before marking the extension as compatible. From ecf44cbd4bd5355a17fd91738dff874fe2704f99 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 19 Jul 2024 12:49:51 -0600 Subject: [PATCH 796/980] TST: fix errant print in test --- numpy/f2py/tests/test_f2py2e.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index ac6b2acf62e8..9944da003a8b 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -760,7 +760,7 @@ def test_no_freethreading_compatible(hello_world_f90, monkeypatch): with util.switchdir(ipath.parent): f2pycli() - cmd = f"{sys.executable} -c \"import sys; print(sys._is_gil_enabled()); import blah; blah.hi();" + cmd = f"{sys.executable} -c \"import blah; blah.hi();" if NOGIL_BUILD: cmd += "import sys; assert sys._is_gil_enabled() is True\"" else: From de8528b281db1e21ee701d5bb796557325fde6a7 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 19 Jul 2024 13:21:09 -0600 Subject: [PATCH 797/980] DOC: fix ctypes example --- numpy/ctypeslib.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index 5ab89ba7dd15..ea94ad30852e 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -503,17 +503,17 @@ def as_ctypes_type(dtype): -------- Converting a simple dtype: - >>> dt = np.dtype('i4') + >>> dt = np.dtype('int8') >>> ctype = np.ctypeslib.as_ctypes_type(dt) >>> ctype - + Converting a structured dtype: >>> dt = np.dtype([('x', 'i4'), ('y', 'f4')]) >>> ctype = np.ctypeslib.as_ctypes_type(dt) >>> ctype - + """ return _ctype_from_dtype(_dtype(dtype)) From 5cf3455f242594be1ad7b1bb5857165601e369c1 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 19 Jul 2024 14:35:43 -0600 Subject: [PATCH 798/980] MAINT: mark scipy-openblas nightly tests as allowed to fail --- .github/workflows/linux_blas.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index b7ac412499b3..2eee0ea72800 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -111,7 +111,7 @@ jobs: shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: TERM: xterm-256color - + continue-on-error: true run: | pip install pytest pytest-xdist hypothesis typing_extensions spin test -j auto From 8af10cd7972129af5bc0a42140988a8f878293c9 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 19 Jul 2024 16:04:25 -0600 Subject: [PATCH 799/980] MAINT: add a comment explaining why we are doing this --- .github/workflows/linux_blas.yml | 2 ++ 1 file changed, 2 
insertions(+) diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 2eee0ea72800..e3d032ee25d4 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -111,6 +111,8 @@ jobs: shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: TERM: xterm-256color + # TODO: remove when scipy-openblas nightly tests aren't failing anymore. + # xref gh-26824 continue-on-error: true run: | pip install pytest pytest-xdist hypothesis typing_extensions From 240e8286d83dbeb417b211c397d5ad8f179e7f34 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 20 Jul 2024 03:52:48 +0200 Subject: [PATCH 800/980] ENH: Add ``__slots__`` to private (sub)classes of ``numpy.lib._index_tricks_impl`` --- numpy/lib/_index_tricks_impl.py | 12 ++++++++++-- numpy/ma/extras.py | 18 +++++++++++------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 62f1d213b29f..348d309a3549 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -140,6 +140,7 @@ class nd_grid: Users should use these pre-defined instances instead of using `nd_grid` directly. """ + __slots__ = ('sparse',) def __init__(self, sparse=False): self.sparse = sparse @@ -261,6 +262,7 @@ class MGridClass(nd_grid): (3, 4, 5, 6) """ + __slots__ = () def __init__(self): super().__init__(sparse=False) @@ -312,6 +314,7 @@ class OGridClass(nd_grid): array([[0, 1, 2, 3, 4]])) """ + __slots__ = () def __init__(self): super().__init__(sparse=True) @@ -326,6 +329,8 @@ class AxisConcatenator: For detailed documentation on usage, see `r_`. """ + __slots__ = ('axis', 'matrix', 'trans1d', 'ndmin') + # allow ma.mr_ to override this concatenate = staticmethod(_nx.concatenate) makemat = staticmethod(matrixlib.matrix) @@ -539,6 +544,7 @@ class RClass(AxisConcatenator): matrix([[1, 2, 3, 4, 5, 6]]) """ + __slots__ = () def __init__(self): AxisConcatenator.__init__(self, 0) @@ -571,6 +577,7 @@ class CClass(AxisConcatenator): array([[1, 2, 3, ..., 4, 5, 6]]) """ + __slots__ = () def __init__(self): AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) @@ -771,6 +778,7 @@ class IndexExpression: array([2, 4]) """ + __slots__ = ('maketuple',) def __init__(self, maketuple): self.maketuple = maketuple @@ -1023,7 +1031,7 @@ def diag_indices_from(arr): Examples -------- - + Create a 4 by 4 array. >>> a = np.arange(16).reshape(4, 4) @@ -1032,7 +1040,7 @@ def diag_indices_from(arr): [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) - + Get the indices of the diagonal elements. >>> di = np.diag_indices_from(a) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index dc1f8658f82a..b749ad3dca97 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1014,7 +1014,7 @@ def compress_rows(a): ... 
[0, 0, 0]]) >>> np.ma.compress_rows(a) array([[6, 7, 8]]) - + """ a = asarray(a) if a.ndim != 2: @@ -1387,7 +1387,7 @@ def setxor1d(ar1, ar2, assume_unique=False): >>> ar2 = np.ma.array([2, 3, 5, 7, 5]) >>> np.ma.setxor1d(ar1, ar2) masked_array(data=[1, 4, 5, 7], - mask=False, + mask=False, fill_value=999999) """ @@ -1569,8 +1569,8 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): tup = (None, slice(None)) # if y is None: - # Check if we can guarantee that the integers in the (N - ddof) - # normalisation can be accurately represented with single-precision + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision # before computing the dot product. if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: xnm_dtype = np.float64 @@ -1591,8 +1591,8 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): x._sharedmask = False y._sharedmask = False x = ma.concatenate((x, y), axis) - # Check if we can guarantee that the integers in the (N - ddof) - # normalisation can be accurately represented with single-precision + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision # before computing the dot product. if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: xnm_dtype = np.float64 @@ -1673,7 +1673,7 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): [ True, True, True, True]], fill_value=1e+20, dtype=float64) - + """ # Check inputs if ddof is not None and ddof != int(ddof): @@ -1791,6 +1791,8 @@ class MAxisConcatenator(AxisConcatenator): mr_class """ + __slots__ = () + concatenate = staticmethod(concatenate) @classmethod @@ -1828,6 +1830,8 @@ class mr_class(MAxisConcatenator): fill_value=999999) """ + __slots__ = () + def __init__(self): MAxisConcatenator.__init__(self, 0) From 623822ff005c0634ce6975295a7aa1b10f58213f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 21 Jul 2024 08:48:31 -0600 Subject: [PATCH 801/980] MAINT: Update main after 2.0.1 release. --- .mailmap | 1 + doc/changelog/2.0.1-changelog.rst | 52 +++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.0.1-notes.rst | 74 ++++++++++++++++++++++++++++++ 4 files changed, 128 insertions(+) create mode 100644 doc/changelog/2.0.1-changelog.rst create mode 100644 doc/source/release/2.0.1-notes.rst diff --git a/.mailmap b/.mailmap index a3b1a3a5856f..143ad1c4a9b2 100644 --- a/.mailmap +++ b/.mailmap @@ -553,6 +553,7 @@ Rohit Goswami Roland Kaufmann Roman Yurchak Ronan Lamy Ronan Lamy +Rostan Tabet Roy Jacobson Russell Hewett Ryan Blakemore diff --git a/doc/changelog/2.0.1-changelog.rst b/doc/changelog/2.0.1-changelog.rst new file mode 100644 index 000000000000..5a0b9dd207fc --- /dev/null +++ b/doc/changelog/2.0.1-changelog.rst @@ -0,0 +1,52 @@ + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* @vahidmech + +* Alex Herbert + +* Charles Harris +* Giovanni Del Monte + +* Leo Singer +* Lysandros Nikolaou +* Matti Picus +* Nathan Goldbaum +* Patrick J. Roddy + +* Raghuveer Devulapalli +* Ralf Gommers +* Rostan Tabet + +* Sebastian Berg +* Tyler Reddy +* Yannik Wicke + + +Pull requests merged +==================== + +A total of 24 pull requests were merged for this release. 
+
+* `#26711 <https://github.com/numpy/numpy/pull/26711>`__: MAINT: prepare 2.0.x for further development
+* `#26792 <https://github.com/numpy/numpy/pull/26792>`__: TYP: fix incorrect import in ``ma/extras.pyi`` stub
+* `#26793 <https://github.com/numpy/numpy/pull/26793>`__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions``
+* `#26794 <https://github.com/numpy/numpy/pull/26794>`__: DOC: Remove mention of NaN and NAN aliases from constants
+* `#26821 <https://github.com/numpy/numpy/pull/26821>`__: BLD: Fix x86-simd-sort build failure on openBSD
+* `#26822 <https://github.com/numpy/numpy/pull/26822>`__: BUG: Ensure output order follows input in numpy.fft
+* `#26823 <https://github.com/numpy/numpy/pull/26823>`__: TYP: fix missing sys import in numeric.pyi
+* `#26832 <https://github.com/numpy/numpy/pull/26832>`__: DOC: remove hack to override _add_newdocs_scalars (#26826)
+* `#26835 <https://github.com/numpy/numpy/pull/26835>`__: BUG: avoid side-effect of 'include complex.h'
+* `#26836 <https://github.com/numpy/numpy/pull/26836>`__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt``
+* `#26837 <https://github.com/numpy/numpy/pull/26837>`__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes
+* `#26856 <https://github.com/numpy/numpy/pull/26856>`__: DOC: Update some documentation
+* `#26868 <https://github.com/numpy/numpy/pull/26868>`__: BUG: fancy indexing copy
+* `#26869 <https://github.com/numpy/numpy/pull/26869>`__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar``
+* `#26870 <https://github.com/numpy/numpy/pull/26870>`__: BUG: Handle --f77flags and --f90flags for meson [wheel build]
+* `#26887 <https://github.com/numpy/numpy/pull/26887>`__: BUG: Fix new DTypes and new string promotion when signature is...
+* `#26888 <https://github.com/numpy/numpy/pull/26888>`__: BUG: remove numpy.f2py from excludedimports
+* `#26959 <https://github.com/numpy/numpy/pull/26959>`__: BUG: Quantile closest_observation to round to nearest even order
+* `#26960 <https://github.com/numpy/numpy/pull/26960>`__: BUG: Fix off-by-one error in amount of characters in strip
+* `#26961 <https://github.com/numpy/numpy/pull/26961>`__: API: Partially revert unique with return_inverse
+* `#26962 <https://github.com/numpy/numpy/pull/26962>`__: BUG,MAINT: Fix utf-8 character stripping memory access
+* `#26963 <https://github.com/numpy/numpy/pull/26963>`__: BUG: Fix out-of-bound minimum offset for in1d table method
+* `#26971 <https://github.com/numpy/numpy/pull/26971>`__: BUG: fix f2py tests to work with v2 API
+* `#26995 <https://github.com/numpy/numpy/pull/26995>`__: BUG: Add object cast to avoid warning with limited API
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 5226ef89a764..cad71725fe94 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -6,6 +6,7 @@ Release notes
    :maxdepth: 2
 
    2.1.0
+   2.0.1
    2.0.0
    1.26.4
    1.26.3
diff --git a/doc/source/release/2.0.1-notes.rst b/doc/source/release/2.0.1-notes.rst
new file mode 100644
index 000000000000..a49f2ee36abd
--- /dev/null
+++ b/doc/source/release/2.0.1-notes.rst
@@ -0,0 +1,74 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 2.0.1 Release Notes
+==========================
+
+NumPy 2.0.1 is a maintenance release that fixes bugs and regressions
+discovered after the 2.0.0 release. NumPy 2.0.1 is the last planned
+release in the 2.0.x series, 2.1.0rc1 should be out shortly.
+
+The Python versions supported by this release are 3.9-3.12.
+
+Improvements
+============
+
+``np.quantile`` with method ``closest_observation`` chooses nearest even order statistic
+----------------------------------------------------------------------------------------
+This changes the definition of nearest for border cases from the nearest odd
+order statistic to nearest even order statistic. The numpy implementation now
+matches other reference implementations.
+
+(`gh-26656 <https://github.com/numpy/numpy/pull/26656>`__)
+
+Contributors
+============
+
+A total of 15 people contributed to this release.  People with a "+" by their
+names contributed a patch for the first time.
+
+* @vahidmech +
+* Alex Herbert +
+* Charles Harris
+* Giovanni Del Monte +
+* Leo Singer
+* Lysandros Nikolaou
+* Matti Picus
+* Nathan Goldbaum
+* Patrick J. Roddy +
+* Raghuveer Devulapalli
+* Ralf Gommers
+* Rostan Tabet +
+* Sebastian Berg
+* Tyler Reddy
+* Yannik Wicke +
+
+Pull requests merged
+====================
+
+A total of 24 pull requests were merged for this release.
+
+* `#26711 <https://github.com/numpy/numpy/pull/26711>`__: MAINT: prepare 2.0.x for further development
+* `#26792 <https://github.com/numpy/numpy/pull/26792>`__: TYP: fix incorrect import in ``ma/extras.pyi`` stub
+* `#26793 <https://github.com/numpy/numpy/pull/26793>`__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions``
+* `#26794 <https://github.com/numpy/numpy/pull/26794>`__: DOC: Remove mention of NaN and NAN aliases from constants
+* `#26821 <https://github.com/numpy/numpy/pull/26821>`__: BLD: Fix x86-simd-sort build failure on openBSD
+* `#26822 <https://github.com/numpy/numpy/pull/26822>`__: BUG: Ensure output order follows input in numpy.fft
+* `#26823 <https://github.com/numpy/numpy/pull/26823>`__: TYP: fix missing sys import in numeric.pyi
+* `#26832 <https://github.com/numpy/numpy/pull/26832>`__: DOC: remove hack to override _add_newdocs_scalars (#26826)
+* `#26835 <https://github.com/numpy/numpy/pull/26835>`__: BUG: avoid side-effect of 'include complex.h'
+* `#26836 <https://github.com/numpy/numpy/pull/26836>`__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt``
+* `#26837 <https://github.com/numpy/numpy/pull/26837>`__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes
+* `#26856 <https://github.com/numpy/numpy/pull/26856>`__: DOC: Update some documentation
+* `#26868 <https://github.com/numpy/numpy/pull/26868>`__: BUG: fancy indexing copy
+* `#26869 <https://github.com/numpy/numpy/pull/26869>`__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar``
+* `#26870 <https://github.com/numpy/numpy/pull/26870>`__: BUG: Handle --f77flags and --f90flags for meson [wheel build]
+* `#26887 <https://github.com/numpy/numpy/pull/26887>`__: BUG: Fix new DTypes and new string promotion when signature is...
+* `#26888 <https://github.com/numpy/numpy/pull/26888>`__: BUG: remove numpy.f2py from excludedimports
+* `#26959 <https://github.com/numpy/numpy/pull/26959>`__: BUG: Quantile closest_observation to round to nearest even order
+* `#26960 <https://github.com/numpy/numpy/pull/26960>`__: BUG: Fix off-by-one error in amount of characters in strip
+* `#26961 <https://github.com/numpy/numpy/pull/26961>`__: API: Partially revert unique with return_inverse
+* `#26962 <https://github.com/numpy/numpy/pull/26962>`__: BUG,MAINT: Fix utf-8 character stripping memory access
+* `#26963 <https://github.com/numpy/numpy/pull/26963>`__: BUG: Fix out-of-bound minimum offset for in1d table method
+* `#26971 <https://github.com/numpy/numpy/pull/26971>`__: BUG: fix f2py tests to work with v2 API
+* `#26995 <https://github.com/numpy/numpy/pull/26995>`__: BUG: Add object cast to avoid warning with limited API

From 267e44c80ed8cb54a337668be1da53ac3677b27b Mon Sep 17 00:00:00 2001
From: otieno-juma
Date: Mon, 17 Jun 2024 15:20:07 -0500
Subject: [PATCH 802/980] DOC: AI generated examples for ma.reshape

I used AI Llama 3 to help create these. @bmwoodruff and I reviewed them.

[skip azp] [skip cirrus]

---
 numpy/ma/core.py | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 04f6b434b731..4072067642ef 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -7506,6 +7506,37 @@ def reshape(a, new_shape, order='C'):
     --------
     MaskedArray.reshape : equivalent function
 
+    Examples
+    --------
+    Reshaping a 1-D array:
+
+    >>> a = np.ma.array([1, 2, 3, 4])
+    >>> np.ma.reshape(a, (2, 2))
+    masked_array(
+      data=[[1, 2],
+            [3, 4]],
+      mask=False,
+      fill_value=999999)
+
+    Reshaping a 2-D array:
+
+    >>> b = np.ma.array([[1, 2], [3, 4]])
+    >>> np.ma.reshape(b, (1, 4))
+    masked_array(data=[[1, 2, 3, 4]],
+                 mask=False,
+           fill_value=999999)
+
+    Reshaping a 1-D array with a mask:
+
+    >>> c = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False])
+    >>> np.ma.reshape(c, (2, 2))
+    masked_array(
+      data=[[1, --],
+            [3, 4]],
+      mask=[[False,  True],
+            [False, False]],
+      fill_value=999999)
+
     """
     # We can't use 'frommethod', it whine about some parameters. Dmmit.
try: From e029d351c07611838dc6ef3b64270df39784a5be Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 19 Jul 2024 03:50:24 +0200 Subject: [PATCH 803/980] TYP: Covariant ``numpy.ndenumerate`` type parameter --- numpy/__init__.pyi | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a3b5726b5a24..d33c2688d2f6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -18,6 +18,7 @@ from numpy._typing import ( # Arrays ArrayLike, NDArray, + _ArrayLike, _SupportsArray, _NestedSequence, _FiniteNestedSequence, @@ -3422,8 +3423,12 @@ def _no_nep50_warning() -> Generator[None, None, None]: ... def _get_promotion_state() -> str: ... def _set_promotion_state(state: str, /) -> None: ... -class ndenumerate(Generic[_ScalarType]): - iter: flatiter[NDArray[_ScalarType]] +_ScalarType_co = TypeVar("_ScalarType_co", bound=generic, covariant=True) + +class ndenumerate(Generic[_ScalarType_co]): + @property + def iter(self) -> flatiter[NDArray[_ScalarType_co]]: ... + @overload def __new__( cls, arr: _FiniteNestedSequence[_SupportsArray[dtype[_ScalarType]]], @@ -3440,7 +3445,8 @@ class ndenumerate(Generic[_ScalarType]): def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[float64]: ... @overload def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[complex128]: ... - def __next__(self: ndenumerate[_ScalarType]) -> tuple[_Shape, _ScalarType]: ... + + def __next__(self) -> tuple[_Shape, _ScalarType_co]: ... def __iter__(self: _T) -> _T: ... class ndindex: From c7a1419a79dd6e1c874228d77ae990a2cfd1c630 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 18 Jul 2024 23:52:57 +0200 Subject: [PATCH 804/980] TYP,BUG: Fix potentially unresolved typevar in ``numpy.median`` and ``percentile`` --- numpy/lib/_function_base_impl.pyi | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 0678cfaf98f5..b037a782c218 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -460,10 +460,20 @@ def median( keepdims: bool = ..., ) -> Any: ... @overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: None | _ShapeLike, + out: _ArrayType, + /, + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> _ArrayType: ... +@overload def median( a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., - out: _ArrayType = ..., + *, + out: _ArrayType, overwrite_input: bool = ..., keepdims: bool = ..., ) -> _ArrayType: ... @@ -620,14 +630,27 @@ def percentile( def percentile( a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., + axis: None | _ShapeLike, + out: _ArrayType, + /, overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: bool = ..., *, weights: None | _ArrayLikeFloat_co = ..., ) -> _ArrayType: ... +@overload +def percentile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None | _ShapeLike = ..., + *, + out: _ArrayType, + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + weights: None | _ArrayLikeFloat_co = ..., +) -> _ArrayType: ... 
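+# NOTE: ``out`` is accepted positionally or by keyword in the paired
+# overloads above; a single overload with ``out: _ArrayType = ...`` could
+# leave the typevar unsolved when ``out`` is omitted.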
 # NOTE: Not an alias, but they do have identical signatures
 # (that we can reuse)
 quantile = percentile

From c333bfbe07d013847dc4b392d4c2b1bc07cfaf46 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 18 Jul 2024 23:30:09 +0200
Subject: [PATCH 805/980] TYP,BUG: Type annotations for ``numpy.trapezoid``

---
 numpy/__init__.pyi                            |  1 +
 numpy/lib/_function_base_impl.pyi             | 64 +++++++++++++++++++
 .../tests/data/reveal/lib_function_base.pyi   | 18 ++++++
 3 files changed, 83 insertions(+)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index a3b5726b5a24..e13c8be38c82 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -462,6 +462,7 @@ from numpy.lib._function_base_impl import (
     append as append,
     interp as interp,
     quantile as quantile,
+    trapezoid as trapezoid,
 )
 
 from numpy.lib._histograms_impl import (
diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi
index 0678cfaf98f5..6f3b79486b00 100644
--- a/numpy/lib/_function_base_impl.pyi
+++ b/numpy/lib/_function_base_impl.pyi
@@ -13,6 +13,7 @@ from typing import (
 from numpy import (
     vectorize as vectorize,
     generic,
+    integer,
     floating,
     complexfloating,
     intp,
@@ -21,6 +22,7 @@ from numpy import (
     timedelta64,
     datetime64,
     object_,
+    bool as bool_,
     _OrderKACF,
 )
 
@@ -633,6 +635,68 @@ def percentile(
 # (that we can reuse)
 quantile = percentile
 
+
+_SCT_fm = TypeVar(
+    "_SCT_fm",
+    bound=floating[Any] | complexfloating[Any, Any] | timedelta64,
+)
+
+class _SupportsRMulFloat(Protocol[_T_co]):
+    def __rmul__(self, other: float, /) -> _T_co: ...
+
+@overload
+def trapezoid(  # type: ignore[overload-overlap]
+    y: Sequence[_FloatLike_co],
+    x: Sequence[_FloatLike_co] | None = ...,
+    dx: float = ...,
+    axis: SupportsIndex = ...,
+) -> float64: ...
+@overload
+def trapezoid(
+    y: Sequence[_ComplexLike_co],
+    x: Sequence[_ComplexLike_co] | None = ...,
+    dx: float = ...,
+    axis: SupportsIndex = ...,
+) -> complex128: ...
+@overload
+def trapezoid(
+    y: _ArrayLike[bool_ | integer[Any]],
+    x: _ArrayLike[bool_ | integer[Any]] | None = ...,
+    dx: float = ...,
+    axis: SupportsIndex = ...,
+) -> float64 | NDArray[float64]: ...
+@overload
+def trapezoid(  # type: ignore[overload-overlap]
+    y: _ArrayLikeObject_co,
+    x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = ...,
+    dx: float = ...,
+    axis: SupportsIndex = ...,
+) -> float | NDArray[object_]: ...
+@overload
+def trapezoid(
+    y: _ArrayLike[_SCT_fm],
+    x: _ArrayLike[_SCT_fm] | _ArrayLikeInt_co | None = ...,
+    dx: float = ...,
+    axis: SupportsIndex = ...,
+) -> _SCT_fm | NDArray[_SCT_fm]: ...
+@overload
+def trapezoid(
+    y: Sequence[_SupportsRMulFloat[_T]],
+    x: Sequence[_SupportsRMulFloat[_T] | _T] | None = ...,
+    dx: float = ...,
+    axis: SupportsIndex = ...,
+) -> _T: ...
+@overload
+def trapezoid(
+    y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+    x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = ...,
+    dx: float = ...,
+    axis: SupportsIndex = ...,
+) -> (
+    floating[Any] | complexfloating[Any, Any] | timedelta64
+    | NDArray[floating[Any] | complexfloating[Any, Any] | timedelta64 | object_]
+): ...
+ def meshgrid( *xi: ArrayLike, copy: bool = ..., diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 72974dce64bf..b630a130633a 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -1,4 +1,5 @@ import sys +from fractions import Fraction from typing import Any from collections.abc import Callable @@ -14,6 +15,8 @@ vectorized_func: np.vectorize f8: np.float64 AR_LIKE_f8: list[float] +AR_LIKE_c16: list[complex] +AR_LIKE_O: list[Fraction] AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] @@ -159,6 +162,21 @@ assert_type(np.quantile(AR_f8, [0.5], keepdims=True), Any) assert_type(np.quantile(AR_f8, [0.5], axis=[1]), Any) assert_type(np.quantile(AR_f8, [0.5], out=AR_c16), npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_LIKE_f8), np.float64) +assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_f8), np.float64) +assert_type(np.trapezoid(AR_LIKE_c16), np.complex128) +assert_type(np.trapezoid(AR_LIKE_c16, AR_LIKE_f8), np.complex128) +assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_c16), np.complex128) +assert_type(np.trapezoid(AR_LIKE_O), float) +assert_type(np.trapezoid(AR_LIKE_O, AR_LIKE_f8), float) +assert_type(np.trapezoid(AR_f8), np.float64 | npt.NDArray[np.float64]) +assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) +assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) +assert_type(np.trapezoid(AR_O), float | npt.NDArray[np.object_]) +assert_type(np.trapezoid(AR_O, AR_LIKE_f8), float | npt.NDArray[np.object_]) + assert_type(np.meshgrid(AR_f8, AR_i8, copy=False), tuple[npt.NDArray[Any], ...]) assert_type(np.meshgrid(AR_f8, AR_i8, AR_c16, indexing="ij"), tuple[npt.NDArray[Any], ...]) From e1e9df21fee7831ddfeefbf62b5170fa619990b9 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Jul 2024 04:19:47 +0200 Subject: [PATCH 806/980] TYP: type hint `numpy.polynomial` --- numpy/polynomial/__init__.pyi | 38 +- numpy/polynomial/_polybase.pyi | 341 ++++-- numpy/polynomial/_polytypes.pyi | 1003 ++++++++++++++++++ numpy/polynomial/chebyshev.pyi | 240 ++++- numpy/polynomial/hermite.pyi | 153 ++- numpy/polynomial/hermite_e.pyi | 153 ++- numpy/polynomial/laguerre.pyi | 147 ++- numpy/polynomial/legendre.pyi | 132 ++- numpy/polynomial/polynomial.pyi | 119 ++- numpy/polynomial/polyutils.pyi | 439 +++++++- numpy/typing/tests/data/reveal/polyutils.pyi | 235 ++++ 11 files changed, 2645 insertions(+), 355 deletions(-) create mode 100644 numpy/polynomial/_polytypes.pyi create mode 100644 numpy/typing/tests/data/reveal/polyutils.pyi diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index 0fc5ef0f53e4..d36605b89250 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -1,21 +1,23 @@ -from numpy._pytesttester import PytestTester +from typing import Final, Literal -from numpy.polynomial import ( - chebyshev as chebyshev, - hermite as hermite, - hermite_e as hermite_e, - laguerre as laguerre, - legendre as legendre, - polynomial as polynomial, -) -from numpy.polynomial.chebyshev import Chebyshev as Chebyshev -from numpy.polynomial.hermite import Hermite as Hermite -from numpy.polynomial.hermite_e import HermiteE as HermiteE -from numpy.polynomial.laguerre import Laguerre as Laguerre -from 
numpy.polynomial.legendre import Legendre as Legendre
-from numpy.polynomial.polynomial import Polynomial as Polynomial
+from . import chebyshev, hermite, hermite_e, laguerre, legendre, polynomial
+from .polynomial import Polynomial
+from .chebyshev import Chebyshev
+from .legendre import Legendre
+from .hermite import Hermite
+from .hermite_e import HermiteE
+from .laguerre import Laguerre
 
-__all__: list[str]
-test: PytestTester
+__all__ = [
+    "set_default_printstyle",
+    "polynomial", "Polynomial",
+    "chebyshev", "Chebyshev",
+    "legendre", "Legendre",
+    "hermite", "Hermite",
+    "hermite_e", "HermiteE",
+    "laguerre", "Laguerre",
+]
 
-def set_default_printstyle(style): ...
+def set_default_printstyle(style: Literal["ascii", "unicode"]) -> None: ...
+
+from numpy._pytesttester import PytestTester as _PytestTester
+test: Final[_PytestTester]
diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi
index 25c740dbedd0..9baed2ea91f9 100644
--- a/numpy/polynomial/_polybase.pyi
+++ b/numpy/polynomial/_polybase.pyi
@@ -1,71 +1,290 @@
-import abc
-from typing import Any, ClassVar
+import sys
+from collections.abc import Iterator, Mapping, Sequence
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    ClassVar,
+    Literal,
+    SupportsComplex,
+    SupportsIndex,
+    SupportsInt,
+    TypeAlias,
+    TypeGuard,
+    TypeVar,
+    overload,
+)
 
-__all__: list[str]
+import numpy as np
+import numpy.typing as npt
+from numpy._typing import _ArrayLikeInt_co
 
-class ABCPolyBase(abc.ABC):
+from ._polytypes import (
+    _AnySeriesND,
+    _Array1D,
+    _AnyComplexSeriesND,
+    _AnyObjectSeriesND,
+    _AnyScalar,
+    _AnySeries1D,
+    _Interval,
+    _CoefArray1D,
+    _AnyNumberScalar,
+    _SupportsLenAndGetItem,
+    _Tuple2,
+)
+
+if sys.version_info >= (3, 11):
+    from typing import LiteralString
+elif TYPE_CHECKING:
+    from typing_extensions import LiteralString
+else:
+    LiteralString: TypeAlias = str
+
+__all__ = ["ABCPolyBase"]
+
+
+_Self = TypeVar("_Self", bound="ABCPolyBase")
+_Size = TypeVar("_Size", bound=int)
+
+_AnyOther: TypeAlias = ABCPolyBase | _AnyScalar | _AnySeries1D
+_Hundred: TypeAlias = Literal[100]
+
+class ABCPolyBase:
     __hash__: ClassVar[None]  # type: ignore[assignment]
     __array_ufunc__: ClassVar[None]
-    maxpower: ClassVar[int]
-    coef: Any
-    @property
-    def symbol(self) -> str: ...
-    @property
-    @abc.abstractmethod
-    def domain(self): ...
-    @property
-    @abc.abstractmethod
-    def window(self): ...
+
+    basis_name: ClassVar[None | LiteralString]
+    maxpower: ClassVar[_Hundred]
+    _superscript_mapping: ClassVar[Mapping[int, str]]
+    _subscript_mapping: ClassVar[Mapping[int, str]]
+    _use_unicode: ClassVar[bool]
+
+    coef: _Array1D[np.number[Any]]
+    domain: _Interval[Any]
+    window: _Interval[Any]
+
+    _symbol: LiteralString
     @property
-    @abc.abstractmethod
-    def basis_name(self): ...
-    def has_samecoef(self, other): ...
-    def has_samedomain(self, other): ...
-    def has_samewindow(self, other): ...
-    def has_sametype(self, other): ...
-    def __init__(self, coef, domain=..., window=..., symbol: str = ...) -> None: ...
-    def __format__(self, fmt_str): ...
-    def __call__(self, arg): ...
-    def __iter__(self): ...
-    def __len__(self): ...
-    def __neg__(self): ...
-    def __pos__(self): ...
-    def __add__(self, other): ...
-    def __sub__(self, other): ...
-    def __mul__(self, other): ...
-    def __truediv__(self, other): ...
-    def __floordiv__(self, other): ...
-    def __mod__(self, other): ...
-    def __divmod__(self, other): ...
-    def __pow__(self, other): ...
-    def __radd__(self, other): ...
-    def __rsub__(self, other): ...
-    def __rmul__(self, other): ...
-    def __rdiv__(self, other): ...
-    def __rtruediv__(self, other): ...
-    def __rfloordiv__(self, other): ...
-    def __rmod__(self, other): ...
-    def __rdivmod__(self, other): ...
-    def __eq__(self, other): ...
-    def __ne__(self, other): ...
-    def copy(self): ...
-    def degree(self): ...
-    def cutdeg(self, deg): ...
-    def trim(self, tol=...): ...
-    def truncate(self, size): ...
-    def convert(self, domain=..., kind=..., window=...): ...
-    def mapparms(self): ...
-    def integ(self, m=..., k = ..., lbnd=...): ...
-    def deriv(self, m=...): ...
-    def roots(self): ...
-    def linspace(self, n=..., domain=...): ...
+    def symbol(self, /) -> LiteralString: ...
+
+    def __init__(
+        self,
+        /,
+        coef: _AnySeries1D,
+        domain: None | _AnySeries1D = ...,
+        window: None | _AnySeries1D = ...,
+        symbol: str = ...,
+    ) -> None: ...
+
+    @overload
+    def __call__(  # type: ignore[overload-overlap]
+        self, /,
+        arg: complex | np.complexfloating[Any, Any]
+    ) -> np.complex128: ...
+    @overload
+    def __call__(
+        self, /,
+        arg: _AnyScalar,
+    ) -> np.float64 | np.complex128: ...
+    @overload
+    def __call__(
+        self, /,
+        arg: _AnyObjectSeriesND,
+    ) -> npt.NDArray[np.object_]: ...
+    @overload
+    def __call__(
+        self, /,
+        arg: _AnyComplexSeriesND,
+    ) -> npt.NDArray[np.complex128 | np.object_]: ...
+    @overload
+    def __call__(
+        self, /,
+        arg: _AnySeries1D,
+    ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ...
+
+    def __str__(self, /) -> str: ...
+    def __repr__(self, /) -> str: ...
+    def __format__(self, fmt_str: str, /) -> str: ...
+    def __eq__(self, x: object, /) -> bool: ...
+    def __ne__(self, x: object, /) -> bool: ...
+    def __neg__(self: _Self, /) -> _Self: ...
+    def __pos__(self: _Self, /) -> _Self: ...
+    def __add__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __sub__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __mul__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __truediv__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __floordiv__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __mod__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __divmod__(self: _Self, x: _AnyOther, /) -> _Tuple2[_Self]: ...
+    def __pow__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __radd__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __rsub__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __rmul__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __rdiv__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __rtruediv__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __rfloordiv__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __rmod__(self: _Self, x: _AnyOther, /) -> _Self: ...
+    def __rdivmod__(self: _Self, x: _AnyOther, /) -> _Tuple2[_Self]: ...
+    def __len__(self, /) -> int: ...
+    def __iter__(self, /) -> Iterator[np.number[Any] | SupportsComplex]: ...
+    def __getstate__(self, /) -> dict[str, Any]: ...
+    def __setstate__(self, dict: dict[str, Any], /) -> None: ...
+
+    def has_samecoef(self, /, other: ABCPolyBase) -> bool: ...
+    def has_samedomain(self, /, other: ABCPolyBase) -> bool: ...
+    def has_samewindow(self, /, other: ABCPolyBase) -> bool: ...
+    def has_sametype(self: _Self, /, other: object) -> TypeGuard[_Self]: ...
+
+    def copy(self: _Self, /) -> _Self: ...
+    def degree(self, /) -> int: ...
+    def cutdeg(self: _Self, /, deg: int) -> _Self: ...
+    def trim(self: _Self, /, tol: float = ...) -> _Self: ...
+    def truncate(self: _Self, /, size: SupportsInt) -> _Self: ...
+
+    @overload
+    def convert(
+        self,
+        domain: None | _AnySeries1D,
+        kind: type[_Self],
+        /,
+        window: None | _AnySeries1D = ...,
+    ) -> _Self: ...
+ @overload + def convert( + self, + /, + domain: None | _AnySeries1D = ..., + *, + kind: type[_Self], + window: None | _AnySeries1D = ..., + ) -> _Self: ... + @overload + def convert( + self: _Self, + /, + domain: None | _AnySeries1D = ..., + kind: type[_Self] = ..., + window: None | _AnySeries1D = ..., + ) -> _Self: ... + + def mapparms(self, /) -> _Tuple2[Any]: ... + + def integ( + self: _Self, + /, + m: SupportsIndex = ..., + k: _AnyNumberScalar | _SupportsLenAndGetItem[_AnyNumberScalar] = ..., + lbnd: None | _AnyNumberScalar = ..., + ) -> _Self: ... + + def deriv(self: _Self, /, m: SupportsIndex = ...) -> _Self: ... + + def roots(self, /) -> _CoefArray1D: ... + + @overload + def linspace( + self, + /, + n: _Size, + domain: None | _AnySeries1D = ..., + ) -> tuple[ + np.ndarray[tuple[_Size], np.dtype[np.float64]], + np.ndarray[tuple[_Size], np.dtype[np.float64 | np.complex128]], + ]: ... + @overload + def linspace( + self, + /, + n: _Hundred = ..., + domain: None | _AnySeries1D = ..., + ) -> tuple[ + np.ndarray[tuple[_Hundred], np.dtype[np.float64]], + np.ndarray[tuple[_Hundred], np.dtype[np.float64 | np.complex128]], + ]: ... + + @overload @classmethod - def fit(cls, x, y, deg, domain=..., rcond=..., full=..., w=..., window=...): ... + def fit( + cls: type[_Self], + /, + x: _AnySeries1D, + y: _AnySeries1D, + deg: _ArrayLikeInt_co, + domain: None | _AnySeries1D = ..., + rcond: float = ..., + full: Literal[False] = ..., + w: None | _AnySeries1D = ..., + window: None | _AnySeries1D = ..., + symbol: str = ..., + ) -> _Self: ... + @overload @classmethod - def fromroots(cls, roots, domain = ..., window=...): ... + def fit( + cls: type[_Self], /, + x: _AnySeries1D, + y: _AnySeries1D, + deg: _ArrayLikeInt_co, + domain: None | _AnySeries1D = ..., + rcond: float = ..., + *, + full: Literal[True], + w: None | _AnySeries1D = ..., + window: None | _AnySeries1D = ..., + symbol: str = ..., + ) -> tuple[_Self, Sequence[np.inexact[Any] | np.int32]]: ... + @overload @classmethod - def identity(cls, domain=..., window=...): ... + def fit( + cls: type[_Self], + x: _AnySeries1D, + y: _AnySeries1D, + deg: _ArrayLikeInt_co, + domain: None | _AnySeries1D, + rcond: float, + full: Literal[True], + /, + w: None | _AnySeries1D = ..., + window: None | _AnySeries1D = ..., + symbol: str = ..., + ) -> tuple[_Self, Sequence[np.inexact[Any] | np.int32]]: ... + + @classmethod + def fromroots( + cls: type[_Self], + /, + roots: _AnySeriesND, + domain: None | _AnySeries1D = ..., + window: None | _AnySeries1D = ..., + ) -> _Self: ... + + @classmethod + def identity( + cls: type[_Self], + /, + domain: None | _AnySeries1D = ..., + window: None | _AnySeries1D = ..., + ) -> _Self: ... + + @classmethod + def basis( + cls: type[_Self], + /, + deg: int, + domain: None | _AnySeries1D = ..., + window: None | _AnySeries1D = ..., + ) -> _Self: ... + @classmethod - def basis(cls, deg, domain=..., window=...): ... + def cast( + cls: type[_Self], + /, + series: ABCPolyBase, + domain: None | _AnySeries1D = ..., + window: None | _AnySeries1D = ..., + ) -> _Self: ... + @classmethod - def cast(cls, series, domain=..., window=...): ... + def _str_term_unicode(cls, i: str, arg_str: str) -> str: ... + @staticmethod + def _str_term_ascii(i: str, arg_str: str) -> str: ... + @staticmethod + def _repr_latex_term(i: str, arg_str: str, needs_parens: bool) -> str: ... 
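+
+# A minimal sketch of what the ``_Self``-typed operators above buy a type
+# checker (illustrative, checker output shown as comments):
+#
+#     p = Chebyshev([1.0, 2.0, 3.0])
+#     q = (p + p) * 2 - p
+#     reveal_type(q)  # note: Revealed type is "Chebyshev"
+#
+# i.e. arithmetic on a subclass instance keeps the subclass type instead of
+# widening to ``ABCPolyBase``.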
diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi new file mode 100644 index 000000000000..23c2fffe45f3 --- /dev/null +++ b/numpy/polynomial/_polytypes.pyi @@ -0,0 +1,1003 @@ +import decimal +import fractions +import numbers +from collections.abc import Callable, Iterable, Sequence +from typing import ( + Any, + Literal, + Protocol, + SupportsComplex, + SupportsFloat, + SupportsIndex, + SupportsInt, + TypeAlias, + TypeVar, + final, + overload, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _NestedSequence, + _SupportsArray, +) + +_V = TypeVar("_V") +_V_co = TypeVar("_V_co", covariant=True) +_Self = TypeVar("_Self", bound=object) + +class _SupportsLenAndGetItem(Protocol[_V_co]): + def __len__(self, /) -> int: ... + def __getitem__(self, i: int, /) -> _V_co: ... + +class _SimpleSequence(Protocol[_V_co]): + def __len__(self, /) -> int: ... + @overload + def __getitem__(self, i: int, /) -> _V_co: ... + @overload + def __getitem__(self: _Self, ii: slice, /) -> _Self: ... + +_SCT = TypeVar("_SCT", bound=np.generic) +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_SCT]] + +_CoefScalarType: TypeAlias = np.number[Any] | np.object_ +_CoefArray1D: TypeAlias = _Array1D[_CoefScalarType] +_CoefArrayND: TypeAlias = npt.NDArray[_CoefScalarType] + +class _SupportsBool(Protocol): + def __bool__(self, /) -> bool: ... + +_AnyFloatScalar: TypeAlias = float | np.floating[Any] | np.integer[Any] +_AnyComplexScalar: TypeAlias = complex | np.complexfloating[Any, Any] +_AnyNumberScalar: TypeAlias = complex | np.number[Any] +_AnyObjectScalar: TypeAlias = ( + fractions.Fraction + | decimal.Decimal + | numbers.Complex + | np.object_ +) +_AnyScalar: TypeAlias = _AnyNumberScalar | _AnyObjectScalar +_AnyInt: TypeAlias = SupportsInt | SupportsIndex + +_AnyFloatSeries1D: TypeAlias = ( + _SupportsArray[np.dtype[np.floating[Any] | np.integer[Any]]] + | _SupportsLenAndGetItem[float | np.floating[Any] | np.integer[Any]] +) +_AnyComplexSeries1D: TypeAlias = ( + npt.NDArray[np.complexfloating[Any, Any]] + | _SupportsArray[np.dtype[np.complexfloating[Any, Any]]] + | _SupportsLenAndGetItem[_AnyComplexScalar] +) +_AnyNumberSeries1D: TypeAlias = ( + npt.NDArray[np.number[Any]] + | _SupportsArray[np.dtype[np.number[Any]]] + | _SupportsLenAndGetItem[_AnyNumberScalar] +) +_AnyObjectSeries1D: TypeAlias = ( + npt.NDArray[np.object_] + | _SupportsLenAndGetItem[_AnyObjectScalar] +) +_AnySeries1D: TypeAlias = ( + npt.NDArray[_CoefScalarType] + | _SupportsLenAndGetItem[_AnyScalar | object] +) + +_AnyFloatSeriesND: TypeAlias = ( + _AnyFloatScalar + | _SupportsArray[np.dtype[np.floating[Any] | np.integer[Any]]] + | _NestedSequence[float | np.floating[Any] | np.integer[Any]] +) +_AnyComplexSeriesND: TypeAlias = ( + _AnyComplexScalar + | _SupportsArray[np.dtype[np.number[Any]]] + | _NestedSequence[complex | np.number[Any]] +) +_AnyObjectSeriesND: TypeAlias = ( + _AnyObjectScalar + | _SupportsArray[np.dtype[np.object_]] + | _NestedSequence[_AnyObjectScalar] +) +_AnySeriesND: TypeAlias = ( + _AnyScalar + | _SupportsArray[np.dtype[_CoefScalarType]] + | _NestedSequence[SupportsComplex | SupportsFloat] +) + +_SCT_domain = TypeVar("_SCT_domain", np.float64, np.complex128, np.object_) +_Interval: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_SCT_domain]] + +_T = TypeVar("_T", bound=object) +_Tuple2: TypeAlias = tuple[_T, 
_T] + +_SCT_number = TypeVar("_SCT_number", bound=_CoefScalarType) +_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_SCT]] +_Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_SCT]] +_Line: TypeAlias = _Array1[_SCT_number] | _Array2[_SCT_number] + +_Name_co = TypeVar("_Name_co", bound=str, covariant=True) + +@final +class _FuncLine(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + off: _SCT_number, + scl: _SCT_number, + ) -> _Line[_SCT_number]: ... + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + off: int, + scl: int, + ) -> _Line[np.int_] : ... + @overload + def __call__( + self, /, + off: float, + scl: float, + ) -> _Line[np.float64]: ... + @overload + def __call__( + self, /, + off: complex, + scl: complex, + ) -> _Line[np.complex128]: ... + @overload + def __call__( + self, /, + off: _AnyObjectScalar, + scl: _AnyObjectScalar, + ) -> _Line[np.object_]: ... + +@final +class _FuncFromRoots(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + roots: _AnyFloatSeries1D, + ) -> _Array1D[np.floating[Any]]: ... + @overload + def __call__( + self, /, + roots: _AnyComplexSeries1D, + ) -> _Array1D[np.complexfloating[Any, Any]]: ... + @overload + def __call__( + self, /, + roots: _AnyObjectSeries1D, + ) -> _Array1D[np.object_]: ... + @overload + def __call__(self, /, roots: _AnySeries1D) -> _CoefArray1D: ... + +@final +class _FuncBinOp(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + c1: _AnyFloatSeries1D, + c2: _AnyFloatSeries1D, + ) -> _Array1D[np.floating[Any]]: ... + @overload + def __call__( + self, /, + c1: _AnyComplexSeries1D, + c2: _AnyComplexSeries1D, + ) -> _Array1D[np.complexfloating[Any, Any]]: ... + @overload + def __call__( + self, /, + c1: _AnyObjectSeries1D, + c2: _AnyObjectSeries1D, + ) -> _Array1D[np.object_]: ... + @overload + def __call__( + self, /, + c1: _AnySeries1D, + c2: _AnySeries1D, + ) -> _CoefArray1D: ... + +@final +class _FuncUnOp(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + c: _AnyFloatSeries1D, + ) -> _Array1D[np.floating[Any]]: ... + @overload + def __call__( + self, /, + c: _AnyComplexSeries1D, + ) -> _Array1D[np.complexfloating[Any, Any]]: ... + @overload + def __call__(self, /, c: _AnyObjectSeries1D) -> _Array1D[np.object_]: ... + @overload + def __call__(self, /, c: _AnySeries1D) -> _CoefArray1D: ... + +@final +class _FuncPoly2Ortho(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + pol: _AnyFloatSeries1D, + ) -> _Array1D[np.floating[Any]]: ... + @overload + def __call__( + self, /, + pol: _AnyComplexSeries1D, + ) -> _Array1D[np.complexfloating[Any, Any]]: ... + @overload + def __call__(self, /, pol: _AnyObjectSeries1D) -> _Array1D[np.object_]: ... + @overload + def __call__(self, /, pol: _AnySeries1D) -> _CoefArray1D: ... + +@final +class _FuncPow(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + c: _AnyFloatSeries1D, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., + ) -> _Array1D[np.floating[Any]]: ... 
+ @overload + def __call__( + self, /, + c: _AnyComplexSeries1D, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., + ) -> _Array1D[np.complexfloating[Any, Any]]: ... + @overload + def __call__( + self, /, + c: _AnyObjectSeries1D, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., + ) -> _Array1D[np.object_]: ... + @overload + def __call__( + self, /, + c: _AnySeries1D, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., + ) -> _CoefArray1D: ... + + +@final +class _FuncDer(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + c: _AnyFloatSeriesND, + m: SupportsIndex = ..., + scl: _AnyNumberScalar = ..., + axis: SupportsIndex = ..., + ) -> npt.NDArray[np.floating[Any]]: ... + @overload + def __call__( + self, /, + c: _AnyComplexSeriesND, + m: SupportsIndex = ..., + scl: _AnyNumberScalar = ..., + axis: SupportsIndex = ..., + ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + @overload + def __call__( + self, /, + c: _AnyObjectSeriesND, + m: SupportsIndex = ..., + scl: _AnyNumberScalar = ..., + axis: SupportsIndex = ..., + ) -> npt.NDArray[np.object_]: ... + @overload + def __call__( + self, /, + c: _AnySeriesND, + m: SupportsIndex = ..., + scl: _AnyNumberScalar = ..., + axis: SupportsIndex = ..., + ) -> _CoefArrayND: ... + + +@final +class _FuncInteg(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + c: _AnyFloatSeriesND, + m: SupportsIndex = ..., + k: _AnyNumberScalar | _SupportsLenAndGetItem[_AnyNumberScalar] = ..., + lbnd: _AnyNumberScalar = ..., + scl: _AnyNumberScalar = ..., + axis: SupportsIndex = ..., + ) -> npt.NDArray[np.floating[Any]]: ... + @overload + def __call__( + self, /, + c: _AnyComplexSeriesND, + m: SupportsIndex = ..., + k: _AnyNumberScalar | _SupportsLenAndGetItem[_AnyNumberScalar] = ..., + lbnd: _AnyNumberScalar = ..., + scl: _AnyNumberScalar = ..., + axis: SupportsIndex = ..., + ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + @overload + def __call__( + self, /, + c: _AnyObjectSeriesND, + m: SupportsIndex = ..., + k: _AnyNumberScalar | _SupportsLenAndGetItem[_AnyNumberScalar] = ..., + lbnd: _AnyNumberScalar = ..., + scl: _AnyNumberScalar = ..., + axis: SupportsIndex = ..., + ) -> npt.NDArray[np.object_]: ... + @overload + def __call__( + self, /, + c: _AnySeriesND, + m: SupportsIndex = ..., + k: _AnyNumberScalar | _SupportsLenAndGetItem[_AnyNumberScalar] = ..., + lbnd: _AnyNumberScalar = ..., + scl: _AnyNumberScalar = ..., + axis: SupportsIndex = ..., + ) -> _CoefArrayND: ... + + +_AnyFloatRoots: TypeAlias = ( + _Array1D[np.floating[Any] | np.integer[Any]] + | Sequence[_AnyFloatScalar] +) +_AnyComplexRoots: TypeAlias = ( + _Array1D[np.number[Any]] + | Sequence[_AnyComplexScalar] +) +_AnyObjectRoots: TypeAlias = ( + _Array1D[np.object_] + | Sequence[_AnyObjectScalar] +) + +_AnyFloatPoints: TypeAlias = ( + npt.NDArray[np.floating[Any] | np.integer[Any]] + | tuple[_AnyFloatSeriesND, ...] + | list[_AnyFloatSeriesND] +) +_AnyComplexPoints: TypeAlias = ( + npt.NDArray[np.complexfloating[Any, Any]] + | tuple[_AnyComplexSeriesND, ...] + | list[_AnyComplexSeriesND] +) +_AnyObjectPoints: TypeAlias = ( + npt.NDArray[np.object_] + | tuple[_AnyObjectSeriesND, ...] + | list[_AnyObjectSeriesND] +) +_AnyPoints: TypeAlias = ( + _CoefArrayND + | tuple[_AnySeriesND, ...] 
+ | list[_AnySeriesND] +) + +@final +class _FuncValFromRoots(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + x: _AnyFloatScalar, + r: _AnyFloatScalar, + tensor: bool = ..., + ) -> np.floating[Any]: ... + @overload + def __call__( + self, /, + x: _AnyComplexScalar, + r: _AnyComplexScalar, + tensor: bool = ..., + ) -> np.complexfloating[Any, Any]: ... + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + x: _AnyObjectScalar, + r: _AnyObjectScalar, + tensor: bool = ..., + ) -> object: ... + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + x: _AnyFloatScalar | _AnyFloatPoints, + r: _AnyFloatSeriesND, + tensor: bool = ..., + ) -> npt.NDArray[np.floating[Any]]: ... + @overload + def __call__( + self, /, + x: _AnyComplexScalar | _AnyComplexPoints, + r: _AnyComplexSeriesND, + tensor: bool = ..., + ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + @overload + def __call__( + self, /, + x: _AnyScalar | _AnyObjectPoints | _AnyComplexPoints, + r: _AnyObjectSeriesND, + tensor: bool = ..., + ) -> npt.NDArray[np.object_]: ... + @overload + def __call__( + self, /, + x: _AnyScalar | _AnyPoints, + r: _AnySeriesND, + tensor: bool = ..., + ) -> _CoefArrayND: ... + +@final +class _FuncVal(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + x: _AnyFloatScalar, + c: _AnyFloatRoots, + tensor: bool = ..., + ) -> np.floating[Any]: ... + @overload + def __call__( + self, /, + x: _AnyComplexScalar, + c: _AnyComplexRoots, + tensor: bool = ..., + ) -> np.complexfloating[Any, Any]: ... + @overload + def __call__( + self, /, + x: _AnyObjectScalar, + c: _AnyObjectRoots, + tensor: bool = ..., + ) -> object: ... + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + x: _AnyFloatPoints, + c: _AnyFloatSeriesND, + tensor: bool = ..., + ) -> npt.NDArray[np.floating[Any]]: ... + @overload + def __call__( + self, /, + x: _AnyComplexPoints, + c: _AnyComplexSeriesND, + tensor: bool = ..., + ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + @overload + def __call__( + self, /, + x: _AnyObjectPoints, + c: _AnyObjectSeries1D | _AnyComplexSeriesND, + tensor: bool = ..., + ) -> npt.NDArray[np.object_]: ... + @overload + def __call__( + self, /, + x: _AnyPoints, + c: _AnySeriesND, + tensor: bool = ..., + ) -> _CoefArrayND: ... + +@final +class _FuncVal2D(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + x: _AnyFloatScalar, + y: _AnyFloatScalar, + c: _AnyFloatRoots, + ) -> np.floating[Any]: ... + @overload + def __call__( + self, /, + x: _AnyComplexScalar, + y: _AnyComplexScalar, + c: _AnyComplexRoots, + ) -> np.complexfloating[Any, Any]: ... + @overload + def __call__( + self, /, + x: _AnyObjectScalar, + y: _AnyObjectScalar, + c: _AnyObjectRoots, + ) -> object: ... + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + x: _AnyFloatPoints, + y: _AnyFloatPoints, + c: _AnyFloatSeriesND, + ) -> npt.NDArray[np.floating[Any]]: ... + @overload + def __call__( + self, /, + x: _AnyComplexPoints, + y: _AnyComplexPoints, + c: _AnyComplexSeriesND, + ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + @overload + def __call__( + self, /, + x: _AnyObjectPoints, + y: _AnyObjectPoints, + c: _AnyObjectSeries1D | _AnyComplexSeriesND, + ) -> npt.NDArray[np.object_]: ... 
+ @overload + def __call__( + self, /, + x: _AnyPoints, + y: _AnyPoints, + c: _AnySeriesND, + ) -> _CoefArrayND: ... + +@final +class _FuncVal3D(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + x: _AnyFloatScalar, + y: _AnyFloatScalar, + z: _AnyFloatScalar, + c: _AnyFloatRoots + ) -> np.floating[Any]: ... + @overload + def __call__( + self, /, + x: _AnyComplexScalar, + y: _AnyComplexScalar, + z: _AnyComplexScalar, + c: _AnyComplexRoots, + ) -> np.complexfloating[Any, Any]: ... + @overload + def __call__( + self, /, + x: _AnyObjectScalar, + y: _AnyObjectScalar, + z: _AnyObjectScalar, + c: _AnyObjectRoots, + ) -> object: ... + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + x: _AnyFloatPoints, + y: _AnyFloatPoints, + z: _AnyFloatPoints, + c: _AnyFloatSeriesND, + ) -> npt.NDArray[np.floating[Any]]: ... + @overload + def __call__( + self, /, + x: _AnyComplexPoints, + y: _AnyComplexPoints, + z: _AnyComplexPoints, + c: _AnyComplexSeriesND, + ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + @overload + def __call__( + self, /, + x: _AnyObjectPoints, + y: _AnyObjectPoints, + z: _AnyObjectPoints, + c: _AnyObjectSeries1D | _AnyComplexSeriesND, + ) -> npt.NDArray[np.object_]: ... + @overload + def __call__( + self, /, + x: _AnyPoints, + y: _AnyPoints, + z: _AnyPoints, + c: _AnySeriesND, + ) -> _CoefArrayND: ... + +_AnyValF: TypeAlias = Callable[ + [npt.ArrayLike, npt.ArrayLike, bool], + _CoefArrayND, +] + +@final +class _FuncValND(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, + val_f: _AnyValF, + c: _AnyFloatRoots, + /, + *args: _AnyFloatScalar, + ) -> np.floating[Any]: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _AnyComplexRoots, + /, + *args: _AnyComplexScalar, + ) -> np.complexfloating[Any, Any]: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _AnyObjectRoots, + /, + *args: _AnyObjectScalar, + ) -> object: ... + @overload + def __call__( # type: ignore[overload-overlap] + self, + val_f: _AnyValF, + c: _AnyFloatSeriesND, + /, + *args: _AnyFloatPoints, + ) -> npt.NDArray[np.floating[Any]]: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _AnyComplexSeriesND, + /, + *args: _AnyComplexPoints, + ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _AnyObjectSeries1D | _AnyComplexSeriesND, + /, + *args: _AnyObjectPoints, + ) -> npt.NDArray[np.object_]: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _AnySeriesND, + /, + *args: _AnyPoints, + ) -> _CoefArrayND: ... + +@final +class _FuncVander(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + + @overload + def __call__( # type: ignore[overload-overlap] + self, /, + x: _ArrayLikeFloat_co, + deg: SupportsIndex, + ) -> npt.NDArray[np.floating[Any]]: ... + @overload + def __call__( + self, /, + x: _ArrayLikeComplex_co, + deg: SupportsIndex, + ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + @overload + def __call__( + self, /, + x: _ArrayLikeObject_co, + deg: SupportsIndex, + ) -> npt.NDArray[np.object_]: ... + @overload + def __call__( + self, /, + x: npt.ArrayLike, + deg: SupportsIndex, + ) -> _CoefArrayND: ... + +_AnyDegrees: TypeAlias = _SupportsLenAndGetItem[SupportsIndex] + +@final +class _FuncVander2D(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... 
+
+    @overload
+    def __call__(  # type: ignore[overload-overlap]
+        self, /,
+        x: _ArrayLikeFloat_co,
+        y: _ArrayLikeFloat_co,
+        deg: _AnyDegrees,
+    ) -> npt.NDArray[np.floating[Any]]: ...
+    @overload
+    def __call__(
+        self, /,
+        x: _ArrayLikeComplex_co,
+        y: _ArrayLikeComplex_co,
+        deg: _AnyDegrees,
+    ) -> npt.NDArray[np.complexfloating[Any, Any]]: ...
+    @overload
+    def __call__(
+        self, /,
+        x: _ArrayLikeObject_co,
+        y: _ArrayLikeObject_co,
+        deg: _AnyDegrees,
+    ) -> npt.NDArray[np.object_]: ...
+    @overload
+    def __call__(
+        self, /,
+        x: npt.ArrayLike,
+        y: npt.ArrayLike,
+        deg: _AnyDegrees,
+    ) -> _CoefArrayND: ...
+
+@final
+class _FuncVander3D(Protocol[_Name_co]):
+    @property
+    def __name__(self, /) -> _Name_co: ...
+
+    @overload
+    def __call__(  # type: ignore[overload-overlap]
+        self, /,
+        x: _ArrayLikeFloat_co,
+        y: _ArrayLikeFloat_co,
+        z: _ArrayLikeFloat_co,
+        deg: _AnyDegrees,
+    ) -> npt.NDArray[np.floating[Any]]: ...
+    @overload
+    def __call__(
+        self, /,
+        x: _ArrayLikeComplex_co,
+        y: _ArrayLikeComplex_co,
+        z: _ArrayLikeComplex_co,
+        deg: _AnyDegrees,
+    ) -> npt.NDArray[np.complexfloating[Any, Any]]: ...
+    @overload
+    def __call__(
+        self, /,
+        x: _ArrayLikeObject_co,
+        y: _ArrayLikeObject_co,
+        z: _ArrayLikeObject_co,
+        deg: _AnyDegrees,
+    ) -> npt.NDArray[np.object_]: ...
+    @overload
+    def __call__(
+        self, /,
+        x: npt.ArrayLike,
+        y: npt.ArrayLike,
+        z: npt.ArrayLike,
+        deg: _AnyDegrees,
+    ) -> _CoefArrayND: ...
+
+# keep in sync with the broadest overload of `._FuncVander`
+_AnyFuncVander: TypeAlias = Callable[
+    [npt.ArrayLike, SupportsIndex],
+    _CoefArrayND,
+]
+
+@final
+class _FuncVanderND(Protocol[_Name_co]):
+    @property
+    def __name__(self, /) -> _Name_co: ...
+
+    @overload
+    def __call__(  # type: ignore[overload-overlap]
+        self, /,
+        vander_fs: _SupportsLenAndGetItem[_AnyFuncVander],
+        points: _SupportsLenAndGetItem[_ArrayLikeFloat_co],
+        degrees: _SupportsLenAndGetItem[SupportsIndex],
+    ) -> npt.NDArray[np.floating[Any]]: ...
+    @overload
+    def __call__(
+        self, /,
+        vander_fs: _SupportsLenAndGetItem[_AnyFuncVander],
+        points: _SupportsLenAndGetItem[_ArrayLikeComplex_co],
+        degrees: _SupportsLenAndGetItem[SupportsIndex],
+    ) -> npt.NDArray[np.complexfloating[Any, Any]]: ...
+    @overload
+    def __call__(
+        self, /,
+        vander_fs: _SupportsLenAndGetItem[_AnyFuncVander],
+        points: _SupportsLenAndGetItem[_ArrayLikeObject_co],
+        degrees: _SupportsLenAndGetItem[SupportsIndex],
+    ) -> npt.NDArray[np.object_]: ...
+    @overload
+    def __call__(
+        self, /,
+        vander_fs: _SupportsLenAndGetItem[_AnyFuncVander],
+        points: _SupportsLenAndGetItem[npt.ArrayLike],
+        degrees: _SupportsLenAndGetItem[SupportsIndex],
+    ) -> _CoefArrayND: ...
+
+@final
+class _FuncFit(Protocol[_Name_co]):
+    @property
+    def __name__(self, /) -> _Name_co: ...
+
+    @overload
+    def __call__(  # type: ignore[overload-overlap]
+        self, /,
+        x: _AnyFloatSeries1D,
+        y: _AnyFloatSeriesND,
+        deg: _ArrayLikeInt_co,
+        rcond: None | float = ...,
+        full: Literal[False] = ...,
+        w: None | _AnyFloatSeries1D = ...,
+    ) -> npt.NDArray[np.floating[Any]]: ...
+    @overload
+    def __call__(
+        self, /,
+        x: _AnyComplexSeries1D,
+        y: _AnyComplexSeriesND,
+        deg: _ArrayLikeInt_co,
+        rcond: None | float = ...,
+        full: Literal[False] = ...,
+        w: None | _AnyComplexSeriesND = ...,
+    ) -> npt.NDArray[np.complexfloating[Any, Any]]: ...
+    @overload
+    def __call__(
+        self, /,
+        x: _AnySeries1D,
+        y: _AnySeriesND,
+        deg: _ArrayLikeInt_co,
+        rcond: None | float = ...,
+        full: Literal[False] = ...,
+        w: None | _AnySeries1D = ...,
+    ) -> _CoefArrayND: ...
+
+    @overload
+    def __call__(
+        self,
+        x: _AnySeries1D,
+        y: _AnySeriesND,
+        deg: _ArrayLikeInt_co,
+        rcond: None | float,
+        full: Literal[True],
+        /,
+        w: None | _AnySeries1D = ...,
+    ) -> tuple[_CoefArrayND, Sequence[np.inexact[Any] | np.int32]]: ...
+    @overload
+    def __call__(
+        self, /,
+        x: _AnySeries1D,
+        y: _AnySeriesND,
+        deg: _ArrayLikeInt_co,
+        rcond: None | float = ...,
+        *,
+        full: Literal[True],
+        w: None | _AnySeries1D = ...,
+    ) -> tuple[_CoefArrayND, Sequence[np.inexact[Any] | np.int32]]: ...
+
+@final
+class _FuncRoots(Protocol[_Name_co]):
+    @property
+    def __name__(self, /) -> _Name_co: ...
+
+    @overload
+    def __call__(  # type: ignore[overload-overlap]
+        self, /,
+        c: _AnyFloatSeries1D,
+    ) -> _Array1D[np.float64]: ...
+    @overload
+    def __call__(
+        self, /,
+        c: _AnyComplexSeries1D,
+    ) -> _Array1D[np.complex128]: ...
+    @overload
+    def __call__(
+        self, /,
+        c: _AnyObjectSeries1D,
+    ) -> _Array1D[np.object_]: ...
+    @overload
+    def __call__(
+        self, /,
+        c: _AnySeries1D,
+    ) -> _Array1D[np.float64 | np.complex128 | np.object_]: ...
+
+@final
+class _FuncCompanion(Protocol[_Name_co]):
+    @property
+    def __name__(self, /) -> _Name_co: ...
+
+    @overload
+    def __call__(  # type: ignore[overload-overlap]
+        self, /,
+        c: _AnyFloatSeries1D,
+    ) -> _Array2D[np.float64]: ...
+    @overload
+    def __call__(
+        self, /,
+        c: _AnyComplexSeries1D,
+    ) -> _Array2D[np.complex128]: ...
+    @overload
+    def __call__(
+        self, /,
+        c: _AnyObjectSeries1D,
+    ) -> _Array2D[np.object_]: ...
+    @overload
+    def __call__(
+        self, /,
+        c: _AnySeries1D,
+    ) -> _Array2D[np.float64 | np.complex128 | np.object_]: ...
+
+@final
+class _FuncGauss(Protocol[_Name_co]):
+    @property
+    def __name__(self, /) -> _Name_co: ...
+
+    def __call__(self, /, deg: SupportsIndex) -> _Tuple2[_Array1D[np.float64]]: ...
+
+@final
+class _FuncWeight(Protocol[_Name_co]):
+    @property
+    def __name__(self, /) -> _Name_co: ...
+
+    @overload
+    def __call__(  # type: ignore[overload-overlap]
+        self, /,
+        c: _AnyFloatSeriesND,
+    ) -> npt.NDArray[np.float64]: ...
+    @overload
+    def __call__(
+        self, /,
+        c: _AnyComplexSeriesND,
+    ) -> npt.NDArray[np.complex128]: ...
+    @overload
+    def __call__(
+        self, /,
+        c: _AnyObjectSeriesND,
+    ) -> npt.NDArray[np.object_]: ...
+    @overload
+    def __call__(
+        self, /,
+        c: _AnySeriesND,
+    ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ...
+
+_N_pts = TypeVar("_N_pts", bound=int)
+
+@final
+class _FuncPts(Protocol[_Name_co]):
+    @property
+    def __name__(self, /) -> _Name_co: ...
+
+    @overload
+    def __call__(  # type: ignore[overload-overlap]
+        self, /,
+        npts: _N_pts,
+    ) -> np.ndarray[tuple[_N_pts], np.dtype[np.float64]]: ...
+    @overload
+    def __call__(self, /, npts: _AnyInt) -> _Array1D[np.float64]: ...
diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi
index f8cbacfc2f96..e6df3d328e72 100644
--- a/numpy/polynomial/chebyshev.pyi
+++ b/numpy/polynomial/chebyshev.pyi
@@ -1,52 +1,194 @@
-from typing import Any
-
-from numpy import int_
-from numpy.typing import NDArray
-from numpy.polynomial._polybase import ABCPolyBase
-from numpy.polynomial.polyutils import trimcoef
-
-__all__: list[str]
-
-chebtrim = trimcoef
-
-def poly2cheb(pol): ...
-def cheb2poly(c): ...
-
-chebdomain: NDArray[int_]
-chebzero: NDArray[int_]
-chebone: NDArray[int_]
-chebx: NDArray[int_]
-
-def chebline(off, scl): ...
-def chebfromroots(roots): ...
-def chebadd(c1, c2): ...
-def chebsub(c1, c2): ...
-def chebmulx(c): ...
-def chebmul(c1, c2): ...
-def chebdiv(c1, c2): ...
-def chebpow(c, pow, maxpower=...): ...
-def chebder(c, m=..., scl=..., axis=...): ... -def chebint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def chebval(x, c, tensor=...): ... -def chebval2d(x, y, c): ... -def chebgrid2d(x, y, c): ... -def chebval3d(x, y, z, c): ... -def chebgrid3d(x, y, z, c): ... -def chebvander(x, deg): ... -def chebvander2d(x, y, deg): ... -def chebvander3d(x, y, z, deg): ... -def chebfit(x, y, deg, rcond=..., full=..., w=...): ... -def chebcompanion(c): ... -def chebroots(c): ... -def chebinterpolate(func, deg, args = ...): ... -def chebgauss(deg): ... -def chebweight(x): ... -def chebpts1(npts): ... -def chebpts2(npts): ... +from collections.abc import Callable, Iterable +from typing import ( + Any, + Concatenate, + Final, + Literal as L, + TypeVar, + overload, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import _IntLike_co + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _AnySeries1D, + _Array1, + _Array1D, + _Array2, + _CoefArray1D, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncPts, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as chebtrim + +__all__ = [ + "chebzero", + "chebone", + "chebx", + "chebdomain", + "chebline", + "chebadd", + "chebsub", + "chebmulx", + "chebmul", + "chebdiv", + "chebpow", + "chebval", + "chebder", + "chebint", + "cheb2poly", + "poly2cheb", + "chebfromroots", + "chebvander", + "chebfit", + "chebtrim", + "chebroots", + "chebpts1", + "chebpts2", + "Chebyshev", + "chebval2d", + "chebval3d", + "chebgrid2d", + "chebgrid3d", + "chebvander2d", + "chebvander3d", + "chebcompanion", + "chebgauss", + "chebweight", + "chebinterpolate", +] + +_SCT = TypeVar("_SCT", bound=np.number[Any] | np.object_) +def _cseries_to_zseries(c: npt.NDArray[_SCT]) -> _Array1D[_SCT]: ... +def _zseries_to_cseries(zs: npt.NDArray[_SCT]) -> _Array1D[_SCT]: ... +def _zseries_mul( + z1: npt.NDArray[_SCT], + z2: npt.NDArray[_SCT], +) -> _Array1D[_SCT]: ... +def _zseries_div( + z1: npt.NDArray[_SCT], + z2: npt.NDArray[_SCT], +) -> _Array1D[_SCT]: ... +def _zseries_der(zs: npt.NDArray[_SCT]) -> _Array1D[_SCT]: ... +def _zseries_int(zs: npt.NDArray[_SCT]) -> _Array1D[_SCT]: ... 
+ +poly2cheb: _FuncPoly2Ortho[L["poly2cheb"]] +cheb2poly: _FuncUnOp[L["cheb2poly"]] + +chebdomain: Final[_Array2[np.float64]] +chebzero: Final[_Array1[np.int_]] +chebone: Final[_Array1[np.int_]] +chebx: Final[_Array2[np.int_]] + +chebline: _FuncLine[L["chebline"]] +chebfromroots: _FuncFromRoots[L["chebfromroots"]] +chebadd: _FuncBinOp[L["chebadd"]] +chebsub: _FuncBinOp[L["chebsub"]] +chebmulx: _FuncUnOp[L["chebmulx"]] +chebmul: _FuncBinOp[L["chebmul"]] +chebdiv: _FuncBinOp[L["chebdiv"]] +chebpow: _FuncPow[L["chebpow"]] +chebder: _FuncDer[L["chebder"]] +chebint: _FuncInteg[L["chebint"]] +chebval: _FuncVal[L["chebval"]] +chebval2d: _FuncVal2D[L["chebval2d"]] +chebval3d: _FuncVal3D[L["chebval3d"]] +chebvalfromroots: _FuncValFromRoots[L["chebvalfromroots"]] +chebgrid2d: _FuncVal2D[L["chebgrid2d"]] +chebgrid3d: _FuncVal3D[L["chebgrid3d"]] +chebvander: _FuncVander[L["chebvander"]] +chebvander2d: _FuncVander2D[L["chebvander2d"]] +chebvander3d: _FuncVander3D[L["chebvander3d"]] +chebfit: _FuncFit[L["chebfit"]] +chebcompanion: _FuncCompanion[L["chebcompanion"]] +chebroots: _FuncRoots[L["chebroots"]] +chebgauss: _FuncGauss[L["chebgauss"]] +chebweight: _FuncWeight[L["chebweight"]] +chebpts1: _FuncPts[L["chebpts1"]] +chebpts2: _FuncPts[L["chebpts2"]] + +# keep in sync with `Chebyshev.interpolate` +_RT = TypeVar( + "_RT", + np.float64, + np.complex128, + np.floating[Any], + np.complexfloating[Any, Any], + np.number[Any], + np.object_, +) +@overload +def chebinterpolate( + func: Callable[[npt.NDArray[np.float64]], _RT], + deg: _IntLike_co, + args: tuple[()] = ..., +) -> npt.NDArray[_RT]: ... +@overload +def chebinterpolate( + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _RT], + deg: _IntLike_co, + args: Iterable[Any], +) -> npt.NDArray[_RT]: ... + +_Self = TypeVar("_Self", bound=object) class Chebyshev(ABCPolyBase): + @overload + @classmethod + def interpolate( + cls: type[_Self], + /, + func: Callable[[npt.NDArray[np.float64]], _CoefArray1D], + deg: _IntLike_co, + domain: None | _AnySeries1D = ..., + args: tuple[()] = ..., + ) -> _Self: ... + @overload + @classmethod + def interpolate( + cls: type[_Self], + /, + func: Callable[ + Concatenate[npt.NDArray[np.float64], ...], + _CoefArray1D, + ], + deg: _IntLike_co, + domain: None | _AnySeries1D = ..., + *, + args: Iterable[Any], + ) -> _Self: ... + @overload @classmethod - def interpolate(cls, func, deg, domain=..., args = ...): ... - domain: Any - window: Any - basis_name: Any + def interpolate( + cls: type[_Self], + func: Callable[ + Concatenate[npt.NDArray[np.float64], ...], + _CoefArray1D, + ], + deg: _IntLike_co, + domain: None | _AnySeries1D, + args: Iterable[Any], + /, + ) -> _Self: ... diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 0a1628ab39c1..d77e5cb66ecc 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,47 +1,106 @@ -from typing import Any - -from numpy import int_, float64 -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef - -__all__: list[str] - -hermtrim = trimcoef - -def poly2herm(pol): ... -def herm2poly(c): ... - -hermdomain: NDArray[int_] -hermzero: NDArray[int_] -hermone: NDArray[int_] -hermx: NDArray[float64] - -def hermline(off, scl): ... -def hermfromroots(roots): ... -def hermadd(c1, c2): ... -def hermsub(c1, c2): ... -def hermmulx(c): ... -def hermmul(c1, c2): ... -def hermdiv(c1, c2): ... -def hermpow(c, pow, maxpower=...): ... 
-def hermder(c, m=..., scl=..., axis=...): ...
-def hermint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ...
-def hermval(x, c, tensor=...): ...
-def hermval2d(x, y, c): ...
-def hermgrid2d(x, y, c): ...
-def hermval3d(x, y, z, c): ...
-def hermgrid3d(x, y, z, c): ...
-def hermvander(x, deg): ...
-def hermvander2d(x, y, deg): ...
-def hermvander3d(x, y, z, deg): ...
-def hermfit(x, y, deg, rcond=..., full=..., w=...): ...
-def hermcompanion(c): ...
-def hermroots(c): ...
-def hermgauss(deg): ...
-def hermweight(x): ...
-
-class Hermite(ABCPolyBase):
-    domain: Any
-    window: Any
-    basis_name: Any
+from typing import Any, Final, Literal as L, TypeVar
+
+import numpy as np
+
+from ._polybase import ABCPolyBase
+from ._polytypes import (
+    _Array1,
+    _Array2,
+    _FuncBinOp,
+    _FuncCompanion,
+    _FuncDer,
+    _FuncFit,
+    _FuncFromRoots,
+    _FuncGauss,
+    _FuncInteg,
+    _FuncLine,
+    _FuncPoly2Ortho,
+    _FuncPow,
+    _FuncRoots,
+    _FuncUnOp,
+    _FuncVal,
+    _FuncVal2D,
+    _FuncVal3D,
+    _FuncValFromRoots,
+    _FuncVander,
+    _FuncVander2D,
+    _FuncVander3D,
+    _FuncWeight,
+)
+from .polyutils import trimcoef as hermtrim
+
+__all__ = [
+    "hermzero",
+    "hermone",
+    "hermx",
+    "hermdomain",
+    "hermline",
+    "hermadd",
+    "hermsub",
+    "hermmulx",
+    "hermmul",
+    "hermdiv",
+    "hermpow",
+    "hermval",
+    "hermder",
+    "hermint",
+    "herm2poly",
+    "poly2herm",
+    "hermfromroots",
+    "hermvander",
+    "hermfit",
+    "hermtrim",
+    "hermroots",
+    "Hermite",
+    "hermval2d",
+    "hermval3d",
+    "hermgrid2d",
+    "hermgrid3d",
+    "hermvander2d",
+    "hermvander3d",
+    "hermcompanion",
+    "hermgauss",
+    "hermweight",
+]
+
+poly2herm: _FuncPoly2Ortho[L["poly2herm"]]
+herm2poly: _FuncUnOp[L["herm2poly"]]
+
+hermdomain: Final[_Array2[np.float64]]
+hermzero: Final[_Array1[np.int_]]
+hermone: Final[_Array1[np.int_]]
+hermx: Final[_Array2[np.float64]]
+
+hermline: _FuncLine[L["hermline"]]
+hermfromroots: _FuncFromRoots[L["hermfromroots"]]
+hermadd: _FuncBinOp[L["hermadd"]]
+hermsub: _FuncBinOp[L["hermsub"]]
+hermmulx: _FuncUnOp[L["hermmulx"]]
+hermmul: _FuncBinOp[L["hermmul"]]
+hermdiv: _FuncBinOp[L["hermdiv"]]
+hermpow: _FuncPow[L["hermpow"]]
+hermder: _FuncDer[L["hermder"]]
+hermint: _FuncInteg[L["hermint"]]
+hermval: _FuncVal[L["hermval"]]
+hermval2d: _FuncVal2D[L["hermval2d"]]
+hermval3d: _FuncVal3D[L["hermval3d"]]
+hermvalfromroots: _FuncValFromRoots[L["hermvalfromroots"]]
+hermgrid2d: _FuncVal2D[L["hermgrid2d"]]
+hermgrid3d: _FuncVal3D[L["hermgrid3d"]]
+hermvander: _FuncVander[L["hermvander"]]
+hermvander2d: _FuncVander2D[L["hermvander2d"]]
+hermvander3d: _FuncVander3D[L["hermvander3d"]]
+hermfit: _FuncFit[L["hermfit"]]
+hermcompanion: _FuncCompanion[L["hermcompanion"]]
+hermroots: _FuncRoots[L["hermroots"]]
+
+_ND = TypeVar("_ND", bound=Any)
+def _normed_hermite_n(
+    x: np.ndarray[_ND, np.dtype[np.float64]],
+    n: int | np.intp,
+) -> np.ndarray[_ND, np.dtype[np.float64]]: ...
+
+hermgauss: _FuncGauss[L["hermgauss"]]
+hermweight: _FuncWeight[L["hermweight"]]
+
+class Hermite(ABCPolyBase): ...
diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi
index cca0dd636785..01537e260ea1 100644
--- a/numpy/polynomial/hermite_e.pyi
+++ b/numpy/polynomial/hermite_e.pyi
@@ -1,47 +1,106 @@
-from typing import Any
-
-from numpy import int_
-from numpy.typing import NDArray
-from numpy.polynomial._polybase import ABCPolyBase
-from numpy.polynomial.polyutils import trimcoef
-
-__all__: list[str]
-
-hermetrim = trimcoef
-
-def poly2herme(pol): ...
-def herme2poly(c): ...
- -hermedomain: NDArray[int_] -hermezero: NDArray[int_] -hermeone: NDArray[int_] -hermex: NDArray[int_] - -def hermeline(off, scl): ... -def hermefromroots(roots): ... -def hermeadd(c1, c2): ... -def hermesub(c1, c2): ... -def hermemulx(c): ... -def hermemul(c1, c2): ... -def hermediv(c1, c2): ... -def hermepow(c, pow, maxpower=...): ... -def hermeder(c, m=..., scl=..., axis=...): ... -def hermeint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def hermeval(x, c, tensor=...): ... -def hermeval2d(x, y, c): ... -def hermegrid2d(x, y, c): ... -def hermeval3d(x, y, z, c): ... -def hermegrid3d(x, y, z, c): ... -def hermevander(x, deg): ... -def hermevander2d(x, y, deg): ... -def hermevander3d(x, y, z, deg): ... -def hermefit(x, y, deg, rcond=..., full=..., w=...): ... -def hermecompanion(c): ... -def hermeroots(c): ... -def hermegauss(deg): ... -def hermeweight(x): ... - -class HermiteE(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +from typing import Any, Final, Literal as L, TypeVar + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as hermetrim + +__all__ = [ + "hermezero", + "hermeone", + "hermex", + "hermedomain", + "hermeline", + "hermeadd", + "hermesub", + "hermemulx", + "hermemul", + "hermediv", + "hermepow", + "hermeval", + "hermeder", + "hermeint", + "herme2poly", + "poly2herme", + "hermefromroots", + "hermevander", + "hermefit", + "hermetrim", + "hermeroots", + "HermiteE", + "hermeval2d", + "hermeval3d", + "hermegrid2d", + "hermegrid3d", + "hermevander2d", + "hermevander3d", + "hermecompanion", + "hermegauss", + "hermeweight", +] + +poly2herme: _FuncPoly2Ortho[L["poly2herme"]] +herme2poly: _FuncUnOp[L["herme2poly"]] + +hermedomain: Final[_Array2[np.float64]] +hermezero: Final[_Array1[np.int_]] +hermeone: Final[_Array1[np.int_]] +hermex: Final[_Array2[np.int_]] + +hermeline: _FuncLine[L["hermeline"]] +hermefromroots: _FuncFromRoots[L["hermefromroots"]] +hermeadd: _FuncBinOp[L["hermeadd"]] +hermesub: _FuncBinOp[L["hermesub"]] +hermemulx: _FuncUnOp[L["hermemulx"]] +hermemul: _FuncBinOp[L["hermemul"]] +hermediv: _FuncBinOp[L["hermediv"]] +hermepow: _FuncPow[L["hermepow"]] +hermeder: _FuncDer[L["hermeder"]] +hermeint: _FuncInteg[L["hermeint"]] +hermeval: _FuncVal[L["hermeval"]] +hermeval2d: _FuncVal2D[L["hermeval2d"]] +hermeval3d: _FuncVal3D[L["hermeval3d"]] +hermevalfromroots: _FuncValFromRoots[L["hermevalfromroots"]] +hermegrid2d: _FuncVal2D[L["hermegrid2d"]] +hermegrid3d: _FuncVal3D[L["hermegrid3d"]] +hermevander: _FuncVander[L["hermevander"]] +hermevander2d: _FuncVander2D[L["hermevander2d"]] +hermevander3d: _FuncVander3D[L["hermevander3d"]] +hermefit: _FuncFit[L["hermefit"]] +hermecompanion: _FuncCompanion[L["hermecompanion"]] +hermeroots: _FuncRoots[L["hermeroots"]] + +_ND = TypeVar("_ND", bound=Any) +def _normed_hermite_e_n( + x: np.ndarray[_ND, np.dtype[np.float64]], + n: int | np.intp, +) -> np.ndarray[_ND, np.dtype[np.float64]]: ... + +hermegauss: _FuncGauss[L["hermegauss"]] +hermeweight: _FuncWeight[L["hermeweight"]] + +class HermiteE(ABCPolyBase): ... 
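+
+# The ``Literal``-parameterised protocols above brand each module-level
+# function with its runtime ``__name__``; a rough sketch of the effect for a
+# type checker (illustrative):
+#
+#     reveal_type(herme2poly.__name__)
+#     # note: Revealed type is "Literal['herme2poly']"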
diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index 541d3911832f..b69f4dbbb384 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -1,47 +1,100 @@ -from typing import Any - -from numpy import int_ -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef - -__all__: list[str] - -lagtrim = trimcoef - -def poly2lag(pol): ... -def lag2poly(c): ... - -lagdomain: NDArray[int_] -lagzero: NDArray[int_] -lagone: NDArray[int_] -lagx: NDArray[int_] - -def lagline(off, scl): ... -def lagfromroots(roots): ... -def lagadd(c1, c2): ... -def lagsub(c1, c2): ... -def lagmulx(c): ... -def lagmul(c1, c2): ... -def lagdiv(c1, c2): ... -def lagpow(c, pow, maxpower=...): ... -def lagder(c, m=..., scl=..., axis=...): ... -def lagint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def lagval(x, c, tensor=...): ... -def lagval2d(x, y, c): ... -def laggrid2d(x, y, c): ... -def lagval3d(x, y, z, c): ... -def laggrid3d(x, y, z, c): ... -def lagvander(x, deg): ... -def lagvander2d(x, y, deg): ... -def lagvander3d(x, y, z, deg): ... -def lagfit(x, y, deg, rcond=..., full=..., w=...): ... -def lagcompanion(c): ... -def lagroots(c): ... -def laggauss(deg): ... -def lagweight(x): ... - -class Laguerre(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +from typing import Final, Literal as L + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as lagtrim + +__all__ = [ + "lagzero", + "lagone", + "lagx", + "lagdomain", + "lagline", + "lagadd", + "lagsub", + "lagmulx", + "lagmul", + "lagdiv", + "lagpow", + "lagval", + "lagder", + "lagint", + "lag2poly", + "poly2lag", + "lagfromroots", + "lagvander", + "lagfit", + "lagtrim", + "lagroots", + "Laguerre", + "lagval2d", + "lagval3d", + "laggrid2d", + "laggrid3d", + "lagvander2d", + "lagvander3d", + "lagcompanion", + "laggauss", + "lagweight", +] + +poly2lag: _FuncPoly2Ortho[L["poly2lag"]] +lag2poly: _FuncUnOp[L["lag2poly"]] + +lagdomain: Final[_Array2[np.float64]] +lagzero: Final[_Array1[np.int_]] +lagone: Final[_Array1[np.int_]] +lagx: Final[_Array2[np.int_]] + +lagline: _FuncLine[L["lagline"]] +lagfromroots: _FuncFromRoots[L["lagfromroots"]] +lagadd: _FuncBinOp[L["lagadd"]] +lagsub: _FuncBinOp[L["lagsub"]] +lagmulx: _FuncUnOp[L["lagmulx"]] +lagmul: _FuncBinOp[L["lagmul"]] +lagdiv: _FuncBinOp[L["lagdiv"]] +lagpow: _FuncPow[L["lagpow"]] +lagder: _FuncDer[L["lagder"]] +lagint: _FuncInteg[L["lagint"]] +lagval: _FuncVal[L["lagval"]] +lagval2d: _FuncVal2D[L["lagval2d"]] +lagval3d: _FuncVal3D[L["lagval3d"]] +lagvalfromroots: _FuncValFromRoots[L["lagvalfromroots"]] +laggrid2d: _FuncVal2D[L["laggrid2d"]] +laggrid3d: _FuncVal3D[L["laggrid3d"]] +lagvander: _FuncVander[L["lagvander"]] +lagvander2d: _FuncVander2D[L["lagvander2d"]] +lagvander3d: _FuncVander3D[L["lagvander3d"]] +lagfit: _FuncFit[L["lagfit"]] +lagcompanion: _FuncCompanion[L["lagcompanion"]] +lagroots: _FuncRoots[L["lagroots"]] +laggauss: _FuncGauss[L["laggauss"]] +lagweight: _FuncWeight[L["lagweight"]] + + +class Laguerre(ABCPolyBase): ... 
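+
+# Usage sketch for the ``_FuncGauss`` annotation above (illustrative): the
+# Gauss-Laguerre points and weights come back as a pair of 1-D ``float64``
+# arrays:
+#
+#     x, w = laggauss(10)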
diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index 97c6478f80f8..8712c5ddc274 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -1,47 +1,99 @@ -from typing import Any +from typing import Final, Literal as L -from numpy import int_ -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef +import numpy as np -__all__: list[str] +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as legtrim -legtrim = trimcoef +__all__ = [ + "legzero", + "legone", + "legx", + "legdomain", + "legline", + "legadd", + "legsub", + "legmulx", + "legmul", + "legdiv", + "legpow", + "legval", + "legder", + "legint", + "leg2poly", + "poly2leg", + "legfromroots", + "legvander", + "legfit", + "legtrim", + "legroots", + "Legendre", + "legval2d", + "legval3d", + "leggrid2d", + "leggrid3d", + "legvander2d", + "legvander3d", + "legcompanion", + "leggauss", + "legweight", +] -def poly2leg(pol): ... -def leg2poly(c): ... +poly2leg: _FuncPoly2Ortho[L["poly2leg"]] +leg2poly: _FuncUnOp[L["leg2poly"]] -legdomain: NDArray[int_] -legzero: NDArray[int_] -legone: NDArray[int_] -legx: NDArray[int_] +legdomain: Final[_Array2[np.float64]] +legzero: Final[_Array1[np.int_]] +legone: Final[_Array1[np.int_]] +legx: Final[_Array2[np.int_]] -def legline(off, scl): ... -def legfromroots(roots): ... -def legadd(c1, c2): ... -def legsub(c1, c2): ... -def legmulx(c): ... -def legmul(c1, c2): ... -def legdiv(c1, c2): ... -def legpow(c, pow, maxpower=...): ... -def legder(c, m=..., scl=..., axis=...): ... -def legint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def legval(x, c, tensor=...): ... -def legval2d(x, y, c): ... -def leggrid2d(x, y, c): ... -def legval3d(x, y, z, c): ... -def leggrid3d(x, y, z, c): ... -def legvander(x, deg): ... -def legvander2d(x, y, deg): ... -def legvander3d(x, y, z, deg): ... -def legfit(x, y, deg, rcond=..., full=..., w=...): ... -def legcompanion(c): ... -def legroots(c): ... -def leggauss(deg): ... -def legweight(x): ... +legline: _FuncLine[L["legline"]] +legfromroots: _FuncFromRoots[L["legfromroots"]] +legadd: _FuncBinOp[L["legadd"]] +legsub: _FuncBinOp[L["legsub"]] +legmulx: _FuncUnOp[L["legmulx"]] +legmul: _FuncBinOp[L["legmul"]] +legdiv: _FuncBinOp[L["legdiv"]] +legpow: _FuncPow[L["legpow"]] +legder: _FuncDer[L["legder"]] +legint: _FuncInteg[L["legint"]] +legval: _FuncVal[L["legval"]] +legval2d: _FuncVal2D[L["legval2d"]] +legval3d: _FuncVal3D[L["legval3d"]] +legvalfromroots: _FuncValFromRoots[L["legvalfromroots"]] +leggrid2d: _FuncVal2D[L["leggrid2d"]] +leggrid3d: _FuncVal3D[L["leggrid3d"]] +legvander: _FuncVander[L["legvander"]] +legvander2d: _FuncVander2D[L["legvander2d"]] +legvander3d: _FuncVander3D[L["legvander3d"]] +legfit: _FuncFit[L["legfit"]] +legcompanion: _FuncCompanion[L["legcompanion"]] +legroots: _FuncRoots[L["legroots"]] +leggauss: _FuncGauss[L["leggauss"]] +legweight: _FuncWeight[L["legweight"]] -class Legendre(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +class Legendre(ABCPolyBase): ... 
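+
+# Scalar-versus-array inference for the ``_FuncVal`` annotation above
+# (illustrative):
+#
+#     legval(0.5, [1.0, 2.0, 3.0])         # -> np.floating[Any]
+#     legval([0.0, 0.5], [1.0, 2.0, 3.0])  # -> npt.NDArray[np.floating[Any]]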
diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi
index f8b62e529f23..4746b74ef251 100644
--- a/numpy/polynomial/polynomial.pyi
+++ b/numpy/polynomial/polynomial.pyi
@@ -1,42 +1,87 @@
-from typing import Any
+from typing import Final, Literal as L
 
-from numpy import int_
-from numpy.typing import NDArray
-from numpy.polynomial._polybase import ABCPolyBase
-from numpy.polynomial.polyutils import trimcoef
+import numpy as np
+from ._polybase import ABCPolyBase
+from ._polytypes import (
+    _Array1,
+    _Array2,
+    _FuncVal2D,
+    _FuncVal3D,
+    _FuncBinOp,
+    _FuncCompanion,
+    _FuncDer,
+    _FuncFit,
+    _FuncFromRoots,
+    _FuncInteg,
+    _FuncLine,
+    _FuncPow,
+    _FuncRoots,
+    _FuncUnOp,
+    _FuncVal,
+    _FuncVander,
+    _FuncVander2D,
+    _FuncVander3D,
+    _FuncValFromRoots,
+)
+from .polyutils import trimcoef as polytrim
 
-__all__: list[str]
+__all__ = [
+    "polyzero",
+    "polyone",
+    "polyx",
+    "polydomain",
+    "polyline",
+    "polyadd",
+    "polysub",
+    "polymulx",
+    "polymul",
+    "polydiv",
+    "polypow",
+    "polyval",
+    "polyvalfromroots",
+    "polyder",
+    "polyint",
+    "polyfromroots",
+    "polyvander",
+    "polyfit",
+    "polytrim",
+    "polyroots",
+    "Polynomial",
+    "polyval2d",
+    "polyval3d",
+    "polygrid2d",
+    "polygrid3d",
+    "polyvander2d",
+    "polyvander3d",
+    "polycompanion",
+]
 
-polytrim = trimcoef
+polydomain: Final[_Array2[np.float64]]
+polyzero: Final[_Array1[np.int_]]
+polyone: Final[_Array1[np.int_]]
+polyx: Final[_Array2[np.int_]]
 
-polydomain: NDArray[int_]
-polyzero: NDArray[int_]
-polyone: NDArray[int_]
-polyx: NDArray[int_]
+polyline: _FuncLine[L["polyline"]]
+polyfromroots: _FuncFromRoots[L["polyfromroots"]]
+polyadd: _FuncBinOp[L["polyadd"]]
+polysub: _FuncBinOp[L["polysub"]]
+polymulx: _FuncUnOp[L["polymulx"]]
+polymul: _FuncBinOp[L["polymul"]]
+polydiv: _FuncBinOp[L["polydiv"]]
+polypow: _FuncPow[L["polypow"]]
+polyder: _FuncDer[L["polyder"]]
+polyint: _FuncInteg[L["polyint"]]
+polyval: _FuncVal[L["polyval"]]
+polyval2d: _FuncVal2D[L["polyval2d"]]
+polyval3d: _FuncVal3D[L["polyval3d"]]
+polyvalfromroots: _FuncValFromRoots[L["polyvalfromroots"]]
+polygrid2d: _FuncVal2D[L["polygrid2d"]]
+polygrid3d: _FuncVal3D[L["polygrid3d"]]
+polyvander: _FuncVander[L["polyvander"]]
+polyvander2d: _FuncVander2D[L["polyvander2d"]]
+polyvander3d: _FuncVander3D[L["polyvander3d"]]
+polyfit: _FuncFit[L["polyfit"]]
+polycompanion: _FuncCompanion[L["polycompanion"]]
+polyroots: _FuncRoots[L["polyroots"]]
 
-def polyline(off, scl): ...
-def polyfromroots(roots): ...
-def polyadd(c1, c2): ...
-def polysub(c1, c2): ...
-def polymulx(c): ...
-def polymul(c1, c2): ...
-def polydiv(c1, c2): ...
-def polypow(c, pow, maxpower=...): ...
-def polyder(c, m=..., scl=..., axis=...): ...
-def polyint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ...
-def polyval(x, c, tensor=...): ...
-def polyvalfromroots(x, r, tensor=...): ...
-def polyval2d(x, y, c): ...
-def polygrid2d(x, y, c): ...
-def polyval3d(x, y, z, c): ...
-def polygrid3d(x, y, z, c): ...
-def polyvander(x, deg): ...
-def polyvander2d(x, y, deg): ...
-def polyvander3d(x, y, z, deg): ...
-def polyfit(x, y, deg, rcond=..., full=..., w=...): ...
-def polyroots(c): ...
-
-class Polynomial(ABCPolyBase):
-    domain: Any
-    window: Any
-    basis_name: Any
+class Polynomial(ABCPolyBase): ...
diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi
index 0eccd6cdc2a4..ee84d63d7352 100644
--- a/numpy/polynomial/polyutils.pyi
+++ b/numpy/polynomial/polyutils.pyi
@@ -1,9 +1,430 @@
-__all__: list[str]
-
-def trimseq(seq): ...
-def as_series(alist, trim=...): ...
-def trimcoef(c, tol=...): ...
-def getdomain(x): ...
-def mapparms(old, new): ...
-def mapdomain(x, old, new): ...
-def format_float(x, parens=...): ...
+from collections.abc import Callable, Iterable, Sequence
+from typing import (
+    Any,
+    Final,
+    Literal,
+    SupportsIndex,
+    TypeAlias,
+    TypeVar,
+    overload,
+)
+
+import numpy as np
+import numpy.typing as npt
+from numpy._typing import _ArrayLikeInt_co, _FloatLike_co
+
+from ._polytypes import (
+    _AnyComplexSeriesND,
+    _AnyFloatSeriesND,
+    _AnyInt,
+    _AnyScalar,
+    _AnyComplexSeries1D,
+    _AnyFloatSeries1D,
+    _AnyNumberSeries1D,
+    _AnyObjectScalar,
+    _AnyObjectSeries1D,
+    _AnySeries1D,
+    _AnySeriesND,
+    _Array1D,
+    _AnyFloatScalar,
+    _CoefArrayND,
+    _CoefArray1D,
+    _FuncBinOp,
+    _FuncValND,
+    _FuncVanderND,
+    _Interval,
+    _AnyNumberScalar,
+    _SimpleSequence,
+    _SupportsLenAndGetItem,
+    _Tuple2,
+)
+
+__all__ = [
+    "as_series",
+    "format_float",
+    "getdomain",
+    "mapdomain",
+    "mapparms",
+    "trimcoef",
+    "trimseq",
+]
+
+_AnyLineF: TypeAlias = Callable[[_AnyScalar, _AnyScalar], _CoefArrayND]
+_AnyMulF: TypeAlias = Callable[[npt.ArrayLike, npt.ArrayLike], _CoefArrayND]
+_AnyVanderF: TypeAlias = Callable[[npt.ArrayLike, SupportsIndex], _CoefArrayND]
+
+@overload
+def as_series(
+    alist: npt.NDArray[np.integer[Any]],
+    trim: bool = ...,
+) -> list[_Array1D[np.floating[Any]]]: ...
+@overload
+def as_series(
+    alist: npt.NDArray[np.floating[Any]],
+    trim: bool = ...,
+) -> list[_Array1D[np.floating[Any]]]: ...
+@overload
+def as_series(
+    alist: npt.NDArray[np.complexfloating[Any, Any]],
+    trim: bool = ...,
+) -> list[_Array1D[np.complexfloating[Any, Any]]]: ...
+@overload
+def as_series(
+    alist: npt.NDArray[np.object_],
+    trim: bool = ...,
+) -> list[_Array1D[np.object_]]: ...
+@overload
+def as_series(  # type: ignore[overload-overlap]
+    alist: Iterable[npt.NDArray[np.integer[Any]]],
+    trim: bool = ...,
+) -> list[_Array1D[np.floating[Any]]]: ...
+@overload
+def as_series(
+    alist: Iterable[npt.NDArray[np.floating[Any]]],
+    trim: bool = ...,
+) -> list[_Array1D[np.floating[Any]]]: ...
+@overload
+def as_series(
+    alist: Iterable[npt.NDArray[np.complexfloating[Any, Any]]],
+    trim: bool = ...,
+) -> list[_Array1D[np.complexfloating[Any, Any]]]: ...
+@overload
+def as_series(
+    alist: Iterable[npt.NDArray[np.object_]],
+    trim: bool = ...,
+) -> list[_Array1D[np.object_]]: ...
+@overload
+def as_series(  # type: ignore[overload-overlap]
+    alist: Iterable[_AnyFloatSeries1D | float],
+    trim: bool = ...,
+) -> list[_Array1D[np.floating[Any]]]: ...
+@overload
+def as_series(
+    alist: Iterable[_AnyComplexSeries1D | complex],
+    trim: bool = ...,
+) -> list[_Array1D[np.complexfloating[Any, Any]]]: ...
+@overload
+def as_series(
+    alist: Iterable[_AnyObjectSeries1D | object],
+    trim: bool = ...,
+) -> list[_Array1D[np.object_]]: ...
+
+_T_seq = TypeVar("_T_seq", bound=_CoefArrayND | _SimpleSequence[_AnyScalar])
+def trimseq(seq: _T_seq) -> _T_seq: ...
+
+@overload
+def trimcoef(  # type: ignore[overload-overlap]
+    c: npt.NDArray[np.integer[Any]] | npt.NDArray[np.floating[Any]],
+    tol: _AnyFloatScalar = ...,
+) -> _Array1D[np.floating[Any]]: ...
+@overload
+def trimcoef(
+    c: npt.NDArray[np.complexfloating[Any, Any]],
+    tol: _AnyFloatScalar = ...,
+) -> _Array1D[np.complexfloating[Any, Any]]: ...
+@overload
+def trimcoef(
+    c: npt.NDArray[np.object_],
+    tol: _AnyFloatScalar = ...,
+) -> _Array1D[np.object_]: ...
+@overload +def trimcoef( # type: ignore[overload-overlap] + c: _AnyFloatSeries1D | float, + tol: _AnyFloatScalar = ..., +) -> _Array1D[np.floating[Any]]: ... +@overload +def trimcoef( + c: _AnyComplexSeries1D | complex, + tol: _AnyFloatScalar = ..., +) -> _Array1D[np.complexfloating[Any, Any]]: ... +@overload +def trimcoef( + c: _AnyObjectSeries1D | object, + tol: _AnyFloatScalar = ..., +) -> _Array1D[np.object_]: ... + +@overload +def getdomain( # type: ignore[overload-overlap] + x: npt.NDArray[np.floating[Any]] | npt.NDArray[np.integer[Any]], +) -> _Interval[np.float64]: ... +@overload +def getdomain( + x: npt.NDArray[np.complexfloating[Any, Any]], +) -> _Interval[np.complex128]: ... +@overload +def getdomain( + x: npt.NDArray[np.object_], +) -> _Interval[np.object_]: ... +@overload +def getdomain( # type: ignore[overload-overlap] + x: _AnyFloatSeries1D | float, +) -> _Interval[np.float64]: ... +@overload +def getdomain( + x: _AnyComplexSeries1D | complex, +) -> _Interval[np.complex128]: ... +@overload +def getdomain( + x: _AnyObjectSeries1D | object, +) -> _Interval[np.object_]: ... + +@overload +def mapparms( # type: ignore[overload-overlap] + old: npt.NDArray[np.floating[Any] | np.integer[Any]], + new: npt.NDArray[np.floating[Any] | np.integer[Any]], +) -> _Tuple2[np.floating[Any]]: ... +@overload +def mapparms( + old: npt.NDArray[np.number[Any]], + new: npt.NDArray[np.number[Any]], +) -> _Tuple2[np.complexfloating[Any, Any]]: ... +@overload +def mapparms( + old: npt.NDArray[np.object_ | np.number[Any]], + new: npt.NDArray[np.object_ | np.number[Any]], +) -> _Tuple2[object]: ... +@overload +def mapparms( # type: ignore[overload-overlap] + old: _SupportsLenAndGetItem[float], + new: _SupportsLenAndGetItem[float], +) -> _Tuple2[float]: ... +@overload +def mapparms( + old: _SupportsLenAndGetItem[complex], + new: _SupportsLenAndGetItem[complex], +) -> _Tuple2[complex]: ... +@overload +def mapparms( + old: _AnyFloatSeries1D, + new: _AnyFloatSeries1D, +) -> _Tuple2[np.floating[Any]]: ... +@overload +def mapparms( + old: _AnyNumberSeries1D, + new: _AnyNumberSeries1D, +) -> _Tuple2[np.complexfloating[Any, Any]]: ... +@overload +def mapparms( + old: _AnySeries1D, + new: _AnySeries1D, +) -> _Tuple2[object]: ... + +@overload +def mapdomain( # type: ignore[overload-overlap] + x: _AnyFloatScalar, + old: _AnyFloatSeries1D, + new: _AnyFloatSeries1D, +) -> np.floating[Any]: ... +@overload +def mapdomain( + x: _AnyNumberScalar, + old: _AnyComplexSeries1D, + new: _AnyComplexSeries1D, +) -> np.complexfloating[Any, Any]: ... +@overload +def mapdomain( + x: _AnyObjectScalar | _AnyNumberScalar, + old: _AnyObjectSeries1D | _AnyComplexSeries1D, + new: _AnyObjectSeries1D | _AnyComplexSeries1D, +) -> object: ... +@overload +def mapdomain( # type: ignore[overload-overlap] + x: npt.NDArray[np.floating[Any] | np.integer[Any]], + old: npt.NDArray[np.floating[Any] | np.integer[Any]], + new: npt.NDArray[np.floating[Any] | np.integer[Any]], +) -> _Array1D[np.floating[Any]]: ... +@overload +def mapdomain( + x: npt.NDArray[np.number[Any]], + old: npt.NDArray[np.number[Any]], + new: npt.NDArray[np.number[Any]], +) -> _Array1D[np.complexfloating[Any, Any]]: ... +@overload +def mapdomain( + x: npt.NDArray[np.object_ | np.number[Any]], + old: npt.NDArray[np.object_ | np.number[Any]], + new: npt.NDArray[np.object_ | np.number[Any]], +) -> _Array1D[np.object_]: ... +@overload +def mapdomain( + x: _AnyFloatSeries1D, + old: _AnyFloatSeries1D, + new: _AnyFloatSeries1D, +) -> _Array1D[np.floating[Any]]: ... 
+@overload +def mapdomain( + x: _AnyNumberSeries1D, + old: _AnyNumberSeries1D, + new: _AnyNumberSeries1D, +) -> _Array1D[np.complexfloating[Any, Any]]: ... +@overload +def mapdomain( + x: _AnySeries1D, + old:_AnySeries1D, + new: _AnySeries1D, +) -> _Array1D[np.object_]: ... +@overload +def mapdomain( + x: object, + old: _AnySeries1D, + new: _AnySeries1D, +) -> object: ... + +def _nth_slice( + i: SupportsIndex, + ndim: SupportsIndex, +) -> tuple[None | slice, ...]: ... + +_vander_nd: _FuncVanderND[Literal["_vander_nd"]] +_vander_nd_flat: _FuncVanderND[Literal["_vander_nd_flat"]] + +# keep in sync with `._polytypes._FuncFromRoots` +@overload +def _fromroots( # type: ignore[overload-overlap] + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _AnyFloatSeries1D, +) -> _Array1D[np.floating[Any]]: ... +@overload +def _fromroots( + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _AnyComplexSeries1D, +) -> _Array1D[np.complexfloating[Any, Any]]: ... +@overload +def _fromroots( + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _AnyObjectSeries1D, +) -> _Array1D[np.object_]: ... +@overload +def _fromroots( + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _AnySeries1D, +) -> _CoefArray1D: ... + +_valnd: _FuncValND[Literal["_valnd"]] +_gridnd: _FuncValND[Literal["_gridnd"]] + +# keep in sync with `_polytypes._FuncBinOp` +@overload +def _div( # type: ignore[overload-overlap] + mul_f: _AnyMulF, + c1: _AnyFloatSeries1D, + c2: _AnyFloatSeries1D, +) -> _Tuple2[_Array1D[np.floating[Any]]]: ... +@overload +def _div( + mul_f: _AnyMulF, + c1: _AnyComplexSeries1D, + c2: _AnyComplexSeries1D, +) -> _Tuple2[_Array1D[np.complexfloating[Any, Any]]]: ... +@overload +def _div( + mul_f: _AnyMulF, + c1: _AnyObjectSeries1D, + c2: _AnyObjectSeries1D, +) -> _Tuple2[_Array1D[np.object_]]: ... +@overload +def _div( + mul_f: _AnyMulF, + c1: _AnySeries1D, + c2: _AnySeries1D, +) -> _Tuple2[_CoefArray1D]: ... + +_add: Final[_FuncBinOp] +_sub: Final[_FuncBinOp] + +# keep in sync with `_polytypes._FuncPow` +@overload +def _pow( # type: ignore[overload-overlap] + mul_f: _AnyMulF, + c: _AnyFloatSeries1D, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., +) -> _Array1D[np.floating[Any]]: ... +@overload +def _pow( + mul_f: _AnyMulF, + c: _AnyComplexSeries1D, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., +) -> _Array1D[np.complexfloating[Any, Any]]: ... +@overload +def _pow( + mul_f: _AnyMulF, + c: _AnyObjectSeries1D, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., +) -> _Array1D[np.object_]: ... +@overload +def _pow( + mul_f: _AnyMulF, + c: _AnySeries1D, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., +) -> _CoefArray1D: ... + +# keep in sync with `_polytypes._FuncFit` +@overload +def _fit( # type: ignore[overload-overlap] + vander_f: _AnyVanderF, + x: _AnyFloatSeries1D, + y: _AnyFloatSeriesND, + deg: _ArrayLikeInt_co, + domain: None | _AnyFloatSeries1D = ..., + rcond: None | float = ..., + full: Literal[False] = ..., + w: None | _AnyFloatSeries1D = ..., +) -> npt.NDArray[np.floating[Any]]: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _AnyComplexSeries1D, + y: _AnyComplexSeriesND, + deg: _ArrayLikeInt_co, + domain: None | _AnyComplexSeries1D = ..., + rcond: None | float = ..., + full: Literal[False] = ..., + w: None | _AnyComplexSeries1D = ..., +) -> npt.NDArray[np.complexfloating[Any, Any]]: ... 
+@overload +def _fit( + vander_f: _AnyVanderF, + x: _AnySeries1D, + y: _AnySeriesND, + deg: _ArrayLikeInt_co, + domain: None | _AnySeries1D = ..., + rcond: None | float = ..., + full: Literal[False] = ..., + w: None | _AnySeries1D = ..., +) -> _CoefArrayND: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _AnySeries1D, + y: _AnySeries1D, + deg: _ArrayLikeInt_co, + domain: None | _AnySeries1D, + rcond: None | float , + full: Literal[True], + /, + w: None | _AnySeries1D = ..., +) -> tuple[_CoefArray1D, Sequence[np.inexact[Any] | np.int32]]: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _AnySeries1D, + y: _AnySeries1D, + deg: _ArrayLikeInt_co, + domain: None | _AnySeries1D = ..., + rcond: None | float = ..., + *, + full: Literal[True], + w: None | _AnySeries1D = ..., +) -> tuple[_CoefArray1D, Sequence[np.inexact[Any] | np.int32]]: ... + +def _as_int(x: SupportsIndex, desc: str) -> int: ... +def format_float(x: _FloatLike_co, parens: bool = ...) -> str: ... diff --git a/numpy/typing/tests/data/reveal/polyutils.pyi b/numpy/typing/tests/data/reveal/polyutils.pyi new file mode 100644 index 000000000000..4a8377d81deb --- /dev/null +++ b/numpy/typing/tests/data/reveal/polyutils.pyi @@ -0,0 +1,235 @@ +import sys +from collections.abc import Sequence +from fractions import Fraction +from typing import Any, Literal as L, TypeAlias + +import numpy as np +import numpy.typing as npt +import numpy.polynomial.polyutils as pu +from numpy.polynomial._polytypes import _Tuple2 + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] +_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating[Any, Any]]] +_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +_ArrFloat1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64]] +_ArrComplex1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] +_ArrObject1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +num_int: int +num_float: float +num_complex: complex +# will result in an `object_` dtype +num_fraction: Fraction +num_object: object + +sct_int: np.int_ +sct_float: np.float64 +sct_complex: np.complex128 +sct_object: np.object_ # doesn't exist at runtime + +arr_int: npt.NDArray[np.int_] +arr_float: npt.NDArray[np.float64] +arr_complex: npt.NDArray[np.complex128] +arr_object: npt.NDArray[np.object_] + +seq_num_int: Sequence[int] +seq_num_float: Sequence[float] +seq_num_complex: Sequence[complex] +seq_num_fraction: Sequence[Fraction] +seq_num_object: Sequence[object] + +seq_sct_int: Sequence[np.int_] +seq_sct_float: Sequence[np.float64] +seq_sct_complex: Sequence[np.complex128] +seq_sct_object: Sequence[np.object_] + +seq_arr_int: Sequence[npt.NDArray[np.int_]] +seq_arr_float: Sequence[npt.NDArray[np.float64]] +seq_arr_complex: Sequence[npt.NDArray[np.complex128]] +seq_arr_object: Sequence[npt.NDArray[np.object_]] + +seq_seq_num_int: Sequence[Sequence[int]] +seq_seq_num_float: Sequence[Sequence[float]] +seq_seq_num_complex: Sequence[Sequence[complex]] +seq_seq_num_fraction: Sequence[Sequence[Fraction]] +seq_seq_num_object: Sequence[Sequence[object]] + +seq_seq_sct_int: Sequence[Sequence[np.int_]] +seq_seq_sct_float: Sequence[Sequence[np.float64]] +seq_seq_sct_complex: Sequence[Sequence[np.complex128]] +seq_seq_sct_object: Sequence[Sequence[np.object_]] # doesn't exist at runtime + +# as_series + +assert_type(pu.as_series(arr_int), 
list[_ArrFloat1D]) +assert_type(pu.as_series(arr_float), list[_ArrFloat1D]) +assert_type(pu.as_series(arr_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(arr_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_num_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_num_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_num_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_num_fraction), list[_ArrObject1D]) +assert_type(pu.as_series(seq_num_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_sct_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_sct_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_sct_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_sct_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_arr_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_arr_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_arr_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_arr_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_seq_num_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_num_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_num_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_seq_num_fraction), list[_ArrObject1D]) +assert_type(pu.as_series(seq_seq_num_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_seq_sct_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_sct_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_sct_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_seq_sct_object), list[_ArrObject1D]) + +# trimcoef + +assert_type(pu.trimcoef(num_int), _ArrFloat1D) +assert_type(pu.trimcoef(num_float), _ArrFloat1D) +assert_type(pu.trimcoef(num_complex), _ArrComplex1D) +assert_type(pu.trimcoef(num_fraction), _ArrObject1D) +assert_type(pu.trimcoef(num_object), _ArrObject1D) + +assert_type(pu.trimcoef(sct_int), _ArrFloat1D) +assert_type(pu.trimcoef(sct_float), _ArrFloat1D) +assert_type(pu.trimcoef(sct_complex), _ArrComplex1D) +assert_type(pu.trimcoef(sct_object), _ArrObject1D) + +assert_type(pu.trimcoef(arr_int), _ArrFloat1D) +assert_type(pu.trimcoef(arr_float), _ArrFloat1D) +assert_type(pu.trimcoef(arr_complex), _ArrComplex1D) +assert_type(pu.trimcoef(arr_object), _ArrObject1D) + +assert_type(pu.trimcoef(seq_num_int), _ArrFloat1D) +assert_type(pu.trimcoef(seq_num_float), _ArrFloat1D) +assert_type(pu.trimcoef(seq_num_complex), _ArrComplex1D) +assert_type(pu.trimcoef(seq_num_fraction), _ArrObject1D) +assert_type(pu.trimcoef(seq_num_object), _ArrObject1D) + +assert_type(pu.trimcoef(seq_sct_int), _ArrFloat1D) +assert_type(pu.trimcoef(seq_sct_float), _ArrFloat1D) +assert_type(pu.trimcoef(seq_sct_complex), _ArrComplex1D) +assert_type(pu.trimcoef(seq_sct_object), _ArrObject1D) + +# getdomain + +assert_type(pu.getdomain(num_int), _ArrFloat1D_2) +assert_type(pu.getdomain(num_float), _ArrFloat1D_2) +assert_type(pu.getdomain(num_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(num_fraction), _ArrObject1D_2) +assert_type(pu.getdomain(num_object), _ArrObject1D_2) + +assert_type(pu.getdomain(sct_int), _ArrFloat1D_2) +assert_type(pu.getdomain(sct_float), _ArrFloat1D_2) +assert_type(pu.getdomain(sct_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(sct_object), _ArrObject1D_2) + +assert_type(pu.getdomain(arr_int), _ArrFloat1D_2) +assert_type(pu.getdomain(arr_float), _ArrFloat1D_2) +assert_type(pu.getdomain(arr_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(arr_object), _ArrObject1D_2) + 
+assert_type(pu.getdomain(seq_num_int), _ArrFloat1D_2)
+assert_type(pu.getdomain(seq_num_float), _ArrFloat1D_2)
+assert_type(pu.getdomain(seq_num_complex), _ArrComplex1D_2)
+assert_type(pu.getdomain(seq_num_fraction), _ArrObject1D_2)
+assert_type(pu.getdomain(seq_num_object), _ArrObject1D_2)
+
+assert_type(pu.getdomain(seq_sct_int), _ArrFloat1D_2)
+assert_type(pu.getdomain(seq_sct_float), _ArrFloat1D_2)
+assert_type(pu.getdomain(seq_sct_complex), _ArrComplex1D_2)
+assert_type(pu.getdomain(seq_sct_object), _ArrObject1D_2)
+
+# mapparms
+
+assert_type(pu.mapparms(seq_num_int, seq_num_int), _Tuple2[float])
+assert_type(pu.mapparms(seq_num_int, seq_num_float), _Tuple2[float])
+assert_type(pu.mapparms(seq_num_float, seq_num_float), _Tuple2[float])
+assert_type(pu.mapparms(seq_num_float, seq_num_complex), _Tuple2[complex])
+assert_type(pu.mapparms(seq_num_complex, seq_num_complex), _Tuple2[complex])
+assert_type(pu.mapparms(seq_num_complex, seq_num_fraction), _Tuple2[object])
+assert_type(pu.mapparms(seq_num_fraction, seq_num_fraction), _Tuple2[object])
+assert_type(pu.mapparms(seq_num_object, seq_num_object), _Tuple2[object])
+
+assert_type(pu.mapparms(seq_sct_int, seq_sct_int), _Tuple2[np.floating[Any]])
+assert_type(pu.mapparms(seq_sct_int, seq_sct_float), _Tuple2[np.floating[Any]])
+assert_type(pu.mapparms(seq_sct_float, seq_sct_float), _Tuple2[np.floating[Any]])
+assert_type(pu.mapparms(seq_sct_float, seq_sct_complex), _Tuple2[np.complexfloating[Any, Any]])
+assert_type(pu.mapparms(seq_sct_complex, seq_sct_complex), _Tuple2[np.complexfloating[Any, Any]])
+assert_type(pu.mapparms(seq_sct_complex, seq_sct_object), _Tuple2[object])
+assert_type(pu.mapparms(seq_sct_object, seq_sct_object), _Tuple2[object])
+
+assert_type(pu.mapparms(arr_int, arr_int), _Tuple2[np.floating[Any]])
+assert_type(pu.mapparms(arr_int, arr_float), _Tuple2[np.floating[Any]])
+assert_type(pu.mapparms(arr_float, arr_float), _Tuple2[np.floating[Any]])
+assert_type(pu.mapparms(arr_float, arr_complex), _Tuple2[np.complexfloating[Any, Any]])
+assert_type(pu.mapparms(arr_complex, arr_complex), _Tuple2[np.complexfloating[Any, Any]])
+assert_type(pu.mapparms(arr_complex, arr_object), _Tuple2[object])
+assert_type(pu.mapparms(arr_object, arr_object), _Tuple2[object])
+
+# mapdomain
+
+assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_int), np.floating[Any])
+assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_float), np.floating[Any])
+assert_type(pu.mapdomain(num_int, seq_num_float, seq_num_float), np.floating[Any])
+assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_float), np.floating[Any])
+assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_complex), np.complexfloating[Any, Any])
+assert_type(pu.mapdomain(num_float, seq_num_complex, seq_num_complex), np.complexfloating[Any, Any])
+assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_complex), np.complexfloating[Any, Any])
+assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_fraction), object)
+assert_type(pu.mapdomain(num_complex, seq_num_fraction, seq_num_fraction), object)
+assert_type(pu.mapdomain(num_fraction, seq_num_fraction, seq_num_fraction), object)
+assert_type(pu.mapdomain(num_object, seq_num_object, seq_num_object), object)
+
+assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_int), _ArrFloat1D)
+assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_float), _ArrFloat1D)
+assert_type(pu.mapdomain(seq_num_int, seq_num_float, seq_num_float), _ArrFloat1D)
+assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_float), _ArrFloat1D)
+assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_complex), _ArrComplex1D)
+assert_type(pu.mapdomain(seq_num_float, seq_num_complex, seq_num_complex), _ArrComplex1D)
+assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_complex), _ArrComplex1D)
+assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_fraction), _ArrObject1D)
+assert_type(pu.mapdomain(seq_num_complex, seq_num_fraction, seq_num_fraction), _ArrObject1D)
+assert_type(pu.mapdomain(seq_num_fraction, seq_num_fraction, seq_num_fraction), _ArrObject1D)
+assert_type(pu.mapdomain(seq_num_object, seq_num_object, seq_num_object), _ArrObject1D)
+
+assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_int), _ArrFloat1D)
+assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_float), _ArrFloat1D)
+assert_type(pu.mapdomain(seq_sct_int, seq_sct_float, seq_sct_float), _ArrFloat1D)
+assert_type(pu.mapdomain(seq_sct_float, seq_sct_float, seq_sct_float), _ArrFloat1D)
+assert_type(pu.mapdomain(seq_sct_float, seq_sct_float, seq_sct_complex), _ArrComplex1D)
+assert_type(pu.mapdomain(seq_sct_float, seq_sct_complex, seq_sct_complex), _ArrComplex1D)
+assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_complex), _ArrComplex1D)
+assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_object), _ArrObject1D)
+assert_type(pu.mapdomain(seq_sct_complex, seq_sct_object, seq_sct_object), _ArrObject1D)
+assert_type(pu.mapdomain(seq_sct_object, seq_sct_object, seq_sct_object), _ArrObject1D)
+
+assert_type(pu.mapdomain(arr_int, arr_int, arr_int), _ArrFloat1D)
+assert_type(pu.mapdomain(arr_int, arr_int, arr_float), _ArrFloat1D)
+assert_type(pu.mapdomain(arr_int, arr_float, arr_float), _ArrFloat1D)
+assert_type(pu.mapdomain(arr_float, arr_float, arr_float), _ArrFloat1D)
+assert_type(pu.mapdomain(arr_float, arr_float, arr_complex), _ArrComplex1D)
+assert_type(pu.mapdomain(arr_float, arr_complex, arr_complex), _ArrComplex1D)
+assert_type(pu.mapdomain(arr_complex, arr_complex, arr_complex), _ArrComplex1D)
+assert_type(pu.mapdomain(arr_complex, arr_complex, arr_object), _ArrObject1D)
+assert_type(pu.mapdomain(arr_complex, arr_object, arr_object), _ArrObject1D)
+assert_type(pu.mapdomain(arr_object, arr_object, arr_object), _ArrObject1D)

From b3df5ae1fec3adb565d84d5da23dda711f242181 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Wed, 10 Jul 2024 18:14:58 +0200
Subject: [PATCH 807/980] TYP: annotate `ABCPolyBase.basis_name` in
 `numpy.polynomial._polybase` through generic type params

---
 numpy/polynomial/_polybase.pyi  | 8 +++++---
 numpy/polynomial/chebyshev.pyi  | 2 +-
 numpy/polynomial/hermite.pyi    | 2 +-
 numpy/polynomial/hermite_e.pyi  | 2 +-
 numpy/polynomial/laguerre.pyi   | 2 +-
 numpy/polynomial/legendre.pyi   | 2 +-
 numpy/polynomial/polynomial.pyi | 2 +-
 7 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi
index 9baed2ea91f9..27fa9bfc5bc7 100644
--- a/numpy/polynomial/_polybase.pyi
+++ b/numpy/polynomial/_polybase.pyi
@@ -4,6 +4,7 @@ from typing import (
     TYPE_CHECKING,
     Any,
     ClassVar,
+    Generic,
     Literal,
     SupportsComplex,
     SupportsIndex,
@@ -42,22 +43,23 @@ else:
 
 __all__ = ["ABCPolyBase"]
 
+_NameCo = TypeVar("_NameCo", bound=None | LiteralString, covariant=True)
 _Self = TypeVar("_Self", bound="ABCPolyBase")
 _Size = TypeVar("_Size",
bound=int) _AnyOther: TypeAlias = ABCPolyBase | _AnyScalar | _AnySeries1D _Hundred: TypeAlias = Literal[100] -class ABCPolyBase: +class ABCPolyBase(Generic[_NameCo]): __hash__: ClassVar[None] # type: ignore[assignment] __array_ufunc__: ClassVar[None] - basis_name: ClassVar[None | LiteralString] maxpower: ClassVar[_Hundred] _superscript_mapping: ClassVar[Mapping[int, str]] _subscript_mapping: ClassVar[Mapping[int, str]] _use_unicode: ClassVar[bool] + basis_name: _NameCo coef: _Array1D[np.number[Any]] domain: _Interval[Any] window: _Interval[Any] @@ -83,7 +85,7 @@ class ABCPolyBase: @overload def __call__( self, /, - arg: _AnyScalar, + arg: _AnyNumberScalar, ) -> np.float64 | np.complex128: ... @overload def __call__( diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index e6df3d328e72..7e0fe46093a8 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -154,7 +154,7 @@ def chebinterpolate( _Self = TypeVar("_Self", bound=object) -class Chebyshev(ABCPolyBase): +class Chebyshev(ABCPolyBase[L["T"]]): @overload @classmethod def interpolate( diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index d77e5cb66ecc..07db43d0c000 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -103,4 +103,4 @@ def _normed_hermite_n( hermgauss: _FuncGauss[L["hermgauss"]] hermweight: _FuncWeight[L["hermweight"]] -class Hermite(ABCPolyBase): ... +class Hermite(ABCPolyBase[L["H"]]): ... diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index 01537e260ea1..94ad7248f268 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -103,4 +103,4 @@ def _normed_hermite_e_n( hermegauss: _FuncGauss[L["hermegauss"]] hermeweight: _FuncWeight[L["hermeweight"]] -class HermiteE(ABCPolyBase): ... +class HermiteE(ABCPolyBase[L["He"]]): ... diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index b69f4dbbb384..ee8115795748 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -97,4 +97,4 @@ laggauss: _FuncGauss[L["laggauss"]] lagweight: _FuncWeight[L["lagweight"]] -class Laguerre(ABCPolyBase): ... +class Laguerre(ABCPolyBase[L["L"]]): ... diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index 8712c5ddc274..d81f3e6f54a4 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -96,4 +96,4 @@ legroots: _FuncRoots[L["legroots"]] leggauss: _FuncGauss[L["leggauss"]] legweight: _FuncWeight[L["legweight"]] -class Legendre(ABCPolyBase): ... +class Legendre(ABCPolyBase[L["P"]]): ... diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index 4746b74ef251..89a8b57185f3 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -84,4 +84,4 @@ polyfit: _FuncFit[L["polyfit"]] polycompanion: _FuncCompanion[L["polycompanion"]] polyroots: _FuncRoots[L["polyroots"]] -class Polynomial(ABCPolyBase): ... +class Polynomial(ABCPolyBase[None]): ... 
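With `ABCPolyBase` now generic over the covariant `_NameCo` parameter, each concrete class pins `basis_name` statically instead of sharing the old `ClassVar[None | LiteralString]`. An illustrative sketch of the narrowed attribute types, not part of the patch (assumes Python 3.11+ for `typing.assert_type`):

    from typing import Literal, assert_type

    from numpy.polynomial import Chebyshev, Polynomial

    # Chebyshev subclasses ABCPolyBase[L["T"]], so basis_name is the literal "T":
    assert_type(Chebyshev([1, 2, 3]).basis_name, Literal["T"])

    # Polynomial subclasses ABCPolyBase[None]; its basis_name is statically None:
    assert_type(Polynomial([1, 2, 3]).basis_name, None)
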
From d0ad0e500986a5754aa80d0d5be59337b945cf77 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Jul 2024 18:18:02 +0200 Subject: [PATCH 808/980] TYP: allow more scalar types in the `polynomial._polytypes._Interval` alias --- numpy/polynomial/_polytypes.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 23c2fffe45f3..6c8bec3662ee 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -109,7 +109,7 @@ _AnySeriesND: TypeAlias = ( | _NestedSequence[SupportsComplex | SupportsFloat] ) -_SCT_domain = TypeVar("_SCT_domain", np.float64, np.complex128, np.object_) +_SCT_domain = TypeVar("_SCT_domain", bound=np.inexact[Any] | np.object_) _Interval: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_SCT_domain]] _T = TypeVar("_T", bound=object) From 252cf3f8ced8cd6579d4cb378b3e864ce4cb7509 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Jul 2024 18:19:14 +0200 Subject: [PATCH 809/980] TYP: rename "reveal" type-tests `polyutils.pyi` to `polynomial_polyutils.pyi` --- .../tests/data/reveal/{polyutils.pyi => polynomial_polyutils.pyi} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename numpy/typing/tests/data/reveal/{polyutils.pyi => polynomial_polyutils.pyi} (100%) diff --git a/numpy/typing/tests/data/reveal/polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi similarity index 100% rename from numpy/typing/tests/data/reveal/polyutils.pyi rename to numpy/typing/tests/data/reveal/polynomial_polyutils.pyi From 8e105bf4ff122a8a00faf2b65eadc9cbe69a69a4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Jul 2024 19:27:55 +0200 Subject: [PATCH 810/980] TYP: consistent type alias naming and overload signatures in `numpy.polynomial` --- numpy/polynomial/_polybase.pyi | 26 +- numpy/polynomial/_polytypes.pyi | 535 +++++++++++--------------------- numpy/polynomial/polyutils.pyi | 93 +++--- 3 files changed, 241 insertions(+), 413 deletions(-) diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index 27fa9bfc5bc7..5cf8c3d473a7 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -6,9 +6,7 @@ from typing import ( ClassVar, Generic, Literal, - SupportsComplex, SupportsIndex, - SupportsInt, TypeAlias, TypeGuard, TypeVar, @@ -20,15 +18,15 @@ import numpy.typing as npt from numpy._typing import _ArrayLikeInt_co from ._polytypes import ( - _AnySeriesND, - _Array1D, + _AnyInt, + _AnyComplexScalar, _AnyComplexSeriesND, _AnyObjectSeriesND, _AnyScalar, _AnySeries1D, - _Interval, + _AnySeriesND, + _Array2, _CoefArray1D, - _AnyNumberScalar, _SupportsLenAndGetItem, _Tuple2, ) @@ -60,9 +58,9 @@ class ABCPolyBase(Generic[_NameCo]): _use_unicode: ClassVar[bool] basis_name: _NameCo - coef: _Array1D[np.number[Any]] - domain: _Interval[Any] - window: _Interval[Any] + coef: _CoefArray1D + domain: _Array2[np.inexact[Any] | np.object_] + window: _Array2[np.inexact[Any] | np.object_] _symbol: LiteralString @property @@ -85,7 +83,7 @@ class ABCPolyBase(Generic[_NameCo]): @overload def __call__( self, /, - arg: _AnyNumberScalar, + arg: _AnyComplexScalar, ) -> np.float64 | np.complex128: ... @overload def __call__( @@ -127,7 +125,7 @@ class ABCPolyBase(Generic[_NameCo]): def __rmod__(self: _Self, x: _AnyOther, /) -> _Self: ... def __rdivmod__(self: _Self, x: _AnyOther, /) -> _Tuple2[_Self]: ... def __len__(self, /) -> int: ... - def __iter__(self, /) -> Iterator[np.number[Any] | SupportsComplex]: ... 
+ def __iter__(self, /) -> Iterator[np.inexact[Any] | object]: ... def __getstate__(self, /) -> dict[str, Any]: ... def __setstate__(self, dict: dict[str, Any], /) -> None: ... @@ -140,7 +138,7 @@ class ABCPolyBase(Generic[_NameCo]): def degree(self, /) -> int: ... def cutdeg(self: _Self, /) -> _Self: ... def trim(self: _Self, /, tol: float = ...) -> _Self: ... - def truncate(self: _Self, /, size: SupportsInt) -> _Self: ... + def truncate(self: _Self, /, size: _AnyInt) -> _Self: ... @overload def convert( @@ -174,8 +172,8 @@ class ABCPolyBase(Generic[_NameCo]): self: _Self, /, m: SupportsIndex = ..., - k: _AnyNumberScalar | _SupportsLenAndGetItem[_AnyNumberScalar] = ..., - lbnd: None | _AnyNumberScalar = ..., + k: _AnyComplexScalar | _SupportsLenAndGetItem[_AnyComplexScalar] = ..., + lbnd: None | _AnyComplexScalar = ..., ) -> _Self: ... def deriv(self: _Self, /, m: SupportsIndex = ...) -> _Self: ... diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 6c8bec3662ee..c4c6bdc85c37 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -1,13 +1,13 @@ import decimal import fractions import numbers -from collections.abc import Callable, Iterable, Sequence +import sys +from collections.abc import Callable, Sequence from typing import ( + TYPE_CHECKING, Any, Literal, Protocol, - SupportsComplex, - SupportsFloat, SupportsIndex, SupportsInt, TypeAlias, @@ -27,6 +27,16 @@ from numpy._typing import ( _SupportsArray, ) +if sys.version_info >= (3, 11): + from typing import LiteralString +elif TYPE_CHECKING: + from typing_extensions import LiteralString +else: + LiteralString: TypeAlias = str + +_T = TypeVar("_T", bound=object) +_Tuple2: TypeAlias = tuple[_T, _T] + _V = TypeVar("_V") _V_co = TypeVar("_V_co", covariant=True) _Self = TypeVar("_Self", bound=object) @@ -42,97 +52,87 @@ class _SimpleSequence(Protocol[_V_co]): @overload def __getitem__(self: _Self, ii: slice, /) -> _Self: ... -_SCT = TypeVar("_SCT", bound=np.generic) +_SCT = TypeVar("_SCT", bound=np.number[Any] | np.object_) + _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] _Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_SCT]] -_CoefScalarType: TypeAlias = np.number[Any] | np.object_ -_CoefArray1D: TypeAlias = _Array1D[_CoefScalarType] -_CoefArrayND: TypeAlias = npt.NDArray[_CoefScalarType] +_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_SCT]] +_Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_SCT]] +_Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_SCT]] -class _SupportsBool(Protocol): - def __bool__(self, /) -> bool: ... 
+_CoefArray1D: TypeAlias = _Array1D[np.inexact[Any] | np.object_] +_CoefArrayND: TypeAlias = npt.NDArray[np.inexact[Any] | np.object_] -_AnyFloatScalar: TypeAlias = float | np.floating[Any] | np.integer[Any] -_AnyComplexScalar: TypeAlias = complex | np.complexfloating[Any, Any] -_AnyNumberScalar: TypeAlias = complex | np.number[Any] +_AnyRealScalar: TypeAlias = float | np.floating[Any] | np.integer[Any] +_AnyComplexScalar: TypeAlias = complex | np.number[Any] _AnyObjectScalar: TypeAlias = ( - fractions.Fraction + np.object_ + | fractions.Fraction | decimal.Decimal | numbers.Complex - | np.object_ ) -_AnyScalar: TypeAlias = _AnyNumberScalar | _AnyObjectScalar +_AnyScalar: TypeAlias = _AnyComplexScalar | _AnyObjectScalar _AnyInt: TypeAlias = SupportsInt | SupportsIndex -_AnyFloatSeries1D: TypeAlias = ( - _SupportsArray[np.dtype[np.floating[Any] | np.integer[Any]]] +_AnyRealSeries1D: TypeAlias = ( + npt.NDArray[np.floating[Any] | np.integer[Any]] + | _SupportsArray[np.dtype[np.floating[Any] | np.integer[Any]]] | _SupportsLenAndGetItem[float | np.floating[Any] | np.integer[Any]] ) _AnyComplexSeries1D: TypeAlias = ( - npt.NDArray[np.complexfloating[Any, Any]] - | _SupportsArray[np.dtype[np.complexfloating[Any, Any]]] - | _SupportsLenAndGetItem[_AnyComplexScalar] -) -_AnyNumberSeries1D: TypeAlias = ( npt.NDArray[np.number[Any]] | _SupportsArray[np.dtype[np.number[Any]]] - | _SupportsLenAndGetItem[_AnyNumberScalar] + | _SupportsLenAndGetItem[_AnyComplexScalar] ) _AnyObjectSeries1D: TypeAlias = ( npt.NDArray[np.object_] | _SupportsLenAndGetItem[_AnyObjectScalar] ) _AnySeries1D: TypeAlias = ( - npt.NDArray[_CoefScalarType] + npt.NDArray[np.number[Any] | np.object_] | _SupportsLenAndGetItem[_AnyScalar | object] ) -_AnyFloatSeriesND: TypeAlias = ( - _AnyFloatScalar +_AnyRealSeriesND: TypeAlias = ( + npt.NDArray[np.floating[Any] | np.integer[Any]] + | _AnyRealScalar | _SupportsArray[np.dtype[np.floating[Any] | np.integer[Any]]] | _NestedSequence[float | np.floating[Any] | np.integer[Any]] ) _AnyComplexSeriesND: TypeAlias = ( - _AnyComplexScalar + npt.NDArray[np.number[Any]] + | _AnyComplexScalar | _SupportsArray[np.dtype[np.number[Any]]] | _NestedSequence[complex | np.number[Any]] ) _AnyObjectSeriesND: TypeAlias = ( - _AnyObjectScalar + npt.NDArray[np.object_] + | _AnyObjectScalar | _SupportsArray[np.dtype[np.object_]] | _NestedSequence[_AnyObjectScalar] ) _AnySeriesND: TypeAlias = ( - _AnyScalar - | _SupportsArray[np.dtype[_CoefScalarType]] - | _NestedSequence[SupportsComplex | SupportsFloat] + npt.NDArray[np.number[Any] | np.object_] + | _AnyScalar + | _SupportsArray[np.dtype[np.number[Any] | np.object_]] + | _NestedSequence[_AnyScalar | object] ) -_SCT_domain = TypeVar("_SCT_domain", bound=np.inexact[Any] | np.object_) -_Interval: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_SCT_domain]] - -_T = TypeVar("_T", bound=object) -_Tuple2: TypeAlias = tuple[_T, _T] - -_SCT_number = TypeVar("_SCT_number", bound=_CoefScalarType) -_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_SCT]] -_Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_SCT]] -_Line: TypeAlias = _Array1[_SCT_number] | _Array2[_SCT_number] - -_Name_co = TypeVar("_Name_co", bound=str, covariant=True) +_Name_co = TypeVar("_Name_co", bound=LiteralString, covariant=True) -@final -class _FuncLine(Protocol[_Name_co]): +class _Named(Protocol[_Name_co]): @property def __name__(self, /) -> _Name_co: ... 
+@final +class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - off: _SCT_number, - scl: _SCT_number, - ) -> _Line[_SCT_number]: ... + off: _SCT, + scl: _SCT, + ) -> _Line[_SCT]: ... @overload def __call__( # type: ignore[overload-overlap] self, /, @@ -140,11 +140,7 @@ class _FuncLine(Protocol[_Name_co]): scl: int, ) -> _Line[np.int_] : ... @overload - def __call__( - self, /, - off: float, - scl: float, - ) -> _Line[np.float64]: ... + def __call__(self, /, off: float, scl: float) -> _Line[np.float64]: ... @overload def __call__( self, /, @@ -152,21 +148,14 @@ class _FuncLine(Protocol[_Name_co]): scl: complex, ) -> _Line[np.complex128]: ... @overload - def __call__( - self, /, - off: _AnyObjectScalar, - scl: _AnyObjectScalar, - ) -> _Line[np.object_]: ... + def __call__(self, /, off: object, scl: object) -> _Line[np.object_]: ... @final -class _FuncFromRoots(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - roots: _AnyFloatSeries1D, + roots: _AnyRealSeries1D, ) -> _Array1D[np.floating[Any]]: ... @overload def __call__( @@ -176,21 +165,16 @@ class _FuncFromRoots(Protocol[_Name_co]): @overload def __call__( self, /, - roots: _AnyObjectSeries1D, + roots: _AnySeries1D, ) -> _Array1D[np.object_]: ... - @overload - def __call__(self, /, roots: _AnySeries1D) -> _CoefArray1D: ... @final -class _FuncBinOp(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c1: _AnyFloatSeries1D, - c2: _AnyFloatSeries1D, + c1: _AnyRealSeries1D, + c2: _AnyRealSeries1D, ) -> _Array1D[np.floating[Any]]: ... @overload def __call__( @@ -199,27 +183,18 @@ class _FuncBinOp(Protocol[_Name_co]): c2: _AnyComplexSeries1D, ) -> _Array1D[np.complexfloating[Any, Any]]: ... @overload - def __call__( - self, /, - c1: _AnyObjectSeries1D, - c2: _AnyObjectSeries1D, - ) -> _Array1D[np.object_]: ... - @overload def __call__( self, /, c1: _AnySeries1D, c2: _AnySeries1D, - ) -> _CoefArray1D: ... + ) -> _Array1D[np.object_]: ... @final -class _FuncUnOp(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyFloatSeries1D, + c: _AnyRealSeries1D, ) -> _Array1D[np.floating[Any]]: ... @overload def __call__( @@ -227,19 +202,14 @@ class _FuncUnOp(Protocol[_Name_co]): c: _AnyComplexSeries1D, ) -> _Array1D[np.complexfloating[Any, Any]]: ... @overload - def __call__(self, /, c: _AnyObjectSeries1D) -> _Array1D[np.object_]: ... - @overload - def __call__(self, /, c: _AnySeries1D) -> _CoefArray1D: ... + def __call__(self, /, c: _AnySeries1D) -> _Array1D[np.object_]: ... @final -class _FuncPoly2Ortho(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - pol: _AnyFloatSeries1D, + pol: _AnyRealSeries1D, ) -> _Array1D[np.floating[Any]]: ... @overload def __call__( @@ -247,19 +217,14 @@ class _FuncPoly2Ortho(Protocol[_Name_co]): pol: _AnyComplexSeries1D, ) -> _Array1D[np.complexfloating[Any, Any]]: ... @overload - def __call__(self, /, pol: _AnyObjectSeries1D) -> _Array1D[np.object_]: ... 
- @overload - def __call__(self, /, pol: _AnySeries1D) -> _CoefArray1D: ... + def __call__(self, /, pol: _AnySeries1D) -> _Array1D[np.object_]: ... @final -class _FuncPow(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyFloatSeries1D, + c: _AnyRealSeries1D, pow: _AnyInt, maxpower: None | _AnyInt = ..., ) -> _Array1D[np.floating[Any]]: ... @@ -271,32 +236,21 @@ class _FuncPow(Protocol[_Name_co]): maxpower: None | _AnyInt = ..., ) -> _Array1D[np.complexfloating[Any, Any]]: ... @overload - def __call__( - self, /, - c: _AnyObjectSeries1D, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., - ) -> _Array1D[np.object_]: ... - @overload def __call__( self, /, c: _AnySeries1D, pow: _AnyInt, maxpower: None | _AnyInt = ..., - ) -> _CoefArray1D: ... - + ) -> _Array1D[np.object_]: ... @final -class _FuncDer(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyFloatSeriesND, + c: _AnyRealSeriesND, m: SupportsIndex = ..., - scl: _AnyNumberScalar = ..., + scl: _AnyComplexScalar = ..., axis: SupportsIndex = ..., ) -> npt.NDArray[np.floating[Any]]: ... @overload @@ -304,7 +258,7 @@ class _FuncDer(Protocol[_Name_co]): self, /, c: _AnyComplexSeriesND, m: SupportsIndex = ..., - scl: _AnyNumberScalar = ..., + scl: _AnyComplexScalar = ..., axis: SupportsIndex = ..., ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... @overload @@ -312,32 +266,20 @@ class _FuncDer(Protocol[_Name_co]): self, /, c: _AnyObjectSeriesND, m: SupportsIndex = ..., - scl: _AnyNumberScalar = ..., + scl: _AnyComplexScalar = ..., axis: SupportsIndex = ..., ) -> npt.NDArray[np.object_]: ... - @overload - def __call__( - self, /, - c: _AnySeriesND, - m: SupportsIndex = ..., - scl: _AnyNumberScalar = ..., - axis: SupportsIndex = ..., - ) -> _CoefArrayND: ... - @final -class _FuncInteg(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyFloatSeriesND, + c: _AnyRealSeriesND, m: SupportsIndex = ..., - k: _AnyNumberScalar | _SupportsLenAndGetItem[_AnyNumberScalar] = ..., - lbnd: _AnyNumberScalar = ..., - scl: _AnyNumberScalar = ..., + k: _AnyComplexScalar | _SupportsLenAndGetItem[_AnyComplexScalar] = ..., + lbnd: _AnyComplexScalar = ..., + scl: _AnyComplexScalar = ..., axis: SupportsIndex = ..., ) -> npt.NDArray[np.floating[Any]]: ... @overload @@ -345,9 +287,9 @@ class _FuncInteg(Protocol[_Name_co]): self, /, c: _AnyComplexSeriesND, m: SupportsIndex = ..., - k: _AnyNumberScalar | _SupportsLenAndGetItem[_AnyNumberScalar] = ..., - lbnd: _AnyNumberScalar = ..., - scl: _AnyNumberScalar = ..., + k: _AnyComplexScalar | _SupportsLenAndGetItem[_AnyComplexScalar] = ..., + lbnd: _AnyComplexScalar = ..., + scl: _AnyComplexScalar = ..., axis: SupportsIndex = ..., ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... 
@overload @@ -355,43 +297,30 @@ class _FuncInteg(Protocol[_Name_co]): self, /, c: _AnyObjectSeriesND, m: SupportsIndex = ..., - k: _AnyNumberScalar | _SupportsLenAndGetItem[_AnyNumberScalar] = ..., - lbnd: _AnyNumberScalar = ..., - scl: _AnyNumberScalar = ..., + k: _AnyComplexScalar | _SupportsLenAndGetItem[_AnyScalar] = ..., + lbnd: _AnyComplexScalar = ..., + scl: _AnyComplexScalar = ..., axis: SupportsIndex = ..., ) -> npt.NDArray[np.object_]: ... - @overload - def __call__( - self, /, - c: _AnySeriesND, - m: SupportsIndex = ..., - k: _AnyNumberScalar | _SupportsLenAndGetItem[_AnyNumberScalar] = ..., - lbnd: _AnyNumberScalar = ..., - scl: _AnyNumberScalar = ..., - axis: SupportsIndex = ..., - ) -> _CoefArrayND: ... - -_AnyFloatRoots: TypeAlias = ( +_AnyRealRoots: TypeAlias = ( _Array1D[np.floating[Any] | np.integer[Any]] - | Sequence[_AnyFloatScalar] + | Sequence[_AnyRealScalar] ) _AnyComplexRoots: TypeAlias = ( _Array1D[np.number[Any]] | Sequence[_AnyComplexScalar] ) -_AnyObjectRoots: TypeAlias = ( - _Array1D[np.object_] - | Sequence[_AnyObjectScalar] -) +_AnyObjectRoots: TypeAlias = _Array1D[np.object_] | Sequence[_AnyObjectScalar] +_AnyRoots: TypeAlias = _Array1D[np.object_] | Sequence[_AnyScalar] -_AnyFloatPoints: TypeAlias = ( +_AnyRealPoints: TypeAlias = ( npt.NDArray[np.floating[Any] | np.integer[Any]] - | tuple[_AnyFloatSeriesND, ...] - | list[_AnyFloatSeriesND] + | tuple[_AnyRealSeriesND, ...] + | list[_AnyRealSeriesND] ) _AnyComplexPoints: TypeAlias = ( - npt.NDArray[np.complexfloating[Any, Any]] + npt.NDArray[np.number[Any]] | tuple[_AnyComplexSeriesND, ...] | list[_AnyComplexSeriesND] ) @@ -401,21 +330,18 @@ _AnyObjectPoints: TypeAlias = ( | list[_AnyObjectSeriesND] ) _AnyPoints: TypeAlias = ( - _CoefArrayND + npt.NDArray[np.number[Any] | np.object_] | tuple[_AnySeriesND, ...] | list[_AnySeriesND] ) @final -class _FuncValFromRoots(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyFloatScalar, - r: _AnyFloatScalar, + x: _AnyRealScalar, + r: _AnyRealScalar, tensor: bool = ..., ) -> np.floating[Any]: ... @overload @@ -428,15 +354,15 @@ class _FuncValFromRoots(Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyObjectScalar, - r: _AnyObjectScalar, + x: _AnyScalar, + r: _AnyScalar, tensor: bool = ..., ) -> object: ... @overload - def __call__( # type: ignore[overload-overlap] + def __call__( self, /, - x: _AnyFloatScalar | _AnyFloatPoints, - r: _AnyFloatSeriesND, + x: _AnyRealScalar | _AnyRealPoints, + r: _AnyRealSeriesND, tensor: bool = ..., ) -> npt.NDArray[np.floating[Any]]: ... @overload @@ -447,30 +373,20 @@ class _FuncValFromRoots(Protocol[_Name_co]): tensor: bool = ..., ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... @overload - def __call__( - self, /, - x: _AnyScalar | _AnyObjectPoints | _AnyComplexPoints, - r: _AnyObjectSeriesND, - tensor: bool = ..., - ) -> npt.NDArray[np.object_]: ... - @overload def __call__( self, /, x: _AnyScalar | _AnyPoints, r: _AnySeriesND, tensor: bool = ..., - ) -> _CoefArrayND: ... + ) -> npt.NDArray[np.object_]: ... @final -class _FuncVal(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... 
- +class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyFloatScalar, - c: _AnyFloatRoots, + x: _AnyRealScalar, + c: _AnyRealRoots, tensor: bool = ..., ) -> np.floating[Any]: ... @overload @@ -483,15 +399,15 @@ class _FuncVal(Protocol[_Name_co]): @overload def __call__( self, /, - x: _AnyObjectScalar, + x: _AnyScalar, c: _AnyObjectRoots, tensor: bool = ..., ) -> object: ... @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyFloatPoints, - c: _AnyFloatSeriesND, + x: _AnyRealPoints, + c: _AnyRealSeriesND, tensor: bool = ..., ) -> npt.NDArray[np.floating[Any]]: ... @overload @@ -502,31 +418,21 @@ class _FuncVal(Protocol[_Name_co]): tensor: bool = ..., ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... @overload - def __call__( - self, /, - x: _AnyObjectPoints, - c: _AnyObjectSeries1D | _AnyComplexSeriesND, - tensor: bool = ..., - ) -> npt.NDArray[np.object_]: ... - @overload def __call__( self, /, x: _AnyPoints, c: _AnySeriesND, tensor: bool = ..., - ) -> _CoefArrayND: ... + ) -> npt.NDArray[np.object_]: ... @final -class _FuncVal2D(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyFloatScalar, - y: _AnyFloatScalar, - c: _AnyFloatRoots, + x: _AnyRealScalar, + y: _AnyRealScalar, + c: _AnyRealRoots, ) -> np.floating[Any]: ... @overload def __call__( @@ -538,16 +444,16 @@ class _FuncVal2D(Protocol[_Name_co]): @overload def __call__( self, /, - x: _AnyObjectScalar, - y: _AnyObjectScalar, - c: _AnyObjectRoots, + x: _AnyScalar, + y: _AnyScalar, + c: _AnyRoots, ) -> object: ... @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyFloatPoints, - y: _AnyFloatPoints, - c: _AnyFloatSeriesND, + x: _AnyRealPoints, + y: _AnyRealPoints, + c: _AnyRealSeriesND, ) -> npt.NDArray[np.floating[Any]]: ... @overload def __call__( @@ -557,32 +463,22 @@ class _FuncVal2D(Protocol[_Name_co]): c: _AnyComplexSeriesND, ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... @overload - def __call__( - self, /, - x: _AnyObjectPoints, - y: _AnyObjectPoints, - c: _AnyObjectSeries1D | _AnyComplexSeriesND, - ) -> npt.NDArray[np.object_]: ... - @overload def __call__( self, /, x: _AnyPoints, y: _AnyPoints, c: _AnySeriesND, - ) -> _CoefArrayND: ... + ) -> npt.NDArray[np.object_]: ... @final -class _FuncVal3D(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyFloatScalar, - y: _AnyFloatScalar, - z: _AnyFloatScalar, - c: _AnyFloatRoots + x: _AnyRealScalar, + y: _AnyRealScalar, + z: _AnyRealScalar, + c: _AnyRealRoots ) -> np.floating[Any]: ... @overload def __call__( @@ -595,18 +491,18 @@ class _FuncVal3D(Protocol[_Name_co]): @overload def __call__( self, /, - x: _AnyObjectScalar, - y: _AnyObjectScalar, - z: _AnyObjectScalar, - c: _AnyObjectRoots, + x: _AnyScalar, + y: _AnyScalar, + z: _AnyScalar, + c: _AnyRoots, ) -> object: ... @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyFloatPoints, - y: _AnyFloatPoints, - z: _AnyFloatPoints, - c: _AnyFloatSeriesND, + x: _AnyRealPoints, + y: _AnyRealPoints, + z: _AnyRealPoints, + c: _AnyRealSeriesND, ) -> npt.NDArray[np.floating[Any]]: ... 
@overload def __call__( @@ -617,21 +513,13 @@ class _FuncVal3D(Protocol[_Name_co]): c: _AnyComplexSeriesND, ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... @overload - def __call__( - self, /, - x: _AnyObjectPoints, - y: _AnyObjectPoints, - z: _AnyObjectPoints, - c: _AnyObjectSeries1D | _AnyComplexSeriesND, - ) -> npt.NDArray[np.object_]: ... - @overload def __call__( self, /, x: _AnyPoints, y: _AnyPoints, z: _AnyPoints, c: _AnySeriesND, - ) -> _CoefArrayND: ... + ) -> npt.NDArray[np.object_]: ... _AnyValF: TypeAlias = Callable[ [npt.ArrayLike, npt.ArrayLike, bool], @@ -639,17 +527,14 @@ _AnyValF: TypeAlias = Callable[ ] @final -class _FuncValND(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, val_f: _AnyValF, - c: _AnyFloatRoots, + c: _AnyRealRoots, /, - *args: _AnyFloatScalar, + *args: _AnyRealScalar, ) -> np.floating[Any]: ... @overload def __call__( @@ -671,9 +556,9 @@ class _FuncValND(Protocol[_Name_co]): def __call__( # type: ignore[overload-overlap] self, val_f: _AnyValF, - c: _AnyFloatSeriesND, + c: _AnyRealSeriesND, /, - *args: _AnyFloatPoints, + *args: _AnyRealPoints, ) -> npt.NDArray[np.floating[Any]]: ... @overload def __call__( @@ -687,40 +572,29 @@ class _FuncValND(Protocol[_Name_co]): def __call__( self, val_f: _AnyValF, - c: _AnyObjectSeries1D | _AnyComplexSeriesND, + c: _AnySeriesND, /, *args: _AnyObjectPoints, ) -> npt.NDArray[np.object_]: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _AnySeriesND, - /, - *args: _AnyPoints, - ) -> _CoefArrayND: ... @final -class _FuncVander(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _ArrayLikeFloat_co, + x: _AnyRealSeriesND, deg: SupportsIndex, ) -> npt.NDArray[np.floating[Any]]: ... @overload def __call__( self, /, - x: _ArrayLikeComplex_co, + x: _AnyComplexSeriesND, deg: SupportsIndex, ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... @overload def __call__( self, /, - x: _ArrayLikeObject_co, + x: _AnySeriesND, deg: SupportsIndex, ) -> npt.NDArray[np.object_]: ... @overload @@ -733,29 +607,26 @@ class _FuncVander(Protocol[_Name_co]): _AnyDegrees: TypeAlias = _SupportsLenAndGetItem[SupportsIndex] @final -class _FuncVander2D(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, + x: _AnyRealSeriesND, + y: _AnyRealSeriesND, deg: _AnyDegrees, ) -> npt.NDArray[np.floating[Any]]: ... @overload def __call__( self, /, - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, + x: _AnyComplexSeriesND, + y: _AnyComplexSeriesND, deg: _AnyDegrees, ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... @overload def __call__( self, /, - x: _ArrayLikeObject_co, - y: _ArrayLikeObject_co, + x: _AnySeriesND, + y: _AnySeriesND, deg: _AnyDegrees, ) -> npt.NDArray[np.object_]: ... @overload @@ -767,31 +638,29 @@ class _FuncVander2D(Protocol[_Name_co]): ) -> _CoefArrayND: ... @final -class _FuncVander3D(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... 
- +class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, - z: _ArrayLikeFloat_co, + x: _AnyRealSeriesND, + y: _AnyRealSeriesND, + z: _AnyRealSeriesND, deg: _AnyDegrees, ) -> npt.NDArray[np.floating[Any]]: ... @overload def __call__( self, /, - x: _ArrayLikeComplex_co, - z: _ArrayLikeComplex_co, + x: _AnyComplexSeriesND, + y: _AnyComplexSeriesND, + z: _AnyComplexSeriesND, deg: _AnyDegrees, ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... @overload def __call__( self, /, - x: _ArrayLikeObject_co, - y: _ArrayLikeObject_co, - z: _ArrayLikeObject_co, + x: _AnySeriesND, + y: _AnySeriesND, + z: _AnySeriesND, deg: _AnyDegrees, ) -> npt.NDArray[np.object_]: ... @overload @@ -810,10 +679,7 @@ _AnyFuncVander: TypeAlias = Callable[ ] @final -class _FuncVanderND(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, @@ -832,7 +698,9 @@ class _FuncVanderND(Protocol[_Name_co]): def __call__( self, /, vander_fs: _SupportsLenAndGetItem[_AnyFuncVander], - points: _SupportsLenAndGetItem[_ArrayLikeObject_co], + points: _SupportsLenAndGetItem[ + _ArrayLikeObject_co | _ArrayLikeComplex_co, + ], degrees: _SupportsLenAndGetItem[SupportsIndex], ) -> npt.NDArray[np.object_]: ... @overload @@ -844,19 +712,16 @@ class _FuncVanderND(Protocol[_Name_co]): ) -> _CoefArrayND: ... @final -class _FuncFit(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyFloatSeries1D, - y: _AnyFloatSeriesND, + x: _AnyRealSeries1D, + y: _AnyRealSeriesND, deg: _ArrayLikeInt_co, rcond: None | float = ..., full: Literal[False] = ..., - w: None | _AnyFloatSeries1D = ..., + w: None | _AnyRealSeries1D = ..., ) -> npt.NDArray[np.floating[Any]]: ... @overload def __call__( @@ -877,8 +742,7 @@ class _FuncFit(Protocol[_Name_co]): rcond: None | float = ..., full: Literal[False] = ..., w: None | _AnySeries1D = ..., - ) -> _CoefArrayND: ... - + ) -> npt.NDArray[np.object_]: ... @overload def __call__( self, @@ -903,14 +767,14 @@ class _FuncFit(Protocol[_Name_co]): ) -> tuple[_CoefArrayND, Sequence[np.inexact[Any] | np.int32]]: ... @final -class _FuncRoots(Protocol[_Name_co]): +class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): @property def __name__(self, /) -> _Name_co: ... @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyFloatSeries1D, + c: _AnyRealSeries1D, ) -> _Array1D[np.float64]: ... @overload def __call__( @@ -918,25 +782,14 @@ class _FuncRoots(Protocol[_Name_co]): c: _AnyComplexSeries1D, ) -> _Array1D[np.complex128]: ... @overload - def __call__( - self, /, - c: _AnyObjectSeries1D, - ) -> _Array1D[np.object_]: ... - @overload - def __call__( - self, /, - c: _AnySeries1D, - ) -> _Array1D[np.float64 | np.complex128 | np.object_]: ... + def __call__(self, /, c: _AnySeries1D) -> _Array1D[np.object_]: ... @final -class _FuncCompanion(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyFloatSeries1D, + c: _AnyRealSeries1D, ) -> _Array2D[np.float64]: ... 
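``_FuncRoots`` and ``_FuncCompanion`` above return shape-typed arrays (``_Array1D``, ``_Array2D``). A sketch of how such aliases behave, assuming Python 3.10+ and a numpy with shape-generic ``ndarray`` (1.22 or later):

from typing import Any, TypeAlias, TypeVar, cast

import numpy as np

_SCT = TypeVar("_SCT", bound=np.generic)
_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]]
_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_SCT]]


def toy_companion(c: list[float]) -> _Array2D[np.float64]:
    # np.zeros is not shape-typed, so the result is narrowed explicitly.
    n = len(c) - 1
    return cast(_Array2D[np.float64], np.zeros((n, n), dtype=np.float64))


print(toy_companion([1.0, 2.0, 3.0]).shape)  # (2, 2)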
@overload def __call__( @@ -944,32 +797,21 @@ class _FuncCompanion(Protocol[_Name_co]): c: _AnyComplexSeries1D, ) -> _Array2D[np.complex128]: ... @overload - def __call__( - self, /, - c: _AnyObjectSeries1D, - ) -> _Array2D[np.object_]: ... - @overload - def __call__( - self, /, - c: _AnySeries1D, - ) -> _Array2D[np.float64 | np.complex128 | np.object_]: ... + def __call__(self, /, c: _AnySeries1D) -> _Array2D[np.object_]: ... @final -class _FuncGauss(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - - def __call__(self, /, SupportsIndex) -> _Tuple2[_Array1D[np.float64]]: ... +class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): + def __call__( + self, /, + deg: SupportsIndex, + ) -> _Tuple2[_Array1D[np.float64]]: ... @final -class _FuncWeight(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyFloatSeriesND, + c: _AnyRealSeriesND, ) -> npt.NDArray[np.float64]: ... @overload def __call__( @@ -977,23 +819,12 @@ class _FuncWeight(Protocol[_Name_co]): c: _AnyComplexSeriesND, ) -> npt.NDArray[np.complex128]: ... @overload - def __call__( - self, /, - c: _AnyObjectSeriesND, - ) -> npt.NDArray[np.object_]: ... - @overload - def __call__( - self, /, - c: _AnySeriesND, - ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... + def __call__(self, /, c: _AnySeriesND) -> npt.NDArray[np.object_]: ... _N_pts = TypeVar("_N_pts", bound=int) @final -class _FuncPts(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - +class _FuncPts(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index ee84d63d7352..346298ec09aa 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -14,27 +14,26 @@ import numpy.typing as npt from numpy._typing import _ArrayLikeInt_co, _FloatLike_co from ._polytypes import ( + _AnyComplexScalar, + _AnyComplexSeries1D, _AnyComplexSeriesND, - _AnyFloatSeriesND, + _AnyRealSeries1D, + _AnyRealSeriesND, _AnyInt, - _AnyScalar, _AnyComplexSeries1D, - _AnyFloatSeries1D, - _AnyNumberSeries1D, - _AnyObjectScalar, _AnyObjectSeries1D, + _AnyRealScalar, + _AnyScalar, _AnySeries1D, _AnySeriesND, + _AnyRealScalar, _Array1D, - _AnyFloatScalar, + _Array2, _CoefArrayND, _CoefArray1D, - _AnyFloatScalar, _FuncBinOp, _FuncValND, _FuncVanderND, - _Interval, - _AnyNumberScalar, _SimpleSequence, _SupportsLenAndGetItem, _Tuple2, @@ -96,7 +95,7 @@ def as_series( ) -> list[_Array1D[np.object_]]: ... @overload def as_series( # type: ignore[overload-overlap] - alist: Iterable[_AnyFloatSeries1D | float], + alist: Iterable[_AnyRealSeries1D | float], trim: bool = ..., ) -> list[_Array1D[np.floating[Any]]]: ... @overload @@ -116,58 +115,58 @@ def trimseq(seq: _T_seq) -> _T_seq: ... @overload def trimcoef( # type: ignore[overload-overlap] c: npt.NDArray[np.integer[Any]] | npt.NDArray[np.floating[Any]], - tol: _AnyFloatScalar = ..., + tol: _AnyRealScalar = ..., ) -> _Array1D[np.floating[Any]]: ... @overload def trimcoef( c: npt.NDArray[np.complexfloating[Any, Any]], - tol: _AnyFloatScalar = ..., + tol: _AnyRealScalar = ..., ) -> _Array1D[np.complexfloating[Any, Any]]: ... @overload def trimcoef( c: npt.NDArray[np.object_], - tol: _AnyFloatScalar = ..., + tol: _AnyRealScalar = ..., ) -> _Array1D[np.object_]: ... 
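The ``_FuncGauss`` rewrite above also fixes a silent stub bug: in the old signature the bare name declared a *parameter called* ``SupportsIndex``, implicitly typed ``Any``, rather than constraining the argument to the ``typing.SupportsIndex`` protocol. Illustration with hypothetical protocol names:

from typing import Protocol, SupportsIndex


class _Broken(Protocol):
    # "SupportsIndex" is just a parameter name here, implicitly typed Any.
    def __call__(self, /, SupportsIndex) -> tuple[float, float]: ...


class _Fixed(Protocol):
    # The argument is now actually constrained to objects with __index__.
    def __call__(self, /, deg: SupportsIndex) -> tuple[float, float]: ...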
@overload def trimcoef( # type: ignore[overload-overlap] - c: _AnyFloatSeries1D | float, - tol: _AnyFloatScalar = ..., + c: _AnyRealSeries1D | float, + tol: _AnyRealScalar = ..., ) -> _Array1D[np.floating[Any]]: ... @overload def trimcoef( c: _AnyComplexSeries1D | complex, - tol: _AnyFloatScalar = ..., + tol: _AnyRealScalar = ..., ) -> _Array1D[np.complexfloating[Any, Any]]: ... @overload def trimcoef( c: _AnyObjectSeries1D | object, - tol: _AnyFloatScalar = ..., + tol: _AnyRealScalar = ..., ) -> _Array1D[np.object_]: ... @overload def getdomain( # type: ignore[overload-overlap] x: npt.NDArray[np.floating[Any]] | npt.NDArray[np.integer[Any]], -) -> _Interval[np.float64]: ... +) -> _Array2[np.float64]: ... @overload def getdomain( x: npt.NDArray[np.complexfloating[Any, Any]], -) -> _Interval[np.complex128]: ... +) -> _Array2[np.complex128]: ... @overload def getdomain( x: npt.NDArray[np.object_], -) -> _Interval[np.object_]: ... +) -> _Array2[np.object_]: ... @overload def getdomain( # type: ignore[overload-overlap] - x: _AnyFloatSeries1D | float, -) -> _Interval[np.float64]: ... + x: _AnyRealSeries1D | float, +) -> _Array2[np.float64]: ... @overload def getdomain( x: _AnyComplexSeries1D | complex, -) -> _Interval[np.complex128]: ... +) -> _Array2[np.complex128]: ... @overload def getdomain( x: _AnyObjectSeries1D | object, -) -> _Interval[np.object_]: ... +) -> _Array2[np.object_]: ... @overload def mapparms( # type: ignore[overload-overlap] @@ -196,13 +195,13 @@ def mapparms( ) -> _Tuple2[complex]: ... @overload def mapparms( - old: _AnyFloatSeries1D, - new: _AnyFloatSeries1D, + old: _AnyRealSeries1D, + new: _AnyRealSeries1D, ) -> _Tuple2[np.floating[Any]]: ... @overload def mapparms( - old: _AnyNumberSeries1D, - new: _AnyNumberSeries1D, + old: _AnyComplexSeries1D, + new: _AnyComplexSeries1D, ) -> _Tuple2[np.complexfloating[Any, Any]]: ... @overload def mapparms( @@ -212,19 +211,19 @@ def mapparms( @overload def mapdomain( # type: ignore[overload-overlap] - x: _AnyFloatScalar, - old: _AnyFloatSeries1D, - new: _AnyFloatSeries1D, + x: _AnyRealScalar, + old: _AnyRealSeries1D, + new: _AnyRealSeries1D, ) -> np.floating[Any]: ... @overload def mapdomain( - x: _AnyNumberScalar, + x: _AnyComplexScalar, old: _AnyComplexSeries1D, new: _AnyComplexSeries1D, ) -> np.complexfloating[Any, Any]: ... @overload def mapdomain( - x: _AnyObjectScalar | _AnyNumberScalar, + x: _AnyScalar, old: _AnyObjectSeries1D | _AnyComplexSeries1D, new: _AnyObjectSeries1D | _AnyComplexSeries1D, ) -> object: ... @@ -248,15 +247,15 @@ def mapdomain( ) -> _Array1D[np.object_]: ... @overload def mapdomain( - x: _AnyFloatSeries1D, - old: _AnyFloatSeries1D, - new: _AnyFloatSeries1D, + x: _AnyRealSeries1D, + old: _AnyRealSeries1D, + new: _AnyRealSeries1D, ) -> _Array1D[np.floating[Any]]: ... @overload def mapdomain( - x: _AnyNumberSeries1D, - old: _AnyNumberSeries1D, - new: _AnyNumberSeries1D, + x: _AnyComplexSeries1D, + old: _AnyComplexSeries1D, + new: _AnyComplexSeries1D, ) -> _Array1D[np.complexfloating[Any, Any]]: ... @overload def mapdomain( @@ -284,7 +283,7 @@ _vander_nd_flat: _FuncVanderND[Literal["_vander_nd_flat"]] def _fromroots( # type: ignore[overload-overlap] line_f: _AnyLineF, mul_f: _AnyMulF, - roots: _AnyFloatSeries1D, + roots: _AnyRealSeries1D, ) -> _Array1D[np.floating[Any]]: ... 
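Only the annotations of ``getdomain``/``mapparms``/``mapdomain`` change here; their runtime behaviour is untouched. For reference (output comments show actual values):

import numpy as np
from numpy.polynomial import polyutils as pu

x = np.array([1.0, 3.0, 2.0])
dom = pu.getdomain(x)
print(dom)                                          # [1. 3.]
print(pu.mapdomain(x, dom, np.array([-1.0, 1.0])))  # [-1.  1.  0.]
print(pu.mapparms(dom, [-1.0, 1.0]))                # (-2.0, 1.0) = offset, scale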
@overload def _fromroots( @@ -312,8 +311,8 @@ _gridnd: _FuncValND[Literal["_gridnd"]] @overload def _div( # type: ignore[overload-overlap] mul_f: _AnyMulF, - c1: _AnyFloatSeries1D, - c2: _AnyFloatSeries1D, + c1: _AnyRealSeries1D, + c2: _AnyRealSeries1D, ) -> _Tuple2[_Array1D[np.floating[Any]]]: ... @overload def _div( @@ -341,7 +340,7 @@ _sub: Final[_FuncBinOp] @overload def _pow( # type: ignore[overload-overlap] mul_f: _AnyMulF, - c: _AnyFloatSeries1D, + c: _AnyRealSeries1D, pow: _AnyInt, maxpower: None | _AnyInt = ..., ) -> _Array1D[np.floating[Any]]: ... @@ -371,13 +370,13 @@ def _pow( @overload def _fit( # type: ignore[overload-overlap] vander_f: _AnyVanderF, - x: _AnyFloatSeries1D, - y: _AnyFloatSeriesND, + x: _AnyRealSeries1D, + y: _AnyRealSeriesND, deg: _ArrayLikeInt_co, - domain: None | _AnyFloatSeries1D = ..., + domain: None | _AnyRealSeries1D = ..., rcond: None | float = ..., full: Literal[False] = ..., - w: None | _AnyFloatSeries1D = ..., + w: None | _AnyRealSeries1D = ..., ) -> npt.NDArray[np.floating[Any]]: ... @overload def _fit( From a7a72e108cfc7ae97070db71202071a386ea2a57 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Jul 2024 19:45:14 +0200 Subject: [PATCH 811/980] TYP: extract common array annotations expressions into type aliases --- numpy/polynomial/_polytypes.pyi | 156 ++++++++++++++------------------ numpy/polynomial/polyutils.pyi | 99 ++++++++++---------- 2 files changed, 119 insertions(+), 136 deletions(-) diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index c4c6bdc85c37..ba53205a8331 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -57,6 +57,13 @@ _SCT = TypeVar("_SCT", bound=np.number[Any] | np.object_) _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] _Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_SCT]] +_FloatArray1D: TypeAlias = _Array1D[np.floating[Any]] +_FloatArrayND: TypeAlias = npt.NDArray[np.floating[Any]] +_ComplexArray1D: TypeAlias = _Array1D[np.complexfloating[Any, Any]] +_ComplexArrayND: TypeAlias = npt.NDArray[np.complexfloating[Any, Any]] +_ObjectArray1D: TypeAlias = _Array1D[np.object_] +_ObjectArrayND: TypeAlias = npt.NDArray[np.object_] + _Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_SCT]] _Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_SCT]] _Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_SCT]] @@ -86,7 +93,7 @@ _AnyComplexSeries1D: TypeAlias = ( | _SupportsLenAndGetItem[_AnyComplexScalar] ) _AnyObjectSeries1D: TypeAlias = ( - npt.NDArray[np.object_] + _ObjectArrayND | _SupportsLenAndGetItem[_AnyObjectScalar] ) _AnySeries1D: TypeAlias = ( @@ -107,7 +114,7 @@ _AnyComplexSeriesND: TypeAlias = ( | _NestedSequence[complex | np.number[Any]] ) _AnyObjectSeriesND: TypeAlias = ( - npt.NDArray[np.object_] + _ObjectArrayND | _AnyObjectScalar | _SupportsArray[np.dtype[np.object_]] | _NestedSequence[_AnyObjectScalar] @@ -128,17 +135,9 @@ class _Named(Protocol[_Name_co]): @final class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, - off: _SCT, - scl: _SCT, - ) -> _Line[_SCT]: ... + def __call__(self, /, off: _SCT, scl: _SCT) -> _Line[_SCT]: ... # type: ignore[overload-overlap] @overload - def __call__( # type: ignore[overload-overlap] - self, /, - off: int, - scl: int, - ) -> _Line[np.int_] : ... + def __call__(self, /, off: int, scl: int) -> _Line[np.int_] : ... 
# type: ignore[overload-overlap] @overload def __call__(self, /, off: float, scl: float) -> _Line[np.float64]: ... @overload @@ -153,20 +152,11 @@ class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): @final class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, - roots: _AnyRealSeries1D, - ) -> _Array1D[np.floating[Any]]: ... + def __call__(self, /, roots: _AnyRealSeries1D) -> _FloatArray1D: ... # type: ignore[overload-overlap] @overload - def __call__( - self, /, - roots: _AnyComplexSeries1D, - ) -> _Array1D[np.complexfloating[Any, Any]]: ... + def __call__(self, /, roots: _AnyComplexSeries1D) -> _ComplexArray1D: ... @overload - def __call__( - self, /, - roots: _AnySeries1D, - ) -> _Array1D[np.object_]: ... + def __call__(self, /, roots: _AnySeries1D) -> _ObjectArray1D: ... @final class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): @@ -175,49 +165,37 @@ class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): self, /, c1: _AnyRealSeries1D, c2: _AnyRealSeries1D, - ) -> _Array1D[np.floating[Any]]: ... + ) -> _FloatArray1D: ... @overload def __call__( self, /, c1: _AnyComplexSeries1D, c2: _AnyComplexSeries1D, - ) -> _Array1D[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArray1D: ... @overload def __call__( self, /, c1: _AnySeries1D, c2: _AnySeries1D, - ) -> _Array1D[np.object_]: ... + ) -> _ObjectArray1D: ... @final class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, - c: _AnyRealSeries1D, - ) -> _Array1D[np.floating[Any]]: ... + def __call__(self, /, c: _AnyRealSeries1D) -> _FloatArray1D: ... # type: ignore[overload-overlap] @overload - def __call__( - self, /, - c: _AnyComplexSeries1D, - ) -> _Array1D[np.complexfloating[Any, Any]]: ... + def __call__(self, /, c: _AnyComplexSeries1D) -> _ComplexArray1D: ... @overload - def __call__(self, /, c: _AnySeries1D) -> _Array1D[np.object_]: ... + def __call__(self, /, c: _AnySeries1D) -> _ObjectArray1D: ... @final class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, - pol: _AnyRealSeries1D, - ) -> _Array1D[np.floating[Any]]: ... + def __call__(self, /, pol: _AnyRealSeries1D) -> _FloatArray1D: ... # type: ignore[overload-overlap] @overload - def __call__( - self, /, - pol: _AnyComplexSeries1D, - ) -> _Array1D[np.complexfloating[Any, Any]]: ... + def __call__(self, /, pol: _AnyComplexSeries1D) -> _ComplexArray1D: ... @overload - def __call__(self, /, pol: _AnySeries1D) -> _Array1D[np.object_]: ... + def __call__(self, /, pol: _AnySeries1D) -> _ObjectArray1D: ... @final class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): @@ -227,21 +205,21 @@ class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): c: _AnyRealSeries1D, pow: _AnyInt, maxpower: None | _AnyInt = ..., - ) -> _Array1D[np.floating[Any]]: ... + ) -> _FloatArray1D: ... @overload def __call__( self, /, c: _AnyComplexSeries1D, pow: _AnyInt, maxpower: None | _AnyInt = ..., - ) -> _Array1D[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArray1D: ... @overload def __call__( self, /, c: _AnySeries1D, pow: _AnyInt, maxpower: None | _AnyInt = ..., - ) -> _Array1D[np.object_]: ... + ) -> _ObjectArray1D: ... @final class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): @@ -252,7 +230,7 @@ class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): m: SupportsIndex = ..., scl: _AnyComplexScalar = ..., axis: SupportsIndex = ..., - ) -> npt.NDArray[np.floating[Any]]: ... 
+ ) -> _FloatArrayND: ... @overload def __call__( self, /, @@ -260,7 +238,7 @@ class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): m: SupportsIndex = ..., scl: _AnyComplexScalar = ..., axis: SupportsIndex = ..., - ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArrayND: ... @overload def __call__( self, /, @@ -268,7 +246,7 @@ class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): m: SupportsIndex = ..., scl: _AnyComplexScalar = ..., axis: SupportsIndex = ..., - ) -> npt.NDArray[np.object_]: ... + ) -> _ObjectArrayND: ... @final class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): @@ -281,7 +259,7 @@ class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): lbnd: _AnyComplexScalar = ..., scl: _AnyComplexScalar = ..., axis: SupportsIndex = ..., - ) -> npt.NDArray[np.floating[Any]]: ... + ) -> _FloatArrayND: ... @overload def __call__( self, /, @@ -291,7 +269,7 @@ class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): lbnd: _AnyComplexScalar = ..., scl: _AnyComplexScalar = ..., axis: SupportsIndex = ..., - ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArrayND: ... @overload def __call__( self, /, @@ -301,7 +279,7 @@ class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): lbnd: _AnyComplexScalar = ..., scl: _AnyComplexScalar = ..., axis: SupportsIndex = ..., - ) -> npt.NDArray[np.object_]: ... + ) -> _ObjectArrayND: ... _AnyRealRoots: TypeAlias = ( _Array1D[np.floating[Any] | np.integer[Any]] @@ -311,8 +289,8 @@ _AnyComplexRoots: TypeAlias = ( _Array1D[np.number[Any]] | Sequence[_AnyComplexScalar] ) -_AnyObjectRoots: TypeAlias = _Array1D[np.object_] | Sequence[_AnyObjectScalar] -_AnyRoots: TypeAlias = _Array1D[np.object_] | Sequence[_AnyScalar] +_AnyObjectRoots: TypeAlias = _ObjectArray1D | Sequence[_AnyObjectScalar] +_AnyRoots: TypeAlias = _ObjectArray1D | Sequence[_AnyScalar] _AnyRealPoints: TypeAlias = ( npt.NDArray[np.floating[Any] | np.integer[Any]] @@ -325,7 +303,7 @@ _AnyComplexPoints: TypeAlias = ( | list[_AnyComplexSeriesND] ) _AnyObjectPoints: TypeAlias = ( - npt.NDArray[np.object_] + _ObjectArrayND | tuple[_AnyObjectSeriesND, ...] | list[_AnyObjectSeriesND] ) @@ -364,21 +342,21 @@ class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]): x: _AnyRealScalar | _AnyRealPoints, r: _AnyRealSeriesND, tensor: bool = ..., - ) -> npt.NDArray[np.floating[Any]]: ... + ) -> _FloatArrayND: ... @overload def __call__( self, /, x: _AnyComplexScalar | _AnyComplexPoints, r: _AnyComplexSeriesND, tensor: bool = ..., - ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArrayND: ... @overload def __call__( self, /, x: _AnyScalar | _AnyPoints, r: _AnySeriesND, tensor: bool = ..., - ) -> npt.NDArray[np.object_]: ... + ) -> _ObjectArrayND: ... @final class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): @@ -409,21 +387,21 @@ class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): x: _AnyRealPoints, c: _AnyRealSeriesND, tensor: bool = ..., - ) -> npt.NDArray[np.floating[Any]]: ... + ) -> _FloatArrayND: ... @overload def __call__( self, /, x: _AnyComplexPoints, c: _AnyComplexSeriesND, tensor: bool = ..., - ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArrayND: ... @overload def __call__( self, /, x: _AnyPoints, c: _AnySeriesND, tensor: bool = ..., - ) -> npt.NDArray[np.object_]: ... + ) -> _ObjectArrayND: ... 
@final class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): @@ -454,21 +432,21 @@ class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): x: _AnyRealPoints, y: _AnyRealPoints, c: _AnyRealSeriesND, - ) -> npt.NDArray[np.floating[Any]]: ... + ) -> _FloatArrayND: ... @overload def __call__( self, /, x: _AnyComplexPoints, y: _AnyComplexPoints, c: _AnyComplexSeriesND, - ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArrayND: ... @overload def __call__( self, /, x: _AnyPoints, y: _AnyPoints, c: _AnySeriesND, - ) -> npt.NDArray[np.object_]: ... + ) -> _ObjectArrayND: ... @final class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): @@ -503,7 +481,7 @@ class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): y: _AnyRealPoints, z: _AnyRealPoints, c: _AnyRealSeriesND, - ) -> npt.NDArray[np.floating[Any]]: ... + ) -> _FloatArrayND: ... @overload def __call__( self, /, @@ -511,7 +489,7 @@ class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): y: _AnyComplexPoints, z: _AnyComplexPoints, c: _AnyComplexSeriesND, - ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArrayND: ... @overload def __call__( self, /, @@ -519,7 +497,7 @@ class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): y: _AnyPoints, z: _AnyPoints, c: _AnySeriesND, - ) -> npt.NDArray[np.object_]: ... + ) -> _ObjectArrayND: ... _AnyValF: TypeAlias = Callable[ [npt.ArrayLike, npt.ArrayLike, bool], @@ -559,7 +537,7 @@ class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): c: _AnyRealSeriesND, /, *args: _AnyRealPoints, - ) -> npt.NDArray[np.floating[Any]]: ... + ) -> _FloatArrayND: ... @overload def __call__( self, @@ -567,7 +545,7 @@ class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): c: _AnyComplexSeriesND, /, *args: _AnyComplexPoints, - ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArrayND: ... @overload def __call__( self, @@ -575,7 +553,7 @@ class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): c: _AnySeriesND, /, *args: _AnyObjectPoints, - ) -> npt.NDArray[np.object_]: ... + ) -> _ObjectArrayND: ... @final class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): @@ -584,19 +562,19 @@ class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): self, /, x: _AnyRealSeriesND, deg: SupportsIndex, - ) -> npt.NDArray[np.floating[Any]]: ... + ) -> _FloatArrayND: ... @overload def __call__( self, /, x: _AnyComplexSeriesND, deg: SupportsIndex, - ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArrayND: ... @overload def __call__( self, /, x: _AnySeriesND, deg: SupportsIndex, - ) -> npt.NDArray[np.object_]: ... + ) -> _ObjectArrayND: ... @overload def __call__( self, /, @@ -614,21 +592,21 @@ class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): x: _AnyRealSeriesND, y: _AnyRealSeriesND, deg: _AnyDegrees, - ) -> npt.NDArray[np.floating[Any]]: ... + ) -> _FloatArrayND: ... @overload def __call__( self, /, x: _AnyComplexSeriesND, y: _AnyComplexSeriesND, deg: _AnyDegrees, - ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArrayND: ... @overload def __call__( self, /, x: _AnySeriesND, y: _AnySeriesND, deg: _AnyDegrees, - ) -> npt.NDArray[np.object_]: ... + ) -> _ObjectArrayND: ... @overload def __call__( self, /, @@ -646,7 +624,7 @@ class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): y: _AnyRealSeriesND, z: _AnyRealSeriesND, deg: _AnyDegrees, - ) -> npt.NDArray[np.floating[Any]]: ... + ) -> _FloatArrayND: ... 
@overload def __call__( self, /, @@ -654,7 +632,7 @@ class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): y: _AnyComplexSeriesND, z: _AnyComplexSeriesND, deg: _AnyDegrees, - ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArrayND: ... @overload def __call__( self, /, @@ -662,7 +640,7 @@ class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): y: _AnySeriesND, z: _AnySeriesND, deg: _AnyDegrees, - ) -> npt.NDArray[np.object_]: ... + ) -> _ObjectArrayND: ... @overload def __call__( self, /, @@ -686,14 +664,14 @@ class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): vander_fs: _SupportsLenAndGetItem[_AnyFuncVander], points: _SupportsLenAndGetItem[_ArrayLikeFloat_co], degrees: _SupportsLenAndGetItem[SupportsIndex], - ) -> npt.NDArray[np.floating[Any]]: ... + ) -> _FloatArrayND: ... @overload def __call__( self, /, vander_fs: _SupportsLenAndGetItem[_AnyFuncVander], points: _SupportsLenAndGetItem[_ArrayLikeComplex_co], degrees: _SupportsLenAndGetItem[SupportsIndex], - ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArrayND: ... @overload def __call__( self, /, @@ -702,7 +680,7 @@ class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): _ArrayLikeObject_co | _ArrayLikeComplex_co, ], degrees: _SupportsLenAndGetItem[SupportsIndex], - ) -> npt.NDArray[np.object_]: ... + ) -> _ObjectArrayND: ... @overload def __call__( self, /, @@ -722,7 +700,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): rcond: None | float = ..., full: Literal[False] = ..., w: None | _AnyRealSeries1D = ..., - ) -> npt.NDArray[np.floating[Any]]: ... + ) -> _FloatArrayND: ... @overload def __call__( self, /, @@ -732,7 +710,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): rcond: None | float = ..., full: Literal[False] = ..., w: None | _AnyComplexSeriesND = ..., - ) -> npt.NDArray[np.complexfloating[Any, Any]]: ... + ) -> _ComplexArrayND: ... @overload def __call__( self, /, @@ -742,7 +720,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): rcond: None | float = ..., full: Literal[False] = ..., w: None | _AnySeries1D = ..., - ) -> npt.NDArray[np.object_]: ... + ) -> _ObjectArrayND: ... @overload def __call__( self, @@ -782,7 +760,7 @@ class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): c: _AnyComplexSeries1D, ) -> _Array1D[np.complex128]: ... @overload - def __call__(self, /, c: _AnySeries1D) -> _Array1D[np.object_]: ... + def __call__(self, /, c: _AnySeries1D) -> _ObjectArray1D: ... @final class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): @@ -819,7 +797,7 @@ class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): c: _AnyComplexSeriesND, ) -> npt.NDArray[np.complex128]: ... @overload - def __call__(self, /, c: _AnySeriesND) -> npt.NDArray[np.object_]: ... + def __call__(self, /, c: _AnySeriesND) -> _ObjectArrayND: ... 
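The replacements above are pure notation: each new alias expands to exactly the annotation it replaces, so no overload changes meaning. Sketch, assuming Python 3.10+ for ``TypeAlias``:

from typing import Any, TypeAlias

import numpy as np
import numpy.typing as npt

_FloatArrayND: TypeAlias = npt.NDArray[np.floating[Any]]


def f(x: _FloatArrayND) -> None: ...
def g(x: npt.NDArray[np.floating[Any]]) -> None: ...  # same type for a checker


f(np.array([1.0]))
g(np.array([1.0]))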
_N_pts = TypeVar("_N_pts", bound=int) diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 346298ec09aa..dba8c2a75b68 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -27,13 +27,18 @@ from ._polytypes import ( _AnySeries1D, _AnySeriesND, _AnyRealScalar, - _Array1D, _Array2, _CoefArrayND, _CoefArray1D, + _ComplexArray1D, + _ComplexArrayND, + _FloatArray1D, + _FloatArrayND, _FuncBinOp, _FuncValND, _FuncVanderND, + _ObjectArray1D, + _ObjectArrayND, _SimpleSequence, _SupportsLenAndGetItem, _Tuple2, @@ -57,103 +62,103 @@ _AnyVanderF: TypeAlias = Callable[[npt.ArrayLike, SupportsIndex], _CoefArrayND] def as_series( alist: npt.NDArray[np.integer[Any]], trim: bool = ..., -) -> list[_Array1D[np.floating[Any]]]: ... +) -> list[_FloatArray1D]: ... @overload def as_series( - alist: npt.NDArray[np.floating[Any]], + alist: _FloatArrayND, trim: bool = ..., -) -> list[_Array1D[np.floating[Any]]]: ... +) -> list[_FloatArray1D]: ... @overload def as_series( - alist: npt.NDArray[np.complexfloating[Any, Any]], + alist: _ComplexArrayND, trim: bool = ..., -) -> list[_Array1D[np.complexfloating[Any, Any]]]: ... +) -> list[_ComplexArray1D]: ... @overload def as_series( - alist: npt.NDArray[np.object_], + alist: _ObjectArrayND, trim: bool = ..., -) -> list[_Array1D[np.object_]]: ... +) -> list[_ObjectArray1D]: ... @overload def as_series( # type: ignore[overload-overlap] alist: Iterable[npt.NDArray[np.integer[Any]]], trim: bool = ..., -) -> list[_Array1D[np.floating[Any]]]: ... +) -> list[_FloatArray1D]: ... @overload def as_series( - alist: Iterable[npt.NDArray[np.floating[Any]]], + alist: Iterable[_FloatArrayND], trim: bool = ..., -) -> list[_Array1D[np.floating[Any]]]: ... +) -> list[_FloatArray1D]: ... @overload def as_series( - alist: Iterable[npt.NDArray[np.complexfloating[Any, Any]]], + alist: Iterable[_ComplexArrayND], trim: bool = ..., -) -> list[_Array1D[np.complexfloating[Any, Any]]]: ... +) -> list[_ComplexArray1D]: ... @overload def as_series( - alist: Iterable[npt.NDArray[np.object_]], + alist: Iterable[_ObjectArrayND], trim: bool = ..., -) -> list[_Array1D[np.object_]]: ... +) -> list[_ObjectArray1D]: ... @overload def as_series( # type: ignore[overload-overlap] alist: Iterable[_AnyRealSeries1D | float], trim: bool = ..., -) -> list[_Array1D[np.floating[Any]]]: ... +) -> list[_FloatArray1D]: ... @overload def as_series( alist: Iterable[_AnyComplexSeries1D | complex], trim: bool = ..., -) -> list[_Array1D[np.complexfloating[Any, Any]]]: ... +) -> list[_ComplexArray1D]: ... @overload def as_series( alist: Iterable[_AnyObjectSeries1D | object], trim: bool = ..., -) -> list[_Array1D[np.object_]]: ... +) -> list[_ObjectArray1D]: ... _T_seq = TypeVar("_T_seq", bound=_CoefArrayND | _SimpleSequence[_AnyScalar]) def trimseq(seq: _T_seq) -> _T_seq: ... @overload def trimcoef( # type: ignore[overload-overlap] - c: npt.NDArray[np.integer[Any]] | npt.NDArray[np.floating[Any]], + c: npt.NDArray[np.integer[Any]] | _FloatArrayND, tol: _AnyRealScalar = ..., -) -> _Array1D[np.floating[Any]]: ... +) -> _FloatArray1D: ... @overload def trimcoef( - c: npt.NDArray[np.complexfloating[Any, Any]], + c: _ComplexArrayND, tol: _AnyRealScalar = ..., -) -> _Array1D[np.complexfloating[Any, Any]]: ... +) -> _ComplexArray1D: ... @overload def trimcoef( - c: npt.NDArray[np.object_], + c: _ObjectArrayND, tol: _AnyRealScalar = ..., -) -> _Array1D[np.object_]: ... +) -> _ObjectArray1D: ... 
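What the ``as_series`` overloads above promise can be checked at runtime: integer input is promoted to ``float64`` and every element comes back as a 1-D series:

import numpy as np
from numpy.polynomial import polyutils as pu

a, b = pu.as_series([np.array([1, 2, 3]), np.array([4.0, 5.0])])
print(a.dtype, a.shape)  # float64 (3,)
print(b.dtype, b.shape)  # float64 (2,)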
@overload def trimcoef( # type: ignore[overload-overlap] c: _AnyRealSeries1D | float, tol: _AnyRealScalar = ..., -) -> _Array1D[np.floating[Any]]: ... +) -> _FloatArray1D: ... @overload def trimcoef( c: _AnyComplexSeries1D | complex, tol: _AnyRealScalar = ..., -) -> _Array1D[np.complexfloating[Any, Any]]: ... +) -> _ComplexArray1D: ... @overload def trimcoef( c: _AnyObjectSeries1D | object, tol: _AnyRealScalar = ..., -) -> _Array1D[np.object_]: ... +) -> _ObjectArray1D: ... @overload def getdomain( # type: ignore[overload-overlap] - x: npt.NDArray[np.floating[Any]] | npt.NDArray[np.integer[Any]], + x: _FloatArrayND | npt.NDArray[np.integer[Any]], ) -> _Array2[np.float64]: ... @overload def getdomain( - x: npt.NDArray[np.complexfloating[Any, Any]], + x: _ComplexArrayND, ) -> _Array2[np.complex128]: ... @overload def getdomain( - x: npt.NDArray[np.object_], + x: _ObjectArrayND, ) -> _Array2[np.object_]: ... @overload def getdomain( # type: ignore[overload-overlap] @@ -232,37 +237,37 @@ def mapdomain( # type: ignore[overload-overlap] x: npt.NDArray[np.floating[Any] | np.integer[Any]], old: npt.NDArray[np.floating[Any] | np.integer[Any]], new: npt.NDArray[np.floating[Any] | np.integer[Any]], -) -> _Array1D[np.floating[Any]]: ... +) -> _FloatArray1D: ... @overload def mapdomain( x: npt.NDArray[np.number[Any]], old: npt.NDArray[np.number[Any]], new: npt.NDArray[np.number[Any]], -) -> _Array1D[np.complexfloating[Any, Any]]: ... +) -> _ComplexArray1D: ... @overload def mapdomain( x: npt.NDArray[np.object_ | np.number[Any]], old: npt.NDArray[np.object_ | np.number[Any]], new: npt.NDArray[np.object_ | np.number[Any]], -) -> _Array1D[np.object_]: ... +) -> _ObjectArray1D: ... @overload def mapdomain( x: _AnyRealSeries1D, old: _AnyRealSeries1D, new: _AnyRealSeries1D, -) -> _Array1D[np.floating[Any]]: ... +) -> _FloatArray1D: ... @overload def mapdomain( x: _AnyComplexSeries1D, old: _AnyComplexSeries1D, new: _AnyComplexSeries1D, -) -> _Array1D[np.complexfloating[Any, Any]]: ... +) -> _ComplexArray1D: ... @overload def mapdomain( x: _AnySeries1D, old:_AnySeries1D, new: _AnySeries1D, -) -> _Array1D[np.object_]: ... +) -> _ObjectArray1D: ... @overload def mapdomain( x: object, @@ -284,19 +289,19 @@ def _fromroots( # type: ignore[overload-overlap] line_f: _AnyLineF, mul_f: _AnyMulF, roots: _AnyRealSeries1D, -) -> _Array1D[np.floating[Any]]: ... +) -> _FloatArray1D: ... @overload def _fromroots( line_f: _AnyLineF, mul_f: _AnyMulF, roots: _AnyComplexSeries1D, -) -> _Array1D[np.complexfloating[Any, Any]]: ... +) -> _ComplexArray1D: ... @overload def _fromroots( line_f: _AnyLineF, mul_f: _AnyMulF, roots: _AnyObjectSeries1D, -) -> _Array1D[np.object_]: ... +) -> _ObjectArray1D: ... @overload def _fromroots( line_f: _AnyLineF, @@ -313,19 +318,19 @@ def _div( # type: ignore[overload-overlap] mul_f: _AnyMulF, c1: _AnyRealSeries1D, c2: _AnyRealSeries1D, -) -> _Tuple2[_Array1D[np.floating[Any]]]: ... +) -> _Tuple2[_FloatArray1D]: ... @overload def _div( mul_f: _AnyMulF, c1: _AnyComplexSeries1D, c2: _AnyComplexSeries1D, -) -> _Tuple2[_Array1D[np.complexfloating[Any, Any]]]: ... +) -> _Tuple2[_ComplexArray1D]: ... @overload def _div( mul_f: _AnyMulF, c1: _AnyObjectSeries1D, c2: _AnyObjectSeries1D, -) -> _Tuple2[_Array1D[np.object_]]: ... +) -> _Tuple2[_ObjectArray1D]: ... @overload def _div( mul_f: _AnyMulF, @@ -343,21 +348,21 @@ def _pow( # type: ignore[overload-overlap] c: _AnyRealSeries1D, pow: _AnyInt, maxpower: None | _AnyInt = ..., -) -> _Array1D[np.floating[Any]]: ... +) -> _FloatArray1D: ... 
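``trimcoef``, retyped above, keeps its runtime contract: trailing coefficients with magnitude at most ``tol`` are dropped, and a single zero is kept so the result is still a valid series:

from numpy.polynomial import polyutils as pu

print(pu.trimcoef([0.0, 1.0, 2.0, 0.0, 0.0]))    # [0. 1. 2.]
print(pu.trimcoef([0.0, 0.0, 1e-12], tol=1e-9))  # [0.]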
@overload def _pow( mul_f: _AnyMulF, c: _AnyComplexSeries1D, pow: _AnyInt, maxpower: None | _AnyInt = ..., -) -> _Array1D[np.complexfloating[Any, Any]]: ... +) -> _ComplexArray1D: ... @overload def _pow( mul_f: _AnyMulF, c: _AnyObjectSeries1D, pow: _AnyInt, maxpower: None | _AnyInt = ..., -) -> _Array1D[np.object_]: ... +) -> _ObjectArray1D: ... @overload def _pow( mul_f: _AnyMulF, @@ -377,7 +382,7 @@ def _fit( # type: ignore[overload-overlap] rcond: None | float = ..., full: Literal[False] = ..., w: None | _AnyRealSeries1D = ..., -) -> npt.NDArray[np.floating[Any]]: ... +) -> _FloatArrayND: ... @overload def _fit( vander_f: _AnyVanderF, @@ -388,7 +393,7 @@ def _fit( rcond: None | float = ..., full: Literal[False] = ..., w: None | _AnyComplexSeries1D = ..., -) -> npt.NDArray[np.complexfloating[Any, Any]]: ... +) -> _ComplexArrayND: ... @overload def _fit( vander_f: _AnyVanderF, From 388b0b332e04d59dda9e47e96b0606b150dd6177 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Jul 2024 19:46:56 +0200 Subject: [PATCH 812/980] TYP: initial type-tests for the polynomial classes in `numpy.polynomial` --- .../tests/data/reveal/polynomial_polybase.pyi | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 numpy/typing/tests/data/reveal/polynomial_polybase.pyi diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi new file mode 100644 index 000000000000..a43d8c284113 --- /dev/null +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -0,0 +1,42 @@ +import sys +from collections.abc import Sequence +from fractions import Fraction +from typing import Any, Literal as L, TypeAlias + +import numpy as np +import numpy.typing as npt +import numpy.polynomial.polyutils as pu +from numpy.polynomial._polybase import ABCPolyBase + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + +_Arr1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact[Any] | np.object_]] +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] +_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating[Any, Any]]] +_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +_Arr1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.inexact[Any] | np.object_]] +_ArrFloat1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating[Any]]] +_ArrComplex1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating[Any, Any]]] +_ArrObject1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +_BasisName: TypeAlias = L["X"] + +AR_u1: npt.NDArray[np.uint8] +AR_i2: npt.NDArray[np.int16] +AR_f4: npt.NDArray[np.float32] +AR_c8: npt.NDArray[np.complex64] +AR_O: npt.NDArray[np.object_] + +poly_obj: ABCPolyBase[_BasisName] + +assert_type(poly_obj.basis_name, _BasisName) +assert_type(poly_obj.coef, _Arr1D) +assert_type(poly_obj.domain, _Arr1D_2) +assert_type(poly_obj.window, _Arr1D_2) + +# TODO: ABCPolyBase methods +# TODO: ABCPolyBase operators From 5d41e112205e2939a431e20a992fdb682a9b19f7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 12 Jul 2024 04:23:17 +0200 Subject: [PATCH 813/980] TYP: type-tests and related fixes for the `numpy.polynomial` classes --- numpy/polynomial/_polybase.pyi | 94 +++++----- numpy/polynomial/_polytypes.pyi | 90 ++++++---- numpy/polynomial/polyutils.pyi | 67 +++---- .../tests/data/reveal/polynomial_polybase.pyi | 170 +++++++++++++++--- 4 files changed, 268 insertions(+), 153 
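The reveal files added above are consumed statically by the typing test suite; at runtime ``assert_type`` simply returns its first argument, so an executable sketch of the same idiom looks like this:

import sys

if sys.version_info >= (3, 11):
    from typing import assert_type
else:
    from typing_extensions import assert_type

from numpy.polynomial import Polynomial

p = Polynomial([1.0, 2.0])
assert_type(p.copy(), Polynomial)  # verified by the checker, identity at runtime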
deletions(-) diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index 5cf8c3d473a7..8ed1ec3154c0 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -1,3 +1,4 @@ +import abc import sys from collections.abc import Iterator, Mapping, Sequence from typing import ( @@ -15,18 +16,24 @@ from typing import ( import numpy as np import numpy.typing as npt -from numpy._typing import _ArrayLikeInt_co from ._polytypes import ( - _AnyInt, + _AnyComplexSeries1D, + _AnyIntArg, _AnyComplexScalar, _AnyComplexSeriesND, + _AnyIntSeries1D, + _AnyObjectSeries1D, _AnyObjectSeriesND, + _AnyRealScalar, + _AnyRealSeries1D, _AnyScalar, _AnySeries1D, _AnySeriesND, + _Array1D, _Array2, _CoefArray1D, + _ComplexArrayND, _SupportsLenAndGetItem, _Tuple2, ) @@ -43,12 +50,11 @@ __all__ = ["ABCPolyBase"] _NameCo = TypeVar("_NameCo", bound=None | LiteralString, covariant=True) _Self = TypeVar("_Self", bound="ABCPolyBase") -_Size = TypeVar("_Size", bound=int) _AnyOther: TypeAlias = ABCPolyBase | _AnyScalar | _AnySeries1D _Hundred: TypeAlias = Literal[100] -class ABCPolyBase(Generic[_NameCo]): +class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): __hash__: ClassVar[None] # type: ignore[assignment] __array_ufunc__: ClassVar[None] @@ -67,8 +73,7 @@ class ABCPolyBase(Generic[_NameCo]): def symbol(self, /) -> LiteralString: ... def __init__( - self, - /, + self, /, coef: _AnySeries1D, domain: None | _AnySeries1D = ..., window: None | _AnySeries1D = ..., @@ -132,26 +137,27 @@ class ABCPolyBase(Generic[_NameCo]): def has_samecoef(self, /, other: ABCPolyBase) -> bool: ... def has_samedomain(self, /, other: ABCPolyBase) -> bool: ... def has_samewindow(self, /, other: ABCPolyBase) -> bool: ... - def has_sametype(self: _Self, /, other: object) -> TypeGuard[_Self]: ... + @overload + def has_sametype(self: _Self, /, other: ABCPolyBase) -> TypeGuard[_Self]: ... + @overload + def has_sametype(self, /, other: object) -> Literal[False]: ... def copy(self: _Self, /) -> _Self: ... def degree(self, /) -> int: ... def cutdeg(self: _Self, /) -> _Self: ... - def trim(self: _Self, /, tol: float = ...) -> _Self: ... - def truncate(self: _Self, /, size: _AnyInt) -> _Self: ... + def trim(self: _Self, /, tol: _AnyRealScalar = ...) -> _Self: ... + def truncate(self: _Self, /, size: _AnyIntArg) -> _Self: ... @overload def convert( self, domain: None | _AnySeries1D, - kind: type[_Self], - /, + kind: type[_Self], /, window: None | _AnySeries1D = ..., ) -> _Self: ... @overload def convert( - self, - /, + self, /, domain: None | _AnySeries1D = ..., *, kind: type[_Self], @@ -159,8 +165,7 @@ class ABCPolyBase(Generic[_NameCo]): ) -> _Self: ... @overload def convert( - self: _Self, - /, + self: _Self, /, domain: None | _AnySeries1D = ..., kind: type[_Self] = ..., window: None | _AnySeries1D = ..., @@ -169,8 +174,7 @@ class ABCPolyBase(Generic[_NameCo]): def mapparms(self, /) -> _Tuple2[Any]: ... def integ( - self: _Self, - /, + self: _Self, /, m: SupportsIndex = ..., k: _AnyComplexScalar | _SupportsLenAndGetItem[_AnyComplexScalar] = ..., lbnd: None | _AnyComplexScalar = ..., @@ -180,37 +184,21 @@ class ABCPolyBase(Generic[_NameCo]): def roots(self, /) -> _CoefArray1D: ... - @overload def linspace( - self, - /, - n: _Size, - domain: None | _AnySeries1D = ..., - ) -> tuple[ - np.ndarray[tuple[_Size], np.dtype[np.float64]], - np.ndarray[tuple[_Size], np.dtype[np.float64 | np.complex128]], - ]: ... 
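The new ``TypeGuard`` overload of ``has_sametype`` above is meant to let a positive check narrow the *other* operand to the caller's own class. Intended inference (illustrative; exact narrowing depends on the checker):

from numpy.polynomial import Chebyshev, Polynomial


def describe(p: Polynomial, q: Polynomial | Chebyshev) -> None:
    if p.has_sametype(q):
        print("same kind:", q.coef)  # q narrowed to Polynomial here
    else:
        print("different polynomial kinds")


describe(Polynomial([1.0]), Chebyshev([1.0]))  # different polynomial kinds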
- @overload - def linspace( - self, - /, - n: _Hundred = ..., + self, /, + n: SupportsIndex = ..., domain: None | _AnySeries1D = ..., - ) -> tuple[ - np.ndarray[tuple[_Hundred], np.dtype[np.float64]], - np.ndarray[tuple[_Hundred], np.dtype[np.float64 | np.complex128]], - ]: ... + ) -> _Tuple2[_Array1D[np.float64 | np.complex128]]: ... @overload @classmethod def fit( - cls: type[_Self], - /, + cls: type[_Self], /, x: _AnySeries1D, y: _AnySeries1D, - deg: _ArrayLikeInt_co, + deg: int | _AnyIntSeries1D, domain: None | _AnySeries1D = ..., - rcond: float = ..., + rcond: _AnyRealScalar = ..., full: Literal[False] = ..., w: None | _AnySeries1D = ..., window: None | _AnySeries1D = ..., @@ -222,9 +210,9 @@ class ABCPolyBase(Generic[_NameCo]): cls: type[_Self], /, x: _AnySeries1D, y: _AnySeries1D, - deg: _ArrayLikeInt_co, + deg: int | _AnyIntSeries1D, domain: None | _AnySeries1D = ..., - rcond: float = ..., + rcond: _AnyRealScalar = ..., *, full: Literal[True], w: None | _AnySeries1D = ..., @@ -237,11 +225,10 @@ class ABCPolyBase(Generic[_NameCo]): cls: type[_Self], x: _AnySeries1D, y: _AnySeries1D, - deg: _ArrayLikeInt_co, + deg: int | _AnyIntSeries1D, domain: None | _AnySeries1D, - rcond: float, - full: Literal[True], - /, + rcond: _AnyRealScalar, + full: Literal[True], /, w: None | _AnySeries1D = ..., window: None | _AnySeries1D = ..., symbol: str = ..., @@ -249,34 +236,33 @@ class ABCPolyBase(Generic[_NameCo]): @classmethod def fromroots( - cls: type[_Self], - /, + cls: type[_Self], /, roots: _AnySeriesND, domain: None | _AnySeries1D = ..., window: None | _AnySeries1D = ..., + symbol: str = ..., ) -> _Self: ... @classmethod def identity( - cls: type[_Self], - /, + cls: type[_Self], /, domain: None | _AnySeries1D = ..., window: None | _AnySeries1D = ..., + symbol: str = ..., ) -> _Self: ... @classmethod def basis( - cls: type[_Self], - /, - deg: int, + cls: type[_Self], /, + deg: _AnyIntArg, domain: None | _AnySeries1D = ..., window: None | _AnySeries1D = ..., + symbol: str = ..., ) -> _Self: ... @classmethod def cast( - cls: type[_Self], - /, + cls: type[_Self], /, series: ABCPolyBase, domain: None | _AnySeries1D = ..., window: None | _AnySeries1D = ..., diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index ba53205a8331..510c459b2864 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -24,7 +24,6 @@ from numpy._typing import ( _ArrayLikeInt_co, _ArrayLikeObject_co, _NestedSequence, - _SupportsArray, ) if sys.version_info >= (3, 11): @@ -53,10 +52,16 @@ class _SimpleSequence(Protocol[_V_co]): def __getitem__(self: _Self, ii: slice, /) -> _Self: ... _SCT = TypeVar("_SCT", bound=np.number[Any] | np.object_) +_SCT_co = TypeVar("_SCT_co", bound=np.number[Any] | np.object_, covariant=True) + +class _SupportsArray(Protocol[_SCT_co]): + def __array__(self ,) -> npt.NDArray[_SCT_co]: ... 
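A user-defined class satisfies the covariant ``_SupportsArray`` protocol defined above simply by implementing ``__array__``; covariance means a ``_SupportsArray[np.float64]`` is accepted wherever ``_SupportsArray[np.floating[Any]]`` is expected. Sketch:

import numpy as np
import numpy.typing as npt


class FloatBox:
    def __init__(self, values: list[float]) -> None:
        self._values = values

    def __array__(self) -> npt.NDArray[np.float64]:
        return np.asarray(self._values, dtype=np.float64)


print(np.asarray(FloatBox([1.0, 2.0])))  # [1. 2.]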
_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] _Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_SCT]] +_IntArray1D: TypeAlias = _Array1D[np.integer[Any]] +_IntArrayND: TypeAlias = npt.NDArray[np.integer[Any]] _FloatArray1D: TypeAlias = _Array1D[np.floating[Any]] _FloatArrayND: TypeAlias = npt.NDArray[np.floating[Any]] _ComplexArray1D: TypeAlias = _Array1D[np.complexfloating[Any, Any]] @@ -71,6 +76,9 @@ _Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_SCT]] _CoefArray1D: TypeAlias = _Array1D[np.inexact[Any] | np.object_] _CoefArrayND: TypeAlias = npt.NDArray[np.inexact[Any] | np.object_] +_AnyIntArg: TypeAlias = SupportsInt | SupportsIndex + +_AnyIntScalar: TypeAlias = int | np.integer[Any] _AnyRealScalar: TypeAlias = float | np.floating[Any] | np.integer[Any] _AnyComplexScalar: TypeAlias = complex | np.number[Any] _AnyObjectScalar: TypeAlias = ( @@ -80,50 +88,58 @@ _AnyObjectScalar: TypeAlias = ( | numbers.Complex ) _AnyScalar: TypeAlias = _AnyComplexScalar | _AnyObjectScalar -_AnyInt: TypeAlias = SupportsInt | SupportsIndex +_AnyIntSeries1D: TypeAlias = ( + _SupportsArray[np.integer[Any]] + | _SupportsLenAndGetItem[_AnyIntScalar] +) _AnyRealSeries1D: TypeAlias = ( - npt.NDArray[np.floating[Any] | np.integer[Any]] - | _SupportsArray[np.dtype[np.floating[Any] | np.integer[Any]]] - | _SupportsLenAndGetItem[float | np.floating[Any] | np.integer[Any]] + _SupportsArray[np.integer[Any] | np.floating[Any]] + | _SupportsLenAndGetItem[_AnyRealScalar] ) _AnyComplexSeries1D: TypeAlias = ( - npt.NDArray[np.number[Any]] - | _SupportsArray[np.dtype[np.number[Any]]] + _SupportsArray[np.number[Any]] | _SupportsLenAndGetItem[_AnyComplexScalar] ) _AnyObjectSeries1D: TypeAlias = ( - _ObjectArrayND + _SupportsArray[np.object_] | _SupportsLenAndGetItem[_AnyObjectScalar] ) _AnySeries1D: TypeAlias = ( - npt.NDArray[np.number[Any] | np.object_] - | _SupportsLenAndGetItem[_AnyScalar | object] + _SupportsArray[np.number[Any] | np.object_] + | _SupportsLenAndGetItem[object] +) + +_AnyIntSeriesND: TypeAlias = ( + int + | _SupportsArray[np.integer[Any]] + | _NestedSequence[int] + | _NestedSequence[_SupportsArray[np.integer[Any]]] ) _AnyRealSeriesND: TypeAlias = ( - npt.NDArray[np.floating[Any] | np.integer[Any]] - | _AnyRealScalar - | _SupportsArray[np.dtype[np.floating[Any] | np.integer[Any]]] - | _NestedSequence[float | np.floating[Any] | np.integer[Any]] + float + | _SupportsArray[np.integer[Any] | np.floating[Any]] + | _NestedSequence[float] + | _NestedSequence[_SupportsArray[np.integer[Any] | np.floating[Any]]] ) _AnyComplexSeriesND: TypeAlias = ( - npt.NDArray[np.number[Any]] - | _AnyComplexScalar - | _SupportsArray[np.dtype[np.number[Any]]] - | _NestedSequence[complex | np.number[Any]] + complex + | _SupportsArray[np.number[Any]] + | _NestedSequence[complex] + | _NestedSequence[_SupportsArray[np.number[Any]]] ) _AnyObjectSeriesND: TypeAlias = ( - _ObjectArrayND - | _AnyObjectScalar - | _SupportsArray[np.dtype[np.object_]] + _AnyObjectScalar + | _SupportsArray[np.object_] | _NestedSequence[_AnyObjectScalar] + | _NestedSequence[_SupportsArray[np.object_]] ) _AnySeriesND: TypeAlias = ( - npt.NDArray[np.number[Any] | np.object_] - | _AnyScalar - | _SupportsArray[np.dtype[np.number[Any] | np.object_]] - | _NestedSequence[_AnyScalar | object] + _AnyScalar + | _SupportsArray[np.number[Any] | np.object_] + | _NestedSequence[object] + | _NestedSequence[_SupportsArray[np.number[Any] | np.object_]] ) _Name_co = TypeVar("_Name_co", bound=LiteralString, covariant=True) @@ -203,22 
+219,22 @@ class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): def __call__( # type: ignore[overload-overlap] self, /, c: _AnyRealSeries1D, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., + pow: _AnyIntArg, + maxpower: None | _AnyIntArg = ..., ) -> _FloatArray1D: ... @overload def __call__( self, /, c: _AnyComplexSeries1D, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., + pow: _AnyIntArg, + maxpower: None | _AnyIntArg = ..., ) -> _ComplexArray1D: ... @overload def __call__( self, /, c: _AnySeries1D, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., + pow: _AnyIntArg, + maxpower: None | _AnyIntArg = ..., ) -> _ObjectArray1D: ... @final @@ -696,7 +712,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): self, /, x: _AnyRealSeries1D, y: _AnyRealSeriesND, - deg: _ArrayLikeInt_co, + deg: int | _AnyIntSeries1D, rcond: None | float = ..., full: Literal[False] = ..., w: None | _AnyRealSeries1D = ..., @@ -706,7 +722,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): self, /, x: _AnyComplexSeries1D, y: _AnyComplexSeriesND, - deg: _ArrayLikeInt_co, + deg: int | _AnyIntSeries1D, rcond: None | float = ..., full: Literal[False] = ..., w: None | _AnyComplexSeriesND = ..., @@ -716,7 +732,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): self, /, x: _AnySeries1D, y: _AnySeriesND, - deg: _ArrayLikeInt_co, + deg: int | _AnyIntSeries1D, rcond: None | float = ..., full: Literal[False] = ..., w: None | _AnySeries1D = ..., @@ -726,7 +742,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): self, x: _AnySeries1D, y: _AnySeriesND, - deg: _ArrayLikeInt_co, + deg: int | _AnyIntSeries1D, rcond: None | float, full: Literal[True], /, @@ -737,7 +753,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): self, /, x: _AnySeries1D, y: _AnySeriesND, - deg: _ArrayLikeInt_co, + deg: int | _AnyIntSeries1D, rcond: None | float = ..., *, full: Literal[True], @@ -809,4 +825,4 @@ class _FuncPts(_Named[_Name_co], Protocol[_Name_co]): npts: _N_pts, ) -> np.ndarray[tuple[_N_pts], np.dtype[np.float64]]: ... @overload - def __call__(self, /, npts: _AnyInt) -> _Array1D[np.float64]: ... + def __call__(self, /, npts: _AnyIntArg) -> _Array1D[np.float64]: ... diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index dba8c2a75b68..632249d77366 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -11,15 +11,15 @@ from typing import ( import numpy as np import numpy.typing as npt -from numpy._typing import _ArrayLikeInt_co, _FloatLike_co from ._polytypes import ( _AnyComplexScalar, _AnyComplexSeries1D, _AnyComplexSeriesND, + _AnyIntSeries1D, _AnyRealSeries1D, _AnyRealSeriesND, - _AnyInt, + _AnyIntArg, _AnyComplexSeries1D, _AnyObjectSeries1D, _AnyRealScalar, @@ -37,6 +37,7 @@ from ._polytypes import ( _FuncBinOp, _FuncValND, _FuncVanderND, + _IntArrayND, _ObjectArray1D, _ObjectArrayND, _SimpleSequence, @@ -60,12 +61,7 @@ _AnyVanderF: TypeAlias = Callable[[npt.ArrayLike, SupportsIndex], _CoefArrayND] @overload def as_series( - alist: npt.NDArray[np.integer[Any]], - trim: bool = ..., -) -> list[_FloatArray1D]: ... -@overload -def as_series( - alist: _FloatArrayND, + alist: _IntArrayND | _FloatArrayND, trim: bool = ..., ) -> list[_FloatArray1D]: ... @overload @@ -80,12 +76,7 @@ def as_series( ) -> list[_ObjectArray1D]: ... @overload def as_series( # type: ignore[overload-overlap] - alist: Iterable[npt.NDArray[np.integer[Any]]], - trim: bool = ..., -) -> list[_FloatArray1D]: ... 
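The widened ``deg: int | _AnyIntSeries1D`` in ``_FuncFit`` above matches what the fitting routines already accept at runtime: a single degree, or the exact term degrees to include. For example:

import numpy as np
from numpy.polynomial import Polynomial

x = np.linspace(-1.0, 1.0, 50)
y = x**3 - x
full_fit = Polynomial.fit(x, y, deg=3)      # all terms up to degree 3
odd_fit = Polynomial.fit(x, y, deg=[1, 3])  # only the listed terms
print(full_fit.degree(), odd_fit.degree())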
-@overload -def as_series( - alist: Iterable[_FloatArrayND], + alist: Iterable[_FloatArrayND | _IntArrayND], trim: bool = ..., ) -> list[_FloatArray1D]: ... @overload @@ -119,7 +110,7 @@ def trimseq(seq: _T_seq) -> _T_seq: ... @overload def trimcoef( # type: ignore[overload-overlap] - c: npt.NDArray[np.integer[Any]] | _FloatArrayND, + c: _IntArrayND | _FloatArrayND, tol: _AnyRealScalar = ..., ) -> _FloatArray1D: ... @overload @@ -150,7 +141,7 @@ def trimcoef( @overload def getdomain( # type: ignore[overload-overlap] - x: _FloatArrayND | npt.NDArray[np.integer[Any]], + x: _FloatArrayND | _IntArrayND, ) -> _Array2[np.float64]: ... @overload def getdomain( @@ -229,8 +220,8 @@ def mapdomain( @overload def mapdomain( x: _AnyScalar, - old: _AnyObjectSeries1D | _AnyComplexSeries1D, - new: _AnyObjectSeries1D | _AnyComplexSeries1D, + old: _AnySeries1D, + new: _AnySeries1D, ) -> object: ... @overload def mapdomain( # type: ignore[overload-overlap] @@ -251,7 +242,7 @@ def mapdomain( new: npt.NDArray[np.object_ | np.number[Any]], ) -> _ObjectArray1D: ... @overload -def mapdomain( +def mapdomain( # type: ignore[overload-overlap] x: _AnyRealSeries1D, old: _AnyRealSeries1D, new: _AnyRealSeries1D, @@ -346,29 +337,29 @@ _sub: Final[_FuncBinOp] def _pow( # type: ignore[overload-overlap] mul_f: _AnyMulF, c: _AnyRealSeries1D, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., + pow: _AnyIntArg, + maxpower: None | _AnyIntArg = ..., ) -> _FloatArray1D: ... @overload def _pow( mul_f: _AnyMulF, c: _AnyComplexSeries1D, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., + pow: _AnyIntArg, + maxpower: None | _AnyIntArg = ..., ) -> _ComplexArray1D: ... @overload def _pow( mul_f: _AnyMulF, c: _AnyObjectSeries1D, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., + pow: _AnyIntArg, + maxpower: None | _AnyIntArg = ..., ) -> _ObjectArray1D: ... @overload def _pow( mul_f: _AnyMulF, c: _AnySeries1D, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., + pow: _AnyIntArg, + maxpower: None | _AnyIntArg = ..., ) -> _CoefArray1D: ... # keep in sync with `_polytypes._FuncFit` @@ -377,9 +368,9 @@ def _fit( # type: ignore[overload-overlap] vander_f: _AnyVanderF, x: _AnyRealSeries1D, y: _AnyRealSeriesND, - deg: _ArrayLikeInt_co, + deg: _AnyIntSeries1D, domain: None | _AnyRealSeries1D = ..., - rcond: None | float = ..., + rcond: None | _AnyRealScalar = ..., full: Literal[False] = ..., w: None | _AnyRealSeries1D = ..., ) -> _FloatArrayND: ... @@ -388,9 +379,9 @@ def _fit( vander_f: _AnyVanderF, x: _AnyComplexSeries1D, y: _AnyComplexSeriesND, - deg: _ArrayLikeInt_co, + deg: _AnyIntSeries1D, domain: None | _AnyComplexSeries1D = ..., - rcond: None | float = ..., + rcond: None | _AnyRealScalar = ..., full: Literal[False] = ..., w: None | _AnyComplexSeries1D = ..., ) -> _ComplexArrayND: ... @@ -399,9 +390,9 @@ def _fit( vander_f: _AnyVanderF, x: _AnySeries1D, y: _AnySeriesND, - deg: _ArrayLikeInt_co, + deg: _AnyIntSeries1D, domain: None | _AnySeries1D = ..., - rcond: None | float = ..., + rcond: None | _AnyRealScalar = ..., full: Literal[False] = ..., w: None | _AnySeries1D = ..., ) -> _CoefArrayND: ... 
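Across the ``_fit`` overloads here, the ``Literal[False]``/``Literal[True]`` split on ``full`` is what selects between the two return shapes. Runtime counterpart:

import numpy as np
from numpy.polynomial import polynomial as P

x = np.linspace(0.0, 1.0, 20)
y = 2.0 * x + 1.0
coef = P.polyfit(x, y, 1)                    # coefficients only
coef2, diag = P.polyfit(x, y, 1, full=True)  # plus lstsq diagnostics
print(coef, diag[1])                         # diag[1] is the design-matrix rank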
@@ -410,9 +401,9 @@ def _fit( vander_f: _AnyVanderF, x: _AnySeries1D, y: _AnySeries1D, - deg: _ArrayLikeInt_co, + deg: _AnyIntSeries1D, domain: None | _AnySeries1D, - rcond: None | float , + rcond: None | _AnyRealScalar , full: Literal[True], /, w: None | _AnySeries1D = ..., @@ -422,13 +413,13 @@ def _fit( vander_f: _AnyVanderF, x: _AnySeries1D, y: _AnySeries1D, - deg: _ArrayLikeInt_co, + deg: _AnyIntSeries1D, domain: None | _AnySeries1D = ..., - rcond: None | float = ..., + rcond: None | _AnyRealScalar = ..., *, full: Literal[True], w: None | _AnySeries1D = ..., ) -> tuple[_CoefArray1D, Sequence[np.inexact[Any] | np.int32]]: ... def _as_int(x: SupportsIndex, desc: str) -> int: ... -def format_float(x: _FloatLike_co, parens: bool = ...) -> str: ... +def format_float(x: _AnyRealScalar, parens: bool = ...) -> str: ... diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index a43d8c284113..eb3dbe0f05a3 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,42 +1,164 @@ import sys from collections.abc import Sequence -from fractions import Fraction -from typing import Any, Literal as L, TypeAlias +from typing import Any, Literal as L, TypeAlias, TypeVar import numpy as np +import numpy.polynomial as npp import numpy.typing as npt -import numpy.polynomial.polyutils as pu -from numpy.polynomial._polybase import ABCPolyBase if sys.version_info >= (3, 11): - from typing import assert_type + from typing import LiteralString, assert_type else: - from typing_extensions import assert_type + from typing_extensions import LiteralString, assert_type -_Arr1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact[Any] | np.object_]] -_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] -_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating[Any, Any]]] -_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] +_Ar_x: TypeAlias = npt.NDArray[np.inexact[Any] | np.object_] +_Ar_f: TypeAlias = npt.NDArray[np.floating[Any]] +_Ar_c: TypeAlias = npt.NDArray[np.complexfloating[Any, Any]] +_Ar_O: TypeAlias = npt.NDArray[np.object_] -_Arr1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.inexact[Any] | np.object_]] -_ArrFloat1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating[Any]]] -_ArrComplex1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating[Any, Any]]] -_ArrObject1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] +_Ar_x_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact[Any] | np.object_]] +_Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] +_Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating[Any, Any]]] +_Ar_O_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.inexact[Any] | np.object_]] +_Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating[Any]]] +_Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating[Any, Any]]] +_Ar_O_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +_SCT = TypeVar("_SCT", bound=np.generic) +_Ar_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] _BasisName: TypeAlias = L["X"] -AR_u1: npt.NDArray[np.uint8] -AR_i2: npt.NDArray[np.int16] -AR_f4: npt.NDArray[np.float32] -AR_c8: npt.NDArray[np.complex64] +SC_i: np.integer[Any] +SC_i_co: int | np.integer[Any] +SC_f: 
np.floating[Any] +SC_f_co: float | np.floating[Any] | np.integer[Any] +SC_c: np.complexfloating[Any, Any] +SC_c_co: complex | np.number[Any] +SC_O: np.object_ +SC_O_co: np.object_ | np.number[Any] | object + +AR_i: npt.NDArray[np.integer[Any]] +AR_f: npt.NDArray[np.floating[Any]] +AR_f_co: npt.NDArray[np.floating[Any] | np.integer[Any]] +AR_c: npt.NDArray[np.complexfloating[Any, Any]] +AR_c_co: npt.NDArray[np.number[Any]] AR_O: npt.NDArray[np.object_] +AR_O_co: npt.NDArray[np.object_ | np.number[Any]] + +SQ_i: Sequence[int] +SQ_f: Sequence[float] +SQ_c: Sequence[complex] +SQ_O: Sequence[object] + +PS_poly: npp.Polynomial +PS_cheb: npp.Chebyshev +PS_herm: npp.Hermite +PS_herme: npp.HermiteE +PS_lag: npp.Laguerre +PS_leg: npp.Legendre +PS_all: ( + npp.Polynomial + | npp.Chebyshev + | npp.Hermite + | npp.HermiteE + | npp.Laguerre + | npp.Legendre +) + +assert_type(type(PS_poly).basis_name, None) +assert_type(type(PS_cheb).basis_name, L['T']) +assert_type(type(PS_herm).basis_name, L['H']) +assert_type(type(PS_herme).basis_name, L['He']) +assert_type(type(PS_lag).basis_name, L['L']) +assert_type(type(PS_leg).basis_name, L['P']) + +assert_type(type(PS_all).__hash__, None) +assert_type(type(PS_all).__array_ufunc__, None) +assert_type(type(PS_all).maxpower, L[100]) + +assert_type(PS_all.coef, _Ar_x_n) +assert_type(PS_all.domain, _Ar_x_2) +assert_type(PS_all.window, _Ar_x_2) +assert_type(PS_all.symbol, LiteralString) + +assert_type(PS_all.has_samecoef(PS_all), bool) +assert_type(PS_all.has_samedomain(PS_all), bool) +assert_type(PS_all.has_samewindow(PS_all), bool) +assert_type(PS_all.has_sametype(PS_all), bool) +assert_type(PS_poly.has_sametype(PS_poly), bool) +assert_type(PS_poly.has_sametype(PS_leg), bool) +assert_type(PS_poly.has_sametype(NotADirectoryError), L[False]) + +assert_type(PS_poly.copy(), npp.Polynomial) +assert_type(PS_cheb.copy(), npp.Chebyshev) +assert_type(PS_herm.copy(), npp.Hermite) +assert_type(PS_herme.copy(), npp.HermiteE) +assert_type(PS_lag.copy(), npp.Laguerre) +assert_type(PS_leg.copy(), npp.Legendre) + +assert_type(PS_leg.cutdeg(), npp.Legendre) +assert_type(PS_leg.trim(), npp.Legendre) +assert_type(PS_leg.trim(tol=SC_f_co), npp.Legendre) +assert_type(PS_leg.truncate(SC_i_co), npp.Legendre) + +assert_type(PS_all.convert(None, npp.Chebyshev), npp.Chebyshev) +assert_type(PS_all.convert((0, 1), npp.Laguerre), npp.Laguerre) +assert_type(PS_all.convert([0, 1], npp.Hermite, [-1, 1]), npp.Hermite) + +assert_type(PS_all.degree(), int) +assert_type(PS_all.mapparms(), tuple[Any, Any]) + +assert_type(PS_poly.integ(), npp.Polynomial) +assert_type(PS_herme.integ(SC_i_co), npp.HermiteE) +assert_type(PS_lag.integ(SC_i_co, SC_f_co), npp.Laguerre) +assert_type(PS_poly.deriv(), npp.Polynomial) +assert_type(PS_herm.deriv(SC_i_co), npp.Hermite) + +assert_type(PS_poly.roots(), _Ar_x_n) + +assert_type( + PS_poly.linspace(), + tuple[_Ar_1d[np.float64 | np.complex128], _Ar_1d[np.float64 | np.complex128]], +) + +assert_type( + PS_poly.linspace(9), + tuple[_Ar_1d[np.float64 | np.complex128], _Ar_1d[np.float64 | np.complex128]], +) + +assert_type(PS_cheb.fit(AR_c_co, AR_c_co, SC_i_co), npp.Chebyshev) +assert_type(PS_leg.fit(AR_c_co, AR_c_co, AR_i), npp.Legendre) +assert_type(PS_herm.fit(AR_c_co, AR_c_co, SQ_i), npp.Hermite) +assert_type(PS_poly.fit(AR_c_co, SQ_c, SQ_i), npp.Polynomial) +assert_type(PS_lag.fit(SQ_c, SQ_c, SQ_i, full=False), npp.Laguerre) +assert_type( + PS_herme.fit(SQ_c, AR_c_co, SC_i_co, full=True), + tuple[npp.HermiteE, Sequence[np.inexact[Any] | np.int32]], +) + 
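[Editor's note] The `reveal` files in this hunk are input for a static type checker rather than executable tests; `assert_type` does nothing at runtime. A small sketch of the pattern, mirroring the version-guarded import these files use:

    import sys
    if sys.version_info >= (3, 11):
        from typing import assert_type
    else:
        from typing_extensions import assert_type

    import numpy.polynomial as npp

    p = npp.Polynomial([1.0, 2.0])
    assert_type(p.degree(), int)  # verified by mypy; a no-op when run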
+assert_type(type(PS_poly).fromroots(SC_i), npp.Polynomial) +assert_type(type(PS_poly).fromroots(SQ_i), npp.Polynomial) +assert_type(type(PS_poly).fromroots(AR_i), npp.Polynomial) +assert_type(type(PS_cheb).fromroots(SC_f), npp.Chebyshev) +assert_type(type(PS_cheb).fromroots(SQ_f), npp.Chebyshev) +assert_type(type(PS_cheb).fromroots(AR_f_co), npp.Chebyshev) +assert_type(type(PS_herm).fromroots(SC_c), npp.Hermite) +assert_type(type(PS_herm).fromroots(SQ_c), npp.Hermite) +assert_type(type(PS_herm).fromroots(AR_c_co), npp.Hermite) +assert_type(type(PS_leg).fromroots(SC_O), npp.Legendre) +assert_type(type(PS_leg).fromroots(SQ_O), npp.Legendre) +assert_type(type(PS_leg).fromroots(AR_O_co), npp.Legendre) + +assert_type(type(PS_poly).identity(), npp.Polynomial) +assert_type(type(PS_cheb).identity(symbol='z'), npp.Chebyshev) -poly_obj: ABCPolyBase[_BasisName] +assert_type(type(PS_lag).basis(SC_i), npp.Laguerre) +assert_type(type(PS_leg).basis(32, symbol='u'), npp.Legendre) -assert_type(poly_obj.basis_name, _BasisName) -assert_type(poly_obj.coef, _Arr1D) -assert_type(poly_obj.domain, _Arr1D_2) -assert_type(poly_obj.window, _Arr1D_2) +assert_type(type(PS_herm).cast(PS_poly), npp.Hermite) +assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) -# TODO: ABCPolyBase methods # TODO: ABCPolyBase operators From 19b3265a62d7bc533c52a47c353548955cbdba2f Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 16 Jul 2024 07:14:49 +0200 Subject: [PATCH 814/980] TYP: A better story around the internal type aliases of ``numpy.polynomial`` --- numpy/polynomial/_polybase.pyi | 172 +++--- numpy/polynomial/_polytypes.pyi | 538 +++++++++--------- numpy/polynomial/chebyshev.pyi | 30 +- numpy/polynomial/polyutils.pyi | 376 ++++++------ .../tests/data/reveal/polynomial_polybase.pyi | 3 +- .../data/reveal/polynomial_polyutils.pyi | 32 +- 6 files changed, 575 insertions(+), 576 deletions(-) diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index 8ed1ec3154c0..b22025427e83 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -5,6 +5,7 @@ from typing import ( TYPE_CHECKING, Any, ClassVar, + Final, Generic, Literal, SupportsIndex, @@ -16,26 +17,29 @@ from typing import ( import numpy as np import numpy.typing as npt +from numpy._typing import ( + _FloatLike_co, + _NumberLike_co, + + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeObject_co, +) from ._polytypes import ( - _AnyComplexSeries1D, - _AnyIntArg, - _AnyComplexScalar, - _AnyComplexSeriesND, - _AnyIntSeries1D, - _AnyObjectSeries1D, - _AnyObjectSeriesND, - _AnyRealScalar, - _AnyRealSeries1D, - _AnyScalar, - _AnySeries1D, - _AnySeriesND, - _Array1D, + _AnyInt, + _CoefLike_co, + _Array2, - _CoefArray1D, - _ComplexArrayND, - _SupportsLenAndGetItem, _Tuple2, + + _Series, + _CoefSeries, + + _SeriesLikeInt_co, + _SeriesLikeCoef_co, + + _ArrayLikeCoef_co, ) if sys.version_info >= (3, 11): @@ -45,15 +49,17 @@ elif TYPE_CHECKING: else: LiteralString: TypeAlias = str -__all__ = ["ABCPolyBase"] + +__all__: Final[Sequence[str]] = ("ABCPolyBase",) _NameCo = TypeVar("_NameCo", bound=None | LiteralString, covariant=True) _Self = TypeVar("_Self", bound="ABCPolyBase") -_AnyOther: TypeAlias = ABCPolyBase | _AnyScalar | _AnySeries1D +_AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co _Hundred: TypeAlias = Literal[100] + class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): __hash__: ClassVar[None] # type: ignore[assignment] __array_ufunc__: ClassVar[None] @@ -64,7 +70,7 @@ class 
ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): _use_unicode: ClassVar[bool] basis_name: _NameCo - coef: _CoefArray1D + coef: _CoefSeries domain: _Array2[np.inexact[Any] | np.object_] window: _Array2[np.inexact[Any] | np.object_] @@ -73,38 +79,40 @@ class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): def symbol(self, /) -> LiteralString: ... def __init__( - self, /, - coef: _AnySeries1D, - domain: None | _AnySeries1D = ..., - window: None | _AnySeries1D = ..., + self, + /, + coef: _SeriesLikeCoef_co, + domain: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., symbol: str = ..., ) -> None: ... - @overload - def __call__( # type: ignore[overload-overlap] - self, /, - arg: complex | np.complexfloating[Any, Any] - ) -> np.complex128: ... @overload def __call__( - self, /, - arg: _AnyComplexScalar, + self, + /, + arg: _FloatLike_co, ) -> np.float64 | np.complex128: ... @overload + def __call__(self, /, arg: _NumberLike_co) -> np.complex128: ... + @overload def __call__( - self, /, - arg: _AnyObjectSeriesND, - ) -> npt.NDArray[np.object_]: ... + self, + /, + arg: _ArrayLikeFloat_co, + ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... @overload def __call__( - self, /, - arg: _AnyComplexSeriesND, + self, + /, + arg: _ArrayLikeComplex_co, ) -> npt.NDArray[np.complex128 | np.object_]: ... @overload def __call__( - self, /, - arg: _AnySeries1D, - ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... + self, + /, + arg: _ArrayLikeObject_co, + ) -> npt.NDArray[np.object_]: ... def __str__(self, /) -> str: ... def __repr__(self, /) -> str: ... @@ -145,30 +153,30 @@ class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): def copy(self: _Self, /) -> _Self: ... def degree(self, /) -> int: ... def cutdeg(self: _Self, /) -> _Self: ... - def trim(self: _Self, /, tol: _AnyRealScalar = ...) -> _Self: ... - def truncate(self: _Self, /, size: _AnyIntArg) -> _Self: ... + def trim(self: _Self, /, tol: _FloatLike_co = ...) -> _Self: ... + def truncate(self: _Self, /, size: _AnyInt) -> _Self: ... @overload def convert( self, - domain: None | _AnySeries1D, + domain: None | _SeriesLikeCoef_co, kind: type[_Self], /, - window: None | _AnySeries1D = ..., + window: None | _SeriesLikeCoef_co = ..., ) -> _Self: ... @overload def convert( self, /, - domain: None | _AnySeries1D = ..., + domain: None | _SeriesLikeCoef_co = ..., *, kind: type[_Self], - window: None | _AnySeries1D = ..., + window: None | _SeriesLikeCoef_co = ..., ) -> _Self: ... @overload def convert( self: _Self, /, - domain: None | _AnySeries1D = ..., + domain: None | _SeriesLikeCoef_co = ..., kind: type[_Self] = ..., - window: None | _AnySeries1D = ..., + window: None | _SeriesLikeCoef_co = ..., ) -> _Self: ... def mapparms(self, /) -> _Tuple2[Any]: ... @@ -176,87 +184,87 @@ class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): def integ( self: _Self, /, m: SupportsIndex = ..., - k: _AnyComplexScalar | _SupportsLenAndGetItem[_AnyComplexScalar] = ..., - lbnd: None | _AnyComplexScalar = ..., + k: _CoefLike_co | _SeriesLikeCoef_co = ..., + lbnd: None | _CoefLike_co = ..., ) -> _Self: ... def deriv(self: _Self, /, m: SupportsIndex = ...) -> _Self: ... - def roots(self, /) -> _CoefArray1D: ... + def roots(self, /) -> _CoefSeries: ... def linspace( self, /, n: SupportsIndex = ..., - domain: None | _AnySeries1D = ..., - ) -> _Tuple2[_Array1D[np.float64 | np.complex128]]: ... + domain: None | _SeriesLikeCoef_co = ..., + ) -> _Tuple2[_Series[np.float64 | np.complex128]]: ... 
@overload @classmethod def fit( cls: type[_Self], /, - x: _AnySeries1D, - y: _AnySeries1D, - deg: int | _AnyIntSeries1D, - domain: None | _AnySeries1D = ..., - rcond: _AnyRealScalar = ..., + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: None | _SeriesLikeCoef_co = ..., + rcond: _FloatLike_co = ..., full: Literal[False] = ..., - w: None | _AnySeries1D = ..., - window: None | _AnySeries1D = ..., + w: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., symbol: str = ..., ) -> _Self: ... @overload @classmethod def fit( cls: type[_Self], /, - x: _AnySeries1D, - y: _AnySeries1D, - deg: int | _AnyIntSeries1D, - domain: None | _AnySeries1D = ..., - rcond: _AnyRealScalar = ..., + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: None | _SeriesLikeCoef_co = ..., + rcond: _FloatLike_co = ..., *, full: Literal[True], - w: None | _AnySeries1D = ..., - window: None | _AnySeries1D = ..., + w: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., symbol: str = ..., ) -> tuple[_Self, Sequence[np.inexact[Any] | np.int32]]: ... @overload @classmethod def fit( cls: type[_Self], - x: _AnySeries1D, - y: _AnySeries1D, - deg: int | _AnyIntSeries1D, - domain: None | _AnySeries1D, - rcond: _AnyRealScalar, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: None | _SeriesLikeCoef_co, + rcond: _FloatLike_co, full: Literal[True], /, - w: None | _AnySeries1D = ..., - window: None | _AnySeries1D = ..., + w: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., symbol: str = ..., ) -> tuple[_Self, Sequence[np.inexact[Any] | np.int32]]: ... @classmethod def fromroots( cls: type[_Self], /, - roots: _AnySeriesND, - domain: None | _AnySeries1D = ..., - window: None | _AnySeries1D = ..., + roots: _ArrayLikeCoef_co, + domain: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., symbol: str = ..., ) -> _Self: ... @classmethod def identity( cls: type[_Self], /, - domain: None | _AnySeries1D = ..., - window: None | _AnySeries1D = ..., + domain: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., symbol: str = ..., ) -> _Self: ... @classmethod def basis( cls: type[_Self], /, - deg: _AnyIntArg, - domain: None | _AnySeries1D = ..., - window: None | _AnySeries1D = ..., + deg: _AnyInt, + domain: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., symbol: str = ..., ) -> _Self: ... @@ -264,8 +272,8 @@ class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): def cast( cls: type[_Self], /, series: ABCPolyBase, - domain: None | _AnySeries1D = ..., - window: None | _AnySeries1D = ..., + domain: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., ) -> _Self: ... 
@classmethod diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 510c459b2864..30d6e7d906f8 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -1,5 +1,4 @@ import decimal -import fractions import numbers import sys from collections.abc import Callable, Sequence @@ -19,11 +18,18 @@ from typing import ( import numpy as np import numpy.typing as npt from numpy._typing import ( - _ArrayLikeComplex_co, + # array-likes _ArrayLikeFloat_co, - _ArrayLikeInt_co, + _ArrayLikeComplex_co, + _ArrayLikeNumber_co, _ArrayLikeObject_co, _NestedSequence, + + # scalar-likes + _IntLike_co, + _FloatLike_co, + _ComplexLike_co, + _NumberLike_co, ) if sys.version_info >= (3, 11): @@ -51,96 +57,73 @@ class _SimpleSequence(Protocol[_V_co]): @overload def __getitem__(self: _Self, ii: slice, /) -> _Self: ... -_SCT = TypeVar("_SCT", bound=np.number[Any] | np.object_) -_SCT_co = TypeVar("_SCT_co", bound=np.number[Any] | np.object_, covariant=True) +_SCT = TypeVar("_SCT", bound=np.number[Any] | np.bool | np.object_) +_SCT_co = TypeVar( + "_SCT_co", + bound=np.number[Any] | np.bool | np.object_, + covariant=True, +) class _SupportsArray(Protocol[_SCT_co]): def __array__(self ,) -> npt.NDArray[_SCT_co]: ... -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_SCT]] +_Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] + +_FloatSeries: TypeAlias = _Series[np.floating[Any]] +_ComplexSeries: TypeAlias = _Series[np.complexfloating[Any, Any]] +_NumberSeries: TypeAlias = _Series[np.number[Any]] +_ObjectSeries: TypeAlias = _Series[np.object_] +_CoefSeries: TypeAlias = _Series[np.inexact[Any] | np.object_] -_IntArray1D: TypeAlias = _Array1D[np.integer[Any]] -_IntArrayND: TypeAlias = npt.NDArray[np.integer[Any]] -_FloatArray1D: TypeAlias = _Array1D[np.floating[Any]] -_FloatArrayND: TypeAlias = npt.NDArray[np.floating[Any]] -_ComplexArray1D: TypeAlias = _Array1D[np.complexfloating[Any, Any]] -_ComplexArrayND: TypeAlias = npt.NDArray[np.complexfloating[Any, Any]] -_ObjectArray1D: TypeAlias = _Array1D[np.object_] -_ObjectArrayND: TypeAlias = npt.NDArray[np.object_] +_FloatArray: TypeAlias = npt.NDArray[np.floating[Any]] +_ComplexArray: TypeAlias = npt.NDArray[np.complexfloating[Any, Any]] +_ObjectArray: TypeAlias = npt.NDArray[np.object_] +_CoefArray: TypeAlias = npt.NDArray[np.inexact[Any] | np.object_] _Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_SCT]] _Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_SCT]] -_Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_SCT]] -_CoefArray1D: TypeAlias = _Array1D[np.inexact[Any] | np.object_] -_CoefArrayND: TypeAlias = npt.NDArray[np.inexact[Any] | np.object_] -_AnyIntArg: TypeAlias = SupportsInt | SupportsIndex +_AnyInt: TypeAlias = SupportsInt | SupportsIndex -_AnyIntScalar: TypeAlias = int | np.integer[Any] -_AnyRealScalar: TypeAlias = float | np.floating[Any] | np.integer[Any] -_AnyComplexScalar: TypeAlias = complex | np.number[Any] -_AnyObjectScalar: TypeAlias = ( +# NOTE: `decimal.Decimal` isn't compatible with `numbers.Real`, but e.g. +# `fractions.Fraction` is. 
+_NumberObjectLike_co: TypeAlias = ( np.object_ - | fractions.Fraction - | decimal.Decimal | numbers.Complex + | decimal.Decimal ) -_AnyScalar: TypeAlias = _AnyComplexScalar | _AnyObjectScalar +_CoefLike_co: TypeAlias = _NumberLike_co | _NumberObjectLike_co -_AnyIntSeries1D: TypeAlias = ( - _SupportsArray[np.integer[Any]] - | _SupportsLenAndGetItem[_AnyIntScalar] +# The term "series" is used here to refer to 1-d arrays of numeric scalars. +_SeriesLikeInt_co: TypeAlias = ( + _SupportsArray[np.integer[Any] | np.bool] + | _SupportsLenAndGetItem[_IntLike_co] ) -_AnyRealSeries1D: TypeAlias = ( - _SupportsArray[np.integer[Any] | np.floating[Any]] - | _SupportsLenAndGetItem[_AnyRealScalar] +_SeriesLikeFloat_co: TypeAlias = ( + _SupportsArray[np.floating[Any] | np.integer[Any] | np.bool] + | _SupportsLenAndGetItem[_FloatLike_co] ) -_AnyComplexSeries1D: TypeAlias = ( - _SupportsArray[np.number[Any]] - | _SupportsLenAndGetItem[_AnyComplexScalar] +_SeriesLikeComplex_co: TypeAlias = ( + _SupportsArray[np.integer[Any] | np.inexact[Any] | np.bool] + | _SupportsLenAndGetItem[_ComplexLike_co] ) -_AnyObjectSeries1D: TypeAlias = ( +_SeriesLikeNumberObject_co: TypeAlias = ( _SupportsArray[np.object_] - | _SupportsLenAndGetItem[_AnyObjectScalar] + | _SupportsLenAndGetItem[_NumberObjectLike_co] ) -_AnySeries1D: TypeAlias = ( - _SupportsArray[np.number[Any] | np.object_] - | _SupportsLenAndGetItem[object] +_SeriesLikeCoef_co: TypeAlias = ( + _SupportsArray[np.number[Any] | np.bool | np.object_] + | _SupportsLenAndGetItem[_CoefLike_co] ) -_AnyIntSeriesND: TypeAlias = ( - int - | _SupportsArray[np.integer[Any]] - | _NestedSequence[int] - | _NestedSequence[_SupportsArray[np.integer[Any]]] -) - -_AnyRealSeriesND: TypeAlias = ( - float - | _SupportsArray[np.integer[Any] | np.floating[Any]] - | _NestedSequence[float] - | _NestedSequence[_SupportsArray[np.integer[Any] | np.floating[Any]]] -) -_AnyComplexSeriesND: TypeAlias = ( - complex - | _SupportsArray[np.number[Any]] - | _NestedSequence[complex] - | _NestedSequence[_SupportsArray[np.number[Any]]] -) -_AnyObjectSeriesND: TypeAlias = ( - _AnyObjectScalar - | _SupportsArray[np.object_] - | _NestedSequence[_AnyObjectScalar] - | _NestedSequence[_SupportsArray[np.object_]] -) -_AnySeriesND: TypeAlias = ( - _AnyScalar - | _SupportsArray[np.number[Any] | np.object_] - | _NestedSequence[object] - | _NestedSequence[_SupportsArray[np.number[Any] | np.object_]] +_ArrayLikeNumberObject_co: TypeAlias = ( + _NumberObjectLike_co + | _ArrayLikeComplex_co + | _SeriesLikeNumberObject_co + | _NestedSequence[_SeriesLikeNumberObject_co] ) +_ArrayLikeCoef_co: TypeAlias = _ArrayLikeNumber_co | _ArrayLikeNumberObject_co _Name_co = TypeVar("_Name_co", bound=LiteralString, covariant=True) @@ -148,6 +131,8 @@ class _Named(Protocol[_Name_co]): @property def __name__(self, /) -> _Name_co: ... +_Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_SCT]] + @final class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): @overload @@ -168,165 +153,165 @@ class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): @final class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__(self, /, roots: _AnyRealSeries1D) -> _FloatArray1D: ... # type: ignore[overload-overlap] + def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... # type: ignore[overload-overlap] @overload - def __call__(self, /, roots: _AnyComplexSeries1D) -> _ComplexArray1D: ... + def __call__(self, /, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ... 
@overload - def __call__(self, /, roots: _AnySeries1D) -> _ObjectArray1D: ... + def __call__(self, /, roots: _SeriesLikeCoef_co) -> _ObjectSeries: ... @final class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c1: _AnyRealSeries1D, - c2: _AnyRealSeries1D, - ) -> _FloatArray1D: ... + c1: _SeriesLikeFloat_co, + c2: _SeriesLikeFloat_co, + ) -> _FloatSeries: ... @overload def __call__( self, /, - c1: _AnyComplexSeries1D, - c2: _AnyComplexSeries1D, - ) -> _ComplexArray1D: ... + c1: _SeriesLikeComplex_co, + c2: _SeriesLikeComplex_co, + ) -> _ComplexSeries: ... @overload def __call__( self, /, - c1: _AnySeries1D, - c2: _AnySeries1D, - ) -> _ObjectArray1D: ... + c1: _SeriesLikeCoef_co, + c2: _SeriesLikeCoef_co, + ) -> _ObjectSeries: ... @final class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__(self, /, c: _AnyRealSeries1D) -> _FloatArray1D: ... # type: ignore[overload-overlap] + def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... # type: ignore[overload-overlap] @overload - def __call__(self, /, c: _AnyComplexSeries1D) -> _ComplexArray1D: ... + def __call__(self, /, c: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload - def __call__(self, /, c: _AnySeries1D) -> _ObjectArray1D: ... + def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... @final class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__(self, /, pol: _AnyRealSeries1D) -> _FloatArray1D: ... # type: ignore[overload-overlap] + def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... # type: ignore[overload-overlap] @overload - def __call__(self, /, pol: _AnyComplexSeries1D) -> _ComplexArray1D: ... + def __call__(self, /, pol: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload - def __call__(self, /, pol: _AnySeries1D) -> _ObjectArray1D: ... + def __call__(self, /, pol: _SeriesLikeCoef_co) -> _ObjectSeries: ... @final class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyRealSeries1D, - pow: _AnyIntArg, - maxpower: None | _AnyIntArg = ..., - ) -> _FloatArray1D: ... + c: _SeriesLikeFloat_co, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., + ) -> _FloatSeries: ... @overload def __call__( self, /, - c: _AnyComplexSeries1D, - pow: _AnyIntArg, - maxpower: None | _AnyIntArg = ..., - ) -> _ComplexArray1D: ... + c: _SeriesLikeComplex_co, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., + ) -> _ComplexSeries: ... @overload def __call__( self, /, - c: _AnySeries1D, - pow: _AnyIntArg, - maxpower: None | _AnyIntArg = ..., - ) -> _ObjectArray1D: ... + c: _SeriesLikeCoef_co, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., + ) -> _ObjectSeries: ... @final class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyRealSeriesND, + c: _ArrayLikeFloat_co, m: SupportsIndex = ..., - scl: _AnyComplexScalar = ..., + scl: _NumberLike_co = ..., axis: SupportsIndex = ..., - ) -> _FloatArrayND: ... + ) -> _FloatArray: ... @overload def __call__( self, /, - c: _AnyComplexSeriesND, + c: _ArrayLikeComplex_co, m: SupportsIndex = ..., - scl: _AnyComplexScalar = ..., + scl: _NumberLike_co = ..., axis: SupportsIndex = ..., - ) -> _ComplexArrayND: ... + ) -> _ComplexArray: ... 
@overload def __call__( self, /, - c: _AnyObjectSeriesND, + c: _ArrayLikeNumberObject_co, m: SupportsIndex = ..., - scl: _AnyComplexScalar = ..., + scl: _NumberLike_co = ..., axis: SupportsIndex = ..., - ) -> _ObjectArrayND: ... + ) -> _ObjectArray: ... @final class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyRealSeriesND, + c: _ArrayLikeFloat_co, m: SupportsIndex = ..., - k: _AnyComplexScalar | _SupportsLenAndGetItem[_AnyComplexScalar] = ..., - lbnd: _AnyComplexScalar = ..., - scl: _AnyComplexScalar = ..., + k: _NumberLike_co | _SupportsLenAndGetItem[_NumberLike_co] = ..., + lbnd: _NumberLike_co = ..., + scl: _NumberLike_co = ..., axis: SupportsIndex = ..., - ) -> _FloatArrayND: ... + ) -> _FloatArray: ... @overload def __call__( self, /, - c: _AnyComplexSeriesND, + c: _ArrayLikeComplex_co, m: SupportsIndex = ..., - k: _AnyComplexScalar | _SupportsLenAndGetItem[_AnyComplexScalar] = ..., - lbnd: _AnyComplexScalar = ..., - scl: _AnyComplexScalar = ..., + k: _NumberLike_co | _SupportsLenAndGetItem[_NumberLike_co] = ..., + lbnd: _NumberLike_co = ..., + scl: _NumberLike_co = ..., axis: SupportsIndex = ..., - ) -> _ComplexArrayND: ... + ) -> _ComplexArray: ... @overload def __call__( self, /, - c: _AnyObjectSeriesND, + c: _ArrayLikeNumberObject_co, m: SupportsIndex = ..., - k: _AnyComplexScalar | _SupportsLenAndGetItem[_AnyScalar] = ..., - lbnd: _AnyComplexScalar = ..., - scl: _AnyComplexScalar = ..., + k: _NumberLike_co | _SupportsLenAndGetItem[_CoefLike_co] = ..., + lbnd: _NumberLike_co = ..., + scl: _NumberLike_co = ..., axis: SupportsIndex = ..., - ) -> _ObjectArrayND: ... + ) -> _ObjectArray: ... _AnyRealRoots: TypeAlias = ( - _Array1D[np.floating[Any] | np.integer[Any]] - | Sequence[_AnyRealScalar] + _Series[np.floating[Any] | np.integer[Any]] + | Sequence[_FloatLike_co] ) _AnyComplexRoots: TypeAlias = ( - _Array1D[np.number[Any]] - | Sequence[_AnyComplexScalar] + _Series[np.number[Any]] + | Sequence[_NumberLike_co] ) -_AnyObjectRoots: TypeAlias = _ObjectArray1D | Sequence[_AnyObjectScalar] -_AnyRoots: TypeAlias = _ObjectArray1D | Sequence[_AnyScalar] +_AnyObjectRoots: TypeAlias = _ObjectSeries | Sequence[_NumberObjectLike_co] +_AnyRoots: TypeAlias = _CoefSeries | Sequence[_CoefLike_co] _AnyRealPoints: TypeAlias = ( npt.NDArray[np.floating[Any] | np.integer[Any]] - | tuple[_AnyRealSeriesND, ...] - | list[_AnyRealSeriesND] + | tuple[_ArrayLikeFloat_co, ...] + | list[_ArrayLikeFloat_co] ) _AnyComplexPoints: TypeAlias = ( npt.NDArray[np.number[Any]] - | tuple[_AnyComplexSeriesND, ...] - | list[_AnyComplexSeriesND] + | tuple[_ArrayLikeComplex_co, ...] + | list[_ArrayLikeComplex_co] ) _AnyObjectPoints: TypeAlias = ( - _ObjectArrayND - | tuple[_AnyObjectSeriesND, ...] - | list[_AnyObjectSeriesND] + _ObjectArray + | tuple[_ArrayLikeNumberObject_co, ...] + | list[_ArrayLikeNumberObject_co] ) _AnyPoints: TypeAlias = ( npt.NDArray[np.number[Any] | np.object_] - | tuple[_AnySeriesND, ...] - | list[_AnySeriesND] + | tuple[_ArrayLikeCoef_co, ...] + | list[_ArrayLikeCoef_co] ) @final @@ -334,66 +319,66 @@ class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyRealScalar, - r: _AnyRealScalar, + x: _FloatLike_co, + r: _FloatLike_co, tensor: bool = ..., ) -> np.floating[Any]: ... 
@overload def __call__( self, /, - x: _AnyComplexScalar, - r: _AnyComplexScalar, + x: _NumberLike_co, + r: _NumberLike_co, tensor: bool = ..., ) -> np.complexfloating[Any, Any]: ... @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyScalar, - r: _AnyScalar, + x: _CoefLike_co, + r: _CoefLike_co, tensor: bool = ..., ) -> object: ... @overload def __call__( self, /, - x: _AnyRealScalar | _AnyRealPoints, - r: _AnyRealSeriesND, + x: _FloatLike_co | _AnyRealPoints, + r: _ArrayLikeFloat_co, tensor: bool = ..., - ) -> _FloatArrayND: ... + ) -> _FloatArray: ... @overload def __call__( self, /, - x: _AnyComplexScalar | _AnyComplexPoints, - r: _AnyComplexSeriesND, + x: _NumberLike_co | _AnyComplexPoints, + r: _ArrayLikeComplex_co, tensor: bool = ..., - ) -> _ComplexArrayND: ... + ) -> _ComplexArray: ... @overload def __call__( self, /, - x: _AnyScalar | _AnyPoints, - r: _AnySeriesND, + x: _CoefLike_co | _AnyPoints, + r: _ArrayLikeCoef_co, tensor: bool = ..., - ) -> _ObjectArrayND: ... + ) -> _ObjectArray: ... @final class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyRealScalar, + x: _FloatLike_co, c: _AnyRealRoots, tensor: bool = ..., ) -> np.floating[Any]: ... @overload def __call__( self, /, - x: _AnyComplexScalar, + x: _NumberLike_co, c: _AnyComplexRoots, tensor: bool = ..., ) -> np.complexfloating[Any, Any]: ... @overload def __call__( self, /, - x: _AnyScalar, + x: _CoefLike_co, c: _AnyObjectRoots, tensor: bool = ..., ) -> object: ... @@ -401,45 +386,45 @@ class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): def __call__( # type: ignore[overload-overlap] self, /, x: _AnyRealPoints, - c: _AnyRealSeriesND, + c: _ArrayLikeFloat_co, tensor: bool = ..., - ) -> _FloatArrayND: ... + ) -> _FloatArray: ... @overload def __call__( self, /, x: _AnyComplexPoints, - c: _AnyComplexSeriesND, + c: _ArrayLikeComplex_co, tensor: bool = ..., - ) -> _ComplexArrayND: ... + ) -> _ComplexArray: ... @overload def __call__( self, /, x: _AnyPoints, - c: _AnySeriesND, + c: _ArrayLikeCoef_co, tensor: bool = ..., - ) -> _ObjectArrayND: ... + ) -> _ObjectArray: ... @final class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyRealScalar, - y: _AnyRealScalar, + x: _FloatLike_co, + y: _FloatLike_co, c: _AnyRealRoots, ) -> np.floating[Any]: ... @overload def __call__( self, /, - x: _AnyComplexScalar, - y: _AnyComplexScalar, + x: _NumberLike_co, + y: _NumberLike_co, c: _AnyComplexRoots, ) -> np.complexfloating[Any, Any]: ... @overload def __call__( self, /, - x: _AnyScalar, - y: _AnyScalar, + x: _CoefLike_co, + y: _CoefLike_co, c: _AnyRoots, ) -> object: ... @overload @@ -447,47 +432,47 @@ class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): self, /, x: _AnyRealPoints, y: _AnyRealPoints, - c: _AnyRealSeriesND, - ) -> _FloatArrayND: ... + c: _ArrayLikeFloat_co, + ) -> _FloatArray: ... @overload def __call__( self, /, x: _AnyComplexPoints, y: _AnyComplexPoints, - c: _AnyComplexSeriesND, - ) -> _ComplexArrayND: ... + c: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... @overload def __call__( self, /, x: _AnyPoints, y: _AnyPoints, - c: _AnySeriesND, - ) -> _ObjectArrayND: ... + c: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... 
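[Editor's note] The `_FuncVal2D`/`_FuncVal3D` overload ladders encode the runtime evaluators' scalar-in/scalar-out versus array-in/array-out behaviour. A quick illustrative sketch, not part of the patch:

    import numpy as np
    from numpy.polynomial import polynomial as P

    c = np.arange(4.0).reshape(2, 2)  # coefficients c[i, j] for x**i * y**j
    print(type(P.polyval2d(1.0, 2.0, c)))                # <class 'numpy.float64'>
    print(P.polyval2d([1.0, 2.0], [3.0, 4.0], c).shape)  # (2,)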
@final class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyRealScalar, - y: _AnyRealScalar, - z: _AnyRealScalar, + x: _FloatLike_co, + y: _FloatLike_co, + z: _FloatLike_co, c: _AnyRealRoots ) -> np.floating[Any]: ... @overload def __call__( self, /, - x: _AnyComplexScalar, - y: _AnyComplexScalar, - z: _AnyComplexScalar, + x: _NumberLike_co, + y: _NumberLike_co, + z: _NumberLike_co, c: _AnyComplexRoots, ) -> np.complexfloating[Any, Any]: ... @overload def __call__( self, /, - x: _AnyScalar, - y: _AnyScalar, - z: _AnyScalar, + x: _CoefLike_co, + y: _CoefLike_co, + z: _CoefLike_co, c: _AnyRoots, ) -> object: ... @overload @@ -496,28 +481,28 @@ class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): x: _AnyRealPoints, y: _AnyRealPoints, z: _AnyRealPoints, - c: _AnyRealSeriesND, - ) -> _FloatArrayND: ... + c: _ArrayLikeFloat_co, + ) -> _FloatArray: ... @overload def __call__( self, /, x: _AnyComplexPoints, y: _AnyComplexPoints, z: _AnyComplexPoints, - c: _AnyComplexSeriesND, - ) -> _ComplexArrayND: ... + c: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... @overload def __call__( self, /, x: _AnyPoints, y: _AnyPoints, z: _AnyPoints, - c: _AnySeriesND, - ) -> _ObjectArrayND: ... + c: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... _AnyValF: TypeAlias = Callable[ [npt.ArrayLike, npt.ArrayLike, bool], - _CoefArrayND, + _CoefArray, ] @final @@ -528,7 +513,7 @@ class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): val_f: _AnyValF, c: _AnyRealRoots, /, - *args: _AnyRealScalar, + *args: _FloatLike_co, ) -> np.floating[Any]: ... @overload def __call__( @@ -536,7 +521,7 @@ class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): val_f: _AnyValF, c: _AnyComplexRoots, /, - *args: _AnyComplexScalar, + *args: _NumberLike_co, ) -> np.complexfloating[Any, Any]: ... @overload def __call__( @@ -544,59 +529,59 @@ class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): val_f: _AnyValF, c: _AnyObjectRoots, /, - *args: _AnyObjectScalar, + *args: _NumberObjectLike_co, ) -> object: ... @overload def __call__( # type: ignore[overload-overlap] self, val_f: _AnyValF, - c: _AnyRealSeriesND, + c: _ArrayLikeFloat_co, /, *args: _AnyRealPoints, - ) -> _FloatArrayND: ... + ) -> _FloatArray: ... @overload def __call__( self, val_f: _AnyValF, - c: _AnyComplexSeriesND, + c: _ArrayLikeComplex_co, /, *args: _AnyComplexPoints, - ) -> _ComplexArrayND: ... + ) -> _ComplexArray: ... @overload def __call__( self, val_f: _AnyValF, - c: _AnySeriesND, + c: _ArrayLikeCoef_co, /, *args: _AnyObjectPoints, - ) -> _ObjectArrayND: ... + ) -> _ObjectArray: ... @final class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyRealSeriesND, + x: _ArrayLikeFloat_co, deg: SupportsIndex, - ) -> _FloatArrayND: ... + ) -> _FloatArray: ... @overload def __call__( self, /, - x: _AnyComplexSeriesND, + x: _ArrayLikeComplex_co, deg: SupportsIndex, - ) -> _ComplexArrayND: ... + ) -> _ComplexArray: ... @overload def __call__( self, /, - x: _AnySeriesND, + x: _ArrayLikeCoef_co, deg: SupportsIndex, - ) -> _ObjectArrayND: ... + ) -> _ObjectArray: ... @overload def __call__( self, /, x: npt.ArrayLike, deg: SupportsIndex, - ) -> _CoefArrayND: ... + ) -> _CoefArray: ... 
_AnyDegrees: TypeAlias = _SupportsLenAndGetItem[SupportsIndex] @@ -605,58 +590,58 @@ class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyRealSeriesND, - y: _AnyRealSeriesND, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, deg: _AnyDegrees, - ) -> _FloatArrayND: ... + ) -> _FloatArray: ... @overload def __call__( self, /, - x: _AnyComplexSeriesND, - y: _AnyComplexSeriesND, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, deg: _AnyDegrees, - ) -> _ComplexArrayND: ... + ) -> _ComplexArray: ... @overload def __call__( self, /, - x: _AnySeriesND, - y: _AnySeriesND, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, deg: _AnyDegrees, - ) -> _ObjectArrayND: ... + ) -> _ObjectArray: ... @overload def __call__( self, /, x: npt.ArrayLike, y: npt.ArrayLike, deg: _AnyDegrees, - ) -> _CoefArrayND: ... + ) -> _CoefArray: ... @final class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyRealSeriesND, - y: _AnyRealSeriesND, - z: _AnyRealSeriesND, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + z: _ArrayLikeFloat_co, deg: _AnyDegrees, - ) -> _FloatArrayND: ... + ) -> _FloatArray: ... @overload def __call__( self, /, - x: _AnyComplexSeriesND, - y: _AnyComplexSeriesND, - z: _AnyComplexSeriesND, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + z: _ArrayLikeComplex_co, deg: _AnyDegrees, - ) -> _ComplexArrayND: ... + ) -> _ComplexArray: ... @overload def __call__( self, /, - x: _AnySeriesND, - y: _AnySeriesND, - z: _AnySeriesND, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + z: _ArrayLikeCoef_co, deg: _AnyDegrees, - ) -> _ObjectArrayND: ... + ) -> _ObjectArray: ... @overload def __call__( self, /, @@ -664,12 +649,12 @@ class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): y: npt.ArrayLike, z: npt.ArrayLike, deg: _AnyDegrees, - ) -> _CoefArrayND: ... + ) -> _CoefArray: ... # keep in sync with the broadest overload of `._FuncVander` _AnyFuncVander: TypeAlias = Callable[ [npt.ArrayLike, SupportsIndex], - _CoefArrayND, + _CoefArray, ] @final @@ -680,14 +665,14 @@ class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): vander_fs: _SupportsLenAndGetItem[_AnyFuncVander], points: _SupportsLenAndGetItem[_ArrayLikeFloat_co], degrees: _SupportsLenAndGetItem[SupportsIndex], - ) -> _FloatArrayND: ... + ) -> _FloatArray: ... @overload def __call__( self, /, vander_fs: _SupportsLenAndGetItem[_AnyFuncVander], points: _SupportsLenAndGetItem[_ArrayLikeComplex_co], degrees: _SupportsLenAndGetItem[SupportsIndex], - ) -> _ComplexArrayND: ... + ) -> _ComplexArray: ... @overload def __call__( self, /, @@ -696,69 +681,69 @@ class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): _ArrayLikeObject_co | _ArrayLikeComplex_co, ], degrees: _SupportsLenAndGetItem[SupportsIndex], - ) -> _ObjectArrayND: ... + ) -> _ObjectArray: ... @overload def __call__( self, /, vander_fs: _SupportsLenAndGetItem[_AnyFuncVander], points: _SupportsLenAndGetItem[npt.ArrayLike], degrees: _SupportsLenAndGetItem[SupportsIndex], - ) -> _CoefArrayND: ... + ) -> _CoefArray: ... @final class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - x: _AnyRealSeries1D, - y: _AnyRealSeriesND, - deg: int | _AnyIntSeries1D, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, rcond: None | float = ..., full: Literal[False] = ..., - w: None | _AnyRealSeries1D = ..., - ) -> _FloatArrayND: ... 
+ w: None | _SeriesLikeFloat_co = ..., + ) -> _FloatArray: ... @overload def __call__( self, /, - x: _AnyComplexSeries1D, - y: _AnyComplexSeriesND, - deg: int | _AnyIntSeries1D, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, rcond: None | float = ..., full: Literal[False] = ..., - w: None | _AnyComplexSeriesND = ..., - ) -> _ComplexArrayND: ... + w: None | _ArrayLikeComplex_co = ..., + ) -> _ComplexArray: ... @overload def __call__( self, /, - x: _AnySeries1D, - y: _AnySeriesND, - deg: int | _AnyIntSeries1D, + x: _SeriesLikeCoef_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, rcond: None | float = ..., full: Literal[False] = ..., - w: None | _AnySeries1D = ..., - ) -> _ObjectArrayND: ... + w: None | _SeriesLikeCoef_co = ..., + ) -> _ObjectArray: ... @overload def __call__( self, - x: _AnySeries1D, - y: _AnySeriesND, - deg: int | _AnyIntSeries1D, + x: _SeriesLikeCoef_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, rcond: None | float, full: Literal[True], /, - w: None | _AnySeries1D = ..., - ) -> tuple[_CoefArrayND, Sequence[np.inexact[Any] | np.int32]]: ... + w: None | _SeriesLikeCoef_co = ..., + ) -> tuple[_CoefArray, Sequence[np.inexact[Any] | np.int32]]: ... @overload def __call__( self, /, - x: _AnySeries1D, - y: _AnySeriesND, - deg: int | _AnyIntSeries1D, + x: _SeriesLikeCoef_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, rcond: None | float = ..., *, full: Literal[True], - w: None | _AnySeries1D = ..., - ) -> tuple[_CoefArrayND, Sequence[np.inexact[Any] | np.int32]]: ... + w: None | _SeriesLikeCoef_co = ..., + ) -> tuple[_CoefArray, Sequence[np.inexact[Any] | np.int32]]: ... @final class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): @@ -768,52 +753,55 @@ class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyRealSeries1D, - ) -> _Array1D[np.float64]: ... + c: _SeriesLikeFloat_co, + ) -> _Series[np.float64]: ... @overload def __call__( self, /, - c: _AnyComplexSeries1D, - ) -> _Array1D[np.complex128]: ... + c: _SeriesLikeComplex_co, + ) -> _Series[np.complex128]: ... @overload - def __call__(self, /, c: _AnySeries1D) -> _ObjectArray1D: ... + def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... + + +_Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_SCT]] @final class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyRealSeries1D, - ) -> _Array2D[np.float64]: ... + c: _SeriesLikeFloat_co, + ) -> _Companion[np.float64]: ... @overload def __call__( self, /, - c: _AnyComplexSeries1D, - ) -> _Array2D[np.complex128]: ... + c: _SeriesLikeComplex_co, + ) -> _Companion[np.complex128]: ... @overload - def __call__(self, /, c: _AnySeries1D) -> _Array2D[np.object_]: ... + def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... @final class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): def __call__( self, /, deg: SupportsIndex, - ) -> _Tuple2[_Array1D[np.float64]]: ... + ) -> _Tuple2[_Series[np.float64]]: ... @final class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - c: _AnyRealSeriesND, + c: _ArrayLikeFloat_co, ) -> npt.NDArray[np.float64]: ... @overload def __call__( self, /, - c: _AnyComplexSeriesND, + c: _ArrayLikeComplex_co, ) -> npt.NDArray[np.complex128]: ... @overload - def __call__(self, /, c: _AnySeriesND) -> _ObjectArrayND: ... 
+ def __call__(self, /, c: _ArrayLikeCoef_co) -> _ObjectArray: ... _N_pts = TypeVar("_N_pts", bound=int) @@ -825,4 +813,4 @@ class _FuncPts(_Named[_Name_co], Protocol[_Name_co]): npts: _N_pts, ) -> np.ndarray[tuple[_N_pts], np.dtype[np.float64]]: ... @overload - def __call__(self, /, npts: _AnyIntArg) -> _Array1D[np.float64]: ... + def __call__(self, /, npts: _AnyInt) -> _Series[np.float64]: ... diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 7e0fe46093a8..e1d6e5d1ffda 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -14,11 +14,11 @@ from numpy._typing import _IntLike_co from ._polybase import ABCPolyBase from ._polytypes import ( - _AnySeries1D, + _SeriesLikeCoef_co, _Array1, - _Array1D, + _Series, _Array2, - _CoefArray1D, + _CoefSeries, _FuncBinOp, _FuncCompanion, _FuncDer, @@ -81,18 +81,18 @@ __all__ = [ ] _SCT = TypeVar("_SCT", bound=np.number[Any] | np.object_) -def _cseries_to_zseries(c: npt.NDArray[_SCT]) -> _Array1D[_SCT]: ... -def _zseries_to_cseries(zs: npt.NDArray[_SCT]) -> _Array1D[_SCT]: ... +def _cseries_to_zseries(c: npt.NDArray[_SCT]) -> _Series[_SCT]: ... +def _zseries_to_cseries(zs: npt.NDArray[_SCT]) -> _Series[_SCT]: ... def _zseries_mul( z1: npt.NDArray[_SCT], z2: npt.NDArray[_SCT], -) -> _Array1D[_SCT]: ... +) -> _Series[_SCT]: ... def _zseries_div( z1: npt.NDArray[_SCT], z2: npt.NDArray[_SCT], -) -> _Array1D[_SCT]: ... -def _zseries_der(zs: npt.NDArray[_SCT]) -> _Array1D[_SCT]: ... -def _zseries_int(zs: npt.NDArray[_SCT]) -> _Array1D[_SCT]: ... +) -> _Series[_SCT]: ... +def _zseries_der(zs: npt.NDArray[_SCT]) -> _Series[_SCT]: ... +def _zseries_int(zs: npt.NDArray[_SCT]) -> _Series[_SCT]: ... poly2cheb: _FuncPoly2Ortho[L["poly2cheb"]] cheb2poly: _FuncUnOp[L["cheb2poly"]] @@ -160,9 +160,9 @@ class Chebyshev(ABCPolyBase[L["T"]]): def interpolate( cls: type[_Self], /, - func: Callable[[npt.NDArray[np.float64]], _CoefArray1D], + func: Callable[[npt.NDArray[np.float64]], _CoefSeries], deg: _IntLike_co, - domain: None | _AnySeries1D = ..., + domain: None | _SeriesLikeCoef_co = ..., args: tuple[()] = ..., ) -> _Self: ... @overload @@ -172,10 +172,10 @@ class Chebyshev(ABCPolyBase[L["T"]]): /, func: Callable[ Concatenate[npt.NDArray[np.float64], ...], - _CoefArray1D, + _CoefSeries, ], deg: _IntLike_co, - domain: None | _AnySeries1D = ..., + domain: None | _SeriesLikeCoef_co = ..., *, args: Iterable[Any], ) -> _Self: ... @@ -185,10 +185,10 @@ class Chebyshev(ABCPolyBase[L["T"]]): cls: type[_Self], func: Callable[ Concatenate[npt.NDArray[np.float64], ...], - _CoefArray1D, + _CoefSeries, ], deg: _IntLike_co, - domain: None | _AnySeries1D, + domain: None | _SeriesLikeCoef_co, args: Iterable[Any], /, ) -> _Self: ... 
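[Editor's note] For the three `Chebyshev.interpolate` overloads just above — `args` omitted, `args` by keyword, and `args` positional-only — a runnable sketch of the calls they describe (illustrative only; `damped` is a made-up example function):

    import numpy as np
    from numpy.polynomial import Chebyshev

    c8 = Chebyshev.interpolate(np.exp, 8)  # deg only, default domain [-1, 1]

    def damped(x, rate):
        return np.exp(-rate * x)

    # extra positional arguments for func are forwarded via ``args``
    c9 = Chebyshev.interpolate(damped, 8, domain=[0, 1], args=(2.0,))
    print(abs(c9(0.5) - np.exp(-1.0)) < 1e-6)  # True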
diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 632249d77366..f000b9f067fe 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -11,43 +11,46 @@ from typing import ( import numpy as np import numpy.typing as npt +from numpy._typing import ( + _FloatLike_co, + _NumberLike_co, + + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, +) from ._polytypes import ( - _AnyComplexScalar, - _AnyComplexSeries1D, - _AnyComplexSeriesND, - _AnyIntSeries1D, - _AnyRealSeries1D, - _AnyRealSeriesND, - _AnyIntArg, - _AnyComplexSeries1D, - _AnyObjectSeries1D, - _AnyRealScalar, - _AnyScalar, - _AnySeries1D, - _AnySeriesND, - _AnyRealScalar, + _AnyInt, + _CoefLike_co, + _Array2, - _CoefArrayND, - _CoefArray1D, - _ComplexArray1D, - _ComplexArrayND, - _FloatArray1D, - _FloatArrayND, + _Tuple2, + + _FloatSeries, + _CoefSeries, + _ComplexSeries, + _ObjectSeries, + + _ComplexArray, + _FloatArray, + _CoefArray, + _ObjectArray, + + _SeriesLikeInt_co, + _SeriesLikeFloat_co, + _SeriesLikeComplex_co, + _SeriesLikeCoef_co, + + _ArrayLikeCoef_co, + _FuncBinOp, _FuncValND, _FuncVanderND, - _IntArrayND, - _ObjectArray1D, - _ObjectArrayND, - _SimpleSequence, - _SupportsLenAndGetItem, - _Tuple2, ) -___all__ = [ +__all__: Final[Sequence[str]] = [ "as_series", - "format_float" + "format_float", "getdomain", "mapdomain", "mapparms", @@ -55,113 +58,122 @@ ___all__ = [ "trimseq", ] -_AnyLineF: TypeAlias = Callable[[_AnyScalar, _AnyScalar], _CoefArrayND] -_AnyMulF: TypeAlias = Callable[[npt.ArrayLike, npt.ArrayLike], _CoefArrayND] -_AnyVanderF: TypeAlias = Callable[[npt.ArrayLike, SupportsIndex], _CoefArrayND] +_AnyLineF: TypeAlias = Callable[ + [_CoefLike_co, _CoefLike_co], + _CoefArray, +] +_AnyMulF: TypeAlias = Callable[ + [npt.ArrayLike, npt.ArrayLike], + _CoefArray, +] +_AnyVanderF: TypeAlias = Callable[ + [npt.ArrayLike, SupportsIndex], + _CoefArray, +] @overload def as_series( - alist: _IntArrayND | _FloatArrayND, + alist: npt.NDArray[np.integer[Any]] | _FloatArray, trim: bool = ..., -) -> list[_FloatArray1D]: ... +) -> list[_FloatSeries]: ... @overload def as_series( - alist: _ComplexArrayND, + alist: _ComplexArray, trim: bool = ..., -) -> list[_ComplexArray1D]: ... +) -> list[_ComplexSeries]: ... @overload def as_series( - alist: _ObjectArrayND, + alist: _ObjectArray, trim: bool = ..., -) -> list[_ObjectArray1D]: ... +) -> list[_ObjectSeries]: ... @overload def as_series( # type: ignore[overload-overlap] - alist: Iterable[_FloatArrayND | _IntArrayND], + alist: Iterable[_FloatArray | npt.NDArray[np.integer[Any]]], trim: bool = ..., -) -> list[_FloatArray1D]: ... +) -> list[_FloatSeries]: ... @overload def as_series( - alist: Iterable[_ComplexArrayND], + alist: Iterable[_ComplexArray], trim: bool = ..., -) -> list[_ComplexArray1D]: ... +) -> list[_ComplexSeries]: ... @overload def as_series( - alist: Iterable[_ObjectArrayND], + alist: Iterable[_ObjectArray], trim: bool = ..., -) -> list[_ObjectArray1D]: ... +) -> list[_ObjectSeries]: ... @overload def as_series( # type: ignore[overload-overlap] - alist: Iterable[_AnyRealSeries1D | float], + alist: Iterable[_SeriesLikeFloat_co | float], trim: bool = ..., -) -> list[_FloatArray1D]: ... +) -> list[_FloatSeries]: ... @overload def as_series( - alist: Iterable[_AnyComplexSeries1D | complex], + alist: Iterable[_SeriesLikeComplex_co | complex], trim: bool = ..., -) -> list[_ComplexArray1D]: ... +) -> list[_ComplexSeries]: ... 
@overload def as_series( - alist: Iterable[_AnyObjectSeries1D | object], + alist: Iterable[_SeriesLikeCoef_co | object], trim: bool = ..., -) -> list[_ObjectArray1D]: ... +) -> list[_ObjectSeries]: ... -_T_seq = TypeVar("_T_seq", bound=_CoefArrayND | _SimpleSequence[_AnyScalar]) +_T_seq = TypeVar("_T_seq", bound=_CoefArray | Sequence[_CoefLike_co]) def trimseq(seq: _T_seq) -> _T_seq: ... @overload def trimcoef( # type: ignore[overload-overlap] - c: _IntArrayND | _FloatArrayND, - tol: _AnyRealScalar = ..., -) -> _FloatArray1D: ... + c: npt.NDArray[np.integer[Any]] | _FloatArray, + tol: _FloatLike_co = ..., +) -> _FloatSeries: ... @overload def trimcoef( - c: _ComplexArrayND, - tol: _AnyRealScalar = ..., -) -> _ComplexArray1D: ... + c: _ComplexArray, + tol: _FloatLike_co = ..., +) -> _ComplexSeries: ... @overload def trimcoef( - c: _ObjectArrayND, - tol: _AnyRealScalar = ..., -) -> _ObjectArray1D: ... + c: _ObjectArray, + tol: _FloatLike_co = ..., +) -> _ObjectSeries: ... @overload def trimcoef( # type: ignore[overload-overlap] - c: _AnyRealSeries1D | float, - tol: _AnyRealScalar = ..., -) -> _FloatArray1D: ... + c: _SeriesLikeFloat_co | float, + tol: _FloatLike_co = ..., +) -> _FloatSeries: ... @overload def trimcoef( - c: _AnyComplexSeries1D | complex, - tol: _AnyRealScalar = ..., -) -> _ComplexArray1D: ... + c: _SeriesLikeComplex_co | complex, + tol: _FloatLike_co = ..., +) -> _ComplexSeries: ... @overload def trimcoef( - c: _AnyObjectSeries1D | object, - tol: _AnyRealScalar = ..., -) -> _ObjectArray1D: ... + c: _SeriesLikeCoef_co | object, + tol: _FloatLike_co = ..., +) -> _ObjectSeries: ... @overload def getdomain( # type: ignore[overload-overlap] - x: _FloatArrayND | _IntArrayND, + x: _FloatArray | npt.NDArray[np.integer[Any]], ) -> _Array2[np.float64]: ... @overload def getdomain( - x: _ComplexArrayND, + x: _ComplexArray, ) -> _Array2[np.complex128]: ... @overload def getdomain( - x: _ObjectArrayND, + x: _ObjectArray, ) -> _Array2[np.object_]: ... @overload def getdomain( # type: ignore[overload-overlap] - x: _AnyRealSeries1D | float, + x: _SeriesLikeFloat_co | float, ) -> _Array2[np.float64]: ... @overload def getdomain( - x: _AnyComplexSeries1D | complex, + x: _SeriesLikeComplex_co | complex, ) -> _Array2[np.complex128]: ... @overload def getdomain( - x: _AnyObjectSeries1D | object, + x: _SeriesLikeCoef_co | object, ) -> _Array2[np.object_]: ... @overload @@ -181,89 +193,89 @@ def mapparms( ) -> _Tuple2[object]: ... @overload def mapparms( # type: ignore[overload-overlap] - old: _SupportsLenAndGetItem[float], - new: _SupportsLenAndGetItem[float], + old: Sequence[float], + new: Sequence[float], ) -> _Tuple2[float]: ... @overload def mapparms( - old: _SupportsLenAndGetItem[complex], - new: _SupportsLenAndGetItem[complex], + old: Sequence[complex], + new: Sequence[complex], ) -> _Tuple2[complex]: ... @overload def mapparms( - old: _AnyRealSeries1D, - new: _AnyRealSeries1D, + old: _SeriesLikeFloat_co, + new: _SeriesLikeFloat_co, ) -> _Tuple2[np.floating[Any]]: ... @overload def mapparms( - old: _AnyComplexSeries1D, - new: _AnyComplexSeries1D, + old: _SeriesLikeComplex_co, + new: _SeriesLikeComplex_co, ) -> _Tuple2[np.complexfloating[Any, Any]]: ... @overload def mapparms( - old: _AnySeries1D, - new: _AnySeries1D, + old: _SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, ) -> _Tuple2[object]: ... 
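[Editor's note] The `mapparms` overloads above (and the `mapdomain` ladder that follows) track the runtime's affine-map semantics: scalar overloads return scalars, sequence overloads return 1-d arrays. An illustrative sketch, not part of the patch:

    import numpy.polynomial.polyutils as pu

    # Affine map taking [0, 4] onto [-1, 1]: offset -1.0, scale 0.5.
    print(pu.mapparms([0, 4], [-1, 1]))                    # (-1.0, 0.5)
    print(pu.mapdomain(3.0, [0, 4], [-1, 1]))              # 0.5
    print(pu.mapdomain([0.0, 2.0, 4.0], [0, 4], [-1, 1]))  # [-1.  0.  1.]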
@overload def mapdomain( # type: ignore[overload-overlap] - x: _AnyRealScalar, - old: _AnyRealSeries1D, - new: _AnyRealSeries1D, + x: _FloatLike_co, + old: _SeriesLikeFloat_co, + new: _SeriesLikeFloat_co, ) -> np.floating[Any]: ... @overload def mapdomain( - x: _AnyComplexScalar, - old: _AnyComplexSeries1D, - new: _AnyComplexSeries1D, + x: _NumberLike_co, + old: _SeriesLikeComplex_co, + new: _SeriesLikeComplex_co, ) -> np.complexfloating[Any, Any]: ... @overload def mapdomain( - x: _AnyScalar, - old: _AnySeries1D, - new: _AnySeries1D, + x: _CoefLike_co, + old: _SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, ) -> object: ... @overload def mapdomain( # type: ignore[overload-overlap] x: npt.NDArray[np.floating[Any] | np.integer[Any]], old: npt.NDArray[np.floating[Any] | np.integer[Any]], new: npt.NDArray[np.floating[Any] | np.integer[Any]], -) -> _FloatArray1D: ... +) -> _FloatSeries: ... @overload def mapdomain( x: npt.NDArray[np.number[Any]], old: npt.NDArray[np.number[Any]], new: npt.NDArray[np.number[Any]], -) -> _ComplexArray1D: ... +) -> _ComplexSeries: ... @overload def mapdomain( x: npt.NDArray[np.object_ | np.number[Any]], old: npt.NDArray[np.object_ | np.number[Any]], new: npt.NDArray[np.object_ | np.number[Any]], -) -> _ObjectArray1D: ... +) -> _ObjectSeries: ... @overload def mapdomain( # type: ignore[overload-overlap] - x: _AnyRealSeries1D, - old: _AnyRealSeries1D, - new: _AnyRealSeries1D, -) -> _FloatArray1D: ... + x: _SeriesLikeFloat_co, + old: _SeriesLikeFloat_co, + new: _SeriesLikeFloat_co, +) -> _FloatSeries: ... @overload def mapdomain( - x: _AnyComplexSeries1D, - old: _AnyComplexSeries1D, - new: _AnyComplexSeries1D, -) -> _ComplexArray1D: ... + x: _SeriesLikeComplex_co, + old: _SeriesLikeComplex_co, + new: _SeriesLikeComplex_co, +) -> _ComplexSeries: ... @overload def mapdomain( - x: _AnySeries1D, - old:_AnySeries1D, - new: _AnySeries1D, -) -> _ObjectArray1D: ... + x: _SeriesLikeCoef_co, + old:_SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, +) -> _ObjectSeries: ... @overload def mapdomain( x: object, - old: _AnySeries1D, - new: _AnySeries1D, + old: _SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, ) -> object: ... def _nth_slice( @@ -279,26 +291,26 @@ _vander_nd_flat: _FuncVanderND[Literal["_vander_nd_flat"]] def _fromroots( # type: ignore[overload-overlap] line_f: _AnyLineF, mul_f: _AnyMulF, - roots: _AnyRealSeries1D, -) -> _FloatArray1D: ... + roots: _SeriesLikeFloat_co, +) -> _FloatSeries: ... @overload def _fromroots( line_f: _AnyLineF, mul_f: _AnyMulF, - roots: _AnyComplexSeries1D, -) -> _ComplexArray1D: ... + roots: _SeriesLikeComplex_co, +) -> _ComplexSeries: ... @overload def _fromroots( line_f: _AnyLineF, mul_f: _AnyMulF, - roots: _AnyObjectSeries1D, -) -> _ObjectArray1D: ... + roots: _SeriesLikeCoef_co, +) -> _ObjectSeries: ... @overload def _fromroots( line_f: _AnyLineF, mul_f: _AnyMulF, - roots: _AnySeries1D, -) -> _CoefArray1D: ... + roots: _SeriesLikeCoef_co, +) -> _CoefSeries: ... _valnd: _FuncValND[Literal["_valnd"]] _gridnd: _FuncValND[Literal["_gridnd"]] @@ -307,27 +319,27 @@ _gridnd: _FuncValND[Literal["_gridnd"]] @overload def _div( # type: ignore[overload-overlap] mul_f: _AnyMulF, - c1: _AnyRealSeries1D, - c2: _AnyRealSeries1D, -) -> _Tuple2[_FloatArray1D]: ... + c1: _SeriesLikeFloat_co, + c2: _SeriesLikeFloat_co, +) -> _Tuple2[_FloatSeries]: ... @overload def _div( mul_f: _AnyMulF, - c1: _AnyComplexSeries1D, - c2: _AnyComplexSeries1D, -) -> _Tuple2[_ComplexArray1D]: ... 
+ c1: _SeriesLikeComplex_co, + c2: _SeriesLikeComplex_co, +) -> _Tuple2[_ComplexSeries]: ... @overload def _div( mul_f: _AnyMulF, - c1: _AnyObjectSeries1D, - c2: _AnyObjectSeries1D, -) -> _Tuple2[_ObjectArray1D]: ... + c1: _SeriesLikeCoef_co, + c2: _SeriesLikeCoef_co, +) -> _Tuple2[_ObjectSeries]: ... @overload def _div( mul_f: _AnyMulF, - c1: _AnySeries1D, - c2: _AnySeries1D, -) -> _Tuple2[_CoefArray1D]: ... + c1: _SeriesLikeCoef_co, + c2: _SeriesLikeCoef_co, +) -> _Tuple2[_CoefSeries]: ... _add: Final[_FuncBinOp] _sub: Final[_FuncBinOp] @@ -336,90 +348,90 @@ _sub: Final[_FuncBinOp] @overload def _pow( # type: ignore[overload-overlap] mul_f: _AnyMulF, - c: _AnyRealSeries1D, - pow: _AnyIntArg, - maxpower: None | _AnyIntArg = ..., -) -> _FloatArray1D: ... + c: _SeriesLikeFloat_co, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., +) -> _FloatSeries: ... @overload def _pow( mul_f: _AnyMulF, - c: _AnyComplexSeries1D, - pow: _AnyIntArg, - maxpower: None | _AnyIntArg = ..., -) -> _ComplexArray1D: ... + c: _SeriesLikeComplex_co, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., +) -> _ComplexSeries: ... @overload def _pow( mul_f: _AnyMulF, - c: _AnyObjectSeries1D, - pow: _AnyIntArg, - maxpower: None | _AnyIntArg = ..., -) -> _ObjectArray1D: ... + c: _SeriesLikeCoef_co, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., +) -> _ObjectSeries: ... @overload def _pow( mul_f: _AnyMulF, - c: _AnySeries1D, - pow: _AnyIntArg, - maxpower: None | _AnyIntArg = ..., -) -> _CoefArray1D: ... + c: _SeriesLikeCoef_co, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., +) -> _CoefSeries: ... # keep in sync with `_polytypes._FuncFit` @overload def _fit( # type: ignore[overload-overlap] vander_f: _AnyVanderF, - x: _AnyRealSeries1D, - y: _AnyRealSeriesND, - deg: _AnyIntSeries1D, - domain: None | _AnyRealSeries1D = ..., - rcond: None | _AnyRealScalar = ..., + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: _SeriesLikeInt_co, + domain: None | _SeriesLikeFloat_co = ..., + rcond: None | _FloatLike_co = ..., full: Literal[False] = ..., - w: None | _AnyRealSeries1D = ..., -) -> _FloatArrayND: ... + w: None | _SeriesLikeFloat_co = ..., +) -> _FloatArray: ... @overload def _fit( vander_f: _AnyVanderF, - x: _AnyComplexSeries1D, - y: _AnyComplexSeriesND, - deg: _AnyIntSeries1D, - domain: None | _AnyComplexSeries1D = ..., - rcond: None | _AnyRealScalar = ..., + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: _SeriesLikeInt_co, + domain: None | _SeriesLikeComplex_co = ..., + rcond: None | _FloatLike_co = ..., full: Literal[False] = ..., - w: None | _AnyComplexSeries1D = ..., -) -> _ComplexArrayND: ... + w: None | _SeriesLikeComplex_co = ..., +) -> _ComplexArray: ... @overload def _fit( vander_f: _AnyVanderF, - x: _AnySeries1D, - y: _AnySeriesND, - deg: _AnyIntSeries1D, - domain: None | _AnySeries1D = ..., - rcond: None | _AnyRealScalar = ..., + x: _SeriesLikeCoef_co, + y: _ArrayLikeCoef_co, + deg: _SeriesLikeInt_co, + domain: None | _SeriesLikeCoef_co = ..., + rcond: None | _FloatLike_co = ..., full: Literal[False] = ..., - w: None | _AnySeries1D = ..., -) -> _CoefArrayND: ... + w: None | _SeriesLikeCoef_co = ..., +) -> _CoefArray: ... 
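[Editor's note] The private `_pow` helper typed above backs the public `*pow` functions, where `maxpower` lets callers cap coefficient growth. An illustrative sketch, not part of the patch:

    from numpy.polynomial import polynomial as P

    print(P.polypow([1, 1], 2))  # [1. 2. 1.]  i.e. (1 + x)**2

    try:
        P.polypow([1, 1], 32, maxpower=16)  # power above the requested cap
    except ValueError:
        print("polypow refused: power exceeds maxpower")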
@overload def _fit( vander_f: _AnyVanderF, - x: _AnySeries1D, - y: _AnySeries1D, - deg: _AnyIntSeries1D, - domain: None | _AnySeries1D, - rcond: None | _AnyRealScalar , + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: _SeriesLikeInt_co, + domain: None | _SeriesLikeCoef_co, + rcond: None | _FloatLike_co , full: Literal[True], /, - w: None | _AnySeries1D = ..., -) -> tuple[_CoefArray1D, Sequence[np.inexact[Any] | np.int32]]: ... + w: None | _SeriesLikeCoef_co = ..., +) -> tuple[_CoefSeries, Sequence[np.inexact[Any] | np.int32]]: ... @overload def _fit( vander_f: _AnyVanderF, - x: _AnySeries1D, - y: _AnySeries1D, - deg: _AnyIntSeries1D, - domain: None | _AnySeries1D = ..., - rcond: None | _AnyRealScalar = ..., + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: _SeriesLikeInt_co, + domain: None | _SeriesLikeCoef_co = ..., + rcond: None | _FloatLike_co = ..., *, full: Literal[True], - w: None | _AnySeries1D = ..., -) -> tuple[_CoefArray1D, Sequence[np.inexact[Any] | np.int32]]: ... + w: None | _SeriesLikeCoef_co = ..., +) -> tuple[_CoefSeries, Sequence[np.inexact[Any] | np.int32]]: ... def _as_int(x: SupportsIndex, desc: str) -> int: ... -def format_float(x: _AnyRealScalar, parens: bool = ...) -> str: ... +def format_float(x: _FloatLike_co, parens: bool = ...) -> str: ... diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index eb3dbe0f05a3..ce3963880c5e 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,5 +1,6 @@ import sys from collections.abc import Sequence +from decimal import Decimal from typing import Any, Literal as L, TypeAlias, TypeVar import numpy as np @@ -51,7 +52,7 @@ AR_O_co: npt.NDArray[np.object_ | np.number[Any]] SQ_i: Sequence[int] SQ_f: Sequence[float] SQ_c: Sequence[complex] -SQ_O: Sequence[object] +SQ_O: Sequence[Decimal] PS_poly: npp.Polynomial PS_cheb: npp.Chebyshev diff --git a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi index 4a8377d81deb..c6a68f0b742c 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -1,5 +1,6 @@ import sys from collections.abc import Sequence +from decimal import Decimal from fractions import Fraction from typing import Any, Literal as L, TypeAlias @@ -25,8 +26,7 @@ num_int: int num_float: float num_complex: complex # will result in an `object_` dtype -num_fraction: Fraction -num_object: object +num_object: Decimal | Fraction sct_int: np.int_ sct_float: np.float64 @@ -41,8 +41,7 @@ arr_object: npt.NDArray[np.object_] seq_num_int: Sequence[int] seq_num_float: Sequence[float] seq_num_complex: Sequence[complex] -seq_num_fraction: Sequence[Fraction] -seq_num_object: Sequence[object] +seq_num_object: Sequence[Decimal | Fraction] seq_sct_int: Sequence[np.int_] seq_sct_float: Sequence[np.float64] @@ -57,8 +56,7 @@ seq_arr_object: Sequence[npt.NDArray[np.object_]] seq_seq_num_int: Sequence[Sequence[int]] seq_seq_num_float: Sequence[Sequence[float]] seq_seq_num_complex: Sequence[Sequence[complex]] -seq_seq_num_fraction: Sequence[Sequence[Fraction]] -seq_seq_num_object: Sequence[Sequence[object]] +seq_seq_num_object: Sequence[Sequence[Decimal | Fraction]] seq_seq_sct_int: Sequence[Sequence[np.int_]] seq_seq_sct_float: Sequence[Sequence[np.float64]] @@ -75,7 +73,6 @@ assert_type(pu.as_series(arr_object), list[_ArrObject1D]) 
assert_type(pu.as_series(seq_num_int), list[_ArrFloat1D]) assert_type(pu.as_series(seq_num_float), list[_ArrFloat1D]) assert_type(pu.as_series(seq_num_complex), list[_ArrComplex1D]) -assert_type(pu.as_series(seq_num_fraction), list[_ArrObject1D]) assert_type(pu.as_series(seq_num_object), list[_ArrObject1D]) assert_type(pu.as_series(seq_sct_int), list[_ArrFloat1D]) @@ -91,7 +88,6 @@ assert_type(pu.as_series(seq_arr_object), list[_ArrObject1D]) assert_type(pu.as_series(seq_seq_num_int), list[_ArrFloat1D]) assert_type(pu.as_series(seq_seq_num_float), list[_ArrFloat1D]) assert_type(pu.as_series(seq_seq_num_complex), list[_ArrComplex1D]) -assert_type(pu.as_series(seq_seq_num_fraction), list[_ArrObject1D]) assert_type(pu.as_series(seq_seq_num_object), list[_ArrObject1D]) assert_type(pu.as_series(seq_seq_sct_int), list[_ArrFloat1D]) @@ -104,7 +100,7 @@ assert_type(pu.as_series(seq_seq_sct_object), list[_ArrObject1D]) assert_type(pu.trimcoef(num_int), _ArrFloat1D) assert_type(pu.trimcoef(num_float), _ArrFloat1D) assert_type(pu.trimcoef(num_complex), _ArrComplex1D) -assert_type(pu.trimcoef(num_fraction), _ArrObject1D) +assert_type(pu.trimcoef(num_object), _ArrObject1D) assert_type(pu.trimcoef(num_object), _ArrObject1D) assert_type(pu.trimcoef(sct_int), _ArrFloat1D) @@ -120,7 +116,6 @@ assert_type(pu.trimcoef(arr_object), _ArrObject1D) assert_type(pu.trimcoef(seq_num_int), _ArrFloat1D) assert_type(pu.trimcoef(seq_num_float), _ArrFloat1D) assert_type(pu.trimcoef(seq_num_complex), _ArrComplex1D) -assert_type(pu.trimcoef(seq_num_fraction), _ArrObject1D) assert_type(pu.trimcoef(seq_num_object), _ArrObject1D) assert_type(pu.trimcoef(seq_sct_int), _ArrFloat1D) @@ -133,7 +128,7 @@ assert_type(pu.trimcoef(seq_sct_object), _ArrObject1D) assert_type(pu.getdomain(num_int), _ArrFloat1D_2) assert_type(pu.getdomain(num_float), _ArrFloat1D_2) assert_type(pu.getdomain(num_complex), _ArrComplex1D_2) -assert_type(pu.getdomain(num_fraction), _ArrObject1D_2) +assert_type(pu.getdomain(num_object), _ArrObject1D_2) assert_type(pu.getdomain(num_object), _ArrObject1D_2) assert_type(pu.getdomain(sct_int), _ArrFloat1D_2) @@ -149,7 +144,6 @@ assert_type(pu.getdomain(arr_object), _ArrObject1D_2) assert_type(pu.getdomain(seq_num_int), _ArrFloat1D_2) assert_type(pu.getdomain(seq_num_float), _ArrFloat1D_2) assert_type(pu.getdomain(seq_num_complex), _ArrComplex1D_2) -assert_type(pu.getdomain(seq_num_fraction), _ArrObject1D_2) assert_type(pu.getdomain(seq_num_object), _ArrObject1D_2) assert_type(pu.getdomain(seq_sct_int), _ArrFloat1D_2) @@ -164,9 +158,7 @@ assert_type(pu.mapparms(seq_num_int, seq_num_float), _Tuple2[float]) assert_type(pu.mapparms(seq_num_float, seq_num_float), _Tuple2[float]) assert_type(pu.mapparms(seq_num_float, seq_num_complex), _Tuple2[complex]) assert_type(pu.mapparms(seq_num_complex, seq_num_complex), _Tuple2[complex]) -assert_type(pu.mapparms(seq_num_complex, seq_num_fraction), _Tuple2[object]) -assert_type(pu.mapparms(seq_num_fraction, seq_num_fraction), _Tuple2[object]) -assert_type(pu.mapparms(seq_num_fraction, seq_num_fraction), _Tuple2[object]) +assert_type(pu.mapparms(seq_num_complex, seq_num_object), _Tuple2[object]) assert_type(pu.mapparms(seq_num_object, seq_num_object), _Tuple2[object]) assert_type(pu.mapparms(seq_sct_int, seq_sct_int), _Tuple2[np.floating[Any]]) @@ -194,9 +186,8 @@ assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_float), np.floating[A assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_complex), np.complexfloating[Any, Any]) assert_type(pu.mapdomain(num_float, 
seq_num_complex, seq_num_complex), np.complexfloating[Any, Any]) assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_complex), np.complexfloating[Any, Any]) -assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_fraction), object) -assert_type(pu.mapdomain(num_complex, seq_num_fraction, seq_num_fraction), object) -assert_type(pu.mapdomain(num_fraction, seq_num_fraction, seq_num_fraction), object) +assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_object), object) +assert_type(pu.mapdomain(num_complex, seq_num_object, seq_num_object), object) assert_type(pu.mapdomain(num_object, seq_num_object, seq_num_object), object) assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_int), _ArrFloat1D) @@ -206,9 +197,8 @@ assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_float), _ArrFloat assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_complex), _ArrComplex1D) assert_type(pu.mapdomain(seq_num_float, seq_num_complex, seq_num_complex), _ArrComplex1D) assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_complex), _ArrComplex1D) -assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_fraction), _ArrObject1D) -assert_type(pu.mapdomain(seq_num_complex, seq_num_fraction, seq_num_fraction), _ArrObject1D) -assert_type(pu.mapdomain(seq_num_fraction, seq_num_fraction, seq_num_fraction), _ArrObject1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_object, seq_num_object), _ArrObject1D) assert_type(pu.mapdomain(seq_num_object, seq_num_object, seq_num_object), _ArrObject1D) assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_int), _ArrFloat1D) From 2d2482d42efe2449670adb031b98625153fc97c1 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 16 Jul 2024 07:41:57 +0200 Subject: [PATCH 815/980] TYP: Simple true-negative type-tests for the low-level ``numpy.polynomial`` functions --- numpy/polynomial/_polytypes.pyi | 74 +++++++++---------- .../tests/data/fail/polynomial_polybase.pyi | 30 ++++++++ .../data/reveal/polynomial_polyutils.pyi | 1 - 3 files changed, 64 insertions(+), 41 deletions(-) create mode 100644 numpy/typing/tests/data/fail/polynomial_polybase.pyi diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 30d6e7d906f8..6d2fb757c64f 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -46,17 +46,6 @@ _V = TypeVar("_V") _V_co = TypeVar("_V_co", covariant=True) _Self = TypeVar("_Self", bound=object) -class _SupportsLenAndGetItem(Protocol[_V_co]): - def __len__(self, /) -> int: ... - def __getitem__(self, i: int, /) -> _V_co: ... - -class _SimpleSequence(Protocol[_V_co]): - def __len__(self, /) -> int: ... - @overload - def __getitem__(self, i: int, /) -> _V_co: ... - @overload - def __getitem__(self: _Self, ii: slice, /) -> _Self: ... - _SCT = TypeVar("_SCT", bound=np.number[Any] | np.bool | np.object_) _SCT_co = TypeVar( "_SCT_co", @@ -98,23 +87,24 @@ _CoefLike_co: TypeAlias = _NumberLike_co | _NumberObjectLike_co # The term "series" is used here to refer to 1-d arrays of numeric scalars. 
_SeriesLikeInt_co: TypeAlias = ( _SupportsArray[np.integer[Any] | np.bool] - | _SupportsLenAndGetItem[_IntLike_co] + | Sequence[_IntLike_co] ) _SeriesLikeFloat_co: TypeAlias = ( _SupportsArray[np.floating[Any] | np.integer[Any] | np.bool] - | _SupportsLenAndGetItem[_FloatLike_co] + | Sequence[_FloatLike_co] ) _SeriesLikeComplex_co: TypeAlias = ( _SupportsArray[np.integer[Any] | np.inexact[Any] | np.bool] - | _SupportsLenAndGetItem[_ComplexLike_co] + | Sequence[_ComplexLike_co] ) _SeriesLikeNumberObject_co: TypeAlias = ( _SupportsArray[np.object_] - | _SupportsLenAndGetItem[_NumberObjectLike_co] + | Sequence[_NumberObjectLike_co] ) _SeriesLikeCoef_co: TypeAlias = ( + # npt.NDArray[np.number[Any] | np.bool | np.object_] _SupportsArray[np.number[Any] | np.bool | np.object_] - | _SupportsLenAndGetItem[_CoefLike_co] + | Sequence[_CoefLike_co] ) _ArrayLikeNumberObject_co: TypeAlias = ( @@ -123,7 +113,11 @@ _ArrayLikeNumberObject_co: TypeAlias = ( | _SeriesLikeNumberObject_co | _NestedSequence[_SeriesLikeNumberObject_co] ) -_ArrayLikeCoef_co: TypeAlias = _ArrayLikeNumber_co | _ArrayLikeNumberObject_co +_ArrayLikeCoef_co: TypeAlias = ( + npt.NDArray[np.number[Any] | np.bool | np.object_] + | _ArrayLikeNumber_co + | _ArrayLikeNumberObject_co +) _Name_co = TypeVar("_Name_co", bound=LiteralString, covariant=True) @@ -256,7 +250,7 @@ class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): self, /, c: _ArrayLikeFloat_co, m: SupportsIndex = ..., - k: _NumberLike_co | _SupportsLenAndGetItem[_NumberLike_co] = ..., + k: _NumberLike_co | Sequence[_NumberLike_co] = ..., lbnd: _NumberLike_co = ..., scl: _NumberLike_co = ..., axis: SupportsIndex = ..., @@ -266,7 +260,7 @@ class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): self, /, c: _ArrayLikeComplex_co, m: SupportsIndex = ..., - k: _NumberLike_co | _SupportsLenAndGetItem[_NumberLike_co] = ..., + k: _NumberLike_co | Sequence[_NumberLike_co] = ..., lbnd: _NumberLike_co = ..., scl: _NumberLike_co = ..., axis: SupportsIndex = ..., @@ -276,7 +270,7 @@ class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): self, /, c: _ArrayLikeNumberObject_co, m: SupportsIndex = ..., - k: _NumberLike_co | _SupportsLenAndGetItem[_CoefLike_co] = ..., + k: _NumberLike_co | Sequence[_CoefLike_co] = ..., lbnd: _NumberLike_co = ..., scl: _NumberLike_co = ..., axis: SupportsIndex = ..., @@ -583,7 +577,7 @@ class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): deg: SupportsIndex, ) -> _CoefArray: ... -_AnyDegrees: TypeAlias = _SupportsLenAndGetItem[SupportsIndex] +_AnyDegrees: TypeAlias = Sequence[SupportsIndex] @final class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): @@ -662,32 +656,32 @@ class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( # type: ignore[overload-overlap] self, /, - vander_fs: _SupportsLenAndGetItem[_AnyFuncVander], - points: _SupportsLenAndGetItem[_ArrayLikeFloat_co], - degrees: _SupportsLenAndGetItem[SupportsIndex], + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], ) -> _FloatArray: ... @overload def __call__( self, /, - vander_fs: _SupportsLenAndGetItem[_AnyFuncVander], - points: _SupportsLenAndGetItem[_ArrayLikeComplex_co], - degrees: _SupportsLenAndGetItem[SupportsIndex], + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], ) -> _ComplexArray: ... 
@overload def __call__( self, /, - vander_fs: _SupportsLenAndGetItem[_AnyFuncVander], - points: _SupportsLenAndGetItem[ + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[ _ArrayLikeObject_co | _ArrayLikeComplex_co, ], - degrees: _SupportsLenAndGetItem[SupportsIndex], + degrees: Sequence[SupportsIndex], ) -> _ObjectArray: ... @overload def __call__( self, /, - vander_fs: _SupportsLenAndGetItem[_AnyFuncVander], - points: _SupportsLenAndGetItem[npt.ArrayLike], - degrees: _SupportsLenAndGetItem[SupportsIndex], + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], ) -> _CoefArray: ... @final @@ -710,39 +704,39 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): deg: int | _SeriesLikeInt_co, rcond: None | float = ..., full: Literal[False] = ..., - w: None | _ArrayLikeComplex_co = ..., + w: None | _SeriesLikeFloat_co = ..., ) -> _ComplexArray: ... @overload def __call__( self, /, - x: _SeriesLikeCoef_co, + x: _SeriesLikeComplex_co, y: _ArrayLikeCoef_co, deg: int | _SeriesLikeInt_co, rcond: None | float = ..., full: Literal[False] = ..., - w: None | _SeriesLikeCoef_co = ..., + w: None | _SeriesLikeFloat_co = ..., ) -> _ObjectArray: ... @overload def __call__( self, - x: _SeriesLikeCoef_co, + x: _SeriesLikeComplex_co, y: _ArrayLikeCoef_co, deg: int | _SeriesLikeInt_co, rcond: None | float, full: Literal[True], /, - w: None | _SeriesLikeCoef_co = ..., + w: None | _SeriesLikeFloat_co = ..., ) -> tuple[_CoefArray, Sequence[np.inexact[Any] | np.int32]]: ... @overload def __call__( self, /, - x: _SeriesLikeCoef_co, + x: _SeriesLikeComplex_co, y: _ArrayLikeCoef_co, deg: int | _SeriesLikeInt_co, rcond: None | float = ..., *, full: Literal[True], - w: None | _SeriesLikeCoef_co = ..., + w: None | _SeriesLikeFloat_co = ..., ) -> tuple[_CoefArray, Sequence[np.inexact[Any] | np.int32]]: ... 
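
(Editorial sketch of how these `_Func*` protocols are meant to be consumed; the declaration lines below are an assumed form, since the per-module stubs are not part of this diff.) Each `numpy.polynomial` backend annotates its module-level functions with the matching protocol, roughly:

# e.g. in numpy/polynomial/polynomial.pyi (assumed, not shown in this patch)
polyfit: _FuncFit[L["polyfit"]]
polyval: _FuncVal[L["polyval"]]

so `polyfit`, `chebfit`, `legfit`, and friends share a single overload set, while `__name__` stays a per-function literal via the `_Named` base.
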
@final diff --git a/numpy/typing/tests/data/fail/polynomial_polybase.pyi b/numpy/typing/tests/data/fail/polynomial_polybase.pyi new file mode 100644 index 000000000000..b698066010ff --- /dev/null +++ b/numpy/typing/tests/data/fail/polynomial_polybase.pyi @@ -0,0 +1,30 @@ +import numpy as np +import numpy.polynomial as npp +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] +AR_U: npt.NDArray[np.str_] + +poly_obj: npp.polynomial.Polynomial + +npp.polynomial.polymul(AR_f8, AR_U) # E: incompatible type +npp.polynomial.polydiv(AR_f8, AR_U) # E: incompatible type + +5**poly_obj # E: No overload variant + +npp.polynomial.polyint(AR_U) # E: incompatible type +npp.polynomial.polyint(AR_f8, m=1j) # E: No overload variant + +npp.polynomial.polyder(AR_U) # E: incompatible type +npp.polynomial.polyder(AR_f8, m=1j) # E: No overload variant + +npp.polynomial.polyfit(AR_O, AR_f8, 1) # E: incompatible type +npp.polynomial.polyfit(AR_f8, AR_f8, 1, rcond=1j) # E: No overload variant +npp.polynomial.polyfit(AR_f8, AR_f8, 1, w=AR_c16) # E: incompatible type +npp.polynomial.polyfit(AR_f8, AR_f8, 1, cov="bob") # E: No overload variant + +npp.polynomial.polyval(AR_f8, AR_U) # E: incompatible type +npp.polynomial.polyadd(AR_f8, AR_U) # E: incompatible type +npp.polynomial.polysub(AR_f8, AR_U) # E: incompatible type diff --git a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi index c6a68f0b742c..eecdb14e1c3c 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -211,7 +211,6 @@ assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_complex), _Ar assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_object), _ArrObject1D) assert_type(pu.mapdomain(seq_sct_complex, seq_sct_object, seq_sct_object), _ArrObject1D) assert_type(pu.mapdomain(seq_sct_object, seq_sct_object, seq_sct_object), _ArrObject1D) -assert_type(pu.mapdomain(seq_sct_object, seq_sct_object, seq_sct_object), _ArrObject1D) assert_type(pu.mapdomain(arr_int, arr_int, arr_int), _ArrFloat1D) assert_type(pu.mapdomain(arr_int, arr_int, arr_float), _ArrFloat1D) From 6de9cb2490f4bbbbc6eacb1354fa78b706a9a156 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 16 Jul 2024 20:29:53 +0200 Subject: [PATCH 816/980] TYP: Add type-tests for ``numpy.polynomial`` convenience class ops --- numpy/polynomial/_polybase.pyi | 42 ++++-- ...ial_polybase.pyi => polynomial_series.pyi} | 0 .../tests/data/reveal/polynomial_polybase.pyi | 133 +++++++++++++----- 3 files changed, 124 insertions(+), 51 deletions(-) rename numpy/typing/tests/data/fail/{polynomial_polybase.pyi => polynomial_series.pyi} (100%) diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index b22025427e83..ed3e45406092 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -1,4 +1,6 @@ import abc +import decimal +import numbers import sys from collections.abc import Iterator, Mapping, Sequence from typing import ( @@ -23,7 +25,6 @@ from numpy._typing import ( _ArrayLikeFloat_co, _ArrayLikeComplex_co, - _ArrayLikeObject_co, ) from ._polytypes import ( @@ -39,6 +40,7 @@ from ._polytypes import ( _SeriesLikeInt_co, _SeriesLikeCoef_co, + _ArrayLikeNumberObject_co, _ArrayLikeCoef_co, ) @@ -55,6 +57,7 @@ __all__: Final[Sequence[str]] = ("ABCPolyBase",) _NameCo = TypeVar("_NameCo", bound=None | LiteralString, covariant=True) 
_Self = TypeVar("_Self", bound="ABCPolyBase") +_Other = TypeVar("_Other", bound="ABCPolyBase") _AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co _Hundred: TypeAlias = Literal[100] @@ -87,31 +90,39 @@ class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): symbol: str = ..., ) -> None: ... + @overload + def __call__(self, /, arg: _Other) -> _Other: ... + # TODO: Once `_ShapeType@ndarray` is covariant and bounded (see #26081), + # additionally include 0-d arrays as input types with scalar return type. @overload def __call__( self, /, - arg: _FloatLike_co, + arg: _FloatLike_co | decimal.Decimal | numbers.Real | np.object_, ) -> np.float64 | np.complex128: ... @overload - def __call__(self, /, arg: _NumberLike_co) -> np.complex128: ... - @overload def __call__( self, /, - arg: _ArrayLikeFloat_co, - ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... + arg: _NumberLike_co | numbers.Complex, + ) -> np.complex128: ... + @overload + def __call__(self, /, arg: _ArrayLikeFloat_co) -> ( + npt.NDArray[np.float64] + | npt.NDArray[np.complex128] + | npt.NDArray[np.object_] + ): ... @overload def __call__( self, /, arg: _ArrayLikeComplex_co, - ) -> npt.NDArray[np.complex128 | np.object_]: ... + ) -> npt.NDArray[np.complex128] | npt.NDArray[np.object_]: ... @overload def __call__( self, /, - arg: _ArrayLikeObject_co, + arg: _ArrayLikeNumberObject_co, ) -> npt.NDArray[np.object_]: ... def __str__(self, /) -> str: ... @@ -160,20 +171,23 @@ class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): def convert( self, domain: None | _SeriesLikeCoef_co, - kind: type[_Self], /, + kind: type[_Other], + /, window: None | _SeriesLikeCoef_co = ..., - ) -> _Self: ... + ) -> _Other: ... @overload def convert( - self, /, + self, + /, domain: None | _SeriesLikeCoef_co = ..., *, - kind: type[_Self], + kind: type[_Other], window: None | _SeriesLikeCoef_co = ..., - ) -> _Self: ... + ) -> _Other: ... 
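
(A brief usage sketch, illustrative only, of why the `kind`-taking `convert` overloads above return `_Other` rather than `_Self`.) The result type follows the target class, not the receiver:

from numpy.polynomial import Chebyshev, Polynomial

p = Polynomial([1.0, 2.0, 3.0])
c = p.convert(kind=Chebyshev)  # inferred as Chebyshev, and a Chebyshev at runtime
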
     @overload
     def convert(
-        self: _Self, /,
+        self: _Self,
+        /,
         domain: None | _SeriesLikeCoef_co = ...,
         kind: type[_Self] = ...,
         window: None | _SeriesLikeCoef_co = ...,
diff --git a/numpy/typing/tests/data/fail/polynomial_polybase.pyi b/numpy/typing/tests/data/fail/polynomial_series.pyi
similarity index 100%
rename from numpy/typing/tests/data/fail/polynomial_polybase.pyi
rename to numpy/typing/tests/data/fail/polynomial_series.pyi
diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi
index ce3963880c5e..ebb5cb5bee2c 100644
--- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi
+++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi
@@ -1,3 +1,4 @@
+from fractions import Fraction
 import sys
 from collections.abc import Sequence
 from decimal import Decimal
@@ -32,20 +33,19 @@ _Ar_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]]
 
 _BasisName: TypeAlias = L["X"]
 
-SC_i: np.integer[Any]
-SC_i_co: int | np.integer[Any]
-SC_f: np.floating[Any]
-SC_f_co: float | np.floating[Any] | np.integer[Any]
-SC_c: np.complexfloating[Any, Any]
-SC_c_co: complex | np.number[Any]
-SC_O: np.object_
-SC_O_co: np.object_ | np.number[Any] | object
-
-AR_i: npt.NDArray[np.integer[Any]]
-AR_f: npt.NDArray[np.floating[Any]]
-AR_f_co: npt.NDArray[np.floating[Any] | np.integer[Any]]
-AR_c: npt.NDArray[np.complexfloating[Any, Any]]
-AR_c_co: npt.NDArray[np.number[Any]]
+SC_i: np.int_
+SC_i_co: int | np.int_
+SC_f: np.float64
+SC_f_co: float | np.float64 | np.int_
+SC_c: np.complex128
+SC_c_co: complex | np.complex128
+SC_O: Decimal
+
+AR_i: npt.NDArray[np.int_]
+AR_f: npt.NDArray[np.float64]
+AR_f_co: npt.NDArray[np.float64] | npt.NDArray[np.int_]
+AR_c: npt.NDArray[np.complex128]
+AR_c_co: npt.NDArray[np.complex128] | npt.NDArray[np.float64] | npt.NDArray[np.int_]
 AR_O: npt.NDArray[np.object_]
 AR_O_co: npt.NDArray[np.object_ | np.number[Any]]
@@ -69,6 +69,8 @@ PS_all: (
     | npp.Legendre
 )
 
+# static- and classmethods
+
 assert_type(type(PS_poly).basis_name, None)
 assert_type(type(PS_cheb).basis_name, L['T'])
 assert_type(type(PS_herm).basis_name, L['H'])
@@ -80,11 +82,37 @@ assert_type(type(PS_all).__hash__, None)
 assert_type(type(PS_all).__array_ufunc__, None)
 assert_type(type(PS_all).maxpower, L[100])
 
+assert_type(type(PS_poly).fromroots(SC_i), npp.Polynomial)
+assert_type(type(PS_poly).fromroots(SQ_i), npp.Polynomial)
+assert_type(type(PS_poly).fromroots(AR_i), npp.Polynomial)
+assert_type(type(PS_cheb).fromroots(SC_f), npp.Chebyshev)
+assert_type(type(PS_cheb).fromroots(SQ_f), npp.Chebyshev)
+assert_type(type(PS_cheb).fromroots(AR_f_co), npp.Chebyshev)
+assert_type(type(PS_herm).fromroots(SC_c), npp.Hermite)
+assert_type(type(PS_herm).fromroots(SQ_c), npp.Hermite)
+assert_type(type(PS_herm).fromroots(AR_c_co), npp.Hermite)
+assert_type(type(PS_leg).fromroots(SC_O), npp.Legendre)
+assert_type(type(PS_leg).fromroots(SQ_O), npp.Legendre)
+assert_type(type(PS_leg).fromroots(AR_O_co), npp.Legendre)
+
+assert_type(type(PS_poly).identity(), npp.Polynomial)
+assert_type(type(PS_cheb).identity(symbol='z'), npp.Chebyshev)
+
+assert_type(type(PS_lag).basis(SC_i), npp.Laguerre)
+assert_type(type(PS_leg).basis(32, symbol='u'), npp.Legendre)
+
+assert_type(type(PS_herm).cast(PS_poly), npp.Hermite)
+assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE)
+
+# attributes / properties
+
 assert_type(PS_all.coef, _Ar_x_n)
 assert_type(PS_all.domain, _Ar_x_2)
 assert_type(PS_all.window, _Ar_x_2)
 assert_type(PS_all.symbol, LiteralString)
 
+# instance methods
+
assert_type(PS_all.has_samecoef(PS_all), bool) assert_type(PS_all.has_samedomain(PS_all), bool) assert_type(PS_all.has_samewindow(PS_all), bool) @@ -140,26 +168,57 @@ assert_type( tuple[npp.HermiteE, Sequence[np.inexact[Any] | np.int32]], ) -assert_type(type(PS_poly).fromroots(SC_i), npp.Polynomial) -assert_type(type(PS_poly).fromroots(SQ_i), npp.Polynomial) -assert_type(type(PS_poly).fromroots(AR_i), npp.Polynomial) -assert_type(type(PS_cheb).fromroots(SC_f), npp.Chebyshev) -assert_type(type(PS_cheb).fromroots(SQ_f), npp.Chebyshev) -assert_type(type(PS_cheb).fromroots(AR_f_co), npp.Chebyshev) -assert_type(type(PS_herm).fromroots(SC_c), npp.Hermite) -assert_type(type(PS_herm).fromroots(SQ_c), npp.Hermite) -assert_type(type(PS_herm).fromroots(AR_c_co), npp.Hermite) -assert_type(type(PS_leg).fromroots(SC_O), npp.Legendre) -assert_type(type(PS_leg).fromroots(SQ_O), npp.Legendre) -assert_type(type(PS_leg).fromroots(AR_O_co), npp.Legendre) - -assert_type(type(PS_poly).identity(), npp.Polynomial) -assert_type(type(PS_cheb).identity(symbol='z'), npp.Chebyshev) - -assert_type(type(PS_lag).basis(SC_i), npp.Laguerre) -assert_type(type(PS_leg).basis(32, symbol='u'), npp.Legendre) - -assert_type(type(PS_herm).cast(PS_poly), npp.Hermite) -assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) - -# TODO: ABCPolyBase operators +# custom operations + +assert_type(PS_all.__hash__, None) +assert_type(PS_all.__array_ufunc__, None) + +assert_type(str(PS_all), str) +assert_type(repr(PS_all), str) +assert_type(format(PS_all), str) + +assert_type(len(PS_all), int) +assert_type(next(iter(PS_all)), np.inexact[Any] | object) + +assert_type(PS_all(SC_f_co), np.float64 | np.complex128) +assert_type(PS_all(SC_c_co), np.complex128) +assert_type(PS_all(Decimal()), np.float64 | np.complex128) +assert_type(PS_all(Fraction()), np.float64 | np.complex128) +assert_type(PS_poly(SQ_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(SQ_O), npt.NDArray[np.object_]) +assert_type(PS_poly(AR_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(AR_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(AR_O), npt.NDArray[np.object_]) +assert_type(PS_all(PS_poly), npp.Polynomial) + +assert_type(PS_poly == PS_poly, bool) +assert_type(PS_poly != PS_poly, bool) + +assert_type(-PS_poly, npp.Polynomial) +assert_type(+PS_poly, npp.Polynomial) + +assert_type(PS_poly + 5, npp.Polynomial) +assert_type(PS_poly - 5, npp.Polynomial) +assert_type(PS_poly * 5, npp.Polynomial) +assert_type(PS_poly / 5, npp.Polynomial) +assert_type(PS_poly // 5, npp.Polynomial) +assert_type(PS_poly % 5, npp.Polynomial) + +assert_type(PS_poly + PS_leg, npp.Polynomial) +assert_type(PS_poly - PS_leg, npp.Polynomial) +assert_type(PS_poly * PS_leg, npp.Polynomial) +assert_type(PS_poly / PS_leg, npp.Polynomial) +assert_type(PS_poly // PS_leg, npp.Polynomial) +assert_type(PS_poly % PS_leg, npp.Polynomial) + +assert_type(5 + PS_poly, npp.Polynomial) +assert_type(5 - PS_poly, npp.Polynomial) +assert_type(5 * PS_poly, npp.Polynomial) +assert_type(5 / PS_poly, npp.Polynomial) +assert_type(5 // PS_poly, npp.Polynomial) +assert_type(5 % PS_poly, npp.Polynomial) + +assert_type(divmod(PS_poly, 5), tuple[npp.Polynomial, npp.Polynomial]) +assert_type(PS_poly**1, npp.Polynomial) +assert_type(PS_poly**1.0, npp.Polynomial) From c59ef5b7576a312f06e8e00758bf13f245d8a159 Mon Sep 17 
00:00:00 2001
From: jorenham
Date: Tue, 16 Jul 2024 22:17:54 +0200
Subject: [PATCH 817/980] TYP: Fix several minor typing inconsistencies in
 ``numpy.polynomial``

---
 numpy/polynomial/_polybase.pyi  |   5 +-
 numpy/polynomial/_polytypes.pyi | 576 +++++++++++++++++++-------------
 numpy/polynomial/polyutils.pyi  |   8 +-
 3 files changed, 342 insertions(+), 247 deletions(-)

diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi
index ed3e45406092..7519a755f528 100644
--- a/numpy/polynomial/_polybase.pyi
+++ b/numpy/polynomial/_polybase.pyi
@@ -40,7 +40,7 @@ from ._polytypes import (
     _SeriesLikeInt_co,
     _SeriesLikeCoef_co,
 
-    _ArrayLikeNumberObject_co,
+    _ArrayLikeCoefObject_co,
     _ArrayLikeCoef_co,
 )
@@ -122,7 +122,7 @@ class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta):
     def __call__(
         self,
         /,
-        arg: _ArrayLikeNumberObject_co,
+        arg: _ArrayLikeCoefObject_co,
     ) -> npt.NDArray[np.object_]: ...
 
     def __str__(self, /) -> str: ...
@@ -143,7 +143,6 @@ class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta):
     def __radd__(self: _Self, x: _AnyOther, /) -> _Self: ...
     def __rsub__(self: _Self, x: _AnyOther, /) -> _Self: ...
     def __rmul__(self: _Self, x: _AnyOther, /) -> _Self: ...
-    def __rdiv__(self: _Self, x: _AnyOther, /) -> _Self: ...
    def __rtruediv__(self: _Self, x: _AnyOther, /) -> _Self: ...
     def __rfloordiv__(self: _Self, x: _AnyOther, /) -> _Self: ...
     def __rmod__(self: _Self, x: _AnyOther, /) -> _Self: ...
diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi
index 6d2fb757c64f..98b202065d2b 100644
--- a/numpy/polynomial/_polytypes.pyi
+++ b/numpy/polynomial/_polytypes.pyi
@@ -1,11 +1,10 @@
-import decimal
-import numbers
 import sys
 from collections.abc import Callable, Sequence
 from typing import (
     TYPE_CHECKING,
     Any,
     Literal,
+    NoReturn,
     Protocol,
     SupportsIndex,
     SupportsInt,
@@ -39,7 +38,9 @@ elif TYPE_CHECKING:
 else:
     LiteralString: TypeAlias = str
 
-_T = TypeVar("_T", bound=object)
+_T = TypeVar("_T")
+_T_contra = TypeVar("_T_contra", contravariant=True)
+
 _Tuple2: TypeAlias = tuple[_T, _T]
 
 _V = TypeVar("_V")
@@ -53,9 +54,31 @@ _SCT_co = TypeVar(
     covariant=True,
 )
 
+@final
 class _SupportsArray(Protocol[_SCT_co]):
     def __array__(self, /) -> npt.NDArray[_SCT_co]: ...
 
+@final
+class _SupportsCoefOps(Protocol[_T_contra]):
+    # compatible with e.g. `int`, `float`, `complex`, `Decimal`, `Fraction`,
+    # and `ABCPolyBase`
+    def __eq__(self, x: object, /) -> bool: ...
+    def __ne__(self, x: object, /) -> bool: ...
+
+    def __neg__(self: _Self, /) -> _Self: ...
+    def __pos__(self: _Self, /) -> _Self: ...
+
+    def __add__(self: _Self, x: _T_contra, /) -> _Self: ...
+    def __sub__(self: _Self, x: _T_contra, /) -> _Self: ...
+    def __mul__(self: _Self, x: _T_contra, /) -> _Self: ...
+    def __truediv__(self: _Self, x: _T_contra, /) -> _Self | float: ...
+    def __pow__(self: _Self, x: _T_contra, /) -> _Self | float: ...
+
+    def __radd__(self: _Self, x: _T_contra, /) -> _Self: ...
+    def __rsub__(self: _Self, x: _T_contra, /) -> _Self: ...
+    def __rmul__(self: _Self, x: _T_contra, /) -> _Self: ...
+    def __rtruediv__(self: _Self, x: _T_contra, /) -> _Self | float: ...
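
(Illustrative sketch: `_SupportsCoefOps` is structural, so exact-arithmetic scalar types qualify without any numpy-specific base class.) For example, `fractions.Fraction` satisfies the protocol, which is what keeps object-dtype coefficient series usable:

from fractions import Fraction

import numpy.polynomial.polynomial as P

# object-dtype series; Fraction arithmetic stays exact
c = P.polyadd([Fraction(1, 2)], [Fraction(1, 3)])
print(c)  # [Fraction(5, 6)]
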
+ _Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] _FloatSeries: TypeAlias = _Series[np.floating[Any]] @@ -72,19 +95,16 @@ _CoefArray: TypeAlias = npt.NDArray[np.inexact[Any] | np.object_] _Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_SCT]] _Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_SCT]] - _AnyInt: TypeAlias = SupportsInt | SupportsIndex -# NOTE: `decimal.Decimal` isn't compatible with `numbers.Real`, but e.g. -# `fractions.Fraction` is. -_NumberObjectLike_co: TypeAlias = ( - np.object_ - | numbers.Complex - | decimal.Decimal -) -_CoefLike_co: TypeAlias = _NumberLike_co | _NumberObjectLike_co +_CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps +_CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co # The term "series" is used here to refer to 1-d arrays of numeric scalars. +_SeriesLikeBool_co: TypeAlias = ( + _SupportsArray[np.bool] + | Sequence[bool | np.bool] +) _SeriesLikeInt_co: TypeAlias = ( _SupportsArray[np.integer[Any] | np.bool] | Sequence[_IntLike_co] @@ -97,9 +117,9 @@ _SeriesLikeComplex_co: TypeAlias = ( _SupportsArray[np.integer[Any] | np.inexact[Any] | np.bool] | Sequence[_ComplexLike_co] ) -_SeriesLikeNumberObject_co: TypeAlias = ( +_SeriesLikeObject_co: TypeAlias = ( _SupportsArray[np.object_] - | Sequence[_NumberObjectLike_co] + | Sequence[_CoefObjectLike_co] ) _SeriesLikeCoef_co: TypeAlias = ( # npt.NDArray[np.number[Any] | np.bool | np.object_] @@ -107,16 +127,15 @@ _SeriesLikeCoef_co: TypeAlias = ( | Sequence[_CoefLike_co] ) -_ArrayLikeNumberObject_co: TypeAlias = ( - _NumberObjectLike_co - | _ArrayLikeComplex_co - | _SeriesLikeNumberObject_co - | _NestedSequence[_SeriesLikeNumberObject_co] +_ArrayLikeCoefObject_co: TypeAlias = ( + _CoefObjectLike_co + | _SeriesLikeObject_co + | _NestedSequence[_SeriesLikeObject_co] ) _ArrayLikeCoef_co: TypeAlias = ( npt.NDArray[np.number[Any] | np.bool | np.object_] | _ArrayLikeNumber_co - | _ArrayLikeNumberObject_co + | _ArrayLikeCoefObject_co ) _Name_co = TypeVar("_Name_co", bound=LiteralString, covariant=True) @@ -130,24 +149,30 @@ _Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_SCT]] @final class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__(self, /, off: _SCT, scl: _SCT) -> _Line[_SCT]: ... # type: ignore[overload-overlap] + def __call__(self, /, off: _SCT, scl: _SCT) -> _Line[_SCT]: ... @overload - def __call__(self, /, off: int, scl: int) -> _Line[np.int_] : ... # type: ignore[overload-overlap] + def __call__(self, /, off: int, scl: int) -> _Line[np.int_] : ... @overload def __call__(self, /, off: float, scl: float) -> _Line[np.float64]: ... @overload def __call__( - self, /, + self, + /, off: complex, scl: complex, ) -> _Line[np.complex128]: ... @overload - def __call__(self, /, off: object, scl: object) -> _Line[np.object_]: ... + def __call__( + self, + /, + off: _SupportsCoefOps, + scl: _SupportsCoefOps, + ) -> _Line[np.object_]: ... @final class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... # type: ignore[overload-overlap] + def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload def __call__(self, /, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ... 
@overload @@ -156,20 +181,30 @@ class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): @final class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, + c1: _SeriesLikeBool_co, + c2: _SeriesLikeBool_co, + ) -> NoReturn: ... + @overload + def __call__( + self, + /, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co, ) -> _FloatSeries: ... @overload def __call__( - self, /, + self, + /, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co, ) -> _ComplexSeries: ... @overload def __call__( - self, /, + self, + /, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co, ) -> _ObjectSeries: ... @@ -177,7 +212,7 @@ class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): @final class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... # type: ignore[overload-overlap] + def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload def __call__(self, /, c: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload @@ -186,7 +221,7 @@ class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): @final class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... # type: ignore[overload-overlap] + def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload def __call__(self, /, pol: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload @@ -195,304 +230,305 @@ class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): @final class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, c: _SeriesLikeFloat_co, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., + pow: _IntLike_co, + maxpower: None | _IntLike_co = ..., ) -> _FloatSeries: ... @overload def __call__( - self, /, + self, + /, c: _SeriesLikeComplex_co, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., + pow: _IntLike_co, + maxpower: None | _IntLike_co = ..., ) -> _ComplexSeries: ... @overload def __call__( - self, /, + self, + /, c: _SeriesLikeCoef_co, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., + pow: _IntLike_co, + maxpower: None | _IntLike_co = ..., ) -> _ObjectSeries: ... @final class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, c: _ArrayLikeFloat_co, m: SupportsIndex = ..., - scl: _NumberLike_co = ..., + scl: _FloatLike_co = ..., axis: SupportsIndex = ..., ) -> _FloatArray: ... @overload def __call__( - self, /, + self, + /, c: _ArrayLikeComplex_co, m: SupportsIndex = ..., - scl: _NumberLike_co = ..., + scl: _ComplexLike_co = ..., axis: SupportsIndex = ..., ) -> _ComplexArray: ... @overload def __call__( - self, /, - c: _ArrayLikeNumberObject_co, + self, + /, + c: _ArrayLikeCoef_co, m: SupportsIndex = ..., - scl: _NumberLike_co = ..., + scl: _CoefLike_co = ..., axis: SupportsIndex = ..., ) -> _ObjectArray: ... 
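
(A note on the new `NoReturn` overload in `_FuncBinOp` above, with a static-only illustrative sketch.) Boolean coefficient series are deliberately typed as invalid: the bool overload matches first and returns `NoReturn`, so type checkers flag any use of the result, while the runtime behaviour of e.g. `polysub` on boolean arrays would raise anyway. Other integer inputs promote to floating series as before:

import numpy as np
import numpy.polynomial.polynomial as P

ok = P.polyadd(np.array([1, 2]), np.array([3.0]))     # 1-d floating array
bad = P.polyadd(np.array([True]), np.array([False]))  # statically NoReturn
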
 @final
 class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]):
     @overload
-    def __call__( # type: ignore[overload-overlap]
-        self, /,
+    def __call__(
+        self,
+        /,
         c: _ArrayLikeFloat_co,
         m: SupportsIndex = ...,
-        k: _NumberLike_co | Sequence[_NumberLike_co] = ...,
-        lbnd: _NumberLike_co = ...,
-        scl: _NumberLike_co = ...,
+        k: _FloatLike_co | _SeriesLikeFloat_co = ...,
+        lbnd: _FloatLike_co = ...,
+        scl: _FloatLike_co = ...,
         axis: SupportsIndex = ...,
     ) -> _FloatArray: ...
     @overload
     def __call__(
-        self, /,
+        self,
+        /,
         c: _ArrayLikeComplex_co,
         m: SupportsIndex = ...,
-        k: _NumberLike_co | Sequence[_NumberLike_co] = ...,
-        lbnd: _NumberLike_co = ...,
-        scl: _NumberLike_co = ...,
+        k: _ComplexLike_co | _SeriesLikeComplex_co = ...,
+        lbnd: _ComplexLike_co = ...,
+        scl: _ComplexLike_co = ...,
         axis: SupportsIndex = ...,
     ) -> _ComplexArray: ...
     @overload
     def __call__(
-        self, /,
-        c: _ArrayLikeNumberObject_co,
+        self,
+        /,
+        c: _ArrayLikeCoef_co,
         m: SupportsIndex = ...,
-        k: _NumberLike_co | Sequence[_CoefLike_co] = ...,
-        lbnd: _NumberLike_co = ...,
-        scl: _NumberLike_co = ...,
+        k: _CoefLike_co | _SeriesLikeCoef_co = ...,
+        lbnd: _CoefLike_co = ...,
+        scl: _CoefLike_co = ...,
         axis: SupportsIndex = ...,
     ) -> _ObjectArray: ...
 
-_AnyRealRoots: TypeAlias = (
-    _Series[np.floating[Any] | np.integer[Any]]
-    | Sequence[_FloatLike_co]
-)
-_AnyComplexRoots: TypeAlias = (
-    _Series[np.number[Any]]
-    | Sequence[_NumberLike_co]
-)
-_AnyObjectRoots: TypeAlias = _ObjectSeries | Sequence[_NumberObjectLike_co]
-_AnyRoots: TypeAlias = _CoefSeries | Sequence[_CoefLike_co]
-
-_AnyRealPoints: TypeAlias = (
-    npt.NDArray[np.floating[Any] | np.integer[Any]]
-    | tuple[_ArrayLikeFloat_co, ...]
-    | list[_ArrayLikeFloat_co]
-)
-_AnyComplexPoints: TypeAlias = (
-    npt.NDArray[np.number[Any]]
-    | tuple[_ArrayLikeComplex_co, ...]
-    | list[_ArrayLikeComplex_co]
-)
-_AnyObjectPoints: TypeAlias = (
-    _ObjectArray
-    | tuple[_ArrayLikeNumberObject_co, ...]
-    | list[_ArrayLikeNumberObject_co]
-)
-_AnyPoints: TypeAlias = (
-    npt.NDArray[np.number[Any] | np.object_]
-    | tuple[_ArrayLikeCoef_co, ...]
-    | list[_ArrayLikeCoef_co]
-)
-
 @final
 class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]):
     @overload
-    def __call__( # type: ignore[overload-overlap]
-        self, /,
+    def __call__(
+        self,
+        /,
         x: _FloatLike_co,
         r: _FloatLike_co,
         tensor: bool = ...,
     ) -> np.floating[Any]: ...
     @overload
     def __call__(
-        self, /,
+        self,
+        /,
         x: _NumberLike_co,
         r: _NumberLike_co,
         tensor: bool = ...,
     ) -> np.complexfloating[Any, Any]: ...
     @overload
-    def __call__( # type: ignore[overload-overlap]
-        self, /,
-        x: _CoefLike_co,
-        r: _CoefLike_co,
-        tensor: bool = ...,
-    ) -> object: ...
-    @overload
     def __call__(
-        self, /,
-        x: _FloatLike_co | _AnyRealPoints,
+        self,
+        /,
+        x: _FloatLike_co | _ArrayLikeFloat_co,
         r: _ArrayLikeFloat_co,
         tensor: bool = ...,
     ) -> _FloatArray: ...
     @overload
     def __call__(
-        self, /,
-        x: _NumberLike_co | _AnyComplexPoints,
+        self,
+        /,
+        x: _NumberLike_co | _ArrayLikeComplex_co,
         r: _ArrayLikeComplex_co,
         tensor: bool = ...,
     ) -> _ComplexArray: ...
     @overload
     def __call__(
-        self, /,
-        x: _CoefLike_co | _AnyPoints,
+        self,
+        /,
+        x: _CoefLike_co | _ArrayLikeCoef_co,
         r: _ArrayLikeCoef_co,
         tensor: bool = ...,
     ) -> _ObjectArray: ...
+    @overload
+    def __call__(
+        self,
+        /,
+        x: _CoefLike_co,
+        r: _CoefLike_co,
+        tensor: bool = ...,
+    ) -> _SupportsCoefOps: ...
@final class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, x: _FloatLike_co, - c: _AnyRealRoots, + c: _SeriesLikeFloat_co, tensor: bool = ..., ) -> np.floating[Any]: ... @overload def __call__( - self, /, + self, + /, x: _NumberLike_co, - c: _AnyComplexRoots, + c: _SeriesLikeComplex_co, tensor: bool = ..., ) -> np.complexfloating[Any, Any]: ... @overload def __call__( - self, /, - x: _CoefLike_co, - c: _AnyObjectRoots, - tensor: bool = ..., - ) -> object: ... - @overload - def __call__( # type: ignore[overload-overlap] - self, /, - x: _AnyRealPoints, + self, + /, + x: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co, tensor: bool = ..., ) -> _FloatArray: ... @overload def __call__( - self, /, - x: _AnyComplexPoints, + self, + /, + x: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co, tensor: bool = ..., ) -> _ComplexArray: ... @overload def __call__( - self, /, - x: _AnyPoints, + self, + /, + x: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co, tensor: bool = ..., ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + c: _SeriesLikeObject_co, + tensor: bool = ..., + ) -> _SupportsCoefOps: ... @final class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, x: _FloatLike_co, y: _FloatLike_co, - c: _AnyRealRoots, + c: _SeriesLikeFloat_co, ) -> np.floating[Any]: ... @overload def __call__( - self, /, + self, + /, x: _NumberLike_co, y: _NumberLike_co, - c: _AnyComplexRoots, + c: _SeriesLikeComplex_co, ) -> np.complexfloating[Any, Any]: ... @overload def __call__( - self, /, - x: _CoefLike_co, - y: _CoefLike_co, - c: _AnyRoots, - ) -> object: ... - @overload - def __call__( # type: ignore[overload-overlap] - self, /, - x: _AnyRealPoints, - y: _AnyRealPoints, + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co, ) -> _FloatArray: ... @overload def __call__( - self, /, - x: _AnyComplexPoints, - y: _AnyComplexPoints, + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co, ) -> _ComplexArray: ... @overload def __call__( - self, /, - x: _AnyPoints, - y: _AnyPoints, + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co, ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + y: _CoefLike_co, + c: _SeriesLikeCoef_co, + ) -> _SupportsCoefOps: ... @final class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, x: _FloatLike_co, y: _FloatLike_co, z: _FloatLike_co, - c: _AnyRealRoots + c: _SeriesLikeFloat_co ) -> np.floating[Any]: ... @overload def __call__( - self, /, + self, + /, x: _NumberLike_co, y: _NumberLike_co, z: _NumberLike_co, - c: _AnyComplexRoots, + c: _SeriesLikeComplex_co, ) -> np.complexfloating[Any, Any]: ... @overload def __call__( - self, /, - x: _CoefLike_co, - y: _CoefLike_co, - z: _CoefLike_co, - c: _AnyRoots, - ) -> object: ... - @overload - def __call__( # type: ignore[overload-overlap] - self, /, - x: _AnyRealPoints, - y: _AnyRealPoints, - z: _AnyRealPoints, + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + z: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co, ) -> _FloatArray: ... 
@overload def __call__( - self, /, - x: _AnyComplexPoints, - y: _AnyComplexPoints, - z: _AnyComplexPoints, + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + z: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co, ) -> _ComplexArray: ... @overload def __call__( - self, /, - x: _AnyPoints, - y: _AnyPoints, - z: _AnyPoints, + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + z: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co, ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + y: _CoefLike_co, + z: _CoefLike_co, + c: _SeriesLikeCoef_co, + ) -> _SupportsCoefOps: ... _AnyValF: TypeAlias = Callable[ [npt.ArrayLike, npt.ArrayLike, bool], @@ -502,10 +538,10 @@ _AnyValF: TypeAlias = Callable[ @final class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] + def __call__( self, val_f: _AnyValF, - c: _AnyRealRoots, + c: _SeriesLikeFloat_co, /, *args: _FloatLike_co, ) -> np.floating[Any]: ... @@ -513,25 +549,17 @@ class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): def __call__( self, val_f: _AnyValF, - c: _AnyComplexRoots, + c: _SeriesLikeComplex_co, /, *args: _NumberLike_co, ) -> np.complexfloating[Any, Any]: ... @overload def __call__( - self, - val_f: _AnyValF, - c: _AnyObjectRoots, - /, - *args: _NumberObjectLike_co, - ) -> object: ... - @overload - def __call__( # type: ignore[overload-overlap] self, val_f: _AnyValF, c: _ArrayLikeFloat_co, /, - *args: _AnyRealPoints, + *args: _ArrayLikeFloat_co, ) -> _FloatArray: ... @overload def __call__( @@ -539,7 +567,7 @@ class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): val_f: _AnyValF, c: _ArrayLikeComplex_co, /, - *args: _AnyComplexPoints, + *args: _ArrayLikeComplex_co, ) -> _ComplexArray: ... @overload def __call__( @@ -547,32 +575,44 @@ class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): val_f: _AnyValF, c: _ArrayLikeCoef_co, /, - *args: _AnyObjectPoints, + *args: _ArrayLikeCoef_co, ) -> _ObjectArray: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _SeriesLikeObject_co, + /, + *args: _CoefObjectLike_co, + ) -> _SupportsCoefOps: ... @final class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, x: _ArrayLikeFloat_co, deg: SupportsIndex, ) -> _FloatArray: ... @overload def __call__( - self, /, + self, + /, x: _ArrayLikeComplex_co, deg: SupportsIndex, ) -> _ComplexArray: ... @overload def __call__( - self, /, + self, + /, x: _ArrayLikeCoef_co, deg: SupportsIndex, ) -> _ObjectArray: ... @overload def __call__( - self, /, + self, + /, x: npt.ArrayLike, deg: SupportsIndex, ) -> _CoefArray: ... @@ -582,29 +622,33 @@ _AnyDegrees: TypeAlias = Sequence[SupportsIndex] @final class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: _AnyDegrees, ) -> _FloatArray: ... @overload def __call__( - self, /, + self, + /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: _AnyDegrees, ) -> _ComplexArray: ... @overload def __call__( - self, /, + self, + /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, deg: _AnyDegrees, ) -> _ObjectArray: ... 
@overload def __call__( - self, /, + self, + /, x: npt.ArrayLike, y: npt.ArrayLike, deg: _AnyDegrees, @@ -613,8 +657,9 @@ class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): @final class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, z: _ArrayLikeFloat_co, @@ -622,7 +667,8 @@ class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): ) -> _FloatArray: ... @overload def __call__( - self, /, + self, + /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, z: _ArrayLikeComplex_co, @@ -630,7 +676,8 @@ class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): ) -> _ComplexArray: ... @overload def __call__( - self, /, + self, + /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, z: _ArrayLikeCoef_co, @@ -638,7 +685,8 @@ class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): ) -> _ObjectArray: ... @overload def __call__( - self, /, + self, + /, x: npt.ArrayLike, y: npt.ArrayLike, z: npt.ArrayLike, @@ -654,22 +702,25 @@ _AnyFuncVander: TypeAlias = Callable[ @final class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, vander_fs: Sequence[_AnyFuncVander], points: Sequence[_ArrayLikeFloat_co], degrees: Sequence[SupportsIndex], ) -> _FloatArray: ... @overload def __call__( - self, /, + self, + /, vander_fs: Sequence[_AnyFuncVander], points: Sequence[_ArrayLikeComplex_co], degrees: Sequence[SupportsIndex], ) -> _ComplexArray: ... @overload def __call__( - self, /, + self, + /, vander_fs: Sequence[_AnyFuncVander], points: Sequence[ _ArrayLikeObject_co | _ArrayLikeComplex_co, @@ -678,17 +729,21 @@ class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): ) -> _ObjectArray: ... @overload def __call__( - self, /, + self, + /, vander_fs: Sequence[_AnyFuncVander], points: Sequence[npt.ArrayLike], degrees: Sequence[SupportsIndex], ) -> _CoefArray: ... +_FullFitResult: TypeAlias = Sequence[np.inexact[Any] | np.int32] + @final class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, x: _SeriesLikeFloat_co, y: _ArrayLikeFloat_co, deg: int | _SeriesLikeInt_co, @@ -698,7 +753,32 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): ) -> _FloatArray: ... @overload def __call__( - self, /, + self, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float, + full: Literal[True], + / + w: None | _SeriesLikeFloat_co = ..., + ) -> tuple[_FloatArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float = ..., + *, + full: Literal[True], + w: None | _SeriesLikeFloat_co = ..., + ) -> tuple[_FloatArray, _FullFitResult]: ... + + @overload + def __call__( + self, + /, x: _SeriesLikeComplex_co, y: _ArrayLikeComplex_co, deg: int | _SeriesLikeInt_co, @@ -708,7 +788,32 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): ) -> _ComplexArray: ... @overload def __call__( - self, /, + self, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float, + full: Literal[True], + /, + w: None | _SeriesLikeFloat_co = ..., + ) -> tuple[_ComplexArray, _FullFitResult]: ... 
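
(Hedged usage sketch of the `full: Literal[False]` / `Literal[True]` split being spelled out in the `_FuncFit` overloads; the variables below are placeholders.) The default returns only the coefficients, while `full=True` switches the declared return type to a `(coefficients, diagnostics)` pair:

import numpy as np
import numpy.polynomial.polynomial as P

x = np.linspace(-1.0, 1.0, 9)
y = x**2

coef = P.polyfit(x, y, 2)                   # coefficient array only
coef, info = P.polyfit(x, y, 2, full=True)  # plus [resid, rank, sv, rcond]
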
+ @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float = ..., + *, + full: Literal[True], + w: None | _SeriesLikeFloat_co = ..., + ) -> tuple[_ComplexArray, _FullFitResult]: ... + + @overload + def __call__( + self, + /, x: _SeriesLikeComplex_co, y: _ArrayLikeCoef_co, deg: int | _SeriesLikeInt_co, @@ -726,10 +831,11 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): full: Literal[True], /, w: None | _SeriesLikeFloat_co = ..., - ) -> tuple[_CoefArray, Sequence[np.inexact[Any] | np.int32]]: ... + ) -> tuple[_ObjectArray, _FullFitResult]: ... @overload def __call__( - self, /, + self, + /, x: _SeriesLikeComplex_co, y: _ArrayLikeCoef_co, deg: int | _SeriesLikeInt_co, @@ -737,21 +843,20 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): *, full: Literal[True], w: None | _SeriesLikeFloat_co = ..., - ) -> tuple[_CoefArray, Sequence[np.inexact[Any] | np.int32]]: ... + ) -> tuple[_ObjectArray, _FullFitResult]: ... @final class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, c: _SeriesLikeFloat_co, ) -> _Series[np.float64]: ... @overload def __call__( - self, /, + self, + /, c: _SeriesLikeComplex_co, ) -> _Series[np.complex128]: ... @overload @@ -763,13 +868,15 @@ _Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_SCT]] @final class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, c: _SeriesLikeFloat_co, ) -> _Companion[np.float64]: ... @overload def __call__( - self, /, + self, + /, c: _SeriesLikeComplex_co, ) -> _Companion[np.complex128]: ... @overload @@ -778,33 +885,28 @@ class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): @final class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): def __call__( - self, /, + self, + /, deg: SupportsIndex, ) -> _Tuple2[_Series[np.float64]]: ... @final class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): @overload - def __call__( # type: ignore[overload-overlap] - self, /, + def __call__( + self, + /, c: _ArrayLikeFloat_co, ) -> npt.NDArray[np.float64]: ... @overload def __call__( - self, /, + self, + /, c: _ArrayLikeComplex_co, ) -> npt.NDArray[np.complex128]: ... @overload def __call__(self, /, c: _ArrayLikeCoef_co) -> _ObjectArray: ... -_N_pts = TypeVar("_N_pts", bound=int) - @final class _FuncPts(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( # type: ignore[overload-overlap] - self, /, - npts: _N_pts, - ) -> np.ndarray[tuple[_N_pts], np.dtype[np.float64]]: ... - @overload def __call__(self, /, npts: _AnyInt) -> _Series[np.float64]: ... diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index f000b9f067fe..9299b23975b1 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -230,12 +230,6 @@ def mapdomain( new: _SeriesLikeComplex_co, ) -> np.complexfloating[Any, Any]: ... @overload -def mapdomain( - x: _CoefLike_co, - old: _SeriesLikeCoef_co, - new: _SeriesLikeCoef_co, -) -> object: ... -@overload def mapdomain( # type: ignore[overload-overlap] x: npt.NDArray[np.floating[Any] | np.integer[Any]], old: npt.NDArray[np.floating[Any] | np.integer[Any]], @@ -273,7 +267,7 @@ def mapdomain( ) -> _ObjectSeries: ... 
@overload def mapdomain( - x: object, + x: _CoefLike_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co, ) -> object: ... From 3c9fe759e1c7348c3eb077298659ef06f65175b1 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 16 Jul 2024 22:18:40 +0200 Subject: [PATCH 818/980] TYP: Add type tests for ``numpy.polynomial`` shared series methods --- .../tests/data/reveal/polynomial_polybase.pyi | 3 +- .../tests/data/reveal/polynomial_series.pyi | 121 ++++++++++++++++++ 2 files changed, 123 insertions(+), 1 deletion(-) create mode 100644 numpy/typing/tests/data/reveal/polynomial_series.pyi diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index ebb5cb5bee2c..60e92709a2e6 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -218,7 +218,8 @@ assert_type(5 * PS_poly, npp.Polynomial) assert_type(5 / PS_poly, npp.Polynomial) assert_type(5 // PS_poly, npp.Polynomial) assert_type(5 % PS_poly, npp.Polynomial) - assert_type(divmod(PS_poly, 5), tuple[npp.Polynomial, npp.Polynomial]) +assert_type(divmod(5, PS_poly), tuple[npp.Polynomial, npp.Polynomial]) + assert_type(PS_poly**1, npp.Polynomial) assert_type(PS_poly**1.0, npp.Polynomial) diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi b/numpy/typing/tests/data/reveal/polynomial_series.pyi new file mode 100644 index 000000000000..2961b43501d9 --- /dev/null +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -0,0 +1,121 @@ +from collections.abc import Sequence +import sys +from typing import Any, TypeAlias + +import numpy as np +import numpy.polynomial as npp +import numpy.typing as npt + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] +_ArrFloat1D64: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] +_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating[Any, Any]]] +_ArrComplex1D128: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] +_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +AR_b: npt.NDArray[np.bool] +AR_u4: npt.NDArray[np.uint32] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] + +PS_poly: npp.Polynomial + +assert_type(npp.polynomial.polyroots(AR_f8), _ArrFloat1D64) +assert_type(npp.polynomial.polyroots(AR_c16), _ArrComplex1D128) +assert_type(npp.polynomial.polyroots(AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polyfromroots(AR_f8), _ArrFloat1D) +assert_type(npp.polynomial.polyfromroots(AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polyfromroots(AR_O), _ArrObject1D) + +# assert_type(npp.polynomial.polyadd(AR_b, AR_b), NoReturn) +assert_type(npp.polynomial.polyadd(AR_u4, AR_b), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_i8, AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_f8, AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_i8, AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polyadd(AR_O, AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polymulx(AR_u4), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_f8), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polymulx(AR_O), _ArrObject1D) + 
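
(Editorial note on the dtype pattern this new reveal file pins down, with a small runtime counterpart; illustrative only.) Boolean, unsigned, and signed integer coefficient arrays all promote to floating results, complex stays complex, and object dtype is preserved:

import numpy as np
import numpy.polynomial.polynomial as P

print(P.polymulx(np.array([1, 2], dtype=np.int64)).dtype)  # float64
print(P.polymulx(np.array([1j, 2j])).dtype)                # complex128
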
+assert_type(npp.polynomial.polypow(AR_u4, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_i8, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_f8, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_c16, 2), _ArrComplex1D) +assert_type(npp.polynomial.polypow(AR_O, 2), _ArrObject1D) + +# assert_type(npp.polynomial.polyder(PS_poly), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyder(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyder(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyder(AR_O, m=2), npt.NDArray[np.object_]) + +# assert_type(npp.polynomial.polyint(PS_poly), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyint(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyint(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval(AR_b, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval(AR_u4, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval(AR_i8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyval(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval2d(AR_b, AR_b, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval2d(AR_u4, AR_u4, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval2d(AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyval2d(AR_O, AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval3d(AR_b, AR_b, AR_b, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval3d(AR_u4, AR_u4, AR_u4, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval3d(AR_f8, AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyval3d(AR_O, AR_O, AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander(AR_f8, 3), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvander(AR_c16, 3), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyvander(AR_O, 3), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander2d(AR_f8, AR_f8, [4, 2]), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvander2d(AR_c16, AR_c16, [4, 2]), 
npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyvander2d(AR_O, AR_O, [4, 2]), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander3d(AR_f8, AR_f8, AR_f8, [4, 3, 2]), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvander3d(AR_c16, AR_c16, AR_c16, [4, 3, 2]), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyvander3d(AR_O, AR_O, AR_O, [4, 3, 2]), npt.NDArray[np.object_]) + +assert_type( + npp.polynomial.polyfit(AR_f8, AR_f8, 2), + npt.NDArray[np.floating[Any]], +) +assert_type( + npp.polynomial.polyfit(AR_f8, AR_i8, 1, full=True), + tuple[npt.NDArray[np.floating[Any]], Sequence[np.inexact[Any] | np.int32]], +) +assert_type( + npp.polynomial.polyfit(AR_c16, AR_f8, 2), + npt.NDArray[np.complexfloating[Any, Any]], +) +assert_type( + npp.polynomial.polyfit(AR_f8, AR_c16, 1, full=True)[0], + npt.NDArray[np.complexfloating[Any, Any]], +) From 56a9226c339643f82bd0799b8b4375f7e3b225a9 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 16 Jul 2024 22:28:23 +0200 Subject: [PATCH 819/980] TYP: Fix the type-tests, and remove ``numpy.polynomial`` "fail" tests due to (many) mypy bugs --- numpy/polynomial/_polytypes.pyi | 2 +- .../tests/data/fail/polynomial_series.pyi | 30 ------------------- 2 files changed, 1 insertion(+), 31 deletions(-) delete mode 100644 numpy/typing/tests/data/fail/polynomial_series.pyi diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 98b202065d2b..54771c0581e4 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -759,7 +759,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): deg: int | _SeriesLikeInt_co, rcond: None | float, full: Literal[True], - / + /, w: None | _SeriesLikeFloat_co = ..., ) -> tuple[_FloatArray, _FullFitResult]: ... 
@overload diff --git a/numpy/typing/tests/data/fail/polynomial_series.pyi b/numpy/typing/tests/data/fail/polynomial_series.pyi deleted file mode 100644 index b698066010ff..000000000000 --- a/numpy/typing/tests/data/fail/polynomial_series.pyi +++ /dev/null @@ -1,30 +0,0 @@ -import numpy as np -import numpy.polynomial as npp -import numpy.typing as npt - -AR_f8: npt.NDArray[np.float64] -AR_c16: npt.NDArray[np.complex128] -AR_O: npt.NDArray[np.object_] -AR_U: npt.NDArray[np.str_] - -poly_obj: npp.polynomial.Polynomial - -npp.polynomial.polymul(AR_f8, AR_U) # E: incompatible type -npp.polynomial.polydiv(AR_f8, AR_U) # E: incompatible type - -5**poly_obj # E: No overload variant - -npp.polynomial.polyint(AR_U) # E: incompatible type -npp.polynomial.polyint(AR_f8, m=1j) # E: No overload variant - -npp.polynomial.polyder(AR_U) # E: incompatible type -npp.polynomial.polyder(AR_f8, m=1j) # E: No overload variant - -npp.polynomial.polyfit(AR_O, AR_f8, 1) # E: incompatible type -npp.polynomial.polyfit(AR_f8, AR_f8, 1, rcond=1j) # E: No overload variant -npp.polynomial.polyfit(AR_f8, AR_f8, 1, w=AR_c16) # E: incompatible type -npp.polynomial.polyfit(AR_f8, AR_f8, 1, cov="bob") # E: No overload variant - -npp.polynomial.polyval(AR_f8, AR_U) # E: incompatible type -npp.polynomial.polyadd(AR_f8, AR_U) # E: incompatible type -npp.polynomial.polysub(AR_f8, AR_U) # E: incompatible type From ef3278aa941135178807b8d487374ee106813715 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 17 Jul 2024 01:24:09 +0200 Subject: [PATCH 820/980] TYP: Add type tests for the ``numpy.polynomial`` series-specific methods --- numpy/polynomial/chebyshev.pyi | 30 +++++++++---------- .../tests/data/reveal/polynomial_series.pyi | 23 ++++++++++++++ 2 files changed, 37 insertions(+), 16 deletions(-) diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index e1d6e5d1ffda..067af81d635d 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -86,11 +86,11 @@ def _zseries_to_cseries(zs: npt.NDArray[_SCT]) -> _Series[_SCT]: ... def _zseries_mul( z1: npt.NDArray[_SCT], z2: npt.NDArray[_SCT], -) -> _Series[_SCT]: ... +) -> _Series[_SCT]: ... def _zseries_div( z1: npt.NDArray[_SCT], z2: npt.NDArray[_SCT], -) -> _Series[_SCT]: ... +) -> _Series[_SCT]: ... def _zseries_der(zs: npt.NDArray[_SCT]) -> _Series[_SCT]: ... def _zseries_int(zs: npt.NDArray[_SCT]) -> _Series[_SCT]: ... @@ -130,27 +130,25 @@ chebpts1: _FuncPts[L["chebpts1"]] chebpts2: _FuncPts[L["chebpts2"]] # keep in sync with `Chebyshev.interpolate` -_RT = TypeVar( - "_RT", - np.float64, - np.complex128, - np.floating[Any], - np.complexfloating[Any, Any], - np.number[Any], - np.object_, -) +_RT = TypeVar("_RT", bound=np.number[Any] | np.bool | np.object_) +@overload +def chebinterpolate( + func: np.ufunc, + deg: _IntLike_co, + args: tuple[()] = ..., +) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... @overload def chebinterpolate( func: Callable[[npt.NDArray[np.float64]], _RT], deg: _IntLike_co, args: tuple[()] = ..., -) -> npt.NDArray[_RT]: ... +) -> npt.NDArray[_RT]: ... @overload def chebinterpolate( func: Callable[Concatenate[npt.NDArray[np.float64], ...], _RT], deg: _IntLike_co, args: Iterable[Any], -) -> npt.NDArray[_RT]: ... +) -> npt.NDArray[_RT]: ... _Self = TypeVar("_Self", bound=object) @@ -164,7 +162,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): deg: _IntLike_co, domain: None | _SeriesLikeCoef_co = ..., args: tuple[()] = ..., - ) -> _Self: ... + ) -> _Self: ... 
@overload @classmethod def interpolate( @@ -178,7 +176,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): domain: None | _SeriesLikeCoef_co = ..., *, args: Iterable[Any], - ) -> _Self: ... + ) -> _Self: ... @overload @classmethod def interpolate( @@ -191,4 +189,4 @@ class Chebyshev(ABCPolyBase[L["T"]]): domain: None | _SeriesLikeCoef_co, args: Iterable[Any], /, - ) -> _Self: ... + ) -> _Self: ... diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi b/numpy/typing/tests/data/reveal/polynomial_series.pyi index 2961b43501d9..a60d05afd01d 100644 --- a/numpy/typing/tests/data/reveal/polynomial_series.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -25,6 +25,7 @@ AR_c16: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] PS_poly: npp.Polynomial +PS_cheb: npp.Chebyshev assert_type(npp.polynomial.polyroots(AR_f8), _ArrFloat1D64) assert_type(npp.polynomial.polyroots(AR_c16), _ArrComplex1D128) @@ -119,3 +120,25 @@ assert_type( npp.polynomial.polyfit(AR_f8, AR_c16, 1, full=True)[0], npt.NDArray[np.complexfloating[Any, Any]], ) + +assert_type(npp.chebyshev.chebgauss(2), tuple[_ArrFloat1D64, _ArrFloat1D64]) + +assert_type(npp.chebyshev.chebweight(AR_f8), npt.NDArray[np.float64]) +assert_type(npp.chebyshev.chebweight(AR_c16), npt.NDArray[np.complex128]) +assert_type(npp.chebyshev.chebweight(AR_O), npt.NDArray[np.object_]) + +assert_type(npp.chebyshev.poly2cheb(AR_f8), _ArrFloat1D) +assert_type(npp.chebyshev.poly2cheb(AR_c16), _ArrComplex1D) +assert_type(npp.chebyshev.poly2cheb(AR_O), _ArrObject1D) + +assert_type(npp.chebyshev.cheb2poly(AR_f8), _ArrFloat1D) +assert_type(npp.chebyshev.cheb2poly(AR_c16), _ArrComplex1D) +assert_type(npp.chebyshev.cheb2poly(AR_O), _ArrObject1D) + +assert_type(npp.chebyshev.chebpts1(6), _ArrFloat1D64) +assert_type(npp.chebyshev.chebpts2(6), _ArrFloat1D64) + +assert_type( + npp.chebyshev.chebinterpolate(np.tanh, 3), + npt.NDArray[np.float64 | np.complex128 | np.object_], +) From 30eb358d99e497b346f5b2aa4cfe1938f687c8d3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 4 Jul 2024 11:03:26 +0200 Subject: [PATCH 821/980] TYP: include the `|` prefix for `dtype` char codes This fixes e.g. `numpy.dtype('|?')` from being falsely rejected by typecheckers, which is allowed at runtime. 
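For example, all of the following are equivalent at runtime, because the
'|' byte-order character means "not applicable" and is simply ignored for
these dtypes (a minimal illustrative sketch, not part of the patched code):

    import numpy as np

    # Each group constructs the same dtype object at runtime.
    assert np.dtype("|?") == np.dtype("?") == np.dtype("=?")   # bool
    assert np.dtype("|u1") == np.dtype("u1")                   # uint8

Before this change, only the spellings without the '|' prefix were listed
in the ``Literal`` char-code types, so type checkers rejected the '|'
variants even though NumPy accepts them.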
---
 numpy/_typing/_char_codes.py | 218 ++++++++++++++++++++----------------
 1 file changed, 123 insertions(+), 95 deletions(-)

diff --git a/numpy/_typing/_char_codes.py b/numpy/_typing/_char_codes.py
index e5c4fa5d1bd2..1d36cc81e018 100644
--- a/numpy/_typing/_char_codes.py
+++ b/numpy/_typing/_char_codes.py
@@ -1,113 +1,141 @@
 from typing import Literal
 
-_BoolCodes = Literal["?", "=?", "<?", ">?", "bool", "bool_"]
+_BoolCodes = Literal["bool", "bool_", "?", "|?", "=?", "<?", ">?"]
 
-_UInt8Codes = Literal["uint8", "u1", "=u1", "<u1", ">u1"]
-_UInt16Codes = Literal["uint16", "u2", "=u2", "<u2", ">u2"]
-_UInt32Codes = Literal["uint32", "u4", "=u4", "<u4", ">u4"]
-_UInt64Codes = Literal["uint64", "u8", "=u8", "<u8", ">u8"]
+_UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "<u1", ">u1"]
+_UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "<u2", ">u2"]
+_UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "<u4", ">u4"]
+_UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "<u8", ">u8"]
 
-_Int8Codes = Literal["int8", "i1", "=i1", "<i1", ">i1"]
-_Int16Codes = Literal["int16", "i2", "=i2", "<i2", ">i2"]
-_Int32Codes = Literal["int32", "i4", "=i4", "<i4", ">i4"]
-_Int64Codes = Literal["int64", "i8", "=i8", "<i8", ">i8"]
+_Int8Codes = Literal["int8", "i1", "|i1", "=i1", "<i1", ">i1"]
+_Int16Codes = Literal["int16", "i2", "|i2", "=i2", "<i2", ">i2"]
+_Int32Codes = Literal["int32", "i4", "|i4", "=i4", "<i4", ">i4"]
+_Int64Codes = Literal["int64", "i8", "|i8", "=i8", "<i8", ">i8"]
 
-_Float16Codes = Literal["float16", "f2", "=f2", "<f2", ">f2"]
-_Float32Codes = Literal["float32", "f4", "=f4", "<f4", ">f4"]
-_Float64Codes = Literal["float64", "f8", "=f8", "<f8", ">f8"]
+_Float16Codes = Literal["float16", "f2", "|f2", "=f2", "<f2", ">f2"]
+_Float32Codes = Literal["float32", "f4", "|f4", "=f4", "<f4", ">f4"]
+_Float64Codes = Literal["float64", "f8", "|f8", "=f8", "<f8", ">f8"]
 
-_Complex64Codes = Literal["complex64", "c8", "=c8", "<c8", ">c8"]
-_Complex128Codes = Literal["complex128", "c16", "=c16", "<c16", ">c16"]
+_Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "<c8", ">c8"]
+_Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "<c16", ">c16"]
 
-_ByteCodes = Literal["byte", "b", "=b", "<b", ">b"]
-_ShortCodes = Literal["short", "h", "=h", "<h", ">h"]
-_IntCCodes = Literal["intc", "i", "=i", "<i", ">i"]
-_IntPCodes = Literal["intp", "int", "int_", "n", "=n", "<n", ">n"]
-_LongCodes = Literal["long", "l", "=l", "<l", ">l"]
+_ByteCodes = Literal["byte", "b", "|b", "=b", "<b", ">b"]
+_ShortCodes = Literal["short", "h", "|h", "=h", "<h", ">h"]
+_IntCCodes = Literal["intc", "i", "|i", "=i", "<i", ">i"]
+_IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "<n", ">n"]
+_LongCodes = Literal["long", "l", "|l", "=l", "<l", ">l"]
 _IntCodes = _IntPCodes
-_LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"]
+_LongLongCodes = Literal["longlong", "q", "|q", "=q", "<q", ">q"]
 
-_UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"]
-_UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"]
-_UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"]
-_UIntPCodes = Literal["uintp", "uint", "N", "=N", "<N", ">N"]
-_ULongCodes = Literal["ulong", "L", "=L", "<L", ">L"]
+_UByteCodes = Literal["ubyte", "B", "|B", "=B", "<B", ">B"]
+_UShortCodes = Literal["ushort", "H", "|H", "=H", "<H", ">H"]
+_UIntCCodes = Literal["uintc", "I", "|I", "=I", "<I", ">I"]
+_UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "<N", ">N"]
+_ULongCodes = Literal["ulong", "L", "|L", "=L", "<L", ">L"]
 _UIntCodes = _UIntPCodes
-_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"]
+_ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "<Q", ">Q"]
 
-_HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
-_SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
-_DoubleCodes = Literal["double", "float", "d", "=d", "<d", ">d"]
-_LongDoubleCodes = Literal["longdouble", "g", "=g", "<g", ">g"]
+_HalfCodes = Literal["half", "e", "|e", "=e", "<e", ">e"]
+_SingleCodes = Literal["single", "f", "|f", "=f", "<f", ">f"]
+_DoubleCodes = Literal["double", "float", "d", "|d", "=d", "<d", ">d"]
+_LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "<g", ">g"]
 
-_CSingleCodes = Literal["csingle", "F", "=F", "<F", ">F"]
-_CDoubleCodes = Literal["cdouble", "complex", "D", "=D", "<D", ">D"]
-_CLongDoubleCodes = Literal["clongdouble", "G", "=G", "<G", ">G"]
+_CSingleCodes = Literal["csingle", "F", "|F", "=F", "<F", ">F"]
+_CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "<D", ">D"]
+_CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "<G", ">G"]
 
-_StrCodes = Literal["str", "str_", "unicode", "U", "=U", "<U", ">U"]
-_BytesCodes = Literal["bytes", "bytes_", "S", "=S", "<S", ">S"]
-_VoidCodes = Literal["void", "V", "=V", "<V", ">V"]
-_ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"]
+_StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "<U", ">U"]
+_BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "<S", ">S"]
+_VoidCodes = Literal["void", "V", "|V", "=V", "<V", ">V"]
+_ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "<O", ">O"]
 
 _DT64Codes = Literal[
-    "datetime64", "=datetime64", "<datetime64", ">datetime64",
-    "datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]",
-    "datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]",
-    "datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]",
-    "datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]",
-    "datetime64[h]", "=datetime64[h]", "<datetime64[h]", ">datetime64[h]",
-    "datetime64[m]", "=datetime64[m]", "<datetime64[m]", ">datetime64[m]",
-    "datetime64[s]", "=datetime64[s]", "<datetime64[s]", ">datetime64[s]",
-    "datetime64[ms]", "=datetime64[ms]", "<datetime64[ms]", ">datetime64[ms]",
-    "datetime64[us]", "=datetime64[us]", "<datetime64[us]", ">datetime64[us]",
-    "datetime64[ns]", "=datetime64[ns]", "<datetime64[ns]", ">datetime64[ns]",
-    "datetime64[ps]", "=datetime64[ps]", "<datetime64[ps]", ">datetime64[ps]",
-    "datetime64[fs]", "=datetime64[fs]", "<datetime64[fs]", ">datetime64[fs]",
-    "datetime64[as]", "=datetime64[as]", "<datetime64[as]", ">datetime64[as]",
-    "M", "=M", "<M", ">M",
-    "M8", "=M8", "<M8", ">M8",
-    "M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
-    "M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
-    "M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
-    "M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
-    "M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
-    "M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
-    "M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
-    "M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
-    "M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
-    "M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
-    "M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
-    "M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
-    "M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
+    "datetime64", "|datetime64", "=datetime64",
+    "<datetime64", ">datetime64",
+    "datetime64[Y]", "|datetime64[Y]", "=datetime64[Y]",
+    "<datetime64[Y]", ">datetime64[Y]",
+    "datetime64[M]", "|datetime64[M]", "=datetime64[M]",
+    "<datetime64[M]", ">datetime64[M]",
+    "datetime64[W]", "|datetime64[W]", "=datetime64[W]",
+    "<datetime64[W]", ">datetime64[W]",
+    "datetime64[D]", "|datetime64[D]", "=datetime64[D]",
+    "<datetime64[D]", ">datetime64[D]",
+    "datetime64[h]", "|datetime64[h]", "=datetime64[h]",
+    "<datetime64[h]", ">datetime64[h]",
+    "datetime64[m]", "|datetime64[m]", "=datetime64[m]",
+    "<datetime64[m]", ">datetime64[m]",
+    "datetime64[s]", "|datetime64[s]", "=datetime64[s]",
+    "<datetime64[s]", ">datetime64[s]",
+    "datetime64[ms]", "|datetime64[ms]", "=datetime64[ms]",
+    "<datetime64[ms]", ">datetime64[ms]",
+    "datetime64[us]", "|datetime64[us]", "=datetime64[us]",
+    "<datetime64[us]", ">datetime64[us]",
+    "datetime64[ns]", "|datetime64[ns]", "=datetime64[ns]",
+    "<datetime64[ns]", ">datetime64[ns]",
+    "datetime64[ps]", "|datetime64[ps]", "=datetime64[ps]",
+    "<datetime64[ps]", ">datetime64[ps]",
+    "datetime64[fs]", "|datetime64[fs]", "=datetime64[fs]",
+    "<datetime64[fs]", ">datetime64[fs]",
+    "datetime64[as]", "|datetime64[as]", "=datetime64[as]",
+    "<datetime64[as]", ">datetime64[as]",
+    "M", "|M", "=M", "<M", ">M",
+    "M8", "|M8", "=M8", "<M8", ">M8",
+    "M8[Y]", "|M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
+    "M8[M]", "|M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
+    "M8[W]", "|M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
+    "M8[D]", "|M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
+    "M8[h]", "|M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
+    "M8[m]", "|M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
+    "M8[s]", "|M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
+    "M8[ms]", "|M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
+    "M8[us]", "|M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
+    "M8[ns]", "|M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
+    "M8[ps]", "|M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
+    "M8[fs]", "|M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
+    "M8[as]", "|M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
 ]
 _TD64Codes = Literal[
-    "timedelta64", "=timedelta64", "<timedelta64", ">timedelta64",
-    "timedelta64[Y]", "=timedelta64[Y]", "<timedelta64[Y]", ">timedelta64[Y]",
-    "timedelta64[M]", "=timedelta64[M]", "<timedelta64[M]", ">timedelta64[M]",
-    "timedelta64[W]", "=timedelta64[W]", "<timedelta64[W]", ">timedelta64[W]",
-    "timedelta64[D]", "=timedelta64[D]", "<timedelta64[D]", ">timedelta64[D]",
-    "timedelta64[h]", "=timedelta64[h]", "<timedelta64[h]", ">timedelta64[h]",
-    "timedelta64[m]", "=timedelta64[m]", "<timedelta64[m]", ">timedelta64[m]",
-    "timedelta64[s]", "=timedelta64[s]", "<timedelta64[s]", ">timedelta64[s]",
-    "timedelta64[ms]", "=timedelta64[ms]", "<timedelta64[ms]", ">timedelta64[ms]",
-    "timedelta64[us]", "=timedelta64[us]", "<timedelta64[us]", ">timedelta64[us]",
-    "timedelta64[ns]", "=timedelta64[ns]", "<timedelta64[ns]", ">timedelta64[ns]",
-    "timedelta64[ps]", "=timedelta64[ps]", "<timedelta64[ps]", ">timedelta64[ps]",
-    "timedelta64[fs]", "=timedelta64[fs]", "<timedelta64[fs]", ">timedelta64[fs]",
-    "timedelta64[as]", "=timedelta64[as]", "<timedelta64[as]", ">timedelta64[as]",
-    "m", "=m", "<m", ">m",
-    "m8", "=m8", "<m8", ">m8",
-    "m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
-    "m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
-    "m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
-    "m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
-    "m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
-    "m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
-    "m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
-    "m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
-    "m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
-    "m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
-    "m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
-    "m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
-    "m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
+    "timedelta64", "|timedelta64", "=timedelta64",
+    "<timedelta64", ">timedelta64",
+    "timedelta64[Y]", "|timedelta64[Y]", "=timedelta64[Y]",
+    "<timedelta64[Y]", ">timedelta64[Y]",
+    "timedelta64[M]", "|timedelta64[M]", "=timedelta64[M]",
+    "<timedelta64[M]", ">timedelta64[M]",
+    "timedelta64[W]", "|timedelta64[W]", "=timedelta64[W]",
+    "<timedelta64[W]", ">timedelta64[W]",
+    "timedelta64[D]", "|timedelta64[D]", "=timedelta64[D]",
+    "<timedelta64[D]", ">timedelta64[D]",
+    "timedelta64[h]", "|timedelta64[h]", "=timedelta64[h]",
+    "<timedelta64[h]", ">timedelta64[h]",
+    "timedelta64[m]", "|timedelta64[m]", "=timedelta64[m]",
+    "<timedelta64[m]", ">timedelta64[m]",
+    "timedelta64[s]", "|timedelta64[s]", "=timedelta64[s]",
+    "<timedelta64[s]", ">timedelta64[s]",
+    "timedelta64[ms]", "|timedelta64[ms]", "=timedelta64[ms]",
+    "<timedelta64[ms]", ">timedelta64[ms]",
+    "timedelta64[us]", "|timedelta64[us]", "=timedelta64[us]",
+    "<timedelta64[us]", ">timedelta64[us]",
+    "timedelta64[ns]", "|timedelta64[ns]", "=timedelta64[ns]",
+    "<timedelta64[ns]", ">timedelta64[ns]",
+    "timedelta64[ps]", "|timedelta64[ps]", "=timedelta64[ps]",
+    "<timedelta64[ps]", ">timedelta64[ps]",
+    "timedelta64[fs]", "|timedelta64[fs]", "=timedelta64[fs]",
+    "<timedelta64[fs]", ">timedelta64[fs]",
+    "timedelta64[as]", "|timedelta64[as]", "=timedelta64[as]",
+    "<timedelta64[as]", ">timedelta64[as]",
+    "m", "|m", "=m", "<m", ">m",
+    "m8", "|m8", "=m8", "<m8", ">m8",
+    "m8[Y]", "|m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
+    "m8[M]", "|m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
+    "m8[W]", "|m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
+    "m8[D]", "|m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
+    "m8[h]", "|m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
+    "m8[m]", "|m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
+    "m8[s]", "|m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
+    "m8[ms]", "|m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
+    "m8[us]", "|m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
+    "m8[ns]", "|m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
+    "m8[ps]", "|m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
+    "m8[fs]", "|m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
+    "m8[as]", "|m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
 ]

From 70a7a413cf7be4ec7717d7c4ca43569a9bae58ce Mon Sep 17 00:00:00 2001
From: jorenham
Date: Sun, 14 Jul 2024 21:50:50 +0200
Subject: [PATCH 822/980] TYP: Improved `numpy.generic` rich comparison
operator type annotations. --- numpy/__init__.pyi | 37 ++++++----- numpy/_typing/_callable.pyi | 63 ++++++++++++++++--- .../typing/tests/data/reveal/comparisons.pyi | 8 +-- 3 files changed, 78 insertions(+), 30 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a3b5726b5a24..ecd788c2a0ea 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -150,7 +150,10 @@ from numpy._typing._callable import ( _FloatDivMod, _ComplexOp, _NumberOp, - _ComparisonOp, + _ComparisonOpLT, + _ComparisonOpLE, + _ComparisonOpGT, + _ComparisonOpGE, ) # NOTE: Numpy's mypy plugin is used for removing the types unavailable @@ -2795,10 +2798,10 @@ class number(generic, Generic[_NBit1]): # type: ignore __rpow__: _NumberOp __truediv__: _NumberOp __rtruediv__: _NumberOp - __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] class bool(generic): def __init__(self, value: object = ..., /) -> None: ... @@ -2841,10 +2844,10 @@ class bool(generic): __rmod__: _BoolMod __divmod__: _BoolDivMod __rdivmod__: _BoolDivMod - __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] bool_: TypeAlias = bool @@ -2897,10 +2900,10 @@ class datetime64(generic): @overload def __sub__(self, other: _TD64Like_co, /) -> datetime64: ... def __rsub__(self, other: datetime64, /) -> timedelta64: ... - __lt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] - __le__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] - __gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] - __ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __lt__: _ComparisonOpLT[datetime64, _ArrayLikeDT64_co] + __le__: _ComparisonOpLE[datetime64, _ArrayLikeDT64_co] + __gt__: _ComparisonOpGT[datetime64, _ArrayLikeDT64_co] + __ge__: _ComparisonOpGE[datetime64, _ArrayLikeDT64_co] _IntValue: TypeAlias = SupportsInt | _CharLike_co | SupportsIndex _FloatValue: TypeAlias = None | _CharLike_co | SupportsFloat | SupportsIndex @@ -3025,10 +3028,10 @@ class timedelta64(generic): def __rmod__(self, other: timedelta64, /) -> timedelta64: ... def __divmod__(self, other: timedelta64, /) -> tuple[int64, timedelta64]: ... def __rdivmod__(self, other: timedelta64, /) -> tuple[int64, timedelta64]: ... 
- __lt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] - __le__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] - __gt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] - __ge__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __lt__: _ComparisonOpLT[_TD64Like_co, _ArrayLikeTD64_co] + __le__: _ComparisonOpLE[_TD64Like_co, _ArrayLikeTD64_co] + __gt__: _ComparisonOpGT[_TD64Like_co, _ArrayLikeTD64_co] + __ge__: _ComparisonOpGE[_TD64Like_co, _ArrayLikeTD64_co] class unsignedinteger(integer[_NBit1]): # NOTE: `uint64 + signedinteger -> float64` diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 843a0d07c2fb..2dd2233665fc 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -11,7 +11,9 @@ See the `Mypy documentation`_ on protocols for more details. from __future__ import annotations from typing import ( + TypeAlias, TypeVar, + final, overload, Any, NoReturn, @@ -48,7 +50,8 @@ _T1 = TypeVar("_T1") _T2 = TypeVar("_T2") _T1_contra = TypeVar("_T1_contra", contravariant=True) _T2_contra = TypeVar("_T2_contra", contravariant=True) -_2Tuple = tuple[_T1, _T1] + +_2Tuple: TypeAlias = tuple[_T1, _T1] _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) @@ -317,20 +320,62 @@ class _ComplexOp(Protocol[_NBit1]): class _NumberOp(Protocol): def __call__(self, other: _NumberLike_co, /) -> Any: ... +@final class _SupportsLT(Protocol): - def __lt__(self, other: Any, /) -> object: ... + def __lt__(self, other: Any, /) -> Any: ... + +@final +class _SupportsLE(Protocol): + def __le__(self, other: Any, /) -> Any: ... +@final class _SupportsGT(Protocol): - def __gt__(self, other: Any, /) -> object: ... + def __gt__(self, other: Any, /) -> Any: ... -class _ComparisonOp(Protocol[_T1_contra, _T2_contra]): +@final +class _SupportsGE(Protocol): + def __ge__(self, other: Any, /) -> Any: ... + +@final +class _ComparisonOpLT(Protocol[_T1_contra, _T2_contra]): @overload def __call__(self, other: _T1_contra, /) -> np.bool: ... @overload def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... @overload - def __call__( - self, - other: _SupportsLT | _SupportsGT | _NestedSequence[_SupportsLT | _SupportsGT], - /, - ) -> Any: ... + def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsGT, /) -> np.bool: ... + +@final +class _ComparisonOpLE(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsGE], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsGE, /) -> np.bool: ... + +@final +class _ComparisonOpGT(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsLT], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsLT, /) -> np.bool: ... + +@final +class _ComparisonOpGE(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsGT, /) -> np.bool: ... 
diff --git a/numpy/typing/tests/data/reveal/comparisons.pyi b/numpy/typing/tests/data/reveal/comparisons.pyi index 78c6a8e207fe..034efbef377e 100644 --- a/numpy/typing/tests/data/reveal/comparisons.pyi +++ b/numpy/typing/tests/data/reveal/comparisons.pyi @@ -38,10 +38,10 @@ SEQ = (0, 1, 2, 3, 4) # object-like comparisons -assert_type(i8 > fractions.Fraction(1, 5), Any) -assert_type(i8 > [fractions.Fraction(1, 5)], Any) -assert_type(i8 > decimal.Decimal("1.5"), Any) -assert_type(i8 > [decimal.Decimal("1.5")], Any) +assert_type(i8 > fractions.Fraction(1, 5), np.bool) +assert_type(i8 > [fractions.Fraction(1, 5)], npt.NDArray[np.bool]) +assert_type(i8 > decimal.Decimal("1.5"), np.bool) +assert_type(i8 > [decimal.Decimal("1.5")], npt.NDArray[np.bool]) # Time structures From 5a390b23fe66e7e438f8e238dd83568a6d67de57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Mon, 22 Jul 2024 10:49:33 +0200 Subject: [PATCH 823/980] Reduce number of typing overloads for `to_device` --- numpy/__init__.pyi | 11 ++--------- numpy/_core/src/multiarray/array_api_standard.c | 2 +- numpy/typing/tests/data/reveal/ndarray_misc.pyi | 6 ++++++ 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 3028c1f4c0f5..6b12cbc06450 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -643,6 +643,7 @@ def show_config() -> None: ... _NdArraySubClass = TypeVar("_NdArraySubClass", bound=NDArray[Any]) _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) _ByteOrder: TypeAlias = L["S", "<", ">", "=", "|", "L", "B", "N", "I", "little", "big", "native"] +_SCT = TypeVar("_SCT", bound=generic) @final class dtype(Generic[_DTypeScalar_co]): @@ -2582,15 +2583,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __dlpack_device__(self) -> tuple[int, L[0]]: ... @overload - def to_device(self: NDArray[_UnknownType], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[Any]: ... - @overload - def to_device(self: NDArray[np.bool], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[np.bool]: ... - @overload - def to_device(self: NDArray[_NumberType], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[_NumberType]: ... - @overload - def to_device(self: NDArray[timedelta64], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[timedelta64]: ... - @overload - def to_device(self: NDArray[object_], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[object_]: ... + def to_device(self: NDArray[_SCT], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[_SCT]: ... @overload def to_device(self: NDArray[Any], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[Any]: ... diff --git a/numpy/_core/src/multiarray/array_api_standard.c b/numpy/_core/src/multiarray/array_api_standard.c index b473dc0823d9..76612cff36fb 100644 --- a/numpy/_core/src/multiarray/array_api_standard.c +++ b/numpy/_core/src/multiarray/array_api_standard.c @@ -32,7 +32,7 @@ array_to_device(PyObject *self, PyObject *args, PyObject *kwds) if (strcmp(device, "cpu") != 0) { PyErr_Format(PyExc_ValueError, - "Unsupported device: %s.", device); + "Unsupported device: %s. 
Only 'cpu' is accepted.", device); return NULL; } diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index b9a5fe433653..783e18f5c632 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -26,6 +26,9 @@ i8: np.int64 B: SubClass AR_f8: npt.NDArray[np.float64] AR_i8: npt.NDArray[np.int64] +AR_u1: npt.NDArray[np.uint8] +AR_c8: npt.NDArray[np.complex64] +AR_m: npt.NDArray[np.timedelta64] AR_U: npt.NDArray[np.str_] AR_V: npt.NDArray[np.void] @@ -222,6 +225,9 @@ assert_type(f8.to_device("cpu"), np.float64) assert_type(i8.to_device("cpu"), np.int64) assert_type(AR_f8.to_device("cpu"), npt.NDArray[np.float64]) assert_type(AR_i8.to_device("cpu"), npt.NDArray[np.int64]) +assert_type(AR_u1.to_device("cpu"), npt.NDArray[np.uint8]) +assert_type(AR_c8.to_device("cpu"), npt.NDArray[np.complex64]) +assert_type(AR_m.to_device("cpu"), npt.NDArray[np.timedelta64]) assert_type(f8.__array_namespace__(), Any) assert_type(AR_f8.__array_namespace__(), Any) From 510619eaca1fa898cea46a36a004234fff39037b Mon Sep 17 00:00:00 2001 From: otieno-juma Date: Mon, 17 Jun 2024 12:20:00 -0500 Subject: [PATCH 824/980] DOC: AI-Gen examples for ma.put I used AI Llama 3 to help create these. @bmwoodruff and I reviewed them. [skip azp] [skip cirrus] --- numpy/ma/core.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 04f6b434b731..51c3495f69ad 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -7394,6 +7394,28 @@ def put(a, indices, values, mode='raise'): -------- MaskedArray.put + Examples + -------- + Putting values in a masked array: + + >>> a = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False]) + >>> np.ma.put(a, [1, 3], [10, 30]) + >>> a + masked_array(data=[ 1, 10, 3, 30], + mask=False, + fill_value=999999) + + Using put with a 2D array: + + >>> b = np.ma.array([[1, 2], [3, 4]], mask=[[False, True], [False, False]]) + >>> np.ma.put(b, [[0, 1], [1, 0]], [[10, 20], [30, 40]]) + >>> b + masked_array( + data=[[40, 30], + [ 3, 4]], + mask=False, + fill_value=999999) + """ # We can't use 'frommethod', the order of arguments is different try: From 04c951da2d173c106c2a76c5aaeb1fe4b8202be6 Mon Sep 17 00:00:00 2001 From: otieno-juma Date: Mon, 17 Jun 2024 10:45:08 -0500 Subject: [PATCH 825/980] DOC: AI generated examples for ma.left_shift. I used AI Llama 3 to help create these. @bmwoodruff and I reviewed them. 
[skip azp] [skip cirrus] --- numpy/ma/core.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 04f6b434b731..c4dae547202d 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -7337,6 +7337,33 @@ def left_shift(a, n): -------- numpy.left_shift + Examples + -------- + Shift with a masked array: + + >>> arr = np.ma.array([10, 20, 30], mask=[False, True, False]) + >>> np.ma.left_shift(arr, 1) + masked_array(data=[20, --, 60], + mask=[False, True, False], + fill_value=999999) + + Large shift: + + >>> np.ma.left_shift(10, 10) + masked_array(data=10240, + mask=False, + fill_value=999999) + + Shift with a scalar and an array: + + >>> scalar = 10 + >>> arr = np.ma.array([1, 2, 3], mask=[False, True, False]) + >>> np.ma.left_shift(scalar, arr) + masked_array(data=[20, --, 80], + mask=[False, True, False], + fill_value=999999) + + """ m = getmask(a) if m is nomask: From be863764d55d00919ea60cbd48d72e50dbce6b7f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 22 Jul 2024 11:20:28 -0600 Subject: [PATCH 826/980] TST, MAINT: Loosen required test precision `TestFFT1D::test_identity_long_short[longdouble]` has been failing on musl linux, but not by much. This loosens the specified atol a hair to avoid that. --- numpy/fft/tests/test_pocketfft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index ca60427a53ea..d1e4da2eb831 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -38,7 +38,7 @@ def test_identity_long_short(self, dtype): # Test with explicitly given number of points, both for n # smaller and for n larger than the input size. maxlen = 16 - atol = 4 * np.spacing(np.array(1., dtype=dtype)) + atol = 5 * np.spacing(np.array(1., dtype=dtype)) x = random(maxlen).astype(dtype) + 1j*random(maxlen).astype(dtype) xx = np.concatenate([x, np.zeros_like(x)]) xr = random(maxlen).astype(dtype) From 9f5d35802a7d4fd9767306e27896b74e9d5110de Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 22 Jul 2024 11:39:51 -0600 Subject: [PATCH 827/980] TST: move new strip tests to test_strings.py --- numpy/_core/tests/test_stringdtype.py | 27 --------------------------- numpy/_core/tests/test_strings.py | 23 +++++++++++++++++++++++ 2 files changed, 23 insertions(+), 27 deletions(-) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 7cf4a96f40cc..9ff3224947d9 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1673,30 +1673,3 @@ def test_arena_no_reuse_after_short(self): assert_array_equal(c, self.a) assert_array_equal(self.in_arena(c), False) assert_array_equal(self.is_on_heap(c), self.in_arena(self.a)) - - -STRIP_METHODS = ["lstrip", "rstrip", "strip"] - -@pytest.mark.parametrize("method", STRIP_METHODS) -@pytest.mark.parametrize( - "source,strip", - [ - ("λμ", "μ"), - ("λμ", "λ"), - ("λ"*5 + "μ"*2, "μ"), - ("λ" * 5 + "μ" * 2, "λ"), - ("λ" * 5 + "A" + "μ" * 2, "μλ"), - ("λμ" * 5, "μ"), - ("λμ" * 5, "λ"), - ] -) -def test_strip_functions_unicode(method, source, strip): - src_array = np.array([source], dtype=StringDType()) - - npy_func = getattr(np.strings, method) - py_func = getattr(str, method) - - expected = np.array([py_func(source, strip)], dtype=StringDType()) - actual = npy_func(src_array, strip) - - assert_array_equal(actual, expected) diff --git a/numpy/_core/tests/test_strings.py 
b/numpy/_core/tests/test_strings.py
index d74f2388e55b..a94b52939b1d 100644
--- a/numpy/_core/tests/test_strings.py
+++ b/numpy/_core/tests/test_strings.py
@@ -1073,6 +1073,29 @@ def test_rpartition(self, buf, sep, res1, res2, res3, dt):
         assert_array_equal(act3, res3)
         assert_array_equal(act1 + act2 + act3, buf)
 
+    @pytest.mark.parametrize("method", ["strip", "lstrip", "rstrip"])
+    @pytest.mark.parametrize(
+        "source,strip",
+        [
+            ("λμ", "μ"),
+            ("λμ", "λ"),
+            ("λ"*5 + "μ"*2, "μ"),
+            ("λ" * 5 + "μ" * 2, "λ"),
+            ("λ" * 5 + "A" + "μ" * 2, "μλ"),
+            ("λμ" * 5, "μ"),
+            ("λμ" * 5, "λ"),
+        ])
+    def test_strip_functions_unicode(self, source, strip, method, dt):
+        src_array = np.array([source], dtype=dt)
+
+        npy_func = getattr(np.strings, method)
+        py_func = getattr(str, method)
+
+        expected = np.array([py_func(source, strip)], dtype=dt)
+        actual = npy_func(src_array, strip)
+
+        assert_array_equal(actual, expected)
+
 
 class TestMixedTypeMethods:
     def test_center(self):

From 69c73f92e89dd585f3f018a5f257ae4dfbc1f011 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Mon, 22 Jul 2024 20:17:16 +0200
Subject: [PATCH 828/980] Apply suggestions from code review

Co-authored-by: Nathan Goldbaum

---
 numpy/_core/src/multiarray/scalartypes.c.src | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src
index cbd9e345717c..65a43c0a9f74 100644
--- a/numpy/_core/src/multiarray/scalartypes.c.src
+++ b/numpy/_core/src/multiarray/scalartypes.c.src
@@ -229,9 +229,9 @@ find_binary_operation_path(
      * If we are here, we need to operate on Python scalars. In general
      * that would just fails since NumPy doesn't know the other object!
      *
-     * However, NumPy (historically) often makes this work magically because
-     * it object ufuncs end up casting to object with `.item()` and that may
-     * returns Python type often (e.g. float for float32, float64)!
+     * However, NumPy (historically) made this often work magically because
+     * ufuncs for object dtype end up casting to object with `.item()`. This in
+     * turn often returns a Python type (e.g. float for float32, float64)!
      * Retrying then succeeds. So if (and only if) `self.item()` returns a new
      * type, we can safely attempt the operation (again) with that.
      */
@@ -245,6 +245,7 @@
         return 0;
     }
     /* The operation can't work and we will return NotImplemented */
+    Py_DECREF(self_item);
     return 0;
 }

From 3af87c4c52168969f55f68e272fa6e551dea89e3 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Mon, 22 Jul 2024 20:23:16 +0200
Subject: [PATCH 829/980] TST,MAINT: Rename scalar operator list for clarity

---
 numpy/_core/tests/test_scalarmath.py | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py
index b6fb8e961a16..cdbb2fad910a 100644
--- a/numpy/_core/tests/test_scalarmath.py
+++ b/numpy/_core/tests/test_scalarmath.py
@@ -29,11 +29,14 @@
 objecty_things = [object(), None]
 
-reasonable_operators_for_scalars = [
+binary_operators_for_scalars = [
     operator.lt, operator.le, operator.eq, operator.ne, operator.ge,
     operator.gt, operator.add, operator.floordiv, operator.mod,
     operator.mul, operator.pow, operator.sub, operator.truediv
 ]
+binary_operators_for_scalar_ints = binary_operators_for_scalars + [
+    operator.xor, operator.or_, operator.and_
+]
 
 # This compares scalarmath against ufuncs.
@@ -109,7 +112,7 @@ def check_ufunc_scalar_equivalence(op, arr1, arr2): @pytest.mark.slow @settings(max_examples=10000, deadline=2000) -@given(sampled_from(reasonable_operators_for_scalars), +@given(sampled_from(binary_operators_for_scalars), hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()), hynp.arrays(dtype=hynp.scalar_dtypes(), shape=())) def test_array_scalar_ufunc_equivalence(op, arr1, arr2): @@ -122,7 +125,7 @@ def test_array_scalar_ufunc_equivalence(op, arr1, arr2): @pytest.mark.slow -@given(sampled_from(reasonable_operators_for_scalars), +@given(sampled_from(binary_operators_for_scalars), hynp.scalar_dtypes(), hynp.scalar_dtypes()) def test_array_scalar_ufunc_dtypes(op, dt1, dt2): # Same as above, but don't worry about sampling weird values so that we @@ -865,7 +868,7 @@ def recursionlimit(n): @given(sampled_from(objecty_things), - sampled_from(reasonable_operators_for_scalars + [operator.xor]), + sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) def test_operator_object_left(o, op, type_): try: @@ -876,7 +879,7 @@ def test_operator_object_left(o, op, type_): @given(sampled_from(objecty_things), - sampled_from(reasonable_operators_for_scalars + [operator.xor]), + sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) def test_operator_object_right(o, op, type_): try: @@ -886,7 +889,7 @@ def test_operator_object_right(o, op, type_): pass -@given(sampled_from(reasonable_operators_for_scalars), +@given(sampled_from(binary_operators_for_scalars), sampled_from(types), sampled_from(types)) def test_operator_scalars(op, type1, type2): @@ -896,7 +899,7 @@ def test_operator_scalars(op, type1, type2): pass -@pytest.mark.parametrize("op", reasonable_operators_for_scalars) +@pytest.mark.parametrize("op", binary_operators_for_scalars) @pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) def test_longdouble_operators_with_obj(sctype, op): # This is/used to be tricky, because NumPy generally falls back to @@ -931,7 +934,7 @@ def test_longdouble_with_arrlike(sctype, op): assert_array_equal(op([1, 2], sctype(3)), op(np.array([1, 2]), 3)) -@pytest.mark.parametrize("op", reasonable_operators_for_scalars) +@pytest.mark.parametrize("op", binary_operators_for_scalars) @pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) @np.errstate(all="ignore") def test_longdouble_operators_with_large_int(sctype, op): @@ -1121,7 +1124,7 @@ def test_truediv_int(): @pytest.mark.slow @pytest.mark.parametrize("op", # TODO: Power is a bit special, but here mostly bools seem to behave oddly - [op for op in reasonable_operators_for_scalars if op is not operator.pow]) + [op for op in binary_operators_for_scalars if op is not operator.pow]) @pytest.mark.parametrize("sctype", types) @pytest.mark.parametrize("other_type", [float, int, complex]) @pytest.mark.parametrize("rop", [True, False]) From d43d677c34bf8c0b7bc01d97b34de38e2509fcbc Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 22 Jul 2024 20:29:22 +0200 Subject: [PATCH 830/980] DOC: Clarify inline docs slightly and add link --- numpy/_core/src/umath/scalarmath.c.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index af21887a6099..fe492805eae3 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -1264,7 +1264,7 @@ static PyObject * * even a subclass of a NumPy scalar (currently). 
 *
 * We drop through to the generic path here which checks for the
- * (c)longdouble infinite recursion problem.
+ * infinite recursion problem (gh-18548, gh-26767).
  */
 case PROMOTION_REQUIRED:
     /*

From db93110b2b18e741261c0d4b24f05637b7ca1251 Mon Sep 17 00:00:00 2001
From: Warren Weckesser
Date: Mon, 22 Jul 2024 14:44:50 -0400
Subject: [PATCH 831/980] ENH: Provide a hook for gufuncs to process core dimensions. (#26908)

The field `process_core_dims_func` is added to the PyUFuncObject. This is
a pointer to a function typedef'd as

    typedef int (PyUFunc_ProcessCoreDimsFunc)(
            struct _tagPyUFuncObject *ufunc,
            npy_intp *core_dim_sizes);

The author of a gufunc can set the field with a function that they
implement. The function will be called when the gufunc is called. (The
actual call is in the internal function _get_coredim_sizes.) The
user-defined function can set an exception and return an error status if
any of the core dimensions in `core_dim_sizes` do not satisfy the
assumptions of the gufunc.

The user-defined function can also *set* the value of core dimensions that
are passed in as -1, meaning the corresponding out parameter was not given.
This allows calculations such as pairwise distances (which generates
m*(m-1)/2 output values for an input with shape (m, n)) and full
convolution (generates m + n - 1 output values from two inputs with shapes
m and n) to be implemented as gufuncs with automatic allocation of the
output with the correct shape. The output shape is computed and set in the
user-defined function.

* MAINT: Update 2.1 C-API version and use it

---------

Co-authored-by: Sebastian Berg
---
 .../reference/c-api/generalized-ufuncs.rst    | 125 +++++++++++++++++-
 numpy/_core/code_generators/cversions.txt     |   3 +-
 numpy/_core/include/numpy/numpyconfig.h       |   3 +
 numpy/_core/include/numpy/ufuncobject.h       |  39 ++++++
 numpy/_core/meson.build                       |   3 +-
 numpy/_core/src/umath/_umath_tests.c.src      | 118 ++++++++++++++++-
 numpy/_core/src/umath/ufunc_object.c          |   9 ++
 numpy/_core/tests/test_ufunc.py               |  45 ++++++-
 8 files changed, 336 insertions(+), 9 deletions(-)

diff --git a/doc/source/reference/c-api/generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst
index 44b16f90eed4..b4750688b5e6 100644
--- a/doc/source/reference/c-api/generalized-ufuncs.rst
+++ b/doc/source/reference/c-api/generalized-ufuncs.rst
@@ -17,7 +17,7 @@ what the "core" dimensionality of the inputs is, as well as the corresponding
 dimensionality of the outputs (the element-wise ufuncs have zero core
 dimensions).  The list of the core dimensions for all arguments is called the
 "signature" of a ufunc.  For example, the
-ufunc numpy.add has signature ``(),()->()`` defining two scalar inputs
+ufunc ``numpy.add`` has signature ``(),()->()`` defining two scalar inputs
 and one scalar output.

 Another example is the function ``inner1d(a, b)`` with a signature of
@@ -57,10 +57,12 @@ taken when calling such a function.  An example would be the function
 ``euclidean_pdist(a)``, with signature ``(n,d)->(p)``, that given an array of
 ``n`` ``d``-dimensional vectors, computes all unique pairwise Euclidean
 distances among them.  The output dimension ``p`` must therefore be equal to
-``n * (n - 1) / 2``, but it is the caller's responsibility to pass in an
-output array of the right size.  If the size of a core dimension of an output
+``n * (n - 1) / 2``, but by default, it is the caller's responsibility to pass
+in an output array of the right size.
If the size of a core dimension of an output
 cannot be determined from a passed in input or output array, an error will be
-raised.
+raised. This can be changed by defining a ``PyUFunc_ProcessCoreDimsFunc`` function
+and assigning it to the ``process_core_dims_func`` field of the ``PyUFuncObject``
+structure. See below for more details.

 Note: Prior to NumPy 1.10.0, less strict checks were in place: missing core
 dimensions were created by prepending 1's to the shape as necessary, core
@@ -77,7 +79,7 @@ Elementary Function
    (e.g. adding two numbers is the most basic operation in adding two
    arrays). The ufunc applies the elementary function multiple times on
    different parts of the arrays. The input/output of elementary
-   functions can be vectors; e.g., the elementary function of inner1d
+   functions can be vectors; e.g., the elementary function of ``inner1d``
    takes two vectors as input.

 Signature
@@ -214,3 +216,116 @@ input/output arrays ``a``, ``b``, ``c``.

 Furthermore, ``dimensions`` will be ``[N, I, J]`` to define the size of ``N``
 of the loop and the sizes ``I`` and ``J`` for the core dimensions ``i`` and
 ``j``.

 Finally, ``steps`` will be ``[a_N, b_N, c_N, a_i, a_j, b_i]``, containing all
 necessary strides.
+
+Customizing core dimension size processing
+------------------------------------------
+
+The optional function of type ``PyUFunc_ProcessCoreDimsFunc``, stored
+on the ``process_core_dims_func`` attribute of the ufunc, provides the
+author of the ufunc a "hook" into the processing of the core dimensions
+of the arrays that were passed to the ufunc. The two primary uses of
+this "hook" are:
+
+* Check that constraints on the core dimensions required
+  by the ufunc are satisfied (and set an exception if they are not).
+* Compute output shapes for any output core dimensions that were not
+  determined by the input arrays.
+
+As an example of the first use, consider the generalized ufunc ``minmax``
+with signature ``(n)->(2)`` that simultaneously computes the minimum and
+maximum of a sequence. It should require that ``n > 0``, because
+the minimum and maximum of a sequence with length 0 are not meaningful.
+In this case, the ufunc author might define the function like this:
+
+    .. code-block:: c
+
+        int minmax_process_core_dims(PyUFuncObject *ufunc,
+                                     npy_intp *core_dim_sizes)
+        {
+            npy_intp n = core_dim_sizes[0];
+            if (n == 0) {
+                PyErr_SetString(PyExc_ValueError,
+                                "minmax requires the core dimension "
+                                "to be at least 1.");
+                return -1;
+            }
+            return 0;
+        }
+
+In this case, the length of the array ``core_dim_sizes`` will be 2.
+The second value in the array will always be 2, so there is no need
+for the function to inspect it. The core dimension ``n`` is stored
+in the first element. The function sets an exception and returns -1
+if it finds that ``n`` is 0.
+
+The second use for the "hook" is to compute the size of output arrays
+when the output arrays are not provided by the caller and one or more
+core dimension of the output is not also an input core dimension.
+If the ufunc does not have a function defined on the
+``process_core_dims_func`` attribute, an unspecified output core
+dimension size will result in an exception being raised. With the
+"hook" provided by ``process_core_dims_func``, the author of the ufunc
+can set the output size to whatever is appropriate for the ufunc.
+
+In the array passed to the "hook" function, core dimensions that
+were not determined by the input are indicated by having the value -1
The function can replace the -1 with +whatever value is appropriate for the ufunc, based on the core dimensions +that occurred in the input arrays. + +.. warning:: + The function must never change a value in ``core_dim_sizes`` that + is not -1 on input. Changing a value that was not -1 will generally + result in incorrect output from the ufunc, and could result in the + Python interpreter crashing. + +For example, consider the generalized ufunc ``conv1d`` for which +the elementary function computes the "full" convolution of two +one-dimensional arrays ``x`` and ``y`` with lengths ``m`` and ``n``, +respectively. The output of this convolution has length ``m + n - 1``. +To implement this as a generalized ufunc, the signature is set to +``(m),(n)->(p)``, and in the "hook" function, if the core dimension +``p`` is found to be -1, it is replaced with ``m + n - 1``. If ``p`` +is *not* -1, it must be verified that the given value equals ``m + n - 1``. +If it does not, the function must set an exception and return -1. +For a meaningful result, the operation also requires that ``m + n`` +is at least 1, i.e. both inputs can't have length 0. + +Here's how that might look in code: + + .. code-block:: c + + int conv1d_process_core_dims(PyUFuncObject *ufunc, + npy_intp *core_dim_sizes) + { + // core_dim_sizes will hold the core dimensions [m, n, p]. + // p will be -1 if the caller did not provide the out argument. + npy_intp m = core_dim_sizes[0]; + npy_intp n = core_dim_sizes[1]; + npy_intp p = core_dim_sizes[2]; + npy_intp required_p = m + n - 1; + + if (m == 0 && n == 0) { + // Disallow both inputs having length 0. + PyErr_SetString(PyExc_ValueError, + "conv1d: both inputs have core dimension 0; the function " + "requires that at least one input has size greater than 0."); + return -1; + } + if (p == -1) { + // Output array was not given in the call of the ufunc. + // Set the correct output size here. + core_dim_sizes[2] = required_p; + return 0; + } + // An output array *was* given. Validate its core dimension. 
+ if (p != required_p) { + PyErr_Format(PyExc_ValueError, + "conv1d: the core dimension p of the out parameter " + "does not equal m + n - 1, where m and n are the " + "core dimensions of the inputs x and y; got m=%zd " + "and n=%zd so p must be %zd, but got p=%zd.", + m, n, required_p, p); + return -1; + } + return 0; + } diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index ccba8a1c25b3..4ce44ada45bf 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -74,5 +74,6 @@ 0x00000011 = ca1aebdad799358149567d9d93cbca09 # Version 18 (NumPy 2.0.0) -# Version 18 (NumPy 2.1.0) No change 0x00000012 = 2b8f1f4da822491ff030b2b37dff07e3 +# Version 19 (NumPy 2.1.0) Only header additions +0x00000013 = 2b8f1f4da822491ff030b2b37dff07e3 diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index 0b6b2dda4290..b49d215614ac 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -81,6 +81,7 @@ #define NPY_1_24_API_VERSION 0x00000010 #define NPY_1_25_API_VERSION 0x00000011 #define NPY_2_0_API_VERSION 0x00000012 +#define NPY_2_1_API_VERSION 0x00000013 /* @@ -160,6 +161,8 @@ #define NPY_FEATURE_VERSION_STRING "1.25" #elif NPY_FEATURE_VERSION == NPY_2_0_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.0" +#elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.1" #else #error "Missing version string define for new NumPy version." #endif diff --git a/numpy/_core/include/numpy/ufuncobject.h b/numpy/_core/include/numpy/ufuncobject.h index dca375b32673..ada23626f70b 100644 --- a/numpy/_core/include/numpy/ufuncobject.h +++ b/numpy/_core/include/numpy/ufuncobject.h @@ -65,6 +65,39 @@ typedef int (PyUFunc_TypeResolutionFunc)( PyObject *type_tup, PyArray_Descr **out_dtypes); +/* + * This is the signature for the functions that may be assigned to the + * `process_core_dims_func` field of the PyUFuncObject structure. + * Implementation of this function is optional. This function is only used + * by generalized ufuncs (i.e. those with the field `core_enabled` set to 1). + * The function is called by the ufunc during the processing of the arguments + * of a call of the ufunc. The function can check the core dimensions of the + * input and output arrays and return -1 with an exception set if any + * requirements are not satisfied. If the caller of the ufunc didn't provide + * output arrays, the core dimensions associated with the output arrays (i.e. + * those that are not also used in input arrays) will have the value -1 in + * `core_dim_sizes`. This function can replace any output core dimensions + * that are -1 with a value that is appropriate for the ufunc. + * + * Parameter Description + * --------------- ------------------------------------------------------ + * ufunc The ufunc object + * core_dim_sizes An array with length `ufunc->core_num_dim_ix`. + * The core dimensions of the arrays passed to the ufunc + * will have been set. If the caller of the ufunc didn't + * provide the output array(s), the output-only core + * dimensions will have the value -1. + * + * The function must not change any element in `core_dim_sizes` that is + * not -1 on input. Doing so will result in incorrect output from the + * ufunc, and could result in a crash of the Python interpreter. + * + * The function must return 0 on success, -1 on failure (with an exception + * set). 
+ */
+typedef int (PyUFunc_ProcessCoreDimsFunc)(
+        struct _tagPyUFuncObject *ufunc,
+        npy_intp *core_dim_sizes);

 typedef struct _tagPyUFuncObject {
         PyObject_HEAD
@@ -191,6 +224,12 @@ typedef struct _tagPyUFuncObject {
         /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */
         PyObject *_loops;
 #endif
+    #if NPY_FEATURE_VERSION >= NPY_2_1_API_VERSION
+    /*
+     * Optional function to process core dimensions of a gufunc.
+     */
+    PyUFunc_ProcessCoreDimsFunc *process_core_dims_func;
+    #endif
 } PyUFuncObject;

 #include "arrayobject.h"
diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build
index 73bda069e0d8..ee8ef3fe0358 100644
--- a/numpy/_core/meson.build
+++ b/numpy/_core/meson.build
@@ -47,7 +47,8 @@ C_ABI_VERSION = '0x02000000'
 # 0x00000010 - 1.24.x
 # 0x00000011 - 1.25.x
 # 0x00000012 - 2.0.x
-C_API_VERSION = '0x00000012'
+# 0x00000013 - 2.1.x
+C_API_VERSION = '0x00000013'
 
 # Check whether we have a mismatch between the set C API VERSION and the
 # actual C API VERSION. Will raise a MismatchCAPIError if so.
diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src
index 8b5c3b65a9a4..c1bcc3c8957e 100644
--- a/numpy/_core/src/umath/_umath_tests.c.src
+++ b/numpy/_core/src/umath/_umath_tests.c.src
@@ -13,7 +13,7 @@
 #undef NPY_INTERNAL_BUILD
 #endif
 // for add_INT32_negative_indexed
-#define NPY_TARGET_VERSION NPY_2_0_API_VERSION
+#define NPY_TARGET_VERSION NPY_2_1_API_VERSION
 #include "numpy/arrayobject.h"
 #include "numpy/ufuncobject.h"
 #include "numpy/ndarrayobject.h"
@@ -761,6 +761,95 @@ add_INT32_negative_indexed(PyObject *module, PyObject *dict) {
     return 0;
 }
 
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// Define the gufunc 'conv1d_full'
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#define MAX(a, b) (((a) < (b)) ? (b) : (a))
+
+int conv1d_full_process_core_dims(PyUFuncObject *ufunc,
+                                  npy_intp *core_dim_sizes)
+{
+    //
+    // core_dim_sizes will hold the core dimensions [m, n, p].
+    // p will be -1 if the caller did not provide the out argument.
+    //
+    npy_intp m = core_dim_sizes[0];
+    npy_intp n = core_dim_sizes[1];
+    npy_intp p = core_dim_sizes[2];
+    npy_intp required_p = m + n - 1;
+
+    if (m == 0 && n == 0) {
+        PyErr_SetString(PyExc_ValueError,
+            "conv1d_full: both inputs have core dimension 0; the function "
+            "requires that at least one input has positive size.");
+        return -1;
+    }
+    if (p == -1) {
+        core_dim_sizes[2] = required_p;
+        return 0;
+    }
+    if (p != required_p) {
+        PyErr_Format(PyExc_ValueError,
+                "conv1d_full: the core dimension p of the out parameter "
+                "does not equal m + n - 1, where m and n are the core "
+                "dimensions of the inputs x and y; got m=%zd and n=%zd so "
+                "p must be %zd, but got p=%zd.",
+                m, n, required_p, p);
+        return -1;
+    }
+    return 0;
+}
+
+static void
+conv1d_full_double_loop(char **args,
+                        npy_intp const *dimensions,
+                        npy_intp const *steps,
+                        void *NPY_UNUSED(func))
+{
+    // Input and output arrays
+    char *p_x = args[0];
+    char *p_y = args[1];
+    char *p_out = args[2];
+    // Number of loops of conv1d_full calculations to execute.
+    npy_intp nloops = dimensions[0];
+    // Core dimensions
+    npy_intp m = dimensions[1];
+    npy_intp n = dimensions[2];
+    npy_intp p = dimensions[3];  // Must be m + n - 1.
+ // Core strides + npy_intp x_stride = steps[0]; + npy_intp y_stride = steps[1]; + npy_intp out_stride = steps[2]; + // Inner strides + npy_intp x_inner_stride = steps[3]; + npy_intp y_inner_stride = steps[4]; + npy_intp out_inner_stride = steps[5]; + + for (npy_intp loop = 0; loop < nloops; ++loop, p_x += x_stride, + p_y += y_stride, + p_out += out_stride) { + // Basic implementation of 1d convolution + for (npy_intp k = 0; k < p; ++k) { + double sum = 0.0; + for (npy_intp i = MAX(0, k - n + 1); i < MIN(m, k + 1); ++i) { + double x_i = *(double *)(p_x + i*x_inner_stride); + double y_k_minus_i = *(double *)(p_y + (k - i)*y_inner_stride); + sum += x_i * y_k_minus_i; + } + *(double *)(p_out + k*out_inner_stride) = sum; + } + } +} + +static PyUFuncGenericFunction conv1d_full_functions[] = { + (PyUFuncGenericFunction) &conv1d_full_double_loop +}; +static void *const conv1d_full_data[] = {NULL}; +static const char conv1d_full_typecodes[] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE}; + + static PyMethodDef UMath_TestsMethods[] = { {"test_signature", UMath_Tests_test_signature, METH_VARARGS, "Test signature parsing of ufunc. \n" @@ -830,6 +919,33 @@ PyMODINIT_FUNC PyInit__umath_tests(void) { return NULL; } + // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + // Define the gufunc 'conv1d_full' + // Shape signature is (m),(n)->(p) where p must be m + n - 1. + // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + PyUFuncObject *gufunc = (PyUFuncObject *) PyUFunc_FromFuncAndDataAndSignature( + conv1d_full_functions, + conv1d_full_data, + conv1d_full_typecodes, + 1, 2, 1, PyUFunc_None, "conv1d_full", + "convolution of x and y ('full' mode)", + 0, "(m),(n)->(p)"); + if (gufunc == NULL) { + Py_DECREF(m); + return NULL; + } + gufunc->process_core_dims_func = &conv1d_full_process_core_dims; + + int status = PyModule_AddObject(m, "conv1d_full", (PyObject *) gufunc); + if (status == -1) { + Py_DECREF(gufunc); + Py_DECREF(m); + return NULL; + } + + // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + #if Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 3715866a2a83..92bc7793f2ad 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -1590,6 +1590,13 @@ _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op, } } + if (ufunc->process_core_dims_func != NULL) { + int status = ufunc->process_core_dims_func(ufunc, core_dim_sizes); + if (status != 0) { + return -1; + } + } + /* * Make sure no core dimension is unspecified. 
*/ @@ -4689,6 +4696,8 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi /* Type resolution and inner loop selection functions */ ufunc->type_resolver = &PyUFunc_DefaultTypeResolver; + ufunc->process_core_dims_func = NULL; + ufunc->op_flags = NULL; ufunc->_loops = NULL; if (nin + nout != 0) { diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index dfe20bc577a9..e777d7e07be3 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2652,8 +2652,51 @@ def test_nat_is_not_inf(self, nat): pass # ok, just not implemented +class TestGUFuncProcessCoreDims: + + def test_conv1d_full_without_out(self): + x = np.arange(5.0) + y = np.arange(13.0) + w = umt.conv1d_full(x, y) + assert_equal(w, np.convolve(x, y, mode='full')) + + def test_conv1d_full_with_out(self): + x = np.arange(5.0) + y = np.arange(13.0) + out = np.zeros(len(x) + len(y) - 1) + umt.conv1d_full(x, y, out=out) + assert_equal(out, np.convolve(x, y, mode='full')) + + def test_conv1d_full_basic_broadcast(self): + # x.shape is (3, 6) + x = np.array([[1, 3, 0, -10, 2, 2], + [0, -1, 2, 2, 10, 4], + [8, 9, 10, 2, 23, 3]]) + # y.shape is (2, 1, 7) + y = np.array([[[3, 4, 5, 20, 30, 40, 29]], + [[5, 6, 7, 10, 11, 12, -5]]]) + # result should have shape (2, 3, 12) + result = umt.conv1d_full(x, y) + assert result.shape == (2, 3, 12) + for i in range(2): + for j in range(3): + assert_equal(result[i, j], np.convolve(x[j], y[i, 0])) + + def test_bad_out_shape(self): + x = np.ones((1, 2)) + y = np.ones((2, 3)) + out = np.zeros((2, 3)) # Not the correct shape. + with pytest.raises(ValueError, match=r'does not equal m \+ n - 1'): + umt.conv1d_full(x, y, out=out) + + def test_bad_input_both_inputs_length_zero(self): + with pytest.raises(ValueError, + match='both inputs have core dimension 0'): + umt.conv1d_full([], []) + + @pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) - if isinstance(getattr(np, x), np.ufunc)]) + if isinstance(getattr(np, x), np.ufunc)]) def test_ufunc_types(ufunc): ''' Check all ufuncs that the correct type is returned. Avoid From 39ef0a8fd36119018447d3c0eefc18b87ff7f217 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 22 Jul 2024 13:04:15 -0600 Subject: [PATCH 832/980] MAINT: apply rohit's refactoring --- numpy/f2py/f2py2e.py | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index ae6017d33ba8..6da1b95911cd 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -207,12 +207,11 @@ def scaninputline(inputline): dorestdoc = 0 wrapfuncs = 1 buildpath = '.' 
- include_paths, inputline = get_includes(inputline) + include_paths, freethreading_compatible, inputline = get_newer_options(inputline) signsfile, modulename = None, None options = {'buildpath': buildpath, 'coutput': None, 'f2py_wrapper_output': None} - requires_gil = 1 for l in inputline: if l == '': pass @@ -270,10 +269,6 @@ def scaninputline(inputline): cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:] elif l == '--skip-empty-wrappers': emptygen = False - elif l == '--no-freethreading-compatible': - requires_gil = 1 - elif l == '--freethreading-compatible': - requires_gil = 0 elif l[0] == '-': errmess('Unknown option %s\n' % repr(l)) sys.exit() @@ -340,7 +335,7 @@ def scaninputline(inputline): options['wrapfuncs'] = wrapfuncs options['buildpath'] = buildpath options['include_paths'] = include_paths - options['requires_gil'] = requires_gil + options['requires_gil'] = not freethreading_compatible options.setdefault('f2cmap_file', None) return files, options @@ -553,21 +548,22 @@ def __call__(self, parser, namespace, values, option_string=None): include_paths_set.add(values) setattr(namespace, 'include_paths', list(include_paths_set)) -def include_parser(): +def f2py_parser(): parser = argparse.ArgumentParser(add_help=False) parser.add_argument("-I", dest="include_paths", action=CombineIncludePaths) parser.add_argument("--include-paths", dest="include_paths", action=CombineIncludePaths) parser.add_argument("--include_paths", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--freethreading-compatible", dest="ftcompat", action=argparse.BooleanOptionalAction) return parser -def get_includes(iline): +def get_newer_options(iline): iline = (' '.join(iline)).split() - parser = include_parser() + parser = f2py_parser() args, remain = parser.parse_known_args(iline) ipaths = args.include_paths if args.include_paths is None: ipaths = [] - return ipaths, remain + return ipaths, args.ftcompat, remain def make_f2py_compile_parser(): parser = argparse.ArgumentParser(add_help=False) @@ -736,7 +732,7 @@ def run_compile(): run_main(f" {' '.join(f2py_flags)} {' '.join(pyf_files)}".split()) # Order matters here, includes are needed for run_main above - include_dirs, sources = get_includes(sources) + include_dirs, _, sources = get_newer_options(sources) # Now use the builder builder = build_backend( modulename, From 9f66869074e8cad63b7eb0056a6587ac16febf09 Mon Sep 17 00:00:00 2001 From: GUAN MING <105915352+guan404ming@users.noreply.github.com> Date: Tue, 23 Jul 2024 03:07:46 +0800 Subject: [PATCH 833/980] DOC: update tutorials link (#27010) --- doc/source/user/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index 8d4c500fd021..5a002ba8375e 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -26,7 +26,7 @@ details are found in :ref:`reference`. :maxdepth: 1 numpy-for-matlab-users - NumPy tutorials + NumPy tutorials howtos_index .. toctree:: From b3feb3cb249c0027bd5589f5d5d5fc630e853c0a Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 22 Jul 2024 13:12:39 -0600 Subject: [PATCH 834/980] MAINT: back printoptions with a true context variable (#26846) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a re-do of gh-26345, I'm taking over from @mtsokol because this is needed for the free-threaded work. The new _printoptions.py file exists to avoid a circular import during setup of the multiarray module. 
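For illustration, the thread- and async-safety comes from the standard
ContextVar set/reset pattern; roughly (a sketch with made-up names, not
the literal implementation in the diff below):

    import threading
    from contextvars import ContextVar

    options = ContextVar("options", default={"precision": 8})

    def worker(precision):
        # set() returns a token; the new value is visible only in this
        # thread's context, and reset() restores the previous value.
        token = options.set(options.get() | {"precision": precision})
        try:
            assert options.get()["precision"] == precision
        finally:
            options.reset(token)

    threads = [threading.Thread(target=worker, args=(p,)) for p in (2, 12)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()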
I'm guessing this adds some overhead to printing. I haven't benchmarked it because it wasn't clear to me: do we care about printing performance? I could certainly add some caching or a way to avoid repeatedly calling get_legacy_print_mode for every printed value. We could also keep the C global we had before but make it thread-local. I just thought it made things conceptually simpler to store all the printoptions state in the context variable. Co-authored-by: Mateusz Sokół --- .../upcoming_changes/26846.improvement.rst | 6 ++ numpy/_core/arrayprint.py | 96 ++++++++----------- numpy/_core/meson.build | 1 + numpy/_core/multiarray.py | 1 - numpy/_core/printoptions.py | 32 +++++++ numpy/_core/src/multiarray/multiarraymodule.c | 51 +++++++--- numpy/_core/src/multiarray/multiarraymodule.h | 11 +-- numpy/_core/src/multiarray/npy_static_data.c | 4 + numpy/_core/src/multiarray/npy_static_data.h | 2 + numpy/_core/src/multiarray/scalartypes.c.src | 96 +++++++++++++++---- numpy/_core/tests/test_arrayprint.py | 51 +++++++++- numpy/_core/tests/test_multithreading.py | 46 +++++++++ 12 files changed, 299 insertions(+), 98 deletions(-) create mode 100644 doc/release/upcoming_changes/26846.improvement.rst create mode 100644 numpy/_core/printoptions.py diff --git a/doc/release/upcoming_changes/26846.improvement.rst b/doc/release/upcoming_changes/26846.improvement.rst new file mode 100644 index 000000000000..ae9b72d195bf --- /dev/null +++ b/doc/release/upcoming_changes/26846.improvement.rst @@ -0,0 +1,6 @@ +The `numpy.printoptions` context manager is now thread and async-safe +--------------------------------------------------------------------- + +In prior versions of NumPy, the printoptions were defined using a combination +of Python and C global variables. We have refactored so the state is stored in +a python ``ContextVar``, making the context manager thread and async-safe. diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 93e803a8216d..07ff182bdb8f 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -35,33 +35,17 @@ from .umath import absolute, isinf, isfinite, isnat from . import multiarray from .multiarray import (array, dragon4_positional, dragon4_scientific, - datetime_as_string, datetime_data, ndarray, - set_legacy_print_mode) + datetime_as_string, datetime_data, ndarray) from .fromnumeric import any from .numeric import concatenate, asarray, errstate from .numerictypes import (longlong, intc, int_, float64, complex128, flexible) from .overrides import array_function_dispatch, set_module +from .printoptions import format_options import operator import warnings import contextlib -_format_options = { - 'edgeitems': 3, # repr N leading and trailing items of each dimension - 'threshold': 1000, # total items > triggers array summarization - 'floatmode': 'maxprec', - 'precision': 8, # precision of floating point representations - 'suppress': False, # suppress printing small floating values in exp format - 'linewidth': 75, - 'nanstr': 'nan', - 'infstr': 'inf', - 'sign': '-', - 'formatter': None, - # Internally stored as an int to simplify comparisons; converted from/to - # str/False on the way in/out. - 'legacy': sys.maxsize, - 'override_repr': None, -} def _make_options_dict(precision=None, threshold=None, edgeitems=None, linewidth=None, suppress=None, nanstr=None, infstr=None, @@ -295,25 +279,21 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. 
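+            # Right-justify the nan/inf string within the field width used
+            # for finite values (pad_left + pad_right + 1, the extra 1
+            # accounting for the decimal point).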
]) """ - opt = _make_options_dict(precision, threshold, edgeitems, linewidth, - suppress, nanstr, infstr, sign, formatter, - floatmode, legacy, override_repr) + new_opt = _make_options_dict(precision, threshold, edgeitems, linewidth, + suppress, nanstr, infstr, sign, formatter, + floatmode, legacy) # formatter and override_repr are always reset - opt['formatter'] = formatter - opt['override_repr'] = override_repr - _format_options.update(opt) - - # set the C variable for legacy mode - if _format_options['legacy'] == 113: - set_legacy_print_mode(113) - # reset the sign option in legacy mode to avoid confusion - _format_options['sign'] = '-' - elif _format_options['legacy'] == 121: - set_legacy_print_mode(121) - elif _format_options['legacy'] == 125: - set_legacy_print_mode(125) - elif _format_options['legacy'] == sys.maxsize: - set_legacy_print_mode(0) + new_opt['formatter'] = formatter + new_opt['override_repr'] = override_repr + + updated_opt = format_options.get() | new_opt + updated_opt.update(new_opt) + + if updated_opt['legacy'] == 113: + updated_opt['sign'] = '-' + + token = format_options.set(updated_opt) + return token @set_module('numpy') @@ -355,7 +335,7 @@ def get_printoptions(): 100 """ - opts = _format_options.copy() + opts = format_options.get().copy() opts['legacy'] = { 113: '1.13', 121: '1.21', 125: '1.25', sys.maxsize: False, }[opts['legacy']] @@ -364,7 +344,7 @@ def get_printoptions(): def _get_legacy_print_mode(): """Return the legacy print mode as an int.""" - return _format_options['legacy'] + return format_options.get()['legacy'] @set_module('numpy') @@ -393,13 +373,12 @@ def printoptions(*args, **kwargs): -------- set_printoptions, get_printoptions - """ - opts = np.get_printoptions() + """ + token = set_printoptions(*args, **kwargs) try: - np.set_printoptions(*args, **kwargs) - yield np.get_printoptions() + yield get_printoptions() finally: - np.set_printoptions(**opts) + format_options.reset(token) def _leading_trailing(a, edgeitems, index=()): @@ -757,7 +736,7 @@ def array2string(a, max_line_width=None, precision=None, overrides = _make_options_dict(precision, threshold, edgeitems, max_line_width, suppress_small, None, None, sign, formatter, floatmode, legacy) - options = _format_options.copy() + options = format_options.get().copy() options.update(overrides) if options['legacy'] <= 113: @@ -980,7 +959,6 @@ def __init__(self, data, precision, floatmode, suppress_small, sign=False, self.sign = sign self.exp_format = False self.large_exponent = False - self.fillFormat(data) def fillFormat(self, data): @@ -1062,22 +1040,23 @@ def fillFormat(self, data): # if there are non-finite values, may need to increase pad_left if data.size != finite_vals.size: neginf = self.sign != '-' or any(data[isinf(data)] < 0) - nanlen = len(_format_options['nanstr']) - inflen = len(_format_options['infstr']) + neginf offset = self.pad_right + 1 # +1 for decimal pt + current_options = format_options.get() self.pad_left = max( - self.pad_left, nanlen - offset, inflen - offset + self.pad_left, len(current_options['nanstr']) - offset, + len(current_options['infstr']) + neginf - offset ) def __call__(self, x): if not np.isfinite(x): with errstate(invalid='ignore'): + current_options = format_options.get() if np.isnan(x): sign = '+' if self.sign == '+' else '' - ret = sign + _format_options['nanstr'] + ret = sign + current_options['nanstr'] else: # isinf sign = '-' if x < 0 else '+' if self.sign == '+' else '' - ret = sign + _format_options['infstr'] + ret = sign + current_options['infstr'] 
return ' '*( self.pad_left + self.pad_right + 1 - len(ret) ) + ret @@ -1468,10 +1447,10 @@ def _void_scalar_to_string(x, is_repr=True): scalartypes.c.src code, and is placed here because it uses the elementwise formatters defined above. """ - options = _format_options.copy() + options = format_options.get().copy() if options["legacy"] <= 125: - return StructuredVoidFormat.from_data(array(x), **_format_options)(x) + return StructuredVoidFormat.from_data(array(x), **options)(x) if options.get('formatter') is None: options['formatter'] = {} @@ -1515,7 +1494,7 @@ def dtype_is_implied(dtype): array([1, 2, 3], dtype=int8) """ dtype = np.dtype(dtype) - if _format_options['legacy'] <= 113 and dtype.type == np.bool: + if format_options.get()['legacy'] <= 113 and dtype.type == np.bool: return False # not just void types can be structured, and names are not part of the repr @@ -1565,12 +1544,13 @@ def _array_repr_implementation( arr, max_line_width=None, precision=None, suppress_small=None, array2string=array2string): """Internal version of array_repr() that allows overriding array2string.""" - override_repr = _format_options["override_repr"] + current_options = format_options.get() + override_repr = current_options["override_repr"] if override_repr is not None: return override_repr(arr) if max_line_width is None: - max_line_width = _format_options['linewidth'] + max_line_width = current_options['linewidth'] if type(arr) is not ndarray: class_name = type(arr).__name__ @@ -1582,7 +1562,7 @@ def _array_repr_implementation( prefix = class_name + "(" suffix = ")" if skipdtype else "," - if (_format_options['legacy'] <= 113 and + if (current_options['legacy'] <= 113 and arr.shape == () and not arr.dtype.names): lst = repr(arr.item()) elif arr.size > 0 or arr.shape == (0,): @@ -1603,7 +1583,7 @@ def _array_repr_implementation( # Note: This line gives the correct result even when rfind returns -1. 
last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) spacer = " " - if _format_options['legacy'] <= 113: + if current_options['legacy'] <= 113: if issubclass(arr.dtype.type, flexible): spacer = '\n' + ' '*len(class_name + "(") elif last_line_len + len(dtype_str) + 1 > max_line_width: @@ -1677,7 +1657,7 @@ def _array_str_implementation( a, max_line_width=None, precision=None, suppress_small=None, array2string=array2string): """Internal version of array_str() that allows overriding array2string.""" - if (_format_options['legacy'] <= 113 and + if (format_options.get()['legacy'] <= 113 and a.shape == () and not a.dtype.names): return str(a.item()) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index ee8ef3fe0358..96c6dc2848d8 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1324,6 +1324,7 @@ python_sources = [ 'numerictypes.py', 'numerictypes.pyi', 'overrides.py', + 'printoptions.py', 'records.py', 'records.pyi', 'shape_base.py', diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 75ab59851abf..b293a1dd040a 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -39,7 +39,6 @@ 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters', 'normalize_axis_index', 'packbits', 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function', - 'set_legacy_print_mode', 'set_typeDict', 'shares_memory', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros', '_get_promotion_state', '_set_promotion_state'] diff --git a/numpy/_core/printoptions.py b/numpy/_core/printoptions.py new file mode 100644 index 000000000000..7ac93c2290e0 --- /dev/null +++ b/numpy/_core/printoptions.py @@ -0,0 +1,32 @@ +""" +Stores and defines the low-level format_options context variable. + +This is defined in its own file outside of the arrayprint module +so we can import it from C while initializing the multiarray +C module during import without introducing circular dependencies. +""" + +import sys +from contextvars import ContextVar + +__all__ = ["format_options"] + +default_format_options_dict = { + "edgeitems": 3, # repr N leading and trailing items of each dimension + "threshold": 1000, # total items > triggers array summarization + "floatmode": "maxprec", + "precision": 8, # precision of floating point representations + "suppress": False, # suppress printing small floating values in exp format + "linewidth": 75, + "nanstr": "nan", + "infstr": "inf", + "sign": "-", + "formatter": None, + # Internally stored as an int to simplify comparisons; converted from/to + # str/False on the way in/out. 
+ 'legacy': sys.maxsize, + 'override_repr': None, +} + +format_options = ContextVar( + "format_options", default=default_format_options_dict.copy()) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 679324ff16cd..944898ceecf7 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -29,6 +29,7 @@ #include "npy_config.h" #include "npy_pycompat.h" #include "npy_import.h" +#include "npy_static_data.h" #include "convert_datatype.h" #include "legacy_dtype_implementation.h" @@ -64,7 +65,6 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "ctors.h" #include "array_assign.h" #include "common.h" -#include "npy_static_data.h" #include "cblasfuncs.h" #include "vdot.h" #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ @@ -99,16 +99,45 @@ _umath_strings_richcompare( PyArrayObject *self, PyArrayObject *other, int cmp_op, int rstrip); -static PyObject * -set_legacy_print_mode(PyObject *NPY_UNUSED(self), PyObject *args) -{ - if (!PyArg_ParseTuple(args, "i", &npy_thread_unsafe_state.legacy_print_mode)) { - return NULL; +NPY_NO_EXPORT int +get_legacy_print_mode(void) { + /* Get the C value of the legacy printing mode. + * + * It is stored as a Python context variable so we access it via the C + * API. For simplicity the mode is encoded as an integer where INT_MAX + * means no legacy mode, and '113'/'121'/'125' means 1.13/1.21/1.25 legacy + * mode; and 0 maps to INT_MAX. We can upgrade this if we have more + * complex requirements in the future. + */ + PyObject *format_options = NULL; + PyContextVar_Get(npy_static_pydata.format_options, NULL, &format_options); + if (format_options == NULL) { + PyErr_SetString(PyExc_SystemError, + "NumPy internal error: unable to get format_options " + "context variable"); + return -1; } - if (!npy_thread_unsafe_state.legacy_print_mode) { - npy_thread_unsafe_state.legacy_print_mode = INT_MAX; + PyObject *legacy_print_mode = NULL; + if (PyDict_GetItemRef(format_options, npy_interned_str.legacy, + &legacy_print_mode) == -1) { + return -1; } - Py_RETURN_NONE; + Py_DECREF(format_options); + if (legacy_print_mode == NULL) { + PyErr_SetString(PyExc_SystemError, + "NumPy internal error: unable to get legacy print " + "mode"); + return -1; + } + Py_ssize_t ret = PyLong_AsSsize_t(legacy_print_mode); + Py_DECREF(legacy_print_mode); + if (error_converting(ret)) { + return -1; + } + if (ret > INT_MAX) { + return INT_MAX; + } + return (int)ret; } @@ -4540,8 +4569,6 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"normalize_axis_index", (PyCFunction)normalize_axis_index, METH_FASTCALL | METH_KEYWORDS, NULL}, - {"set_legacy_print_mode", (PyCFunction)set_legacy_print_mode, - METH_VARARGS, NULL}, {"_discover_array_parameters", (PyCFunction)_discover_array_parameters, METH_FASTCALL | METH_KEYWORDS, NULL}, {"_get_castingimpl", (PyCFunction)_get_castingimpl, @@ -4771,8 +4798,6 @@ initialize_thread_unsafe_state(void) { npy_thread_unsafe_state.warn_if_no_mem_policy = 0; } - npy_thread_unsafe_state.legacy_print_mode = INT_MAX; - return 0; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 25dc0b54f1e3..de234a8495d3 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -71,15 +71,6 @@ typedef struct npy_thread_unsafe_state_struct { */ int reload_guard_initialized; - /* - * global variable to 
determine if legacy printing is enabled, - * accessible from C. For simplicity the mode is encoded as an - * integer where INT_MAX means no legacy mode, and '113'/'121' - * means 1.13/1.21 legacy mode; and 0 maps to INT_MAX. We can - * upgrade this if we have more complex requirements in the future. - */ - int legacy_print_mode; - /* * Holds the user-defined setting for whether or not to warn * if there is no memory policy set @@ -91,5 +82,7 @@ typedef struct npy_thread_unsafe_state_struct { NPY_VISIBILITY_HIDDEN extern npy_thread_unsafe_state_struct npy_thread_unsafe_state; +NPY_NO_EXPORT int +get_legacy_print_mode(void); #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index e8f554d40d9b..aa2489cbf37e 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -61,6 +61,7 @@ intern_strings(void) INTERN_STRING(errmode_strings[5], "log"); INTERN_STRING(__dlpack__, "__dlpack__"); INTERN_STRING(pyvals_name, "UFUNC_PYVALS_NAME"); + INTERN_STRING(legacy, "legacy"); return 0; } @@ -148,6 +149,9 @@ initialize_static_globals(void) IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncOutputCastingError", npy_static_pydata._UFuncOutputCastingError); + IMPORT_GLOBAL("numpy._core.printoptions", "format_options", + npy_static_pydata.format_options); + IMPORT_GLOBAL("os", "fspath", npy_static_pydata.os_fspath); diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index c4d3ef4cdfee..ef4ca07c643a 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -36,6 +36,7 @@ typedef struct npy_interned_str_struct { PyObject *errmode_strings[6]; PyObject *__dlpack__; PyObject *pyvals_name; + PyObject *legacy; } npy_interned_str_struct; /* @@ -105,6 +106,7 @@ typedef struct npy_static_pydata_struct { PyObject *math_gcd_func; PyObject *os_PathLike; PyObject *os_fspath; + PyObject *format_options; /* * Used in the __array__ internals to avoid building a tuple inline diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 7073a7de2733..c168759ee0dc 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -337,7 +337,11 @@ genint_type_repr(PyObject *self) if (value_string == NULL) { return NULL; } - if (npy_thread_unsafe_state.legacy_print_mode <= 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 125) { return value_string; } @@ -374,7 +378,11 @@ genbool_type_str(PyObject *self) static PyObject * genbool_type_repr(PyObject *self) { - if (npy_thread_unsafe_state.legacy_print_mode <= 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 125) { return genbool_type_str(self); } return PyUnicode_FromString( @@ -500,7 +508,11 @@ stringtype_@form@(PyObject *self) if (ret == NULL) { return NULL; } - if (npy_thread_unsafe_state.legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { Py_SETREF(ret, PyUnicode_FromFormat("np.bytes_(%S)", ret)); } #endif /* IS_repr */ @@ -547,7 +559,11 @@ unicodetype_@form@(PyObject *self) if (ret == NULL) { return NULL; } - if 
(npy_thread_unsafe_state.legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { Py_SETREF(ret, PyUnicode_FromFormat("np.str_(%S)", ret)); } #endif /* IS_repr */ @@ -627,7 +643,11 @@ voidtype_repr(PyObject *self) /* Python helper checks for the legacy mode printing */ return _void_scalar_to_string(self, 1); } - if (npy_thread_unsafe_state.legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { return _void_to_hex(s->obval, s->descr->elsize, "np.void(b'", "\\x", "')"); } else { @@ -679,7 +699,11 @@ datetimetype_repr(PyObject *self) */ if ((scal->obmeta.num == 1 && scal->obmeta.base != NPY_FR_h) || scal->obmeta.base == NPY_FR_GENERIC) { - if (npy_thread_unsafe_state.legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); } else { @@ -691,7 +715,11 @@ datetimetype_repr(PyObject *self) if (meta == NULL) { return NULL; } - if (npy_thread_unsafe_state.legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.datetime64('%s','%S')", iso, meta); } else { @@ -735,7 +763,11 @@ timedeltatype_repr(PyObject *self) /* The metadata unit */ if (scal->obmeta.base == NPY_FR_GENERIC) { - if (npy_thread_unsafe_state.legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.timedelta64(%S)", val); } else { @@ -748,7 +780,11 @@ timedeltatype_repr(PyObject *self) Py_DECREF(val); return NULL; } - if (npy_thread_unsafe_state.legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.timedelta64(%S,'%S')", val, meta); } else { @@ -1050,7 +1086,11 @@ static PyObject * npy_bool sign) { - if (npy_thread_unsafe_state.legacy_print_mode <= 113) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 113) { return legacy_@name@_format@kind@(val); } @@ -1081,7 +1121,11 @@ static PyObject * if (string == NULL) { return NULL; } - if (npy_thread_unsafe_state.legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { Py_SETREF(string, PyUnicode_FromFormat("@repr_format@", string)); } #endif /* IS_repr */ @@ -1096,7 +1140,11 @@ c@name@type_@kind@(PyObject *self) npy_c@name@ val = PyArrayScalar_VAL(self, C@Name@); TrimMode trim = TrimMode_DptZeros; - if (npy_thread_unsafe_state.legacy_print_mode <= 113) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 113) { return legacy_c@name@_format@kind@(val); } @@ -1109,7 +1157,11 @@ c@name@type_@kind@(PyObject *self) #ifdef IS_str ret = PyUnicode_FromFormat("%Sj", istr); #else /* IS_repr */ - if (npy_thread_unsafe_state.legacy_print_mode <= 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if 
(legacy_print_mode <= 125) {
         ret = PyUnicode_FromFormat("%Sj", istr);
     }
     else {
@@ -1157,7 +1209,11 @@ c@name@type_@kind@(PyObject *self)
 #ifdef IS_str
     string = PyUnicode_FromFormat("(%S%Sj)", rstr, istr);
 #else /* IS_repr */
-    if (npy_thread_unsafe_state.legacy_print_mode > 125) {
+    legacy_print_mode = get_legacy_print_mode();
+    if (legacy_print_mode == -1) {
+        return NULL;
+    }
+    if (legacy_print_mode > 125) {
         string = PyUnicode_FromFormat("@crepr_format@", rstr, istr);
     }
     else {
@@ -1182,7 +1238,11 @@ halftype_@kind@(PyObject *self)
     float floatval = npy_half_to_float(val);
     float absval;

-    if (npy_thread_unsafe_state.legacy_print_mode <= 113) {
+    int legacy_print_mode = get_legacy_print_mode();
+    if (legacy_print_mode == -1) {
+        return NULL;
+    }
+    if (legacy_print_mode <= 113) {
        return legacy_float_format@kind@(floatval);
     }

@@ -1198,7 +1258,11 @@ halftype_@kind@(PyObject *self)
 #ifdef IS_str
     return string;
 #else
-    if (string == NULL || npy_thread_unsafe_state.legacy_print_mode <= 125) {
+    legacy_print_mode = get_legacy_print_mode();
+    if (legacy_print_mode == -1) {
+        return NULL;
+    }
+    if (string == NULL || legacy_print_mode <= 125) {
         return string;
     }
     PyObject *res = PyUnicode_FromFormat("np.float16(%S)", string);
diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py
index 1b40a9392c84..c15c60ab3fab 100644
--- a/numpy/_core/tests/test_arrayprint.py
+++ b/numpy/_core/tests/test_arrayprint.py
@@ -7,7 +7,7 @@
 import numpy as np
 from numpy.testing import (
     assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
-    assert_raises_regex,
+    assert_raises_regex, IS_WASM
 )
 from numpy._core.arrayprint import _typelessdata
 import textwrap
@@ -1200,3 +1200,52 @@ def test_scalar_void_float_str():
     # we do not do that.
     scalar = np.void((1.0, 2.0), dtype=[('f0', 'f4')])
     assert str(scalar) == "(1.0, 2.0)"
+
+@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio")
+@pytest.mark.skipif(sys.version_info < (3, 11),
+                    reason="asyncio.barrier was added in Python 3.11")
+def test_printoptions_asyncio_safe():
+    asyncio = pytest.importorskip("asyncio")
+
+    b = asyncio.Barrier(2)
+
+    async def legacy_113():
+        np.set_printoptions(legacy='1.13', precision=12)
+        await b.wait()
+        po = np.get_printoptions()
+        assert po['legacy'] == '1.13'
+        assert po['precision'] == 12
+        orig_linewidth = po['linewidth']
+        with np.printoptions(linewidth=34, legacy='1.21'):
+            po = np.get_printoptions()
+            assert po['legacy'] == '1.21'
+            assert po['precision'] == 12
+            assert po['linewidth'] == 34
+        po = np.get_printoptions()
+        assert po['linewidth'] == orig_linewidth
+        assert po['legacy'] == '1.13'
+        assert po['precision'] == 12
+
+    async def legacy_125():
+        np.set_printoptions(legacy='1.25', precision=7)
+        await b.wait()
+        po = np.get_printoptions()
+        assert po['legacy'] == '1.25'
+        assert po['precision'] == 7
+        orig_linewidth = po['linewidth']
+        with np.printoptions(linewidth=6, legacy='1.13'):
+            po = np.get_printoptions()
+            assert po['legacy'] == '1.13'
+            assert po['precision'] == 7
+            assert po['linewidth'] == 6
+        po = np.get_printoptions()
+        assert po['linewidth'] == orig_linewidth
+        assert po['legacy'] == '1.25'
+        assert po['precision'] == 7
+
+    async def main():
+        await asyncio.gather(legacy_113(), legacy_125())
+
+    loop = asyncio.new_event_loop()
+    asyncio.run(main())
+    loop.close()
diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py
index ca8606ca6e88..0fabed294fed 100644
--- a/numpy/_core/tests/test_multithreading.py
+++
b/numpy/_core/tests/test_multithreading.py @@ -84,3 +84,49 @@ def test_eigvalsh_thread_safety(): run_threaded(lambda i: np.linalg.eigvalsh(matrices[i]), 2, pass_count=True) + + +def test_printoptions_thread_safety(): + # until NumPy 2.1 the printoptions state was stored in globals + # this verifies that they are now stored in a context variable + b = threading.Barrier(2) + + def legacy_113(): + np.set_printoptions(legacy='1.13', precision=12) + b.wait() + po = np.get_printoptions() + assert po['legacy'] == '1.13' + assert po['precision'] == 12 + orig_linewidth = po['linewidth'] + with np.printoptions(linewidth=34, legacy='1.21'): + po = np.get_printoptions() + assert po['legacy'] == '1.21' + assert po['precision'] == 12 + assert po['linewidth'] == 34 + po = np.get_printoptions() + assert po['linewidth'] == orig_linewidth + assert po['legacy'] == '1.13' + assert po['precision'] == 12 + + def legacy_125(): + np.set_printoptions(legacy='1.25', precision=7) + b.wait() + po = np.get_printoptions() + assert po['legacy'] == '1.25' + assert po['precision'] == 7 + orig_linewidth = po['linewidth'] + with np.printoptions(linewidth=6, legacy='1.13'): + po = np.get_printoptions() + assert po['legacy'] == '1.13' + assert po['precision'] == 7 + assert po['linewidth'] == 6 + po = np.get_printoptions() + assert po['linewidth'] == orig_linewidth + assert po['legacy'] == '1.25' + assert po['precision'] == 7 + + task1 = threading.Thread(target=legacy_113) + task2 = threading.Thread(target=legacy_125) + + task1.start() + task2.start() From 3887aafc360f14c8335c488b63a50a63f252a56f Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Mon, 22 Jul 2024 19:20:44 -0300 Subject: [PATCH 835/980] cfuncs.py: fix crash when sys.stderr is not available In some environments (for example frozen executables created with PyInstaller for GUI applications) `sys.stderr` and `sys.stdout` might be `None`. The import-time access to `sys.stderr.write` in some `f2py` modules was causing such applications to crash during startup. Fix #26862 --- numpy/f2py/auxfuncs.py | 2 +- numpy/f2py/cfuncs.py | 11 ++++++++++- numpy/f2py/f2py2e.py | 3 ++- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 5acf770b8e74..68b56c5a640c 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -17,6 +17,7 @@ from . import __version__ from . import cfuncs +from .cfuncs import errmess __all__ = [ 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', @@ -51,7 +52,6 @@ f2py_version = __version__.version -errmess = sys.stderr.write show = pprint.pprint options = {} diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 4328a6e5004c..1dc3247323d5 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -16,7 +16,16 @@ from . import __version__ f2py_version = __version__.version -errmess = sys.stderr.write + + +def errmess(s: str) -> None: + """ + Write an error message to stderr. + + This indirection is needed because sys.stderr might not always be available (see #26862). + """ + if sys.stderr is not None: + sys.stderr.write(s) ##################### Definitions ################## diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index c6eac78b71f4..32cfbd0a3bea 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -28,11 +28,12 @@ from . import f90mod_rules from . import __version__ from . 
import capi_maps
+from .cfuncs import errmess
 from numpy.f2py._backends import f2py_build_generator

 f2py_version = __version__.version
 numpy_version = __version__.version
-errmess = sys.stderr.write
+
 # outmess=sys.stdout.write
 show = pprint.pprint
 outmess = auxfuncs.outmess

From a83d530a0768ba9495c6e534c9f7d2d8bcd68eeb Mon Sep 17 00:00:00 2001
From: GUAN MING
Date: Tue, 23 Jul 2024 12:19:06 +0800
Subject: [PATCH 836/980] MAINT: fix _gcd()

---
 numpy/_core/_internal.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py
index 058e93644dec..e50c5434588f 100644
--- a/numpy/_core/_internal.py
+++ b/numpy/_core/_internal.py
@@ -5,6 +5,7 @@
 """
 import ast
+import math
 import re
 import sys
 import warnings
@@ -860,6 +861,9 @@ def _prod(a):

 def _gcd(a, b):
     """Calculate the greatest common divisor of a and b"""
+    if not (math.isfinite(a) and math.isfinite(b)):
+        raise ValueError('Can only find greatest common divisor of '
+                         f'finite arguments, found "{a}" and "{b}"')
     while b:
         a, b = b, a % b
     return a

From f9d08a6e5360f26652c6f0c1b5e668e7b4601409 Mon Sep 17 00:00:00 2001
From: GUAN MING
Date: Tue, 23 Jul 2024 12:19:42 +0800
Subject: [PATCH 837/980] TST: test for gcd inf

---
 numpy/_core/tests/test_umath.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py
index 0941a522ab64..a7944e54b334 100644
--- a/numpy/_core/tests/test_umath.py
+++ b/numpy/_core/tests/test_umath.py
@@ -4138,6 +4138,11 @@ def test_huge_integers(self):
         assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5])
         assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10])

+    def test_inf(self):
+        inf = np.array([np.inf], dtype=np.object_)
+        assert_raises(ValueError, np.gcd, inf, 1)
+        assert_raises(ValueError, np.gcd, 1, inf)
+

 class TestRoundingFunctions:

From d3d7b55616a062d91af8e20d2a4eb817a41cea56 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= <8431159+mtsokol@users.noreply.github.com>
Date: Tue, 23 Jul 2024 11:32:42 +0200
Subject: [PATCH 838/980] DOC: Fix migration note for `alltrue` and `sometrue` (#27015)

---
 doc/source/numpy_2_0_migration_guide.rst | 4 ++--
 numpy/_expired_attrs_2_0.py              | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst
index 665da641f237..2ff49b162fe4 100644
--- a/doc/source/numpy_2_0_migration_guide.rst
+++ b/doc/source/numpy_2_0_migration_guide.rst
@@ -246,7 +246,7 @@ removed member        migration guideline
 add_docstring         It's still available as ``np.lib.add_docstring``.
 add_newdoc            It's still available as ``np.lib.add_newdoc``.
 add_newdoc_ufunc      It's an internal function and doesn't have a replacement.
-alltrue               Use ``np.all`` instead.
+alltrue               Use ``np.all`` instead.
 asfarray              Use ``np.asarray`` with a float dtype instead.
 byte_bounds           Now it's available under ``np.lib.array_utils.byte_bounds``
 cast                  Use ``np.asarray(arr, dtype=dtype)`` instead.
@@ -307,7 +307,7 @@ set_string_function   Use ``np.set_printoptions`` instead with a formatter for
                       custom printing of NumPy objects.
 singlecomplex         Use ``np.complex64`` instead.
 string\_              Use ``np.bytes_`` instead.
-sometrue              Use ``any`` instead.
+sometrue              Use ``np.any`` instead.
 source                Use ``inspect.getsource`` instead.
 tracemalloc_domain    It's now available from ``np.lib``.
 unicode\_             Use ``np.str_`` instead.
diff --git a/numpy/_expired_attrs_2_0.py b/numpy/_expired_attrs_2_0.py index 1dad38c5a60f..06de514e35e4 100644 --- a/numpy/_expired_attrs_2_0.py +++ b/numpy/_expired_attrs_2_0.py @@ -75,4 +75,6 @@ "compare_chararrays": "It's still available as `np.char.compare_chararrays`.", "format_parser": "It's still available as `np.rec.format_parser`.", + "alltrue": "Use `np.all` instead.", + "sometrue": "Use `np.any` instead.", } From 7a044b219c50f8e37edbee0a5a098ab1b1466358 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Tue, 23 Jul 2024 12:42:57 -0400 Subject: [PATCH 839/980] DOC: Release note for feature added in gh-26908. [skip actions] [skip azp] [skip cirrus] --- doc/release/upcoming_changes/26908.c_api.rst | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 doc/release/upcoming_changes/26908.c_api.rst diff --git a/doc/release/upcoming_changes/26908.c_api.rst b/doc/release/upcoming_changes/26908.c_api.rst new file mode 100644 index 000000000000..d6e43591819d --- /dev/null +++ b/doc/release/upcoming_changes/26908.c_api.rst @@ -0,0 +1,8 @@ +New ``PyUFuncObject`` field ``process_core_dims_func`` +------------------------------------------------------ +The field ``process_core_dims_func`` was added to the structure +``PyUFuncObject``. For generalized ufuncs, this field can be set to a +function of type ``PyUFunc_ProcessCoreDimsFunc`` that will be called when the +ufunc is called. It allows the ufunc author to check that core dimensions +satisfy additional constraints, and to set output core dimension sizes if they +have not been provided. From 9a8b760d08a69f8fbff0597e29bd9508218fcab7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 5 Jul 2024 22:21:10 +0200 Subject: [PATCH 840/980] TYP: adopt `typing.LiteralString` and use more of `typing.Literal` Limited to the global `numpy` namespace. --- numpy/__init__.pyi | 162 +++++++++++++++---- numpy/typing/tests/data/reveal/getlimits.pyi | 8 +- 2 files changed, 138 insertions(+), 32 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0712b529ad52..94d3769e9e47 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -184,6 +184,7 @@ from collections.abc import ( Sequence, ) from typing import ( + TYPE_CHECKING, Literal as L, Any, Generator, @@ -199,9 +200,16 @@ from typing import ( Final, final, ClassVar, - TypeAlias + TypeAlias, ) +if sys.version_info >= (3, 11): + from typing import LiteralString +elif TYPE_CHECKING: + from typing_extensions import LiteralString +else: + LiteralString: TypeAlias = str + # Ensures that the stubs are picked up from numpy import ( ctypeslib as ctypeslib, @@ -604,7 +612,7 @@ from numpy.matrixlib import ( bmat as bmat, ) -_AnyStr_contra = TypeVar("_AnyStr_contra", str, bytes, contravariant=True) +_AnyStr_contra = TypeVar("_AnyStr_contra", LiteralString, builtins.str, bytes, contravariant=True) # Protocol for representing file-like-objects accepted # by `ndarray.tofile` and `fromfile` @@ -631,8 +639,8 @@ class _SupportsWrite(Protocol[_AnyStr_contra]): __all__: list[str] def __dir__() -> Sequence[str]: ... -__version__: str -__array_api_version__: str +__version__: LiteralString +__array_api_version__: LiteralString test: PytestTester # TODO: Move placeholders to their respective module once @@ -645,9 +653,100 @@ def show_config() -> None: ... 
_NdArraySubClass = TypeVar("_NdArraySubClass", bound=NDArray[Any]) _NdArraySubClass_co = TypeVar("_NdArraySubClass_co", bound=NDArray[Any], covariant=True) _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) -_ByteOrder: TypeAlias = L["S", "<", ">", "=", "|", "L", "B", "N", "I", "little", "big", "native"] _SCT = TypeVar("_SCT", bound=generic) +_ByteOrderChar: TypeAlias = L[ + "<", # little-endian + ">", # big-endian + "=", # native order + "|", # ignore +] +# can be anything, is case-insensitive, and only the first character matters +_ByteOrder: TypeAlias = L[ + "S", # swap the current order (default) + "<", "L", "little", # little-endian + ">", "B", "big", # big endian + "=", "N", "native", # native order + "|", "I", # ignore +] +_DTypeKind: TypeAlias = L[ + "b", # boolean + "i", # signed integer + "u", # unsigned integer + "f", # floating-point + "c", # complex floating-point + "m", # timedelta64 + "M", # datetime64 + "O", # python object + "S", # byte-string (fixed-width) + "U", # unicode-string (fixed-width) + "V", # void + "T", # unicode-string (variable-width) +] +_DTypeChar: TypeAlias = L[ + "?", # bool + "b", # byte + "B", # ubyte + "h", # short + "H", # ushort + "i", # intc + "I", # uintc + "l", # long + "L", # ulong + "q", # longlong + "Q", # ulonglong + "e", # half + "f", # single + "d", # double + "g", # longdouble + "F", # csingle + "D", # cdouble + "G", # clongdouble + "O", # object + "S", # bytes_ (S0) + "a", # bytes_ (deprecated) + "U", # str_ + "V", # void + "M", # datetime64 + "m", # timedelta64 + "c", # bytes_ (S1) + "T", # StringDType +] +_DTypeNum: TypeAlias = L[ + 0, # bool + 1, # byte + 2, # ubyte + 3, # short + 4, # ushort + 5, # intc + 6, # uintc + 7, # long + 8, # ulong + 9, # longlong + 10, # ulonglong + 23, # half + 11, # single + 12, # double + 13, # longdouble + 14, # csingle + 15, # cdouble + 16, # clongdouble + 17, # object + 18, # bytes_ + 19, # str_ + 20, # void + 21, # datetime64 + 22, # timedelta64 + 25, # no type + 256, # user-defined + 2056, # StringDType +] +_DTypeBuiltinKind: TypeAlias = L[ + 0, # structured array type, with fields + 1, # compiled into numpy + 2, # user-defined +] + @final class dtype(Generic[_DTypeScalar_co]): names: None | tuple[builtins.str, ...] @@ -858,21 +957,19 @@ class dtype(Generic[_DTypeScalar_co]): @property def base(self) -> dtype[Any]: ... @property - def byteorder(self) -> builtins.str: ... + def byteorder(self) -> _ByteOrderChar: ... @property - def char(self) -> builtins.str: ... + def char(self) -> _DTypeChar: ... @property - def descr(self) -> list[tuple[builtins.str, builtins.str] | tuple[builtins.str, builtins.str, _Shape]]: ... + def descr(self) -> list[tuple[LiteralString, LiteralString] | tuple[LiteralString, LiteralString, _Shape]]: ... @property - def fields( - self, - ) -> None | MappingProxyType[builtins.str, tuple[dtype[Any], int] | tuple[dtype[Any], int, Any]]: ... + def fields(self,) -> None | MappingProxyType[LiteralString, tuple[dtype[Any], int] | tuple[dtype[Any], int, Any]]: ... @property def flags(self) -> int: ... @property def hasobject(self) -> builtins.bool: ... @property - def isbuiltin(self) -> int: ... + def isbuiltin(self) -> _DTypeBuiltinKind: ... @property def isnative(self) -> builtins.bool: ... @property @@ -880,22 +977,22 @@ class dtype(Generic[_DTypeScalar_co]): @property def itemsize(self) -> int: ... @property - def kind(self) -> builtins.str: ... + def kind(self) -> _DTypeKind: ... 
@property def metadata(self) -> None | MappingProxyType[builtins.str, Any]: ... @property - def name(self) -> builtins.str: ... + def name(self) -> LiteralString: ... @property - def num(self) -> int: ... + def num(self) -> _DTypeNum: ... @property - def shape(self) -> _Shape: ... + def shape(self) -> tuple[()] | _Shape: ... @property def ndim(self) -> int: ... @property def subdtype(self) -> None | tuple[dtype[Any], _Shape]: ... - def newbyteorder(self: _DType, __new_order: _ByteOrder = ...) -> _DType: ... + def newbyteorder(self: _DType, new_order: _ByteOrder = ..., /) -> _DType: ... @property - def str(self) -> builtins.str: ... + def str(self) -> LiteralString: ... @property def type(self) -> type[_DTypeScalar_co]: ... @@ -957,7 +1054,14 @@ _OrderCF: TypeAlias = L[None, "C", "F"] _ModeKind: TypeAlias = L["raise", "wrap", "clip"] _PartitionKind: TypeAlias = L["introselect"] -_SortKind: TypeAlias = L["quicksort", "mergesort", "heapsort", "stable"] +# in practice, only the first case-insensitive character is considered (so e.g. +# "QuantumSort3000" will be interpreted as quicksort). +_SortKind: TypeAlias = L[ + "Q", "quick", "quicksort", + "M", "merge", "mergesort", + "H", "heap", "heapsort", + "S", "stable", "stablesort", +] _SortSide: TypeAlias = L["left", "right"] _ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon) @@ -1009,7 +1113,7 @@ class _ArrayOrScalarCommon: def __array_priority__(self) -> float: ... @property def __array_struct__(self) -> Any: ... # builtins.PyCapsule - def __array_namespace__(self, *, api_version: str | None = ...) -> Any: ... + def __array_namespace__(self, *, api_version: None | _ArrayAPIVersion = ...) -> Any: ... def __setstate__(self, state: tuple[ SupportsIndex, # version _ShapeLike, # Shape @@ -1435,6 +1539,8 @@ if sys.version_info >= (3, 13): else: _PyCapsule: TypeAlias = Any +_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12"] + class _SupportsItem(Protocol[_T_co]): def item(self, args: Any, /) -> _T_co: ... @@ -3247,7 +3353,7 @@ newaxis: None @final class ufunc: @property - def __name__(self) -> str: ... + def __name__(self) -> LiteralString: ... @property def __doc__(self) -> str: ... __call__: Callable[..., Any] @@ -3260,7 +3366,7 @@ class ufunc: @property def ntypes(self) -> int: ... @property - def types(self) -> list[str]: ... + def types(self) -> list[LiteralString]: ... # Broad return type because it has to encompass things like # # >>> np.logical_and.identity is True @@ -3275,7 +3381,7 @@ class ufunc: def identity(self) -> Any: ... # This is None for ufuncs and a string for gufuncs. @property - def signature(self) -> None | str: ... + def signature(self) -> None | LiteralString: ... # The next four methods will always exist, but they will just # raise a ValueError ufuncs with that don't accept two input # arguments and return one output argument. Because of that we @@ -3536,9 +3642,9 @@ class finfo(Generic[_FloatType]): class iinfo(Generic[_IntType]): dtype: dtype[_IntType] - kind: str + kind: LiteralString bits: int - key: str + key: LiteralString @property def min(self) -> int: ... @property @@ -3714,8 +3820,8 @@ class memmap(ndarray[_ShapeType, _DType_co]): class vectorize: pyfunc: Callable[..., Any] cache: builtins.bool - signature: None | str - otypes: None | str + signature: None | LiteralString + otypes: None | LiteralString excluded: set[int | str] __doc__: None | str def __init__( @@ -3731,7 +3837,7 @@ class vectorize: class poly1d: @property - def variable(self) -> str: ... 
+ def variable(self) -> LiteralString: ... @property def order(self) -> int: ... @property diff --git a/numpy/typing/tests/data/reveal/getlimits.pyi b/numpy/typing/tests/data/reveal/getlimits.pyi index f53fdf48824e..57af90cccb8a 100644 --- a/numpy/typing/tests/data/reveal/getlimits.pyi +++ b/numpy/typing/tests/data/reveal/getlimits.pyi @@ -4,9 +4,9 @@ from typing import Any import numpy as np if sys.version_info >= (3, 11): - from typing import assert_type + from typing import assert_type, LiteralString else: - from typing_extensions import assert_type + from typing_extensions import assert_type, LiteralString f: float f8: np.float64 @@ -49,8 +49,8 @@ assert_type(np.iinfo(u4), np.iinfo[np.uint32]) assert_type(np.iinfo('i2'), np.iinfo[Any]) assert_type(iinfo_i8.dtype, np.dtype[np.int64]) -assert_type(iinfo_i8.kind, str) +assert_type(iinfo_i8.kind, LiteralString) assert_type(iinfo_i8.bits, int) -assert_type(iinfo_i8.key, str) +assert_type(iinfo_i8.key, LiteralString) assert_type(iinfo_i8.min, int) assert_type(iinfo_i8.max, int) From 24fddc7f86d78e54da7f460ec87b05f1b5aa1b7b Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 20 Jul 2024 00:59:19 +0200 Subject: [PATCH 841/980] TYP,BUG: FIx ``numpy.ndenumerate`` annotations for ``object_`` sctype --- numpy/__init__.pyi | 15 ++++++++++++++- numpy/typing/tests/data/reveal/index_tricks.pyi | 7 +++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0712b529ad52..77e0556ed820 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2856,6 +2856,7 @@ class bool(generic): bool_: TypeAlias = bool +@final class object_(generic): def __init__(self, value: object = ..., /) -> None: ... @property @@ -3452,8 +3453,20 @@ class ndenumerate(Generic[_ScalarType_co]): def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[float64]: ... @overload def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[complex128]: ... + @overload + def __new__(cls, arr: object) -> ndenumerate[object_]: ... + + # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11) + @overload + def __next__( + self: ndenumerate[np.bool | datetime64 | timedelta64 | number[Any] | flexible], + /, + ) -> tuple[_Shape, _ScalarType_co]: ... + @overload + def __next__(self: ndenumerate[object_], /) -> tuple[_Shape, Any]: ... + @overload + def __next__(self, /) -> tuple[_Shape, _ScalarType_co]: ... - def __next__(self) -> tuple[_Shape, _ScalarType_co]: ... def __iter__(self: _T) -> _T: ... 
class ndindex: diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi index 029c8228cae7..ad8be765fbc1 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -13,24 +13,31 @@ AR_LIKE_b: list[bool] AR_LIKE_i: list[int] AR_LIKE_f: list[float] AR_LIKE_U: list[str] +AR_LIKE_O: list[object] AR_i8: npt.NDArray[np.int64] +AR_O: npt.NDArray[np.object_] assert_type(np.ndenumerate(AR_i8), np.ndenumerate[np.int64]) assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) +assert_type(np.ndenumerate(AR_LIKE_O), np.ndenumerate[np.object_]) assert_type(np.ndenumerate(AR_i8).iter, np.flatiter[npt.NDArray[np.int64]]) assert_type(np.ndenumerate(AR_LIKE_f).iter, np.flatiter[npt.NDArray[np.float64]]) assert_type(np.ndenumerate(AR_LIKE_U).iter, np.flatiter[npt.NDArray[np.str_]]) +assert_type(np.ndenumerate(AR_LIKE_O).iter, np.flatiter[npt.NDArray[np.object_]]) assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[int, ...], np.int64]) assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[int, ...], np.float64]) assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[int, ...], np.str_]) +# this fails due to an unknown mypy bug +# assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[int, ...], Any]) assert_type(iter(np.ndenumerate(AR_i8)), np.ndenumerate[np.int64]) assert_type(iter(np.ndenumerate(AR_LIKE_f)), np.ndenumerate[np.float64]) assert_type(iter(np.ndenumerate(AR_LIKE_U)), np.ndenumerate[np.str_]) +assert_type(iter(np.ndenumerate(AR_LIKE_O)), np.ndenumerate[np.object_]) assert_type(np.ndindex(1, 2, 3), np.ndindex) assert_type(np.ndindex((1, 2, 3)), np.ndindex) From 69516b90e4e2267959a5d618a6ea9294d9ae1f89 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 23 Jul 2024 21:36:04 +0200 Subject: [PATCH 842/980] TYP: Fix ``Any`` annotation typo in ``numpy.lib._function_base_impl`` Co-authored-by: Ralf Gommers --- numpy/lib/_function_base_impl.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 6f3b79486b00..9b0fbaa503de 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -638,7 +638,7 @@ quantile = percentile _SCT_fm = TypeVar( "_SCT_fm", - bound=floating[Any] | complexfloating[ANy, Any] | timedelta64, + bound=floating[Any] | complexfloating[Any, Any] | timedelta64, ) class _SupportsRMulFloat(Protocol[_T_co]): From f89de8744feeca67288d8d4c87d788554f7defbb Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 23 Jul 2024 22:15:33 +0200 Subject: [PATCH 843/980] TYP,BUG: Replace ``numpy._typing._UnknownType`` with ``typing.Never`` --- numpy/_typing/_array_like.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 22ff5356f9d9..5cc501ab3ec5 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -158,8 +158,10 @@ def __array_function__( # Used as the first overload, should only match NDArray[Any], # not any actual types. # https://github.com/numpy/numpy/pull/22193 -class _UnknownType: - ... 
+if sys.version_info >= (3, 11):
+    from typing import Never as _UnknownType
+else:
+    from typing import NoReturn as _UnknownType
 
 
 _ArrayLikeUnknown: TypeAlias = _DualArrayLike[

From 7da7f9cc380117b834595863647d7c89ed0c844d Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 4 Jul 2024 13:01:58 +0200
Subject: [PATCH 844/980] TYP: fix `ufunc` method type annotations

This fixes #26839

In the cases where the `ufunc` methods `reduce`, `accumulate`,
`reduceat`, `outer`, or `at` are unavailable (i.e. raise a `ValueError`
at runtime), they are now annotated as *methods* that return
`typing.NoReturn`, instead of *attributes* that return `None`.
---
 numpy/__init__.pyi                        | 13 ++--
 numpy/_typing/_ufunc.pyi                  | 75 ++++++++++-------------
 numpy/typing/tests/data/fail/ufuncs.pyi   | 24 --------
 numpy/typing/tests/data/reveal/ufuncs.pyi | 26 +++++++-
 4 files changed, 63 insertions(+), 75 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index eccb34c28670..3fd278afa0fb 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -3357,7 +3357,6 @@ class ufunc:
     def __name__(self) -> LiteralString: ...
     @property
     def __doc__(self) -> str: ...
-    __call__: Callable[..., Any]
     @property
     def nin(self) -> int: ...
     @property
@@ -3383,17 +3382,19 @@ class ufunc:
     # This is None for ufuncs and a string for gufuncs.
     @property
     def signature(self) -> None | LiteralString: ...
+
+    def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
     # The next four methods will always exist, but they will just
     # raise a ValueError ufuncs with that don't accept two input
     # arguments and return one output argument. Because of that we
     # can't type them very precisely.
-    reduce: Any
-    accumulate: Any
-    reduceat: Any
-    outer: Any
+    def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn | Any: ...
+    def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn | NDArray[Any]: ...
+    def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn | NDArray[Any]: ...
+    def outer(self, *args: Any, **kwargs: Any) -> NoReturn | Any: ...
     # Similarly at won't be defined for ufuncs that return multiple
     # outputs, so we can't type it very precisely.
-    at: Any
+    def at(self, /, *args: Any, **kwargs: Any) -> NoReturn | None: ...
 
 # Parameters: `__name__`, `ntypes` and `identity`
 absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None]
diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi
index b6e4db4b5e13..5e52039864b7 100644
--- a/numpy/_typing/_ufunc.pyi
+++ b/numpy/_typing/_ufunc.pyi
@@ -15,6 +15,7 @@ from typing import (
     Literal,
     SupportsIndex,
     Protocol,
+    NoReturn,
 )
 
 from numpy import ufunc, _CastingKind, _OrderKACF
@@ -30,10 +31,10 @@ _2Tuple = tuple[_T, _T]
 _3Tuple = tuple[_T, _T, _T]
 _4Tuple = tuple[_T, _T, _T, _T]
 
-_NTypes = TypeVar("_NTypes", bound=int)
-_IDType = TypeVar("_IDType", bound=Any)
-_NameType = TypeVar("_NameType", bound=str)
-_Signature = TypeVar("_Signature", bound=str)
+_NTypes = TypeVar("_NTypes", bound=int, covariant=True)
+_IDType = TypeVar("_IDType", bound=Any, covariant=True)
+_NameType = TypeVar("_NameType", bound=str, covariant=True)
+_Signature = TypeVar("_Signature", bound=str, covariant=True)
 
 
 class _SupportsArrayUFunc(Protocol):
@@ -48,10 +49,10 @@ class _SupportsArrayUFunc(Protocol):
 
 # NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for
 # ufuncs that don't accept two input arguments and return one output argument.
-# In such cases the respective methods are simply typed as `None`.
+# In such cases the respective methods return `NoReturn` # NOTE: Similarly, `at` won't be defined for ufuncs that return -# multiple outputs; in such cases `at` is typed as `None` +# multiple outputs; in such cases `at` is typed to return `NoReturn` # NOTE: If 2 output types are returned then `out` must be a # 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable @@ -71,14 +72,6 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def nargs(self) -> Literal[2]: ... @property def signature(self) -> None: ... - @property - def reduce(self) -> None: ... - @property - def accumulate(self) -> None: ... - @property - def reduceat(self) -> None: ... - @property - def outer(self) -> None: ... @overload def __call__( @@ -127,6 +120,12 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i /, ) -> None: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + + class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @@ -253,16 +252,6 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def nargs(self) -> Literal[3]: ... @property def signature(self) -> None: ... - @property - def at(self) -> None: ... - @property - def reduce(self) -> None: ... - @property - def accumulate(self) -> None: ... - @property - def reduceat(self) -> None: ... - @property - def outer(self) -> None: ... @overload def __call__( @@ -309,6 +298,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _3Tuple[None | str] = ..., ) -> _2Tuple[Any]: ... + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @@ -324,16 +319,6 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def nargs(self) -> Literal[4]: ... @property def signature(self) -> None: ... - @property - def at(self) -> None: ... - @property - def reduce(self) -> None: ... - @property - def accumulate(self) -> None: ... - @property - def reduceat(self) -> None: ... - @property - def outer(self) -> None: ... @overload def __call__( @@ -367,6 +352,12 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _4Tuple[None | str] = ..., ) -> _2Tuple[NDArray[Any]]: ... + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @@ -382,16 +373,6 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] def nargs(self) -> Literal[3]: ... @property def signature(self) -> _Signature: ... - @property - def reduce(self) -> None: ... - @property - def accumulate(self) -> None: ... 
- @property - def reduceat(self) -> None: ... - @property - def outer(self) -> None: ... - @property - def at(self) -> None: ... # Scalar for 1D array-likes; ndarray otherwise @overload @@ -422,3 +403,9 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] signature: str | _3Tuple[None | str] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., ) -> NDArray[Any]: ... + + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... diff --git a/numpy/typing/tests/data/fail/ufuncs.pyi b/numpy/typing/tests/data/fail/ufuncs.pyi index e827267c6072..bbab0dfe3fc2 100644 --- a/numpy/typing/tests/data/fail/ufuncs.pyi +++ b/numpy/typing/tests/data/fail/ufuncs.pyi @@ -15,27 +15,3 @@ np.matmul(AR_f8, AR_f8, where=True) # E: No overload variant np.frexp(AR_f8, out=None) # E: No overload variant np.frexp(AR_f8, out=AR_f8) # E: No overload variant - -np.absolute.outer() # E: "None" not callable -np.frexp.outer() # E: "None" not callable -np.divmod.outer() # E: "None" not callable -np.matmul.outer() # E: "None" not callable - -np.absolute.reduceat() # E: "None" not callable -np.frexp.reduceat() # E: "None" not callable -np.divmod.reduceat() # E: "None" not callable -np.matmul.reduceat() # E: "None" not callable - -np.absolute.reduce() # E: "None" not callable -np.frexp.reduce() # E: "None" not callable -np.divmod.reduce() # E: "None" not callable -np.matmul.reduce() # E: "None" not callable - -np.absolute.accumulate() # E: "None" not callable -np.frexp.accumulate() # E: "None" not callable -np.divmod.accumulate() # E: "None" not callable -np.matmul.accumulate() # E: "None" not callable - -np.frexp.at() # E: "None" not callable -np.divmod.at() # E: "None" not callable -np.matmul.at() # E: "None" not callable diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index 859c202c3766..39a796bf6845 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -1,5 +1,5 @@ import sys -from typing import Literal, Any +from typing import Literal, Any, NoReturn import numpy as np import numpy.typing as npt @@ -96,3 +96,27 @@ assert_type(np.bitwise_count.signature, None) assert_type(np.bitwise_count.identity, None) assert_type(np.bitwise_count(i8), Any) assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any]) + +assert_type(np.absolute.outer(), NoReturn) +assert_type(np.frexp.outer(), NoReturn) +assert_type(np.divmod.outer(), NoReturn) +assert_type(np.matmul.outer(), NoReturn) + +assert_type(np.absolute.reduceat(), NoReturn) +assert_type(np.frexp.reduceat(), NoReturn) +assert_type(np.divmod.reduceat(), NoReturn) +assert_type(np.matmul.reduceat(), NoReturn) + +assert_type(np.absolute.reduce(), NoReturn) +assert_type(np.frexp.reduce(), NoReturn) +assert_type(np.divmod.reduce(), NoReturn) +assert_type(np.matmul.reduce(), NoReturn) + +assert_type(np.absolute.accumulate(), NoReturn) +assert_type(np.frexp.accumulate(), NoReturn) +assert_type(np.divmod.accumulate(), NoReturn) +assert_type(np.matmul.accumulate(), NoReturn) + +assert_type(np.frexp.at(), NoReturn) +assert_type(np.divmod.at(), NoReturn) +assert_type(np.matmul.at(), NoReturn) From a968ea8f8ed800f4e57f9e996c8eebbc70ec19e7 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 22 Jul 2024 13:45:46 -0600 Subject: [PATCH 845/980] MAINT: replace 
PyThread_type_lock with PyMutex in lapack_lite --- numpy/linalg/umath_linalg.cpp | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index 8c36ba43864d..cf8f469a022a 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -28,7 +28,13 @@ static const char* umath_linalg_version_string = "0.1.5"; // global lock to serialize calls into lapack_lite +#if !HAVE_EXTERNAL_LAPACK +#if PY_VERSION_HEX < 0x30d00b3 static PyThread_type_lock lapack_lite_lock; +#else +static PyMutex lapack_lite_lock = {0}; +#endif +#endif /* **************************************************************************** @@ -407,8 +413,13 @@ FNAME(zgemm)(char *transa, char *transb, #define LOCK_LAPACK_LITE #define UNLOCK_LAPACK_LITE #else +#if PY_VERSION_HEX < 0x30d00b3 #define LOCK_LAPACK_LITE PyThread_acquire_lock(lapack_lite_lock, WAIT_LOCK) #define UNLOCK_LAPACK_LITE PyThread_release_lock(lapack_lite_lock) +#else + #define LOCK_LAPACK_LITE PyMutex_Lock(&lapack_lite_lock) + #define UNLOCK_LAPACK_LITE PyMutex_Unlock(&lapack_lite_lock) +#endif #endif /* @@ -4687,11 +4698,13 @@ PyMODINIT_FUNC PyInit__umath_linalg(void) return NULL; } +#if PY_VERSION_HEX < 0x30d00b3 && !HAVE_EXTERNAL_LAPACK lapack_lite_lock = PyThread_allocate_lock(); if (lapack_lite_lock == NULL) { PyErr_NoMemory(); return NULL; } +#endif #ifdef HAVE_BLAS_ILP64 PyDict_SetItemString(d, "_ilp64", Py_True); From 352760e03f83a8983c2472dea3005d182325de2d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 22 Jul 2024 14:15:52 -0600 Subject: [PATCH 846/980] MAINT: refactor the import cache to use PyMutex --- numpy/_core/src/common/npy_import.c | 2 ++ numpy/_core/src/common/npy_import.h | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/numpy/_core/src/common/npy_import.c b/numpy/_core/src/common/npy_import.c index d220b840f7a9..cff071e9b522 100644 --- a/numpy/_core/src/common/npy_import.c +++ b/numpy/_core/src/common/npy_import.c @@ -10,10 +10,12 @@ NPY_VISIBILITY_HIDDEN npy_runtime_imports_struct npy_runtime_imports; NPY_NO_EXPORT int init_import_mutex(void) { +#if PY_VERSION_HEX < 0x30d00b3 npy_runtime_imports.import_mutex = PyThread_allocate_lock(); if (npy_runtime_imports.import_mutex == NULL) { PyErr_NoMemory(); return -1; } +#endif return 0; } diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 89b300159b61..9df85357b5ec 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -11,7 +11,11 @@ * can be initialized at any time by npy_cache_import_runtime. 
*/ typedef struct npy_runtime_imports_struct { +#if PY_VERSION_HEX < 0x30d00b3 PyThread_type_lock import_mutex; +#else + PyMutex import_mutex; +#endif PyObject *_add_dtype_helper; PyObject *_all; PyObject *_amax; @@ -86,11 +90,19 @@ npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { if (value == NULL) { return -1; } +#if PY_VERSION_HEX < 0x30d00b3 PyThread_acquire_lock(npy_runtime_imports.import_mutex, WAIT_LOCK); +#else + PyMutex_Lock(&npy_runtime_imports.import_mutex); +#endif if (!npy_atomic_load_ptr(obj)) { npy_atomic_store_ptr(obj, Py_NewRef(value)); } +#if PY_VERSION_HEX < 0x30d00b3 PyThread_release_lock(npy_runtime_imports.import_mutex); +#else + PyMutex_Unlock(&npy_runtime_imports.import_mutex); +#endif Py_DECREF(value); } return 0; From 571b60d727dbe12fbaed5591242d960adac440d0 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 22 Jul 2024 14:27:24 -0600 Subject: [PATCH 847/980] MAINT: refactor the identity hash table to use PyMutex --- numpy/_core/src/common/npy_hashtable.c | 9 ++++++++- numpy/_core/src/common/npy_hashtable.h | 6 +++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.c index d361777d26ca..82cd33e33fb8 100644 --- a/numpy/_core/src/common/npy_hashtable.c +++ b/numpy/_core/src/common/npy_hashtable.c @@ -30,6 +30,7 @@ #endif #ifdef Py_GIL_DISABLED +#if PY_VERSION_HEX < 0x30d00b3 // TODO: replace with PyMutex when it is public #define LOCK_TABLE(tb) \ if (!PyThread_acquire_lock(tb->mutex, NOWAIT_LOCK)) { \ @@ -48,6 +49,12 @@ PyThread_free_lock(tb->mutex); \ } #else +#define LOCK_TABLE(tb) PyMutex_Lock(&tb->mutex) +#define UNLOCK_TABLE(tb) PyMutex_Unlock(&tb->mutex) +#define INITIALIZE_LOCK(tb) memset(&tb->mutex, 0, sizeof(PyMutex)) +#define FREE_LOCK(tb) +#endif +#else // the GIL serializes access to the table so no need // for locking if it is enabled #define LOCK_TABLE(tb) @@ -252,7 +259,7 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, NPY_NO_EXPORT PyObject * -PyArrayIdentityHash_GetItem(PyArrayIdentityHash const *tb, PyObject *const *key) +PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) { LOCK_TABLE(tb); PyObject *res = find_item(tb, key)[0]; diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index fdb241667164..42087089bf6d 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -14,7 +14,11 @@ typedef struct { npy_intp size; /* current size */ npy_intp nelem; /* number of elements */ #ifdef Py_GIL_DISABLED +#if PY_VERSION_HEX < 0x30d00b3 PyThread_type_lock *mutex; +#else + PyMutex mutex; +#endif #endif } PyArrayIdentityHash; @@ -24,7 +28,7 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, PyObject *const *key, PyObject *value, int replace); NPY_NO_EXPORT PyObject * -PyArrayIdentityHash_GetItem(PyArrayIdentityHash const *tb, PyObject *const *key); +PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key); NPY_NO_EXPORT PyArrayIdentityHash * PyArrayIdentityHash_New(int key_len); From d7713e66617b8f6fdef843749f4e3bfca9636986 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 22 Jul 2024 14:37:27 -0600 Subject: [PATCH 848/980] MAINT: refactor StringDType to use PyMutex --- .../multiarray/stringdtype/static_string.c | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c 
b/numpy/_core/src/multiarray/stringdtype/static_string.c index c9b5620211dc..4d33479409cd 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -131,7 +131,11 @@ struct npy_string_allocator { npy_string_free_func free; npy_string_realloc_func realloc; npy_string_arena arena; +#if PY_VERSION_HEX < 0x30d00b3 PyThread_type_lock *allocator_lock; +#else + PyMutex allocator_lock; +#endif }; static void @@ -245,18 +249,22 @@ NpyString_new_allocator(npy_string_malloc_func m, npy_string_free_func f, if (allocator == NULL) { return NULL; } +#if PY_VERSION_HEX < 0x30d00b3 PyThread_type_lock *allocator_lock = PyThread_allocate_lock(); if (allocator_lock == NULL) { f(allocator); PyErr_SetString(PyExc_MemoryError, "Unable to allocate thread lock"); return NULL; } + allocator->allocator_lock = allocator_lock; +#else + memset(&allocator->allocator_lock, 0, sizeof(PyMutex)); +#endif allocator->malloc = m; allocator->free = f; allocator->realloc = r; // arena buffer gets allocated in arena_malloc allocator->arena = NEW_ARENA; - allocator->allocator_lock = allocator_lock; return allocator; } @@ -269,9 +277,11 @@ NpyString_free_allocator(npy_string_allocator *allocator) if (allocator->arena.buffer != NULL) { f(allocator->arena.buffer); } +#if PY_VERSION_HEX < 0x30d00b3 if (allocator->allocator_lock != NULL) { PyThread_free_lock(allocator->allocator_lock); } +#endif f(allocator); } @@ -288,9 +298,13 @@ NpyString_free_allocator(npy_string_allocator *allocator) NPY_NO_EXPORT npy_string_allocator * NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) { +#if PY_VERSION_HEX < 0x30d00b3 if (!PyThread_acquire_lock(descr->allocator->allocator_lock, NOWAIT_LOCK)) { PyThread_acquire_lock(descr->allocator->allocator_lock, WAIT_LOCK); } +#else + PyMutex_Lock(&descr->allocator->allocator_lock); +#endif return descr->allocator; } @@ -358,7 +372,11 @@ NpyString_acquire_allocators(size_t n_descriptors, NPY_NO_EXPORT void NpyString_release_allocator(npy_string_allocator *allocator) { +#if PY_VERSION_HEX < 0x30d00b3 PyThread_release_lock(allocator->allocator_lock); +#else + PyMutex_Unlock(&allocator->allocator_lock); +#endif } /*NUMPY_API From dec1f8de33969dac845c98f47b60caf2f012893d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 22 Jul 2024 15:11:45 -0600 Subject: [PATCH 849/980] MAINT: use PyMutex in the argparse cache --- numpy/_core/src/common/npy_argparse.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index eb1597c0ebb9..70cb82bb4b2c 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ -12,15 +12,27 @@ #include "arrayfunction_override.h" +#if PY_VERSION_HEX < 0x30d00b3 static PyThread_type_lock argparse_mutex; +#define LOCK_ARGPARSE_MUTEX \ + PyThread_acquire_lock(argparse_mutex, WAIT_LOCK) +#define UNLOCK_ARGPARSE_MUTEX \ + PyThread_release_lock(argparse_mutex) +#else +static PyMutex argparse_mutex = {0}; +#define LOCK_ARGPARSE_MUTEX PyMutex_Lock(&argparse_mutex) +#define UNLOCK_ARGPARSE_MUTEX PyMutex_Unlock(&argparse_mutex) +#endif NPY_NO_EXPORT int init_argparse_mutex(void) { +#if PY_VERSION_HEX < 0x30d00b3 argparse_mutex = PyThread_allocate_lock(); if (argparse_mutex == NULL) { PyErr_NoMemory(); return -1; } +#endif return 0; } @@ -286,19 +298,19 @@ _npy_parse_arguments(const char *funcname, ...) 
{
     if (!npy_atomic_load_uint8(&cache->initialized)) {
-        PyThread_acquire_lock(argparse_mutex, WAIT_LOCK);
+        LOCK_ARGPARSE_MUTEX;
         if (!npy_atomic_load_uint8(&cache->initialized)) {
             va_list va;
             va_start(va, kwnames);
             int res = initialize_keywords(funcname, cache, va);
             va_end(va);
             if (res < 0) {
-                PyThread_release_lock(argparse_mutex);
+                UNLOCK_ARGPARSE_MUTEX;
                 return -1;
             }
             npy_atomic_store_uint8(&cache->initialized, 1);
         }
-        PyThread_release_lock(argparse_mutex);
+        UNLOCK_ARGPARSE_MUTEX;
     }
 
     if (NPY_UNLIKELY(len_args > cache->npositional)) {

From f92009f21a7edee9b22ec379a8afb5a89cb99477 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Tue, 23 Jul 2024 19:10:42 -0600
Subject: [PATCH 850/980] MAINT: simplify identity hash by dropping support
 for 3.13.0b2 and older

---
 numpy/_core/src/common/npy_hashtable.c | 23 -----------------------
 numpy/_core/src/common/npy_hashtable.h |  2 +-
 2 files changed, 1 insertion(+), 24 deletions(-)

diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.c
index 82cd33e33fb8..5c745ba388cd 100644
--- a/numpy/_core/src/common/npy_hashtable.c
+++ b/numpy/_core/src/common/npy_hashtable.c
@@ -30,37 +30,15 @@
 #endif
 
 #ifdef Py_GIL_DISABLED
-#if PY_VERSION_HEX < 0x30d00b3
-// TODO: replace with PyMutex when it is public
-#define LOCK_TABLE(tb)                                      \
-    if (!PyThread_acquire_lock(tb->mutex, NOWAIT_LOCK)) {   \
-        PyThread_acquire_lock(tb->mutex, WAIT_LOCK);        \
-    }
-#define UNLOCK_TABLE(tb) PyThread_release_lock(tb->mutex);
-#define INITIALIZE_LOCK(tb)                                 \
-    tb->mutex = PyThread_allocate_lock();                   \
-    if (tb->mutex == NULL) {                                \
-        PyErr_NoMemory();                                   \
-        PyMem_Free(res);                                    \
-        return NULL;                                        \
-    }
-#define FREE_LOCK(tb)                                       \
-    if (tb->mutex != NULL) {                                \
-        PyThread_free_lock(tb->mutex);                      \
-    }
-#else
 #define LOCK_TABLE(tb) PyMutex_Lock(&tb->mutex)
 #define UNLOCK_TABLE(tb) PyMutex_Unlock(&tb->mutex)
 #define INITIALIZE_LOCK(tb) memset(&tb->mutex, 0, sizeof(PyMutex))
-#endif
 #else
 // the GIL serializes access to the table so no need
 // for locking if it is enabled
 #define LOCK_TABLE(tb)
 #define UNLOCK_TABLE(tb)
 #define INITIALIZE_LOCK(tb)
-#define FREE_LOCK(tb)
 #endif
 
 /*
@@ -150,7 +128,6 @@ NPY_NO_EXPORT void
 PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb)
 {
     PyMem_Free(tb->buckets);
-    FREE_LOCK(tb);
     PyMem_Free(tb);
 }
 
diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h
index 42087089bf6d..583f3d9861a6 100644
--- a/numpy/_core/src/common/npy_hashtable.h
+++ b/numpy/_core/src/common/npy_hashtable.h
@@ -15,7 +15,7 @@ typedef struct {
     npy_intp nelem;  /* number of elements */
 #ifdef Py_GIL_DISABLED
 #if PY_VERSION_HEX < 0x30d00b3
-    PyThread_type_lock *mutex;
+#error "GIL-disabled builds require Python 3.13.0b3 or newer"
 #else
     PyMutex mutex;
 #endif

From 582d56641bcd451eaca2871918e97ac6cff1d49b Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Sat, 13 Jul 2024 14:49:52 -0700
Subject: [PATCH 851/980] MAINT: only fill resized array with zeros for legacy
 dtypes

---
 numpy/_core/src/multiarray/shape.c    | 5 +++--
 numpy/_core/tests/test_stringdtype.py | 5 +++++
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c
index c33272de4eb5..4b0d2987f703 100644
--- a/numpy/_core/src/multiarray/shape.c
+++ b/numpy/_core/src/multiarray/shape.c
@@ -141,8 +141,9 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
     }
 
     if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) {
-        /* Fill new memory with zeros */
-        if
(PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_ITEM_REFCOUNT)) { + /* Fill new memory with zeros (PyLong zero for object arrays) */ + if (PyDataType_ISLEGACY(PyArray_DESCR(self)) && + PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_ITEM_REFCOUNT)) { PyObject *zero = PyLong_FromLong(0); char *optr; optr = PyArray_BYTES(self) + oldnbytes; diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 9ff3224947d9..92aea8d20058 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -510,6 +510,11 @@ def test_concatenate(string_list): assert_array_equal(np.concatenate([sarr], axis=0), sarr) +def test_resize_method(string_list): + sarr = np.array(string_list, dtype="T") + sarr.resize(len(string_list)+3) + assert_array_equal(sarr, np.array(string_list + ['']*3, dtype="T")) + def test_create_with_copy_none(string_list): arr = np.array(string_list, dtype=StringDType()) From 79d6256d94502ce62c5191febd4dd30bd2325be0 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sat, 13 Jul 2024 15:46:38 -0700 Subject: [PATCH 852/980] TST: fix resize test on pypy --- numpy/_core/tests/test_stringdtype.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 92aea8d20058..069a9c00d5de 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -11,7 +11,7 @@ from numpy.dtypes import StringDType from numpy._core.tests._natype import pd_NA -from numpy.testing import assert_array_equal, IS_WASM +from numpy.testing import assert_array_equal, IS_WASM, IS_PYPY @pytest.fixture @@ -510,9 +510,13 @@ def test_concatenate(string_list): assert_array_equal(np.concatenate([sarr], axis=0), sarr) + def test_resize_method(string_list): sarr = np.array(string_list, dtype="T") - sarr.resize(len(string_list)+3) + if IS_PYPY: + sarr.resize(len(string_list)+3, refcheck=False) + else: + sarr.resize(len(string_list)+3) assert_array_equal(sarr, np.array(string_list + ['']*3, dtype="T")) From fe2b69b60d1761d3087091a329ac225ed54d405e Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 22 Jul 2024 19:24:59 -0600 Subject: [PATCH 853/980] MAINT: add PyArray_ZeroBuffer and use it in PyArray_Resize --- numpy/_core/src/multiarray/refcount.c | 45 ++++++++++++++++++ numpy/_core/src/multiarray/refcount.h | 5 ++ numpy/_core/src/multiarray/shape.c | 66 ++++----------------------- 3 files changed, 58 insertions(+), 58 deletions(-) diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index 1bc693532646..37fe6a016417 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -56,6 +56,51 @@ PyArray_ClearBuffer( } +/* + * Helper function to zero an array buffer. + * + * Here "zeroing" means an abstract zeroing operation, + * which for an array of references might be something + * more complicated than zero-filling the buffer. + * + * Failure (returns -1) indicates some sort of programming or + * logical error and should not happen for a data type that has + * been set up correctly. 
+ */ +NPY_NO_EXPORT int +PyArray_ZeroBuffer( + PyArray_Descr *descr, char *data, + npy_intp stride, npy_intp size, int aligned) +{ + if (!PyDataType_REFCHK(descr)) { + return 0; + } + + NPY_traverse_info zero_info; + NPY_traverse_info_init(&zero_info); + /* Flags unused: float errors do not matter and we do not release GIL */ + NPY_ARRAYMETHOD_FLAGS flags_unused; + PyArrayMethod_GetTraverseLoop *get_fill_zero_loop = + NPY_DT_SLOTS(NPY_DTYPE(descr))->get_fill_zero_loop; + if (get_fill_zero_loop != NULL) { + if (get_fill_zero_loop( + NULL, descr, 1, descr->elsize, &(zero_info.func), + &(zero_info.auxdata), &flags_unused) < 0) { + goto fail; + } + } + + int res = zero_info.func( + NULL, descr, data, size, stride, zero_info.auxdata); + NPY_traverse_info_xfree(&zero_info); + return res; + + fail: + NPY_traverse_info_xfree(&zero_info); + return -1; +} + + /* * Helper function to clear whole array. It seems plausible that we should * be able to get away with assuming the array is contiguous. diff --git a/numpy/_core/src/multiarray/refcount.h b/numpy/_core/src/multiarray/refcount.h index d9f472b2697e..55cb07f60706 100644 --- a/numpy/_core/src/multiarray/refcount.h +++ b/numpy/_core/src/multiarray/refcount.h @@ -6,6 +6,11 @@ PyArray_ClearBuffer( PyArray_Descr *descr, char *data, npy_intp stride, npy_intp size, int aligned); +NPY_NO_EXPORT int +PyArray_ZeroBuffer( + PyArray_Descr *descr, char *data, + npy_intp stride, npy_intp size, int aligned); + NPY_NO_EXPORT int PyArray_ClearArray(PyArrayObject *arr); diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 4b0d2987f703..32c3a72db9ca 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -7,22 +7,17 @@ #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" - #include "numpy/npy_math.h" #include "npy_config.h" - - - #include "arraywrap.h" #include "ctors.h" - #include "shape.h" - #include "npy_static_data.h" /* for interned strings */ #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ #include "common.h" /* for convert_shape_to_string */ #include "alloc.h" +#include "refcount.h" static int _fix_unknown_dimension(PyArray_Dims *newshape, PyArrayObject *arr); @@ -31,9 +26,6 @@ static int _attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims, npy_intp *newstrides, int is_f_order); -static void -_putzero(char *optr, PyObject *zero, PyArray_Descr *dtype); - /*NUMPY_API * Resize (reallocate data). Only works if nothing else is referencing this * array and it is contiguous. 
If refcheck is 0, then the reference count is @@ -142,20 +134,13 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) { /* Fill new memory with zeros (PyLong zero for object arrays) */ - if (PyDataType_ISLEGACY(PyArray_DESCR(self)) && - PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_ITEM_REFCOUNT)) { - PyObject *zero = PyLong_FromLong(0); - char *optr; - optr = PyArray_BYTES(self) + oldnbytes; - npy_intp n_new = newsize - oldsize; - for (npy_intp i = 0; i < n_new; i++) { - _putzero((char *)optr, zero, PyArray_DESCR(self)); - optr += elsize; - } - Py_DECREF(zero); - } - else{ - memset(PyArray_BYTES(self) + oldnbytes, 0, newnbytes - oldnbytes); + npy_intp stride = elsize; + npy_intp size = newsize - oldsize; + char *data = PyArray_BYTES(self) + oldnbytes; + int aligned = PyArray_ISALIGNED(self); + if (PyArray_ZeroBuffer(PyArray_DESCR(self), data, + stride, size, aligned) < 0) { + return NULL; } } @@ -339,41 +324,6 @@ PyArray_Reshape(PyArrayObject *self, PyObject *shape) } -static void -_putzero(char *optr, PyObject *zero, PyArray_Descr *dtype) -{ - if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) { - memset(optr, 0, dtype->elsize); - } - else if (PyDataType_HASFIELDS(dtype)) { - PyObject *key, *value, *title = NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { - if (NPY_TITLE_KEY(key, value)) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { - return; - } - _putzero(optr + offset, zero, new); - } - } - else { - npy_intp i; - npy_intp nsize = dtype->elsize / sizeof(zero); - - for (i = 0; i < nsize; i++) { - Py_INCREF(zero); - memcpy(optr, &zero, sizeof(zero)); - optr += sizeof(zero); - } - } - return; -} - - /* * attempt to reshape an array without copying data * From 4211e7d69ca2d6b98ed43ed58f8ff67d617f6528 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 22 Jul 2024 19:25:17 -0600 Subject: [PATCH 854/980] BUG: fix stringdtype resizing by defining a zero loop --- numpy/_core/src/multiarray/stringdtype/dtype.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 81a846bf6d96..1a293ab483ea 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -584,6 +584,20 @@ stringdtype_get_clear_loop(void *NPY_UNUSED(traverse_context), return 0; } +static int +stringdtype_get_fill_zero_loop(void *NPY_UNUSED(traverse_context), + PyArray_Descr *NPY_UNUSED(descr), + int NPY_UNUSED(aligned), + npy_intp NPY_UNUSED(fixed_stride), + PyArrayMethod_TraverseLoop **out_loop, + NpyAuxData **NPY_UNUSED(out_auxdata), + NPY_ARRAYMETHOD_FLAGS *flags) +{ + *flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + *out_loop = &stringdtype_clear_loop; + return 0; +} + static int stringdtype_is_known_scalar_type(PyArray_DTypeMeta *cls, PyTypeObject *pytype) @@ -648,6 +662,7 @@ static PyType_Slot PyArray_StringDType_Slots[] = { {NPY_DT_PyArray_ArrFuncs_argmax, &argmax}, {NPY_DT_PyArray_ArrFuncs_argmin, &argmin}, {NPY_DT_get_clear_loop, &stringdtype_get_clear_loop}, + {NPY_DT_get_fill_zero_loop, &stringdtype_get_fill_zero_loop}, {NPY_DT_finalize_descr, &stringdtype_finalize_descr}, {_NPY_DT_is_known_scalar_type, &stringdtype_is_known_scalar_type}, {0, NULL}}; From 9715a8da33d202320429cf57c944a4651aef1059 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 23 
Jul 2024 19:38:23 -0600 Subject: [PATCH 855/980] MAINT: refactor so zero-filling defaults to a memset --- numpy/_core/src/multiarray/ctors.c | 6 +++++ numpy/_core/src/multiarray/refcount.c | 26 ++++++++++--------- numpy/_core/src/multiarray/refcount.h | 2 +- numpy/_core/src/multiarray/shape.c | 4 +-- .../_core/src/multiarray/stringdtype/dtype.c | 15 ----------- 5 files changed, 23 insertions(+), 30 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 8ee9d28c3086..5c1a78daf0c5 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -822,6 +822,12 @@ PyArray_NewFromDescr_int( if (data == NULL) { + /* This closely follows PyArray_ZeroContiguousBuffer. We can't use + * that because here we need to allocate after checking if there is + * custom zeroing logic and that function accepts an already-allocated + * array + */ + /* float errors do not matter and we do not release GIL */ NPY_ARRAYMETHOD_FLAGS zero_flags; PyArrayMethod_GetTraverseLoop *get_fill_zero_loop = diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index 37fe6a016417..2f72e8e22771 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -59,23 +59,20 @@ PyArray_ClearBuffer( /* * Helper function to zero an array buffer. * - * Here "zeroing" means an abstract zeroing operation, - * which for an array of references might be something - * more complicated than zero-filling the buffer. + * Here "zeroing" means an abstract zeroing operation, which for an + * array of references might be something more complicated than + * zero-filling the buffer. * - * Failure (returns -1) indicates some sort of programming or - * logical error and should not happen for a data type that has - * been set up correctly. + * Failure (returns -1) indicates some sort of programming or logical + * error and should not happen for a data type that has been set up + * correctly. In principle a sufficiently weird dtype might run out of + * memory but in practice this likely won't happen. 
*/ NPY_NO_EXPORT int -PyArray_ZeroBuffer( +PyArray_ZeroContiguousBuffer( PyArray_Descr *descr, char *data, npy_intp stride, npy_intp size, int aligned) { - if (!PyDataType_REFCHK(descr)) { - return 0; - } - NPY_traverse_info zero_info; NPY_traverse_info_init(&zero_info); /* Flags unused: float errors do not matter and we do not release GIL */ @@ -84,11 +81,16 @@ PyArray_ZeroBuffer( NPY_DT_SLOTS(NPY_DTYPE(descr))->get_fill_zero_loop; if (get_fill_zero_loop != NULL) { if (get_fill_zero_loop( - NULL, descr, 1, descr->elsize, &(zero_info.func), + NULL, descr, aligned, descr->elsize, &(zero_info.func), &(zero_info.auxdata), &flags_unused) < 0) { goto fail; } } + else { + memset(data, 0, size); + NPY_traverse_info_xfree(&zero_info); + return 0; + } int res = zero_info.func( NULL, descr, data, size, stride, zero_info.auxdata); diff --git a/numpy/_core/src/multiarray/refcount.h b/numpy/_core/src/multiarray/refcount.h index 55cb07f60706..41c428f321e4 100644 --- a/numpy/_core/src/multiarray/refcount.h +++ b/numpy/_core/src/multiarray/refcount.h @@ -7,7 +7,7 @@ PyArray_ClearBuffer( npy_intp stride, npy_intp size, int aligned); NPY_NO_EXPORT int -PyArray_ZeroBuffer( +PyArray_ZeroContiguousBuffer( PyArray_Descr *descr, char *data, npy_intp stride, npy_intp size, int aligned); diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 32c3a72db9ca..340fe7289ac8 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -138,8 +138,8 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, npy_intp size = newsize - oldsize; char *data = PyArray_BYTES(self) + oldnbytes; int aligned = PyArray_ISALIGNED(self); - if (PyArray_ZeroBuffer(PyArray_DESCR(self), data, - stride, size, aligned) < 0) { + if (PyArray_ZeroContiguousBuffer(PyArray_DESCR(self), data, + stride, size, aligned) < 0) { return NULL; } } diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 1a293ab483ea..81a846bf6d96 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -584,20 +584,6 @@ stringdtype_get_clear_loop(void *NPY_UNUSED(traverse_context), return 0; } -static int -stringdtype_get_fill_zero_loop(void *NPY_UNUSED(traverse_context), - PyArray_Descr *NPY_UNUSED(descr), - int NPY_UNUSED(aligned), - npy_intp NPY_UNUSED(fixed_stride), - PyArrayMethod_TraverseLoop **out_loop, - NpyAuxData **NPY_UNUSED(out_auxdata), - NPY_ARRAYMETHOD_FLAGS *flags) -{ - *flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; - *out_loop = &stringdtype_clear_loop; - return 0; -} - static int stringdtype_is_known_scalar_type(PyArray_DTypeMeta *cls, PyTypeObject *pytype) @@ -662,7 +648,6 @@ static PyType_Slot PyArray_StringDType_Slots[] = { {NPY_DT_PyArray_ArrFuncs_argmax, &argmax}, {NPY_DT_PyArray_ArrFuncs_argmin, &argmin}, {NPY_DT_get_clear_loop, &stringdtype_get_clear_loop}, - {NPY_DT_get_fill_zero_loop, &stringdtype_get_fill_zero_loop}, {NPY_DT_finalize_descr, &stringdtype_finalize_descr}, {_NPY_DT_is_known_scalar_type, &stringdtype_is_known_scalar_type}, {0, NULL}}; From c4770b7c577e8490e8ed734a50515177d83b4d5c Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 23 Jul 2024 20:15:53 -0600 Subject: [PATCH 856/980] BUG: properly zero-fill the buffer --- numpy/_core/src/multiarray/refcount.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index 
2f72e8e22771..4fc19979bac5 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -18,8 +18,8 @@ #include "iterators.h" #include "dtypemeta.h" #include "refcount.h" - #include "npy_config.h" +#include "templ_common.h" /* for npy_mul_sizes_with_overflow */ @@ -87,9 +87,17 @@ PyArray_ZeroContiguousBuffer( } } else { - memset(data, 0, size); - NPY_traverse_info_xfree(&zero_info); - return 0; + npy_intp nbytes; + if (!npy_mul_sizes_with_overflow(&nbytes, size, stride)) { + memset(data, 0, nbytes); + NPY_traverse_info_xfree(&zero_info); + return 0; + } + else { + PyErr_SetString(PyExc_OverflowError, + "Integer overflow in computing resized buffer size"); + goto fail; + } } int res = zero_info.func( From 5f2dd35aa676e0ed0fca441f0b54452f55b89230 Mon Sep 17 00:00:00 2001 From: GUAN MING Date: Wed, 24 Jul 2024 11:27:28 +0800 Subject: [PATCH 857/980] MAINT: update test and description --- numpy/_core/_internal.py | 4 ++-- numpy/_core/tests/test_umath.py | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index e50c5434588f..74f5ba2d55cc 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -862,8 +862,8 @@ def _prod(a): def _gcd(a, b): """Calculate the greatest common divisor of a and b""" if not (math.isfinite(a) and math.isfinite(b)): - raise ValueError('Can only find greatest common demoninator of a ' - f'finite argument, found "{a}" and "{b}"') + raise ValueError('Can only find greatest common demoninator of ' + f'finite arguments, found "{a}" and "{b}"') while b: a, b = b, a % b return a diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index a7944e54b334..44a02572cb55 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4142,6 +4142,9 @@ def test_inf(self): inf = np.array([np.inf], dtype=np.object_) assert_raises(ValueError, np.gcd, inf, 1) assert_raises(ValueError, np.gcd, 1, inf) + assert_raises(ValueError, np.gcd, np.nan, inf) + assert_raises(TypeError, np.gcd, 4, float(np.inf)) + class TestRoundingFunctions: From b985b15e71700bddffdcdb059607708f03047cd9 Mon Sep 17 00:00:00 2001 From: GUAN MING <105915352+guan404ming@users.noreply.github.com> Date: Wed, 24 Jul 2024 16:33:54 +0800 Subject: [PATCH 858/980] TST: update test name Co-authored-by: Pieter Eendebak --- numpy/_core/tests/test_umath.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 44a02572cb55..0b1fc2dd0632 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4138,7 +4138,7 @@ def test_huge_integers(self): assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5]) assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10]) - def test_inf(self): + def test_inf_and_nan(self): inf = np.array([np.inf], dtype=np.object_) assert_raises(ValueError, np.gcd, inf, 1) assert_raises(ValueError, np.gcd, 1, inf) From c64d775e872aaabac8ea258250c66921c7e7ed70 Mon Sep 17 00:00:00 2001 From: GUAN MING <105915352+guan404ming@users.noreply.github.com> Date: Wed, 24 Jul 2024 16:34:48 +0800 Subject: [PATCH 859/980] MAINT: update error message Co-authored-by: Pieter Eendebak --- numpy/_core/_internal.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index 74f5ba2d55cc..6def5128f37f 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -862,7 +862,7 @@ def 
_prod(a):
 def _gcd(a, b):
     """Calculate the greatest common divisor of a and b"""
     if not (math.isfinite(a) and math.isfinite(b)):
-        raise ValueError('Can only find greatest common demoninator of '
+        raise ValueError('Can only find greatest common divisor of '
                           f'finite arguments, found "{a}" and "{b}"')
     while b:
         a, b = b, a % b
     return a

From 84ccac8151c35da487e2c4bc84da1092f7624d2b Mon Sep 17 00:00:00 2001
From: Ross Barnowski
Date: Wed, 24 Jul 2024 02:44:08 -0700
Subject: [PATCH 860/980] DOC: Replace np.matrix in view docstring example.

Replace the use of np.matrix in .view() example illustrating the
use-case where both dtype and type are changed.
---
 numpy/_core/_add_newdocs.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py
index 3e5829cce9d9..2f6b8af5078e 100644
--- a/numpy/_core/_add_newdocs.py
+++ b/numpy/_core/_add_newdocs.py
@@ -4612,15 +4612,16 @@
 
     Examples
     --------
-    >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
+    >>> x = np.array([(-1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
 
     Viewing array data using a different type and dtype:
 
-    >>> y = x.view(dtype=np.int16, type=np.matrix)
-    >>> y
-    matrix([[513]], dtype=int16)
-    >>> print(type(y))
-    <class 'numpy.matrix'>
+    >>> nonneg = np.dtype([("a", np.uint8), ("b", np.uint8)])
+    >>> y = x.view(dtype=nonneg, type=np.recarray)
+    >>> x["a"]
+    array([-1], dtype=int8)
+    >>> y.a
+    array([255], dtype=uint8)
 
     Creating a view on a structured array so it can be used in calculations
 
From 1c9746b2c2881c8007979577f87b4a72d7a20bdc Mon Sep 17 00:00:00 2001
From: GUAN MING <105915352+guan404ming@users.noreply.github.com>
Date: Wed, 24 Jul 2024 20:23:21 +0800
Subject: [PATCH 861/980] MAINT: update description

Co-authored-by: Nathan Goldbaum
---
 numpy/_core/_internal.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py
index 6def5128f37f..c0142bf44f03 100644
--- a/numpy/_core/_internal.py
+++ b/numpy/_core/_internal.py
@@ -863,7 +863,7 @@ def _gcd(a, b):
     """Calculate the greatest common divisor of a and b"""
     if not (math.isfinite(a) and math.isfinite(b)):
         raise ValueError('Can only find greatest common divisor of '
-                          f'finite arguments, found "{a}" and "{b}"')
+                         f'finite arguments, found "{a}" and "{b}"')
     while b:
         a, b = b, a % b
     return a

From a69b7e6161ab8b523d8f950229bf5cb5435a3380 Mon Sep 17 00:00:00 2001
From: Gabriel Fougeron
Date: Wed, 24 Jul 2024 14:57:50 +0200
Subject: [PATCH 862/980] Tiny typo in docs

---
 benchmarks/README.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/benchmarks/README.rst b/benchmarks/README.rst
index e44f8fe02f1e..e7e42a377819 100644
--- a/benchmarks/README.rst
+++ b/benchmarks/README.rst
@@ -127,4 +127,4 @@ Some things to consider:
   you are benchmarking an algorithm, it is unlikely that a user will be
   executing said algorithm on a newly created empty/zero array. One can
   force pagefaults to occur in the setup phase either by calling ``np.ones`` or
-  ``arr.fill(value)`` after creating the array,
+  ``arr.fill(value)`` after creating the array.

From 94854dbc772ae454b38a64f34e62b8ad42602659 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Wed, 24 Jul 2024 14:37:29 +0200
Subject: [PATCH 863/980] BUG: Fix simd loadable stride logic

It was never guaranteed that strides are divisible by their itemsize;
*some* code checked for this correctly, while other code had asserts in
place that there was no reason to expect to hold.
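For illustration, the guard that previously had to be open-coded at every
call site (and was sometimes skipped or merely asserted) looks roughly like
this. This is a minimal standalone sketch, not the exact NumPy macro: the
real version additionally applies the modulo check only when the lane type's
alignment differs from its size, and `loadable_stride` and `MAXLOAD` here
are illustrative stand-ins rather than real NumPy names:

    #include <stdlib.h>   /* llabs */
    #include <stddef.h>   /* size_t */

    typedef long long npy_intp;   /* stand-in for the real npy_intp */
    #define MAXLOAD 0             /* 0 means "no limit", like NPY_SIMD_MAXLOAD_STRIDE32 */

    /* Return 1 if a byte step is usable for strided SIMD loads of
     * `itemsize`-sized lanes: it must be a whole multiple of the itemsize
     * and, when a limit is configured, small enough in magnitude. */
    static int
    loadable_stride(npy_intp step, size_t itemsize)
    {
        if (step % (npy_intp)itemsize != 0) {
            return 0;   /* not a multiple of the itemsize; use the scalar loop */
        }
        npy_intp stride = step / (npy_intp)itemsize;
        return MAXLOAD > 0 ? llabs(stride) <= MAXLOAD : 1;
    }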
This moves the check into the `loadable_stride` macro. The exp
implementation has its own custom logic, which is probably OK. I assume
that compilers will optimize the duplicate division away; if that turns
out not to be the case, the code should be reworked to avoid it.
---
 numpy/_core/src/common/simd/simd.h            | 37 +++++++++----
 .../src/umath/loops_arithm_fp.dispatch.c.src  | 39 +++++++-------
 .../umath/loops_exponent_log.dispatch.c.src   | 18 +++----
 .../src/umath/loops_hyperbolic.dispatch.c.src | 29 +++++-----
 .../src/umath/loops_minmax.dispatch.c.src     |  6 +--
 .../umath/loops_trigonometric.dispatch.cpp    | 46 ++++++++--------
 .../src/umath/loops_umath_fp.dispatch.c.src   | 54 +++++++++++--------
 .../src/umath/loops_unary.dispatch.c.src      | 10 ++--
 .../umath/loops_unary_complex.dispatch.c.src  | 10 ++--
 .../src/umath/loops_unary_fp.dispatch.c.src   |  9 ++--
 .../umath/loops_unary_fp_le.dispatch.c.src    | 17 +++---
 11 files changed, 149 insertions(+), 126 deletions(-)

diff --git a/numpy/_core/src/common/simd/simd.h b/numpy/_core/src/common/simd/simd.h
index 2d9d48cf1cdd..fc0024131768 100644
--- a/numpy/_core/src/common/simd/simd.h
+++ b/numpy/_core/src/common/simd/simd.h
@@ -123,10 +123,11 @@ typedef double npyv_lanetype_f64;
  * acceptable limit of strides before using any of non-contiguous load/store intrinsics.
  *
  * For instance:
- *  npy_intp ld_stride = step[0] / sizeof(float);
- *  npy_intp st_stride = step[1] / sizeof(float);
  *
- *  if (npyv_loadable_stride_f32(ld_stride) && npyv_storable_stride_f32(st_stride)) {
+ *  if (npyv_loadable_stride_f32(steps[0]) && npyv_storable_stride_f32(steps[1])) {
+ *      // Strides are now guaranteed to be a multiple and compatible
+ *      npy_intp ld_stride = steps[0] / sizeof(float);
+ *      npy_intp st_stride = steps[1] / sizeof(float);
 *      for (;;)
 *          npyv_f32 a = npyv_loadn_f32(ld_pointer, ld_stride);
 *          // ...
@@ -134,7 +135,7 @@ typedef double npyv_lanetype_f64;
 *  }
 *  else {
 *      for (;;)
- *          // C scalars
+ *          // C scalars, use byte steps/strides.
 *  }
 */
 #ifndef NPY_SIMD_MAXLOAD_STRIDE32
@@ -149,11 +150,29 @@ typedef double npyv_lanetype_f64;
 #ifndef NPY_SIMD_MAXSTORE_STRIDE64
 #define NPY_SIMD_MAXSTORE_STRIDE64 0
 #endif
-#define NPYV_IMPL_MAXSTRIDE(SFX, MAXLOAD, MAXSTORE) \
-    NPY_FINLINE int npyv_loadable_stride_##SFX(npy_intp stride) \
-    { return MAXLOAD > 0 ? llabs(stride) <= MAXLOAD : 1; } \
-    NPY_FINLINE int npyv_storable_stride_##SFX(npy_intp stride) \
-    { return MAXSTORE > 0 ? llabs(stride) <= MAXSTORE : 1; }
+#define NPYV_IMPL_MAXSTRIDE(SFX, MAXLOAD, MAXSTORE)                         \
+    NPY_FINLINE int                                                         \
+    npyv_loadable_stride_##SFX(npy_intp stride)                             \
+    {                                                                       \
+        if (_Alignof(npyv_lanetype_##SFX) != sizeof(npyv_lanetype_##SFX) && \
+            stride % sizeof(npyv_lanetype_##SFX) != 0) {                    \
+            /* stride not a multiple of itemsize, cannot handle. */         \
+            return 0;                                                       \
+        }                                                                   \
+        stride = stride / sizeof(npyv_lanetype_##SFX);                      \
+        return MAXLOAD > 0 ? llabs(stride) <= MAXLOAD : 1;                  \
+    }                                                                       \
+    NPY_FINLINE int                                                         \
+    npyv_storable_stride_##SFX(npy_intp stride)                             \
+    {                                                                       \
+        if (_Alignof(npyv_lanetype_##SFX) != sizeof(npyv_lanetype_##SFX) && \
+            stride % sizeof(npyv_lanetype_##SFX) != 0) {                    \
+            /* stride not a multiple of itemsize, cannot handle. */         \
+            return 0;                                                       \
+        }                                                                   \
+        stride = stride / sizeof(npyv_lanetype_##SFX);                      \
+        return MAXSTORE > 0 ?
llabs(stride) <= MAXSTORE : 1; \ + } #if NPY_SIMD NPYV_IMPL_MAXSTRIDE(u32, NPY_SIMD_MAXLOAD_STRIDE32, NPY_SIMD_MAXSTORE_STRIDE32) NPYV_IMPL_MAXSTRIDE(s32, NPY_SIMD_MAXLOAD_STRIDE32, NPY_SIMD_MAXSTORE_STRIDE32) diff --git a/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src b/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src index a5453501836e..21e01c115a7d 100644 --- a/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src @@ -346,14 +346,17 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) && __apple_build_version__ < 14030000 goto loop_scalar; #endif // end affected Apple clang. + if (is_mem_overlap(b_src0, b_ssrc0, b_dst, b_sdst, len) || is_mem_overlap(b_src1, b_ssrc1, b_dst, b_sdst, len) || - b_sdst % sizeof(@ftype@) != 0 || b_sdst == 0 || - b_ssrc0 % sizeof(@ftype@) != 0 || - b_ssrc1 % sizeof(@ftype@) != 0 + !npyv_loadable_stride_@sfx@(b_ssrc0) || + !npyv_loadable_stride_@sfx@(b_ssrc1) || + !npyv_storable_stride_@sfx@(b_sdst) || + b_sdst == 0 ) { goto loop_scalar; } + const @ftype@ *src0 = (@ftype@*)b_src0; const @ftype@ *src1 = (@ftype@*)b_src1; @ftype@ *dst = (@ftype@*)b_dst; @@ -366,10 +369,6 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) const int wstep = vstep * 2; const int hstep = vstep / 2; - const int loadable0 = npyv_loadable_stride_s64(ssrc0); - const int loadable1 = npyv_loadable_stride_s64(ssrc1); - const int storable = npyv_storable_stride_s64(sdst); - // lots**lots of specializations, to squeeze out max performance // contig if (ssrc0 == 2 && ssrc0 == ssrc1 && ssrc0 == sdst) { @@ -414,7 +413,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) } } // non-contig - else if (loadable1 && storable) { + else { for (; len >= vstep; len -= vstep, src1 += ssrc1*vstep, dst += sdst*vstep) { npyv_@sfx@ b0 = npyv_loadn2_@sfx@(src1, ssrc1); npyv_@sfx@ b1 = npyv_loadn2_@sfx@(src1 + ssrc1*hstep, ssrc1); @@ -433,9 +432,6 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_storen2_till_@sfx@(dst, sdst, len, r); } } - else { - goto loop_scalar; - } } // scalar 1 else if (ssrc1 == 0) { @@ -460,7 +456,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) } } // non-contig - else if (loadable0 && storable) { + else { for (; len >= vstep; len -= vstep, src0 += ssrc0*vstep, dst += sdst*vstep) { npyv_@sfx@ a0 = npyv_loadn2_@sfx@(src0, ssrc0); npyv_@sfx@ a1 = npyv_loadn2_@sfx@(src0 + ssrc0*hstep, ssrc0); @@ -479,13 +475,10 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_storen2_till_@sfx@(dst, sdst, len, r); } } - else { - goto loop_scalar; - } } #if @is_mul@ // non-contig - else if (loadable0 && loadable1 && storable) { + else { for (; len >= vstep; len -= vstep, src0 += ssrc0*vstep, src1 += ssrc1*vstep, dst += sdst*vstep ) { @@ -512,12 +505,16 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_storen2_till_@sfx@(dst, sdst, len, r); } } - #endif + #else /* @is_mul@ */ else { + // Only multiply is vectorized for the generic non-contig case. 
goto loop_scalar; } + #endif /* @is_mul@ */ + npyv_cleanup(); return; + loop_scalar: #endif for (; len > 0; --len, b_src0 += b_ssrc0, b_src1 += b_ssrc1, b_dst += b_sdst) { @@ -580,8 +577,8 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npy_intp b_ssrc = steps[0], b_sdst = steps[1]; #if @VECTOR@ if (is_mem_overlap(b_src, b_ssrc, b_dst, b_sdst, len) || - b_sdst % sizeof(@ftype@) != 0 || - b_ssrc % sizeof(@ftype@) != 0 + !npyv_loadable_stride_@sfx@(b_ssrc) || + !npyv_storable_stride_@sfx@(b_sdst) ) { goto loop_scalar; } @@ -609,7 +606,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_store2_till_@sfx@(dst, len, r); } } - else if (ssrc == 2 && npyv_storable_stride_s64(sdst)) { + else if (ssrc == 2) { for (; len >= vstep; len -= vstep, src += wstep, dst += sdst*vstep) { npyv_@sfx@ a0 = npyv_load_@sfx@(src); npyv_@sfx@ a1 = npyv_load_@sfx@(src + vstep); @@ -624,7 +621,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_storen2_till_@sfx@(dst, sdst, len, r); } } - else if (sdst == 2 && npyv_loadable_stride_s64(ssrc)) { + else if (sdst == 2) { for (; len >= vstep; len -= vstep, src += ssrc*vstep, dst += wstep) { npyv_@sfx@ a0 = npyv_loadn2_@sfx@(src, ssrc); npyv_@sfx@ a1 = npyv_loadn2_@sfx@(src + ssrc*hstep, ssrc); diff --git a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src index 159e275bd45e..4285708fe703 100644 --- a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src +++ b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src @@ -1315,16 +1315,16 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { #if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) - const npy_double *src = (npy_double*)args[0]; - npy_double *dst = (npy_double*)args[1]; - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; const npy_intp len = dimensions[0]; - assert(steps[0] % lsize == 0 && steps[1] % lsize == 0); - if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && - npyv_loadable_stride_f64(ssrc) && - npyv_storable_stride_f64(sdst)) { + + if (!is_mem_overlap(args[0], steps[0], args[1], steps[1], len) && + npyv_loadable_stride_f64(steps[0]) && + npyv_storable_stride_f64(steps[1])) { + const npy_double *src = (npy_double*)args[0]; + npy_double *dst = (npy_double*)args[1]; + const npy_intp ssrc = steps[0] / sizeof(src[0]); + const npy_intp sdst = steps[1] / sizeof(src[0]); + simd_@func@_f64(src, ssrc, dst, sdst, len); return; } diff --git a/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src b/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src index 8e09de941168..9043e1cd6a73 100644 --- a/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src +++ b/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src @@ -9,6 +9,8 @@ #include "simd/simd.h" #include "loops_utils.h" #include "loops.h" +// Provides the various *_LOOP macros +#include "fast_loop_macros.h" #if NPY_SIMD_FMA3 // native support /* @@ -608,32 +610,29 @@ simd_tanh_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, npy_in NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { - const @type@ *src = (@type@*)args[0]; - @type@ *dst = (@type@*)args[1]; - - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = 
steps[1] / lsize; npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); + #if @simd@ - if (is_mem_overlap(src, steps[0], dst, steps[1], len) || - !npyv_loadable_stride_@sfx@(ssrc) || !npyv_storable_stride_@sfx@(sdst) + if (is_mem_overlap(args[0], steps[0], args[1], steps[1], len) || + !npyv_loadable_stride_@sfx@(steps[0]) || + !npyv_storable_stride_@sfx@(steps[1]) ) { - for (; len > 0; --len, src += ssrc, dst += sdst) { - simd_@func@_@sfx@(src, 1, dst, 1, 1); + UNARY_LOOP { + simd_@func@_@sfx@((@type@ *)ip1, 1, (@type@ *)op1, 1, 1); } } else { - simd_@func@_@sfx@(src, ssrc, dst, sdst, len); + npy_intp ssrc = steps[0] / sizeof(@type@); + npy_intp sdst = steps[1] / sizeof(@type@); + simd_@func@_@sfx@((@type@ *)args[0], ssrc, (@type@ *)args[1], sdst, len); } npyv_cleanup(); #if @simd_req_clear@ npy_clear_floatstatus_barrier((char*)dimensions); #endif #else - for (; len > 0; --len, src += ssrc, dst += sdst) { - const @type@ src0 = *src; - *dst = npy_@func@@ssfx@(src0); + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = npy_@func@@ssfx@(in1); } #endif } diff --git a/numpy/_core/src/umath/loops_minmax.dispatch.c.src b/numpy/_core/src/umath/loops_minmax.dispatch.c.src index 319072c01fbe..a67e7d490f5b 100644 --- a/numpy/_core/src/umath/loops_minmax.dispatch.c.src +++ b/numpy/_core/src/umath/loops_minmax.dispatch.c.src @@ -352,9 +352,9 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) } // unroll scalars faster than non-contiguous vector load/store on Arm #if !defined(NPY_HAVE_NEON) && @is_fp@ - if (TO_SIMD_SFX(npyv_loadable_stride)(is1/sizeof(STYPE)) && - TO_SIMD_SFX(npyv_loadable_stride)(is2/sizeof(STYPE)) && - TO_SIMD_SFX(npyv_storable_stride)(os1/sizeof(STYPE)) + if (TO_SIMD_SFX(npyv_loadable_stride)(is1) && + TO_SIMD_SFX(npyv_loadable_stride)(is2) && + TO_SIMD_SFX(npyv_storable_stride)(os1) ) { TO_SIMD_SFX(simd_binary_@intrin@)( (STYPE*)ip1, is1/sizeof(STYPE), diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index 746e3b92263e..1bc6ecfb14d6 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -214,21 +214,22 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_sin) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { #if NPY_SIMD_FMA3 - const npy_float *src = (npy_float*)args[0]; - npy_float *dst = (npy_float*)args[1]; - - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - if (is_mem_overlap(src, steps[0], dst, steps[1], len) || - !npyv_loadable_stride_f32(ssrc) || !npyv_storable_stride_f32(sdst) + + if (is_mem_overlap(args[0], steps[0], args[1], steps[1], len) || + !npyv_loadable_stride_f32(steps[0]) || + !npyv_storable_stride_f32(steps[1]) ) { - for (; len > 0; --len, src += ssrc, dst += sdst) { - simd_sincos_f32(src, 1, dst, 1, 1, SIMD_COMPUTE_SIN); + UNARY_LOOP { + simd_sincos_f32( + (npy_float *)ip1, 1, (npy_float *)op1, 1, 1, SIMD_COMPUTE_SIN); } } else { + const npy_float *src = (npy_float*)args[0]; + npy_float *dst = (npy_float*)args[1]; + const npy_intp ssrc = steps[0] / sizeof(npy_float); + const npy_intp sdst = steps[1] / sizeof(npy_float); + simd_sincos_f32(src, ssrc, dst, sdst, len, SIMD_COMPUTE_SIN); } #else @@ -243,21 +244,22 @@ NPY_NO_EXPORT void 
NPY_CPU_DISPATCH_CURFX(FLOAT_cos) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { #if NPY_SIMD_FMA3 - const npy_float *src = (npy_float*)args[0]; - npy_float *dst = (npy_float*)args[1]; - - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - if (is_mem_overlap(src, steps[0], dst, steps[1], len) || - !npyv_loadable_stride_f32(ssrc) || !npyv_storable_stride_f32(sdst) + + if (is_mem_overlap(args[0], steps[0], args[1], steps[1], len) || + !npyv_loadable_stride_f32(steps[0]) || + !npyv_storable_stride_f32(steps[1]) ) { - for (; len > 0; --len, src += ssrc, dst += sdst) { - simd_sincos_f32(src, 1, dst, 1, 1, SIMD_COMPUTE_COS); + UNARY_LOOP { + simd_sincos_f32( + (npy_float *)ip1, 1, (npy_float *)op1, 1, 1, SIMD_COMPUTE_COS); } } else { + const npy_float *src = (npy_float*)args[0]; + npy_float *dst = (npy_float*)args[1]; + const npy_intp ssrc = steps[0] / sizeof(npy_float); + const npy_intp sdst = steps[1] / sizeof(npy_float); + simd_sincos_f32(src, ssrc, dst, sdst, len, SIMD_COMPUTE_COS); } #else diff --git a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src index 74af8edaa1f5..9711cef84173 100644 --- a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src @@ -199,16 +199,16 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { #if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) - const @type@ *src = (@type@*)args[0]; - @type@ *dst = (@type@*)args[1]; - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; const npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); + if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && - npyv_loadable_stride_@sfx@(ssrc) && - npyv_storable_stride_@sfx@(sdst)) { + npyv_loadable_stride_@sfx@(steps[0]) && + npyv_storable_stride_@sfx@(steps[1])) + { + const @type@ *src = (@type@*)args[0]; + @type@ *dst = (@type@*)args[1]; + const npy_intp ssrc = steps[0] / sizeof(@type@); + const npy_intp sdst = steps[1] / sizeof(@type@); simd_@intrin@_@sfx@(src, ssrc, dst, sdst, len); return; } @@ -251,15 +251,19 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) const @type@ *src1 = (@type@*)args[0]; const @type@ *src2 = (@type@*)args[1]; @type@ *dst = (@type@*)args[2]; - const int lsize = sizeof(src1[0]); - const npy_intp ssrc1 = steps[0] / lsize; - const npy_intp ssrc2 = steps[1] / lsize; - const npy_intp sdst = steps[2] / lsize; + const npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && !is_mem_overlap(src2, steps[1], dst, steps[2], len) && - npyv_loadable_stride_@sfx@(ssrc1) && npyv_loadable_stride_@sfx@(ssrc2) && - npyv_storable_stride_@sfx@(sdst)) { + + if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && + !is_mem_overlap(src2, steps[1], dst, steps[2], len) && + npyv_loadable_stride_@sfx@(steps[0]) && + npyv_loadable_stride_@sfx@(steps[1]) && + npyv_storable_stride_@sfx@(steps[2]) + ) { + const npy_intp ssrc1 = steps[0] / sizeof(@type@); + const npy_intp ssrc2 = steps[1] / sizeof(@type@); + const 
npy_intp sdst = steps[2] / sizeof(@type@); + simd_@intrin@_@sfx@(src1, ssrc1, src2, ssrc2, dst, sdst, len); return; } @@ -283,15 +287,19 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) const @type@ *src1 = (@type@*)args[0]; const @type@ *src2 = (@type@*)args[1]; @type@ *dst = (@type@*)args[2]; - const int lsize = sizeof(src1[0]); - const npy_intp ssrc1 = steps[0] / lsize; - const npy_intp ssrc2 = steps[1] / lsize; - const npy_intp sdst = steps[2] / lsize; + const npy_intp len = dimensions[0]; assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && !is_mem_overlap(src2, steps[1], dst, steps[2], len) && - npyv_loadable_stride_@sfx@(ssrc1) && npyv_loadable_stride_@sfx@(ssrc2) && - npyv_storable_stride_@sfx@(sdst)) { + if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && + !is_mem_overlap(src2, steps[1], dst, steps[2], len) && + npyv_loadable_stride_@sfx@(steps[0]) && + npyv_loadable_stride_@sfx@(steps[1]) && + npyv_storable_stride_@sfx@(steps[2]) + ) { + const npy_intp ssrc1 = steps[0] / sizeof(@type@); + const npy_intp ssrc2 = steps[1] / sizeof(@type@); + const npy_intp sdst = steps[2] / sizeof(@type@); + simd_@intrin@_@sfx@(src1, ssrc1, src2, ssrc2, dst, sdst, len); return; } diff --git a/numpy/_core/src/umath/loops_unary.dispatch.c.src b/numpy/_core/src/umath/loops_unary.dispatch.c.src index bfe4d892d0c9..4c87c2279c3b 100644 --- a/numpy/_core/src/umath/loops_unary.dispatch.c.src +++ b/numpy/_core/src/umath/loops_unary.dispatch.c.src @@ -298,12 +298,12 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) goto clear; } #if @supports_ncontig@ - const npy_intp istride = istep / sizeof(STYPE); - const npy_intp ostride = ostep / sizeof(STYPE); - if (TO_SIMD_SFX(npyv_loadable_stride)(istride) && - TO_SIMD_SFX(npyv_storable_stride)(ostride)) + if (TO_SIMD_SFX(npyv_loadable_stride)(istep) && + TO_SIMD_SFX(npyv_storable_stride)(ostep)) { - if (istride == 1 && ostride != 1) { + const npy_intp istride = istep / sizeof(STYPE); + const npy_intp ostride = ostep / sizeof(STYPE); + if (istride == sizeof(STYPE) && ostride != 1) { // contiguous input, non-contiguous output TO_SIMD_SFX(simd_unary_cn_@intrin@)( (STYPE*)ip, (STYPE*)op, ostride, len diff --git a/numpy/_core/src/umath/loops_unary_complex.dispatch.c.src b/numpy/_core/src/umath/loops_unary_complex.dispatch.c.src index 052ad464c7a8..ede46485313b 100644 --- a/numpy/_core/src/umath/loops_unary_complex.dispatch.c.src +++ b/numpy/_core/src/umath/loops_unary_complex.dispatch.c.src @@ -88,14 +88,14 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_absolute) { #if @VECTOR@ npy_intp len = dimensions[0]; - npy_intp ssrc = steps[0] / sizeof(@ftype@); - npy_intp sdst = steps[1] / sizeof(@ftype@); if (!is_mem_overlap(args[0], steps[0], args[1], steps[1], len) && - npyv_loadable_stride_@sfx@(ssrc) && npyv_storable_stride_@sfx@(sdst) - && steps[0] % sizeof(@ftype@) == 0 - && steps[1] % sizeof(@ftype@) == 0 + npyv_loadable_stride_@sfx@(steps[0]) && + npyv_storable_stride_@sfx@(steps[1]) ) { + npy_intp ssrc = steps[0] / sizeof(@ftype@); + npy_intp sdst = steps[1] / sizeof(@ftype@); + const @ftype@ *src = (@ftype@*)args[0]; @ftype@ *dst = (@ftype@*)args[1]; diff --git a/numpy/_core/src/umath/loops_unary_fp.dispatch.c.src b/numpy/_core/src/umath/loops_unary_fp.dispatch.c.src index f6404f6f7d68..6cce02cd37bc 100644 --- a/numpy/_core/src/umath/loops_unary_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_unary_fp.dispatch.c.src @@ -212,15 +212,16 @@ NPY_NO_EXPORT 
void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npy_intp len = dimensions[0]; #if @VCHK@ const int lsize = sizeof(npyv_lanetype_@sfx@); - assert(len <= 1 || (src_step % lsize == 0 && dst_step % lsize == 0)); + if (is_mem_overlap(src, src_step, dst, dst_step, len)) { goto no_unroll; } - const npy_intp ssrc = src_step / lsize; - const npy_intp sdst = dst_step / lsize; - if (!npyv_loadable_stride_@sfx@(ssrc) || !npyv_storable_stride_@sfx@(sdst)) { + if (!npyv_loadable_stride_@sfx@(src_step) || !npyv_storable_stride_@sfx@(dst_step)) { goto no_unroll; } + + const npy_intp ssrc = src_step / lsize; + const npy_intp sdst = dst_step / lsize; if (ssrc == 1 && sdst == 1) { simd_@TYPE@_@kind@_CONTIG_CONTIG(src, 1, dst, 1, len); } diff --git a/numpy/_core/src/umath/loops_unary_fp_le.dispatch.c.src b/numpy/_core/src/umath/loops_unary_fp_le.dispatch.c.src index ba133dc1e60f..9f7ed6c1dfc4 100644 --- a/numpy/_core/src/umath/loops_unary_fp_le.dispatch.c.src +++ b/numpy/_core/src/umath/loops_unary_fp_le.dispatch.c.src @@ -528,17 +528,14 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) const npy_intp istep = steps[0]; const npy_intp ostep = steps[1]; npy_intp len = dimensions[0]; - const int ilsize = sizeof(npyv_lanetype_@sfx@); - const int olsize = sizeof(npy_bool); - const npy_intp istride = istep / ilsize; - const npy_intp ostride = ostep / olsize; - assert(len <= 1 || ostep % olsize == 0); - - if ((istep % ilsize == 0) && - !is_mem_overlap(ip, istep, op, ostep, len) && - npyv_loadable_stride_@sfx@(istride) && - npyv_storable_stride_@sfx@(ostride)) + + if (!is_mem_overlap(ip, istep, op, ostep, len) && + npyv_loadable_stride_@sfx@(istep) && + npyv_storable_stride_@sfx@(ostep)) { + const npy_intp istride = istep / sizeof(npyv_lanetype_@sfx@); + const npy_intp ostride = ostep / sizeof(npy_bool); + if (istride == 1 && ostride == 1) { simd_unary_@kind@_@TYPE@_CONTIG_CONTIG(ip, 1, op, 1, len); } From 9fc6a965c56d56aff09da637cda814c62f1ca872 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 24 Jul 2024 15:43:40 +0200 Subject: [PATCH 864/980] TST: Add test to partially cover SIMD alignment issues --- numpy/_core/tests/test_ufunc.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index e777d7e07be3..aba729167bd8 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2725,7 +2725,7 @@ def test_ufunc_types(ufunc): @pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) if isinstance(getattr(np, x), np.ufunc)]) @np._no_nep50_warning() -def test_ufunc_noncontiguous(ufunc): +def test_ufunc_noncontiguous_or_offset(ufunc): ''' Check that contiguous and non-contiguous calls to ufuncs have the same results for values in range(9) @@ -2737,19 +2737,31 @@ def test_ufunc_noncontiguous(ufunc): continue inp, out = typ.split('->') args_c = [np.empty(6, t) for t in inp] + # non contiguous (3 step) args_n = [np.empty(18, t)[::3] for t in inp] - for a in args_c: - a.flat = range(1,7) - for a in args_n: + # If alignment != itemsize, `args_o` is (probably) not itemsize aligned + # something that SIMD code needs. 
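+    # (Illustrative numbers, not used by the test: complex128 typically has
+    # alignment 8 but itemsize 16, so starting the view at byte offset 8
+    # gives storage the hardware accepts but that is not itemsize aligned.)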
+ args_o = [] + for t in inp: + dtype = np.dtype(t) + start = dtype.alignment + stop = start + 6 * dtype.itemsize + a = np.empty(7 * dtype.itemsize, dtype="b")[start:stop].view(dtype) + args_o.append(a) + + for a in args_c + args_n + args_o: a.flat = range(1,7) + with warnings.catch_warnings(record=True): warnings.filterwarnings("always") res_c = ufunc(*args_c) res_n = ufunc(*args_n) + res_o = ufunc(*args_o) if len(out) == 1: res_c = (res_c,) res_n = (res_n,) - for c_ar, n_ar in zip(res_c, res_n): + res_o = (res_o,) + for c_ar, n_ar, o_ar in zip(res_c, res_n, res_o): dt = c_ar.dtype if np.issubdtype(dt, np.floating): # for floating point results allow a small fuss in comparisons @@ -2758,8 +2770,10 @@ def test_ufunc_noncontiguous(ufunc): res_eps = np.finfo(dt).eps tol = 2*res_eps assert_allclose(res_c, res_n, atol=tol, rtol=tol) + assert_allclose(res_c, res_o, atol=tol, rtol=tol) else: assert_equal(c_ar, n_ar) + assert_equal(c_ar, o_ar) @pytest.mark.parametrize('ufunc', [np.sign, np.equal]) From a0c9247b3706c245dc4318dd18102653883eafd1 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 24 Jul 2024 15:50:44 +0200 Subject: [PATCH 865/980] MAINT: Move `len` placement to avoid warning --- numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src b/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src index 9043e1cd6a73..d72ace50ff19 100644 --- a/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src +++ b/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src @@ -610,9 +610,9 @@ simd_tanh_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, npy_in NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { +#if @simd@ npy_intp len = dimensions[0]; -#if @simd@ if (is_mem_overlap(args[0], steps[0], args[1], steps[1], len) || !npyv_loadable_stride_@sfx@(steps[0]) || !npyv_storable_stride_@sfx@(steps[1]) From 935ef6f7dd10c6a396317ddb6b3509063ea205cb Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 24 Jul 2024 15:55:51 +0200 Subject: [PATCH 866/980] MAINT: Use stdalign.h `alignof` to be compatible with C++/C --- numpy/_core/src/common/simd/simd.h | 46 ++++++++++++++++-------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/numpy/_core/src/common/simd/simd.h b/numpy/_core/src/common/simd/simd.h index fc0024131768..706229af0a62 100644 --- a/numpy/_core/src/common/simd/simd.h +++ b/numpy/_core/src/common/simd/simd.h @@ -1,5 +1,7 @@ #ifndef _NPY_SIMD_H_ #define _NPY_SIMD_H_ + +#include /* for alignof until C23 */ /** * the NumPy C SIMD vectorization interface "NPYV" are types and functions intended * to simplify vectorization of code on different platforms, currently supports @@ -150,28 +152,28 @@ typedef double npyv_lanetype_f64; #ifndef NPY_SIMD_MAXSTORE_STRIDE64 #define NPY_SIMD_MAXSTORE_STRIDE64 0 #endif -#define NPYV_IMPL_MAXSTRIDE(SFX, MAXLOAD, MAXSTORE) \ - NPY_FINLINE int \ - npyv_loadable_stride_##SFX(npy_intp stride) \ - { \ - if (_Alignof(npyv_lanetype_##SFX) != sizeof(npyv_lanetype_##SFX) && \ - stride % sizeof(npyv_lanetype_##SFX) != 0) { \ - /* stride not a multiple of itemsize, cannot handle. */ \ - return 0; \ - } \ - stride = stride / sizeof(npyv_lanetype_##SFX); \ - return MAXLOAD > 0 ? 
llabs(stride) <= MAXLOAD : 1; \ - } \ - NPY_FINLINE int \ - npyv_storable_stride_##SFX(npy_intp stride) \ - { \ - if (_Alignof(npyv_lanetype_##SFX) != sizeof(npyv_lanetype_##SFX) && \ - stride % sizeof(npyv_lanetype_##SFX) != 0) { \ - /* stride not a multiple of itemsize, cannot handle. */ \ - return 0; \ - } \ - stride = stride / sizeof(npyv_lanetype_##SFX); \ - return MAXSTORE > 0 ? llabs(stride) <= MAXSTORE : 1; \ +#define NPYV_IMPL_MAXSTRIDE(SFX, MAXLOAD, MAXSTORE) \ + NPY_FINLINE int \ + npyv_loadable_stride_##SFX(npy_intp stride) \ + { \ + if (alignof(npyv_lanetype_##SFX) != sizeof(npyv_lanetype_##SFX) && \ + stride % sizeof(npyv_lanetype_##SFX) != 0) { \ + /* stride not a multiple of itemsize, cannot handle. */ \ + return 0; \ + } \ + stride = stride / sizeof(npyv_lanetype_##SFX); \ + return MAXLOAD > 0 ? llabs(stride) <= MAXLOAD : 1; \ + } \ + NPY_FINLINE int \ + npyv_storable_stride_##SFX(npy_intp stride) \ + { \ + if (alignof(npyv_lanetype_##SFX) != sizeof(npyv_lanetype_##SFX) && \ + stride % sizeof(npyv_lanetype_##SFX) != 0) { \ + /* stride not a multiple of itemsize, cannot handle. */ \ + return 0; \ + } \ + stride = stride / sizeof(npyv_lanetype_##SFX); \ + return MAXSTORE > 0 ? llabs(stride) <= MAXSTORE : 1; \ } #if NPY_SIMD NPYV_IMPL_MAXSTRIDE(u32, NPY_SIMD_MAXLOAD_STRIDE32, NPY_SIMD_MAXSTORE_STRIDE32) From e8f9f54a72f74829072c266c8612d155b094ef2b Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 24 Jul 2024 16:06:43 +0200 Subject: [PATCH 867/980] TST: Fixup test (need to use alignment in stride not offset) --- numpy/_core/tests/test_ufunc.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index aba729167bd8..33e52748c3d5 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2725,7 +2725,7 @@ def test_ufunc_types(ufunc): @pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) if isinstance(getattr(np, x), np.ufunc)]) @np._no_nep50_warning() -def test_ufunc_noncontiguous_or_offset(ufunc): +def test_ufunc_noncontiguous(ufunc): ''' Check that contiguous and non-contiguous calls to ufuncs have the same results for values in range(9) @@ -2739,15 +2739,14 @@ def test_ufunc_noncontiguous_or_offset(ufunc): args_c = [np.empty(6, t) for t in inp] # non contiguous (3 step) args_n = [np.empty(18, t)[::3] for t in inp] - # If alignment != itemsize, `args_o` is (probably) not itemsize aligned - # something that SIMD code needs. + # alignment != itemsize is possible. So create an array with such + # an odd step manually. 
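+    # (Sketch of the idea, assuming complex128 with alignment 8 and
+    # itemsize 16: the record dtype built below becomes
+    # [("_", "S8"), ("t", "c16")] with align=False, so the "t" field lives
+    # at byte offset 8 and repeats every 24 bytes -- a stride that is not
+    # a multiple of the 16-byte itemsize.)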
args_o = [] for t in inp: - dtype = np.dtype(t) - start = dtype.alignment - stop = start + 6 * dtype.itemsize - a = np.empty(7 * dtype.itemsize, dtype="b")[start:stop].view(dtype) - args_o.append(a) + orig_dt = np.dtype(t) + off_dt = f"S{orig_dt.alignment}" # offset by alignment + dtype = np.dtype([("_", off_dt), ("t", orig_dt)], align=False) + args_o.append(np.empty(6, dtype="b")["t"]) for a in args_c + args_n + args_o: a.flat = range(1,7) From b43e62a783c968ed8f255223b67b850b4fce9d19 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 24 Jul 2024 16:16:10 +0200 Subject: [PATCH 868/980] BUG: Fix definition, one more incorrect check and remove incorrect assert --- .../src/umath/loops_umath_fp.dispatch.c.src | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src index 9711cef84173..bf358e8ee7c1 100644 --- a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src @@ -160,13 +160,12 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(HALF_@func@) #if NPY_SIMD && defined(NPY_CAN_LINK_SVML) const npy_half *src = (npy_half*)args[0]; npy_half *dst = (npy_half*)args[1]; - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; + const npy_intp len = dimensions[0]; + if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && - (ssrc == 1) && - (sdst == 1)) { + (steps[0] == sizeof(npy_half)) && + (steps[1] == sizeof(npy_half))) { #if defined(NPY_HAVE_AVX512_SPR) __svml_@intrin@s32(src, dst, len); return; @@ -199,14 +198,15 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { #if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) + const @type@ *src = (@type@*)args[0]; + @type@ *dst = (@type@*)args[1]; + const npy_intp len = dimensions[0]; if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && npyv_loadable_stride_@sfx@(steps[0]) && npyv_storable_stride_@sfx@(steps[1])) { - const @type@ *src = (@type@*)args[0]; - @type@ *dst = (@type@*)args[1]; const npy_intp ssrc = steps[0] / sizeof(@type@); const npy_intp sdst = steps[1] / sizeof(@type@); simd_@intrin@_@sfx@(src, ssrc, dst, sdst, len); @@ -289,7 +289,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) @type@ *dst = (@type@*)args[2]; const npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); + if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && !is_mem_overlap(src2, steps[1], dst, steps[2], len) && npyv_loadable_stride_@sfx@(steps[0]) && From e27f4190a1b44685cda5aab44088c6109c1a67c5 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 24 Jul 2024 16:18:49 +0200 Subject: [PATCH 869/980] TST: Test fixup (seems I ran the wrong one locally...) 
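The previous revision indexed a plain byte array with the field name, so the
structured-dtype trick was never actually exercised. A sketch of what the
corrected line yields, assuming complex128 (alignment 8, itemsize 16):

    >>> dtype = np.dtype([("_", "S8"), ("t", "c16")], align=False)
    >>> np.empty(6, dtype=dtype)["t"].strides
    (24,)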
--- numpy/_core/tests/test_ufunc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 33e52748c3d5..97e79c3f844a 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2746,7 +2746,7 @@ def test_ufunc_noncontiguous(ufunc): orig_dt = np.dtype(t) off_dt = f"S{orig_dt.alignment}" # offset by alignment dtype = np.dtype([("_", off_dt), ("t", orig_dt)], align=False) - args_o.append(np.empty(6, dtype="b")["t"]) + args_o.append(np.empty(6, dtype=dtype)["t"]) for a in args_c + args_n + args_o: a.flat = range(1,7) From 5b1444f03d931c5f09e9c81d475f64183ec8f36f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 24 Jul 2024 19:43:45 +0200 Subject: [PATCH 870/980] DOC: document 'floatmode' and 'legacy' keys from np.get_printoptions' return dict --- numpy/_core/arrayprint.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 07ff182bdb8f..bb97898842b1 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -313,8 +313,10 @@ def get_printoptions(): - suppress : bool - nanstr : str - infstr : str - - formatter : dict of callables - sign : str + - formatter : dict of callables + - floatmode : str + - legacy : str or False For a full description of these options, see `set_printoptions`. From f3b669d391cfe660b064af052973b9bff9372dd8 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 10 Jul 2024 16:52:27 +0200 Subject: [PATCH 871/980] API: Do not consider subclasses for NEP 50 weak promotion This disables remaining checks for subclasses of floats. We only apply the weak rules to literals, and thus just ignore subclasses. --- numpy/_core/src/multiarray/abstractdtypes.h | 17 +++---------- numpy/_core/src/multiarray/scalartypes.c.src | 6 ++--- numpy/_core/src/umath/scalarmath.c.src | 26 +++----------------- numpy/_core/tests/test_scalarmath.py | 19 +++++++++----- 4 files changed, 22 insertions(+), 46 deletions(-) diff --git a/numpy/_core/src/multiarray/abstractdtypes.h b/numpy/_core/src/multiarray/abstractdtypes.h index b4cf1a13f673..7bf8191e6917 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.h +++ b/numpy/_core/src/multiarray/abstractdtypes.h @@ -41,16 +41,7 @@ static inline int npy_mark_tmp_array_if_pyscalar( PyObject *obj, PyArrayObject *arr, PyArray_DTypeMeta **dtype) { - /* - * We check the array dtype for two reasons: First, booleans are - * integer subclasses. Second, an int, float, or complex could have - * a custom DType registered, and then we should use that. - * Further, `np.float64` is a double subclass, so must reject it. - */ - // TODO,NOTE: This function should be changed to do exact long checks - // For NumPy 2.1! 
- if (PyLong_Check(obj) - && (PyArray_ISINTEGER(arr) || PyArray_ISOBJECT(arr))) { + if (PyLong_CheckExact(obj)) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_INT; if (dtype != NULL) { Py_INCREF(&PyArray_PyLongDType); @@ -58,8 +49,7 @@ npy_mark_tmp_array_if_pyscalar( } return 1; } - else if (PyFloat_Check(obj) && !PyArray_IsScalar(obj, Double) - && PyArray_TYPE(arr) == NPY_DOUBLE) { + else if (PyFloat_CheckExact(obj)) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_FLOAT; if (dtype != NULL) { Py_INCREF(&PyArray_PyFloatDType); @@ -67,8 +57,7 @@ npy_mark_tmp_array_if_pyscalar( } return 1; } - else if (PyComplex_Check(obj) && !PyArray_IsScalar(obj, CDouble) - && PyArray_TYPE(arr) == NPY_CDOUBLE) { + else if (PyComplex_CheckExact(obj)) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_COMPLEX; if (dtype != NULL) { Py_INCREF(&PyArray_PyComplexDType); diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 2c0525253cf2..f3f931de33bc 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -191,9 +191,9 @@ find_binary_operation_path( *self_op = NULL; if (PyArray_IsScalar(other, Generic) || - PyLong_Check(other) || - PyFloat_Check(other) || - PyComplex_Check(other) || + PyLong_CheckExact(other) || + PyFloat_CheckExact(other) || + PyComplex_CheckExact(other) || PyBool_Check(other)) { /* * The other operand is ready for the operation already. Must pass on diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index fe492805eae3..d98b343b2d96 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -954,15 +954,7 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) return CONVERSION_SUCCESS; } - if (PyFloat_Check(value)) { - if (!PyFloat_CheckExact(value)) { - /* A NumPy double is a float subclass, but special. */ - if (PyArray_IsScalar(value, Double)) { - descr = PyArray_DescrFromType(NPY_DOUBLE); - goto numpy_scalar; - } - *may_need_deferring = NPY_TRUE; - } + if (PyFloat_CheckExact(value)) { if (!IS_SAFE(NPY_DOUBLE, NPY_@TYPE@)) { if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { /* Legacy promotion and weak-and-warn not handled here */ @@ -978,10 +970,7 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) return CONVERSION_SUCCESS; } - if (PyLong_Check(value)) { - if (!PyLong_CheckExact(value)) { - *may_need_deferring = NPY_TRUE; - } + if (PyLong_CheckExact(value)) { if (!IS_SAFE(NPY_LONG, NPY_@TYPE@)) { /* * long -> (c)longdouble is safe, so `OTHER_IS_UNKNOWN_OBJECT` will @@ -1009,15 +998,7 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) return CONVERSION_SUCCESS; } - if (PyComplex_Check(value)) { - if (!PyComplex_CheckExact(value)) { - /* A NumPy complex double is a float subclass, but special. 
*/ - if (PyArray_IsScalar(value, CDouble)) { - descr = PyArray_DescrFromType(NPY_CDOUBLE); - goto numpy_scalar; - } - *may_need_deferring = NPY_TRUE; - } + if (PyComplex_CheckExact(value)) { if (!IS_SAFE(NPY_CDOUBLE, NPY_@TYPE@)) { if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { /* Legacy promotion and weak-and-warn not handled here */ @@ -1079,7 +1060,6 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) return OTHER_IS_UNKNOWN_OBJECT; } - numpy_scalar: if (descr->typeobj != Py_TYPE(value)) { /* * This is a subclass of a builtin type, we may continue normally, diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index cdbb2fad910a..4429e70fe66b 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -1073,6 +1073,9 @@ def test_longdouble_complex(): @pytest.mark.parametrize("subtype", [float, int, complex, np.float16]) @np._no_nep50_warning() def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp): + # This tests that python scalar subclasses behave like a float64 (if they + # don't override it). + # In an earlier version of NEP 50, they behaved like the Python buildins. def op_func(self, other): return __op__ @@ -1095,25 +1098,29 @@ def rop_func(self, other): # When no deferring is indicated, subclasses are handled normally. myt = type("myt", (subtype,), {__rop__: rop_func}) + behaves_like = lambda x: np.array(subtype(x))[()] # Check for float32, as a float subclass float64 may behave differently res = op(myt(1), np.float16(2)) - expected = op(subtype(1), np.float16(2)) + expected = op(behaves_like(1), np.float16(2)) assert res == expected assert type(res) == type(expected) res = op(np.float32(2), myt(1)) - expected = op(np.float32(2), subtype(1)) + expected = op(np.float32(2), behaves_like(1)) assert res == expected assert type(res) == type(expected) - # Same check for longdouble: + # Same check for longdouble (compare via dtype to accept float64 when + # longdouble has the identical size), which is currently not perfectly + # consistent. 
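+    # (For illustration: with this change a bare subclass such as
+    # `class myfloat(float): pass` is treated as a strong float64, so
+    # `np.float32(2) + myfloat(1)` now gives a float64 result, whereas an
+    # exact Python float would keep the float32 under weak promotion.)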
     res = op(myt(1), np.longdouble(2))
-    expected = op(subtype(1), np.longdouble(2))
+    expected = op(behaves_like(1), np.longdouble(2))
     assert res == expected
-    assert type(res) == type(expected)
+    assert np.dtype(type(res)) == np.dtype(type(expected))
 
     res = op(np.float32(2), myt(1))
-    expected = op(np.longdouble(2), subtype(1))
+    expected = op(np.float32(2), behaves_like(1))
     assert res == expected
+    assert np.dtype(type(res)) == np.dtype(type(expected))
 
 
 def test_truediv_int():

From a0c9247b3706c245dc4318dd18102653883eafd1 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Wed, 24 Jul 2024 19:59:38 -0600
Subject: [PATCH 872/980] MAINT: remove unnecessary overflow checking

---
 numpy/_core/src/multiarray/refcount.c | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)

diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c
index 4fc19979bac5..153c71cceec7 100644
--- a/numpy/_core/src/multiarray/refcount.c
+++ b/numpy/_core/src/multiarray/refcount.c
@@ -87,17 +87,11 @@ PyArray_ZeroContiguousBuffer(
         }
     }
     else {
-        npy_intp nbytes;
-        if (!npy_mul_sizes_with_overflow(&nbytes, size, stride)) {
-            memset(data, 0, nbytes);
-            NPY_traverse_info_xfree(&zero_info);
-            return 0;
-        }
-        else {
-            PyErr_SetString(PyExc_OverflowError,
-                    "Integer overflow in computing resized buffer size");
-            goto fail;
-        }
+        /* the multiply here should never overflow, since we already
+           checked if the new array size doesn't overflow */
+        memset(data, 0, size*stride);
+        NPY_traverse_info_xfree(&zero_info);
+        return 0;
     }
 
     int res = zero_info.func(

From 38ed53f89425247ade252e25907b20126adc467e Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Wed, 24 Jul 2024 20:00:43 -0600
Subject: [PATCH 873/980] Update numpy/_core/src/multiarray/refcount.c

Co-authored-by: Sebastian Berg

---
 numpy/_core/src/multiarray/refcount.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c
index 153c71cceec7..0da40cbdc60e 100644
--- a/numpy/_core/src/multiarray/refcount.c
+++ b/numpy/_core/src/multiarray/refcount.c
@@ -59,9 +59,9 @@ PyArray_ClearBuffer(
 /*
  * Helper function to zero an array buffer.
  *
- * Here "zeroing" means an abstract zeroing operation, which for an
- * array of references might be something more complicated than
- * zero-filling the buffer.
+ * Here "zeroing" means an abstract zeroing operation, implementing
+ * the behavior of `np.zeros`. E.g. for an array of references this is
+ * more complicated than zero-filling the buffer.
  *
  * Failure (returns -1) indicates some sort of programming or logical
  * error and should not happen for a data type that has been set up

From 1e9291790aeb7e24b877f334b484b995eb59d452 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Thu, 25 Jul 2024 07:21:25 +0200
Subject: [PATCH 874/980] MAINT: Move assignment to (hopefully) avoid warning

---
 numpy/_core/src/umath/scalarmath.c.src | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src
index d98b343b2d96..cd28e4405b6d 100644
--- a/numpy/_core/src/umath/scalarmath.c.src
+++ b/numpy/_core/src/umath/scalarmath.c.src
@@ -1389,7 +1389,8 @@ static PyObject *
     npy_bool may_need_deferring;
     conversion_result res = convert_to_@name@(
             other, &other_val_conv, &may_need_deferring);
-    other_val = other_val_conv;  /* Need a float value */
+    /* Actual float cast `other_val` is set below on success.
*/ + if (res == CONVERSION_ERROR) { return NULL; /* an error occurred (should never happen) */ } @@ -1400,6 +1401,7 @@ static PyObject * case DEFER_TO_OTHER_KNOWN_SCALAR: Py_RETURN_NOTIMPLEMENTED; case CONVERSION_SUCCESS: + other_val = other_val_conv; /* Need a float value */ break; /* successfully extracted value we can proceed */ case OTHER_IS_UNKNOWN_OBJECT: case PROMOTION_REQUIRED: From 0db1a86964da389f3d1cd9936b0f1b67df4a5452 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Jul 2024 17:32:54 +0000 Subject: [PATCH 875/980] MAINT: Bump github/codeql-action from 3.25.12 to 3.25.14 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.12 to 3.25.14. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/4fa2a7953630fd2f3fb380f21be14ede0169dd4f...5cf07d8b700b67e235fbb65cbc84f69c0cf10464) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index c49f5114cce4..a76f49bb390f 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12 + uses: github/codeql-action/init@5cf07d8b700b67e235fbb65cbc84f69c0cf10464 # v3.25.14 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12 + uses: github/codeql-action/autobuild@5cf07d8b700b67e235fbb65cbc84f69c0cf10464 # v3.25.14 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12 + uses: github/codeql-action/analyze@5cf07d8b700b67e235fbb65cbc84f69c0cf10464 # v3.25.14 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 349941a1e13c..5bec66395d8c 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v2.1.27 + uses: github/codeql-action/upload-sarif@5cf07d8b700b67e235fbb65cbc84f69c0cf10464 # v2.1.27 with: sarif_file: results.sarif From 17f529a2682660b563fb87acebbf5413c382fc0b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 25 Jul 2024 12:09:16 -0600 Subject: [PATCH 876/980] TST: mark f2py test as linux-only --- numpy/f2py/tests/test_f2py2e.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 9944da003a8b..4fb028e145ca 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -749,8 +749,8 @@ def test_npdistop(hello_world_f90, monkeypatch): assert rout.stdout == eout -@pytest.mark.skipif(sys.version_info <= (3, 12), - reason='Python 3.12 or newer required') +@pytest.mark.skipif((platform.system() != 'Linux') or sys.version_info <= (3, 12), + reason='Compiler and Python 3.12 or newer required') def test_no_freethreading_compatible(hello_world_f90, monkeypatch): """ CLI :: --no-freethreading-compatible @@ -774,8 +774,8 @@ def test_no_freethreading_compatible(hello_world_f90, monkeypatch): assert rout.returncode == 0 -@pytest.mark.skipif(sys.version_info <= (3, 12), - reason='Python 3.12 or newer required') +@pytest.mark.skipif((platform.system() != 'Linux') or sys.version_info <= (3, 12), + reason='Compiler and Python 3.12 or newer required') def test_freethreading_compatible(hello_world_f90, monkeypatch): """ CLI :: --freethreading_compatible From 2d7ede41ce54be25fb20efac706c578a535bb5b4 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 25 Jul 2024 11:07:56 -0600 Subject: [PATCH 877/980] CI: unify free-threaded builds with other builds [wheel build] --- .github/workflows/free-threaded-wheels.yml | 186 --------------------- .github/workflows/wheels.yml | 15 +- 2 files changed, 14 insertions(+), 187 deletions(-) delete mode 100644 .github/workflows/free-threaded-wheels.yml diff --git a/.github/workflows/free-threaded-wheels.yml b/.github/workflows/free-threaded-wheels.yml deleted file mode 100644 index 3c5cc9294cef..000000000000 --- a/.github/workflows/free-threaded-wheels.yml +++ /dev/null @@ -1,186 +0,0 @@ -# Workflow to build and test wheels for the free-threaded Python build. -# -# This should be merged back into wheels.yml when free-threaded wheel -# builds can be uploaded to pypi along with the rest of numpy's release -# artifacts. -# -# To work on the wheel building infrastructure on a fork, comment out: -# -# if: github.repository == 'numpy/numpy' -# -# in the get_commit_message job. Be sure to include [wheel build] in your commit -# message to trigger the build. 
All files related to wheel building are located -# at tools/wheels/ -name: Free-Threaded Wheel Builder - -on: - schedule: - # ┌───────────── minute (0 - 59) - # │ ┌───────────── hour (0 - 23) - # │ │ ┌───────────── day of the month (1 - 31) - # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) - # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) - # │ │ │ │ │ - - cron: "42 2 * * SUN,WED" - pull_request: - branches: - - main - - maintenance/** - # we don't want to upload free-threaded wheels to pypi yet - # so we don't build on tags - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read # to fetch code (actions/checkout) - -jobs: - get_commit_message: - name: Get commit message - runs-on: ubuntu-latest - # To enable this job and subsequent jobs on a fork, comment out: - if: github.repository == 'numpy/numpy' - outputs: - message: ${{ steps.commit_message.outputs.message }} - steps: - - name: Checkout numpy - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - # Gets the correct commit message for pull request - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: Get commit message - id: commit_message - run: | - set -xe - COMMIT_MSG=$(git log --no-merges -1 --oneline) - echo "message=$COMMIT_MSG" >> $GITHUB_OUTPUT - echo github.ref ${{ github.ref }} - - build_wheels: - name: Build wheel ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} - needs: get_commit_message - if: >- - contains(needs.get_commit_message.outputs.message, '[wheel build]') || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' - runs-on: ${{ matrix.buildplat[0] }} - strategy: - # Ensure that a wheel builder finishes even if another fails - fail-fast: false - matrix: - # Github Actions doesn't support pairing matrix values together, let's improvise - # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 - buildplat: - - [ubuntu-20.04, manylinux_x86_64, ""] - - [ubuntu-20.04, musllinux_x86_64, ""] - - [macos-13, macosx_x86_64, openblas] - # targeting macos >= 14. Could probably build on macos-14, but it would be a cross-compile - - [macos-13, macosx_x86_64, accelerate] - - [macos-14, macosx_arm64, accelerate] # always use accelerate - # TODO: set up Windows wheels windows is supported on cibuildwheel but - # numpy doesn't build on the image yet - python: ["cp313t"] - env: - IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} - IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} - steps: - - name: Checkout numpy - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - submodules: true - - - name: Setup MSVC (32-bit) - if: ${{ matrix.buildplat[1] == 'win32' }} - uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 - with: - architecture: 'x86' - - - name: pkg-config-for-win - run: | - choco install -y --no-progress --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite - $CIBW = "${{ github.workspace }}/.openblas" - # pkgconfig needs a complete path, and not just "./openblas since the - # build is run in a tmp dir (?) 
- # It seems somewhere in the env passing, `\` is not - # passed through, so convert it to '/' - $CIBW = $CIBW.replace("\","/") - echo "CIBW_ENVIRONMENT_WINDOWS=PKG_CONFIG_PATH=$CIBW" >> $env:GITHUB_ENV - if: runner.os == 'windows' - - # Used to push the built wheels - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - with: - python-version: "3.x" - - - name: Setup macOS - if: matrix.buildplat[0] == 'macos-13' || matrix.buildplat[0] == 'macos-14' - run: | - # Needed due to https://github.com/actions/runner-images/issues/3371 - # Supported versions: https://github.com/actions/runner-images/blob/main/images/macos/macos-14-arm64-Readme.md - echo "FC=gfortran-13" >> "$GITHUB_ENV" - echo "F77=gfortran-13" >> "$GITHUB_ENV" - echo "F90=gfortran-13" >> "$GITHUB_ENV" - if [[ ${{ matrix.buildplat[2] }} == 'accelerate' ]]; then - # macosx_arm64 and macosx_x86_64 with accelerate - # only target Sonoma onwards - CIBW="MACOSX_DEPLOYMENT_TARGET=14.0 INSTALL_OPENBLAS=false RUNNER_OS=macOS" - echo "CIBW_ENVIRONMENT_MACOS=$CIBW" >> "$GITHUB_ENV" - - # the macos-13 image that's used for building the x86_64 wheel can't test - # a wheel with deployment target >= 14 without further work - echo "CIBW_TEST_SKIP=*-macosx_x86_64" >> "$GITHUB_ENV" - else - # macosx_x86_64 with OpenBLAS - # if INSTALL_OPENBLAS isn't specified then scipy-openblas is automatically installed - CIBW="RUNNER_OS=macOS" - PKG_CONFIG_PATH="$PWD/.openblas" - DYLD="$DYLD_LIBRARY_PATH:/$PWD/.openblas/lib" - echo "CIBW_ENVIRONMENT_MACOS=$CIBW PKG_CONFIG_PATH=$PKG_CONFIG_PATH DYLD_LIBRARY_PATH=$DYLD" >> "$GITHUB_ENV" - fi - - - name: Build wheels - uses: pypa/cibuildwheel@7e5a838a63ac8128d71ab2dfd99e4634dd1bca09 # v2.19.2 - env: - CIBW_PRERELEASE_PYTHONS: True - CIBW_FREE_THREADED_SUPPORT: True - CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - # TODO: remove along with installing build deps in - # cibw_before_build.sh when a released cython can build numpy - CIBW_BUILD_FRONTEND: "pip; args: --no-build-isolation" - - - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 - with: - name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} - path: ./wheelhouse/*.whl - - - uses: mamba-org/setup-micromamba@f8b8a1e23a26f60a44c853292711bacfd3eac822 - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # Note that this step is *after* specific pythons have been used to - # build and test the wheel - # for installation of anaconda-client, for upload to anaconda.org - # environment will be activated after creation, and in future bash steps - init-shell: bash - environment-name: upload-env - create-args: >- - anaconda-client - - - name: Upload wheels - if: success() - shell: bash -el {0} - # see https://github.com/marketplace/actions/setup-miniconda for why - # `-el {0}` is required. - env: - NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - run: | - source tools/wheels/upload_wheels.sh - set_upload_vars - # trigger an upload to - # https://anaconda.org/scientific-python-nightly-wheels/numpy - # for cron jobs or "Run workflow" (restricted to main branch). 
- # The tokens were originally generated at anaconda.org - upload_wheels diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 76168ef9e6c5..f73616ef46c3 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -85,7 +85,7 @@ jobs: - [macos-14, macosx_arm64, accelerate] # always use accelerate - [windows-2019, win_amd64, ""] - [windows-2019, win32, ""] - python: ["cp310", "cp311", "cp312", "pp310", "cp313"] + python: ["cp310", "cp311", "cp312", "pp310", "cp313", "cp313t"] exclude: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32, ""] @@ -94,6 +94,13 @@ jobs: python: "pp310" - buildplat: [ macos-14, macosx_arm64, accelerate ] python: "pp310" + - buildplat: [ windows-2019, win_amd64, "" ] + python: "cp313t" + - buildplat: [ windows-2019, win32, "" ] + python: "cp313t" + - buildplat: [ macos13, macosx_x86_64, openblas ] + python: "cp313t" + env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} @@ -153,10 +160,16 @@ jobs: echo "CIBW_ENVIRONMENT_MACOS=$CIBW PKG_CONFIG_PATH=$PKG_CONFIG_PATH DYLD_LIBRARY_PATH=$DYLD" >> "$GITHUB_ENV" fi + - name: Set up free-threaded build + if: matrix.python == 'cp313t' + run: | + echo "CIBW_BUILD_FRONTEND=pip; args: --no-build-isolation" >> "$GITHUB_ENV" + - name: Build wheels uses: pypa/cibuildwheel@7e5a838a63ac8128d71ab2dfd99e4634dd1bca09 # v2.19.2 env: CIBW_PRERELEASE_PYTHONS: True + CIBW_FREE_THREADED_SUPPORT: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 From 339c9278244840e9fd9df3c837e8bc1f353df646 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Thu, 25 Jul 2024 20:56:45 -0400 Subject: [PATCH 878/980] BUG: random: prevent zipf from hanging when parameter is large. --- numpy/random/src/distributions/distributions.c | 9 +++++++++ numpy/random/tests/test_generator_mt19937_regressions.py | 7 +++++++ 2 files changed, 16 insertions(+) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 1241329151a9..4ce4b728584c 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -1000,6 +1000,15 @@ int64_t random_geometric(bitgen_t *bitgen_state, double p) { RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a) { double am1, b; + if (a >= 1025) { + /* + * If a exceeds 1025, the calculation of b will overflow and the loop + * will not terminate. It is safe to simply return 1 here, because the + * probability of generating a value greater than 1 in this case is + * less than 3e-309. + */ + return (RAND_INT_TYPE) 1; + } am1 = a - 1.0; b = pow(2.0, am1); while (1) { diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index d451c6acd16d..4983f45743fd 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -163,3 +163,10 @@ def test_geometric_tiny_prob(self): # is 0.9999999999907766, so we expect the result to be all 2**63-1. assert_array_equal(self.mt19937.geometric(p=1e-30, size=3), np.iinfo(np.int64).max) + + def test_zipf_large_parameter(self): + # Regression test for part of gh-9829: a call such as rng.zipf(10000) + # would hang. 
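+        # (For a = 10000 the probability of drawing anything other than 1 is
+        # roughly 2.0**-10000, far below the smallest positive double, so
+        # every sample is expected to be exactly 1.)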
+ n = 8 + sample = self.mt19937.zipf(10000, size=n) + assert_array_equal(sample, np.ones(n, dtype=np.int64)) From 85071d9d1b611047d21fba55d31be6b05ba6960e Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 25 Jul 2024 19:58:57 -0600 Subject: [PATCH 879/980] BUG: use proper input and output descriptor in array_assign_subscript cast setup --- numpy/_core/src/multiarray/mapping.c | 6 +++--- numpy/_core/tests/test_stringdtype.py | 8 ++++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index e329a7a6758c..84a7df0bca23 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1996,9 +1996,9 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) npy_intp itemsize = PyArray_ITEMSIZE(self); int is_aligned = IsUintAligned(self) && IsUintAligned(tmp_arr); - if (PyArray_GetDTypeTransferFunction(is_aligned, - itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + if (PyArray_GetDTypeTransferFunction( + is_aligned, itemsize, itemsize, + PyArray_DESCR(tmp_arr), PyArray_DESCR(self), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 069a9c00d5de..a1b2d9ad286c 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -495,6 +495,14 @@ def test_fancy_indexing(string_list): sarr = np.array(string_list, dtype="T") assert_array_equal(sarr, sarr[np.arange(sarr.shape[0])]) + # see gh-27003 + for ind in [[0, 1], ...]: + a = np.array(['a'*16, 'b'*16], dtype="T") + b = np.array(['d'*16, 'e'*16], dtype="T") + a[ind] = b + assert_array_equal(a, b) + assert a[0] == 'd'*16 + def test_creation_functions(): assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""]) From 48f03da0cda1fccf5e5a7584f084cf2dddfc497f Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 26 Jul 2024 05:48:08 +0200 Subject: [PATCH 880/980] TYP,BUG: Complete type stubs for ``numpy.dtypes`` --- numpy/dtypes.pyi | 668 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 633 insertions(+), 35 deletions(-) diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index cea64282252b..673cbde8d871 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -1,43 +1,641 @@ +import sys +from typing import ( + Any, + Final, + Generic, + Literal as L, + NoReturn, + TypeAlias, + TypeVar, + final, +) + import numpy as np +if sys.version_info >= (3, 11): + from typing import LiteralString +else: + LiteralString: TypeAlias = str + + +__all__ = [ + 'BoolDType', + 'Int8DType', + 'ByteDType', + 'UInt8DType', + 'UByteDType', + 'Int16DType', + 'ShortDType', + 'UInt16DType', + 'UShortDType', + 'Int32DType', + 'IntDType', + 'UInt32DType', + 'UIntDType', + 'Int64DType', + 'LongDType', + 'UInt64DType', + 'ULongDType', + 'LongLongDType', + 'ULongLongDType', + 'Float16DType', + 'Float32DType', + 'Float64DType', + 'LongDoubleDType', + 'Complex64DType', + 'Complex128DType', + 'CLongDoubleDType', + 'ObjectDType', + 'BytesDType', + 'StrDType', + 'VoidDType', + 'DateTime64DType', + 'TimeDelta64DType', + 'StringDType', +] + +_SelfType = TypeVar("_SelfType", bound=object) +_SCT = TypeVar("_SCT", bound=np.generic, covariant=True) + +# Helper base classes (typing-only) + +class _BaseDType(Generic[_SCT], np.dtype[_SCT]): # type: ignore[misc] + names: None # pyright: ignore[reportIncompatibleVariableOverride] + def __new__(cls: type[_SelfType], /) -> _SelfType: ... 
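+    # Non-structured dtypes have no fields, so subscripting always raises
+    # at runtime; the NoReturn annotation below encodes that for type
+    # checkers.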
+ def __getitem__(self, key: Any, /) -> NoReturn: ... + @property + def base(self) -> np.dtype[_SCT]: ... + @property + def fields(self) -> None: ... + @property + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... + @property + def metadata(self) -> None: ... + @property + def ndim(self) -> L[0]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def subdtype(self) -> None: ... + +class _BuiltinDType(Generic[_SCT], _BaseDType[_SCT]): + @property + def flags(self) -> L[0]: ... + @property + def hasobject(self) -> L[False]: ... + @property + def isbuiltin(self) -> L[1]: ... + +# Helper mixins (typing-only): + +_KindChar = TypeVar("_KindChar", bound=LiteralString, covariant=True) +_TypeChar = TypeVar("_TypeChar", bound=LiteralString, covariant=True) +_TypeNum = TypeVar("_TypeNum", bound=int, covariant=True) + +class _TypeCodes(Generic[_KindChar, _TypeChar, _TypeNum]): + @final + @property + def kind(self) -> _KindChar: ... + @final + @property + def char(self) -> _TypeChar: ... + @final + @property + def num(self) -> _TypeNum: ... + +class _NoOrder: + @final + @property + def byteorder(self) -> L["|"]: ... + +class _NativeOrder: + @final + @property + def byteorder(self) -> L["="]: ... + +_DataSize_co = TypeVar("_DataSize_co", bound=int, covariant=True) +_ItemSize_co = TypeVar("_ItemSize_co", bound=int, covariant=True) -__all__: list[str] +class _NBit(Generic[_DataSize_co, _ItemSize_co]): + @final + @property + def alignment(self) -> _DataSize_co: ... + @final + @property + def itemsize(self) -> _ItemSize_co: ... + +class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ... # Boolean: -BoolDType = np.dtype[np.bool] + +@final +class BoolDType( + _TypeCodes[L["b"], L["?"], L[0]], + _8Bit, + _BuiltinDType[np.bool], +): + @property + def name(self) -> L["bool"]: ... + @property + def str(self) -> L["|b1"]: ... + # Sized integers: -Int8DType = np.dtype[np.int8] -UInt8DType = np.dtype[np.uint8] -Int16DType = np.dtype[np.int16] -UInt16DType = np.dtype[np.uint16] -Int32DType = np.dtype[np.int32] -UInt32DType = np.dtype[np.uint32] -Int64DType = np.dtype[np.int64] -UInt64DType = np.dtype[np.uint64] + +@final +class Int8DType( + _TypeCodes[L["i"], L["b"], L[1]], + _8Bit, + _BuiltinDType[np.int8], +): + @property + def name(self) -> L["int8"]: ... + @property + def str(self) -> L["|i1"]: ... + +@final +class UInt8DType( + _TypeCodes[L["u"], L["B"], L[2]], + _8Bit, + _BuiltinDType[np.uint8], +): + @property + def name(self) -> L["uint8"]: ... + @property + def str(self) -> L["|u1"]: ... + +@final +class Int16DType( + _TypeCodes[L["i"], L["h"], L[3]], + _NativeOrder, + _NBit[L[2], L[2]], + _BuiltinDType[np.int16], +): + @property + def name(self) -> L["int16"]: ... + @property + def str(self) -> L["i2"]: ... + +@final +class UInt16DType( + _TypeCodes[L["u"], L["H"], L[4]], + _NativeOrder, + _NBit[L[2], L[2]], + _BuiltinDType[np.uint16], +): + @property + def name(self) -> L["uint16"]: ... + @property + def str(self) -> L["u2"]: ... + +@final +class Int32DType( + _TypeCodes[L["i"], L["i", "l"], L[5, 7]], + _NativeOrder, + _NBit[L[4], L[4]], + _BuiltinDType[np.int32], +): + @property + def name(self) -> L["int32"]: ... + @property + def str(self) -> L["i4"]: ... + +@final +class UInt32DType( + _TypeCodes[L["u"], L["I", "L"], L[6, 8]], + _NativeOrder, + _NBit[L[4], L[4]], + _BuiltinDType[np.uint32], +): + @property + def name(self) -> L["uint32"]: ... + @property + def str(self) -> L["u4"]: ... 
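+# NOTE: the 32- and 64-bit integer dtypes above and below carry two possible
+# type characters/numbers because, depending on the platform, they alias
+# either the C int/long or the C long/long long fixed-width types.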
+ +@final +class Int64DType( + _TypeCodes[L["i"], L["l", "q"], L[7, 9]], + _NativeOrder, + _NBit[L[8], L[8]], + _BuiltinDType[np.int64], +): + @property + def name(self) -> L["int64"]: ... + @property + def str(self) -> L["i8"]: ... + +@final +class UInt64DType( + _TypeCodes[L["u"], L["L", "Q"], L[8, 10]], + _NativeOrder, + _NBit[L[8], L[8]], + _BuiltinDType[np.uint64], +): + @property + def name(self) -> L["uint64"]: ... + @property + def str(self) -> L["u8"]: ... + # Standard C-named version/alias: -ByteDType = np.dtype[np.byte] -UByteDType = np.dtype[np.ubyte] -ShortDType = np.dtype[np.short] -UShortDType = np.dtype[np.ushort] -IntDType = np.dtype[np.intc] -UIntDType = np.dtype[np.uintc] -LongDType = np.dtype[np.long] -ULongDType = np.dtype[np.ulong] -LongLongDType = np.dtype[np.longlong] -ULongLongDType = np.dtype[np.ulonglong] -# Floats -Float16DType = np.dtype[np.float16] -Float32DType = np.dtype[np.float32] -Float64DType = np.dtype[np.float64] -LongDoubleDType = np.dtype[np.longdouble] +ByteDType: Final = Int8DType +UByteDType: Final = UInt8DType +ShortDType: Final = Int16DType +UShortDType: Final = UInt16DType + +@final +class IntDType( + _TypeCodes[L["i"], L["i"], L[5]], + _NativeOrder, + _NBit[L[2, 4], L[2, 4]], + _BuiltinDType[np.intc], +): + @property + def name(self) -> L["int16", "int32"]: ... + @property + def str(self) -> L["i2", "i4"]: ... + +@final +class UIntDType( + _TypeCodes[L["u"], L["I"], L[6]], + _NativeOrder, + _NBit[L[2, 4], L[2, 4]], + _BuiltinDType[np.uintc], +): + @property + def name(self) -> L["uint16", "uint32"]: ... + @property + def str(self) -> L["u2", "u4"]: ... + +@final +class LongDType( + _TypeCodes[L["i"], L["l"], L[7]], + _NativeOrder, + _NBit[L[4, 8], L[4, 8]], + _BuiltinDType[np.long], +): + @property + def name(self) -> L["int32", "int64"]: ... + @property + def str(self) -> L["i4", "i8"]: ... + +@final +class ULongDType( + _TypeCodes[L["u"], L["L"], L[8]], + _NativeOrder, + _NBit[L[4, 8], L[4, 8]], + _BuiltinDType[np.ulong], +): + @property + def name(self) -> L["uint32", "uint64"]: ... + @property + def str(self) -> L["u4", "u8"]: ... + +@final +class LongLongDType( + _TypeCodes[L["i"], L["q"], L[9]], + _NativeOrder, + _NBit[L[8], L[8]], + _BuiltinDType[np.longlong], +): + @property + def name(self) -> L["int64"]: ... + @property + def str(self) -> L["i8"]: ... + +@final +class ULongLongDType( + _TypeCodes[L["u"], L["Q"], L[10]], + _NativeOrder, + _NBit[L[8], L[8]], + _BuiltinDType[np.ulonglong], +): + @property + def name(self) -> L["uint64"]: ... + @property + def str(self) -> L["u8"]: ... + +# Floats: + +@final +class Float16DType( + _TypeCodes[L["f"], L["e"], L[23]], + _NativeOrder, + _NBit[L[2], L[2]], + _BuiltinDType[np.float16], +): + @property + def name(self) -> L["float16"]: ... + @property + def str(self) -> L["f2"]: ... + +@final +class Float32DType( + _TypeCodes[L["f"], L["f"], L[11]], + _NativeOrder, + _NBit[L[4], L[4]], + _BuiltinDType[np.float32], +): + @property + def name(self) -> L["float32"]: ... + @property + def str(self) -> L["f4"]: ... + +@final +class Float64DType( + _TypeCodes[L["f"], L["d"], L[12]], + _NativeOrder, + _NBit[L[8], L[8]], + _BuiltinDType[np.float64], +): + @property + def name(self) -> L["float64"]: ... + @property + def str(self) -> L["f8"]: ... + +@final +class LongDoubleDType( + _TypeCodes[L["f"], L["g"], L[13]], + _NativeOrder, + _NBit[L[8, 12, 16], L[8, 12, 16]], + _BuiltinDType[np.longdouble], +): + @property + def name(self) -> L["float64", "float96", "float128"]: ... 
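+    # (The unions above and below reflect that C long double is 8, 12 or
+    # 16 bytes wide depending on platform and compiler.)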
+ @property + def str(self) -> L["f8", "f12", "f16"]: ... + # Complex: -Complex64DType = np.dtype[np.complex64] -Complex128DType = np.dtype[np.complex128] -CLongDoubleDType = np.dtype[np.clongdouble] -# Others: -ObjectDType = np.dtype[np.object_] -BytesDType = np.dtype[np.bytes_] -StrDType = np.dtype[np.str_] -VoidDType = np.dtype[np.void] -DateTime64DType = np.dtype[np.datetime64] -TimeDelta64DType = np.dtype[np.timedelta64] + +@final +class Complex64DType( + _TypeCodes[L["c"], L["F"], L[14]], + _NativeOrder, + _NBit[L[4], L[8]], + _BuiltinDType[np.complex64], +): + @property + def name(self) -> L["complex64"]: ... + @property + def str(self) -> L["c8"]: ... + +@final +class Complex128DType( + _TypeCodes[L["c"], L["D"], L[15]], + _NativeOrder, + _NBit[L[8], L[16]], + _BuiltinDType[np.complex128], +): + @property + def name(self) -> L["complex128"]: ... + @property + def str(self) -> L["c16"]: ... + +@final +class CLongDoubleDType( + _TypeCodes[L["c"], L["G"], L[16]], + _NativeOrder, + _NBit[L[8, 12, 16], L[16, 24, 32]], + _BuiltinDType[np.clongdouble], +): + @property + def name(self) -> L["complex128", "complex192", "complex256"]: ... + @property + def str(self) -> L["c16", "c24", "c32"]: ... + +# Python objects: + +@final +class ObjectDType( + _TypeCodes[L["O"], L["O"], L[17]], + _NoOrder, + _NBit[L[8], L[8]], + _BaseDType[np.object_], +): + @property + def flags(self) -> L[63]: ... + @property + def hasobject(self) -> L[True]: ... + @property + def isbuiltin(self) -> L[1]: ... + @property + def name(self) -> L["object"]: ... + @property + def str(self) -> L["|O"]: ... + +# Flexible: + +@final +class BytesDType( + Generic[_ItemSize_co], + _TypeCodes[L["S"], L["S"], L[18]], + _NoOrder, + _NBit[L[1],_ItemSize_co], + _BaseDType[np.bytes_], +): + def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ... + @property + def flags(self) -> L[0]: ... + @property + def hasobject(self) -> L[False]: ... + @property + def isbuiltin(self) -> L[0]: ... + @property + def name(self) -> LiteralString: ... + @property + def str(self) -> LiteralString: ... + +@final +class StrDType( + Generic[_ItemSize_co], + _TypeCodes[L["U"], L["U"], L[19]], + _NativeOrder, + _NBit[L[4],_ItemSize_co], + _BaseDType[np.str_], +): + def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ... + @property + def flags(self) -> L[8]: ... + @property + def hasobject(self) -> L[False]: ... + @property + def isbuiltin(self) -> L[0]: ... + @property + def name(self) -> LiteralString: ... + @property + def str(self) -> LiteralString: ... + +@final +class VoidDType( + Generic[_ItemSize_co], + _TypeCodes[L["V"], L["V"], L[20]], + _NoOrder, + _NBit[L[1],_ItemSize_co], + _BaseDType[np.void], +): + # NOTE: `VoidDType(...)` raises a `TypeError` at the moment + def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ... + @property + def flags(self) -> L[0]: ... + @property + def hasobject(self) -> L[False]: ... + @property + def isbuiltin(self) -> L[0]: ... + @property + def name(self) -> LiteralString: ... + @property + def str(self) -> LiteralString: ... 
+ +# Other: + +_DateTimeUnit_co = TypeVar( + "_DateTimeUnit_co", + bound=L[ + "Y", "M", "W", "D", + "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as", + ], + covariant=True, +) + +@final +class DateTime64DType( + Generic[_DateTimeUnit_co], + _TypeCodes[L["M"], L["M"], L[21]], + _NativeOrder, + _NBit[L[8], L[8]], + _BaseDType[np.datetime64], +): + # NOTE: `DateTime64DType(...)` raises a `TypeError` at the moment + # TODO: Once implemented, don't forget the`unit: L["μs"]` overload. + def __new__(cls, unit: _DateTimeUnit_co, /) -> NoReturn: ... + @property + def flags(self) -> L[0]: ... + @property + def hasobject(self) -> L[False]: ... + @property + def isbuiltin(self) -> L[0]: ... + @property + def name(self) -> L[ + "datetime64", + "datetime64[Y]", + "datetime64[M]", + "datetime64[W]", + "datetime64[D]", + "datetime64[h]", + "datetime64[m]", + "datetime64[s]", + "datetime64[ms]", + "datetime64[us]", + "datetime64[ns]", + "datetime64[ps]", + "datetime64[fs]", + "datetime64[as]", + ]: ... + @property + def str(self) -> L[ + "M8", + "M8[Y]", + "M8[M]", + "M8[W]", + "M8[D]", + "M8[h]", + "M8[m]", + "M8[s]", + "M8[ms]", + "M8[us]", + "M8[ns]", + "M8[ps]", + "M8[fs]", + "M8[as]", + ]: ... + +@final +class TimeDelta64DType( + Generic[_DateTimeUnit_co], + _TypeCodes[L["m"], L["m"], L[22]], + _NativeOrder, + _NBit[L[8], L[8]], + _BaseDType[np.timedelta64], +): + # NOTE: `TimeDelta64DType(...)` raises a `TypeError` at the moment + # TODO: Once implemented, don't forget to overload on `unit: L["μs"]`. + def __new__(cls, unit: _DateTimeUnit_co, /) -> NoReturn: ... + @property + def flags(self) -> L[0]: ... + @property + def hasobject(self) -> L[False]: ... + @property + def isbuiltin(self) -> L[0]: ... + @property + def name(self) -> L[ + "timedelta64", + "timedelta64[Y]", + "timedelta64[M]", + "timedelta64[W]", + "timedelta64[D]", + "timedelta64[h]", + "timedelta64[m]", + "timedelta64[s]", + "timedelta64[ms]", + "timedelta64[us]", + "timedelta64[ns]", + "timedelta64[ps]", + "timedelta64[fs]", + "timedelta64[as]", + ]: ... + @property + def str(self) -> L[ + "m8", + "m8[Y]", + "m8[M]", + "m8[W]", + "m8[D]", + "m8[h]", + "m8[m]", + "m8[s]", + "m8[ms]", + "m8[us]", + "m8[ns]", + "m8[ps]", + "m8[fs]", + "m8[as]", + ]: ... + +@final +class StringDType( + _TypeCodes[L["T"], L["T"], L[2056]], + _NativeOrder, + _NBit[L[8], L[16]], + # TODO: Replace the (invalid) `str` with the scalar type, once implemented + np.dtype[str], # type: ignore[misc] +): + def __new__(cls, /) -> StringDType: ... + def __getitem__(self, key: Any, /) -> NoReturn: ... + @property + def base(self) -> StringDType: ... + @property + def fields(self) -> None: ... + @property + def flags(self) -> L[107]: ... + @property + def hasobject(self) -> L[True]: ... + @property + def isalignedstruct(self) -> L[False]: ... + @property + def isbuiltin(self) -> L[0]: ... + @property + def isnative(self) -> L[True]: ... + @property + def metadata(self) -> None: ... + @property + def name(self) -> L["StringDType128"]: ... + @property + def ndim(self) -> L[0]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def str(self) -> L["|T16"]: ... + @property + def subdtype(self) -> None: ... + @property + def type(self) -> type[str]: ... From f5a5e04555ce320b498c96af06a0112a863f9c3c Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 26 Jul 2024 00:02:33 -0400 Subject: [PATCH 881/980] BUG: random: Fix long delays/hangs with zipf(a) when a near 1. Closes gh-9829. 
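
For context, the hang comes down to the acceptance rate of the rejection
loop: with am1 = a - 1 very close to 0, X = floor(U**(-1/am1)) exceeds
RAND_INT_MAX for almost every uniform draw U, so almost every iteration
is rejected. The patch below therefore samples U from (Umin, 1], where
Umin = RAND_INT_MAX**(-am1) is exactly the cutoff below which X would be
rejected as too large. A back-of-the-envelope sketch in Python (purely
illustrative, not the C implementation; it assumes RAND_INT_MAX is the
64-bit value 2**63 - 1, and expected_draws_per_sample is a name invented
for this sketch):

    RAND_INT_MAX = 2**63 - 1

    def expected_draws_per_sample(a):
        # A uniform draw U from (0, 1] survives the X <= RAND_INT_MAX
        # check with probability 1 - Umin, so on average 1/(1 - Umin)
        # draws are needed per in-range candidate.
        am1 = a - 1.0
        u_min = RAND_INT_MAX ** -am1
        return 1.0 / (1.0 - u_min)

    # Before the fix, zipf(1.0000000000001) needed roughly 2e11 draws
    # per in-range candidate, which is effectively a hang:
    print(f"{expected_draws_per_sample(1.0000000000001):.2e}")

    # After the fix, each raw draw U01 in [0, 1) is mapped onto
    # (Umin, 1] via U = U01*Umin + (1 - U01), so every candidate X is
    # already in range and no draws are wasted on the range check.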
---
 numpy/random/src/distributions/distributions.c     | 17 ++++++++++++++---
 .../tests/test_generator_mt19937_regressions.py    | 11 +++++++++++
 2 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c
index 4ce4b728584c..67d16e855a52 100644
--- a/numpy/random/src/distributions/distributions.c
+++ b/numpy/random/src/distributions/distributions.c
@@ -998,7 +998,7 @@ int64_t random_geometric(bitgen_t *bitgen_state, double p) {
 }
 
 RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a) {
-  double am1, b;
+  double am1, b, Umin;
 
   if (a >= 1025) {
     /*
@@ -1011,10 +1011,21 @@ RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a) {
   }
   am1 = a - 1.0;
   b = pow(2.0, am1);
+  /*
+   * In the while loop, X is generated from the uniform distribution (Umin, 1].
+   * Values below Umin would result in X being rejected because it is too
+   * large, so there is no point in including them in the distribution of U.
+   */
+  Umin = pow(RAND_INT_MAX, -am1);
   while (1) {
-    double T, U, V, X;
+    double U01, T, U, V, X;
 
-    U = 1.0 - next_double(bitgen_state);
+    /*
+     * U is sampled from (Umin, 1]. Note that Umin might be 0, and we don't
+     * want U to be 0.
+     */
+    U01 = next_double(bitgen_state);
+    U = U01*Umin + (1 - U01);
     V = next_double(bitgen_state);
     X = floor(pow(U, -1.0 / am1));
     /*
diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py
index 4983f45743fd..f92e92b39b50 100644
--- a/numpy/random/tests/test_generator_mt19937_regressions.py
+++ b/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -170,3 +170,14 @@ def test_zipf_large_parameter(self):
         n = 8
         sample = self.mt19937.zipf(10000, size=n)
         assert_array_equal(sample, np.ones(n, dtype=np.int64))
+
+    def test_zipf_a_near_1(self):
+        # Regression test for gh-9829: a call such as rng.zipf(1.0000000000001)
+        # would hang.
+        n = 100000
+        sample = self.mt19937.zipf(1.0000000000001, size=n)
+        # Not much of a test, but let's do something more than verify that
+        # it doesn't hang. Certainly for a monotonically decreasing
+        # discrete distribution truncated to signed 64-bit integers, more
+        # than half should be less than 2**62.
+        assert np.count_nonzero(sample < 2**62) > n/2

From 4606b13907bb95d3a7252e6841503f9535c8567c Mon Sep 17 00:00:00 2001
From: Christopher Sidebottom
Date: Fri, 26 Jul 2024 10:31:02 +0100
Subject: [PATCH 882/980] Mirror VQSORT_ENABLED logic in Quicksort

This patch disables Highway VQSort when the same criteria as in
sort/shared-inl.h are met, to prevent it from aborting at runtime.

I'm unsure whether this would look neater using Highway's dynamic
dispatch.
--- numpy/_core/meson.build | 1 + numpy/_core/src/npysort/highway_qsort.dispatch.cpp | 4 ++++ numpy/_core/src/npysort/highway_qsort.hpp | 13 +++++++++++++ .../src/npysort/highway_qsort_16bit.dispatch.cpp | 4 ++++ numpy/_core/src/npysort/quicksort.cpp | 4 ++-- 5 files changed, 24 insertions(+), 2 deletions(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 96c6dc2848d8..dbf1a144ed93 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1222,6 +1222,7 @@ py.extension_module('_multiarray_umath', 'src/multiarray', 'src/npymath', 'src/umath', + 'src/highway' ], dependencies: [blas_dep], link_with: [npymath_lib, multiarray_umath_mtargets.static_lib('_multiarray_umath_mtargets')] + highway_lib, diff --git a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp index 38adfc6de894..194a81e2d7e9 100644 --- a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp @@ -2,6 +2,8 @@ #define VQSORT_ONLY_STATIC 1 #include "hwy/contrib/sort/vqsort-inl.h" +#if VQSORT_ENABLED + #define DISPATCH_VQSORT(TYPE) \ template<> void NPY_CPU_DISPATCH_CURFX(QSort)(TYPE *arr, intptr_t size) \ { \ @@ -18,3 +20,5 @@ namespace np { namespace highway { namespace qsort_simd { DISPATCH_VQSORT(float) } } } // np::highway::qsort_simd + +#endif // VQSORT_ENABLED diff --git a/numpy/_core/src/npysort/highway_qsort.hpp b/numpy/_core/src/npysort/highway_qsort.hpp index e08fb3629ec8..d33f4fc7d5d9 100644 --- a/numpy/_core/src/npysort/highway_qsort.hpp +++ b/numpy/_core/src/npysort/highway_qsort.hpp @@ -1,8 +1,20 @@ #ifndef NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP #define NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP +#include "hwy/highway.h" + #include "common.hpp" +// This replicates VQSORT_ENABLED from hwy/contrib/sort/shared-inl.h +// without checking the scalar target as this is not built within the dynamic +// dispatched sources. 
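+// When these conditions hold, VQSort is unavailable and would abort at
+// runtime, so the Highway sort declarations below are compiled out.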
+#if (HWY_COMPILER_MSVC && !HWY_IS_DEBUG_BUILD) || \ + (HWY_ARCH_ARM_V7 && HWY_IS_DEBUG_BUILD) || \ + (HWY_ARCH_ARM_A64 && HWY_COMPILER_GCC_ACTUAL && HWY_IS_ASAN) +#define NPY_DISABLE_HIGHWAY_SORT +#endif + +#ifndef NPY_DISABLE_HIGHWAY_SORT namespace np { namespace highway { namespace qsort_simd { #ifndef NPY_DISABLE_OPTIMIZATION @@ -21,3 +33,4 @@ NPY_CPU_DISPATCH_DECLARE(template void QSelect, (T* arr, npy_intp n } } } // np::highway::qsort_simd #endif // NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP +#endif // NPY_DISABLE_HIGHWAY_SORT diff --git a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp index 35b6cc58c7e8..d069cb6373d0 100644 --- a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp @@ -4,6 +4,8 @@ #include "quicksort.hpp" +#if VQSORT_ENABLED + namespace np { namespace highway { namespace qsort_simd { template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, intptr_t size) @@ -24,3 +26,5 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int16_t *arr, intptr_t size) } } } } // np::highway::qsort_simd + +#endif // VQSORT_ENABLED diff --git a/numpy/_core/src/npysort/quicksort.cpp b/numpy/_core/src/npysort/quicksort.cpp index 15e5668f599d..c8c6ab61bdf0 100644 --- a/numpy/_core/src/npysort/quicksort.cpp +++ b/numpy/_core/src/npysort/quicksort.cpp @@ -80,7 +80,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) using TF = typename np::meta::FixedWidth::Type; void (*dispfunc)(TF*, intptr_t) = nullptr; if (sizeof(T) == sizeof(uint16_t)) { - #ifndef NPY_DISABLE_OPTIMIZATION + #if !defined(NPY_DISABLE_OPTIMIZATION) && !defined(NPY_DISABLE_HIGHWAY_SORT) #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); @@ -91,7 +91,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) #endif } else if (sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t)) { - #ifndef NPY_DISABLE_OPTIMIZATION + #if !defined(NPY_DISABLE_OPTIMIZATION) && !defined(NPY_DISABLE_HIGHWAY_SORT) #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); From 0a576b5c93e596a10db676ddabb7069bf637b74a Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 26 Jul 2024 09:28:42 -0400 Subject: [PATCH 883/980] MAINT: Restore legacy zipf implementation. --- .../random/src/legacy/legacy-distributions.c | 26 ++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index b518b8a03994..14d9ce25f255 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -388,7 +388,31 @@ int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) { } int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) { - return (int64_t)random_zipf(bitgen_state, a); + double am1, b; + + am1 = a - 1.0; + b = pow(2.0, am1); + while (1) { + double T, U, V, X; + + U = 1.0 - next_double(bitgen_state); + V = next_double(bitgen_state); + X = floor(pow(U, -1.0 / am1)); + /* + * The real result may be above what can be represented in a signed + * long. Since this is a straightforward rejection algorithm, we can + * just reject this value. 
This function then models a Zipf + * distribution truncated to sys.maxint. + */ + if (X > (double)RAND_INT_MAX || X < 1.0) { + continue; + } + + T = pow(1.0 + 1.0 / X, am1); + if (V * X * (T - 1.0) / (b - 1.0) <= T / b) { + return (RAND_INT_TYPE)X; + } + } } From 5aeafacee67aa3796db6aa98a068c294a211536c Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Fri, 26 Jul 2024 14:02:16 +0000 Subject: [PATCH 884/980] TST: Refactor to consistently use CompilerChecker --- numpy/f2py/tests/util.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 0cbfbfe50a8c..3664e40b15b5 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -109,6 +109,9 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()" d = get_module_dir() + # gh-27045 : Skip if no compilers are found + if not has_fortran_compiler(): + pytest.skip("No Fortran compiler available") # Copy files dst_sources = [] @@ -285,6 +288,9 @@ def has_f77_compiler(): def has_f90_compiler(): return checker.has_f90 +def has_fortran_compiler(): + return (checker.has_f90 and checker.has_f77) + # # Building with meson # @@ -303,6 +309,11 @@ def build_meson(source_files, module_name=None, **kwargs): """ Build a module via Meson and import it. """ + + # gh-27045 : Skip if no compilers are found + if not has_fortran_compiler(): + pytest.skip("No Fortran compiler available") + build_dir = get_module_dir() if module_name is None: module_name = get_temp_module_name() @@ -327,13 +338,7 @@ def build_meson(source_files, module_name=None, **kwargs): extra_dat=kwargs.get("extra_dat", {}), ) - # Compile the module - # NOTE: Catch-all since without distutils it is hard to determine which - # compiler stack is on the CI - try: - backend.compile() - except subprocess.CalledProcessError: - pytest.skip("Failed to compile module") + backend.compile() # Import the compiled module sys.path.insert(0, f"{build_dir}/{backend.meson_build_dir}") @@ -369,6 +374,7 @@ def setup_class(cls): F2PyTest._has_c_compiler = has_c_compiler() F2PyTest._has_f77_compiler = has_f77_compiler() F2PyTest._has_f90_compiler = has_f90_compiler() + F2PyTest._has_fortran_compiler = has_fortran_compiler() def setup_method(self): if self.module is not None: @@ -386,7 +392,7 @@ def setup_method(self): pytest.skip("No Fortran 77 compiler available") if needs_f90 and not self._has_f90_compiler: pytest.skip("No Fortran 90 compiler available") - if needs_pyf and not (self._has_f90_compiler or self._has_f77_compiler): + if needs_pyf and not self._has_fortran_compiler: pytest.skip("No Fortran compiler available") # Build the module From 67b3ac3f3e9ec0cb9613e7faf8ea3b690fada2a1 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Fri, 26 Jul 2024 14:02:48 +0000 Subject: [PATCH 885/980] TST: Add a wrapper for f2py compiler cli tests --- numpy/f2py/tests/test_f2py2e.py | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 9944da003a8b..97bdb3a5dac7 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -9,6 +9,18 @@ from numpy.f2py.f2py2e import main as f2pycli from numpy.testing._private.utils import NOGIL_BUILD +####################### +# F2PY Test utilities # +###################### + +# Tests for CLI commands which call meson will fail if no 
compilers are present, these are to be skipped + +def compiler_check_f2pycli(): + if not util.has_fortran_compiler(): + pytest.skip("CLI command needs a Fortran compiler") + else: + f2pycli() + ######################### # CLI utils and classes # ######################### @@ -50,9 +62,9 @@ def get_io_paths(fname_inp, mname="untitled"): ) -############## -# CLI Fixtures and Tests # -############# +################ +# CLI Fixtures # +################ @pytest.fixture(scope="session") @@ -110,6 +122,9 @@ def f2cmap_f90(tmpdir_factory): fmap.write_text(f2cmap, encoding="ascii") return fn +######### +# Tests # +######### def test_gh22819_cli(capfd, gh22819_cli, monkeypatch): """Check that module names are handled correctly @@ -199,8 +214,7 @@ def test_gen_pyf_no_overwrite(capfd, hello_world_f90, monkeypatch): assert "Use --overwrite-signature to overwrite" in err -@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), - reason='Compiler and 3.12 required') +@pytest.mark.skipif(sys.version_info <= (3, 12), reason="Python 3.12 required") def test_untitled_cli(capfd, hello_world_f90, monkeypatch): """Check that modules are named correctly @@ -209,7 +223,7 @@ def test_untitled_cli(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) monkeypatch.setattr(sys, "argv", f"f2py --backend meson -c {ipath}".split()) with util.switchdir(ipath.parent): - f2pycli() + compiler_check_f2pycli() out, _ = capfd.readouterr() assert "untitledmodule.c" in out @@ -226,7 +240,7 @@ def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): sys, "argv", f"f2py {ipath} -c --fcompiler=gfortran -m {MNAME}".split() ) with util.switchdir(ipath.parent): - f2pycli() + compiler_check_f2pycli() out, _ = capfd.readouterr() assert "--fcompiler cannot be used with meson" in out monkeypatch.setattr( @@ -759,7 +773,7 @@ def test_no_freethreading_compatible(hello_world_f90, monkeypatch): monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --no-freethreading-compatible'.split()) with util.switchdir(ipath.parent): - f2pycli() + compiler_check_f2pycli() cmd = f"{sys.executable} -c \"import blah; blah.hi();" if NOGIL_BUILD: cmd += "import sys; assert sys._is_gil_enabled() is True\"" @@ -784,7 +798,7 @@ def test_freethreading_compatible(hello_world_f90, monkeypatch): monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --freethreading-compatible'.split()) with util.switchdir(ipath.parent): - f2pycli() + compiler_check_f2pycli() cmd = f"{sys.executable} -c \"import blah; blah.hi();" if NOGIL_BUILD: cmd += "import sys; assert sys._is_gil_enabled() is False\"" From cca4a6be6c368ea5c14eb77696bbf7a16d40b55b Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Fri, 26 Jul 2024 14:20:56 +0000 Subject: [PATCH 886/980] MAINT: Move checker up in f2py test utils --- numpy/f2py/tests/util.py | 180 ++++++++++++++++++++------------------- 1 file changed, 91 insertions(+), 89 deletions(-) diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 3664e40b15b5..40e9e67299ea 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -26,6 +26,97 @@ from importlib import import_module from numpy.f2py._backends._meson import MesonBackend +# +# Check if compilers are available at all... 
+# + +def check_language(lang, code_snippet=None): + tmpdir = tempfile.mkdtemp() + try: + meson_file = os.path.join(tmpdir, "meson.build") + with open(meson_file, "w") as f: + f.write("project('check_compilers')\n") + f.write(f"add_languages('{lang}')\n") + if code_snippet: + f.write(f"{lang}_compiler = meson.get_compiler('{lang}')\n") + f.write(f"{lang}_code = '''{code_snippet}'''\n") + f.write( + f"_have_{lang}_feature =" + f"{lang}_compiler.compiles({lang}_code," + f" name: '{lang} feature check')\n" + ) + runmeson = subprocess.run( + ["meson", "setup", "btmp"], + check=False, + cwd=tmpdir, + capture_output=True, + ) + return runmeson.returncode == 0 + finally: + shutil.rmtree(tmpdir) + return False + + +fortran77_code = ''' +C Example Fortran 77 code + PROGRAM HELLO + PRINT *, 'Hello, Fortran 77!' + END +''' + +fortran90_code = ''' +! Example Fortran 90 code +program hello90 + type :: greeting + character(len=20) :: text + end type greeting + + type(greeting) :: greet + greet%text = 'hello, fortran 90!' + print *, greet%text +end program hello90 +''' + +# Dummy class for caching relevant checks +class CompilerChecker: + def __init__(self): + self.compilers_checked = False + self.has_c = False + self.has_f77 = False + self.has_f90 = False + + def check_compilers(self): + if (not self.compilers_checked) and (not sys.platform == "cygwin"): + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = [ + executor.submit(check_language, "c"), + executor.submit(check_language, "fortran", fortran77_code), + executor.submit(check_language, "fortran", fortran90_code) + ] + + self.has_c = futures[0].result() + self.has_f77 = futures[1].result() + self.has_f90 = futures[2].result() + + self.compilers_checked = True + +if not IS_WASM: + checker = CompilerChecker() + checker.check_compilers() + +def has_c_compiler(): + return checker.has_c + +def has_f77_compiler(): + return checker.has_f77 + +def has_f90_compiler(): + return checker.has_f90 + +def has_fortran_compiler(): + return (checker.has_f90 and checker.has_f77) + + # # Maintaining a temporary module directory # @@ -202,95 +293,6 @@ def build_code(source_code, module_name=module_name) -# -# Check if compilers are available at all... -# - -def check_language(lang, code_snippet=None): - tmpdir = tempfile.mkdtemp() - try: - meson_file = os.path.join(tmpdir, "meson.build") - with open(meson_file, "w") as f: - f.write("project('check_compilers')\n") - f.write(f"add_languages('{lang}')\n") - if code_snippet: - f.write(f"{lang}_compiler = meson.get_compiler('{lang}')\n") - f.write(f"{lang}_code = '''{code_snippet}'''\n") - f.write( - f"_have_{lang}_feature =" - f"{lang}_compiler.compiles({lang}_code," - f" name: '{lang} feature check')\n" - ) - runmeson = subprocess.run( - ["meson", "setup", "btmp"], - check=False, - cwd=tmpdir, - capture_output=True, - ) - return runmeson.returncode == 0 - finally: - shutil.rmtree(tmpdir) - return False - -fortran77_code = ''' -C Example Fortran 77 code - PROGRAM HELLO - PRINT *, 'Hello, Fortran 77!' - END -''' - -fortran90_code = ''' -! Example Fortran 90 code -program hello90 - type :: greeting - character(len=20) :: text - end type greeting - - type(greeting) :: greet - greet%text = 'hello, fortran 90!' 
- print *, greet%text -end program hello90 -''' - -# Dummy class for caching relevant checks -class CompilerChecker: - def __init__(self): - self.compilers_checked = False - self.has_c = False - self.has_f77 = False - self.has_f90 = False - - def check_compilers(self): - if (not self.compilers_checked) and (not sys.platform == "cygwin"): - with concurrent.futures.ThreadPoolExecutor() as executor: - futures = [ - executor.submit(check_language, "c"), - executor.submit(check_language, "fortran", fortran77_code), - executor.submit(check_language, "fortran", fortran90_code) - ] - - self.has_c = futures[0].result() - self.has_f77 = futures[1].result() - self.has_f90 = futures[2].result() - - self.compilers_checked = True - -if not IS_WASM: - checker = CompilerChecker() - checker.check_compilers() - -def has_c_compiler(): - return checker.has_c - -def has_f77_compiler(): - return checker.has_f77 - -def has_f90_compiler(): - return checker.has_f90 - -def has_fortran_compiler(): - return (checker.has_f90 and checker.has_f77) - # # Building with meson # From 0aba26e5f56231e655ea892f7a312e033b5b3b7c Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Fri, 26 Jul 2024 14:26:13 +0000 Subject: [PATCH 887/980] TST: Skip compiled tests if meson is missing --- numpy/f2py/tests/util.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 40e9e67299ea..877a2d325b5b 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -45,12 +45,15 @@ def check_language(lang, code_snippet=None): f"{lang}_compiler.compiles({lang}_code," f" name: '{lang} feature check')\n" ) - runmeson = subprocess.run( - ["meson", "setup", "btmp"], - check=False, - cwd=tmpdir, - capture_output=True, - ) + try: + runmeson = subprocess.run( + ["meson", "setup", "btmp"], + check=False, + cwd=tmpdir, + capture_output=True, + ) + except subprocess.CalledProcessError: + pytest.skip("meson not present, skipping compiler dependent test") return runmeson.returncode == 0 finally: shutil.rmtree(tmpdir) From 24fa17593ffcf38fe9feb673084786359e6caf82 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Fri, 26 Jul 2024 14:30:27 +0000 Subject: [PATCH 888/980] DOC: Add no compiler compliance details --- doc/source/f2py/f2py-testing.rst | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/doc/source/f2py/f2py-testing.rst b/doc/source/f2py/f2py-testing.rst index 945b4ccaa338..36ef65615f36 100644 --- a/doc/source/f2py/f2py-testing.rst +++ b/doc/source/f2py/f2py-testing.rst @@ -50,6 +50,17 @@ functions will be appended to ``self.module`` data member. Thus, the child class be able to access the fortran functions specified in source file by calling ``self.module.[fortran_function_name]``. +.. versionadded:: v2.0.0b1 + +Each of the ``f2py`` tests should run without failure if no Fortran compilers +are present on the host machine. To facilitate this, the ``CompilerChecker`` is +used, essentially providing a ``meson`` dependent set of utilities namely +``has_{c,f77,f90}_compiler()`` or ``has_fortran_compilers()``. + +For the CLI tests in ``test_f2py2e``, flags which are expected to call ``meson`` +or otherwise depend on a compiler need to call ``compiler_check_f2pycli()`` +instead of ``f2pycli()``. + Example ~~~~~~~ @@ -77,4 +88,4 @@ A test can be implemented as follows:: We override the ``sources`` data member to provide the source file. 
The source files are compiled and subroutines are attached to module data member when the class object -is created. The ``test_module`` function calls the subroutines and tests their results. \ No newline at end of file +is created. The ``test_module`` function calls the subroutines and tests their results. From 115c8c69d465bdd7b75768a4313340a576a6c68b Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Fri, 26 Jul 2024 14:51:03 +0000 Subject: [PATCH 889/980] TST: Skip fortran tests for now on Windows As per the existing NumPy situation.. --- numpy/f2py/tests/util.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 877a2d325b5b..9cad71a9cf5c 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -31,6 +31,8 @@ # def check_language(lang, code_snippet=None): + if sys.platform == "win32": + pytest.skip("No Fortran tests on Windows (Issue #25134)", allow_module_level=True) tmpdir = tempfile.mkdtemp() try: meson_file = os.path.join(tmpdir, "meson.build") @@ -53,7 +55,7 @@ def check_language(lang, code_snippet=None): capture_output=True, ) except subprocess.CalledProcessError: - pytest.skip("meson not present, skipping compiler dependent test") + pytest.skip("meson not present, skipping compiler dependent test", allow_module_level=True) return runmeson.returncode == 0 finally: shutil.rmtree(tmpdir) From af6dade0cfe413f012ae7f09f81d2ae8bcbcac12 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 26 Jul 2024 17:44:52 +0200 Subject: [PATCH 890/980] TYP: Apply review suggestions in ``numpy.dtypes`` ... and use a more consistent ``TypeVar`` naming scheme. Co-authored-by: Sebastian Berg --- numpy/dtypes.pyi | 173 ++++++++++++++++++----------------------------- 1 file changed, 66 insertions(+), 107 deletions(-) diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 673cbde8d871..706e538c8bea 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -1,4 +1,3 @@ -import sys from typing import ( Any, Final, @@ -9,15 +8,10 @@ from typing import ( TypeVar, final, ) +from typing_extensions import LiteralString import numpy as np -if sys.version_info >= (3, 11): - from typing import LiteralString -else: - LiteralString: TypeAlias = str - - __all__ = [ 'BoolDType', 'Int8DType', @@ -54,17 +48,17 @@ __all__ = [ 'StringDType', ] -_SelfType = TypeVar("_SelfType", bound=object) -_SCT = TypeVar("_SCT", bound=np.generic, covariant=True) - # Helper base classes (typing-only) -class _BaseDType(Generic[_SCT], np.dtype[_SCT]): # type: ignore[misc] +_SelfT = TypeVar("_SelfT", bound=np.dtype[Any]) +_SCT_co = TypeVar("_SCT_co", bound=np.generic, covariant=True) + +class _SimpleDType(Generic[_SCT_co], np.dtype[_SCT_co]): # type: ignore[misc] names: None # pyright: ignore[reportIncompatibleVariableOverride] - def __new__(cls: type[_SelfType], /) -> _SelfType: ... + def __new__(cls: type[_SelfT], /) -> _SelfT: ... def __getitem__(self, key: Any, /) -> NoReturn: ... @property - def base(self) -> np.dtype[_SCT]: ... + def base(self) -> np.dtype[_SCT_co]: ... @property def fields(self) -> None: ... @property @@ -72,38 +66,34 @@ class _BaseDType(Generic[_SCT], np.dtype[_SCT]): # type: ignore[misc] @property def isnative(self) -> L[True]: ... @property - def metadata(self) -> None: ... - @property def ndim(self) -> L[0]: ... @property def shape(self) -> tuple[()]: ... @property def subdtype(self) -> None: ... 
-class _BuiltinDType(Generic[_SCT], _BaseDType[_SCT]): +class _LiteralDType(Generic[_SCT_co], _SimpleDType[_SCT_co]): @property def flags(self) -> L[0]: ... @property def hasobject(self) -> L[False]: ... - @property - def isbuiltin(self) -> L[1]: ... # Helper mixins (typing-only): -_KindChar = TypeVar("_KindChar", bound=LiteralString, covariant=True) -_TypeChar = TypeVar("_TypeChar", bound=LiteralString, covariant=True) -_TypeNum = TypeVar("_TypeNum", bound=int, covariant=True) +_KindT_co = TypeVar("_KindT_co", bound=LiteralString, covariant=True) +_CharT_co = TypeVar("_CharT_co", bound=LiteralString, covariant=True) +_NumT_co = TypeVar("_NumT_co", bound=int, covariant=True) -class _TypeCodes(Generic[_KindChar, _TypeChar, _TypeNum]): +class _TypeCodes(Generic[_KindT_co, _CharT_co, _NumT_co]): @final @property - def kind(self) -> _KindChar: ... + def kind(self) -> _KindT_co: ... @final @property - def char(self) -> _TypeChar: ... + def char(self) -> _CharT_co: ... @final @property - def num(self) -> _TypeNum: ... + def num(self) -> _NumT_co: ... class _NoOrder: @final @@ -134,7 +124,7 @@ class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ... class BoolDType( _TypeCodes[L["b"], L["?"], L[0]], _8Bit, - _BuiltinDType[np.bool], + _LiteralDType[np.bool], ): @property def name(self) -> L["bool"]: ... @@ -147,7 +137,7 @@ class BoolDType( class Int8DType( _TypeCodes[L["i"], L["b"], L[1]], _8Bit, - _BuiltinDType[np.int8], + _LiteralDType[np.int8], ): @property def name(self) -> L["int8"]: ... @@ -158,7 +148,7 @@ class Int8DType( class UInt8DType( _TypeCodes[L["u"], L["B"], L[2]], _8Bit, - _BuiltinDType[np.uint8], + _LiteralDType[np.uint8], ): @property def name(self) -> L["uint8"]: ... @@ -170,7 +160,7 @@ class Int16DType( _TypeCodes[L["i"], L["h"], L[3]], _NativeOrder, _NBit[L[2], L[2]], - _BuiltinDType[np.int16], + _LiteralDType[np.int16], ): @property def name(self) -> L["int16"]: ... @@ -182,7 +172,7 @@ class UInt16DType( _TypeCodes[L["u"], L["H"], L[4]], _NativeOrder, _NBit[L[2], L[2]], - _BuiltinDType[np.uint16], + _LiteralDType[np.uint16], ): @property def name(self) -> L["uint16"]: ... @@ -194,7 +184,7 @@ class Int32DType( _TypeCodes[L["i"], L["i", "l"], L[5, 7]], _NativeOrder, _NBit[L[4], L[4]], - _BuiltinDType[np.int32], + _LiteralDType[np.int32], ): @property def name(self) -> L["int32"]: ... @@ -206,7 +196,7 @@ class UInt32DType( _TypeCodes[L["u"], L["I", "L"], L[6, 8]], _NativeOrder, _NBit[L[4], L[4]], - _BuiltinDType[np.uint32], + _LiteralDType[np.uint32], ): @property def name(self) -> L["uint32"]: ... @@ -218,7 +208,7 @@ class Int64DType( _TypeCodes[L["i"], L["l", "q"], L[7, 9]], _NativeOrder, _NBit[L[8], L[8]], - _BuiltinDType[np.int64], + _LiteralDType[np.int64], ): @property def name(self) -> L["int64"]: ... @@ -230,7 +220,7 @@ class UInt64DType( _TypeCodes[L["u"], L["L", "Q"], L[8, 10]], _NativeOrder, _NBit[L[8], L[8]], - _BuiltinDType[np.uint64], + _LiteralDType[np.uint64], ): @property def name(self) -> L["uint64"]: ... @@ -247,32 +237,32 @@ UShortDType: Final = UInt16DType class IntDType( _TypeCodes[L["i"], L["i"], L[5]], _NativeOrder, - _NBit[L[2, 4], L[2, 4]], - _BuiltinDType[np.intc], + _NBit[L[4], L[4]], + _LiteralDType[np.intc], ): @property - def name(self) -> L["int16", "int32"]: ... + def name(self) -> L["int32"]: ... @property - def str(self) -> L["i2", "i4"]: ... + def str(self) -> L["i4"]: ... 
@final class UIntDType( _TypeCodes[L["u"], L["I"], L[6]], _NativeOrder, - _NBit[L[2, 4], L[2, 4]], - _BuiltinDType[np.uintc], + _NBit[L[4], L[4]], + _LiteralDType[np.uintc], ): @property - def name(self) -> L["uint16", "uint32"]: ... + def name(self) -> L["uint32"]: ... @property - def str(self) -> L["u2", "u4"]: ... + def str(self) -> L["u4"]: ... @final class LongDType( _TypeCodes[L["i"], L["l"], L[7]], _NativeOrder, _NBit[L[4, 8], L[4, 8]], - _BuiltinDType[np.long], + _LiteralDType[np.long], ): @property def name(self) -> L["int32", "int64"]: ... @@ -284,7 +274,7 @@ class ULongDType( _TypeCodes[L["u"], L["L"], L[8]], _NativeOrder, _NBit[L[4, 8], L[4, 8]], - _BuiltinDType[np.ulong], + _LiteralDType[np.ulong], ): @property def name(self) -> L["uint32", "uint64"]: ... @@ -296,7 +286,7 @@ class LongLongDType( _TypeCodes[L["i"], L["q"], L[9]], _NativeOrder, _NBit[L[8], L[8]], - _BuiltinDType[np.longlong], + _LiteralDType[np.longlong], ): @property def name(self) -> L["int64"]: ... @@ -308,7 +298,7 @@ class ULongLongDType( _TypeCodes[L["u"], L["Q"], L[10]], _NativeOrder, _NBit[L[8], L[8]], - _BuiltinDType[np.ulonglong], + _LiteralDType[np.ulonglong], ): @property def name(self) -> L["uint64"]: ... @@ -322,7 +312,7 @@ class Float16DType( _TypeCodes[L["f"], L["e"], L[23]], _NativeOrder, _NBit[L[2], L[2]], - _BuiltinDType[np.float16], + _LiteralDType[np.float16], ): @property def name(self) -> L["float16"]: ... @@ -334,7 +324,7 @@ class Float32DType( _TypeCodes[L["f"], L["f"], L[11]], _NativeOrder, _NBit[L[4], L[4]], - _BuiltinDType[np.float32], + _LiteralDType[np.float32], ): @property def name(self) -> L["float32"]: ... @@ -346,7 +336,7 @@ class Float64DType( _TypeCodes[L["f"], L["d"], L[12]], _NativeOrder, _NBit[L[8], L[8]], - _BuiltinDType[np.float64], + _LiteralDType[np.float64], ): @property def name(self) -> L["float64"]: ... @@ -358,7 +348,7 @@ class LongDoubleDType( _TypeCodes[L["f"], L["g"], L[13]], _NativeOrder, _NBit[L[8, 12, 16], L[8, 12, 16]], - _BuiltinDType[np.longdouble], + _LiteralDType[np.longdouble], ): @property def name(self) -> L["float64", "float96", "float128"]: ... @@ -372,7 +362,7 @@ class Complex64DType( _TypeCodes[L["c"], L["F"], L[14]], _NativeOrder, _NBit[L[4], L[8]], - _BuiltinDType[np.complex64], + _LiteralDType[np.complex64], ): @property def name(self) -> L["complex64"]: ... @@ -384,7 +374,7 @@ class Complex128DType( _TypeCodes[L["c"], L["D"], L[15]], _NativeOrder, _NBit[L[8], L[16]], - _BuiltinDType[np.complex128], + _LiteralDType[np.complex128], ): @property def name(self) -> L["complex128"]: ... @@ -396,7 +386,7 @@ class CLongDoubleDType( _TypeCodes[L["c"], L["G"], L[16]], _NativeOrder, _NBit[L[8, 12, 16], L[16, 24, 32]], - _BuiltinDType[np.clongdouble], + _LiteralDType[np.clongdouble], ): @property def name(self) -> L["complex128", "complex192", "complex256"]: ... @@ -410,15 +400,11 @@ class ObjectDType( _TypeCodes[L["O"], L["O"], L[17]], _NoOrder, _NBit[L[8], L[8]], - _BaseDType[np.object_], + _SimpleDType[np.object_], ): - @property - def flags(self) -> L[63]: ... @property def hasobject(self) -> L[True]: ... @property - def isbuiltin(self) -> L[1]: ... - @property def name(self) -> L["object"]: ... @property def str(self) -> L["|O"]: ... @@ -431,16 +417,12 @@ class BytesDType( _TypeCodes[L["S"], L["S"], L[18]], _NoOrder, _NBit[L[1],_ItemSize_co], - _BaseDType[np.bytes_], + _SimpleDType[np.bytes_], ): def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ... @property - def flags(self) -> L[0]: ... 
- @property def hasobject(self) -> L[False]: ... @property - def isbuiltin(self) -> L[0]: ... - @property def name(self) -> LiteralString: ... @property def str(self) -> LiteralString: ... @@ -451,16 +433,12 @@ class StrDType( _TypeCodes[L["U"], L["U"], L[19]], _NativeOrder, _NBit[L[4],_ItemSize_co], - _BaseDType[np.str_], + _SimpleDType[np.str_], ): def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ... @property - def flags(self) -> L[8]: ... - @property def hasobject(self) -> L[False]: ... @property - def isbuiltin(self) -> L[0]: ... - @property def name(self) -> LiteralString: ... @property def str(self) -> LiteralString: ... @@ -470,17 +448,23 @@ class VoidDType( Generic[_ItemSize_co], _TypeCodes[L["V"], L["V"], L[20]], _NoOrder, - _NBit[L[1],_ItemSize_co], - _BaseDType[np.void], + _NBit[L[1], _ItemSize_co], + np.dtype[np.void], # type: ignore[misc] ): # NOTE: `VoidDType(...)` raises a `TypeError` at the moment def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ... @property - def flags(self) -> L[0]: ... + def base(self: _SelfT) -> _SelfT: ... @property - def hasobject(self) -> L[False]: ... + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... @property - def isbuiltin(self) -> L[0]: ... + def ndim(self) -> L[0]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def subdtype(self) -> None: ... @property def name(self) -> LiteralString: ... @property @@ -488,32 +472,20 @@ class VoidDType( # Other: -_DateTimeUnit_co = TypeVar( - "_DateTimeUnit_co", - bound=L[ - "Y", "M", "W", "D", - "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as", - ], - covariant=True, -) +_DateUnit: TypeAlias = L["Y", "M", "W", "D"] +_TimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] +_DateTimeUnit: TypeAlias = _DateUnit | _TimeUnit @final class DateTime64DType( - Generic[_DateTimeUnit_co], _TypeCodes[L["M"], L["M"], L[21]], _NativeOrder, _NBit[L[8], L[8]], - _BaseDType[np.datetime64], + _LiteralDType[np.datetime64], ): # NOTE: `DateTime64DType(...)` raises a `TypeError` at the moment # TODO: Once implemented, don't forget the`unit: L["μs"]` overload. - def __new__(cls, unit: _DateTimeUnit_co, /) -> NoReturn: ... - @property - def flags(self) -> L[0]: ... - @property - def hasobject(self) -> L[False]: ... - @property - def isbuiltin(self) -> L[0]: ... + def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ... @property def name(self) -> L[ "datetime64", @@ -551,21 +523,14 @@ class DateTime64DType( @final class TimeDelta64DType( - Generic[_DateTimeUnit_co], _TypeCodes[L["m"], L["m"], L[22]], _NativeOrder, _NBit[L[8], L[8]], - _BaseDType[np.timedelta64], + _LiteralDType[np.timedelta64], ): # NOTE: `TimeDelta64DType(...)` raises a `TypeError` at the moment # TODO: Once implemented, don't forget to overload on `unit: L["μs"]`. - def __new__(cls, unit: _DateTimeUnit_co, /) -> NoReturn: ... - @property - def flags(self) -> L[0]: ... - @property - def hasobject(self) -> L[False]: ... - @property - def isbuiltin(self) -> L[0]: ... + def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ... @property def name(self) -> L[ "timedelta64", @@ -616,25 +581,19 @@ class StringDType( @property def fields(self) -> None: ... @property - def flags(self) -> L[107]: ... - @property def hasobject(self) -> L[True]: ... @property def isalignedstruct(self) -> L[False]: ... @property - def isbuiltin(self) -> L[0]: ... - @property def isnative(self) -> L[True]: ... @property - def metadata(self) -> None: ... 
- @property - def name(self) -> L["StringDType128"]: ... + def name(self) -> L["StringDType64", "StringDType128"]: ... @property def ndim(self) -> L[0]: ... @property def shape(self) -> tuple[()]: ... @property - def str(self) -> L["|T16"]: ... + def str(self) -> L["|T8", "|T16"]: ... @property def subdtype(self) -> None: ... @property From a0bd202146d8adcc2dfa13544e3af95628094860 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 26 Jul 2024 09:58:59 -0600 Subject: [PATCH 891/980] TST: fix issues with tests that use numpy.testing.extbuild --- numpy/_core/tests/test_array_interface.py | 3 +-- numpy/_core/tests/test_mem_policy.py | 13 ------------- numpy/testing/_private/extbuild.py | 6 +++++- 3 files changed, 6 insertions(+), 16 deletions(-) diff --git a/numpy/_core/tests/test_array_interface.py b/numpy/_core/tests/test_array_interface.py index f049eea55d8a..ae719568a4b2 100644 --- a/numpy/_core/tests/test_array_interface.py +++ b/numpy/_core/tests/test_array_interface.py @@ -9,8 +9,7 @@ def get_module(tmp_path): """ Some codes to generate data and manage temporary buffers use when sharing with numpy via the array interface protocol. """ - - if not sys.platform.startswith('linux'): + if sys.platform.startswith('cygwin'): pytest.skip('link fails on cygwin') if IS_WASM: pytest.skip("Can't build module inside Wasm") diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 9540d17d03cb..32459ab4d999 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -12,11 +12,6 @@ from numpy._core.multiarray import get_handler_name -# FIXME: numpy.testing.extbuild uses `numpy.distutils`, so this won't work on -# Python 3.12 and up. It's an internal test utility, so for now we just skip -# these tests. 
- - @pytest.fixture def get_module(tmp_path): """ Add a memory policy that returns a false pointer 64 bytes into the @@ -234,7 +229,6 @@ def get_module(tmp_path): more_init=more_init) -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_set_policy(get_module): get_handler_name = np._core.multiarray.get_handler_name @@ -267,7 +261,6 @@ def test_set_policy(get_module): get_module.set_wrong_capsule_name_data_policy() -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_default_policy_singleton(get_module): get_handler_name = np._core.multiarray.get_handler_name @@ -289,7 +282,6 @@ def test_default_policy_singleton(get_module): assert def_policy_1 is def_policy_2 is get_module.get_default_policy() -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_policy_propagation(get_module): # The memory policy goes hand-in-hand with flags.owndata @@ -348,7 +340,6 @@ async def async_test_context_locality(get_module): assert np._core.multiarray.get_handler_name() == orig_policy_name -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_context_locality(get_module): if (sys.implementation.name == 'pypy' and sys.pypy_version_info[:3] < (7, 3, 6)): @@ -370,7 +361,6 @@ def concurrent_thread2(get_module, event): get_module.set_secret_data_policy() -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_thread_locality(get_module): orig_policy_name = np._core.multiarray.get_handler_name() @@ -389,7 +379,6 @@ def test_thread_locality(get_module): assert np._core.multiarray.get_handler_name() == orig_policy_name -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") @pytest.mark.skip(reason="too slow, see gh-23975") def test_new_policy(get_module): a = np.arange(10) @@ -420,7 +409,6 @@ def test_new_policy(get_module): assert np._core.multiarray.get_handler_name(c) == orig_policy_name -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") @pytest.mark.xfail(sys.implementation.name == "pypy", reason=("bad interaction between getenv and " "os.environ inside pytest")) @@ -454,7 +442,6 @@ def test_switch_owner(get_module, policy): np._core._multiarray_umath._set_numpy_warn_if_no_mem_policy(oldval) -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_owner_is_base(get_module): a = get_module.get_array_with_base() with pytest.warns(UserWarning, match='warn_on_free'): diff --git a/numpy/testing/_private/extbuild.py b/numpy/testing/_private/extbuild.py index 65465ed19760..08cbb0564e67 100644 --- a/numpy/testing/_private/extbuild.py +++ b/numpy/testing/_private/extbuild.py @@ -54,7 +54,11 @@ def build_and_import_extension( >>> assert mod.test_bytes(b'abc') """ body = prologue + _make_methods(functions, modname) - init = """PyObject *mod = PyModule_Create(&moduledef); + init = """ + PyObject *mod = PyModule_Create(&moduledef); + #ifdef Py_GIL_DISABLED + PyUnstable_Module_SetGIL(mod, Py_MOD_GIL_NOT_USED); + #endif """ if not build_dir: build_dir = pathlib.Path('.') From 8f2e57bb519e65e847c913bfe8ad635763b99744 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Jul 2024 17:24:04 +0000 Subject: [PATCH 892/980] MAINT: Bump ossf/scorecard-action from 2.3.3 to 2.4.0 Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.3.3 to 2.4.0. 
- [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/dc50aa9510b46c811795eb24b2f1ba02a914e534...62b2cac7ed8198b15735ed49ab1e5cf35480ba46) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/scorecards.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 5bec66395d8c..3cdc0bfd0585 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -30,7 +30,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # v2.3.3 + uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0 with: results_file: results.sarif results_format: sarif From f1264b2a0e5f34b3c136e6443163db6c37c09c5c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Jul 2024 17:24:13 +0000 Subject: [PATCH 893/980] MAINT: Bump github/codeql-action from 3.25.14 to 3.25.15 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.14 to 3.25.15. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/5cf07d8b700b67e235fbb65cbc84f69c0cf10464...afb54ba388a7dca6ecae48f608c4ff05ff4cc77a) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index a76f49bb390f..371c3795969f 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@5cf07d8b700b67e235fbb65cbc84f69c0cf10464 # v3.25.14 + uses: github/codeql-action/init@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@5cf07d8b700b67e235fbb65cbc84f69c0cf10464 # v3.25.14 + uses: github/codeql-action/autobuild@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@5cf07d8b700b67e235fbb65cbc84f69c0cf10464 # v3.25.14 + uses: github/codeql-action/analyze@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 5bec66395d8c..33a33e45b0f7 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@5cf07d8b700b67e235fbb65cbc84f69c0cf10464 # v2.1.27 + uses: github/codeql-action/upload-sarif@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v2.1.27 with: sarif_file: results.sarif From dfc5909e984b803ab835b84337d2f0cd7deefeb8 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Wed, 24 Jul 2024 12:42:30 -0400 Subject: [PATCH 894/980] BUG: random: In Johnk's algorithm, use logarithms if either X or Y is 0. If either X or Y is zero, it means the calculation has underflowed and information has been lost. So use the logarithm-based calculation if *either* is 0. Before this, *both* had to be 0 for the logarithm-based calculation to be used. If X was 0 and Y was not, the result X/(X + Y) would be 0, when in fact the logarithm-based calculation might give the correct small but nonzero value. Closes gh-24475. --- numpy/random/src/distributions/distributions.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 67d16e855a52..724d56ffb842 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -436,7 +436,7 @@ double random_beta(bitgen_t *bitgen_state, double a, double b) { XpY = X + Y; /* Reject if both U and V are 0.0, which is approx 1 in 10^106 */ if ((XpY <= 1.0) && (U + V > 0.0)) { - if (XpY > 0) { + if ((X > 0) && (Y > 0)) { return X / XpY; } else { double logX = log(U) / a; From 9f252631fcb82a60de1a679cff2c7c7d61b3028c Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Wed, 24 Jul 2024 12:43:26 -0400 Subject: [PATCH 895/980] MAINT: random: Refine logarithm-based calculation in Johnk's algorithm. --- .../random/src/distributions/distributions.c | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 724d56ffb842..9f988f857d61 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -439,13 +439,20 @@ double random_beta(bitgen_t *bitgen_state, double a, double b) { if ((X > 0) && (Y > 0)) { return X / XpY; } else { - double logX = log(U) / a; - double logY = log(V) / b; - double logM = logX > logY ? logX : logY; - logX -= logM; - logY -= logM; - - return exp(logX - log(exp(logX) + exp(logY))); + /* + * Either X or Y underflowed to 0, so we lost information in + * U**(1/a) or V**(1/b). We still compute X/(X+Y) here, but we + * work with logarithms as much as we can to avoid the underflow. 
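+         * Writing delta = logX - logY, the quotient simplifies to
+         *   X/(X+Y) = 1/(1 + exp(-delta)) = exp(-log1p(exp(-delta))),
+         * and the two branches below just pick the form whose exp()
+         * argument is nonpositive.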
+         */
+        double logX = log(U)/a;
+        double logY = log(V)/b;
+        double delta = logX - logY;
+        if (delta > 0) {
+            return exp(-log1p(exp(-delta)));
+        }
+        else {
+            return exp(delta - log1p(exp(delta)));
+        }
       }
     }
   }

From d2c2837f1548f121afce89d7b6046b2e834d2fdf Mon Sep 17 00:00:00 2001
From: Warren Weckesser
Date: Wed, 24 Jul 2024 17:03:16 -0400
Subject: [PATCH 896/980] TST: random: Add regression test for gh-24475.

---
 .../test_generator_mt19937_regressions.py | 23 +++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py
index f92e92b39b50..c34e6bb3ba74 100644
--- a/numpy/random/tests/test_generator_mt19937_regressions.py
+++ b/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -86,6 +86,29 @@ def test_beta_ridiculously_small_parameters(self):
         x = self.mt19937.beta(tiny/32, tiny/40, size=50)
         assert not np.any(np.isnan(x))
 
+    def test_beta_expected_zero_frequency(self):
+        # gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta
+        # would generate too many zeros.
+        a = 0.0025
+        b = 0.0025
+        n = 1000000
+        x = self.mt19937.beta(a, b, size=n)
+        nzeros = np.count_nonzero(x == 0)
+        # beta CDF at x = np.finfo(np.double).smallest_subnormal/2
+        # is p = 0.0776169083131899, computed, e.g., with
+        #
+        #    import numpy as np
+        #    from mpmath import mp
+        #    mp.dps = 160
+        #    x = mp.mpf(np.finfo(np.float64).smallest_subnormal)/2
+        #    # CDF of the beta distribution at x:
+        #    p = mp.betainc(a, b, x1=0, x2=x, regularized=True)
+        #    n = 1000000
+        #    expected_freq = float(n*p)
+        #
+        expected_freq = 77616.90831318991
+        assert 0.95*expected_freq < nzeros < 1.05*expected_freq
+
     def test_choice_sum_of_probs_tolerance(self):
         # The sum of probs should be 1.0 with some tolerance.
         # For low precision dtypes the tolerance was too tight.

From 3f70f78d4687caf1cb25aa8190688739bbbbdcac Mon Sep 17 00:00:00 2001
From: Rohit Goswami
Date: Fri, 26 Jul 2024 15:19:22 +0000
Subject: [PATCH 897/980] DOC: Minor typo fix [wheel build]

---
 doc/source/f2py/f2py-testing.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/f2py/f2py-testing.rst b/doc/source/f2py/f2py-testing.rst
index 36ef65615f36..c6680749c7c5 100644
--- a/doc/source/f2py/f2py-testing.rst
+++ b/doc/source/f2py/f2py-testing.rst
@@ -55,7 +55,7 @@ be able to access the fortran functions specified in source file by calling
 Each of the ``f2py`` tests should run without failure if no Fortran compilers
 are present on the host machine. To facilitate this, the ``CompilerChecker`` is
 used, essentially providing a ``meson`` dependent set of utilities namely
-``has_{c,f77,f90}_compiler()`` or ``has_fortran_compilers()``.
+``has_{c,f77,f90,fortran}_compiler()``.
 
 For the CLI tests in ``test_f2py2e``, flags which are expected to call ``meson``
 or otherwise depend on a compiler need to call ``compiler_check_f2pycli()``

From e5a3743ed211626179fc49da0188c6d286ae2810 Mon Sep 17 00:00:00 2001
From: Chris Sidebottom
Date: Fri, 26 Jul 2024 18:15:21 +0000
Subject: [PATCH 898/980] Replicate correct sequence of checks for VQSort

---
 numpy/_core/src/npysort/highway_qsort.hpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/numpy/_core/src/npysort/highway_qsort.hpp b/numpy/_core/src/npysort/highway_qsort.hpp
index d33f4fc7d5d9..ba3fe4920594 100644
--- a/numpy/_core/src/npysort/highway_qsort.hpp
+++ b/numpy/_core/src/npysort/highway_qsort.hpp
@@ -10,7 +10,9 @@
 // dispatched sources.
#if (HWY_COMPILER_MSVC && !HWY_IS_DEBUG_BUILD) || \
     (HWY_ARCH_ARM_V7 && HWY_IS_DEBUG_BUILD) || \
-    (HWY_ARCH_ARM_A64 && HWY_COMPILER_GCC_ACTUAL && HWY_IS_ASAN)
+    (HWY_ARCH_ARM_A64 && HWY_COMPILER_GCC_ACTUAL && HWY_IS_ASAN) || \
+    (HWY_ARCH_ARM_A64 && HWY_COMPILER_CLANG && \
+        (HWY_IS_HWASAN || HWY_IS_MSAN || HWY_IS_TSAN || HWY_IS_ASAN))
 #define NPY_DISABLE_HIGHWAY_SORT
 #endif

From 4fdf6ebf687e7bd85f60cb92aa9a683670d3a400 Mon Sep 17 00:00:00 2001
From: Chris Sidebottom 
Date: Fri, 26 Jul 2024 19:34:30 +0100
Subject: [PATCH 899/980] Move guard so as not to impact x86-simd-sort

---
 numpy/_core/src/npysort/quicksort.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/numpy/_core/src/npysort/quicksort.cpp b/numpy/_core/src/npysort/quicksort.cpp
index c8c6ab61bdf0..aca748056f39 100644
--- a/numpy/_core/src/npysort/quicksort.cpp
+++ b/numpy/_core/src/npysort/quicksort.cpp
@@ -80,22 +80,22 @@ inline bool quicksort_dispatch(T *start, npy_intp num)
     using TF = typename np::meta::FixedWidth<T>::Type;
     void (*dispfunc)(TF*, intptr_t) = nullptr;
     if (sizeof(T) == sizeof(uint16_t)) {
-    #if !defined(NPY_DISABLE_OPTIMIZATION) && !defined(NPY_DISABLE_HIGHWAY_SORT)
+    #ifndef NPY_DISABLE_OPTIMIZATION
        #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit
            #include "x86_simd_qsort_16bit.dispatch.h"
            NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, <TF>);
-        #else
+        #elif !defined(NPY_DISABLE_HIGHWAY_SORT)
            #include "highway_qsort_16bit.dispatch.h"
            NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, <TF>);
        #endif
    #endif
    }
    else if (sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t)) {
-    #if !defined(NPY_DISABLE_OPTIMIZATION) && !defined(NPY_DISABLE_HIGHWAY_SORT)
+    #ifndef NPY_DISABLE_OPTIMIZATION
        #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit
            #include "x86_simd_qsort.dispatch.h"
            NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, <TF>);
-        #else
+        #elif !defined(NPY_DISABLE_HIGHWAY_SORT)
            #include "highway_qsort.dispatch.h"
            NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, <TF>);
        #endif

From fc84300dd3d674ed74dc1493342081dcbd803dfa Mon Sep 17 00:00:00 2001
From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com>
Date: Sat, 27 Jul 2024 02:00:34 +0530
Subject: [PATCH 900/980] DOC: Add examples for ufunc `np.strings.isalpha()`

---
 numpy/_core/code_generators/ufunc_docstrings.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py
index f3b502bff9d6..2e4d694065fb 100644
--- a/numpy/_core/code_generators/ufunc_docstrings.py
+++ b/numpy/_core/code_generators/ufunc_docstrings.py
@@ -4482,6 +4482,17 @@ def add_newdoc(place, name, doc):
     --------
     str.isalpha
 
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(['a', 'b', '0'])
+    >>> np.strings.isalpha(a)
+    array([ True,  True, False])
+
+    >>> a = np.array([['a', 'b', '0'], ['c', '1', '2']])
+    >>> np.strings.isalpha(a)
+    array([[ True,  True, False],
+           [ True, False, False]])
+
     """)
 
 add_newdoc('numpy._core.umath', 'isdigit',

From 78a981080e0671482c78e68682e14cdde1fe8212 Mon Sep 17 00:00:00 2001
From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com>
Date: Sat, 27 Jul 2024 02:00:55 +0530
Subject: [PATCH 901/980] DOC: Add examples for `np.strings.mod()`

---
 numpy/_core/strings.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git
a/numpy/_core/strings.py b/numpy/_core/strings.py
index 05fb7248a9e2..19ec2b639ed5 100644
--- a/numpy/_core/strings.py
+++ b/numpy/_core/strings.py
@@ -197,7 +197,19 @@ def mod(a, values):
     out : ndarray
         Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
         depending on input types
-
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(["NumPy is a %s library"])
+    >>> np.strings.mod(a, values=["Python"])
+    array(['NumPy is a Python library'], dtype='<U25')
+
+    >>> a = np.array([b'%d bytes', b'%d bits'])
+    >>> values = np.array([8, 64])
+    >>> np.strings.mod(a, values)
+    array([b'8 bytes', b'64 bits'], dtype='|S7')
+
     """
     return _to_bytes_or_str_array(
         _vec_string(a, np.object_, '__mod__', (values,)), a)

From be75bec611ff98163bd64067fb2e9b251eaafd77 Mon Sep 17 00:00:00 2001
From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com>
Date: Sat, 27 Jul 2024 02:01:13 +0530
Subject: [PATCH 902/980] DOC: Add examples for `np.strings.rfind()`

---
 numpy/_core/strings.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py
index 19ec2b639ed5..6a3f2171324a 100644
--- a/numpy/_core/strings.py
+++ b/numpy/_core/strings.py
@@ -277,6 +277,18 @@ def rfind(a, sub, start=0, end=None):
     --------
     str.rfind
 
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(["Computer Science"])
+    >>> np.strings.rfind(a, "Science", start=0, end=None)
+    array([9])
+    >>> np.strings.rfind(a, "Science", start=0, end=8)
+    array([-1])
+    >>> b = np.array(["Computer Science", "Science"])
+    >>> np.strings.rfind(b, "Science", start=0, end=None)
+    array([9, 0])
+
     """
     end = end if end is not None else MAX
     return _rfind_ufunc(a, sub, start, end)

From b2b033867cbc9e11901ab2b707ac224c472db0cd Mon Sep 17 00:00:00 2001
From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com>
Date: Sat, 27 Jul 2024 02:01:24 +0530
Subject: [PATCH 903/980] DOC: Add examples for `np.strings.startswith()`

[skip azp] [skip cirrus]
---
 numpy/_core/strings.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py
index 6a3f2171324a..0820411840ea 100644
--- a/numpy/_core/strings.py
+++ b/numpy/_core/strings.py
@@ -428,6 +428,17 @@ def startswith(a, prefix, start=0, end=None):
     --------
     str.startswith
 
+    Examples
+    --------
+    >>> import numpy as np
+    >>> s = np.array(['foo', 'bar'])
+    >>> s
+    array(['foo', 'bar'], dtype='<U3')
+    >>> np.strings.startswith(s, 'fo')
+    array([ True, False])
+    >>> np.strings.startswith(s, 'o', start=1, end=2)
+    array([ True, False])
+
     """
     end = end if end is not None else MAX
     return _startswith_ufunc(a, prefix, start, end)

From b61e079b7ee0276637fbdefbe425dc54020b51e8 Mon Sep 17 00:00:00 2001
From: arnaud-ma 
Date: Fri, 26 Jul 2024 23:41:54 +0200
Subject: [PATCH 904/980] disable name suggestions on some AttributeErrors

---
 numpy/__init__.py     | 9 +++++----
 numpy/lib/__init__.py | 9 ++++++---
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/numpy/__init__.py b/numpy/__init__.py
index 0673f8d1dd71..27e5d2d6801d 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -371,7 +371,7 @@ def __getattr__(attr):
             return char
         elif attr == "array_api":
             raise AttributeError("`numpy.array_api` is not available from "
-                                 "numpy 2.0 onwards")
+                                 "numpy 2.0 onwards", name=None)
         elif attr == "core":
             import numpy.core as core
             return core
@@ -384,7 +384,7 @@ def __getattr__(attr):
                 return distutils
             else:
                 raise AttributeError("`numpy.distutils` is not available from "
-                                     "Python 3.12 onwards")
+                                     "Python 3.12 onwards", name=None)
 
     if attr in __future_scalars__:
         # And future warnings for those that will change, but also give
@@ -394,12 +394,13 @@ def __getattr__(attr):
                       "corresponding NumPy scalar.", FutureWarning, stacklevel=2)
 
     if attr in __former_attrs__:
-        raise AttributeError(__former_attrs__[attr])
+        raise AttributeError(__former_attrs__[attr], name=None)
 
     if attr in __expired_attributes__:
         raise AttributeError(
             f"`np.{attr}` was removed in the NumPy 2.0 release. "
-            f"{__expired_attributes__[attr]}"
+            f"{__expired_attributes__[attr]}",
+            name=None
         )
 
     if attr == "chararray":
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index 22ad35e93c35..f048b9e2818f 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -67,7 +67,8 @@ def __getattr__(attr):
         raise AttributeError(
             "numpy.lib.emath was an alias for emath module that was removed "
             "in NumPy 2.0. Replace usages of numpy.lib.emath with "
-            "numpy.emath."
+            "numpy.emath.",
+            name=None
         )
     elif attr in (
         "histograms", "type_check", "nanfunctions", "function_base",
@@ -77,12 +78,14 @@ def __getattr__(attr):
         raise AttributeError(
             f"numpy.lib.{attr} is now private. If you are using a public "
             "function, it should be available in the main numpy namespace, "
-            "otherwise check the NumPy 2.0 migration guide."
+            "otherwise check the NumPy 2.0 migration guide.",
+            name=None
        )
     elif attr == "arrayterator":
         raise AttributeError(
             "numpy.lib.arrayterator submodule is now private. To access "
-            "Arrayterator class use numpy.lib.Arrayterator."
+            "Arrayterator class use numpy.lib.Arrayterator.",
+            name=None
        )
     else:
         raise AttributeError("module {!r} has no attribute "

From 755e9594729b087f289e3f8e53a6103e3f9cf7ba Mon Sep 17 00:00:00 2001
From: Warren Weckesser 
Date: Fri, 26 Jul 2024 20:32:55 -0400
Subject: [PATCH 905/980] MAINT: linalg: Simplify some linalg gufuncs.

The new feature added in gh-26908 is used to reduce the number of
gufuncs in np.linalg._umath_linalg. For example, before this change,
the gufuncs `svd_m` and `svd_n` used the same code, but two distinct
gufuncs were necessary because, for input with shape (m, n), the output
has shape min(m, n). `svd_m` returned shape (m,), and `svd_n` returned
shape (n,). The wrapper code had to determine which gufunc to call.
This can be handled now with a single gufunc by defining a function
that sets the output dimension to min(m, n).
In this PR, the following pairs of gufuncs are coalesced into one gufunc:

    lstsq_m & lstsq_n        =>  lstsq
    qr_r_raw_m & qr_r_raw_n  =>  qr_r_raw
    svd_m & svd_n            =>  svd
    svd_m_f & svd_n_f        =>  svd_f
    svd_m_s & svd_n_s        =>  svd_s
---
 numpy/linalg/_linalg.py       |  32 ++----
 numpy/linalg/umath_linalg.cpp | 198 ++++++++++++++++++++--------------
 2 files changed, 123 insertions(+), 107 deletions(-)

diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py
index 52d1bb0d1d19..bab376a77b03 100644
--- a/numpy/linalg/_linalg.py
+++ b/numpy/linalg/_linalg.py
@@ -1092,15 +1092,10 @@ def qr(a, mode='reduced'):
     a = _to_native_byte_order(a)
     mn = min(m, n)
 
-    if m <= n:
-        gufunc = _umath_linalg.qr_r_raw_m
-    else:
-        gufunc = _umath_linalg.qr_r_raw_n
-
     signature = 'D->D' if isComplexType(t) else 'd->d'
     with errstate(call=_raise_linalgerror_qr, invalid='call',
                   over='ignore', divide='ignore', under='ignore'):
-        tau = gufunc(a, signature=signature)
+        tau = _umath_linalg.qr_r_raw(a, signature=signature)
 
     # handle modes that don't return q
     if mode == 'r':
@@ -1833,15 +1828,9 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
     m, n = a.shape[-2:]
     if compute_uv:
         if full_matrices:
-            if m < n:
-                gufunc = _umath_linalg.svd_m_f
-            else:
-                gufunc = _umath_linalg.svd_n_f
+            gufunc = _umath_linalg.svd_f
         else:
-            if m < n:
-                gufunc = _umath_linalg.svd_m_s
-            else:
-                gufunc = _umath_linalg.svd_n_s
+            gufunc = _umath_linalg.svd_s
 
         signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
         with errstate(call=_raise_linalgerror_svd_nonconvergence,
@@ -1853,16 +1842,11 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
         vh = vh.astype(result_t, copy=False)
         return SVDResult(wrap(u), s, wrap(vh))
     else:
-        if m < n:
-            gufunc = _umath_linalg.svd_m
-        else:
-            gufunc = _umath_linalg.svd_n
-
         signature = 'D->d' if isComplexType(t) else 'd->d'
         with errstate(call=_raise_linalgerror_svd_nonconvergence,
                       invalid='call', over='ignore', divide='ignore',
                       under='ignore'):
-            s = gufunc(a, signature=signature)
+            s = _umath_linalg.svd(a, signature=signature)
         s = s.astype(_realType(result_t), copy=False)
     return s
 
@@ -2570,11 +2554,6 @@ def lstsq(a, b, rcond=None):
     if rcond is None:
         rcond = finfo(t).eps * max(n, m)
 
-    if m <= n:
-        gufunc = _umath_linalg.lstsq_m
-    else:
-        gufunc = _umath_linalg.lstsq_n
-
     signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
     if n_rhs == 0:
         # lapack can't handle n_rhs = 0 - so allocate
@@ -2583,7 +2562,8 @@ def lstsq(a, b, rcond=None):
     with errstate(call=_raise_linalgerror_lstsq, invalid='call',
                   over='ignore', divide='ignore', under='ignore'):
-        x, resids, rank, s = gufunc(a, b, rcond, signature=signature)
+        x, resids, rank, s = _umath_linalg.lstsq(a, b, rcond,
+                                                 signature=signature)
     if m == 0:
         x[...] = 0
     if n_rhs == 0:
diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp
index cf8f469a022a..ead6d84a73a2 100644
--- a/numpy/linalg/umath_linalg.cpp
+++ b/numpy/linalg/umath_linalg.cpp
@@ -8,6 +8,7 @@
 #define PY_SSIZE_T_CLEAN
 #include <Python.h>
 
+#define NPY_TARGET_VERSION NPY_2_1_API_VERSION
 #define NPY_NO_DEPRECATED_API NPY_API_VERSION
 #include "numpy/arrayobject.h"
 #include "numpy/ufuncobject.h"
@@ -4352,6 +4353,60 @@ static const char lstsq_types[] = {
     NPY_CDOUBLE, NPY_CDOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_DOUBLE, NPY_INT, NPY_DOUBLE,
 };
 
+/*
+ * Function to process core dimensions of a gufunc with two input core
+ * dimensions m and n, and one output core dimension p which must be
+ * min(m, n). 
The parameters m_index, n_index and p_index indicate + * the locations of the core dimensions in core_dims[]. + */ +static int +mnp_min_indexed_process_core_dims(PyUFuncObject *gufunc, + npy_intp core_dims[], + npy_intp m_index, + npy_intp n_index, + npy_intp p_index) +{ + npy_intp m = core_dims[m_index]; + npy_intp n = core_dims[n_index]; + npy_intp p = core_dims[p_index]; + npy_intp required_p = m > n ? n : m; /* min(m, n) */ + if (p == -1) { + core_dims[p_index] = required_p; + return 0; + } + if (p != required_p) { + PyErr_Format(PyExc_ValueError, + "core output dimension p must be min(m, n), where " + "m and n are the core dimensions of the inputs. Got " + "m=%zd and n=%zd, so p must be %zd, but got p=%zd.", + m, n, required_p, p); + return -1; + } + return 0; +} + +/* + * Function to process core dimensions of a gufunc with two input core + * dimensions m and n, and one output core dimension p which must be + * min(m, n). There can be only those three core dimensions in the + * gufunc shape signature. + */ +static int +mnp_min_process_core_dims(PyUFuncObject *gufunc, npy_intp core_dims[]) +{ + return mnp_min_indexed_process_core_dims(gufunc, core_dims, 0, 1, 2); +} + +/* + * Process the core dimensions for the lstsq gufunc. + */ +static int +lstsq_process_core_dims(PyUFuncObject *gufunc, npy_intp core_dims[]) +{ + return mnp_min_indexed_process_core_dims(gufunc, core_dims, 0, 1, 3); +} + + typedef struct gufunc_descriptor_struct { const char *name; const char *signature; @@ -4361,6 +4416,7 @@ typedef struct gufunc_descriptor_struct { int nout; PyUFuncGenericFunction *funcs; const char *types; + PyUFunc_ProcessCoreDimsFunc *process_core_dims_func; } GUFUNC_DESCRIPTOR_t; GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { @@ -4373,7 +4429,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(),()\" \n", 4, 1, 2, FUNC_ARRAY_NAME(slogdet), - slogdet_types + slogdet_types, + nullptr }, { "det", @@ -4382,7 +4439,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->()\" \n", 4, 1, 1, FUNC_ARRAY_NAME(det), - equal_2_types + equal_2_types, + nullptr }, { "eigh_lo", @@ -4394,7 +4452,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m),(m,m)\" \n", 4, 1, 2, FUNC_ARRAY_NAME(eighlo), - eigh_types + eigh_types, + nullptr }, { "eigh_up", @@ -4406,7 +4465,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m),(m,m)\" \n", 4, 1, 2, FUNC_ARRAY_NAME(eighup), - eigh_types + eigh_types, + nullptr }, { "eigvalsh_lo", @@ -4418,7 +4478,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m)\" \n", 4, 1, 1, FUNC_ARRAY_NAME(eigvalshlo), - eighvals_types + eighvals_types, + nullptr }, { "eigvalsh_up", @@ -4430,7 +4491,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m)\" \n", 4, 1, 1, FUNC_ARRAY_NAME(eigvalshup), - eighvals_types + eighvals_types, + nullptr }, { "solve", @@ -4441,7 +4503,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m),(m,n)->(m,n)\" \n", 4, 2, 1, FUNC_ARRAY_NAME(solve), - equal_3_types + equal_3_types, + nullptr }, { "solve1", @@ -4452,7 +4515,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m),(m)->(m)\" \n", 4, 2, 1, FUNC_ARRAY_NAME(solve1), - equal_3_types + equal_3_types, + nullptr }, { "inv", @@ -4463,7 +4527,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m,m)\" \n", 4, 1, 1, FUNC_ARRAY_NAME(inv), - equal_2_types + equal_2_types, + nullptr }, { "cholesky_lo", @@ -4473,7 +4538,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m,m)\"\n", 4, 1, 1, FUNC_ARRAY_NAME(cholesky_lo), - 
equal_2_types + equal_2_types, + nullptr }, { "cholesky_up", @@ -4483,55 +4549,36 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m,m)\"\n", 4, 1, 1, FUNC_ARRAY_NAME(cholesky_up), - equal_2_types + equal_2_types, + nullptr }, { - "svd_m", - "(m,n)->(m)", - "svd when n>=m. ", + "svd", + "(m,n)->(p)", + "Singular values of array with shape (m, n).\n" + "Return value is 1-d array with shape (min(m, n),).", 4, 1, 1, FUNC_ARRAY_NAME(svd_N), - svd_1_1_types + svd_1_1_types, + mnp_min_process_core_dims }, { - "svd_n", - "(m,n)->(n)", - "svd when n<=m", - 4, 1, 1, - FUNC_ARRAY_NAME(svd_N), - svd_1_1_types - }, - { - "svd_m_s", - "(m,n)->(m,m),(m),(m,n)", - "svd when m<=n", - 4, 1, 3, - FUNC_ARRAY_NAME(svd_S), - svd_1_3_types - }, - { - "svd_n_s", - "(m,n)->(m,n),(n),(n,n)", - "svd when m>=n", + "svd_s", + "(m,n)->(m,p),(p),(p,n)", + "svd (full_matrices=False)", 4, 1, 3, FUNC_ARRAY_NAME(svd_S), - svd_1_3_types - }, - { - "svd_m_f", - "(m,n)->(m,m),(m),(n,n)", - "svd when m<=n", - 4, 1, 3, - FUNC_ARRAY_NAME(svd_A), - svd_1_3_types + svd_1_3_types, + mnp_min_process_core_dims }, { - "svd_n_f", - "(m,n)->(m,m),(n),(n,n)", - "svd when m>=n", + "svd_f", + "(m,n)->(m,m),(p),(n,n)", + "svd (full_matrices=True)", 4, 1, 3, FUNC_ARRAY_NAME(svd_A), - svd_1_3_types + svd_1_3_types, + mnp_min_process_core_dims }, { "eig", @@ -4542,7 +4589,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m),(m,m)\" \n", 3, 1, 2, FUNC_ARRAY_NAME(eig), - eig_types + eig_types, + nullptr }, { "eigvals", @@ -4551,25 +4599,18 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { "Results in a vector of eigenvalues. \n", 3, 1, 1, FUNC_ARRAY_NAME(eigvals), - eigvals_types + eigvals_types, + nullptr }, { - "qr_r_raw_m", - "(m,n)->(m)", + "qr_r_raw", + "(m,n)->(p)", "Compute TAU vector for the last two dimensions \n"\ - "and broadcast to the rest. For m <= n. \n", - 2, 1, 1, - FUNC_ARRAY_NAME(qr_r_raw), - qr_r_raw_types - }, - { - "qr_r_raw_n", - "(m,n)->(n)", - "Compute TAU vector for the last two dimensions \n"\ - "and broadcast to the rest. For m > n. \n", + "and broadcast to the rest. \n", 2, 1, 1, FUNC_ARRAY_NAME(qr_r_raw), - qr_r_raw_types + qr_r_raw_types, + mnp_min_process_core_dims }, { "qr_reduced", @@ -4578,7 +4619,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { "and broadcast to the rest. \n", 2, 2, 1, FUNC_ARRAY_NAME(qr_reduced), - qr_reduced_types + qr_reduced_types, + nullptr }, { "qr_complete", @@ -4587,37 +4629,30 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { "and broadcast to the rest. For m > n. \n", 2, 2, 1, FUNC_ARRAY_NAME(qr_complete), - qr_complete_types - }, - { - "lstsq_m", - "(m,n),(m,nrhs),()->(n,nrhs),(nrhs),(),(m)", - "least squares on the last two dimensions and broadcast to the rest. \n"\ - "For m <= n. \n", - 4, 3, 4, - FUNC_ARRAY_NAME(lstsq), - lstsq_types + qr_complete_types, + nullptr }, { - "lstsq_n", - "(m,n),(m,nrhs),()->(n,nrhs),(nrhs),(),(n)", - "least squares on the last two dimensions and broadcast to the rest. \n"\ - "For m >= n, meaning that residuals are produced. 
\n", + "lstsq", + "(m,n),(m,nrhs),()->(n,nrhs),(nrhs),(),(p)", + "least squares on the last two dimensions and broadcast to the rest.", 4, 3, 4, FUNC_ARRAY_NAME(lstsq), - lstsq_types + lstsq_types, + lstsq_process_core_dims } }; static int addUfuncs(PyObject *dictionary) { - PyObject *f; + PyUFuncObject *f; int i; const int gufunc_count = sizeof(gufunc_descriptors)/ sizeof(gufunc_descriptors[0]); for (i = 0; i < gufunc_count; i++) { GUFUNC_DESCRIPTOR_t* d = &gufunc_descriptors[i]; - f = PyUFunc_FromFuncAndDataAndSignature(d->funcs, + f = (PyUFuncObject *) PyUFunc_FromFuncAndDataAndSignature( + d->funcs, array_of_nulls, d->types, d->ntypes, @@ -4631,10 +4666,11 @@ addUfuncs(PyObject *dictionary) { if (f == NULL) { return -1; } + f->process_core_dims_func = d->process_core_dims_func; #if _UMATH_LINALG_DEBUG dump_ufunc_object((PyUFuncObject*) f); #endif - int ret = PyDict_SetItemString(dictionary, d->name, f); + int ret = PyDict_SetItemString(dictionary, d->name, (PyObject *)f); Py_DECREF(f); if (ret < 0) { return -1; From 50bd355e9d38a50b685ad8aee75749581b6a14db Mon Sep 17 00:00:00 2001 From: Christopher Sidebottom Date: Mon, 29 Jul 2024 22:28:22 +0100 Subject: [PATCH 906/980] BUG: Bump Highway to latest master (#27070) * Bump Highway to latest master Fixes #27037 * Add reproducer --- numpy/_core/src/highway | 2 +- numpy/_core/tests/test_regression.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 1dbb1180e05c..0e2f5ac4af3c 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 1dbb1180e05c55b648f2508d3f97bf26c6f926a8 +Subproject commit 0e2f5ac4af3c95a07cd247b8ddc71a5f5bd83318 diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 2636295a0020..8c9dbbe739e0 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -2636,3 +2636,11 @@ def test_repeated_square_consistency(self): res = buf[3:] np.square(in_vec, out=res) assert_equal(res, expected_res) + + def test_sort_unique_crash(self): + # gh-27037 + for _ in range(4): + vals = np.linspace(0, 1, num=128) + data = np.broadcast_to(vals, (128, 128, 128)) + data = data.transpose(0, 2, 1).copy() + np.unique(data) From e0b1c55d8a98788d337a680e9b0acfcfe69fd5b7 Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Mon, 29 Jul 2024 20:42:47 -0600 Subject: [PATCH 907/980] MAINT: 3.9/10 cleanups * Cleanup some shims related to the lower bound of Python now being `3.10`. --- numpy/_core/src/common/npy_pycompat.h | 13 ------------- numpy/_core/src/multiarray/methods.c | 1 - numpy/_core/tests/test_nditer.py | 2 -- numpy/_core/tests/test_umath.py | 4 ---- numpy/testing/tests/test_utils.py | 5 ++--- 5 files changed, 2 insertions(+), 23 deletions(-) diff --git a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index 67d4f6f625a0..769b90215f2b 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -4,19 +4,6 @@ #include "numpy/npy_3kcompat.h" #include "pythoncapi-compat/pythoncapi_compat.h" -/* - * In Python 3.10a7 (or b1), python started using the identity for the hash - * when a value is NaN. 
See https://bugs.python.org/issue43475
- */
-#if PY_VERSION_HEX > 0x030a00a6
 #define Npy_HashDouble _Py_HashDouble
-#else
-static inline Py_hash_t
-Npy_HashDouble(PyObject *NPY_UNUSED(identity), double val)
-{
-    return _Py_HashDouble(val);
-}
-#endif
-
 #endif  /* NUMPY_CORE_SRC_COMMON_NPY_PYCOMPAT_H_ */
diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c
index 803678004775..4a8e1ea4579e 100644
--- a/numpy/_core/src/multiarray/methods.c
+++ b/numpy/_core/src/multiarray/methods.c
@@ -2862,7 +2862,6 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
         (PyCFunction) array_format,
         METH_VARARGS, NULL},
 
-    /* for typing; requires python >= 3.9 */
     {"__class_getitem__",
        (PyCFunction)array_class_getitem,
        METH_CLASS | METH_O, NULL},
diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py
index 517e21a92cf8..5621efef1920 100644
--- a/numpy/_core/tests/test_nditer.py
+++ b/numpy/_core/tests/test_nditer.py
@@ -3218,8 +3218,6 @@ def test_warn_noclose():
         assert len(sup.log) == 1
 
 
-@pytest.mark.skipif(sys.version_info[:2] == (3, 9) and sys.platform == "win32",
-                    reason="Errors with Python 3.9 on Windows")
 @pytest.mark.parametrize(["in_dtype", "buf_dtype"],
         [("i", "O"), ("O", "i"),  # most simple cases
         ("i,O", "O,O"),  # structured partially only copying O
diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py
index 02bfeb2c8498..9a300f19764c 100644
--- a/numpy/_core/tests/test_umath.py
+++ b/numpy/_core/tests/test_umath.py
@@ -2807,10 +2807,6 @@ def test_reduction(self):
     def test_bitwise_count(self, input_dtype_obj, bitsize):
         input_dtype = input_dtype_obj.type
 
-        # bitwise_count is only in-built in 3.10+
-        if sys.version_info < (3, 10) and input_dtype == np.object_:
-            pytest.skip("Required Python >=3.10")
-
         for i in range(1, bitsize):
             num = 2**i - 1
             msg = f"bitwise_count for {num}"
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 247bbeaec6f7..3983ec902356 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -1923,8 +1923,7 @@ def test_xy_rename(assert_func):
         assert_func(1, y=1)
 
     type_message = '...got multiple values for argument'
-    # explicit linebreak to support Python 3.9
-    with pytest.warns(DeprecationWarning, match=dep_message), \
-            pytest.raises(TypeError, match=type_message):
+    with (pytest.warns(DeprecationWarning, match=dep_message),
+            pytest.raises(TypeError, match=type_message)):
         assert_func(1, x=1)
         assert_func(1, 2, y=2)

From 84ca71940aaea347276205907fd1da6390f728db Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= 
Date: Tue, 30 Jul 2024 14:00:57 +0200
Subject: [PATCH 908/980] CI: Upgrade `array-api-tests` hash

---
 .github/workflows/linux.yml                            | 4 ++--
 numpy/_core/numeric.py                                 | 5 +++--
 numpy/_core/tests/test_numeric.py                      | 5 +++++
 tools/ci/{array-api-skips.txt => array-api-xfails.txt} | 6 +++++-
 4 files changed, 15 insertions(+), 5 deletions(-)
 rename tools/ci/{array-api-skips.txt => array-api-xfails.txt} (78%)

diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index 89f3fef0f6d4..a5a977eefad4 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -232,7 +232,7 @@ 
jobs: PYTHONWARNINGS: 'ignore::UserWarning::,ignore::DeprecationWarning::,ignore::RuntimeWarning::' run: | cd ${GITHUB_WORKSPACE}/array-api-tests - pytest array_api_tests -v -c pytest.ini --ci --max-examples=2 --derandomize --disable-deadline --skips-file ${GITHUB_WORKSPACE}/tools/ci/array-api-skips.txt + pytest array_api_tests -v -c pytest.ini --ci --max-examples=50 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt custom_checks: needs: [smoke_test] diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index cfcd9389b44b..39b3de44fabe 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -2684,9 +2684,10 @@ def astype(x, dtype, /, *, copy=True, device=None): True """ - if not isinstance(x, np.ndarray): + if not (isinstance(x, np.ndarray) or isscalar(x)): raise TypeError( - f"Input should be a NumPy array. It is a {type(x)} instead." + "Input should be a NumPy array or scalar. " + f"It is a {type(x)} instead." ) if device is not None and device != "cpu": raise ValueError( diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index a9694ebee736..9bd7834d3fd7 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -4158,5 +4158,10 @@ def test_astype(self): actual, np.astype(actual, actual.dtype, copy=False) ) + actual = np.astype(np.int64(10), np.float64) + expected = np.float64(10) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + with pytest.raises(TypeError, match="Input should be a NumPy array"): np.astype(data, np.float64) diff --git a/tools/ci/array-api-skips.txt b/tools/ci/array-api-xfails.txt similarity index 78% rename from tools/ci/array-api-skips.txt rename to tools/ci/array-api-xfails.txt index 2d618ee05d45..c6ec317d437e 100644 --- a/tools/ci/array-api-skips.txt +++ b/tools/ci/array-api-xfails.txt @@ -7,10 +7,14 @@ array_api_tests/test_signatures.py::test_func_signature[reshape] # 'min/max' args are present. 'a_min/a_max' are retained for backward compat. 
array_api_tests/test_signatures.py::test_func_signature[clip]
 
-# missing 'descending' keyword arguments
+# missing 'descending' keyword argument
 array_api_tests/test_signatures.py::test_func_signature[argsort]
 array_api_tests/test_signatures.py::test_func_signature[sort]
 
+# missing 'descending' keyword argument
+array_api_tests/test_sorting_functions.py::test_argsort
+array_api_tests/test_sorting_functions.py::test_sort
+
 # ufuncs signature on linux is always <Signature (*args, **kwargs)>
 # np.vecdot is the only ufunc with a keyword argument which causes a failure
 array_api_tests/test_signatures.py::test_func_signature[vecdot]

From 89fd51cbe4fd724b431172f025bab499f2c9dfd8 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum 
Date: Tue, 30 Jul 2024 09:45:51 -0600
Subject: [PATCH 909/980] BUG: don't use alpha version in meson version compare

---
 numpy/random/_examples/cython/meson.build | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/random/_examples/cython/meson.build b/numpy/random/_examples/cython/meson.build
index 9b3fedb81579..7aa367d13787 100644
--- a/numpy/random/_examples/cython/meson.build
+++ b/numpy/random/_examples/cython/meson.build
@@ -12,7 +12,7 @@ if not cy.version().version_compare('>=3.0.6')
 endif
 
 base_cython_args = []
-if cy.version().version_compare('>=3.1.0a0')
+if cy.version().version_compare('>=3.1.0')
   base_cython_args += ['-Xfreethreading_compatible=True']
 endif
 

From 23680c9f6379b44f90c93c61d97c388c5e89826e Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum 
Date: Tue, 30 Jul 2024 09:46:33 -0600
Subject: [PATCH 910/980] TST: skip test that uses fork if there are running threads

---
 numpy/linalg/tests/test_linalg.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 969934a36698..ffd9550e7c1d 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -4,6 +4,7 @@
 import os
 import sys
 import itertools
+import threading
 import traceback
 import textwrap
 import subprocess
@@ -1943,7 +1944,9 @@ def test_generalized_raise_multiloop():
     assert_raises(np.linalg.LinAlgError, np.linalg.inv, x)
 
 
-
+@pytest.mark.skipif(
+    threading.active_count() > 1,
+    reason="skipping test that uses fork because there are multiple threads")
 def test_xerbla_override():
     # Check that our xerbla has been successfully linked in. 
If it is not,
     # the default xerbla routine is called, which prints a message to stdout

From 38e81bbbad93ac0efd83ab4def4c1437bd0dd080 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum 
Date: Tue, 30 Jul 2024 10:39:40 -0600
Subject: [PATCH 911/980] TST: delete tests that assume warnings.catch_warnings is thread-safe

---
 numpy/_core/tests/test_nep50_promotions.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py
index e603254f6fec..ab800cb5b959 100644
--- a/numpy/_core/tests/test_nep50_promotions.py
+++ b/numpy/_core/tests/test_nep50_promotions.py
@@ -352,18 +352,11 @@ def legacy_no_warn():
         np._set_promotion_state("legacy")
         b.wait()
         assert np._get_promotion_state() == "legacy"
-        # turn warnings into errors, this should not warn with
-        # legacy promotion state
-        with warnings.catch_warnings():
-            warnings.simplefilter("error")
-            np.float16(1) + 131008
 
     def weak_warn():
         np._set_promotion_state("weak")
         b.wait()
         assert np._get_promotion_state() == "weak"
-        with pytest.raises(RuntimeWarning):
-            np.float16(1) + 131008
 
     task1 = threading.Thread(target=legacy_no_warn)
     task2 = threading.Thread(target=weak_warn)

From a975551829f093a4572f30397d0df0e551aaed6c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= 
Date: Wed, 31 Jul 2024 12:26:05 +0200
Subject: [PATCH 912/980] CI: Increase `max-examples` to 100

---
 .github/workflows/linux.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index a5a977eefad4..2e63c7494c54 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -253,7 +253,7 @@ jobs:
       PYTHONWARNINGS: 'ignore::UserWarning::,ignore::DeprecationWarning::,ignore::RuntimeWarning::'
     run: |
       cd ${GITHUB_WORKSPACE}/array-api-tests
-      pytest array_api_tests -v -c pytest.ini --ci --max-examples=50 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt
+      pytest array_api_tests -v -c pytest.ini --ci --max-examples=100 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt

From 3662311e3781f334b4064fed3576150057db7d26 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum 
Date: Wed, 31 Jul 2024 13:43:15 -0600
Subject: [PATCH 913/980] ENH: mark the dragon4 scratch space as thread-local

---
 numpy/_core/src/multiarray/dragon4.c     |  5 ++---
 numpy/_core/tests/test_arrayprint.py     |  8 ++++++++
 numpy/_core/tests/test_multithreading.py | 12 +-----------
 numpy/testing/_private/utils.py          | 14 +++++++++++++-
 4 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/numpy/_core/src/multiarray/dragon4.c b/numpy/_core/src/multiarray/dragon4.c
index 480b78bdbb32..7217f7b88945 100644
--- a/numpy/_core/src/multiarray/dragon4.c
+++ b/numpy/_core/src/multiarray/dragon4.c
@@ -163,12 +163,11 @@ typedef struct {
     char repr[16384];
 } Dragon4_Scratch;
 
-static int _bigint_static_in_use = 0;
-static Dragon4_Scratch _bigint_static;
+static NPY_TLS int _bigint_static_in_use = 0;
+static NPY_TLS Dragon4_Scratch _bigint_static;
 
 static Dragon4_Scratch*
 get_dragon4_bigint_scratch(void) {
-    /* this test+set is not threadsafe, but no matter because we have GIL */
     if (_bigint_static_in_use) {
         PyErr_SetString(PyExc_RuntimeError,
             "numpy float printing code is not re-entrant. 
" diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index c15c60ab3fab..5b0642cbb0bd 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -9,6 +9,7 @@ assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT, assert_raises_regex, IS_WASM ) +from numpy.testing._private.utils import run_threaded from numpy._core.arrayprint import _typelessdata import textwrap @@ -1249,3 +1250,10 @@ async def main(): loop = asyncio.new_event_loop() asyncio.run(main()) loop.close() + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support threads") +def test_multithreaded_array_printing(): + # the dragon4 implementation uses a static scratch space for performance + # reasons this test makes sure it is set up in a thread-safe manner + + run_threaded(TestPrintOptions().test_floatmode, 500) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 0fabed294fed..754688501c2d 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -1,25 +1,15 @@ -import concurrent.futures import threading import numpy as np import pytest from numpy.testing import IS_WASM +from numpy.testing._private.utils import run_threaded if IS_WASM: pytest.skip(allow_module_level=True, reason="no threading support in wasm") -def run_threaded(func, iters, pass_count=False): - with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: - if pass_count: - futures = [tpe.submit(func, i) for i in range(iters)] - else: - futures = [tpe.submit(func) for _ in range(iters)] - for f in futures: - f.result() - - def test_parallel_randomstate_creation(): # if the coercion cache is enabled and not thread-safe, creating # RandomState instances simultaneously leads to a data race diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index a913c1a69f88..f22df0ddaab8 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -17,6 +17,7 @@ from warnings import WarningMessage import pprint import sysconfig +import concurrent.futures import numpy as np from numpy._core import ( @@ -40,7 +41,7 @@ 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE', 'NOGIL_BUILD', - 'IS_EDITABLE' + 'IS_EDITABLE', 'run_threaded', ] @@ -2697,3 +2698,14 @@ def _get_glibc_version(): _glibcver = _get_glibc_version() _glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) + + +def run_threaded(func, iters, pass_count=False): + """Runs a function many times in parallel""" + with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: + if pass_count: + futures = [tpe.submit(func, i) for i in range(iters)] + else: + futures = [tpe.submit(func) for _ in range(iters)] + for f in futures: + f.result() From f5e479a250d57ef70764492ec382e96fb51a20db Mon Sep 17 00:00:00 2001 From: GUAN MING Date: Thu, 1 Aug 2024 10:16:38 +0800 Subject: [PATCH 914/980] TYP: enhance array type --- numpy/_core/multiarray.pyi | 21 ++++++++++++++++++- .../tests/data/reveal/array_constructors.pyi | 7 ++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 1e284be13f0a..dd1093015301 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -82,7 +82,12 @@ from numpy._typing import ( _T_co = TypeVar("_T_co", covariant=True) _T_contra = 
TypeVar("_T_contra", contravariant=True)
 _SCT = TypeVar("_SCT", bound=generic)
-_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
+_ArrayType_co = TypeVar(
+    "_ArrayType_co",
+    bound=ndarray[Any, Any],
+    covariant=True,
+)
 
 # Valid time units
 _UnitKind = L[
@@ -113,6 +118,9 @@ class _SupportsLenAndGetItem(Protocol[_T_contra, _T_co]):
     def __len__(self) -> int: ...
     def __getitem__(self, key: _T_contra, /) -> _T_co: ...
 
+class _SupportsArray(Protocol[_ArrayType_co]):
+    def __array__(self, /) -> _ArrayType_co: ...
+
 __all__: list[str]
 
 ALLOW_THREADS: Final[int]  # 0 or 1 (system-specific)
@@ -188,6 +196,17 @@ def array(
     like: None | _SupportsArrayFunc = ...,
 ) -> _ArrayType: ...
 @overload
+def array(
+    object: _SupportsArray[_ArrayType],
+    dtype: None = ...,
+    *,
+    copy: None | bool | _CopyMode = ...,
+    order: _OrderKACF = ...,
+    subok: L[True],
+    ndmin: L[0] = ...,
+    like: None | _SupportsArrayFunc = ...,
+) -> _ArrayType: ...
+@overload
 def array(
     object: _ArrayLike[_SCT],
     dtype: None = ...,
diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi
index 814da1b9d639..2559acbd0e94 100644
--- a/numpy/typing/tests/data/reveal/array_constructors.pyi
+++ b/numpy/typing/tests/data/reveal/array_constructors.pyi
@@ -20,6 +20,7 @@ i8: np.int64
 A: npt.NDArray[np.float64]
 B: SubClass[np.float64]
 C: list[int]
+D: SubClass[np.float64 | np.int64]
 
 def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ...
 
@@ -31,12 +32,16 @@ assert_type(np.empty_like(A, dtype='c16'), npt.NDArray[Any])
 
 assert_type(np.array(A), npt.NDArray[np.float64])
 assert_type(np.array(B), npt.NDArray[np.float64])
-assert_type(np.array(B, subok=True), SubClass[np.float64])
 assert_type(np.array([1, 1.0]), npt.NDArray[Any])
 assert_type(np.array(deque([1, 2, 3])), npt.NDArray[Any])
 assert_type(np.array(A, dtype=np.int64), npt.NDArray[np.int64])
 assert_type(np.array(A, dtype='c16'), npt.NDArray[Any])
 assert_type(np.array(A, like=A), npt.NDArray[np.float64])
+assert_type(np.array(A, subok=True), npt.NDArray[np.float64])
+assert_type(np.array(B, subok=True), SubClass[np.float64])
+assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64])
+assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64])
+assert_type(np.array(D), npt.NDArray[np.float64 | np.int64])
 
 assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64])
 assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64])

From bdd3e92530de16f9df8cf8b9a307d0a4998f14a5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= 
Date: Thu, 1 Aug 2024 10:32:46 +0200
Subject: [PATCH 915/980] CI: Add xfail for `clip`

---
 tools/ci/array-api-xfails.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tools/ci/array-api-xfails.txt b/tools/ci/array-api-xfails.txt
index c6ec317d437e..c81b61c5740e 100644
--- a/tools/ci/array-api-xfails.txt
+++ b/tools/ci/array-api-xfails.txt
@@ -18,3 +18,6 @@ array_api_tests/test_sorting_functions.py::test_sort
 # ufuncs signature on linux is always <Signature (*args, **kwargs)>
 # np.vecdot is the only ufunc with a keyword argument which causes a failure
 array_api_tests/test_signatures.py::test_func_signature[vecdot]
+
+# input is cast to min/max's dtype if they're different
+array_api_tests/test_operators_and_elementwise_functions.py::test_clip

From f3337a3aa687a50198cc418f4897b351c756b79c Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum 
Date: Thu, 1 Aug 2024 09:50:10 -0600
Subject: [PATCH 916/980] MAINT: remove
indirection and use thread-local _bigint_static directly --- numpy/_core/src/multiarray/dragon4.c | 112 ++++++++++----------------- 1 file changed, 40 insertions(+), 72 deletions(-) diff --git a/numpy/_core/src/multiarray/dragon4.c b/numpy/_core/src/multiarray/dragon4.c index 7217f7b88945..7cd8afbed6d8 100644 --- a/numpy/_core/src/multiarray/dragon4.c +++ b/numpy/_core/src/multiarray/dragon4.c @@ -163,28 +163,8 @@ typedef struct { char repr[16384]; } Dragon4_Scratch; -static NPY_TLS int _bigint_static_in_use = 0; static NPY_TLS Dragon4_Scratch _bigint_static; -static Dragon4_Scratch* -get_dragon4_bigint_scratch(void) { - if (_bigint_static_in_use) { - PyErr_SetString(PyExc_RuntimeError, - "numpy float printing code is not re-entrant. " - "Ping the devs to fix it."); - return NULL; - } - _bigint_static_in_use = 1; - - /* in this dummy implementation we only return the static allocation */ - return &_bigint_static; -} - -static void -free_dragon4_bigint_scratch(Dragon4_Scratch *mem){ - _bigint_static_in_use = 0; -} - /* Copy integer */ static void BigInt_Copy(BigInt *dst, const BigInt *src) @@ -2209,11 +2189,11 @@ Format_floatbits(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, */ static npy_uint32 Dragon4_PrintFloat_IEEE_binary16( - Dragon4_Scratch *scratch, npy_half *value, Dragon4_Options *opt) + npy_half *value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; npy_uint16 val = *value; npy_uint32 floatExponent, floatMantissa, floatSign; @@ -2296,12 +2276,12 @@ Dragon4_PrintFloat_IEEE_binary16( */ static npy_uint32 Dragon4_PrintFloat_IEEE_binary32( - Dragon4_Scratch *scratch, npy_float32 *value, + npy_float32 *value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; union { @@ -2389,11 +2369,11 @@ Dragon4_PrintFloat_IEEE_binary32( */ static npy_uint32 Dragon4_PrintFloat_IEEE_binary64( - Dragon4_Scratch *scratch, npy_float64 *value, Dragon4_Options *opt) + npy_float64 *value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; union { @@ -2504,11 +2484,11 @@ typedef struct FloatVal128 { */ static npy_uint32 Dragon4_PrintFloat_Intel_extended( - Dragon4_Scratch *scratch, FloatVal128 value, Dragon4_Options *opt) + FloatVal128 value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; npy_uint32 floatExponent, floatSign; npy_uint64 floatMantissa; @@ -2602,7 +2582,7 @@ Dragon4_PrintFloat_Intel_extended( */ static npy_uint32 Dragon4_PrintFloat_Intel_extended80( - Dragon4_Scratch *scratch, npy_float80 *value, Dragon4_Options *opt) + npy_float80 *value, Dragon4_Options *opt) { FloatVal128 val128; union { @@ -2618,7 +2598,7 @@ 
Dragon4_PrintFloat_Intel_extended80( val128.lo = buf80.integer.a; val128.hi = buf80.integer.b; - return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt); + return Dragon4_PrintFloat_Intel_extended(val128, opt); } #endif /* HAVE_LDOUBLE_INTEL_EXTENDED_10_BYTES_LE */ @@ -2626,7 +2606,7 @@ Dragon4_PrintFloat_Intel_extended80( /* Intel's 80-bit IEEE extended precision format, 96-bit storage */ static npy_uint32 Dragon4_PrintFloat_Intel_extended96( - Dragon4_Scratch *scratch, npy_float96 *value, Dragon4_Options *opt) + npy_float96 *value, Dragon4_Options *opt) { FloatVal128 val128; union { @@ -2642,7 +2622,7 @@ Dragon4_PrintFloat_Intel_extended96( val128.lo = buf96.integer.a; val128.hi = buf96.integer.b; - return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt); + return Dragon4_PrintFloat_Intel_extended(val128, opt); } #endif /* HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE */ @@ -2650,7 +2630,7 @@ Dragon4_PrintFloat_Intel_extended96( /* Motorola Big-endian equivalent of the Intel-extended 96 fp format */ static npy_uint32 Dragon4_PrintFloat_Motorola_extended96( - Dragon4_Scratch *scratch, npy_float96 *value, Dragon4_Options *opt) + npy_float96 *value, Dragon4_Options *opt) { FloatVal128 val128; union { @@ -2667,7 +2647,7 @@ Dragon4_PrintFloat_Motorola_extended96( val128.hi = buf96.integer.a >> 16; /* once again we assume the int has same endianness as the float */ - return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt); + return Dragon4_PrintFloat_Intel_extended(val128, opt); } #endif /* HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE */ @@ -2687,7 +2667,7 @@ typedef union FloatUnion128 /* Intel's 80-bit IEEE extended precision format, 128-bit storage */ static npy_uint32 Dragon4_PrintFloat_Intel_extended128( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) + npy_float128 *value, Dragon4_Options *opt) { FloatVal128 val128; FloatUnion128 buf128; @@ -2697,7 +2677,7 @@ Dragon4_PrintFloat_Intel_extended128( val128.lo = buf128.integer.a; val128.hi = buf128.integer.b; - return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt); + return Dragon4_PrintFloat_Intel_extended(val128, opt); } #endif /* HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE */ @@ -2716,11 +2696,11 @@ Dragon4_PrintFloat_Intel_extended128( */ static npy_uint32 Dragon4_PrintFloat_IEEE_binary128( - Dragon4_Scratch *scratch, FloatVal128 val128, Dragon4_Options *opt) + FloatVal128 val128, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; npy_uint32 floatExponent, floatSign; @@ -2801,7 +2781,7 @@ Dragon4_PrintFloat_IEEE_binary128( #if defined(HAVE_LDOUBLE_IEEE_QUAD_LE) static npy_uint32 Dragon4_PrintFloat_IEEE_binary128_le( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) + npy_float128 *value, Dragon4_Options *opt) { FloatVal128 val128; FloatUnion128 buf128; @@ -2810,7 +2790,7 @@ Dragon4_PrintFloat_IEEE_binary128_le( val128.lo = buf128.integer.a; val128.hi = buf128.integer.b; - return Dragon4_PrintFloat_IEEE_binary128(scratch, val128, opt); + return Dragon4_PrintFloat_IEEE_binary128(val128, opt); } #endif /* HAVE_LDOUBLE_IEEE_QUAD_LE */ @@ -2821,7 +2801,7 @@ Dragon4_PrintFloat_IEEE_binary128_le( */ static npy_uint32 Dragon4_PrintFloat_IEEE_binary128_be( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) + npy_float128 *value, 
Dragon4_Options *opt) { FloatVal128 val128; FloatUnion128 buf128; @@ -2830,7 +2810,7 @@ Dragon4_PrintFloat_IEEE_binary128_be( val128.lo = buf128.integer.b; val128.hi = buf128.integer.a; - return Dragon4_PrintFloat_IEEE_binary128(scratch, val128, opt); + return Dragon4_PrintFloat_IEEE_binary128(val128, opt); } #endif /* HAVE_LDOUBLE_IEEE_QUAD_BE */ @@ -2876,11 +2856,11 @@ Dragon4_PrintFloat_IEEE_binary128_be( */ static npy_uint32 Dragon4_PrintFloat_IBM_double_double( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) + npy_float128 *value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; FloatVal128 val128; FloatUnion128 buf128; @@ -3067,16 +3047,10 @@ PyObject *\ Dragon4_Positional_##Type##_opt(npy_type *val, Dragon4_Options *opt)\ {\ PyObject *ret;\ - Dragon4_Scratch *scratch = get_dragon4_bigint_scratch();\ - if (scratch == NULL) {\ - return NULL;\ - }\ - if (Dragon4_PrintFloat_##format(scratch, val, opt) < 0) {\ - free_dragon4_bigint_scratch(scratch);\ + if (Dragon4_PrintFloat_##format(val, opt) < 0) {\ return NULL;\ }\ - ret = PyUnicode_FromString(scratch->repr);\ - free_dragon4_bigint_scratch(scratch);\ + ret = PyUnicode_FromString(_bigint_static.repr);\ return ret;\ }\ \ @@ -3105,16 +3079,10 @@ PyObject *\ Dragon4_Scientific_##Type##_opt(npy_type *val, Dragon4_Options *opt)\ {\ PyObject *ret;\ - Dragon4_Scratch *scratch = get_dragon4_bigint_scratch();\ - if (scratch == NULL) {\ - return NULL;\ - }\ - if (Dragon4_PrintFloat_##format(scratch, val, opt) < 0) {\ - free_dragon4_bigint_scratch(scratch);\ + if (Dragon4_PrintFloat_##format(val, opt) < 0) { \ return NULL;\ }\ - ret = PyUnicode_FromString(scratch->repr);\ - free_dragon4_bigint_scratch(scratch);\ + ret = PyUnicode_FromString(_bigint_static.repr);\ return ret;\ }\ PyObject *\ From 1cb40445aaf63224b458601c1fff9a4e74b44eda Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 1 Aug 2024 22:14:44 +0200 Subject: [PATCH 917/980] API,BUG: Fix copyto (and ufunc) handling of scalar cast safety This defines scalar cast safety to be "safe", "equiv", or "unsafe" (for Python int, float, complex). Although, equiv is a bit of an odd one. We define it by using the common DType with the desired one, and assuming that this cast is at least safe. With that, we can create an appropriate array for the scalar (and the rest works fine). This means that any python float can be safely cast to e.g. float32 and any Python integer to any NumPy integer even though the actual assignment may overflow (floats) or fail (integers). `can_cast` itself is not defined here. It could be, but has the disadvantage that it won't error out on assignment later for integers. 
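A sketch of the intended user-visible semantics (illustrative only; the
exact rules are defined by the code below):

    import numpy as np

    out = np.zeros(3, dtype=np.int8)
    np.copyto(out, 42, casting="safe")    # Python int -> int8 counts as safe
    np.copyto(out, 1000, casting="safe")  # cast is still "safe", but the
                                          # actual assignment is expected to
                                          # raise OverflowError

    outf = np.zeros(3, dtype=np.float32)
    np.copyto(outf, 1e300, casting="safe")  # Python float -> float32 is safe;
                                            # the stored value overflows to inf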
---
 numpy/_core/src/multiarray/abstractdtypes.c   | 114 ++++++++++++++
 numpy/_core/src/multiarray/abstractdtypes.h   |  14 ++
 numpy/_core/src/multiarray/multiarraymodule.c |  54 +++++--
 numpy/_core/src/umath/ufunc_object.c          | 149 ++++++++++--------
 numpy/_core/tests/test_api.py                 |  31 ++++
 numpy/_core/tests/test_numeric.py             |   6 +-
 numpy/_core/tests/test_ufunc.py               |  25 +++
 7 files changed, 315 insertions(+), 78 deletions(-)

diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c
index 8d00084f0efe..214833737792 100644
--- a/numpy/_core/src/multiarray/abstractdtypes.c
+++ b/numpy/_core/src/multiarray/abstractdtypes.c
@@ -378,3 +378,117 @@ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexDType = {{{
         .dt_slots = &pycomplexdtype_slots,
     .scalar_type = NULL,  /* set in initialize_and_map_pytypes_to_dtypes */
 };
+
+
+/*
+ * Additional functions to deal with Python literal int, float, complex
+ */
+/*
+ * This function takes an existing array operand and if the new descr does
+ * not match, replaces it with a new array that has the correct descriptor
+ * and holds exactly the scalar value.
+ */
+NPY_NO_EXPORT int
+npy_update_operand_for_scalar(
+        PyArrayObject **operand, PyObject *scalar, PyArray_Descr *descr,
+        NPY_CASTING casting)
+{
+    if (PyArray_EquivTypes(PyArray_DESCR(*operand), descr)) {
+        /*
+         * TODO: This is an unfortunate work-around for legacy type resolvers
+         *       (see `convert_ufunc_arguments` in `ufunc_object.c`), that
+         *       currently forces us to replace the array.
+         */
+        if (!(PyArray_FLAGS(*operand) & NPY_ARRAY_WAS_PYTHON_INT)) {
+            return 0;
+        }
+    }
+    else if (NPY_UNLIKELY(casting == NPY_EQUIV_CASTING) &&
+             descr->type_num != NPY_OBJECT) {
+        /*
+         * incredibly niche, but users could pass equiv casting and we
+         * actually need to cast.  Let object pass (technically correct) but
+         * in all other cases, we don't technically consider them equivalent.
+         * NOTE(seberg): I don't think we should be beholden to this logic.
+         */
+        PyErr_Format(PyExc_TypeError,
+            "cannot cast Python %s to %S under the casting rule 'equiv'",
+            Py_TYPE(scalar)->tp_name, descr);
+        return -1;
+    }
+
+    Py_INCREF(descr);
+    PyArrayObject *new = (PyArrayObject *)PyArray_NewFromDescr(
+            &PyArray_Type, descr, 0, NULL, NULL, NULL, 0, NULL);
+    Py_SETREF(*operand, new);
+    if (*operand == NULL) {
+        return -1;
+    }
+    if (scalar == NULL) {
+        /* The ufunc.resolve_dtypes paths can go here. Anything should go. */
+        return 0;
+    }
+    return PyArray_SETITEM(new, PyArray_BYTES(*operand), scalar);
+}
+
+
+/*
+ * When a user passed a Python literal (int, float, complex), special promotion
+ * rules mean that we don't know the exact descriptor that should be used.
+ *
+ * Typically, this just doesn't really matter. Unfortunately, there are two
+ * exceptions:
+ *   1. The user might have passed `signature=` which may not be compatible.
+ *      In that case, we cannot really assume "safe" casting.
+ *   2. It is at least fathomable that a DType doesn't deal with this directly,
+ *      or that using the original int64/object is wrong in the type resolution.
+ *
+ * The solution is to assume that we can use the common DType of the signature
+ * and the Python scalar DType (`in_DT`) as a safe intermediate.
+ */
+NPY_NO_EXPORT PyArray_Descr *
+npy_find_descr_for_scalar(
+        PyObject *scalar, PyArray_Descr *original_descr,
+        PyArray_DTypeMeta *in_DT, PyArray_DTypeMeta *op_DT)
+{
+    PyArray_Descr *res;
+    /* There is a good chance that the descriptors already match... 
*/
+    if (NPY_DTYPE(original_descr) == op_DT) {
+        Py_INCREF(original_descr);
+        return original_descr;
+    }
+
+    PyArray_DTypeMeta *common = PyArray_CommonDType(in_DT, op_DT);
+    if (common == NULL) {
+        PyErr_Clear();
+        /* This is fine. We simply assume the original descr is viable. */
+        Py_INCREF(original_descr);
+        return original_descr;
+    }
+    /* A very likely case is that there is nothing to do: */
+    if (NPY_DTYPE(original_descr) == common) {
+        Py_DECREF(common);
+        Py_INCREF(original_descr);
+        return original_descr;
+    }
+    if (!NPY_DT_is_parametric(common) ||
+            /* In some paths we only have a scalar type, can't discover */
+            scalar == NULL ||
+            /* If the DType doesn't know the scalar type, guess at default. */
+            !NPY_DT_CALL_is_known_scalar_type(common, Py_TYPE(scalar))) {
+        if (common->singleton != NULL) {
+            res = common->singleton;
+            Py_INCREF(res);
+        }
+        else {
+            res = NPY_DT_CALL_default_descr(common);
+        }
+    }
+    else {
+        res = NPY_DT_CALL_discover_descr_from_pyobject(common, scalar);
+    }
+
+    Py_DECREF(common);
+    return res;
+}
diff --git a/numpy/_core/src/multiarray/abstractdtypes.h b/numpy/_core/src/multiarray/abstractdtypes.h
index 7bf8191e6917..3c96ffe8e0ef 100644
--- a/numpy/_core/src/multiarray/abstractdtypes.h
+++ b/numpy/_core/src/multiarray/abstractdtypes.h
@@ -1,6 +1,7 @@
 #ifndef NUMPY_CORE_SRC_MULTIARRAY_ABSTRACTDTYPES_H_
 #define NUMPY_CORE_SRC_MULTIARRAY_ABSTRACTDTYPES_H_
 
+#include "numpy/ndarraytypes.h"
 #include "arrayobject.h"
 #include "dtypemeta.h"
 
@@ -68,6 +69,19 @@ npy_mark_tmp_array_if_pyscalar(
     return 0;
 }
 
+
+NPY_NO_EXPORT int
+npy_update_operand_for_scalar(
+        PyArrayObject **operand, PyObject *scalar, PyArray_Descr *descr,
+        NPY_CASTING casting);
+
+
+NPY_NO_EXPORT PyArray_Descr *
+npy_find_descr_for_scalar(
+        PyObject *scalar, PyArray_Descr *original_descr,
+        PyArray_DTypeMeta *in_DT, PyArray_DTypeMeta *op_DT);
+
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c
index 944898ceecf7..9a1bad320014 100644
--- a/numpy/_core/src/multiarray/multiarraymodule.c
+++ b/numpy/_core/src/multiarray/multiarraymodule.c
@@ -1929,21 +1929,55 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored),
 
 
 static PyObject *
-array_copyto(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
+array_copyto(PyObject *NPY_UNUSED(ignored),
+             PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
 {
-    static char *kwlist[] = {"dst", "src", "casting", "where", NULL};
-    PyObject *wheremask_in = NULL;
-    PyArrayObject *dst = NULL, *src = NULL, *wheremask = NULL;
+    PyObject *dst_obj, *src_obj, *wheremask_in = NULL;
+    PyArrayObject *src = NULL, *wheremask = NULL;
     NPY_CASTING casting = NPY_SAME_KIND_CASTING;
+    NPY_PREPARE_ARGPARSER;
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O&|O&O:copyto", kwlist,
-                &PyArray_Type, &dst,
-                &PyArray_Converter, &src,
-                &PyArray_CastingConverter, &casting,
-                &wheremask_in)) {
+    if (npy_parse_arguments("copyto", args, len_args, kwnames,
+            "dst", NULL, &dst_obj,
+            "src", NULL, &src_obj,
+            "|casting", &PyArray_CastingConverter, &casting,
+            "|where", NULL, &wheremask_in,
+            NULL, NULL, NULL) < 0) {
         goto fail;
     }
 
+    if (!PyArray_Check(dst_obj)) {
+        PyErr_Format(PyExc_TypeError,
+                "copyto() argument 1 must be a numpy.ndarray, not %s",
+                Py_TYPE(dst_obj)->tp_name);
+        goto fail;
+    }
+    PyArrayObject *dst = (PyArrayObject *)dst_obj;
+
+    src = (PyArrayObject *)PyArray_FromAny(src_obj, NULL, 0, 0, 0, NULL);
+    if (src == NULL) {
+        goto fail;
+    }
+    PyArray_DTypeMeta 
*dtype = NPY_DTYPE(PyArray_DESCR(src)); + Py_INCREF(dtype); + if (npy_mark_tmp_array_if_pyscalar(src_obj, src, &dtype)) { + /* The user passed a Python scalar */ + PyArray_Descr *descr = npy_find_descr_for_scalar( + src_obj, PyArray_DESCR(src), dtype, + NPY_DTYPE(PyArray_DESCR(dst))); + Py_DECREF(dtype); + if (descr == NULL) { + goto fail; + } + int res = npy_update_operand_for_scalar(&src, src_obj, descr, casting); + Py_DECREF(descr); + if (res < 0) { + goto fail; + } + } + else { + Py_DECREF(dtype); + } + if (wheremask_in != NULL) { /* Get the boolean where mask */ PyArray_Descr *dtype = PyArray_DescrFromType(NPY_BOOL); @@ -4431,7 +4465,7 @@ static struct PyMethodDef array_module_methods[] = { METH_FASTCALL | METH_KEYWORDS, NULL}, {"copyto", (PyCFunction)array_copyto, - METH_VARARGS|METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"nested_iters", (PyCFunction)NpyIter_NestedIters, METH_VARARGS|METH_KEYWORDS, NULL}, diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 92bc7793f2ad..9aac8d8cf188 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -101,8 +101,8 @@ static int resolve_descriptors(int nop, PyUFuncObject *ufunc, PyArrayMethodObject *ufuncimpl, PyArrayObject *operands[], PyArray_Descr *dtypes[], - PyArray_DTypeMeta *signature[], PyObject *inputs_tup, - NPY_CASTING casting); + PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *original_DTypes[], + PyObject *inputs_tup, NPY_CASTING casting); /*UFUNC_API*/ @@ -2356,11 +2356,12 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, if (evil_ndim_mutating_hack) { ((PyArrayObject_fields *)out)->nd = 0; } - /* DTypes may currently get filled in fallbacks and XDECREF for error: */ - Py_XDECREF(operation_DTypes[0]); - Py_XDECREF(operation_DTypes[1]); - Py_XDECREF(operation_DTypes[2]); + // TODO: Clean up multiple cleanup! 
if (ufuncimpl == NULL) { + /* DTypes may currently get filled in fallbacks and XDECREF for error: */ + Py_XDECREF(operation_DTypes[0]); + Py_XDECREF(operation_DTypes[1]); + Py_XDECREF(operation_DTypes[2]); return NULL; } @@ -2372,9 +2373,17 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, * (although this should possibly happen through a deprecation) */ if (resolve_descriptors(3, ufunc, ufuncimpl, - ops, out_descrs, signature, NULL, casting) < 0) { + ops, out_descrs, signature, operation_DTypes, NULL, casting) < 0) { + /* DTypes may currently get filled in fallbacks and XDECREF for error: */ + Py_XDECREF(operation_DTypes[0]); + Py_XDECREF(operation_DTypes[1]); + Py_XDECREF(operation_DTypes[2]); return NULL; } + /* DTypes may currently get filled in fallbacks and XDECREF for error: */ + Py_XDECREF(operation_DTypes[0]); + Py_XDECREF(operation_DTypes[1]); + Py_XDECREF(operation_DTypes[2]); /* * The first operand and output should be the same array, so they should @@ -4023,12 +4032,12 @@ static int resolve_descriptors(int nop, PyUFuncObject *ufunc, PyArrayMethodObject *ufuncimpl, PyArrayObject *operands[], PyArray_Descr *dtypes[], - PyArray_DTypeMeta *signature[], PyObject *inputs_tup, - NPY_CASTING casting) + PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *original_DTypes[], + PyObject *inputs_tup, NPY_CASTING casting) { int retval = -1; NPY_CASTING safety; - PyArray_Descr *original_dtypes[NPY_MAXARGS]; + PyArray_Descr *original_descrs[NPY_MAXARGS]; NPY_UF_DBG_PRINT("Resolving the descriptors\n"); @@ -4043,12 +4052,12 @@ resolve_descriptors(int nop, PyObject *input_scalars[NPY_MAXARGS]; for (int i = 0; i < nop; i++) { if (operands[i] == NULL) { - original_dtypes[i] = NULL; + original_descrs[i] = NULL; } else { /* For abstract DTypes, we might want to change what this is */ - original_dtypes[i] = PyArray_DTYPE(operands[i]); - Py_INCREF(original_dtypes[i]); + original_descrs[i] = PyArray_DTYPE(operands[i]); + Py_INCREF(original_descrs[i]); } /* * Check whether something is a scalar of the given type. @@ -4067,27 +4076,71 @@ resolve_descriptors(int nop, npy_intp view_offset = NPY_MIN_INTP; /* currently ignored */ safety = ufuncimpl->resolve_descriptors_with_scalars( - ufuncimpl, signature, original_dtypes, input_scalars, + ufuncimpl, signature, original_descrs, input_scalars, dtypes, &view_offset ); + + /* For scalars, replace the operand if needed (scalars can't be out) */ + for (int i = 0; i < nin; i++) { + if ((PyArray_FLAGS(operands[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL)) { + /* `resolve_descriptors_with_scalars` decides the descr */ + if (npy_update_operand_for_scalar( + &operands[i], input_scalars[i], dtypes[i], + /* ignore cast safety for this op (resolvers job) */ + NPY_SAFE_CASTING) < 0) { + goto finish; + } + } + } goto check_safety; } for (int i = 0; i < nop; ++i) { if (operands[i] == NULL) { - original_dtypes[i] = NULL; + original_descrs[i] = NULL; + continue; } - else { - /* - * The dtype may mismatch the signature, in which case we need - * to make it fit before calling the resolution. - */ - PyArray_Descr *descr = PyArray_DTYPE(operands[i]); - original_dtypes[i] = PyArray_CastDescrToDType(descr, signature[i]); - if (original_dtypes[i] == NULL) { + PyArray_Descr *descr = PyArray_DTYPE(operands[i]); + + /* + * If we are working with Python literals/scalars, deal with them. + * If needed, we create new array with the right descriptor. 
+ */ + if ((PyArray_FLAGS(operands[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL)) { + PyObject *input; + if (inputs_tup == NULL) { + input = NULL; + } + else { + input = PyTuple_GET_ITEM(inputs_tup, i); + } + + PyArray_Descr *new_descr = npy_find_descr_for_scalar( + input, descr, original_DTypes[i], signature[i]); + if (new_descr == NULL) { nop = i; /* only this much is initialized */ goto finish; } + int res = npy_update_operand_for_scalar( + &operands[i], input, new_descr, casting); + Py_DECREF(new_descr); + if (res < 0) { + nop = i; /* only this much is initialized */ + goto finish; + } + + /* Descriptor may have been modified along the way */ + descr = PyArray_DESCR(operands[i]); + } + + /* + * The dtype may mismatch the signature, in which case we need + * to make it fit before calling the resolution. + */ + original_descrs[i] = PyArray_CastDescrToDType(descr, signature[i]); + if (original_descrs[i] == NULL) { + nop = i; /* only this much is initialized */ + goto finish; } } @@ -4096,7 +4149,7 @@ resolve_descriptors(int nop, npy_intp view_offset = NPY_MIN_INTP; /* currently ignored */ safety = ufuncimpl->resolve_descriptors(ufuncimpl, - signature, original_dtypes, dtypes, &view_offset); + signature, original_descrs, dtypes, &view_offset); goto check_safety; } else { @@ -4124,7 +4177,7 @@ resolve_descriptors(int nop, finish: for (int i = 0; i < nop; i++) { - Py_XDECREF(original_dtypes[i]); + Py_XDECREF(original_descrs[i]); } return retval; } @@ -4467,49 +4520,11 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* Find the correct descriptors for the operation */ if (resolve_descriptors(nop, ufunc, ufuncimpl, - operands, operation_descrs, signature, full_args.in, casting) < 0) { + operands, operation_descrs, signature, operand_DTypes, + full_args.in, casting) < 0) { goto fail; } - if (promoting_pyscalars) { - /* - * Python integers need to be cast specially. For other python - * scalars it does not hurt either. It would be nice to never create - * the array in this case, but that is difficult until value-based - * promotion rules are gone. (After that, we may get away with using - * dummy arrays rather than real arrays for the legacy resolvers.) - */ - for (int i = 0; i < nin; i++) { - int orig_flags = PyArray_FLAGS(operands[i]); - if (!(orig_flags & NPY_ARRAY_WAS_PYTHON_LITERAL)) { - continue; - } - /* - * If descriptor matches, no need to convert, but integers may - * have been too large. - */ - if (!(orig_flags & NPY_ARRAY_WAS_INT_AND_REPLACED) - && PyArray_EquivTypes( - PyArray_DESCR(operands[i]), operation_descrs[i])) { - continue; - } - /* Otherwise, replace the operand with a new array */ - PyArray_Descr *descr = operation_descrs[i]; - Py_INCREF(descr); - PyArrayObject *new = (PyArrayObject *)PyArray_NewFromDescr( - &PyArray_Type, descr, 0, NULL, NULL, NULL, 0, NULL); - Py_SETREF(operands[i], new); - if (operands[i] == NULL) { - goto fail; - } - - PyObject *value = PyTuple_GET_ITEM(full_args.in, i); - if (PyArray_SETITEM(new, PyArray_BYTES(operands[i]), value) < 0) { - goto fail; - } - } - } - /* * Do the final preparations and call the inner-loop. 
*/ @@ -5827,7 +5842,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) /* Find the correct operation_descrs for the operation */ int resolve_result = resolve_descriptors(nop, ufunc, ufuncimpl, - tmp_operands, operation_descrs, signature, NULL, NPY_UNSAFE_CASTING); + tmp_operands, operation_descrs, signature, operand_DTypes, NULL, NPY_UNSAFE_CASTING); for (int i = 0; i < 3; i++) { Py_XDECREF(signature[i]); Py_XDECREF(operand_DTypes[i]); @@ -6152,7 +6167,7 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, /* Find the correct descriptors for the operation */ if (resolve_descriptors(ufunc->nargs, ufunc, ufuncimpl, - dummy_arrays, operation_descrs, signature, + dummy_arrays, operation_descrs, signature, DTypes, NULL, casting) < 0) { goto finish; } diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 5b9bdb60f1b3..cee672bba58c 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -406,6 +406,37 @@ def test_copyto(): # 'dst' must be an array assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4]) + +def test_copyto_cast_safety(): + with pytest.raises(TypeError): + np.copyto(np.arange(3), 3., casting="safe") + + # Can put integer and float scalars safely (and equiv): + np.copyto(np.arange(3), 3, casting="equiv") + np.copyto(np.arange(3.), 3., casting="equiv") + # And also with less precision safely: + np.copyto(np.arange(3, dtype="uint8"), 3, casting="safe") + np.copyto(np.arange(3., dtype="float32"), 3., casting="safe") + + # But not equiv: + with pytest.raises(TypeError): + np.copyto(np.arange(3, dtype="uint8"), 3, casting="equiv") + + with pytest.raises(TypeError): + np.copyto(np.arange(3., dtype="float32"), 3., casting="equiv") + + # As a special thing, object is equiv currently: + np.copyto(np.arange(3, dtype=object), 3, casting="equiv") + + # The following raises an overflow error/givs a warning but not + # type error (due to casting), though: + with pytest.raises(OverflowError): + np.copyto(np.arange(3), 2**80, casting="safe") + + with pytest.warns(RuntimeWarning): + np.copyto(np.arange(3, dtype=np.float32), 2e300, casting="safe") + + def test_copyto_permut(): # test explicit overflow case pad = 500 diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index a9694ebee736..b665db5701a9 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -3472,7 +3472,11 @@ def test_empty_like(self): def test_filled_like(self): self.check_like_function(np.full_like, 0, True) self.check_like_function(np.full_like, 1, True) - self.check_like_function(np.full_like, 1000, True) + # Large integers may overflow, but using int64 is OK (casts) + # see also gh-27075 + with pytest.raises(OverflowError): + np.full_like(np.ones(3, dtype=np.int8), 1000) + self.check_like_function(np.full_like, np.int64(1000), True) self.check_like_function(np.full_like, 123.456, True) # Inf to integer casts cause invalid-value errors: ignore them. 
 with np.errstate(invalid="ignore"):
diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py
index e777d7e07be3..bf2a4e3b051e 100644
--- a/numpy/_core/tests/test_ufunc.py
+++ b/numpy/_core/tests/test_ufunc.py
@@ -616,6 +616,31 @@ def call_ufunc(arr, **kwargs):
         expected = call_ufunc(arr.astype(np.float64))  # upcast
         assert_array_equal(expected, res)
 
+    @pytest.mark.parametrize("ufunc", [np.add, np.equal])
+    def test_cast_safety_scalar(self, ufunc):
+        # We test add and equal, because equal has special scalar handling
+        # Note that the "equiv" casting behavior should maybe be considered
+        # a current implementation.
+        with pytest.raises(TypeError):
+            # The loop picked is integral, which is not safe
+            ufunc(3., 4., dtype=int, casting="safe")
+
+        with pytest.raises(TypeError):
+            # We accept python float as float64 but not float32 for equiv.
+            ufunc(3., 4., dtype="float32", casting="equiv")
+
+        # Special case for object and equal (note that equiv implies safe)
+        ufunc(3, 4, dtype=object, casting="equiv")
+        # Picks a double loop for both, first is equiv, second safe:
+        ufunc(np.array([3.]), 3., casting="equiv")
+        ufunc(np.array([3.]), 3, casting="safe")
+        ufunc(np.array([3]), 3, casting="equiv")
+
+    def test_cast_safety_scalar_special(self):
+        # We allow this (and it succeeds) via object, although the equiv
+        # part may not be important.
+        np.equal(np.array([3]), 2**300, casting="equiv")
+
     def test_true_divide(self):
         a = np.array(10)
         b = np.array(20)

From 7eb4050719b1d86132dfc374ce91a015d32b3145 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Fri, 2 Aug 2024 08:40:01 +0200
Subject: [PATCH 918/980] MAINT: Remove value-based check (should be unused now)

---
 numpy/_core/src/multiarray/array_assign_scalar.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/numpy/_core/src/multiarray/array_assign_scalar.c b/numpy/_core/src/multiarray/array_assign_scalar.c
index 6818c1aa2a1b..0199ba969eb9 100644
--- a/numpy/_core/src/multiarray/array_assign_scalar.c
+++ b/numpy/_core/src/multiarray/array_assign_scalar.c
@@ -243,8 +243,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst,
     }
 
     /* Check the casting rule */
-    if (!can_cast_scalar_to(src_dtype, src_data,
-                            PyArray_DESCR(dst), casting)) {
+    if (!PyArray_CanCastTypeTo(src_dtype, PyArray_DESCR(dst), casting)) {
         npy_set_invalid_cast_error(
                 src_dtype, PyArray_DESCR(dst), casting, NPY_TRUE);
         return -1;

From 97f617732faa2d64cf07e8450ed278503c18b4cd Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Fri, 2 Aug 2024 10:48:52 +0200
Subject: [PATCH 919/980] BUG: Fix windows default integer for resolver workaround

Incredibly niche and can really never matter.
But did matter for the new tests (equiv)

---
 numpy/_core/src/multiarray/multiarraymodule.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c
index 9a1bad320014..83fc505a97e6 100644
--- a/numpy/_core/src/multiarray/multiarraymodule.c
+++ b/numpy/_core/src/multiarray/multiarraymodule.c
@@ -5163,7 +5163,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) {
 
     // initialize static reference to a zero-like array
     npy_static_pydata.zero_pyint_like_arr = PyArray_ZEROS(
-            0, NULL, NPY_LONG, NPY_FALSE);
+            0, NULL, NPY_DEFAULT_INT, NPY_FALSE);
     if (npy_static_pydata.zero_pyint_like_arr == NULL) {
         goto err;
     }

From 9ced09ee433e8ed43f5e2e1193c2d78bef975745 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Fri, 2 Aug 2024 11:08:48 +0200
Subject: [PATCH 920/980] DOC: Add release note to change

---
 doc/release/upcoming_changes/27091.change.rst | 24 +++++++++++++++
 1 file changed, 24 insertions(+)
 create mode 100644 doc/release/upcoming_changes/27091.change.rst

diff --git a/doc/release/upcoming_changes/27091.change.rst b/doc/release/upcoming_changes/27091.change.rst
new file mode 100644
index 000000000000..5b71692efabd
--- /dev/null
+++ b/doc/release/upcoming_changes/27091.change.rst
@@ -0,0 +1,24 @@
+Cast-safety fixes in ``copyto`` and ``full``
+--------------------------------------------
+``copyto`` now uses NEP 50 correctly and applies this to its cast safety.
+Python integer to NumPy integer casts and Python float to NumPy float casts
+are now considered "safe" even if assignment may fail or precision may be lost.
+This means the following examples change slightly:
+
+* ``np.copyto(int8_arr, 1000)`` previously performed an unsafe/same-kind cast
+  of the Python integer.  It will now always raise; to achieve an unsafe cast
+  you must pass an array or NumPy scalar.
+* ``np.copyto(uint8_arr, 1000, casting="safe")`` will raise an OverflowError
+  rather than a TypeError due to same-kind casting.
+* ``np.copyto(float32_arr, 1e300, casting="safe")`` will overflow to ``inf``
+  (float32 cannot hold ``1e300``) rather than raising a TypeError.
+
+Further, only the dtype is used when assigning NumPy scalars (or 0-d arrays),
+meaning that the following behaves differently:
+
+* ``np.copyto(float32_arr, np.float64(3.0), casting="safe")`` raises.
+* ``np.copyto(int8_arr, np.int64(100), casting="safe")`` raises.
+  Previously, NumPy checked whether the value 100 fits into ``int8_arr``.
+
+This aligns ``copyto``, ``full``, and ``full_like`` with the correct NumPy 2
+behavior.
\ No newline at end of file

From 4a1ca7d97107fb771492d53efcb1f0bad466b5e4 Mon Sep 17 00:00:00 2001
From: Warren Weckesser
Date: Fri, 2 Aug 2024 05:15:44 -0400
Subject: [PATCH 921/980] DEP: lib: Deprecate acceptance of float (and more) in bincount. (#27076)

The first argument of bincount is expected to contain integers. As noted
in gh-3138, it actually accepts floats, and casts them to integers with
no warnings. (It also accepts other objects that can be cast to integers,
so inputs such as ["1", Fraction(5, 3)] are accepted, with fractional
parts silently dropped.)

This change deprecates that behavior. Now a deprecation warning is
generated if the input cannot be safely cast to integer.

Closes gh-3138.
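An illustrative session (a sketch only, assuming ``import numpy as np``;
the warning text is the one added by this patch, and the result itself is
unchanged during the deprecation period):

    >>> np.bincount([0.5, 1.0, 1.5])
    DeprecationWarning: Non-integer input passed to bincount. In a future
    version of NumPy, this will be an error. (Deprecated NumPy 2.1)
    array([1, 2])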
Co-authored-by: Sebastian Berg --- numpy/_core/src/multiarray/compiled_base.c | 54 ++++++++++++++++++++-- numpy/_core/tests/test_deprecations.py | 5 ++ numpy/lib/tests/test_function_base.py | 5 ++ 3 files changed, 61 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 5876247cec9c..48524aff4dac 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -101,7 +101,7 @@ minmax(const npy_intp *data, npy_intp data_len, npy_intp *mn, npy_intp *mx) * arr_bincount is registered as bincount. * * bincount accepts one, two or three arguments. The first is an array of - * non-negative integers The second, if present, is an array of weights, + * non-negative integers. The second, if present, is an array of weights, * which must be promotable to double. Call these arguments list and * weight. Both must be one-dimensional with len(weight) == len(list). If * weight is not present then bincount(list)[i] is the number of occurrences @@ -130,9 +130,57 @@ arr_bincount(PyObject *NPY_UNUSED(self), PyObject *const *args, return NULL; } - lst = (PyArrayObject *)PyArray_ContiguousFromAny(list, NPY_INTP, 1, 1); + /* + * Accepting arbitrary lists that are cast to NPY_INTP, possibly + * losing precision because of unsafe casts, is deprecated. We + * continue to use PyArray_ContiguousFromAny(list, NPY_INTP, 1, 1) + * to convert the input during the deprecation period, but we also + * check to see if a deprecation warning should be generated. + * Some refactoring will be needed when the deprecation expires. + */ + + /* Check to see if we should generate a deprecation warning. */ + if (!PyArray_Check(list)) { + /* list is not a numpy array, so convert it. */ + PyArrayObject *tmp1 = (PyArrayObject *)PyArray_FromAny( + list, NULL, 1, 1, + NPY_ARRAY_DEFAULT, NULL); + if (tmp1 == NULL) { + goto fail; + } + if (PyArray_SIZE(tmp1) > 0) { + /* The input is not empty, so convert it to NPY_INTP. */ + lst = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)tmp1, + NPY_INTP, 1, 1); + Py_DECREF(tmp1); + if (lst == NULL) { + /* Failed converting to NPY_INTP. */ + if (PyErr_ExceptionMatches(PyExc_TypeError)) { + PyErr_Clear(); + /* Deprecated 2024-08-02, NumPy 2.1 */ + if (DEPRECATE("Non-integer input passed to bincount. In a " + "future version of NumPy, this will be an " + "error. (Deprecated NumPy 2.1)") < 0) { + goto fail; + } + } + else { + /* Failure was not a TypeError. */ + goto fail; + } + } + } + else { + /* Got an empty list. 
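+             * Nothing to deprecate in this case: the fallback conversion
+             * to NPY_INTP below trivially succeeds for an empty input.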
*/ + Py_DECREF(tmp1); + } + } + if (lst == NULL) { - goto fail; + lst = (PyArrayObject *)PyArray_ContiguousFromAny(list, NPY_INTP, 1, 1); + if (lst == NULL) { + goto fail; + } } len = PyArray_SIZE(lst); diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 648a1d22ea99..33431faef684 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -217,6 +217,11 @@ class TestBincount(_DeprecationTestCase): def test_bincount_minlength(self): self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None)) + # 2024-07-29, 2.1.0 + @pytest.mark.parametrize('badlist', [[0.5, 1.2, 1.5], + ['0', '1', '1']]) + def test_bincount_bad_list(self, badlist): + self.assert_deprecated(lambda: np.bincount(badlist)) class TestGeneratorSum(_DeprecationTestCase): diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index d5a6d7b999de..bc3ce6409f1c 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2848,6 +2848,11 @@ def test_empty_with_minlength(self): y = np.bincount(x, minlength=5) assert_array_equal(y, np.zeros(5, dtype=int)) + @pytest.mark.parametrize('minlength', [0, 3]) + def test_empty_list(self, minlength): + assert_array_equal(np.bincount([], minlength=minlength), + np.zeros(minlength, dtype=int)) + def test_with_incorrect_minlength(self): x = np.array([], dtype=int) assert_raises_regex(TypeError, From ff2088bbc253f037a2f79973738e3704efaa23d6 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 2 Aug 2024 07:45:31 -0400 Subject: [PATCH 922/980] DOC: Add release note about deprecation introduced in gh-27076. [skip actions] [skip azp] [skip cirrus] --- doc/release/upcoming_changes/27076.deprecation.rst | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 doc/release/upcoming_changes/27076.deprecation.rst diff --git a/doc/release/upcoming_changes/27076.deprecation.rst b/doc/release/upcoming_changes/27076.deprecation.rst new file mode 100644 index 000000000000..e4482f0868f3 --- /dev/null +++ b/doc/release/upcoming_changes/27076.deprecation.rst @@ -0,0 +1,3 @@ + * Passing non-integer inputs as the first argument of `bincount` is now + deprecated, because such inputs are silently cast to integers with no + warning about loss of precision. From 886465079f6dc9c83b3c42d9e291dfcce67fb5f4 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 2 Aug 2024 08:50:51 -0400 Subject: [PATCH 923/980] DOC: Fix indentation of a few release notes. [skip actions] [skip azp] [skip cirrus] --- doc/release/upcoming_changes/26388.performance.rst | 6 +++--- doc/release/upcoming_changes/26452.deprecation.rst | 8 ++++---- doc/release/upcoming_changes/27076.deprecation.rst | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/release/upcoming_changes/26388.performance.rst b/doc/release/upcoming_changes/26388.performance.rst index 885bc28c4a78..2e99f9452c1e 100644 --- a/doc/release/upcoming_changes/26388.performance.rst +++ b/doc/release/upcoming_changes/26388.performance.rst @@ -1,3 +1,3 @@ - * `numpy.save` now uses pickle protocol version 4 for saving arrays with - object dtype, which allows for pickle objects larger than 4GB and improves - saving speed by about 5% for large arrays. \ No newline at end of file +* `numpy.save` now uses pickle protocol version 4 for saving arrays with + object dtype, which allows for pickle objects larger than 4GB and improves + saving speed by about 5% for large arrays. 
diff --git a/doc/release/upcoming_changes/26452.deprecation.rst b/doc/release/upcoming_changes/26452.deprecation.rst index 146b50af048c..cc4a10bfafee 100644 --- a/doc/release/upcoming_changes/26452.deprecation.rst +++ b/doc/release/upcoming_changes/26452.deprecation.rst @@ -1,4 +1,4 @@ - * The `fix_imports` keyword argument in `numpy.save` is deprecated. Since - NumPy 1.17, `numpy.save` uses a pickle protocol that no longer supports - Python 2, and ignored `fix_imports` keyword. This keyword is kept only - for backward compatibility. It is now deprecated. \ No newline at end of file +* The `fix_imports` keyword argument in `numpy.save` is deprecated. Since + NumPy 1.17, `numpy.save` uses a pickle protocol that no longer supports + Python 2, and ignored `fix_imports` keyword. This keyword is kept only + for backward compatibility. It is now deprecated. diff --git a/doc/release/upcoming_changes/27076.deprecation.rst b/doc/release/upcoming_changes/27076.deprecation.rst index e4482f0868f3..f692b814c17d 100644 --- a/doc/release/upcoming_changes/27076.deprecation.rst +++ b/doc/release/upcoming_changes/27076.deprecation.rst @@ -1,3 +1,3 @@ - * Passing non-integer inputs as the first argument of `bincount` is now - deprecated, because such inputs are silently cast to integers with no - warning about loss of precision. +* Passing non-integer inputs as the first argument of `bincount` is now + deprecated, because such inputs are silently cast to integers with no + warning about loss of precision. From c56a45315ba4373f586223676c5450ee5cc8e661 Mon Sep 17 00:00:00 2001 From: Slobodan Date: Fri, 2 Aug 2024 07:10:00 -0700 Subject: [PATCH 924/980] BUG: Complex printing tests fail on Windows ARM64 Fixes issue #25626 --- numpy/_core/include/numpy/npy_math.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/numpy/_core/include/numpy/npy_math.h b/numpy/_core/include/numpy/npy_math.h index 216b173fde58..d11df12b7ceb 100644 --- a/numpy/_core/include/numpy/npy_math.h +++ b/numpy/_core/include/numpy/npy_math.h @@ -362,7 +362,11 @@ NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); static inline double npy_creal(const npy_cdouble z) { +#if defined(__cplusplus) return ((double *) &z)[0]; +#else + return creal(z); +#endif } static inline void npy_csetreal(npy_cdouble *z, const double r) @@ -372,7 +376,11 @@ static inline void npy_csetreal(npy_cdouble *z, const double r) static inline double npy_cimag(const npy_cdouble z) { +#if defined(__cplusplus) return ((double *) &z)[1]; +#else + return cimag(z); +#endif } static inline void npy_csetimag(npy_cdouble *z, const double i) @@ -382,7 +390,11 @@ static inline void npy_csetimag(npy_cdouble *z, const double i) static inline float npy_crealf(const npy_cfloat z) { +#if defined(__cplusplus) return ((float *) &z)[0]; +#else + return crealf(z); +#endif } static inline void npy_csetrealf(npy_cfloat *z, const float r) @@ -392,7 +404,11 @@ static inline void npy_csetrealf(npy_cfloat *z, const float r) static inline float npy_cimagf(const npy_cfloat z) { +#if defined(__cplusplus) return ((float *) &z)[1]; +#else + return cimagf(z); +#endif } static inline void npy_csetimagf(npy_cfloat *z, const float i) @@ -402,7 +418,11 @@ static inline void npy_csetimagf(npy_cfloat *z, const float i) static inline npy_longdouble npy_creall(const npy_clongdouble z) { +#if defined(__cplusplus) return ((longdouble_t *) &z)[0]; +#else + return creall(z); +#endif } static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) @@ 
-412,7 +432,11 @@ static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) static inline npy_longdouble npy_cimagl(const npy_clongdouble z) { +#if defined(__cplusplus) return ((longdouble_t *) &z)[1]; +#else + return cimagl(z); +#endif } static inline void npy_csetimagl(npy_clongdouble *z, const longdouble_t i) From 80edbd168c4eb6a4b3d91153bd86719b1a71da31 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Aug 2024 17:05:22 +0000 Subject: [PATCH 925/980] MAINT: Bump actions/upload-artifact from 4.3.4 to 4.3.5 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.4 to 4.3.5. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/0b2256b8c012f0828dc542b3febcab082c67f72b...89ef406dd8d7e03cfd12d9e0a4a378f454709029) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/cygwin.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index e17a0d95d0ac..33b77e408d23 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -62,7 +62,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index cf6cd230ec93..a5ee5c1d1e3b 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. 
- name: "Upload artifact" - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f73616ef46c3..95a0f8bc8a9a 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -172,7 +172,7 @@ jobs: CIBW_FREE_THREADED_SUPPORT: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + - uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl @@ -253,7 +253,7 @@ jobs: python -mpip install twine twine check dist/* - - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + - uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 with: name: sdist path: ./dist/* From f4f82e98214d6654bcdc623545ed340e8efef804 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 2 Aug 2024 11:22:14 -0600 Subject: [PATCH 926/980] BUG: add missing error handling in public_dtype_api.c --- numpy/_core/src/multiarray/public_dtype_api.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/public_dtype_api.c b/numpy/_core/src/multiarray/public_dtype_api.c index 60dceae3275d..9b2d7a393842 100644 --- a/numpy/_core/src/multiarray/public_dtype_api.c +++ b/numpy/_core/src/multiarray/public_dtype_api.c @@ -71,7 +71,9 @@ PyArrayInitDTypeMeta_FromSpec( return -1; } - dtypemeta_initialize_struct_from_spec(DType, spec, 0); + if (dtypemeta_initialize_struct_from_spec(DType, spec, 0) < 0) { + return -1; + } if (NPY_DT_SLOTS(DType)->setitem == NULL || NPY_DT_SLOTS(DType)->getitem == NULL) { From 886d36100004ba31a779a441f79ac885cc9e236e Mon Sep 17 00:00:00 2001 From: GUAN MING <105915352+guan404ming@users.noreply.github.com> Date: Sat, 3 Aug 2024 01:32:09 +0800 Subject: [PATCH 927/980] DOC: update np.shares_memory() docs (#27090) Description In NumPy 2.0, the MAY_SHARE_EXACT and MAY_SHARE_BOUNDS constants are no longer exposed as part of the public API. This PR updates the documentation for np.shares_memory functions to reflect this change. Changes made Replaced MAY_SHARE_EXACT with its actual value -1 in the documentation. Replaced MAY_SHARE_BOUNDS with its actual value 0 in the documentation. Updated the warning message to remove references to MAY_SHARE_BOUNDS. These changes ensure that the documentation accurately reflects the current implementation and provides clear guidance for users on how to correctly use these functions in NumPy 2.0. Close #27089 Co-authored-by: Sebastian Berg --- numpy/_core/multiarray.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 70c3a08384bb..e2ca115b3728 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -1350,7 +1350,7 @@ def shares_memory(a, b, max_work=None): .. warning:: This function can be exponentially slow for some inputs, unless - `max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``. + `max_work` is set to zero or a positive integer. If in doubt, use `numpy.may_share_memory` instead. Parameters @@ -1362,12 +1362,13 @@ def shares_memory(a, b, max_work=None): of candidate solutions to consider). 
        The following special values are recognized:
 
-        max_work=MAY_SHARE_EXACT (default)
+        max_work=-1 (default)
             The problem is solved exactly. In this case, the function returns
             True only if there is an element shared between the arrays. Finding
             the exact solution may take extremely long in some cases.
-        max_work=MAY_SHARE_BOUNDS
+        max_work=0
             Only the memory bounds of a and b are checked.
+            This is equivalent to using ``may_share_memory()``.
 
     Raises
     ------

From a07c90ccef87bc7d7f584f6861b7f0f466329321 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Fri, 2 Aug 2024 19:52:42 +0200
Subject: [PATCH 928/980] DOC: Add documentation explaining our promotion rules (#25705)

* DOC: Add documentation explaining our promotion rules

This adds a dedicated page about promotion rules.

* Try clarifying the promotion figure a bit more and maybe fix doc warnings/errors

* DOC: adjustments to promotion rule documentation

* Small edits

* Address Marten's review comments

* Also adopt a suggestion by Matt (slightly rephrasing another part)

* Address/adopt Marten's review comments

---------

Co-authored-by: Matt Haberland

---
 doc/source/reference/arrays.promotion.rst     |  256 +++
 doc/source/reference/arrays.rst               |    1 +
 .../figures/nep-0050-promotion-no-fonts.svg   | 1471 +++++++++++++++++
 3 files changed, 1728 insertions(+)
 create mode 100644 doc/source/reference/arrays.promotion.rst
 create mode 100644 doc/source/reference/figures/nep-0050-promotion-no-fonts.svg

diff --git a/doc/source/reference/arrays.promotion.rst b/doc/source/reference/arrays.promotion.rst
new file mode 100644
index 000000000000..7e19691820f6
--- /dev/null
+++ b/doc/source/reference/arrays.promotion.rst
@@ -0,0 +1,256 @@
+.. currentmodule:: numpy
+
+.. _arrays.promotion:
+
+****************************
+Data type promotion in NumPy
+****************************
+
+When mixing two different data types, NumPy has to determine the appropriate
+dtype for the result of the operation. This step is referred to as *promotion*
+or *finding the common dtype*.
+
+In typical cases, the user does not need to worry about the details of
+promotion, since the promotion step usually ensures that the result will
+either match or exceed the precision of the input.
+
+For example, when the inputs are of the same dtype, the dtype of the result
+matches the dtype of the inputs:
+
+  >>> np.int8(1) + np.int8(1)
+  np.int8(2)
+
+Mixing two different dtypes normally produces a result with the dtype of the
+higher precision input:
+
+  >>> np.int8(4) + np.int64(8)  # 64 > 8
+  np.int64(12)
+  >>> np.float32(3) + np.float16(3)  # 32 > 16
+  np.float32(6.0)
+
+In typical cases, this does not lead to surprises. However, if you work with
+non-default dtypes like unsigned integers and low-precision floats, or if you
+mix NumPy integers, NumPy floats, and Python scalars, some
+details of NumPy promotion rules may be relevant. Note that these detailed
+rules do not always match those of other languages [#hist-reasons]_.
+
+Numerical dtypes come in four "kinds" with a natural hierarchy.
+
+1. unsigned integers (``uint``)
+2. signed integers (``int``)
+3. float (``float``)
+4. complex (``complex``)
+
+In addition to kind, NumPy numerical dtypes also have an associated precision, specified
+in bits. Together, the kind and precision specify the dtype. For example, a
+``uint8`` is an unsigned integer stored using 8 bits.
+
+The result of an operation will always be of an equal or higher kind than
+any of the inputs.
+Furthermore, the result will always have a precision greater than or equal
+to those of the inputs. Already, this can lead to some examples which may
+be unexpected:
+
+1. When mixing floating point numbers and integers, the precision of the
+   integer may force the result to a higher precision floating point. For
+   example, the result of an operation involving ``int64`` and ``float16``
+   is ``float64``.
+2. When mixing unsigned and signed integers with the same precision, the
+   result will have *higher* precision than either input. Additionally,
+   if one of them has 64bit precision already, no higher precision integer
+   is available and for example an operation involving ``int64`` and ``uint64``
+   gives ``float64``.
+
+Please see the `Numerical promotion` section and image below for details
+on both.
+
+Detailed behavior of Python scalars
+-----------------------------------
+Since NumPy 2.0 [#NEP50]_, an important point in our promotion rules is
+that although operations involving two NumPy dtypes never lose precision,
+operations involving a NumPy dtype and a Python scalar (``int``, ``float``,
+or ``complex``) *can* lose precision. For instance, it is probably intuitive
+that the result of an operation between a Python integer and a NumPy integer
+should be a NumPy integer. However, Python integers have arbitrary precision
+whereas all NumPy dtypes have fixed precision, so the arbitrary precision
+of Python integers cannot be preserved.
+
+More generally, NumPy considers the "kind" of Python scalars, but ignores
+their precision when determining the result dtype. This is often convenient.
+For instance, when working with arrays of a low precision dtype, it is usually
+desirable for simple operations with Python scalars to preserve the dtype.
+
+  >>> arr_float32 = np.array([1, 2.5, 2.1], dtype="float32")
+  >>> arr_float32 + 10.0  # undesirable to promote to float64
+  array([11. , 12.5, 12.1], dtype=float32)
+  >>> arr_int16 = np.array([3, 5, 7], dtype="int16")
+  >>> arr_int16 + 10  # undesirable to promote to int64
+  array([13, 15, 17], dtype=int16)
+
+In both cases, the result precision is dictated by the NumPy dtype.
+Because of this, ``arr_float32 + 3.0`` behaves the same as
+``arr_float32 + np.float32(3.0)``, and ``arr_int16 + 10`` behaves as
+``arr_int16 + np.int16(10.)``.
+
+As another example, when mixing NumPy integers with a Python ``float``
+or ``complex``, the result always has type ``float64`` or ``complex128``:
+
+  >>> np.int16(1) + 1.0
+  np.float64(2.0)
+
+However, these rules can also lead to surprising behavior when working with
+low precision dtypes.
+
+First, since the Python value is converted to a NumPy one before the operation
+can be performed, operations can fail with an error when the result seems
+obvious. For instance, ``np.int8(1) + 1000`` cannot continue because ``1000``
+exceeds the maximum value of an ``int8``. When the Python scalar
+cannot be coerced to the NumPy dtype, an error is raised:
+
+  >>> np.int8(1) + 1000
+  Traceback (most recent call last):
+  ...
+  OverflowError: Python integer 1000 out of bounds for int8
+  >>> np.int64(1) * 10**100
+  Traceback (most recent call last):
+  ...
+  OverflowError: Python int too large to convert to C long
+  >>> np.float32(1) + 1e300
+  np.float32(inf)
+  ... RuntimeWarning: overflow encountered in cast
+
+Second, since the Python float or integer precision is always ignored, a low
+precision NumPy scalar will keep using its lower precision unless explicitly
+converted to a higher precision NumPy dtype or Python scalar (e.g.
+via ``int()``, ``float()``, or ``scalar.item()``). This lower precision may
+be detrimental to some calculations or lead to incorrect results, especially
+in the case of integer overflows:
+
+  >>> np.int8(100) + 100  # the result exceeds the capacity of int8
+  np.int8(-56)
+  ... RuntimeWarning: overflow encountered in scalar add
+
+Note that NumPy warns when overflows occur for scalars, but not for arrays;
+e.g., ``np.array(100, dtype="uint8") + 100`` will *not* warn.
+
+Numerical promotion
+-------------------
+
+The following image shows the numerical promotion rules with the kinds
+on the vertical axis and the precision on the horizontal axis.
+
+.. figure:: figures/nep-0050-promotion-no-fonts.svg
+    :figclass: align-center
+
+The input dtype with the higher kind determines the kind of the result dtype.
+The result dtype has a precision as low as possible without appearing to the
+left of either input dtype in the diagram.
+
+Note the following specific rules and observations:
+1. When a Python ``float`` or ``complex`` interacts with a NumPy integer
+   the result will be ``float64`` or ``complex128`` (yellow border).
+   NumPy booleans will also be cast to the default integer.[#default-int]
+   This is not relevant when additionally NumPy floating point values are
+   involved.
+2. The precision is drawn such that ``float16 < int16 < uint16`` because
+   large ``uint16`` do not fit ``int16`` and large ``int16`` will lose
+   precision when stored in a ``float16``.
+   This pattern however is broken since NumPy always considers ``float64``
+   and ``complex128`` to be acceptable promotion results for any integer
+   value.
+3. A special case is that NumPy promotes many combinations of signed and
+   unsigned integers to ``float64``.  A higher kind is used here because no
+   signed integer dtype is sufficiently precise to hold a ``uint64``.
+
+
+Exceptions to the general promotion rules
+-----------------------------------------
+
+In NumPy, promotion refers to what specific functions do with the result,
+and in some cases this means that NumPy may deviate from what
+`np.result_type` would give.
+
+Behavior of ``sum`` and ``prod``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+**``np.sum`` and ``np.prod``:** Will always return the default integer type
+when summing over integer values (or booleans). This is usually an ``int64``.
+The reason for this is that integer summations are otherwise very likely
+to overflow and give confusing results.
+This rule also applies to the underlying ``np.add.reduce`` and
+``np.multiply.reduce``.
+
+Notable behavior with NumPy or Python integer scalars
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+NumPy promotion refers to the result dtype and operation precision,
+but the operation will sometimes dictate that result.
+Division always returns floating point values and comparison always booleans.
+
+This leads to what may appear as "exceptions" to the rules:
+* NumPy comparisons with Python integers or mixed precision integers always
+  return the correct result. The inputs will never be cast in a way which
+  loses precision.
+* Equality comparisons between types which cannot be promoted will be
+  considered all ``False`` (equality) or all ``True`` (not-equal).
+* Unary math functions like ``np.sin`` that always return floating point
+  values, accept any Python integer input by converting it to ``float64``.
+* Division always returns floating point values and thus also allows divisions
+  between any NumPy integer with any Python integer value by casting both
+  to ``float64`` (see the short example below).
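+
+A short illustration of the first and last of these points (outputs as
+printed by NumPy 2 on a typical 64-bit platform):
+
+  >>> np.int8(1) == 1000  # compared exactly; 1000 is never cast to int8
+  np.False_
+  >>> np.uint8(3) / 1000  # both operands are cast to float64
+  np.float64(0.003)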
+
+In principle, some of these exceptions may make sense for other functions.
+Please raise an issue if you feel this is the case.
+
+Promotion of non-numerical datatypes
+------------------------------------
+
+NumPy extends the promotion to non-numerical types, although in many cases
+promotion is not well defined and simply rejected.
+
+The following rules apply:
+* NumPy byte strings (``np.bytes_``) can be promoted to unicode strings
+  (``np.str_``). However, casting the bytes to unicode will fail for
+  non-ascii characters.
+* For some purposes NumPy will promote almost any other datatype to strings.
+  This applies to array creation or concatenation.
+* The array constructors like ``np.array()`` will use ``object`` dtype when
+  there is no viable promotion.
+* Structured dtypes can promote when their field names and order match.
+  In that case all fields are promoted individually.
+* NumPy ``timedelta`` can in some cases promote with integers.
+
+.. note::
+    Some of these rules are somewhat surprising, and are being considered for
+    change in the future. However, any backward-incompatible changes have to
+    be weighed against the risks of breaking existing code. Please raise an
+    issue if you have particular ideas about how promotion should work.
+
+Details of promoted ``dtype`` instances
+---------------------------------------
+The above discussion has mainly dealt with the behavior when mixing different
+DType classes.
+A ``dtype`` instance attached to an array can carry additional information
+such as byte-order, metadata, string length, or exact structured dtype layout.
+
+While the string length or field names of a structured dtype are important,
+NumPy considers byte-order, metadata, and the exact layout of a structured
+dtype as storage details.
+During promotion NumPy does *not* take these storage details into account:
+* Byte-order is converted to native byte-order.
+* Metadata attached to the dtype may or may not be preserved.
+* Resulting structured dtypes will be packed (but aligned if inputs were).
+
+This is the best behavior for most programs where storage details
+are not relevant to the final results and where the use of incorrect byte-order
+could drastically slow down evaluation.
+
+
+.. [#hist-reasons]: To a large degree, this may just be for choices made early
+   on in NumPy's predecessors.  For more details, see `NEP 50 `.
+
+.. [#NEP50]: See also `NEP 50 ` which changed the rules for NumPy 2.0.
+   Previous versions of NumPy would sometimes return higher precision results
+   based on the input value of Python scalars.
+   Further, previous versions of NumPy would typically ignore the higher
+   precision of NumPy scalars or 0-D arrays for promotion purposes.
+
+.. [#default-int]: The default integer is marked as ``int64`` in the schema
arrays.ndarray arrays.scalars arrays.dtypes + arrays.promotion arrays.nditer arrays.classes maskedarray diff --git a/doc/source/reference/figures/nep-0050-promotion-no-fonts.svg b/doc/source/reference/figures/nep-0050-promotion-no-fonts.svg new file mode 100644 index 000000000000..579480132b3d --- /dev/null +++ b/doc/source/reference/figures/nep-0050-promotion-no-fonts.svg @@ -0,0 +1,1471 @@ + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From 8f032480e5f9325fddcf811ecaae73061a91f260 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 2 Aug 2024 20:46:07 +0200 Subject: [PATCH 929/980] Apply suggestions from code review Co-authored-by: Nathan Goldbaum --- numpy/_core/tests/test_api.py | 2 +- numpy/_core/tests/test_ufunc.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index cee672bba58c..1ac7a49b3610 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -428,7 +428,7 @@ def test_copyto_cast_safety(): # As a special thing, object is equiv currently: np.copyto(np.arange(3, dtype=object), 3, casting="equiv") - # The following raises an overflow error/givs a warning but not + # The following raises an overflow error/gives a warning but not # type error (due to casting), though: with pytest.raises(OverflowError): np.copyto(np.arange(3), 2**80, casting="safe") diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index bf2a4e3b051e..a605b65079ba 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -620,9 +620,9 @@ def call_ufunc(arr, **kwargs): def test_cast_safety_scalar(self, ufunc): # We test add and equal, because equal has special scalar handling # Note that the "equiv" casting behavior should maybe be considered - # a current implementation. + # a current implementation detail. 
         with pytest.raises(TypeError):
-            # The loop picked is integral, which is not safe
+            # this picks an integer loop, which is not safe
             ufunc(3., 4., dtype=int, casting="safe")
 
         with pytest.raises(TypeError):

From c5e3766769c6b7ea555fab5f554032e50b10294f Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Fri, 2 Aug 2024 21:01:45 +0200
Subject: [PATCH 930/980] MAINT: Rename variables and at least only duplicate cleanup

---
 numpy/_core/src/multiarray/multiarraymodule.c | 18 +++++++++---------
 numpy/_core/src/umath/ufunc_object.c          | 17 +++++++----------
 2 files changed, 16 insertions(+), 19 deletions(-)

diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c
index 83fc505a97e6..ad1202fbb045 100644
--- a/numpy/_core/src/multiarray/multiarraymodule.c
+++ b/numpy/_core/src/multiarray/multiarraymodule.c
@@ -1957,14 +1957,14 @@ array_copyto(PyObject *NPY_UNUSED(ignored),
     if (src == NULL) {
         goto fail;
     }
-    PyArray_DTypeMeta *dtype = NPY_DTYPE(PyArray_DESCR(src));
-    Py_INCREF(dtype);
-    if (npy_mark_tmp_array_if_pyscalar(src_obj, src, &dtype)) {
+    PyArray_DTypeMeta *DType = NPY_DTYPE(PyArray_DESCR(src));
+    Py_INCREF(DType);
+    if (npy_mark_tmp_array_if_pyscalar(src_obj, src, &DType)) {
         /* The user passed a Python scalar */
         PyArray_Descr *descr = npy_find_descr_for_scalar(
-                src_obj, PyArray_DESCR(src), dtype,
+                src_obj, PyArray_DESCR(src), DType,
                 NPY_DTYPE(PyArray_DESCR(dst)));
-        Py_DECREF(dtype);
+        Py_DECREF(DType);
         if (descr == NULL) {
             goto fail;
         }
@@ -1975,17 +1975,17 @@ array_copyto(PyObject *NPY_UNUSED(ignored),
         }
     }
     else {
-        Py_DECREF(dtype);
+        Py_DECREF(DType);
     }
 
     if (wheremask_in != NULL) {
         /* Get the boolean where mask */
-        PyArray_Descr *dtype = PyArray_DescrFromType(NPY_BOOL);
-        if (dtype == NULL) {
+        PyArray_Descr *descr = PyArray_DescrFromType(NPY_BOOL);
+        if (descr == NULL) {
             goto fail;
         }
         wheremask = (PyArrayObject *)PyArray_FromAny(wheremask_in,
-                dtype, 0, 0, 0, NULL);
+                descr, 0, 0, 0, NULL);
         if (wheremask == NULL) {
             goto fail;
         }
diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c
index 9aac8d8cf188..364dfa7ced30 100644
--- a/numpy/_core/src/umath/ufunc_object.c
+++ b/numpy/_core/src/umath/ufunc_object.c
@@ -2356,7 +2356,7 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
     if (evil_ndim_mutating_hack) {
         ((PyArrayObject_fields *)out)->nd = 0;
     }
-    // TODO: Clean up multiple cleanup!
+
     if (ufuncimpl == NULL) {
         /* DTypes may currently get filled in fallbacks and XDECREF for error: */
@@ -2372,18 +2372,15 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
      * casting safety could in principle be set to the default same-kind.
* (although this should possibly happen through a deprecation) */ - if (resolve_descriptors(3, ufunc, ufuncimpl, - ops, out_descrs, signature, operation_DTypes, NULL, casting) < 0) { - /* DTypes may currently get filled in fallbacks and XDECREF for error: */ - Py_XDECREF(operation_DTypes[0]); - Py_XDECREF(operation_DTypes[1]); - Py_XDECREF(operation_DTypes[2]); - return NULL; - } - /* DTypes may currently get filled in fallbacks and XDECREF for error: */ + int res = resolve_descriptors(3, ufunc, ufuncimpl, + ops, out_descrs, signature, operation_DTypes, NULL, casting); + Py_XDECREF(operation_DTypes[0]); Py_XDECREF(operation_DTypes[1]); Py_XDECREF(operation_DTypes[2]); + if (res < 0) { + return NULL; + } /* * The first operand and output should be the same array, so they should From da370ad636a81d4c72ab66a84e815e6181bebf73 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 3 Aug 2024 21:00:58 +0200 Subject: [PATCH 931/980] DOC: Fixup promotion doc There were some errors in it with sphinx that I missed. --- doc/source/reference/arrays.promotion.rst | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/doc/source/reference/arrays.promotion.rst b/doc/source/reference/arrays.promotion.rst index 7e19691820f6..cd476815f55c 100644 --- a/doc/source/reference/arrays.promotion.rst +++ b/doc/source/reference/arrays.promotion.rst @@ -146,6 +146,7 @@ The result dtype has a precision as low as possible without appearing to the left of either input dtype in the diagram. Note the following specific rules and observations: + 1. When a Python ``float`` or ``complex`` interacts with a NumPy integer the result will be ``float64`` or ``complex128`` (yellow border). NumPy booleans will also be cast to the default integer.[#default-int] @@ -185,6 +186,7 @@ but the operation will sometimes dictate that result. Division always returns floating point values and comparison always booleans. This leads to what may appear as "exceptions" to the rules: + * NumPy comparisons with Python integers or mixed precision integers always return the correct result. The inputs will never be cast in a way which loses precision. @@ -206,6 +208,7 @@ NumPy extends the promotion to non-numerical types, although in many cases promotion is not well defined and simply rejected. The following rules apply: + * NumPy byte strings (``np.bytes_``) can be promoted to unicode strings (``np.str_``). However, casting the bytes to unicode will fail for non-ascii characters. @@ -243,14 +246,14 @@ are not relevant to the final results and where the use of incorrect byte-order could drastically slow down evaluation. -.. [#hist-reasons]: To a large degree, this may just be for choices made early +.. [#hist-reasons] To a large degree, this may just be for choices made early on in NumPy's predecessors. For more details, see `NEP 50 `. -.. [#NEP50]: See also `NEP 50 ` which changed the rules for NumPy 2.0. - Previous versions of NumPy would sometimes return higher precision results - based on the input value of Python scalars. +.. [#NEP50] See also :ref:`NEP 50 ` which changed the rules for + NumPy 2.0. Previous versions of NumPy would sometimes return higher + precision results based on the input value of Python scalars. Further, previous versions of NumPy would typically ignore the higher precision of NumPy scalars or 0-D arrays for promotion purposes. -.. [#default-int]: The default integer is marked as ``int64`` in the schema +.. 
[#default-int] The default integer is marked as ``int64`` in the schema but is ``int32`` on 32bit platforms. However, normal PCs are 64bit. From 42b58e30fbe00fba9d4d2e735fcc5406d5033dca Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 3 Aug 2024 21:25:53 +0200 Subject: [PATCH 932/980] Add an `n_cleanup` as requested in review --- numpy/_core/src/umath/ufunc_object.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 364dfa7ced30..6bd02b0fec87 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4034,6 +4034,7 @@ resolve_descriptors(int nop, { int retval = -1; NPY_CASTING safety; + int n_cleanup = 0; /* number of original_descrs filled (to XDECREF) */ PyArray_Descr *original_descrs[NPY_MAXARGS]; NPY_UF_DBG_PRINT("Resolving the descriptors\n"); @@ -4070,6 +4071,7 @@ resolve_descriptors(int nop, input_scalars[i] = NULL; } } + n_cleanup = nop; npy_intp view_offset = NPY_MIN_INTP; /* currently ignored */ safety = ufuncimpl->resolve_descriptors_with_scalars( @@ -4115,14 +4117,12 @@ resolve_descriptors(int nop, PyArray_Descr *new_descr = npy_find_descr_for_scalar( input, descr, original_DTypes[i], signature[i]); if (new_descr == NULL) { - nop = i; /* only this much is initialized */ goto finish; } int res = npy_update_operand_for_scalar( &operands[i], input, new_descr, casting); Py_DECREF(new_descr); if (res < 0) { - nop = i; /* only this much is initialized */ goto finish; } @@ -4136,9 +4136,9 @@ resolve_descriptors(int nop, */ original_descrs[i] = PyArray_CastDescrToDType(descr, signature[i]); if (original_descrs[i] == NULL) { - nop = i; /* only this much is initialized */ goto finish; } + n_cleanup += 1; } if (ufuncimpl->resolve_descriptors != &wrapped_legacy_resolve_descriptors) { @@ -4173,7 +4173,7 @@ resolve_descriptors(int nop, retval = 0; finish: - for (int i = 0; i < nop; i++) { + for (int i = 0; i < n_cleanup; i++) { Py_XDECREF(original_descrs[i]); } return retval; From e093c7e608c079c7dc9d463201ee3a5262d00b7d Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Sat, 3 Aug 2024 14:48:10 -0700 Subject: [PATCH 933/980] TYP: Make array shape type variable covariant and bound Fixes #25729 This change allows future changes to the static typing of numpy that modify or only work with certain numbers of dimensions. It also applies the change to subclasses of ndarray and adds tests. It allows users to statically type their array shapes with subtypes of tuple (e.g. NamedTuple) and tuples of int subtypes (e.g. Literal or NewType). 
For a discussion of the merits of TypeVarTuple vs a tuple-bound TypeVar, see the linked PR --- .gitignore | 1 + .../upcoming_changes/26081.improvement.rst | 10 +++++ doc/release/upcoming_changes/README.rst | 2 +- numpy/__init__.pyi | 40 +++++++++---------- numpy/_core/defchararray.pyi | 32 +++++++-------- numpy/_core/records.pyi | 6 +-- numpy/ma/core.pyi | 8 ++-- numpy/ma/mrecords.pyi | 6 +-- numpy/typing/tests/data/fail/shape.pyi | 6 +++ numpy/typing/tests/data/pass/shape.py | 18 +++++++++ numpy/typing/tests/data/reveal/shape.pyi | 15 +++++++ 11 files changed, 94 insertions(+), 50 deletions(-) create mode 100644 doc/release/upcoming_changes/26081.improvement.rst create mode 100644 numpy/typing/tests/data/fail/shape.pyi create mode 100644 numpy/typing/tests/data/pass/shape.py create mode 100644 numpy/typing/tests/data/reveal/shape.pyi diff --git a/.gitignore b/.gitignore index e90cccc46642..127d82c1ef14 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ # Editor temporary/working/backup files # ######################################### +env/ .#* [#]*# *~ diff --git a/doc/release/upcoming_changes/26081.improvement.rst b/doc/release/upcoming_changes/26081.improvement.rst new file mode 100644 index 000000000000..e8389af8c84c --- /dev/null +++ b/doc/release/upcoming_changes/26081.improvement.rst @@ -0,0 +1,10 @@ +``ndarray`` shape-type parameter is now covariant and bound to ``tuple[int, ...]`` +---------------------------------------------------------------------------------- +Static typing for ``ndarray`` is a long-term effort that continues +with this change. It is a generic type with type parameters for +the shape and the data type. Previously, the shape type parameter could be +any value. This change restricts it to a tuple of ints, as one would expect +from using ``ndarray.shape``. Further, the shape-type parameter has been +changed from invariant to covariant. This change also applies to subpytes of +``ndarray``, e.g. ``np.ma.MaskedArray``. See the `typing docs `_ +for more information. \ No newline at end of file diff --git a/doc/release/upcoming_changes/README.rst b/doc/release/upcoming_changes/README.rst index 91b7f7e000a0..51ccd7690eff 100644 --- a/doc/release/upcoming_changes/README.rst +++ b/doc/release/upcoming_changes/README.rst @@ -40,7 +40,7 @@ So for example: ``123.new_feature.rst`` would have the content:: The ``my_new_feature`` option is now available for `my_favorite_function`. To use it, write ``np.my_favorite_function(..., my_new_feature=True)``. -``highlight`` is usually formatted as bulled points making the fragment +``highlight`` is usually formatted as bullet points making the fragment ``* This is a highlight``. 
Note the use of single-backticks to get an internal link (assuming diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8e9c971a1fc6..a0b32c9dbbcd 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1500,10 +1500,8 @@ _DType = TypeVar("_DType", bound=dtype[Any]) _DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) _FlexDType = TypeVar("_FlexDType", bound=dtype[flexible]) -# TODO: Set the `bound` to something more suitable once we -# have proper shape support -_ShapeType = TypeVar("_ShapeType", bound=Any) -_ShapeType2 = TypeVar("_ShapeType2", bound=Any) +_ShapeType_co = TypeVar("_ShapeType_co", covariant=True, bound=tuple[int, ...]) +_ShapeType2 = TypeVar("_ShapeType2", bound=tuple[int, ...]) _NumberType = TypeVar("_NumberType", bound=number[Any]) if sys.version_info >= (3, 12): @@ -1553,7 +1551,7 @@ class _SupportsImag(Protocol[_T_co]): @property def imag(self) -> _T_co: ... -class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): __hash__: ClassVar[None] @property def base(self) -> None | NDArray[Any]: ... @@ -1563,14 +1561,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def size(self) -> int: ... @property def real( - self: ndarray[_ShapeType, dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var] - ) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ... + self: ndarray[_ShapeType_co, dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var] + ) -> ndarray[_ShapeType_co, _dtype[_ScalarType]]: ... @real.setter def real(self, value: ArrayLike) -> None: ... @property def imag( - self: ndarray[_ShapeType, dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var] - ) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ... + self: ndarray[_ShapeType_co, dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var] + ) -> ndarray[_ShapeType_co, _dtype[_ScalarType]]: ... @imag.setter def imag(self, value: ArrayLike) -> None: ... def __new__( @@ -1591,11 +1589,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __array__( self, dtype: None = ..., /, *, copy: None | bool = ... - ) -> ndarray[_ShapeType, _DType_co]: ... + ) -> ndarray[_ShapeType_co, _DType_co]: ... @overload def __array__( self, dtype: _DType, /, *, copy: None | bool = ... - ) -> ndarray[_ShapeType, _DType]: ... + ) -> ndarray[_ShapeType_co, _DType]: ... def __array_ufunc__( self, @@ -1646,12 +1644,12 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __getitem__(self: NDArray[void], key: str) -> NDArray[Any]: ... @overload - def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType, _dtype[void]]: ... + def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType_co, _dtype[void]]: ... @property def ctypes(self) -> _ctypes[int]: ... @property - def shape(self) -> _Shape: ... + def shape(self) -> _ShapeType_co: ... @shape.setter def shape(self, value: _ShapeLike) -> None: ... @property @@ -3786,7 +3784,7 @@ _MemMapModeKind: TypeAlias = L[ "write", "w+", ] -class memmap(ndarray[_ShapeType, _DType_co]): +class memmap(ndarray[_ShapeType_co, _DType_co]): __array_priority__: ClassVar[float] filename: str | None offset: int @@ -3824,7 +3822,7 @@ class memmap(ndarray[_ShapeType, _DType_co]): def __array_finalize__(self, obj: object) -> None: ... 
def __array_wrap__( self, - array: memmap[_ShapeType, _DType_co], + array: memmap[_ShapeType_co, _DType_co], context: None | tuple[ufunc, tuple[Any, ...], int] = ..., return_scalar: builtins.bool = ..., ) -> Any: ... @@ -3927,7 +3925,7 @@ class poly1d: k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., ) -> poly1d: ... -class matrix(ndarray[_ShapeType, _DType_co]): +class matrix(ndarray[_ShapeType_co, _DType_co]): __array_priority__: ClassVar[float] def __new__( subtype, @@ -3963,13 +3961,13 @@ class matrix(ndarray[_ShapeType, _DType_co]): @overload def __getitem__(self: NDArray[void], key: str, /) -> matrix[Any, dtype[Any]]: ... @overload - def __getitem__(self: NDArray[void], key: list[str], /) -> matrix[_ShapeType, dtype[void]]: ... + def __getitem__(self: NDArray[void], key: list[str], /) -> matrix[_ShapeType_co, dtype[void]]: ... def __mul__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... def __rmul__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... - def __imul__(self, other: ArrayLike, /) -> matrix[_ShapeType, _DType_co]: ... + def __imul__(self, other: ArrayLike, /) -> matrix[_ShapeType_co, _DType_co]: ... def __pow__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... - def __ipow__(self, other: ArrayLike, /) -> matrix[_ShapeType, _DType_co]: ... + def __ipow__(self, other: ArrayLike, /) -> matrix[_ShapeType_co, _DType_co]: ... @overload def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... @@ -4065,14 +4063,14 @@ class matrix(ndarray[_ShapeType, _DType_co]): @property def I(self) -> matrix[Any, Any]: ... @property - def A(self) -> ndarray[_ShapeType, _DType_co]: ... + def A(self) -> ndarray[_ShapeType_co, _DType_co]: ... @property def A1(self) -> ndarray[Any, _DType_co]: ... @property def H(self) -> matrix[Any, _DType_co]: ... def getT(self) -> matrix[Any, _DType_co]: ... def getI(self) -> matrix[Any, Any]: ... - def getA(self) -> ndarray[_ShapeType, _DType_co]: ... + def getA(self) -> ndarray[_ShapeType_co, _DType_co]: ... def getA1(self) -> ndarray[Any, _DType_co]: ... def getH(self) -> matrix[Any, _DType_co]: ... diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 59a8356e5f88..f00c68e7ff07 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -16,7 +16,7 @@ from numpy import ( int_, object_, _OrderKACF, - _ShapeType, + _ShapeType_co, _CharDType, _SupportsBuffer, ) @@ -35,7 +35,7 @@ from numpy._core.multiarray import compare_chararrays as compare_chararrays _SCT = TypeVar("_SCT", str_, bytes_) _CharArray = chararray[Any, dtype[_SCT]] -class chararray(ndarray[_ShapeType, _CharDType]): +class chararray(ndarray[_ShapeType_co, _CharDType]): @overload def __new__( subtype, @@ -436,20 +436,20 @@ class chararray(ndarray[_ShapeType, _CharDType]): ) -> _CharArray[bytes_]: ... def zfill(self, width: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ... - def capitalize(self) -> chararray[_ShapeType, _CharDType]: ... - def title(self) -> chararray[_ShapeType, _CharDType]: ... - def swapcase(self) -> chararray[_ShapeType, _CharDType]: ... - def lower(self) -> chararray[_ShapeType, _CharDType]: ... - def upper(self) -> chararray[_ShapeType, _CharDType]: ... - def isalnum(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isalpha(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isdigit(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def islower(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isspace(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... 
- def istitle(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isupper(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isnumeric(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isdecimal(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... + def capitalize(self) -> chararray[_ShapeType_co, _CharDType]: ... + def title(self) -> chararray[_ShapeType_co, _CharDType]: ... + def swapcase(self) -> chararray[_ShapeType_co, _CharDType]: ... + def lower(self) -> chararray[_ShapeType_co, _CharDType]: ... + def upper(self) -> chararray[_ShapeType_co, _CharDType]: ... + def isalnum(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def isalpha(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def isdigit(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def islower(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def isspace(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def istitle(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def isupper(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def isnumeric(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def isdecimal(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... __all__: list[str] diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index e7de3d10c521..d88fb5c7221c 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -16,7 +16,7 @@ from numpy import ( void, _ByteOrder, _SupportsBuffer, - _ShapeType, + _ShapeType_co, _DType_co, _OrderKACF, ) @@ -49,7 +49,7 @@ class record(void): @overload def __getitem__(self, key: list[str]) -> record: ... -class recarray(ndarray[_ShapeType, _DType_co]): +class recarray(ndarray[_ShapeType_co, _DType_co]): # NOTE: While not strictly mandatory, we're demanding here that arguments # for the `format_parser`- and `dtype`-based dtype constructors are # mutually exclusive @@ -114,7 +114,7 @@ class recarray(ndarray[_ShapeType, _DType_co]): @overload def __getitem__(self, indx: str) -> NDArray[Any]: ... @overload - def __getitem__(self, indx: list[str]) -> recarray[_ShapeType, dtype[record]]: ... + def __getitem__(self, indx: list[str]) -> recarray[_ShapeType_co, dtype[record]]: ... @overload def field(self, attr: int | str, val: None = ...) -> Any: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index d6cc0a782c23..826250d4c3a8 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -15,9 +15,7 @@ from numpy import ( angle as angle ) -# TODO: Set the `bound` to something more suitable once we -# have proper shape support -_ShapeType = TypeVar("_ShapeType", bound=Any) +_ShapeType_co = TypeVar("_ShapeType_co", bound=tuple[int, ...], covariant=True) _DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) __all__: list[str] @@ -165,7 +163,7 @@ class MaskedIterator: def __setitem__(self, index, value): ... def __next__(self): ... -class MaskedArray(ndarray[_ShapeType, _DType_co]): +class MaskedArray(ndarray[_ShapeType_co, _DType_co]): __array_priority__: Any def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... def __array_finalize__(self, obj): ... @@ -300,7 +298,7 @@ class MaskedArray(ndarray[_ShapeType, _DType_co]): def __reduce__(self): ... def __deepcopy__(self, memo=...): ... 
-class mvoid(MaskedArray[_ShapeType, _DType_co]):
+class mvoid(MaskedArray[_ShapeType_co, _DType_co]):
     def __new__(
         self,
         data,
diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi
index 264807e05d57..85714420cb64 100644
--- a/numpy/ma/mrecords.pyi
+++ b/numpy/ma/mrecords.pyi
@@ -5,12 +5,10 @@ from numpy.ma import MaskedArray

 __all__: list[str]

-# TODO: Set the `bound` to something more suitable once we
-# have proper shape support
-_ShapeType = TypeVar("_ShapeType", bound=Any)
+_ShapeType_co = TypeVar("_ShapeType_co", covariant=True, bound=tuple[int, ...])
 _DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True)

-class MaskedRecords(MaskedArray[_ShapeType, _DType_co]):
+class MaskedRecords(MaskedArray[_ShapeType_co, _DType_co]):
     def __new__(
         cls,
         shape,
diff --git a/numpy/typing/tests/data/fail/shape.pyi b/numpy/typing/tests/data/fail/shape.pyi
new file mode 100644
index 000000000000..3dd6d14f4222
--- /dev/null
+++ b/numpy/typing/tests/data/fail/shape.pyi
@@ -0,0 +1,6 @@
+from typing import Any
+import numpy as np
+
+# test bounds of _ShapeType_co
+
+np.ndarray[tuple[str, str], Any]  # E: Value of type variable
diff --git a/numpy/typing/tests/data/pass/shape.py b/numpy/typing/tests/data/pass/shape.py
new file mode 100644
index 000000000000..8e2e2faad9a8
--- /dev/null
+++ b/numpy/typing/tests/data/pass/shape.py
@@ -0,0 +1,18 @@
+from typing import Any, NamedTuple
+
+import numpy as np
+from typing_extensions import assert_type
+
+
+# Subtype of tuple[int, int]
+class XYGrid(NamedTuple):
+    x_axis: int
+    y_axis: int
+
+arr: np.ndarray[XYGrid, Any] = np.empty(XYGrid(2, 2))
+
+# Test variance of _ShapeType_co
+def accepts_2d(a: np.ndarray[tuple[int, int], Any]) -> None:
+    return None
+
+accepts_2d(arr)
diff --git a/numpy/typing/tests/data/reveal/shape.pyi b/numpy/typing/tests/data/reveal/shape.pyi
new file mode 100644
index 000000000000..8f8d819cbcea
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/shape.pyi
@@ -0,0 +1,15 @@
+from typing import Any, NamedTuple
+
+import numpy as np
+from typing_extensions import assert_type
+
+
+# Subtype of tuple[int, int]
+class XYGrid(NamedTuple):
+    x_axis: int
+    y_axis: int
+
+arr: np.ndarray[XYGrid, Any]
+
+# Test shape property matches shape typevar
+assert_type(arr.shape, XYGrid)

From 818515b265f2084e909822a83e68ed032a7ac83b Mon Sep 17 00:00:00 2001
From: Christian Heimes
Date: Sun, 4 Aug 2024 08:46:11 +0200
Subject: [PATCH 934/980] Fix building NumPy in FIPS mode

MD5 is an insecure cryptographic hashing algorithm and is therefore
blocked in FIPS mode. NumPy uses MD5 as digest algorithm without any
security requirements. Use the `usedforsecurity=False` flag to tell OpenSSL
that the use of MD5 is okay in FIPS enforcing mode.
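For illustration, a minimal sketch of the pattern (the input bytes here
are just a placeholder, not the actual data `genapi.py` hashes):

    import hashlib

    # MD5 used purely as a fast fingerprint of API definitions, not for
    # security; usedforsecurity=False (Python 3.9+) tells OpenSSL this,
    # so the call is permitted under FIPS mode.
    digest = hashlib.md5(b"api-signature", usedforsecurity=False).hexdigest()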
I implemented the flag in Python 3.9 for exactly this purpose Fixes: #27099 Signed-off-by: Christian Heimes --- numpy/_core/code_generators/genapi.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index bf9c1d74f01b..da2f8f636e59 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -160,7 +160,7 @@ def __str__(self): return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr) def api_hash(self): - m = hashlib.md5() + m = hashlib.md5(usedforsecurity=False) m.update(remove_whitespace(self.return_type)) m.update('\000') m.update(self.name) @@ -533,7 +533,9 @@ def fullapi_hash(api_dicts): a.extend(name) a.extend(','.join(map(str, data))) - return hashlib.md5(''.join(a).encode('ascii')).hexdigest() + return hashlib.md5( + ''.join(a).encode('ascii'), usedforsecurity=False + ).hexdigest() # To parse strings like 'hex = checksum' where hex is e.g. 0x1234567F and # checksum a 128 bits md5 checksum (hex format as well) @@ -555,7 +557,7 @@ def main(): tagname = sys.argv[1] order_file = sys.argv[2] functions = get_api_functions(tagname, order_file) - m = hashlib.md5(tagname) + m = hashlib.md5(tagname, usedforsecurity=False) for func in functions: print(func) ah = func.api_hash() From 8cec566cd9084a8f93b90ed36697c3804b682594 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sun, 4 Aug 2024 11:10:18 -0600 Subject: [PATCH 935/980] DOC: remove incorrect docstring comment --- numpy/_core/src/multiarray/dtypemeta.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index f46e882ec2d1..244b47250786 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -159,9 +159,8 @@ PyArray_ArrFuncs default_funcs = { /* * Internal version of PyArrayInitDTypeMeta_FromSpec. * - * See the documentation of that function for more details. Does not do any - * error checking. - + * See the documentation of that function for more details. + * * Setting priv to a nonzero value indicates that a dtypemeta is being * initialized from inside NumPy, otherwise this function is being called by * the public implementation. 
From 86e3933ec23e2aab6e265f2618ba2afd76f928c3 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Mon, 5 Aug 2024 08:16:59 +1000 Subject: [PATCH 936/980] BLD: cp313 cp313t linux_aarch64 [wheel build] --- tools/ci/cirrus_wheels.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index 8705bd9b9cbd..1a9984568729 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -32,6 +32,12 @@ linux_aarch64_task: - env: CIRRUS_CLONE_SUBMODULES: true CIBW_BUILD: cp312-* + - env: + CIRRUS_CLONE_SUBMODULES: true + CIBW_BUILD: cp313-* + - env: + CIRRUS_CLONE_SUBMODULES: true + CIBW_BUILD: cp313t-* initial_setup_script: | apt update @@ -62,7 +68,7 @@ macosx_arm64_task: CIBW_BUILD: cp310-* cp311 - env: CIRRUS_CLONE_SUBMODULES: true - CIBW_BUILD: cp312-* + CIBW_BUILD: cp312-* cp313-* env: PATH: /usr/local/lib:/usr/local/include:$PATH CIBW_ARCHS: arm64 From 78c7fb8926b98de10d0b18d87c9c34da6c3ce1aa Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Mon, 5 Aug 2024 08:48:52 +1000 Subject: [PATCH 937/980] BLD: cp313t for macos<14 (openblas) [wheel build] --- tools/ci/cirrus_wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index 1a9984568729..6cb0d4ad07cc 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -68,7 +68,7 @@ macosx_arm64_task: CIBW_BUILD: cp310-* cp311 - env: CIRRUS_CLONE_SUBMODULES: true - CIBW_BUILD: cp312-* cp313-* + CIBW_BUILD: cp312-* cp313-* cp313t-* env: PATH: /usr/local/lib:/usr/local/include:$PATH CIBW_ARCHS: arm64 From d382583616aadfe6ebf1fc2d1773e5e2a20317ec Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Mon, 5 Aug 2024 13:23:46 +1000 Subject: [PATCH 938/980] BLD: CIBW_FREE_THREADED_SUPPORT [wheel build] --- tools/ci/cirrus_wheels.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index 6cb0d4ad07cc..d09c47cc2d5e 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -38,6 +38,7 @@ linux_aarch64_task: - env: CIRRUS_CLONE_SUBMODULES: true CIBW_BUILD: cp313t-* + CIBW_FREE_THREADED_SUPPORT: 1 initial_setup_script: | apt update From f636dfd7a90acab0f6277ad1940d65f4c4753e22 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 5 Aug 2024 12:53:03 +0200 Subject: [PATCH 939/980] BUG: Fix repr for integer scalar subclasses The subclass path was untested and thus not taken correctly. 
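A minimal reproducer, mirroring the new test added below:

    import numpy as np

    class my_int16(np.int16):
        pass

    # With the fix, repr() reports the subclass name instead of
    # taking the builtin-scalar path:
    print(repr(my_int16(3)))   # -> my_int16(3)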
--- numpy/_core/src/multiarray/scalartypes.c.src | 4 ++-- numpy/_core/tests/test_scalarinherit.py | 7 +++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index f3f931de33bc..a972c8b78229 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -562,9 +562,9 @@ genint_type_repr(PyObject *self) int num = _typenum_fromtypeobj((PyObject *)Py_TYPE(self), 0); PyObject *repr; - if (num == 0) { + if (num == NPY_NOTYPE) { /* Not a builtin scalar (presumably), just use the name */ - repr = PyUnicode_FromFormat("%S(%S)", Py_TYPE(self)->tp_name, value_string); + repr = PyUnicode_FromFormat("%s(%S)", Py_TYPE(self)->tp_name, value_string); Py_DECREF(value_string); return repr; } diff --git a/numpy/_core/tests/test_scalarinherit.py b/numpy/_core/tests/test_scalarinherit.py index f9c574d5798e..52591215a2e7 100644 --- a/numpy/_core/tests/test_scalarinherit.py +++ b/numpy/_core/tests/test_scalarinherit.py @@ -54,6 +54,13 @@ def test_gh_15395(self): with pytest.raises(TypeError): B1(1.0, 2.0) + def test_int_repr(self): + # Test that integer repr works correctly for subclasses (gh-27106) + class my_int16(np.int16): + pass + + s = repr(my_int16(3)) + assert s == "my_int16(3)" class TestCharacter: def test_char_radd(self): From 4d8c5e02b0479f15bc247fa1bd76e1c4841f00f1 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 5 Aug 2024 18:23:35 +0200 Subject: [PATCH 940/980] DEV: make linter.py runnable from outside the root of the repo This is a minor tweak, which I needed when wrapping another tool around the numpy dev commands. [skip circle] [skip cirrus] [skip actions] --- tools/linter.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tools/linter.py b/tools/linter.py index 0031ff83a479..c5746b518b8e 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -4,10 +4,8 @@ from argparse import ArgumentParser from git import Repo, exc -CONFIG = os.path.join( - os.path.abspath(os.path.dirname(__file__)), - 'lint_diff.ini', -) +CWD = os.path.abspath(os.path.dirname(__file__)) +CONFIG = os.path.join(CWD, 'lint_diff.ini') # NOTE: The `diff` and `exclude` options of pycodestyle seem to be # incompatible, so instead just exclude the necessary files when @@ -23,7 +21,7 @@ class DiffLinter: def __init__(self, branch): self.branch = branch - self.repo = Repo('.') + self.repo = Repo(os.path.join(CWD, '..')) self.head = self.repo.head.commit def get_branch_diff(self, uncommitted = False): From 4910621e57f8cf305545c3addffbc42ce49b1119 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 17:18:55 +0000 Subject: [PATCH 941/980] MAINT: Bump pypa/cibuildwheel from 2.19.2 to 2.20.0 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.19.2 to 2.20.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/7e5a838a63ac8128d71ab2dfd99e4634dd1bca09...bd033a44476646b606efccdd5eed92d5ea1d77ad) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 95a0f8bc8a9a..9bfebd87526e 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -166,7 +166,7 @@ jobs: echo "CIBW_BUILD_FRONTEND=pip; args: --no-build-isolation" >> "$GITHUB_ENV" - name: Build wheels - uses: pypa/cibuildwheel@7e5a838a63ac8128d71ab2dfd99e4634dd1bca09 # v2.19.2 + uses: pypa/cibuildwheel@bd033a44476646b606efccdd5eed92d5ea1d77ad # v2.20.0 env: CIBW_PRERELEASE_PYTHONS: True CIBW_FREE_THREADED_SUPPORT: True From 40ad3e06440a7b8ca5c56846a5b6b7cba724422b Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Mon, 5 Aug 2024 12:27:49 -0700 Subject: [PATCH 942/980] Use the new npyv_loadable_stride_ functions for ldexp and frexp --- numpy/_core/src/umath/fast_loop_macros.h | 28 ------------------- .../umath/loops_exponent_log.dispatch.c.src | 13 +++++++-- 2 files changed, 11 insertions(+), 30 deletions(-) diff --git a/numpy/_core/src/umath/fast_loop_macros.h b/numpy/_core/src/umath/fast_loop_macros.h index 1e19bf19bfbf..ab830d52e9ab 100644 --- a/numpy/_core/src/umath/fast_loop_macros.h +++ b/numpy/_core/src/umath/fast_loop_macros.h @@ -323,34 +323,6 @@ abs_ptrdiff(char *a, char *b) ((abs_ptrdiff(args[1], args[0]) >= (vsize)) || \ ((abs_ptrdiff(args[1], args[0]) == 0)))) -/* - * Avoid using SIMD for very large step sizes for several reasons: - * 1) Supporting large step sizes requires use of i64gather/scatter_ps instructions, - * in which case we need two i64gather instructions and an additional vinsertf32x8 - * instruction to load a single zmm register (since one i64gather instruction - * loads into a ymm register). This is not ideal for performance. - * 2) Gather and scatter instructions can be slow when the loads/stores - * cross page boundaries. - * - * We instead rely on i32gather/scatter_ps instructions which use a 32-bit index - * element. The index needs to be < INT_MAX to avoid overflow. MAX_STEP_SIZE - * ensures this. The condition also requires that the input and output arrays - * should have no overlap in memory. 
- */ -#define IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP \ - ((labs(steps[0]) < MAX_STEP_SIZE) && \ - (labs(steps[1]) < MAX_STEP_SIZE) && \ - (labs(steps[2]) < MAX_STEP_SIZE) && \ - (nomemoverlap(args[0], steps[0], args[2], steps[2], dimensions[0])) && \ - (nomemoverlap(args[1], steps[1], args[2], steps[2], dimensions[0]))) - -#define IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP \ - ((labs(steps[0]) < MAX_STEP_SIZE) && \ - (labs(steps[1]) < MAX_STEP_SIZE) && \ - (labs(steps[2]) < MAX_STEP_SIZE) && \ - (nomemoverlap(args[0], steps[0], args[2], steps[2], dimensions[0])) && \ - (nomemoverlap(args[0], steps[0], args[1], steps[1], dimensions[0]))) - /* * 1) Output should be contiguous, can handle strided input data * 2) Input step should be smaller than MAX_STEP_SIZE for performance diff --git a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src index 4285708fe703..a4acc4437b1b 100644 --- a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src +++ b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src @@ -1350,12 +1350,17 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@) * #TYPE = FLOAT, DOUBLE# * #c = f, # * #C = F, # + * #suffix = f32, f64# */ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_frexp) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { #ifdef SIMD_AVX512_SKX - if (IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP) { + if ((npyv_loadable_stride_@suffix@(steps[0])) && + (npyv_storable_stride_@suffix@(steps[1])) && + (npyv_storable_stride_@suffix@(steps[2])) && + (!is_mem_overlap(args[0], steps[0], args[2], steps[2], dimensions[0])) && + (!is_mem_overlap(args[0], steps[0], args[1], steps[1], dimensions[0]))) { AVX512_SKX_frexp_@TYPE@(args, dimensions, steps); return; } @@ -1370,7 +1375,11 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_ldexp) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { #ifdef SIMD_AVX512_SKX - if (IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP) { + if ((npyv_loadable_stride_@suffix@(steps[0])) && + (npyv_storable_stride_@suffix@(steps[1])) && + (npyv_storable_stride_@suffix@(steps[2])) && + (!is_mem_overlap(args[0], steps[0], args[2], steps[2], dimensions[0])) && + (!is_mem_overlap(args[1], steps[1], args[2], steps[2], dimensions[0]))) { AVX512_SKX_ldexp_@TYPE@(args, dimensions, steps); return; } From 6e03b4fdce36d7abe8784c3cf5b84933814164fe Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 26 Jul 2024 14:39:46 -0600 Subject: [PATCH 943/980] BUG: fix another cast setup in array_assign_subscript --- numpy/_core/src/multiarray/mapping.c | 9 ++++++--- numpy/_core/tests/test_stringdtype.py | 16 +++++++++------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 84a7df0bca23..fc9a2105c1c8 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -2034,6 +2034,7 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) goto fail; } + int allocated_array = 0; if (tmp_arr == NULL) { /* Fill extra op, need to swap first */ tmp_arr = mit->extra_op; @@ -2047,6 +2048,7 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) if (PyArray_CopyObject(tmp_arr, op) < 0) { goto fail; } + allocated_array = 1; } if (PyArray_MapIterCheckIndices(mit) < 0) { @@ -2091,9 +2093,10 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) 
NPY_ARRAYMETHOD_FLAGS transfer_flags; npy_intp itemsize = PyArray_ITEMSIZE(self); - if (PyArray_GetDTypeTransferFunction(1, - itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + if (PyArray_GetDTypeTransferFunction( + 1, itemsize, itemsize, + allocated_array ? PyArray_DESCR(mit->extra_op) : PyArray_DESCR(self), + PyArray_DESCR(self), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index a1b2d9ad286c..637a195ca696 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -495,13 +495,15 @@ def test_fancy_indexing(string_list): sarr = np.array(string_list, dtype="T") assert_array_equal(sarr, sarr[np.arange(sarr.shape[0])]) - # see gh-27003 - for ind in [[0, 1], ...]: - a = np.array(['a'*16, 'b'*16], dtype="T") - b = np.array(['d'*16, 'e'*16], dtype="T") - a[ind] = b - assert_array_equal(a, b) - assert a[0] == 'd'*16 + # see gh-27003 and gh-27053 + for ind in [[True, True], [0, 1], ...]: + for lop in [['a'*16, 'b'*16], ['', '']]: + a = np.array(lop, dtype="T") + rop = ['d'*16, 'e'*16] + for b in [rop, np.array(rop, dtype="T")]: + a[ind] = b + assert_array_equal(a, b) + assert a[0] == 'd'*16 def test_creation_functions(): From 6a0d05cd1aa6bdefe38430a9bc74592d8f7dc40b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 5 Aug 2024 14:16:06 -0600 Subject: [PATCH 944/980] DOC: add todo --- numpy/_core/src/multiarray/mapping.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index fc9a2105c1c8..4a6c1f093769 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -2092,7 +2092,8 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) /* May need a generic copy function (only for refs and odd sizes) */ NPY_ARRAYMETHOD_FLAGS transfer_flags; npy_intp itemsize = PyArray_ITEMSIZE(self); - + // TODO: the heuristic used here to determine the src_dtype might be subtly wrong + // for non-REFCHK user DTypes. See gh-27057 for the prior discussion about this. if (PyArray_GetDTypeTransferFunction( 1, itemsize, itemsize, allocated_array ? 
PyArray_DESCR(mit->extra_op) : PyArray_DESCR(self), From bbcedfc4e9161da613105ce0e74da26fb76a567e Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Mon, 5 Aug 2024 15:22:26 -0700 Subject: [PATCH 945/980] Add MAXSTORE and MAXLOAD for f64 data --- numpy/_core/src/common/simd/avx512/avx512.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/src/common/simd/avx512/avx512.h b/numpy/_core/src/common/simd/avx512/avx512.h index aa6abe256424..2a4a20b2970d 100644 --- a/numpy/_core/src/common/simd/avx512/avx512.h +++ b/numpy/_core/src/common/simd/avx512/avx512.h @@ -11,6 +11,8 @@ // Enough limit to allow us to use _mm512_i32gather_* and _mm512_i32scatter_* #define NPY_SIMD_MAXLOAD_STRIDE32 (0x7fffffff / 16) #define NPY_SIMD_MAXSTORE_STRIDE32 (0x7fffffff / 16) +#define NPY_SIMD_MAXLOAD_STRIDE64 (0x7fffffff / 16) +#define NPY_SIMD_MAXSTORE_STRIDE64 (0x7fffffff / 16) typedef __m512i npyv_u8; typedef __m512i npyv_s8; From a35a5f0134b45781e9446d9b2f9671dab1cb68f3 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Tue, 6 Aug 2024 08:50:02 +1000 Subject: [PATCH 946/980] WHL: pip isolation for cp313t [wheel build] --- tools/ci/cirrus_wheels.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index d09c47cc2d5e..1a517814d98c 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -39,6 +39,7 @@ linux_aarch64_task: CIRRUS_CLONE_SUBMODULES: true CIBW_BUILD: cp313t-* CIBW_FREE_THREADED_SUPPORT: 1 + CIBW_BUILD_FRONTEND: "pip; args: --no-build-isolation" initial_setup_script: | apt update From f3254a40cfbb097a33523935773d935539208157 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Tue, 6 Aug 2024 09:07:26 +1000 Subject: [PATCH 947/980] WHL: macos cp313t [wheel build] --- tools/ci/cirrus_wheels.yml | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index 1a517814d98c..f63274e5af3f 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -12,6 +12,8 @@ build_and_store_wheels: &BUILD_AND_STORE_WHEELS linux_aarch64_task: use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' + env: + CIRRUS_CLONE_SUBMODULES: true compute_engine_instance: image_project: cirrus-images image: family/docker-builder-arm64 @@ -24,19 +26,14 @@ linux_aarch64_task: # single task takes longer than 60 mins (the default time limit for a # cirrus-ci task). 
- env:
-        CIRRUS_CLONE_SUBMODULES: true
         CIBW_BUILD: cp310-*
       - env:
-        CIRRUS_CLONE_SUBMODULES: true
         CIBW_BUILD: cp311-*
       - env:
-        CIRRUS_CLONE_SUBMODULES: true
         CIBW_BUILD: cp312-*
       - env:
-        CIRRUS_CLONE_SUBMODULES: true
         CIBW_BUILD: cp313-*
       - env:
-        CIRRUS_CLONE_SUBMODULES: true
         CIBW_BUILD: cp313t-*
         CIBW_FREE_THREADED_SUPPORT: 1
         CIBW_BUILD_FRONTEND: "pip; args: --no-build-isolation"

   initial_setup_script: |
     apt update
@@ -60,17 +57,21 @@ linux_aarch64_task:

 macosx_arm64_task:
   use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true'
+  env:
+    CIRRUS_CLONE_SUBMODULES: true
   macos_instance:
     matrix:
       image: ghcr.io/cirruslabs/macos-monterey-xcode

   matrix:
     - env:
-        CIRRUS_CLONE_SUBMODULES: true
         CIBW_BUILD: cp310-* cp311
     - env:
-        CIRRUS_CLONE_SUBMODULES: true
-        CIBW_BUILD: cp312-* cp313-* cp313t-*
+        CIBW_BUILD: cp312-* cp313-*
+    - env:
+        CIBW_BUILD: cp313t-*
+        CIBW_FREE_THREADED_SUPPORT: 1
+        CIBW_BUILD_FRONTEND: "pip; args: --no-build-isolation"
   env:
     PATH: /usr/local/lib:/usr/local/include:$PATH
     CIBW_ARCHS: arm64

From a0a7fc1d4041543d808d34f32310aca6abb40345 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Tue, 6 Aug 2024 12:46:57 +0200
Subject: [PATCH 948/980] BUG: Ensure that scalar binops prioritize __array_ufunc__

If array-ufunc is implemented, we must always use it for all
operators (that seems to be the promise).

If __array_function__ is defined we are in the clear w.r.t. recursion
because the object is either an array (can be unpacked, but already
checked earlier now also), or it cannot call the ufunc without
unpacking itself (otherwise it would cause recursion).

There is an oddity about `__array_wrap__`. Rather than trying to do
odd things to deal with it, I added a comment explaining why it doesn't
matter (roughly: don't use our scalar priority if you want to be sure
to get a chance).

---
 numpy/_core/src/multiarray/scalartypes.c.src | 29 +++++++++++++++++---
 numpy/_core/tests/test_multiarray.py         | 12 ++++++++
 numpy/_core/tests/test_scalarmath.py         |  2 +-
 3 files changed, 38 insertions(+), 5 deletions(-)

diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src
index a972c8b78229..689e16730cc0 100644
--- a/numpy/_core/src/multiarray/scalartypes.c.src
+++ b/numpy/_core/src/multiarray/scalartypes.c.src
@@ -194,15 +194,30 @@ find_binary_operation_path(
         PyLong_CheckExact(other) ||
         PyFloat_CheckExact(other) ||
         PyComplex_CheckExact(other) ||
-        PyBool_Check(other)) {
+        PyBool_Check(other) ||
+        PyArray_Check(other)) {
         /*
          * The other operand is ready for the operation already. Must pass on
          * on float/long/complex mainly for weak promotion (NEP 50).
          */
-        Py_INCREF(other);
-        *other_op = other;
+        *other_op = Py_NewRef(other);
         return 0;
     }
+    /*
+     * If other has __array_ufunc__ always use ufunc. If array-ufunc was None
+     * we already deferred. And any custom object with array-ufunc cannot call
+     * our ufuncs without preventing recursion.
+     * It may be nice to avoid double lookup in `BINOP_GIVE_UP_IF_NEEDED`.
+     */
+    PyObject *attr = PyArray_LookupSpecial(other, npy_interned_str.array_ufunc);
+    if (attr != NULL) {
+        Py_DECREF(attr);
+        *other_op = Py_NewRef(other);
+        return 0;
+    }
+    else if (PyErr_Occurred()) {
+        PyErr_Clear();  /* TODO[gh-14801]: propagate crashes during attribute access? */
+    }

     /*
      * Now check `other`.
We want to know whether it is an object scalar @@ -216,7 +231,13 @@ find_binary_operation_path( } if (!was_scalar || PyArray_DESCR(arr)->type_num != NPY_OBJECT) { - /* The array is OK for usage and we can simply forward it + /* + * The array is OK for usage and we can simply forward it. There + * is a theoretical subtlety here: If the other object implements + * `__array_wrap__`, we may ignore that. However, this only matters + * if the other object has the identical `__array_priority__` and + * additionally already deferred back to us. + * (`obj + scalar` and `scalar + obj` are not symmetric.) * * NOTE: Future NumPy may need to distinguish scalars here, one option * could be marking the array. diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 41b0a6213ac8..5843459da752 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4025,6 +4025,18 @@ class LowPriority(np.ndarray): assert res.shape == (3,) assert res[0] == 'result' + @pytest.mark.parametrize("scalar", [ + np.longdouble(1), np.timedelta64(120, 'm')]) + @pytest.mark.parametrize("op", [operator.add, operator.xor]) + def test_scalar_binop_guarantees_ufunc(self, scalar, op): + # Test that __array_ufunc__ will always cause ufunc use even when + # we have to protect some other calls from recursing (see gh-26904). + class SomeClass: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + return "result" + + assert SomeClass() + np.longdouble(1) == "result" + assert np.longdouble(1) + SomeClass() == "result" def test_ufunc_override_normalize_signature(self): # gh-5674 diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 4429e70fe66b..35350b01ef3a 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -27,7 +27,7 @@ floating_types = np.floating.__subclasses__() complex_floating_types = np.complexfloating.__subclasses__() -objecty_things = [object(), None] +objecty_things = [object(), None, np.array(None, dtype=object)] binary_operators_for_scalars = [ operator.lt, operator.le, operator.eq, operator.ne, operator.ge, From ccf67e3bd740ea082a7f4f25971ab073a40c7ff8 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 6 Aug 2024 14:07:43 +0200 Subject: [PATCH 949/980] BLD: update vendored Meson for cross-compilation patches Pulls in PR 16 from our Meson fork (https://github.com/numpy/meson/pull) --- vendored-meson/meson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendored-meson/meson b/vendored-meson/meson index 31161eef3fc8..6f88e485f27b 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit 31161eef3fc8cf0bf834edc1dd29e490fc6d7713 +Subproject commit 6f88e485f27bb0a41d31638f0c55055362e0b1ac From 75f25b223b251e85d1cd9062cb982e8d3321fa3a Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Tue, 6 Aug 2024 17:31:49 +0100 Subject: [PATCH 950/980] BUG: Bump Highway to latest Includes https://github.com/google/highway/commit/8270360a02418dab1de4282a5f7d271e3b7fc6dd which removes final unsafe memory access. 
--- numpy/_core/src/highway | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 0e2f5ac4af3c..5975f5ef76c3 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 0e2f5ac4af3c95a07cd247b8ddc71a5f5bd83318 +Subproject commit 5975f5ef76c3e4364844d869454046f0f8420ef8 From 476bc6b3b4c3d846d1e24e0c0951f345a28f7c13 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Mon, 5 Aug 2024 09:35:34 -0700 Subject: [PATCH 951/980] TYP: Force matrix shape type to be 2D --- .gitignore | 1 - .../upcoming_changes/26081.improvement.rst | 5 +++-- numpy/__init__.pyi | 15 +++++++++------ 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index 127d82c1ef14..e90cccc46642 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,5 @@ # Editor temporary/working/backup files # ######################################### -env/ .#* [#]*# *~ diff --git a/doc/release/upcoming_changes/26081.improvement.rst b/doc/release/upcoming_changes/26081.improvement.rst index e8389af8c84c..bac5c197caa0 100644 --- a/doc/release/upcoming_changes/26081.improvement.rst +++ b/doc/release/upcoming_changes/26081.improvement.rst @@ -5,6 +5,7 @@ with this change. It is a generic type with type parameters for the shape and the data type. Previously, the shape type parameter could be any value. This change restricts it to a tuple of ints, as one would expect from using ``ndarray.shape``. Further, the shape-type parameter has been -changed from invariant to covariant. This change also applies to subpytes of -``ndarray``, e.g. ``np.ma.MaskedArray``. See the `typing docs `_ +changed from invariant to covariant. This change also applies to the subtypes +of ``ndarray``, e.g. ``numpy.ma.MaskedArray``. See the +`typing docs `_ for more information. \ No newline at end of file diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a0b32c9dbbcd..e73d6f16765b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1502,6 +1502,7 @@ _FlexDType = TypeVar("_FlexDType", bound=dtype[flexible]) _ShapeType_co = TypeVar("_ShapeType_co", covariant=True, bound=tuple[int, ...]) _ShapeType2 = TypeVar("_ShapeType2", bound=tuple[int, ...]) +_Shape2DType_co = TypeVar("_Shape2DType_co", covariant=True, bound=tuple[int, int]) _NumberType = TypeVar("_NumberType", bound=number[Any]) if sys.version_info >= (3, 12): @@ -3925,7 +3926,9 @@ class poly1d: k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., ) -> poly1d: ... -class matrix(ndarray[_ShapeType_co, _DType_co]): + + +class matrix(ndarray[_Shape2DType_co, _DType_co]): __array_priority__: ClassVar[float] def __new__( subtype, @@ -3961,13 +3964,13 @@ class matrix(ndarray[_ShapeType_co, _DType_co]): @overload def __getitem__(self: NDArray[void], key: str, /) -> matrix[Any, dtype[Any]]: ... @overload - def __getitem__(self: NDArray[void], key: list[str], /) -> matrix[_ShapeType_co, dtype[void]]: ... + def __getitem__(self: NDArray[void], key: list[str], /) -> matrix[_Shape2DType_co, dtype[void]]: ... def __mul__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... def __rmul__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... - def __imul__(self, other: ArrayLike, /) -> matrix[_ShapeType_co, _DType_co]: ... + def __imul__(self, other: ArrayLike, /) -> matrix[_Shape2DType_co, _DType_co]: ... def __pow__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... 
- def __ipow__(self, other: ArrayLike, /) -> matrix[_ShapeType_co, _DType_co]: ... + def __ipow__(self, other: ArrayLike, /) -> matrix[_Shape2DType_co, _DType_co]: ... @overload def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... @@ -4063,14 +4066,14 @@ class matrix(ndarray[_ShapeType_co, _DType_co]): @property def I(self) -> matrix[Any, Any]: ... @property - def A(self) -> ndarray[_ShapeType_co, _DType_co]: ... + def A(self) -> ndarray[_Shape2DType_co, _DType_co]: ... @property def A1(self) -> ndarray[Any, _DType_co]: ... @property def H(self) -> matrix[Any, _DType_co]: ... def getT(self) -> matrix[Any, _DType_co]: ... def getI(self) -> matrix[Any, Any]: ... - def getA(self) -> ndarray[_ShapeType_co, _DType_co]: ... + def getA(self) -> ndarray[_Shape2DType_co, _DType_co]: ... def getA1(self) -> ndarray[Any, _DType_co]: ... def getH(self) -> matrix[Any, _DType_co]: ... From bb3a2b1e23659aa2d9899e9bba4d7386f058d5f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 17:05:33 +0000 Subject: [PATCH 952/980] MAINT: Bump github/codeql-action from 3.25.15 to 3.26.0 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.15 to 3.26.0. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/afb54ba388a7dca6ecae48f608c4ff05ff4cc77a...eb055d739abdc2e8de2e5f4ba1a8b246daa779aa) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 371c3795969f..467400d99336 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 + uses: github/codeql-action/init@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 + uses: github/codeql-action/autobuild@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 + uses: github/codeql-action/analyze@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index a5ee5c1d1e3b..881d6df0871f 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v2.1.27 + uses: github/codeql-action/upload-sarif@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v2.1.27 with: sarif_file: results.sarif From 3767c7592cae2748720bde09b8b642592741dd46 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 17:05:38 +0000 Subject: [PATCH 953/980] MAINT: Bump actions/upload-artifact from 4.3.5 to 4.3.6 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.5 to 4.3.6. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/89ef406dd8d7e03cfd12d9e0a4a378f454709029...834a144ee995460fba8ed112a2fc961b36a5ec5a) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/cygwin.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 33b77e408d23..adf2c4442a9e 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -62,7 +62,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index a5ee5c1d1e3b..28d94feff6c7 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. 
- name: "Upload artifact" - uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 9bfebd87526e..ce034d24d2ea 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -172,7 +172,7 @@ jobs: CIBW_FREE_THREADED_SUPPORT: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 + - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl @@ -253,7 +253,7 @@ jobs: python -mpip install twine twine check dist/* - - uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 + - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 with: name: sdist path: ./dist/* From 3e4cb2a24d348b05f91fbb58bf6536555b3cab76 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 6 Aug 2024 21:24:06 +0200 Subject: [PATCH 954/980] BUG: Fix missing error return in copyto Could also just return NULL, but right now the earlier one also uses goto fail, so stick with it. Found in the fast-path PR, it seems there are so many `PyErr_Occurred()` checks, that this didn't really register before. --- numpy/_core/src/multiarray/multiarraymodule.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index ad1202fbb045..e02743693212 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1950,6 +1950,7 @@ array_copyto(PyObject *NPY_UNUSED(ignored), PyErr_Format(PyExc_TypeError, "copyto() argument 1 must be a numpy.ndarray, not %s", Py_TYPE(dst_obj)->tp_name); + goto fail; } PyArrayObject *dst = (PyArrayObject *)dst_obj; From 97562c3d2b6a3536330b459829fa635ac6e9dac0 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 7 Aug 2024 08:02:35 +0200 Subject: [PATCH 955/980] Update numpy/_core/tests/test_multiarray.py Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> --- numpy/_core/tests/test_multiarray.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 5843459da752..441d76af9228 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4035,8 +4035,8 @@ class SomeClass: def __array_ufunc__(self, ufunc, method, *inputs, **kw): return "result" - assert SomeClass() + np.longdouble(1) == "result" - assert np.longdouble(1) + SomeClass() == "result" + assert SomeClass() + scalar == "result" + assert scalar + SomeClass() == "result" def test_ufunc_override_normalize_signature(self): # gh-5674 From d27a1f52a60cc1a68c731e0ae15af7553b358958 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 8 Aug 2024 09:13:20 +0300 Subject: [PATCH 956/980] bump scipy-openblas version --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index e134b0dae82e..1e2d5e804df3 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ 
-1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.27.44.3 +scipy-openblas32==0.3.27.44.4 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index f688bfb6eb3a..ebf1a7dbd4dc 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.27.44.3 -scipy-openblas64==0.3.27.44.3 +scipy-openblas32==0.3.27.44.4 +scipy-openblas64==0.3.27.44.4 From 27f1b44025079ed655d3ebc9c08652e2ffed970b Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 8 Aug 2024 09:14:03 +0300 Subject: [PATCH 957/980] update bundled licenses: reflect scipy-openblas, remove libquadmath from windows --- tools/wheels/LICENSE_linux.txt | 4 ++-- tools/wheels/LICENSE_osx.txt | 4 ++-- tools/wheels/LICENSE_win32.txt | 27 +++------------------------ 3 files changed, 7 insertions(+), 28 deletions(-) diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt index a5b5ae5c22e6..021b4b0289e7 100644 --- a/tools/wheels/LICENSE_linux.txt +++ b/tools/wheels/LICENSE_linux.txt @@ -5,7 +5,7 @@ This binary distribution of NumPy also bundles the following software: Name: OpenBLAS -Files: numpy.libs/libopenblas*.so +Files: numpy.libs/libscipy_openblas*.so Description: bundled as a dynamically linked library Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause @@ -41,7 +41,7 @@ License: BSD-3-Clause Name: LAPACK -Files: numpy.libs/libopenblas*.so +Files: numpy.libs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause-Attribution diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt index 1ebd5663d02c..81889131cfa7 100644 --- a/tools/wheels/LICENSE_osx.txt +++ b/tools/wheels/LICENSE_osx.txt @@ -4,7 +4,7 @@ This binary distribution of NumPy also bundles the following software: Name: OpenBLAS -Files: numpy/.dylibs/libopenblas*.so +Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled as a dynamically linked library Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause @@ -40,7 +40,7 @@ License: BSD-3-Clause Name: LAPACK -Files: numpy/.dylibs/libopenblas*.so +Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause-Attribution diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt index f8eaaf1cae25..a2ccce66fbe5 100644 --- a/tools/wheels/LICENSE_win32.txt +++ b/tools/wheels/LICENSE_win32.txt @@ -5,7 +5,7 @@ This binary distribution of NumPy also bundles the following software: Name: OpenBLAS -Files: numpy.libs\libopenblas*.dll +Files: numpy.libs\libscipy_openblas*.dll Description: bundled as a dynamically linked library Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause @@ -41,7 +41,7 @@ License: BSD-3-Clause Name: LAPACK -Files: numpy.libs\libopenblas*.dll +Files: numpy.libs\libscipy_openblas*.dll Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause-Attribution @@ -96,7 +96,7 @@ License: BSD-3-Clause-Attribution Name: GCC runtime library -Files: numpy.libs\libgfortran*.dll +Files: numpy.libs\libscipy_openblas*.dll Description: statically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran License: GPL-3.0-with-GCC-exception @@ -879,24 +879,3 @@ the library. 
If this is what you want to do, use the GNU Lesser General
Public License instead of this License.  But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
-Name: libquadmath
-Files: numpy.libs\libopenb*.dll
-Description: statically linked to files compiled with gcc
-Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
-License: LGPL-2.1-or-later
-
-    GCC Quad-Precision Math Library
-    Copyright (C) 2010-2019 Free Software Foundation, Inc.
-    Written by Francois-Xavier Coudert
-
-    This file is part of the libquadmath library.
-    Libquadmath is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2.1 of the License, or (at your option) any later version.
-
-    Libquadmath is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-    Lesser General Public License for more details.
-    https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html

From 86361ae099f4d0273a820822ddf208d297fe0adf Mon Sep 17 00:00:00 2001
From: mattip
Date: Thu, 8 Aug 2024 09:19:28 +0300
Subject: [PATCH 958/980] add test for issue 27036

---
 numpy/linalg/tests/test_regression.py | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py
index 8cac195aa864..91051c0eca4f 100644
--- a/numpy/linalg/tests/test_regression.py
+++ b/numpy/linalg/tests/test_regression.py
@@ -161,3 +161,18 @@ def test_matrix_rank_rtol_argument(self, rtol):
         x = np.zeros((4, 3, 2))
         res = np.linalg.matrix_rank(x, rtol=rtol)
         assert res.shape == (4,)
+
+    def test_openblas_threading(self):
+        # gh-27036
+        # Test whether matrix multiplication involving a large matrix always
+        # gives the same (correct) answer
+        x = np.arange(500000, dtype=np.float64)
+        src = np.vstack((x, -10*x)).T
+        matrix = np.array([[0, 1], [1, 0]])
+        expected = np.vstack((-10*x, x)).T  # src @ matrix
+        for i in range(200):
+            result = src @ matrix
+            mismatches = (~np.isclose(result, expected)).sum()
+            if mismatches != 0:
+                assert False, ("unexpected result from matmul, "
+                               "probably due to OpenBLAS threading issues")

From 0b2d77940663c408133aa27d1605a0a87471bde1 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Thu, 8 Aug 2024 20:52:47 +0200
Subject: [PATCH 959/980] BUG: Do not accidentally store dtype metadata in
 ``np.save``

We had logic in place to drop (most) metadata, but the change had a
small bug: During saving, we were still using the one with metadata...

Maybe doesn't quite close it, but big enough of an improvement for now,
I think, so

Closes gh-14142

---
 numpy/lib/format.py            |  2 ++
 numpy/lib/tests/test_format.py | 34 ++++++++++++++++------------------
 numpy/lib/tests/test_utils.py  |  2 +-
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 8e14dfe4bcab..a90403459848 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -271,6 +271,8 @@ def dtype_to_descr(dtype):
         warnings.warn("metadata on a dtype is not saved to an npy/npz. "
                       "Use another format (such as pickle) to store it.",
                       UserWarning, stacklevel=2)
+        dtype = new_dtype
+
     if dtype.names is not None:
         # This is a record array. The .descr is fine. 
XXX: parts of the # record array with an empty name, like padding bytes, still get diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 31352864b7e2..bb262e048cba 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -998,32 +998,30 @@ def test_header_growth_axis(): assert len(fp.getvalue()) == expected_header_length -@pytest.mark.parametrize('dt, fail', [ - (np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3', - metadata={'some': 'stuff'})]}), True), - (np.dtype(int, metadata={'some': 'stuff'}), False), - (np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False), +@pytest.mark.parametrize('dt', [ + np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3', + metadata={'some': 'stuff'})]}), + np.dtype(int, metadata={'some': 'stuff'}), + np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), # recursive: metadata on the field of a dtype - (np.dtype({'names': ['a', 'b'], 'formats': [ + np.dtype({'names': ['a', 'b'], 'formats': [ float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]}) - ]}), False) + ]}), ]) @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), reason="PyPy bug in error formatting") -def test_metadata_dtype(dt, fail): +def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) buf = BytesIO() with assert_warns(UserWarning): np.save(buf, arr) buf.seek(0) - if fail: - with assert_raises(ValueError): - np.load(buf) - else: - arr2 = np.load(buf) - # BUG: assert_array_equal does not check metadata - from numpy.lib._utils_impl import drop_metadata - assert_array_equal(arr, arr2) - assert drop_metadata(arr.dtype) is not arr.dtype - assert drop_metadata(arr2.dtype) is arr2.dtype + + # Loading should work (metadata was stripped): + arr2 = np.load(buf) + # BUG: assert_array_equal does not check metadata + from numpy.lib._utils_impl import drop_metadata + assert_array_equal(arr, arr2) + assert drop_metadata(arr.dtype) is not arr.dtype + assert drop_metadata(arr2.dtype) is arr2.dtype diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index e2f72ac90c92..644912d941e3 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -43,7 +43,7 @@ def _compare_dtypes(dt1, dt2): assert dt_m.metadata is None assert dt_m['l1'].metadata is None assert dt_m['l1']['l2'].metadata is None - + # alignment dt = np.dtype([('x', ' Date: Thu, 8 Aug 2024 23:34:38 +0300 Subject: [PATCH 960/980] BLD: use smaller scipy-openblas builds --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 1e2d5e804df3..d2940e2d65bc 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.27.44.4 +scipy-openblas32==0.3.27.44.5 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index ebf1a7dbd4dc..965fdb8faadf 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.27.44.4 -scipy-openblas64==0.3.27.44.4 +scipy-openblas32==0.3.27.44.5 +scipy-openblas64==0.3.27.44.5 From 37aba675c96c1852ecb420fdb47f25900a0665b1 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 8 Aug 2024 23:46:32 +0300 Subject: [PATCH 
961/980] add release note

---
 doc/release/upcoming_changes/27147.performance.rst | 8 ++++++++
 1 file changed, 8 insertions(+)
 create mode 100644 doc/release/upcoming_changes/27147.performance.rst

diff --git a/doc/release/upcoming_changes/27147.performance.rst b/doc/release/upcoming_changes/27147.performance.rst
new file mode 100644
index 000000000000..2cea7780f41c
--- /dev/null
+++ b/doc/release/upcoming_changes/27147.performance.rst
@@ -0,0 +1,8 @@
+* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on
+  benchmarking, there are 5 clusters of performance around these kernels:
+  ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``.
+
+* OpenBLAS on windows is linked without quadmath, simplifying licensing.
+
+* Due to a regression in OpenBLAS on windows, the performance improvements
+  when using multiple threads for OpenBLAS 0.3.26 were reverted.

From a57e45beedb9fa52a09ea45645a46365c3c5a838 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Fri, 9 Aug 2024 15:18:59 -0600
Subject: [PATCH 962/980] ENH: fix thread-unsafe C API usages (#27145)

Ref #26159

See also the CPython HOWTO on this topic:
https://docs.python.org/3.13/howto/free-threading-extensions.html#freethreading-extensions-howto.

The remaining usages of PyDict_GetItem and PyDict_Next are all around
the fields attribute of structured dtypes. I'm pretty sure that
dictionary is effectively frozen after the DType is constructed, so I
don't worry about those uses.

It's not straightforward to write tests for this, I'm just applying
static refactorings in places where the refactoring shouldn't introduce
new reference counting bugs.

* ENH: fix thread-unsafe C API usages

* ENH: use critical sections in einsum

* BUG: fix error handling in loadtxt C code

* revert einsum changes

---
 numpy/_core/src/multiarray/array_coercion.c   | 16 +++++++-------
 numpy/_core/src/multiarray/textreading/rows.c | 21 ++++++++++++++++---
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c
index 69da09875bfb..0cffcc6bab22 100644
--- a/numpy/_core/src/multiarray/array_coercion.c
+++ b/numpy/_core/src/multiarray/array_coercion.c
@@ -6,6 +6,7 @@
 #include <Python.h>

 #include "numpy/npy_3kcompat.h"
+#include "npy_pycompat.h"

 #include "lowlevel_strided_loops.h"
 #include "numpy/arrayobject.h"
@@ -224,24 +225,23 @@ npy_discover_dtype_from_pytype(PyTypeObject *pytype)
     PyObject *DType;

     if (pytype == &PyArray_Type) {
-        DType = Py_None;
+        DType = Py_NewRef(Py_None);
     }
     else if (pytype == &PyFloat_Type) {
-        DType = (PyObject *)&PyArray_PyFloatDType;
+        DType = Py_NewRef((PyObject *)&PyArray_PyFloatDType);
     }
     else if (pytype == &PyLong_Type) {
-        DType = (PyObject *)&PyArray_PyLongDType;
+        DType = Py_NewRef((PyObject *)&PyArray_PyLongDType);
     }
     else {
-        DType = PyDict_GetItem(_global_pytype_to_type_dict,
-                               (PyObject *)pytype);
+        int res = PyDict_GetItemRef(_global_pytype_to_type_dict,
+                                    (PyObject *)pytype, (PyObject **)&DType);

-        if (DType == NULL) {
-            /* the python type is not known */
+        if (res <= 0) {
+            /* the python type is not known or an error was set */
             return NULL;
         }
     }
-    Py_INCREF(DType);
     assert(DType == Py_None || PyObject_TypeCheck(
         DType, (PyTypeObject *)&PyArrayDTypeMeta_Type));
     return (PyArray_DTypeMeta *)DType;
 }
diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c
index 8fe13d0d3532..4ca1cc00e9f7 100644
--- a/numpy/_core/src/multiarray/textreading/rows.c
+++ b/numpy/_core/src/multiarray/textreading/rows.c
@@ -58,13
+58,18 @@ create_conv_funcs( PyObject *key, *value; Py_ssize_t pos = 0; + int error = 0; +#if Py_GIL_DISABLED + Py_BEGIN_CRITICAL_SECTION(converters); +#endif while (PyDict_Next(converters, &pos, &key, &value)) { Py_ssize_t column = PyNumber_AsSsize_t(key, PyExc_IndexError); if (column == -1 && PyErr_Occurred()) { PyErr_Format(PyExc_TypeError, "keys of the converters dictionary must be integers; " "got %.100R", key); - goto error; + error = 1; + break; } if (usecols != NULL) { /* @@ -92,7 +97,8 @@ create_conv_funcs( PyErr_Format(PyExc_ValueError, "converter specified for column %zd, which is invalid " "for the number of fields %zd.", column, num_fields); - goto error; + error = 1; + break; } if (column < 0) { column += num_fields; @@ -102,11 +108,20 @@ create_conv_funcs( PyErr_Format(PyExc_TypeError, "values of the converters dictionary must be callable, " "but the value associated with key %R is not", key); - goto error; + error = 1; + break; } Py_INCREF(value); conv_funcs[column] = value; } +#if Py_GIL_DISABLED + Py_END_CRITICAL_SECTION(); +#endif + + if (error) { + goto error; + } + return conv_funcs; error: From 10cca72cc1d48eb130db9475a7f475e418032e64 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 10 Aug 2024 10:08:43 -0600 Subject: [PATCH 963/980] MAINT: Bump pythoncapi-compat version. --- numpy/_core/src/common/pythoncapi-compat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 01341acbbef0..ea1f7f6eac63 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 01341acbbef0ca85cf2fa31b63307ddf4d9a87fb +Subproject commit ea1f7f6eac63ff401937515638252402ff33dccb From 75d34312c830b7cb05612495e9ce0ed7f162927b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 21 Jul 2024 12:34:24 -0600 Subject: [PATCH 964/980] REL: Prepare for the NumPy 2.1.0rc1 release [wheel build] - Update .mailmap - Create 2.1.0-changelog.rst - Update 2.1.0-notes.rst - Update pyproject.toml - Delete release fragments --- .mailmap | 119 ++-- doc/changelog/2.1.0-changelog.rst | 577 ++++++++++++++++++ .../upcoming_changes/12150.improvement.rst | 5 - .../upcoming_changes/26081.improvement.rst | 11 - doc/release/upcoming_changes/26103.c_api.rst | 15 - .../upcoming_changes/26268.expired.rst | 1 - doc/release/upcoming_changes/26285.change.rst | 13 - .../upcoming_changes/26285.performance.rst | 5 - .../upcoming_changes/26292.new_feature.rst | 1 - doc/release/upcoming_changes/26313.change.rst | 2 - .../upcoming_changes/26388.performance.rst | 3 - .../upcoming_changes/26452.deprecation.rst | 4 - .../upcoming_changes/26501.new_feature.rst | 2 - .../upcoming_changes/26579.new_function.rst | 6 - .../upcoming_changes/26580.new_feature.rst | 1 - .../upcoming_changes/26611.expired.rst | 2 - .../upcoming_changes/26611.new_feature.rst | 2 - .../upcoming_changes/26656.improvement.rst | 5 - .../upcoming_changes/26724.new_feature.rst | 7 - .../upcoming_changes/26750.improvement.rst | 12 - doc/release/upcoming_changes/26766.change.rst | 2 - doc/release/upcoming_changes/26842.c_api.rst | 5 - .../upcoming_changes/26846.improvement.rst | 6 - doc/release/upcoming_changes/26908.c_api.rst | 8 - .../upcoming_changes/26981.new_feature.rst | 9 - .../upcoming_changes/27076.deprecation.rst | 3 - doc/release/upcoming_changes/27091.change.rst | 24 - .../upcoming_changes/27147.performance.rst | 8 - doc/source/release/2.1.0-notes.rst | 301 ++++++++- 
pyproject.toml | 3 +- 30 files changed, 943 insertions(+), 219 deletions(-) create mode 100644 doc/changelog/2.1.0-changelog.rst delete mode 100644 doc/release/upcoming_changes/12150.improvement.rst delete mode 100644 doc/release/upcoming_changes/26081.improvement.rst delete mode 100644 doc/release/upcoming_changes/26103.c_api.rst delete mode 100644 doc/release/upcoming_changes/26268.expired.rst delete mode 100644 doc/release/upcoming_changes/26285.change.rst delete mode 100644 doc/release/upcoming_changes/26285.performance.rst delete mode 100644 doc/release/upcoming_changes/26292.new_feature.rst delete mode 100644 doc/release/upcoming_changes/26313.change.rst delete mode 100644 doc/release/upcoming_changes/26388.performance.rst delete mode 100644 doc/release/upcoming_changes/26452.deprecation.rst delete mode 100644 doc/release/upcoming_changes/26501.new_feature.rst delete mode 100644 doc/release/upcoming_changes/26579.new_function.rst delete mode 100644 doc/release/upcoming_changes/26580.new_feature.rst delete mode 100644 doc/release/upcoming_changes/26611.expired.rst delete mode 100644 doc/release/upcoming_changes/26611.new_feature.rst delete mode 100644 doc/release/upcoming_changes/26656.improvement.rst delete mode 100644 doc/release/upcoming_changes/26724.new_feature.rst delete mode 100644 doc/release/upcoming_changes/26750.improvement.rst delete mode 100644 doc/release/upcoming_changes/26766.change.rst delete mode 100644 doc/release/upcoming_changes/26842.c_api.rst delete mode 100644 doc/release/upcoming_changes/26846.improvement.rst delete mode 100644 doc/release/upcoming_changes/26908.c_api.rst delete mode 100644 doc/release/upcoming_changes/26981.new_feature.rst delete mode 100644 doc/release/upcoming_changes/27076.deprecation.rst delete mode 100644 doc/release/upcoming_changes/27091.change.rst delete mode 100644 doc/release/upcoming_changes/27147.performance.rst diff --git a/.mailmap b/.mailmap index 143ad1c4a9b2..b073f12c416b 100644 --- a/.mailmap +++ b/.mailmap @@ -7,53 +7,55 @@ # # This file is up-to-date if the command git log --format="%aN <%aE>" | sort -u # gives no duplicates. 
-@8bitmp3 <19637339+8bitmp3@users.noreply.github.com> -@Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> -@DWesl <22566757+DWesl@users.noreply.github.com> -@Endolith -@GalaxySnail -@Illviljan <14371165+Illviljan@users.noreply.github.com> -@LSchroefl <65246829+LSchroefl@users.noreply.github.com> -@Lbogula -@Lisa <34400837+lyzlisa@users.noreply.github.com> -@Patrick <39380924+xamm@users.noreply.github.com> -@Scian <65375075+hoony6134@users.noreply.github.com> -@Searchingdays -@amagicmuffin <2014wcheng@gmail.com> -@code-review-doctor -@cook-1229 <70235336+cook-1229@users.noreply.github.com> -@dg3192 <113710955+dg3192@users.noreply.github.com> -@ellaella12 -@ellaella12 <120079323+ellaella12@users.noreply.github.com> -@h-vetinari -@h6197627 <44726212+h6197627@users.noreply.github.com> -@jbCodeHub -@juztamau5 -@legoffant <58195095+legoffant@users.noreply.github.com> -@liang3zy22 <35164941+liang3zy22@users.noreply.github.com> -@luzpaz -@luzpaz -@matoro -@mcp292 -@mgunyho <20118130+mgunyho@users.noreply.github.com> -@msavinash <73682349+msavinash@users.noreply.github.com> -@mykykh <49101849+mykykh@users.noreply.github.com> -@partev -@pkubaj -@pmvz -@pojaghi <36278217+pojaghi@users.noreply.github.com> -@pratiklp00 -@sfolje0 -@spacescientist -@stefan6419846 -@stefan6419846 <96178532+stefan6419846@users.noreply.github.com> -@tajbinjohn -@tautaus -@undermyumbrella1 -@xoviat <49173759+xoviat@users.noreply.github.com> -@xoviat <49173759+xoviat@users.noreply.github.com> -@yan-wyb -@yetanothercheer +!8bitmp3 <19637339+8bitmp3@users.noreply.github.com> +!Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> +!DWesl <22566757+DWesl@users.noreply.github.com> +!Endolith +!GalaxySnail +!Illviljan <14371165+Illviljan@users.noreply.github.com> +!LSchroefl <65246829+LSchroefl@users.noreply.github.com> +!Lbogula +!Lisa <34400837+lyzlisa@users.noreply.github.com> +!Patrick <39380924+xamm@users.noreply.github.com> +!Scian <65375075+hoony6134@users.noreply.github.com> +!Searchingdays +!amagicmuffin <2014wcheng@gmail.com> +!code-review-doctor +!cook-1229 <70235336+cook-1229@users.noreply.github.com> +!dg3192 <113710955+dg3192@users.noreply.github.com> +!ellaella12 +!ellaella12 <120079323+ellaella12@users.noreply.github.com> +!h-vetinari +!h6197627 <44726212+h6197627@users.noreply.github.com> +!jbCodeHub +!juztamau5 +!legoffant <58195095+legoffant@users.noreply.github.com> +!liang3zy22 <35164941+liang3zy22@users.noreply.github.com> +!luzpaz +!luzpaz +!matoro +!mcp292 +!mgunyho <20118130+mgunyho@users.noreply.github.com> +!msavinash <73682349+msavinash@users.noreply.github.com> +!mykykh <49101849+mykykh@users.noreply.github.com> +!ogidig5 <82846833+ogidig5@users.noreply.github.com> +!partev +!pkubaj +!pmvz +!pojaghi <36278217+pojaghi@users.noreply.github.com> +!pratiklp00 +!sfolje0 +!spacescientist +!stefan6419846 +!stefan6419846 <96178532+stefan6419846@users.noreply.github.com> +!tajbinjohn +!tautaus +!undermyumbrella1 +!vahidmech +!xoviat <49173759+xoviat@users.noreply.github.com> +!xoviat <49173759+xoviat@users.noreply.github.com> +!yan-wyb +!yetanothercheer Aaron Baecker Adrin Jalali Arun Kota @@ -64,6 +66,7 @@ Aditi Saluja <136131452+salujaditi14@users.noreply.github.com> Andrei Batomunkuev Ajay DS Ajay DS +Ajay Kumar Janapareddi Alan Fontenot Alan Fontenot <36168460+logeaux@users.noreply.github.com> Abdul Muneer @@ -117,6 +120,7 @@ Andrea Sangalli <53617841+and-sang@users.noreply.github.c Andreas Klöckner Andreas Schwab Andrei Kucharavy +Andrej Zhilenkov Andrew Lawson 
Anirudh Subramanian Anne Archibald @@ -127,9 +131,11 @@ Antoine Pitrou Anton Prosekin Anže Starič Arfy Slowy +Arnaud Ma Aron Ahmadia Arun Kota Arun Kota +Arun Pa Arun Palaniappen Arun Persaud Ashutosh Singh @@ -171,6 +177,8 @@ Bui Duc Minh <41239569+Mibu287@users.noreply.github.co Caio Agiani Carl Kleffner Carl Leake +Carlos Henrique Hermanny Moreira da Silva +Carlos Henrique Hermanny Moreira da Silva <121122527+carlosilva10260@users.noreply.github.com> Cédric Hannotier Charles Stern <62192187+cisaacstern@users.noreply.github.com> Chiara Marmo @@ -304,6 +312,7 @@ Giannis Zapantis Guillaume Peillex Jack J. Woehr Jacob M. Casey +Jakob Stevens Haas <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Jaime Fernandez Jaime Fernandez Jaime Fernandez @@ -314,6 +323,8 @@ Jake VanderPlas Jakob Jakobson Jakob Jakobson <43045863+jakobjakobson13@users.noreply.github.com> James Bourbeau +James Joseph Thomas +James Joseph Thomas quotuva James Oliver <46758370+jamesoliverh@users.noreply.github.com> James Webber Jamie Macey @@ -356,8 +367,11 @@ Joseph Fox-Rabinovitz Joshua Himmens Joyce Brum +Joren Hammudoglu Jory Klaverstijn Jory Klaverstijn <63673224+JoryKlaverstijn@users.noreply.github.com> +Julia Poo +Julia Poo <57632293+JuliaPoo@users.noreply.github.com> Julian Taylor Julian Taylor Julian Taylor @@ -371,6 +385,8 @@ Kai Striega Kasia Leszek Kasia Leszek <39829548+katleszek@users.noreply.github.com> Karan Dhir +Karthik Gangula <56480632+gangula-karthik@users.noreply.github.com> +Karthik Kaiplody Keller Meier Kenny Huynh Kevin Granados @@ -395,6 +411,7 @@ Lars Grüter Leona Taric Leona Taric <92495067+LeonaTaric@users.noreply.github.com> Leonardus Chen +Liangyu Zhang Licht Takeuchi Lorenzo Mammana Lillian Zha @@ -472,6 +489,8 @@ Michel Fruchart Miki Watanabe (渡邉 美希) Miles Cranmer +Milica Dančuk +Milica Dančuk love-bees <33499899+love-bees@users.noreply.github.com> Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> @@ -483,6 +502,8 @@ Mukulika Pahari <60316606+Mukulikaa@users.noreply.git Munira Alduraibi Namami Shanker Namami Shanker NamamiShanker +Nathan Goldbaum +Nathan Goldbaum Nathaniel J. Smith Naveen Arunachalam naveenarun Neil Girdhar @@ -514,6 +535,8 @@ Pat Miller patmiller Paul Ivanov Paul Ivanov Paul Jacobson +Paul Juma Otieno +Paul Juma Otieno <103896399+otieno-juma@users.noreply.github.com> Paul Reece Paul YS Lee Paul Pey Lian Lim @@ -644,6 +667,8 @@ Vrinda Narayan Vrinda Narayan Vrinda Narayan <48102157+vrindaaa@users.noreply.github.com> Wansoo Kim +Warrick Ball +Warrick Ball Warren Weckesser Warren Weckesser Weitang Li diff --git a/doc/changelog/2.1.0-changelog.rst b/doc/changelog/2.1.0-changelog.rst new file mode 100644 index 000000000000..99f871a97718 --- /dev/null +++ b/doc/changelog/2.1.0-changelog.rst @@ -0,0 +1,577 @@ + +Contributors +============ + +A total of 110 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* !ogidig5 + +* !partev +* !vahidmech + +* Aaron Meurer +* Adrin Jalali + +* Agriya Khetarpal +* Ajay Kumar Janapareddi + +* Alex Herbert + +* Andras Deak +* Andrej Zhilenkov + +* Andrew Nelson +* Anne Gunn + +* Antony Lee +* Arnaud Ma + +* Arun Kannawadi + +* Arun Pa + +* Bas van Beek +* Ben Woodruff + +* Bruno Oliveira + +* Carlos Henrique Hermanny Moreira da Silva + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christian Lorentzen +* Christopher Sidebottom +* Christopher Titchen + +* Clément Robert +* Cobalt Yang + +* Devyani Chavan + +* Dimitri Papadopoulos Orfanos +* Ebigide Jude + +* Eric Xie + +* Evgeni Burovski +* Fabian Vogt + +* Francisco Sousa + +* GUAN MING + +* Gabriel Fougeron + +* Gagandeep Singh +* Giovanni Del Monte + +* Gonzalo Tornaría + +* Gonçalo Bárias + +* Hugo van Kemenade +* Jakob Stevens Haas + +* Jakob Unfried + +* James Joseph Thomas + +* Jean Lecordier + +* Joren Hammudoglu + +* Joris Van den Bossche +* Julia Poo + +* Justus Magin +* Jyn Spring 琴春 +* KIU Shueng Chuan +* Karthik Gangula + +* Karthik Kaiplody + +* Kevin Sheppard +* Kristoffer Pedersen + +* Leo Singer +* Liang Yan +* Liangyu Zhang + +* Lucas Colley +* Luiz Eduardo Amaral + +* Lysandros Nikolaou +* Marcel Loose + +* Marten van Kerkwijk +* Mateusz Sokół +* Matt Haberland +* Matt Thompson + +* Matthew Roeschke + +* Matthew Thompson + +* Matthias Bussonnier +* Matti Picus +* Melissa Weber Mendonça +* Milica Dančuk + +* Moritz Schreiber + +* Nathan Goldbaum +* Olivier Grisel +* Patrick J. Roddy + +* Paul Juma Otieno + +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Raquel Braunschweig + +* Robert Kern +* Rohit Goswami +* Romain Geissler + +* Ross Barnowski +* Rostan Tabet + +* Sam Morley + +* Sayed Adel +* Sean Cheah +* Sebastian Berg +* Serge Guelton +* Slobodan + +* Stefan van der Walt +* Thomas A Caswell +* Thomas Li +* Timo Röhling + +* Tsvika Shapira + +* Tuhin Sharma + +* Tyler Reddy +* Victor Eijkhout + +* Warren Weckesser +* Warrick Ball +* Xiangyi Wang + +* Yair Chuchem + +* Yang Liu + +* Yannik Wicke + +* Yevhen Amelin + +* Yuki K + +Pull requests merged +==================== + +A total of 455 pull requests were merged for this release. + +* `#12150 `__: ENH: When histogramming data with integer dtype, force bin width... +* `#24448 `__: TST: add some tests of np.log for complex input. +* `#25704 `__: DOC: quantile: correct/simplify documentation +* `#25705 `__: DOC: Add documentation explaining our promotion rules +* `#25781 `__: ENH: Convert fp32 sin/cos from C universal intrinsics to C++... 
+* `#25908 `__: ENH: Add center/ljust/rjust/zfill ufuncs for unicode and bytes +* `#25913 `__: NEP: NEP 55 updates and add @mhvk as an author +* `#25963 `__: BUG: Fix bug in numpy.pad() +* `#25964 `__: CI: fix last docbuild warnings +* `#25970 `__: MAINT: Prepare main for NumPy 2.1.0 development +* `#25971 `__: DOC: Fix a note section markup in ``dtype.rst`` +* `#25972 `__: DOC: Fix module setting of ``MaskedArray`` +* `#25974 `__: BUG: Raise error for negative-sized fixed-width dtype +* `#25975 `__: BUG: Fixes np.put receiving empty array causes endless loop +* `#25981 `__: BLD: push a tag builds a wheel +* `#25985 `__: BLD: omit pp39-macosx_arm64 from matrix +* `#25988 `__: DOC: Remove unused parameter description +* `#25990 `__: CI: clean up some unused `choco install` invocations +* `#25995 `__: CI: don't use ``fetch-tags`` in wheel build jobs +* `#25999 `__: BUG: fix kwarg handling in assert_warn [skip cirrus][skip azp] +* `#26000 `__: BUG: Filter out broken Highway platform +* `#26003 `__: MAINT: Bump pypa/cibuildwheel from 2.16.5 to 2.17.0 +* `#26005 `__: DOC: indicate stringdtype support in docstrings for string operations +* `#26006 `__: TST: remove usage of ProcessPoolExecutor in stringdtype tests +* `#26007 `__: MAINT: Remove sdist task from pavement.py +* `#26011 `__: DOC: mention the ``exceptions`` namespace in the 2.0.0 release... +* `#26012 `__: ENH: install StringDType promoter for add +* `#26014 `__: MAINT: remove the now-unused ``NPY_NO_SIGNAL`` +* `#26015 `__: MAINT: remove now-unused ``NPY_USE_C99_FORMAT`` +* `#26016 `__: MAINT: handle ``NPY_ALLOW_THREADS`` and related build option... +* `#26017 `__: MAINT: avoid use of flexible array member in public header +* `#26024 `__: BUG: raise error trying to coerce object arrays containing timedelta64('NaT')... +* `#26025 `__: BUG: fix reference count leak in __array__ internals +* `#26027 `__: BUG: add missing error handling in string to int cast internals +* `#26033 `__: MAINT: Remove partition and split-like functions from numpy.strings +* `#26045 `__: ENH: Optimize np.power for integer type +* `#26055 `__: ENH: Optimize np.power(x, 2) for double and float type +* `#26063 `__: MAINT,API: Const qualify some new API (mostly new DType API) +* `#26064 `__: MAINT: Make PyArrayMultiIterObject struct "smaller" +* `#26066 `__: BUG: Allow the new string dtype summation to work +* `#26067 `__: DOC: note stringdtype output support in np.strings docstrings +* `#26070 `__: DOC clarifications on debugging numpy +* `#26071 `__: BUG: fix logic error in stringdtype maximum/minimum ufunc +* `#26080 `__: BUG: adapt cython files to new complex declarations +* `#26081 `__: TYP: Make array _ShapeType bound and covariant +* `#26082 `__: ENH: Add partition/rpartition ufunc for string dtypes +* `#26083 `__: MAINT: Bump actions/cache from 4.0.1 to 4.0.2 +* `#26089 `__: TYP: Adjust typing for ``np.random.integers`` and ``np.random.randint`` +* `#26090 `__: API: Require reduce promoters to start with None to match +* `#26095 `__: MAINT: Bump actions/dependency-review-action from 4.1.3 to 4.2.3 +* `#26097 `__: DOC: Mention ``copy=True`` for ``__array__`` method in the migration... 
+* `#26099 `__: DOC: fix typo in doc/source/user/absolute_beginners.rst +* `#26103 `__: API: Default to hidden visibility for API tables +* `#26105 `__: MAINT: install all-string promoter for multiply +* `#26108 `__: MAINT: Remove unnecessarily defensive code from dlpack deleter +* `#26112 `__: TST: fix incorrect dtype in test +* `#26113 `__: BLD: Do not use -O3 flag when building in debug mode +* `#26116 `__: ENH: inherit numerical dtypes from abstract ones. +* `#26119 `__: BUG: fix reference counting error in stringdtype setup +* `#26123 `__: BUG: update pocketfft to unconditionaly disable use of aligned_alloc +* `#26125 `__: DOC: Bump pydata-sphinx-theme version +* `#26128 `__: DOC: Update absolute_beginners.rst +* `#26129 `__: MAINT: add missing noexcept clauses +* `#26130 `__: ENH: Optimize performance of np.atleast_1d +* `#26133 `__: MAINT: Bump actions/dependency-review-action from 4.2.3 to 4.2.4 +* `#26134 `__: CI, BLD: Push NumPy's Emscripten/Pyodide wheels nightly to Anaconda.org... +* `#26135 `__: BUG: masked array division should ignore all FPEs in mask calculation +* `#26136 `__: BUG: fixed datetime64[ns] conversion issue in numpy.vectorize,... +* `#26138 `__: MAINT: Bump actions/setup-python from 5.0.0 to 5.1.0 +* `#26139 `__: MAINT: Bump actions/dependency-review-action from 4.2.4 to 4.2.5 +* `#26142 `__: BUG,MAINT: Fix __array__ bugs and simplify code +* `#26147 `__: BUG: introduce PyArray_SafeCast to fix issues around stringdtype... +* `#26149 `__: MAINT: Escalate import warning to an import error +* `#26151 `__: BUG: Fix test_impossible_feature_enable failing without BASELINE_FEAT +* `#26155 `__: NEP: add NEP 56 mailing list resolution +* `#26160 `__: ENH: Improve performance of np.broadcast_arrays and np.broadcast_shapes +* `#26162 `__: BUG: Infinite Loop in numpy.base_repr +* `#26168 `__: DOC: mention np.lib.NumPyVersion in the 2.0 migration guide +* `#26172 `__: DOC, TST: make ``numpy.version`` officially public +* `#26174 `__: MAINT: Fix failure in routines.version.rst +* `#26182 `__: DOC: Update absolute_beginners.rst +* `#26185 `__: MAINT: Update Pyodide to 0.25.1 +* `#26187 `__: TST: Use platform.machine() for improved portability on riscv64 +* `#26189 `__: MNT: use pythoncapi_compat.h in npy_compat.h +* `#26190 `__: BUG: fix reference counting error in wrapping_method_resolve_descriptors +* `#26207 `__: TST: account for immortal objects in test_iter_refcount +* `#26210 `__: API: Readd ``np.bool_`` typing stub +* `#26212 `__: BENCH: Add benchmarks for np.power(x,2) and np.power(x,0.5) +* `#26213 `__: MNT: try updating pythoncapi-compat +* `#26215 `__: API: Enforce one copy for ``__array__`` when ``copy=True`` +* `#26219 `__: ENH: Enable RVV CPU feature detection +* `#26222 `__: MAINT: Drop Python 3.9 +* `#26227 `__: MAINT: utilize ufunc API const correctness internally +* `#26229 `__: TST: skip limited API test on nogil python build +* `#26232 `__: MAINT: fix typo in _add_newdoc_ufunc docstring +* `#26235 `__: Update numpy.any documentation example +* `#26237 `__: MAINT: Update ``array-api-tests`` job +* `#26239 `__: DOC: add versionadded for copy keyword in np.asarray docstring +* `#26241 `__: DOC: Fixup intp/uintp documentation for ssize_t/size_t changes +* `#26245 `__: DOC: Update ``__array__`` ``copy`` keyword docs +* `#26246 `__: MNT: migrate PyList_GetItem usages to PyList_GetItemRef +* `#26248 `__: MAINT,BUG: Robust string meson template substitution +* `#26251 `__: MNT: disable the allocator cache for nogil builds +* `#26258 `__: BLD: update to OpenBLAS 
0.3.27 +* `#26260 `__: BUG: Ensure seed sequences are restored through pickling +* `#26261 `__: ENH: introduce a notion of "compatible" stringdtype instances +* `#26263 `__: MAINT: fix typo +* `#26264 `__: MAINT: fix typo in #include example +* `#26267 `__: MAINT: Update URL in nep 0014 - domain change +* `#26268 `__: API: Disallow 0D input arrays in ``nonzero`` +* `#26270 `__: BUG: ensure np.vectorize doesn't truncate fixed-width strings +* `#26273 `__: ENH: Bump Highway to HEAD and remove platform filter +* `#26274 `__: BLD: use install-tags to optionally install tests +* `#26280 `__: ENH: Speedup clip for floating point +* `#26281 `__: BUG: Workaround for Intel Compiler mask conversion bug +* `#26282 `__: MNT: replace _PyDict_GetItemStringWithError with PyDict_GetItemStringRef +* `#26284 `__: TST: run the smoke tests on more python versions +* `#26285 `__: ENH: Decrease wall time of ``ma.cov`` and ``ma.corrcoef`` +* `#26286 `__: BLD: ensure libnpymath and highway static libs use hidden visibility +* `#26292 `__: API: Add ``shape`` and ``copy`` arguments to ``numpy.reshape`` +* `#26294 `__: MNT: disable the coercion cache for the nogil build +* `#26295 `__: CI: add llvm/clang sanitizer tests +* `#26299 `__: MAINT: Pin sphinx to version 7.2.6 +* `#26302 `__: BLD: use newer openblas wheels [wheel build] +* `#26303 `__: DOC: add explanation of dtype to parameter values for np.append +* `#26304 `__: MAINT: address improper error handling and cleanup for ``spin`` +* `#26309 `__: MAINT: Bump actions/upload-artifact from 4.3.1 to 4.3.2 +* `#26311 `__: DOC: Follow-up fixes for new theme +* `#26313 `__: MAINT: Cleanup ``vecdot``'s signature, typing, and importing +* `#26317 `__: BUG: use PyArray_SafeCast in array_astype +* `#26319 `__: BUG: fix spin bench not running on Windows +* `#26320 `__: DOC: Add replacement NEP links in superseded, replaced-by fields +* `#26322 `__: DOC: Documentation and examples for conversion of np.timedelta64... +* `#26324 `__: BUG: Fix invalid constructor in string_fastsearch.h with C++... +* `#26325 `__: TST: Skip Cython test for editable install +* `#26329 `__: MAINT: Bump actions/upload-artifact from 4.3.2 to 4.3.3 +* `#26338 `__: MAINT: update x86-simd-sort to latest +* `#26340 `__: DOC: Added small clarification note, based on discussion in issue... +* `#26347 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.3 to 3.0.4 +* `#26348 `__: NOGIL: Make loop data cache and dispatch cache thread-safe in... +* `#26353 `__: BUG: ensure text padding ufuncs handle stringdtype nan-like nulls +* `#26354 `__: BUG: Fix rfft for even input length. 
+* `#26355 `__: ENH: add support for nan-like null strings in string replace +* `#26359 `__: MAINT: Simplify bugfix for even rfft +* `#26362 `__: MAINT: Bump actions/dependency-review-action from 4.2.5 to 4.3.1 +* `#26363 `__: MAINT: Bump actions/dependency-review-action from 4.3.1 to 4.3.2 +* `#26364 `__: TST: static types are now immortal in the default build too +* `#26368 `__: [NOGIL] thread local promotion state +* `#26369 `__: DOC: fix np.unique release notes [skip cirrus] +* `#26372 `__: BUG: Make sure that NumPy scalars are supported by can_cast +* `#26377 `__: TYP: Fix incorrect type hint for creating a recarray from fromrecords +* `#26378 `__: DOC: Update internal links for generator.rst and related +* `#26384 `__: BUG: Fix incorrect return type of item with length 0 from chararray.__getitem__ +* `#26385 `__: DOC: Updated remaining links in random folder +* `#26386 `__: DOC: Improve example on array broadcasting +* `#26388 `__: BUG: Use Python pickle protocol version 4 for np.save +* `#26391 `__: DOC: Add missing methods to numpy.strings docs +* `#26392 `__: BUG: support nan-like null strings in [l,r]strip +* `#26396 `__: MNT: more gracefully handle spin adding arguments to functions... +* `#26399 `__: DOC: Update INSTALL.rst +* `#26413 `__: DOC: Fix some typos and incorrect markups +* `#26415 `__: MAINT: updated instructions to get MachAr byte pattern +* `#26416 `__: MAINT: Bump ossf/scorecard-action from 2.3.1 to 2.3.3 +* `#26418 `__: DOC: add reference docs for NpyString C API +* `#26419 `__: MNT: clean up references to array_owned==2 case in StringDType +* `#26426 `__: TYP,TST: Bump mypy to 1.10.0 +* `#26428 `__: MAINT: Bump pypa/cibuildwheel from 2.17.0 to 2.18.0 +* `#26429 `__: TYP: npyio: loadtxt: usecols: add None type +* `#26431 `__: TST: skip test_frompyfunc_leaks in the free-threaded build +* `#26432 `__: MAINT: Add some PR prefixes to the labeler. +* `#26436 `__: BUG: fixes for three related stringdtype issues +* `#26441 `__: BUG: int32 and intc should both appear in sctypes +* `#26442 `__: DOC: Adding links to polynomial table. +* `#26443 `__: TST: temporarily pin spin to work around issue in 0.9 release +* `#26444 `__: DOC: Remove outdated authentication instructions +* `#26445 `__: TST: fix xfailed tests on pypy 7.3.16 +* `#26447 `__: TST: attempt to fix intel SDE SIMD CI +* `#26449 `__: MAINT: fix typo +* `#26452 `__: DEP: Deprecate 'fix_imports' flag in numpy.save +* `#26456 `__: ENH: improve the error raised by ``numpy.isdtype`` +* `#26463 `__: TST: add basic free-threaded CI testing +* `#26464 `__: BLD: update vendored-meson to current Meson master (1.4.99) +* `#26469 `__: MAINT: Bump github/codeql-action from 2.13.4 to 3.25.5 +* `#26471 `__: BLD: cp313 [wheel build] +* `#26474 `__: BLD: Make NumPy build reproducibly +* `#26476 `__: DOC: Skip API documentation for numpy.distutils with Python 3.12... 
+* `#26478 `__: DOC: Set default as ``-j 1`` for spin docs and move ``-W`` to SPHINXOPTS +* `#26480 `__: TYP: fix type annotation for ``newbyteorder`` +* `#26481 `__: Improve documentation of numpy.ma.filled +* `#26486 `__: MAINT: Bump github/codeql-action from 3.25.5 to 3.25.6 +* `#26487 `__: MAINT: Bump pypa/cibuildwheel from 2.18.0 to 2.18.1 +* `#26488 `__: DOC: add examples to get_printoptions +* `#26489 `__: DOC: add example to get_include +* `#26492 `__: DOC: fix rng.random example in numpy-for-matlab-users +* `#26501 `__: ENH: Implement DLPack version 1 +* `#26503 `__: TST: work around flaky test on free-threaded build +* `#26504 `__: DOC: Copy-edit numpy 2.0 migration guide. +* `#26505 `__: DOC: update the NumPy Roadmap +* `#26507 `__: MAINT: mark temp elision address cache as thread local +* `#26511 `__: MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 +* `#26512 `__: CI: enable free-threaded wheel builds [wheel build] +* `#26514 `__: MAINT: Avoid gcc compiler warning +* `#26515 `__: MAINT: Fix GCC -Wmaybe-uninitialized warning +* `#26517 `__: DOC: Add missing functions to the migration guide +* `#26519 `__: MAINT: Avoid by-pointer parameter passing for LINEARIZE_DATA_t... +* `#26520 `__: BUG: Fix handling of size=() in Generator.choice when a.ndim... +* `#26524 `__: BUG: fix incorrect error handling for dtype('a') deprecation +* `#26526 `__: BUG: fix assert in PyArry_ConcatenateArrays with StringDType +* `#26529 `__: BUG: ``PyDataMem_SetHandler`` check capsule name +* `#26531 `__: BUG: Fix entry-point of Texinfo docs +* `#26534 `__: BUG: cast missing in PyPy-specific f2py code, pin spin in CI +* `#26537 `__: BUG: Fix F77 ! comment handling +* `#26538 `__: DOC: Update ``gradient`` docstrings +* `#26546 `__: MAINT: Remove redundant print from bug report issue template +* `#26548 `__: BUG: Fix typo in array-wrap code that lead to memory leak +* `#26550 `__: BUG: Make Polynomial evaluation adhere to nep 50 +* `#26552 `__: BUG: Fix in1d fast-path range +* `#26558 `__: BUG: fancy indexing copy +* `#26559 `__: BUG: fix setxor1d when input arrays aren't 1D +* `#26562 `__: MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 +* `#26563 `__: BUG: Fix memory leaks found with valgrind +* `#26564 `__: CI, BLD: Upgrade to Pyodide 0.26.0 for Emscripten/Pyodide CI... +* `#26566 `__: DOC: update ufunc tutorials to use setuptools +* `#26567 `__: BUG: fix memory leaks found with valgrind (next) +* `#26568 `__: MAINT: Unpin pydata-sphinx-theme +* `#26571 `__: DOC: Added web docs for missing ma and strings routines +* `#26572 `__: ENH: Add array API inspection functions +* `#26579 `__: ENH: Add unstack() +* `#26580 `__: ENH: Add copy and device keyword to np.asanyarray to match np.asarray +* `#26582 `__: BUG: weighted nanpercentile, nanquantile and multi-dim q +* `#26585 `__: MAINT: Bump github/codeql-action from 3.25.6 to 3.25.7 +* `#26586 `__: BUG: Fix memory leaks found by valgrind +* `#26589 `__: BUG: catch invalid fixed-width dtype sizes +* `#26594 `__: DOC: Update constants.rst: fix URL redirect +* `#26597 `__: ENH: Better error message for axis=None in ``np.put_along_axis``... +* `#26599 `__: ENH: use size-zero dtype for broadcast-shapes +* `#26602 `__: TST: Re-enable int8/uint8 einsum tests +* `#26603 `__: BUG: Disallow string inputs for ``copy`` keyword in ``np.array``... 
+* `#26604 `__: refguide-check with pytest as a runner +* `#26605 `__: DOC: fix typos in numpy v2.0 documentation +* `#26606 `__: DOC: Update randn() to use rng.standard_normal() +* `#26607 `__: MNT: Reorganize non-constant global statics into structs +* `#26609 `__: DOC: Updated notes and examples for np.insert. +* `#26610 `__: BUG: np.take handle 64-bit indices on 32-bit platforms +* `#26611 `__: MNT: Remove ``set_string_function`` +* `#26614 `__: MAINT: Bump github/codeql-action from 3.25.7 to 3.25.8 +* `#26619 `__: TST: Re-enable ``test_shift_all_bits`` on clang-cl +* `#26626 `__: DOC: add ``getbufsize`` example +* `#26627 `__: DOC: add ``setbufsize`` example +* `#26628 `__: DOC: add ``matrix_transpose`` example +* `#26629 `__: DOC: add ``unique_all`` example +* `#26630 `__: DOC: add ``unique_counts`` example +* `#26631 `__: DOC: add ``unique_inverse`` example +* `#26632 `__: DOC: add ``unique_values`` example +* `#26633 `__: DOC: fix ``matrix_transpose`` doctest +* `#26634 `__: BUG: Replace dots with underscores in f2py meson backend for... +* `#26636 `__: MAINT: Bump actions/dependency-review-action from 4.3.2 to 4.3.3 +* `#26637 `__: BUG: fix incorrect randomized parameterization in bench_linalg +* `#26638 `__: MNT: use reproducible RNG sequences in benchmarks +* `#26639 `__: MNT: more benchmark cleanup +* `#26641 `__: DOC: Update 2.0 migration guide +* `#26644 `__: DOC: Added clean_dirs to spin docs to remove generated folders +* `#26645 `__: DOC: Enable web docs for numpy.trapezoid and add back links +* `#26646 `__: DOC: Update docstring for invert function +* `#26655 `__: CI: modified CI job to test editable install +* `#26658 `__: MAINT: Bump pypa/cibuildwheel from 2.18.1 to 2.19.0 +* `#26662 `__: DOC: add CI and NEP commit acronyms +* `#26664 `__: CI: build and upload free-threaded nightly wheels for macOS +* `#26667 `__: BUG: Adds asanyarray to start of linalg.cross +* `#26670 `__: MAINT: Bump github/codeql-action from 3.25.8 to 3.25.9 +* `#26672 `__: CI: upgrade FreeBSD Cirrus job from FreeBSD 13.2 to 14.0 +* `#26675 `__: CI: Use default llvm on Windows. +* `#26676 `__: MAINT: mark evil_global_disable_warn_O4O8_flag as thread-local +* `#26679 `__: DOC: add ``np.linalg`` examples +* `#26680 `__: remove doctesting from refguide-check, add ``spin check-tutorials`` +* `#26684 `__: MAINT: Bump pypa/cibuildwheel from 2.19.0 to 2.19.1 +* `#26685 `__: MAINT: Bump github/codeql-action from 3.25.9 to 3.25.10 +* `#26686 `__: MAINT: Add comment lost in previous PR. +* `#26691 `__: BUILD: check for scipy-doctest, remove it from requirements +* `#26692 `__: DOC: document workaround for deprecation of dim-2 inputs to ``cross`` +* `#26693 `__: BUG: allow replacement in the dispatch cache +* `#26702 `__: DOC: Added missing See Also sections in Polynomial module +* `#26703 `__: BUG: Handle ``--f77flags`` and ``--f90flags`` for ``meson`` +* `#26706 `__: TST: Skip an f2py module test on Windows +* `#26714 `__: MAINT: Update main after 2.0.0 release. +* `#26716 `__: DOC: Add clarifications np.argpartition +* `#26717 `__: DOC: Mention more error paths and try to consolidate import errors +* `#26721 `__: DOC, MAINT: Turn on version warning banner provided by PyData... 
+* `#26722 `__: DOC: Update roadmap a bit more +* `#26724 `__: ENH: Add Array API 2023.12 version support +* `#26737 `__: DOC: Extend release notes for #26611 +* `#26739 `__: DOC: Update NEPs statuses +* `#26741 `__: DOC: Remove mention of NaN and NAN aliases from constants +* `#26742 `__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions`` +* `#26744 `__: BUG: Fix new DTypes and new string promotion when signature is... +* `#26750 `__: ENH: Add locking to umath_linalg if no lapack is detected at... +* `#26760 `__: TYP: fix incorrect import in ``ma/extras.pyi`` stub +* `#26762 `__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` +* `#26766 `__: ENH: Support integer dtype inputs in rounding functions +* `#26769 `__: BUG: Quantile closest_observation to round to nearest even order +* `#26770 `__: DOC, NEP: Update NEP44 +* `#26771 `__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes +* `#26776 `__: BUG: remove numpy.f2py from excludedimports +* `#26780 `__: MAINT: use an atomic load/store and a mutex to initialize the... +* `#26788 `__: TYP: fix missing ``sys`` import in numeric.pyi +* `#26789 `__: BUG: avoid side-effect of 'include complex.h' +* `#26790 `__: DOC: Update link to Python stdlib random. +* `#26795 `__: BUG: add order to out array of ``numpy.fft`` +* `#26797 `__: BLD: Fix x86-simd-sort build failure on openBSD +* `#26799 `__: MNT: Update dlpack docs and typing stubs +* `#26802 `__: Missing meson pass-through argument +* `#26805 `__: DOC: Update 2.0 migration guide and release note +* `#26808 `__: DOC: Change selected hardlinks to NEPs to intersphinx mappings +* `#26811 `__: DOC: update notes on sign for complex numbers +* `#26812 `__: CI,TST: Fix meson tests needing gfortran [wheel build] +* `#26813 `__: TST: fix 'spin test single_test' for future versions of spin +* `#26814 `__: DOC: Add ``>>> import numpy as np`` stubs everywhere +* `#26815 `__: MAINT: Bump github/codeql-action from 3.25.10 to 3.25.11 +* `#26826 `__: DOC: remove hack to override _add_newdocs_scalars +* `#26827 `__: DOC: AI-Gen examples ctypeslib.as_ctypes_types +* `#26828 `__: DOC: AI generated examples for ma.left_shift. +* `#26829 `__: DOC: AI-Gen examples for ma.put +* `#26830 `__: DOC: AI generated examples for ma.reshape +* `#26831 `__: DOC: AI generated examples for ma.correlate. 
+* `#26833 `__: MAINT: Bump pypa/cibuildwheel from 2.19.1 to 2.19.2 +* `#26841 `__: BENCH: Missing ufunc in benchmarks +* `#26842 `__: BUILD: clean out py2 stuff from npy_3kcompat.h +* `#26846 `__: MAINT: back printoptions with a true context variable +* `#26847 `__: TYP: fix ``ufunc`` method type annotations +* `#26848 `__: TYP: include the ``|`` prefix for ``dtype`` char codes +* `#26849 `__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar`` +* `#26858 `__: TYP: Annotate type aliases as ``typing.TypeAlias`` +* `#26866 `__: MAINT: Bump actions/upload-artifact from 4.3.3 to 4.3.4 +* `#26867 `__: TYP,BUG: fix ``numpy.__dir__`` annotations +* `#26871 `__: TYP: adopt ``typing.LiteralString`` and use more of ``typing.Literal`` +* `#26872 `__: TYP: use ``types.CapsuleType`` on python>=3.13 +* `#26873 `__: TYP: improved ``numpy._array_api_info`` typing +* `#26875 `__: TYP,BUG: Replace ``numpy._typing._UnknownType`` with ``typing.Never`` +* `#26877 `__: BUG: start applying ruff/flake8-implicit-str-concat rules (ISC) +* `#26879 `__: MAINT: start applying ruff/flake8-simplify rules (SIM) +* `#26880 `__: DOC: Fix small incorrect markup +* `#26881 `__: DOC, MAINT: fix typos found by codespell +* `#26882 `__: MAINT: start applying ruff/pyupgrade rules (UP) +* `#26883 `__: BUG: Make issctype always return bool. +* `#26884 `__: MAINT: Remove a redundant import from the generated __ufunc_api.h. +* `#26889 `__: API: Add ``device`` and ``to_device`` to scalars +* `#26891 `__: DOC: Add a note that one should free the proto struct +* `#26892 `__: ENH: Allow use of clip with Python integers to always succeed +* `#26894 `__: MAINT: Bump actions/setup-node from 4.0.2 to 4.0.3 +* `#26895 `__: DOC: Change documentation copyright strings to use a dynamic... +* `#26896 `__: DOC: Change NEP hardlinks to intersphinx mappings. +* `#26897 `__: TYP: type hint ``numpy.polynomial`` +* `#26901 `__: BUG: ``np.loadtxt`` return F_CONTIGUOUS ndarray if row size is... +* `#26902 `__: Apply some ruff/flake8-bugbear rules (B004 and B005) +* `#26903 `__: BUG: Fix off-by-one error in amount of characters in strip +* `#26904 `__: BUG,ENH: Fix generic scalar infinite recursion issues +* `#26905 `__: API: Do not consider subclasses for NEP 50 weak promotion +* `#26906 `__: MAINT: Bump actions/setup-python from 5.1.0 to 5.1.1 +* `#26908 `__: ENH: Provide a hook for gufuncs to process core dimensions. +* `#26913 `__: MAINT: declare that NumPy's C extensions support running without... +* `#26914 `__: API: Partially revert unique with return_inverse +* `#26919 `__: BUG,MAINT: Fix utf-8 character stripping memory access +* `#26923 `__: MAINT: Bump actions/dependency-review-action from 4.3.3 to 4.3.4 +* `#26924 `__: MAINT: Bump github/codeql-action from 3.25.11 to 3.25.12 +* `#26927 `__: TYP: Transparent ``__array__`` shape-type +* `#26928 `__: TYP: Covariant ``numpy.flatiter`` type parameter +* `#26929 `__: TYP: Positional-only dunder binop method parameters +* `#26930 `__: BUG: Fix out-of-bound minimum offset for in1d table method +* `#26931 `__: DOC, BUG: Fix running full test command in docstring +* `#26934 `__: MAINT: add PyArray_ZeroContiguousBuffer helper and use it in... +* `#26935 `__: BUG: fix ``f2py`` tests to work with v2 API +* `#26937 `__: TYP,BUG: Remove ``numpy.cast`` and ``numpy.disp`` from the typing... +* `#26938 `__: TYP,BUG: Fix ``dtype`` type alias specialization issue in ``__init__.pyi`` +* `#26942 `__: TYP: Improved ``numpy.generic`` rich comparison operator type... 
+* `#26943 `__: TYP,BUG: Remove non-existant ``numpy.__git_version__`` in the... +* `#26946 `__: TYP: Add missing typecodes in ``numpy._core.numerictypes.typecodes`` +* `#26950 `__: MAINT: add freethreading_compatible directive to cython build +* `#26953 `__: TYP: Replace ``typing.Union`` with ``|`` in ``numpy._typing`` +* `#26954 `__: TYP: Replace ``typing.Optional[T]`` with ``T | None`` in the... +* `#26964 `__: DOC: Issue template for static typing +* `#26968 `__: MAINT: add a 'tests' install tag to the `numpy._core._simd` extension... +* `#26969 `__: BUG: Fix unicode strip +* `#26972 `__: BUG: Off by one in memory overlap check +* `#26975 `__: TYP: Use ``Final`` and ``LiteralString`` for the constants in... +* `#26980 `__: DOC: add sphinx-copybutton +* `#26981 `__: ENH: add support in f2py to declare gil-disabled support +* `#26983 `__: TYP,BUG: Type annotations for ``numpy.trapezoid`` +* `#26984 `__: TYP,BUG: Fix potentially unresolved typevar in ``median`` and... +* `#26985 `__: BUG: Add object cast to avoid warning with limited API +* `#26989 `__: DOC: fix ctypes example +* `#26991 `__: MAINT: mark scipy-openblas nightly tests as allowed to fail +* `#26992 `__: TYP: Covariant ``numpy.ndenumerate`` type parameter +* `#26993 `__: TYP,BUG: FIx ``numpy.ndenumerate`` annotations for ``object_``... +* `#26996 `__: ENH: Add ``__slots__`` to private (sub-)classes in ``numpy.lib._index_tricks_impl`` +* `#27002 `__: MAINT: Update main after 2.0.1 release. +* `#27008 `__: TYP,BUG: Complete type stubs for ``numpy.dtypes`` +* `#27009 `__: TST, MAINT: Loosen required test precision +* `#27010 `__: DOC: update tutorials link +* `#27011 `__: MAINT: replace PyThread_type_lock with PyMutex on Python >= 3.13.0b3 +* `#27013 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27014 `__: BUG: fix gcd inf +* `#27015 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27017 `__: DOC: Release note for feature added in gh-26908. +* `#27019 `__: TYP: improved ``numpy.array`` type hints for array-like input +* `#27025 `__: DOC: Replace np.matrix in .view() docstring example. +* `#27026 `__: DOC: fix tiny typo +* `#27027 `__: BUG: Fix simd loadable stride logic +* `#27031 `__: DOC: document 'floatmode' and 'legacy' keys from np.get_printoptions'... +* `#27034 `__: BUG: random: Fix edge case of Johnk's algorithm for the beta... +* `#27041 `__: MAINT: Bump github/codeql-action from 3.25.12 to 3.25.14 +* `#27043 `__: CI: unify free-threaded wheel builds with other builds +* `#27046 `__: BUG: random: prevent zipf from hanging when parameter is large. +* `#27047 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27048 `__: BUG: random: Fix long delays/hangs with zipf(a) when a near 1. +* `#27050 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27051 `__: TST: Refactor to consistently use CompilerChecker +* `#27052 `__: TST: fix issues with tests that use numpy.testing.extbuild +* `#27055 `__: MAINT: Bump ossf/scorecard-action from 2.3.3 to 2.4.0 +* `#27056 `__: MAINT: Bump github/codeql-action from 3.25.14 to 3.25.15 +* `#27057 `__: BUG: fix another cast setup in array_assign_subscript +* `#27058 `__: DOC: Add some missing examples for ``np.strings`` methods +* `#27059 `__: ENH: Disable name suggestions on some AttributeErrors +* `#27060 `__: MAINT: linalg: Simplify some linalg gufuncs. +* `#27070 `__: BUG: Bump Highway to latest master +* `#27076 `__: DEP: lib: Deprecate acceptance of float (and more) in bincount. 
+* `#27079 `__: MAINT: 3.9/10 cleanups +* `#27081 `__: CI: Upgrade ``array-api-tests`` +* `#27085 `__: ENH: fixes for warnings on free-threaded wheel builds +* `#27087 `__: ENH: mark the dragon4 scratch space as thread-local +* `#27090 `__: DOC: update np.shares_memory() docs +* `#27091 `__: API,BUG: Fix copyto (and ufunc) handling of scalar cast safety +* `#27094 `__: DOC: Add release note about deprecation introduced in gh-27076. +* `#27095 `__: DOC: Fix indentation of a few release notes. +* `#27096 `__: BUG: Complex printing tests fail on Windows ARM64 +* `#27097 `__: MAINT: Bump actions/upload-artifact from 4.3.4 to 4.3.5 +* `#27098 `__: BUG: add missing error handling in public_dtype_api.c +* `#27102 `__: DOC: Fixup promotion doc +* `#27104 `__: BUG: Fix building NumPy in FIPS mode +* `#27108 `__: DOC: remove incorrect docstring comment +* `#27110 `__: BLD: cp313 cp313t linux_aarch64 [wheel build] +* `#27112 `__: BUG: Fix repr for integer scalar subclasses +* `#27113 `__: DEV: make linter.py runnable from outside the root of the repo +* `#27114 `__: MAINT: Bump pypa/cibuildwheel from 2.19.2 to 2.20.0 +* `#27115 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27117 `__: BUG: Ensure that scalar binops prioritize __array_ufunc__ +* `#27118 `__: BLD: update vendored Meson for cross-compilation patches +* `#27123 `__: BUG: Bump Highway to latest +* `#27124 `__: MAINT: Bump github/codeql-action from 3.25.15 to 3.26.0 +* `#27125 `__: MAINT: Bump actions/upload-artifact from 4.3.5 to 4.3.6 +* `#27127 `__: BUG: Fix missing error return in copyto +* `#27144 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27149 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27162 `__: BLD: use smaller scipy-openblas builds +* `#27166 `__: ENH: fix thread-unsafe C API usages +* `#27173 `__: MAINT: Bump pythoncapi-compat version. diff --git a/doc/release/upcoming_changes/12150.improvement.rst b/doc/release/upcoming_changes/12150.improvement.rst deleted file mode 100644 index f73a6d2aaa28..000000000000 --- a/doc/release/upcoming_changes/12150.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -``histogram`` auto-binning now returns bin sizes >=1 for integer input data ---------------------------------------------------------------------------- -For integer input data, bin sizes smaller than 1 result in spurious empty -bins. This is now avoided when the number of bins is computed using one of the -algorithms provided by `histogram_bin_edges`. diff --git a/doc/release/upcoming_changes/26081.improvement.rst b/doc/release/upcoming_changes/26081.improvement.rst deleted file mode 100644 index bac5c197caa0..000000000000 --- a/doc/release/upcoming_changes/26081.improvement.rst +++ /dev/null @@ -1,11 +0,0 @@ -``ndarray`` shape-type parameter is now covariant and bound to ``tuple[int, ...]`` ----------------------------------------------------------------------------------- -Static typing for ``ndarray`` is a long-term effort that continues -with this change. It is a generic type with type parameters for -the shape and the data type. Previously, the shape type parameter could be -any value. This change restricts it to a tuple of ints, as one would expect -from using ``ndarray.shape``. Further, the shape-type parameter has been -changed from invariant to covariant. This change also applies to the subtypes -of ``ndarray``, e.g. ``numpy.ma.MaskedArray``. See the -`typing docs `_ -for more information. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/26103.c_api.rst b/doc/release/upcoming_changes/26103.c_api.rst deleted file mode 100644 index 9d0d998e2dfc..000000000000 --- a/doc/release/upcoming_changes/26103.c_api.rst +++ /dev/null @@ -1,15 +0,0 @@ -API symbols now hidden but customizable ---------------------------------------- -NumPy now defaults to hide the API symbols it adds to allow all NumPy API -usage. -This means that by default you cannot dynamically fetch the NumPy API from -another library (this was never possible on windows). - -If you are experiencing linking errors related to ``PyArray_API`` or -``PyArray_RUNTIME_VERSION``, you can define the -:c:macro:`NPY_API_SYMBOL_ATTRIBUTE` to opt-out of this change. - -If you are experiencing problems due to an upstream header including NumPy, -the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before -their header and import NumPy yourself based on :ref:`including-the-c-api`. - diff --git a/doc/release/upcoming_changes/26268.expired.rst b/doc/release/upcoming_changes/26268.expired.rst deleted file mode 100644 index 932fdbfae6d7..000000000000 --- a/doc/release/upcoming_changes/26268.expired.rst +++ /dev/null @@ -1 +0,0 @@ -* Scalars and 0D arrays are disallowed for `numpy.nonzero` and `numpy.ndarray.nonzero`. diff --git a/doc/release/upcoming_changes/26285.change.rst b/doc/release/upcoming_changes/26285.change.rst deleted file mode 100644 index d652c58dc799..000000000000 --- a/doc/release/upcoming_changes/26285.change.rst +++ /dev/null @@ -1,13 +0,0 @@ -``ma.corrcoef`` may return a slightly different result ------------------------------------------------------- -A pairwise observation approach is currently used in `ma.corrcoef` to -calculate the standard deviations for each pair of variables. This has been -changed as it is being used to normalise the covariance, estimated using -`ma.cov`, which does not consider the observations for each variable in a -pairwise manner, rendering it unnecessary. The normalisation has been -replaced by the more appropriate standard deviation for each variable, -which significantly reduces the wall time, but will return slightly different -estimates of the correlation coefficients in cases where the observations -between a pair of variables are not aligned. However, it will return the same -estimates in all other cases, including returning the same correlation matrix -as `corrcoef` when using a masked array with no masked values. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26285.performance.rst b/doc/release/upcoming_changes/26285.performance.rst deleted file mode 100644 index 79009f662a0f..000000000000 --- a/doc/release/upcoming_changes/26285.performance.rst +++ /dev/null @@ -1,5 +0,0 @@ -``ma.cov`` and ``ma.corrcoef`` are now significantly faster ------------------------------------------------------------ -The private function has been refactored along with `ma.cov` and -`ma.corrcoef`. They are now significantly faster, particularly on large, -masked arrays. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26292.new_feature.rst b/doc/release/upcoming_changes/26292.new_feature.rst deleted file mode 100644 index fc2c33571d77..000000000000 --- a/doc/release/upcoming_changes/26292.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* `numpy.reshape` and `numpy.ndarray.reshape` now support ``shape`` and ``copy`` arguments. 
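As a quick illustration of the ``reshape`` note above, here is a minimal sketch (the array contents and target shape are arbitrary)::

    import numpy as np

    a = np.arange(6)
    b = np.reshape(a, shape=(2, 3))    # the new ``shape`` keyword
    c = a.reshape((2, 3), copy=True)   # force a copy instead of a view
    assert not np.shares_memory(a, c)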
diff --git a/doc/release/upcoming_changes/26313.change.rst b/doc/release/upcoming_changes/26313.change.rst deleted file mode 100644 index 99c8b1d879f9..000000000000 --- a/doc/release/upcoming_changes/26313.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* As `numpy.vecdot` is now a ufunc it has a less precise signature. - This is due to the limitations of ufunc's typing stub. diff --git a/doc/release/upcoming_changes/26388.performance.rst b/doc/release/upcoming_changes/26388.performance.rst deleted file mode 100644 index 2e99f9452c1e..000000000000 --- a/doc/release/upcoming_changes/26388.performance.rst +++ /dev/null @@ -1,3 +0,0 @@ -* `numpy.save` now uses pickle protocol version 4 for saving arrays with - object dtype, which allows for pickle objects larger than 4GB and improves - saving speed by about 5% for large arrays. diff --git a/doc/release/upcoming_changes/26452.deprecation.rst b/doc/release/upcoming_changes/26452.deprecation.rst deleted file mode 100644 index cc4a10bfafee..000000000000 --- a/doc/release/upcoming_changes/26452.deprecation.rst +++ /dev/null @@ -1,4 +0,0 @@ -* The `fix_imports` keyword argument in `numpy.save` is deprecated. Since - NumPy 1.17, `numpy.save` uses a pickle protocol that no longer supports - Python 2, and ignored `fix_imports` keyword. This keyword is kept only - for backward compatibility. It is now deprecated. diff --git a/doc/release/upcoming_changes/26501.new_feature.rst b/doc/release/upcoming_changes/26501.new_feature.rst deleted file mode 100644 index c7465925295c..000000000000 --- a/doc/release/upcoming_changes/26501.new_feature.rst +++ /dev/null @@ -1,2 +0,0 @@ -* NumPy now supports DLPack v1, support for older versions will - be deprecated in the future. diff --git a/doc/release/upcoming_changes/26579.new_function.rst b/doc/release/upcoming_changes/26579.new_function.rst deleted file mode 100644 index 168d12189323..000000000000 --- a/doc/release/upcoming_changes/26579.new_function.rst +++ /dev/null @@ -1,6 +0,0 @@ -New function `numpy.unstack` ----------------------------- - -A new function ``np.unstack(array, axis=...)`` was added, which splits -an array into a tuple of arrays along an axis. It serves as the inverse -of `numpy.stack`. diff --git a/doc/release/upcoming_changes/26580.new_feature.rst b/doc/release/upcoming_changes/26580.new_feature.rst deleted file mode 100644 index c625e9b9d8a2..000000000000 --- a/doc/release/upcoming_changes/26580.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* `numpy.asanyarray` now supports ``copy`` and ``device`` arguments, matching `numpy.asarray`. diff --git a/doc/release/upcoming_changes/26611.expired.rst b/doc/release/upcoming_changes/26611.expired.rst deleted file mode 100644 index 1df220d2b2a7..000000000000 --- a/doc/release/upcoming_changes/26611.expired.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``set_string_function`` internal function was removed and ``PyArray_SetStringFunction`` - was stubbed out. diff --git a/doc/release/upcoming_changes/26611.new_feature.rst b/doc/release/upcoming_changes/26611.new_feature.rst deleted file mode 100644 index 6178049cf4ed..000000000000 --- a/doc/release/upcoming_changes/26611.new_feature.rst +++ /dev/null @@ -1,2 +0,0 @@ -* `numpy.printoptions`, `numpy.get_printoptions`, and `numpy.set_printoptions` now support - a new option, ``override_repr``, for defining custom ``repr(array)`` behavior. 
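The ``override_repr`` option mentioned in the note above can be exercised as in the following minimal sketch (the formatting callable is a made-up example)::

    import numpy as np

    def short_repr(a):
        # hypothetical formatter: report only shape and dtype
        return f"<ndarray shape={a.shape} dtype={a.dtype}>"

    with np.printoptions(override_repr=short_repr):
        print(repr(np.arange(4)))  # e.g. <ndarray shape=(4,) dtype=int64>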
diff --git a/doc/release/upcoming_changes/26656.improvement.rst b/doc/release/upcoming_changes/26656.improvement.rst deleted file mode 100644 index 66d7508d2738..000000000000 --- a/doc/release/upcoming_changes/26656.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -`np.quantile` with method ``closest_observation`` chooses nearest even order statistic --------------------------------------------------------------------------------------- -This changes the definition of nearest for border cases from the nearest odd -order statistic to nearest even order statistic. The numpy implementation now -matches other reference implementations. diff --git a/doc/release/upcoming_changes/26724.new_feature.rst b/doc/release/upcoming_changes/26724.new_feature.rst deleted file mode 100644 index 3c6a830728a4..000000000000 --- a/doc/release/upcoming_changes/26724.new_feature.rst +++ /dev/null @@ -1,7 +0,0 @@ -* `numpy.cumulative_sum` and `numpy.cumulative_prod` were added as Array API - compatible alternatives for `numpy.cumsum` and `numpy.cumprod`. The new functions - can include a fixed initial (zeros for ``sum`` and ones for ``prod``) in the result. -* `numpy.clip` now supports ``max`` and ``min`` keyword arguments which are meant - to replace ``a_min`` and ``a_max``. Also, for ``np.clip(a)`` or ``np.clip(a, None, None)`` - a copy of the input array will be returned instead of raising an error. -* `numpy.astype` now supports ``device`` argument. diff --git a/doc/release/upcoming_changes/26750.improvement.rst b/doc/release/upcoming_changes/26750.improvement.rst deleted file mode 100644 index 858061dbe48a..000000000000 --- a/doc/release/upcoming_changes/26750.improvement.rst +++ /dev/null @@ -1,12 +0,0 @@ -`lapack_lite` is now thread safe --------------------------------- - -NumPy provides a minimal low-performance version of LAPACK named ``lapack_lite`` -that can be used if no BLAS/LAPACK system is detected at build time. - -Until now, ``lapack_lite`` was not thread safe. Single-threaded use cases did -not hit any issues, but running linear algebra operations in multiple threads -could lead to errors, incorrect results, or seg faults due to data races. - -We have added a global lock, serializing access to ``lapack_lite`` in multiple -threads. diff --git a/doc/release/upcoming_changes/26766.change.rst b/doc/release/upcoming_changes/26766.change.rst deleted file mode 100644 index 923dbe816dd1..000000000000 --- a/doc/release/upcoming_changes/26766.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* `numpy.floor`, `numpy.ceil`, and `numpy.trunc` now won't perform casting - to a floating dtype for integer and boolean dtype input arrays. diff --git a/doc/release/upcoming_changes/26842.c_api.rst b/doc/release/upcoming_changes/26842.c_api.rst deleted file mode 100644 index 7e50dd385006..000000000000 --- a/doc/release/upcoming_changes/26842.c_api.rst +++ /dev/null @@ -1,5 +0,0 @@ -Many shims removed from npy_3kcompat.h --------------------------------------- -Many of the old shims and helper functions were removed from -``npy_3kcompat.h``. If you find yourself in need of these, vendor the previous -version of the file into your codebase. 
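A minimal sketch of the ``numpy.cumulative_sum`` and ``numpy.clip`` behaviour described in the notes above (the input array is arbitrary)::

    import numpy as np

    x = np.array([1, 2, 3])
    np.cumulative_sum(x)                        # array([1, 3, 6])
    np.cumulative_sum(x, include_initial=True)  # array([0, 1, 3, 6])

    np.clip(x, min=2)   # keyword spelling of the old ``a_min``; array([2, 2, 3])
    np.clip(x)          # no bounds given: returns a copy instead of raising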
diff --git a/doc/release/upcoming_changes/26846.improvement.rst b/doc/release/upcoming_changes/26846.improvement.rst deleted file mode 100644 index ae9b72d195bf..000000000000 --- a/doc/release/upcoming_changes/26846.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -The `numpy.printoptions` context manager is now thread and async-safe ---------------------------------------------------------------------- - -In prior versions of NumPy, the printoptions were defined using a combination -of Python and C global variables. We have refactored so the state is stored in -a python ``ContextVar``, making the context manager thread and async-safe. diff --git a/doc/release/upcoming_changes/26908.c_api.rst b/doc/release/upcoming_changes/26908.c_api.rst deleted file mode 100644 index d6e43591819d..000000000000 --- a/doc/release/upcoming_changes/26908.c_api.rst +++ /dev/null @@ -1,8 +0,0 @@ -New ``PyUFuncObject`` field ``process_core_dims_func`` ------------------------------------------------------- -The field ``process_core_dims_func`` was added to the structure -``PyUFuncObject``. For generalized ufuncs, this field can be set to a -function of type ``PyUFunc_ProcessCoreDimsFunc`` that will be called when the -ufunc is called. It allows the ufunc author to check that core dimensions -satisfy additional constraints, and to set output core dimension sizes if they -have not been provided. diff --git a/doc/release/upcoming_changes/26981.new_feature.rst b/doc/release/upcoming_changes/26981.new_feature.rst deleted file mode 100644 index f466faeb7590..000000000000 --- a/doc/release/upcoming_changes/26981.new_feature.rst +++ /dev/null @@ -1,9 +0,0 @@ -``f2py`` can generate freethreading-compatible C extensions ------------------------------------------------------------ - -Pass ``--freethreading-compatible`` to the f2py CLI tool to produce a C -extension marked as compatible with the free threading CPython -interpreter. Doing so prevents the interpreter from re-enabling the GIL at -runtime when it imports the C extension. Note that ``f2py`` does not analyze -fortran code for thread safety, so you must verify that the wrapped fortran -code is thread safe before marking the extension as compatible. diff --git a/doc/release/upcoming_changes/27076.deprecation.rst b/doc/release/upcoming_changes/27076.deprecation.rst deleted file mode 100644 index f692b814c17d..000000000000 --- a/doc/release/upcoming_changes/27076.deprecation.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Passing non-integer inputs as the first argument of `bincount` is now - deprecated, because such inputs are silently cast to integers with no - warning about loss of precision. diff --git a/doc/release/upcoming_changes/27091.change.rst b/doc/release/upcoming_changes/27091.change.rst deleted file mode 100644 index 5b71692efabd..000000000000 --- a/doc/release/upcoming_changes/27091.change.rst +++ /dev/null @@ -1,24 +0,0 @@ -Cast-safety fixes in ``copyto`` and ``full`` --------------------------------------------- -``copyto`` now uses NEP 50 correctly and applies this to its cast safety. -Python integer to NumPy integer casts and Python float to NumPy float casts -are now considered "safe" even if assignment may fail or precision may be lost. -This means the following examples change slightly: - -* ``np.copyto(int8_arr, 1000)`` previously performed an unsafe/same-kind cast - of the Python integer. It will now always raise, to achieve an unsafe cast - you must pass an array or NumPy scalar. 
-* ``np.copyto(uint8_arr, 1000, casting="safe")`` will raise an OverflowError - rather than a TypeError due to same-kind casting. -* ``np.copyto(float32_arr, 1e300, casting="safe")`` will overflow to ``inf`` - (float32 cannot hold ``1e300``) rather raising a TypeError. - -Further, only the dtype is used when assigning NumPy scalars (or 0-d arrays), -meaning that the following behaves differently: - -* ``np.copyto(float32_arr, np.float64(3.0), casting="safe")`` raises. -* ``np.coptyo(int8_arr, np.int64(100), casting="safe")`` raises. - Previously, NumPy checked whether the 100 fits the ``int8_arr``. - -This aligns ``copyto``, ``full``, and ``full_like`` with the correct NumPy 2 -behavior. \ No newline at end of file diff --git a/doc/release/upcoming_changes/27147.performance.rst b/doc/release/upcoming_changes/27147.performance.rst deleted file mode 100644 index 2cea7780f41c..000000000000 --- a/doc/release/upcoming_changes/27147.performance.rst +++ /dev/null @@ -1,8 +0,0 @@ -* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on - benchmarking, there are 5 clusters of performance around these kernels: - ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. - -* OpenBLAS on windows is linked without quadmath, simplfying licensing - -* Due to a regression in OpenBLAS on windows, the performance improvements - when using multiple threads for OpenBLAS 0.3.26 were reverted. diff --git a/doc/source/release/2.1.0-notes.rst b/doc/source/release/2.1.0-notes.rst index d0b0b6f1b785..295115d57343 100644 --- a/doc/source/release/2.1.0-notes.rst +++ b/doc/source/release/2.1.0-notes.rst @@ -1,19 +1,302 @@ .. currentmodule:: numpy -========================== +========================= NumPy 2.1.0 Release Notes -========================== +========================= +NumPy 2.1.0 provides support for the upcoming Python 3.13 release and drops +support for Python 3.9. In addition to the usual bug fixes and updated Python +support, it helps get us back into our usual release cycle after the extended +development of 2.0. The highlights for this release are: -Highlights -========== +- Support for the array-api 2023.12 standard. +- Support for Python 3.13. +- Preliminary support for free threaded Python 3.13. -*We'll choose highlights for this release near the end of the release cycle.* +Python versions 3.10-3.13 are supported in this release. -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +New functions +============= -.. **Content from release note snippets in doc/release/upcoming_changes:** +New function ``numpy.unstack`` +------------------------------ + +A new function ``np.unstack(array, axis=...)`` was added, which splits +an array into a tuple of arrays along an axis. It serves as the inverse +of `numpy.stack`. + +(`gh-26579 `__) + + +Deprecations +============ + +* The ``fix_imports`` keyword argument in ``numpy.save`` is deprecated. Since + NumPy 1.17, ``numpy.save`` uses a pickle protocol that no longer supports + Python 2, and ignored ``fix_imports`` keyword. This keyword is kept only + for backward compatibility. It is now deprecated. + + (`gh-26452 `__) + +* Passing non-integer inputs as the first argument of `bincount` is now + deprecated, because such inputs are silently cast to integers with no + warning about loss of precision. + + (`gh-27076 `__) + + +Expired deprecations +==================== + +* Scalars and 0D arrays are disallowed for ``numpy.nonzero`` and ``numpy.ndarray.nonzero``. 
+ + (`gh-26268 `__) + +* ``set_string_function`` internal function was removed and ``PyArray_SetStringFunction`` + was stubbed out. + + (`gh-26611 `__) + + +C API changes +============= + +API symbols now hidden but customizable +--------------------------------------- +NumPy now hides by default the API symbols it adds to allow all NumPy API +usage. This means that by default you cannot dynamically fetch the NumPy API +from another library (this was never possible on Windows). + +If you are experiencing linking errors related to ``PyArray_API`` or +``PyArray_RUNTIME_VERSION``, you can define the +``NPY_API_SYMBOL_ATTRIBUTE`` to opt out of this change. + +If you are experiencing problems due to an upstream header including NumPy, +the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before +their header and import NumPy yourself based on ``including-the-c-api``. + +(`gh-26103 `__) + +Many shims removed from npy_3kcompat.h +-------------------------------------- +Many of the old shims and helper functions were removed from +``npy_3kcompat.h``. If you find yourself in need of these, vendor the previous +version of the file into your codebase. + +(`gh-26842 `__) + +New ``PyUFuncObject`` field ``process_core_dims_func`` +------------------------------------------------------ +The field ``process_core_dims_func`` was added to the structure +``PyUFuncObject``. For generalized ufuncs, this field can be set to a function +of type ``PyUFunc_ProcessCoreDimsFunc`` that will be called when the ufunc is +called. It allows the ufunc author to check that core dimensions satisfy +additional constraints, and to set output core dimension sizes if they have not +been provided. + +(`gh-26908 `__) + + +New Features +============ + +* ``numpy.reshape`` and ``numpy.ndarray.reshape`` now support ``shape`` and + ``copy`` arguments. + + (`gh-26292 `__) + +* NumPy now supports DLPack v1; support for older versions will + be deprecated in the future. + + (`gh-26501 `__) + +* ``numpy.asanyarray`` now supports ``copy`` and ``device`` arguments, matching + ``numpy.asarray``. + + (`gh-26580 `__) + +* ``numpy.printoptions``, ``numpy.get_printoptions``, and + ``numpy.set_printoptions`` now support a new option, ``override_repr``, for + defining custom ``repr(array)`` behavior. + + (`gh-26611 `__) + +* ``numpy.cumulative_sum`` and ``numpy.cumulative_prod`` were added as Array + API compatible alternatives for ``numpy.cumsum`` and ``numpy.cumprod``. The + new functions can include a fixed initial (zeros for ``sum`` and ones for + ``prod``) in the result. + + (`gh-26724 `__) + +* ``numpy.clip`` now supports ``max`` and ``min`` keyword arguments which are + meant to replace ``a_min`` and ``a_max``. Also, for ``np.clip(a)`` or + ``np.clip(a, None, None)`` a copy of the input array will be returned instead + of raising an error. + + (`gh-26724 `__) + +* ``numpy.astype`` now supports a ``device`` argument. + + (`gh-26724 `__) + +``f2py`` can generate freethreading-compatible C extensions +----------------------------------------------------------- +Pass ``--freethreading-compatible`` to the f2py CLI tool to produce a C +extension marked as compatible with the free threading CPython +interpreter. Doing so prevents the interpreter from re-enabling the GIL at +runtime when it imports the C extension. Note that ``f2py`` does not analyze +Fortran code for thread safety, so you must verify that the wrapped Fortran +code is thread safe before marking the extension as compatible.
+ +(`gh-26981 `__) + + +Improvements +============ + +``histogram`` auto-binning now returns bin sizes >=1 for integer input data +--------------------------------------------------------------------------- +For integer input data, bin sizes smaller than 1 result in spurious empty +bins. This is now avoided when the number of bins is computed using one of the +algorithms provided by ``histogram_bin_edges``. + +(`gh-12150 `__) + +``ndarray`` shape-type parameter is now covariant and bound to ``tuple[int, ...]`` +---------------------------------------------------------------------------------- +Static typing for ``ndarray`` is a long-term effort that continues +with this change. It is a generic type with type parameters for +the shape and the data type. Previously, the shape type parameter could be +any value. This change restricts it to a tuple of ints, as one would expect +from using ``ndarray.shape``. Further, the shape-type parameter has been +changed from invariant to covariant. This change also applies to the subtypes +of ``ndarray``, e.g. ``numpy.ma.MaskedArray``. See the +`typing docs `_ +for more information. + +(`gh-26081 `__) + +``np.quantile`` with method ``closest_observation`` chooses nearest even order statistic +---------------------------------------------------------------------------------------- +This changes the definition of nearest for border cases from the nearest odd +order statistic to nearest even order statistic. The NumPy implementation now +matches other reference implementations. + +(`gh-26656 `__) + +``lapack_lite`` is now thread safe +---------------------------------- +NumPy provides a minimal low-performance version of LAPACK named ``lapack_lite`` +that can be used if no BLAS/LAPACK system is detected at build time. + +Until now, ``lapack_lite`` was not thread safe. Single-threaded use cases did +not hit any issues, but running linear algebra operations in multiple threads +could lead to errors, incorrect results, or segfaults due to data races. + +We have added a global lock, serializing access to ``lapack_lite`` in multiple +threads. + +(`gh-26750 `__) + +The ``numpy.printoptions`` context manager is now thread and async-safe +----------------------------------------------------------------------- +In prior versions of NumPy, the printoptions were defined using a combination +of Python and C global variables. We have refactored so that the state is stored in +a Python ``ContextVar``, making the context manager thread and async-safe. + +(`gh-26846 `__) + + +Performance improvements and changes +==================================== + +* ``numpy.save`` now uses pickle protocol version 4 for saving arrays with + object dtype, which allows for pickle objects larger than 4GB and improves + saving speed by about 5% for large arrays. + + (`gh-26388 `__) + +* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on + benchmarking, there are 5 clusters of performance around these kernels: + ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. + + (`gh-27147 `__) + +* OpenBLAS on Windows is linked without quadmath, simplifying licensing. + + (`gh-27147 `__) + +* Due to a regression in OpenBLAS on Windows, the performance improvements when + using multiple threads for OpenBLAS 0.3.26 were reverted. + + (`gh-27147 `__) + +``ma.cov`` and ``ma.corrcoef`` are now significantly faster +----------------------------------------------------------- +The private function has been refactored along with ``ma.cov`` and +``ma.corrcoef``.
They are now significantly faster, particularly on large, +masked arrays. + +(`gh-26285 `__) + + +Changes +======= + +* As ``numpy.vecdot`` is now a ufunc, it has a less precise signature. + This is due to the limitations of ufunc's typing stub. + + (`gh-26313 `__) + +* ``numpy.floor``, ``numpy.ceil``, and ``numpy.trunc`` now won't perform + casting to a floating dtype for integer and boolean dtype input arrays. + + (`gh-26766 `__) + +``ma.corrcoef`` may return a slightly different result +------------------------------------------------------ +A pairwise observation approach was previously used in ``ma.corrcoef`` to +calculate the standard deviations for each pair of variables. This has been +changed because it was only used to normalise the covariance, estimated using +``ma.cov``, which does not consider the observations for each variable in a +pairwise manner, rendering the pairwise approach unnecessary. The normalisation has been replaced +by the more appropriate standard deviation for each variable, which +significantly reduces the wall time, but will return slightly different +estimates of the correlation coefficients in cases where the observations +between a pair of variables are not aligned. However, it will return the same +estimates in all other cases, including returning the same correlation matrix +as ``corrcoef`` when using a masked array with no masked values. + +(`gh-26285 `__) + +Cast-safety fixes in ``copyto`` and ``full`` +-------------------------------------------- +``copyto`` now uses NEP 50 correctly and applies this to its cast safety. +Python integer to NumPy integer casts and Python float to NumPy float casts +are now considered "safe" even if assignment may fail or precision may be lost. +This means the following examples change slightly: + +* ``np.copyto(int8_arr, 1000)`` previously performed an unsafe/same-kind cast + of the Python integer. It will now always raise; to achieve an unsafe cast + you must pass an array or NumPy scalar. + +* ``np.copyto(uint8_arr, 1000, casting="safe")`` will raise an OverflowError + rather than a TypeError due to same-kind casting. + +* ``np.copyto(float32_arr, 1e300, casting="safe")`` will overflow to ``inf`` + (float32 cannot hold ``1e300``) rather than raising a TypeError. + +Further, only the dtype is used when assigning NumPy scalars (or 0-d arrays), +meaning that the following behaves differently: + +* ``np.copyto(float32_arr, np.float64(3.0), casting="safe")`` raises. + +* ``np.copyto(int8_arr, np.int64(100), casting="safe")`` raises. + Previously, NumPy checked whether the value 100 fits ``int8_arr``. + +This aligns ``copyto``, ``full``, and ``full_like`` with the correct NumPy 2 +behavior. + +(`gh-27091 `__) -..
include:: notes-towncrier.rst diff --git a/pyproject.toml b/pyproject.toml index ad4673949a10..305db1c77ba4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.1.0.dev0" +version = "2.1.0rc1" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} @@ -29,6 +29,7 @@ classifiers = [ 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: Implementation :: CPython', 'Topic :: Software Development', From cbfec1b22fac9233dbb9448176caf95ef26482e0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 11 Aug 2024 01:39:36 +0200 Subject: [PATCH 965/980] DOC: Add release notes for #26897 --- doc/release/upcoming_changes/26897.improvement.rst | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 doc/release/upcoming_changes/26897.improvement.rst diff --git a/doc/release/upcoming_changes/26897.improvement.rst b/doc/release/upcoming_changes/26897.improvement.rst new file mode 100644 index 000000000000..1b3b327711af --- /dev/null +++ b/doc/release/upcoming_changes/26897.improvement.rst @@ -0,0 +1,6 @@ +Type hinting ``numpy.polynomial`` +--------------------------------- + +Starting from the 2.1 release, PEP 484 type annotations have been included for +the functions and convenience classes in ``numpy.polynomial`` and its +sub-packages. From 7f1cd245a163ed14e39eee27f06a99b925ac5e3f Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 11 Aug 2024 02:01:44 +0200 Subject: [PATCH 966/980] DOC: Add release notes for #27008 --- doc/release/upcoming_changes/27008.improvement.rst | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 doc/release/upcoming_changes/27008.improvement.rst diff --git a/doc/release/upcoming_changes/27008.improvement.rst b/doc/release/upcoming_changes/27008.improvement.rst new file mode 100644 index 000000000000..47e1090d9067 --- /dev/null +++ b/doc/release/upcoming_changes/27008.improvement.rst @@ -0,0 +1,8 @@ +Improved ``numpy.dtypes`` type hints +------------------------------------ + +The type annotations for ``numpy.dtypes`` are now a better reflection of the +runtime: +The ``numpy.dtype`` type-aliases have been replaced with specialized ``dtype`` +*subtypes*, and the previously missing annotations for +``numpy.dtypes.StringDType`` have been added. 
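A small runtime sketch of what the improved ``numpy.dtypes`` annotations describe; the static-typing benefit is that type checkers now see these objects as ``dtype`` subtypes rather than plain ``dtype`` aliases::

    import numpy as np

    dt = np.dtypes.Float64DType()    # instance of a specialized dtype subtype
    assert isinstance(dt, np.dtype)  # still an ordinary dtype at runtime

    x = np.zeros(3, dtype=dt)        # usable anywhere a dtype is accepted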
From 49f422b75ff4acee53490d124638fd4efebf17c7 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 12 Aug 2024 11:50:22 +0300 Subject: [PATCH 967/980] BUILD: use a shrunken version of scipy-openblas wheels [wheel build] --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index d2940e2d65bc..215bc1229930 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.27.44.5 +scipy-openblas32==0.3.27.44.6 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 965fdb8faadf..5bed94385819 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.27.44.5 -scipy-openblas64==0.3.27.44.5 +scipy-openblas32==0.3.27.44.6 +scipy-openblas64==0.3.27.44.6 From 117da9404cffdb952362982e4eb9b5f62ca296b4 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 11 Aug 2024 13:02:30 +0200 Subject: [PATCH 968/980] REV: Revert undef I and document it This is based on what Matti wrote in gh-27105 but also adds it to the migration guide. Closes gh-27083 Co-authored-by: Matti Picus --- doc/source/numpy_2_0_migration_guide.rst | 13 ++++++++++ .../reference/c-api/types-and-structures.rst | 26 +++++++++++++++++++ numpy/_core/include/numpy/npy_common.h | 5 ---- 3 files changed, 39 insertions(+), 5 deletions(-) diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 2ff49b162fe4..55d4696a114d 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -220,6 +220,19 @@ using the NumPy types. You can still write cython code using the ``c.real`` and ``c.imag`` attributes (using the native typedefs), but you can no longer use in-place operators ``c.imag += 1`` in Cython's c++ mode. +Because NumPy 2 now includes ``complex.h``, code that uses a variable named +``I`` may see an error such as + +.. code-block:: c + + error: expected ‘)’ before ‘__extension__’ + double I, + +Using the name ``I`` now requires an ``#undef I``. + +.. note:: + NumPy 2.0.1 briefly included the ``#undef I`` to help users not already + including ``complex.h``. + Changes to namespaces ===================== diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 8d57153d8803..4565e602193f 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -1611,3 +1611,29 @@ for completeness and assistance in understanding the code. ``arrayobject.h`` header. This type is not exposed to Python and could be replaced with a C-structure. As a Python type it takes advantage of reference- counted memory management. + + +NumPy C-API and C complex +========================= +When you use the NumPy C-API, you will have access to complex floating-point declarations +``npy_cdouble`` and ``npy_cfloat``, which are declared in terms of the C +standard types from ``complex.h``.
Unfortunately, ``complex.h`` contains +``#define I ...`` (where the actual definition depends on the compiler), which +means that any downstream user that does ``#include <complex.h>`` +could get ``I`` defined, and using something like declaring ``double I;`` in +their code will result in an obscure compiler error like + +.. code-block:: c + + error: expected ‘)’ before ‘__extension__’ + double I, + +This error can be avoided by adding:: + + #undef I + +to your code. + +.. versionchanged:: 2.0 + The inclusion of ``complex.h`` was new in NumPy 2, so that code defining + a different ``I`` may not have required the ``#undef I`` on older versions. + NumPy 2.0.1 briefly included the ``#undef I`` to help users not already + including ``complex.h``. \ No newline at end of file diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index 3132b602a7c8..79ad8ad78cb2 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -379,11 +379,6 @@ typedef struct #include <complex.h> -// Downstream libraries like sympy would like to use I -// see https://github.com/numpy/numpy/issues/26787 -#ifdef I -#undef I -#endif #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) typedef _Dcomplex npy_cdouble; From 87af28e59d207f3407b7d48b5451f55156a474b0 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 12 Aug 2024 08:32:33 +0300 Subject: [PATCH 969/980] BUILD: improve download script --- tools/download-wheels.py | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/tools/download-wheels.py b/tools/download-wheels.py index e5753eb2148c..54dbdf1200a8 100644 --- a/tools/download-wheels.py +++ b/tools/download-wheels.py @@ -56,15 +56,20 @@ def get_wheel_names(version): The release version. For instance, "1.18.3". """ + ret = [] http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED") tmpl = re.compile(rf"^.*{PREFIX}-{version}{SUFFIX}") - index_url = f"{STAGING_URL}/files" - index_html = http.request("GET", index_url) - soup = BeautifulSoup(index_html.data, "html.parser") - return soup.find_all(string=tmpl) + # TODO: generalize this by searching for `showing 1 of N` and + # looping over N pages, starting from 1 + for i in range(1, 3): + index_url = f"{STAGING_URL}/files?page={i}" + index_html = http.request("GET", index_url) + soup = BeautifulSoup(index_html.data, "html.parser") + ret += soup.find_all(string=tmpl) + return ret -def download_wheels(version, wheelhouse): +def download_wheels(version, wheelhouse, test=False): """Download release wheels.
The release wheels for the given NumPy version are downloaded @@ -86,8 +91,15 @@ def download_wheels(version, wheelhouse): wheel_path = os.path.join(wheelhouse, wheel_name) with open(wheel_path, "wb") as f: with http.request("GET", wheel_url, preload_content=False,) as r: - print(f"{i + 1:<4}{wheel_name}") - shutil.copyfileobj(r, f) + info = r.info() + length = int(info.get('Content-Length', '0')) + if length == 0: + length = 'unknown size' + else: + length = f"{(length / 1024 / 1024):.2f}MB" + print(f"{i + 1:<4}{wheel_name} {length}") + if not test: + shutil.copyfileobj(r, f) print(f"\nTotal files downloaded: {len(wheel_names)}") @@ -101,6 +113,10 @@ def download_wheels(version, wheelhouse): default=os.path.join(os.getcwd(), "release", "installers"), help="Directory in which to store downloaded wheels\n" "[defaults to /release/installers]") + parser.add_argument( + "-t", "--test", + action = 'store_true', + help="only list available wheels, do not download") args = parser.parse_args() @@ -110,4 +126,4 @@ def download_wheels(version, wheelhouse): f"{wheelhouse} wheelhouse directory is not present." " Perhaps you need to use the '-w' flag to specify one.") - download_wheels(args.version, wheelhouse) + download_wheels(args.version, wheelhouse, test=args.test) From 9bf2e00f3d96c36b20a8017965c38b547acf6b03 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Mon, 12 Aug 2024 07:40:51 +1100 Subject: [PATCH 970/980] MAINT: update default NPY_FEATURE_VERSION after dropping py39 --- numpy/_core/include/numpy/numpyconfig.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index b49d215614ac..0f2b68054527 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -121,8 +121,8 @@ /* user provided a target version, use it */ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION #else - /* Use the default (increase when dropping Python 3.9 support) */ - #define NPY_FEATURE_VERSION NPY_1_19_API_VERSION + /* Use the default (increase when dropping Python 3.10 support) */ + #define NPY_FEATURE_VERSION NPY_1_21_API_VERSION #endif /* Sanity check the (requested) feature version */ From fdf6055d0636f501a6857e04ad8bd4729a579b0a Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 12 Aug 2024 13:44:54 -0600 Subject: [PATCH 971/980] DOC: add free-threading release notes [skip azp][skip actions][skip cirrus] --- doc/source/release/2.1.0-notes.rst | 43 ++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/doc/source/release/2.1.0-notes.rst b/doc/source/release/2.1.0-notes.rst index 295115d57343..c591b29e4c24 100644 --- a/doc/source/release/2.1.0-notes.rst +++ b/doc/source/release/2.1.0-notes.rst @@ -101,6 +101,49 @@ been provided. New Features ============ +Preliminary Support for Free-Threaded CPython 3.13 +-------------------------------------------------- + +CPython 3.13 will be available as an experimental free-threaded build. See +https://py-free-threading.github.io, `PEP 703 +`_ and the `CPython 3.13 release notes +`_ for +more detail about free-threaded Python. + +NumPy 2.1 has preliminary support for the free-threaded build of CPython +3.13. This support was enabled by fixing a number of C thread-safety issues in +NumPy. Before NumPy 2.1, NumPy used a large number of C global static variables +to store runtime caches and other state. 
We have either refactored to avoid the +need for global state, converted the global state to thread-local state, or +added locking. + +Support for free-threaded Python does not mean that NumPy is thread +safe. Read-only shared access to ndarray should be safe. NumPy exposes shared +mutable state and we have not added any locking to the array object itself to +serialize access to shared state. Care must be taken in user code to avoid +races if you would like to mutate the same array in multiple threads. It is +certainly possible to crash NumPy by mutating an array simultaneously in +multiple threads, for example by calling a ufunc and the ``resize`` method +simultaneously. For now our guidance is: "don't do that". In the future we would +like to provide stronger guarantees. + +Object arrays in particular need special care, since the GIL +previously provided locking for object array access and no longer does. See +`Issue #27199 `_ for more +information about object arrays in the free-threaded build. + +If you are interested in free-threaded Python, for example because you have a +multiprocessing-based workflow that you are interested in running with Python +threads, we encourage testing and experimentation. + +If you run into problems that you suspect are because of NumPy, please `open an +issue `_, checking first if +the bug also occurs in the "regular" non-free-threaded CPython 3.13 build. Many +threading bugs can also occur in code that releases the GIL; disabling the GIL +only makes it easier to hit threading bugs. + +(`gh-26157 `__) + * ``numpy.reshape`` and ``numpy.ndarray.reshape`` now support ``shape`` and ``copy`` arguments. From 3b34c0990f3d55eaf7875b9bcec42c92da94faa7 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 13 Aug 2024 16:20:31 +0200 Subject: [PATCH 972/980] BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds The value was simply hardcoded to the wrong thing in the dynamic path... --- numpy/_core/include/numpy/npy_2_compat.h | 2 +- numpy/_core/tests/examples/cython/checks.pyx | 4 ++++ numpy/_core/tests/test_cython.py | 7 +++++++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h index 80bb4088c812..e39e65aedea7 100644 --- a/numpy/_core/include/numpy/npy_2_compat.h +++ b/numpy/_core/include/numpy/npy_2_compat.h @@ -125,7 +125,7 @@ PyArray_ImportNumPyAPI(void) #define NPY_DEFAULT_INT \ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_INTP : NPY_LONG) #define NPY_RAVEL_AXIS \ - (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? -1 : 32) + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32) #define NPY_MAXARGS \ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 
64 : 32) #endif diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index b51ab128053f..c0bb1f3f5370 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -129,6 +129,10 @@ def get_default_integer(): return cnp.dtype("intp") return None +def get_ravel_axis(): + return cnp.NPY_RAVEL_AXIS + + def conv_intp(cnp.intp_t val): return val diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 26a1fafa0066..71c1a457761b 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -153,6 +153,13 @@ def test_default_int(install_temp): assert checks.get_default_integer() is np.dtype(int) + +def test_ravel_axis(install_temp): + import checks + + assert checks.get_ravel_axis() == np.iinfo("intc").min + + def test_convert_datetime64_to_datetimestruct(install_temp): # GH#21199 import checks From 898de56c5d9f088e0647ae4b25e866c36a1d8356 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Aug 2024 02:26:55 +0200 Subject: [PATCH 973/980] TYP: Fixed & improved type hints for ``numpy.histogram2d`` --- numpy/lib/_twodim_base_impl.pyi | 221 ++++++++++++++++-- .../typing/tests/data/reveal/twodim_base.pyi | 71 +++++- 2 files changed, 264 insertions(+), 28 deletions(-) diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 4096976871d7..c4690a4304bd 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -2,6 +2,7 @@ import builtins from collections.abc import Callable, Sequence from typing import ( Any, + TypeAlias, overload, TypeVar, Literal as L, @@ -16,6 +17,7 @@ from numpy import ( int_, intp, float64, + complex128, signedinteger, floating, complexfloating, @@ -29,6 +31,7 @@ from numpy._typing import ( ArrayLike, _ArrayLike, NDArray, + _SupportsArray, _SupportsArrayFunc, _ArrayLikeInt_co, _ArrayLikeFloat_co, @@ -164,44 +167,220 @@ def vander( increasing: bool = ..., ) -> NDArray[object_]: ... + +_Int_co: TypeAlias = np.integer[Any] | np.bool +_Float_co: TypeAlias = np.floating[Any] | _Int_co +_Number_co: TypeAlias = np.number[Any] | np.bool + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_SCT]] | Sequence[_SCT] +_ArrayLike2D: TypeAlias = ( + _SupportsArray[np.dtype[_SCT]] + | Sequence[_ArrayLike1D[_SCT]] +) + +_ArrayLike1DInt_co = ( + _SupportsArray[np.dtype[_Int_co]] + | Sequence[int | _Int_co] +) +_ArrayLike1DFloat_co = ( + _SupportsArray[np.dtype[_Float_co]] + | Sequence[float | int | _Float_co] +) +_ArrayLike2DFloat_co = ( + _SupportsArray[np.dtype[_Float_co]] + | Sequence[_ArrayLike1DFloat_co] +) +_ArrayLike1DNumber_co = ( + _SupportsArray[np.dtype[_Number_co]] + | Sequence[int | float | complex | _Number_co] +) + +_SCT_complex = TypeVar("_SCT_complex", bound=np.complexfloating[Any, Any]) +_SCT_inexact = TypeVar("_SCT_inexact", bound=np.inexact[Any]) +_SCT_number_co = TypeVar("_SCT_number_co", bound=_Number_co) + @overload -def histogram2d( # type: ignore[misc] - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, +def histogram2d( + x: _ArrayLike1D[_SCT_complex], + y: _ArrayLike1D[_SCT_complex | _Float_co], bins: int | Sequence[int] = ..., - range: None | _ArrayLikeFloat_co = ..., + range: None | _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[floating[Any]], - NDArray[floating[Any]], + NDArray[_SCT_complex], + NDArray[_SCT_complex], ]: ... 
@overload def histogram2d( - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, + x: _ArrayLike1D[_SCT_complex | _Float_co], + y: _ArrayLike1D[_SCT_complex], bins: int | Sequence[int] = ..., - range: None | _ArrayLikeFloat_co = ..., + range: None | _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[complexfloating[Any, Any]], - NDArray[complexfloating[Any, Any]], + NDArray[_SCT_complex], + NDArray[_SCT_complex], ]: ... -@overload # TODO: Sort out `bins` +@overload def histogram2d( - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - bins: Sequence[_ArrayLikeInt_co], - range: None | _ArrayLikeFloat_co = ..., + x: _ArrayLike1D[_SCT_inexact], + y: _ArrayLike1D[_SCT_inexact | _Int_co], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_inexact], + NDArray[_SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_SCT_inexact | _Int_co], + y: _ArrayLike1D[_SCT_inexact], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_inexact], + NDArray[_SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float | int], + y: _ArrayLike1DInt_co | Sequence[float | int], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[float64], + NDArray[float64], +]: ... +@overload +def histogram2d( + x: Sequence[complex | float | int], + y: Sequence[complex | float | int], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[complex128 | float64], + NDArray[complex128 | float64], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: _ArrayLike1D[_SCT_number_co] | Sequence[_ArrayLike1D[_SCT_number_co]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co], + NDArray[_SCT_number_co], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_SCT_inexact], + y: _ArrayLike1D[_SCT_inexact], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | _SCT_inexact], + NDArray[_SCT_number_co | _SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float | int], + y: _ArrayLike1DInt_co | Sequence[float | int], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | float64], + NDArray[_SCT_number_co | float64], +]: ... 
+@overload +def histogram2d( + x: Sequence[complex | float | int], + y: Sequence[complex | float | int], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | complex128 | float64], + NDArray[_SCT_number_co | complex128 | float64] , +]: ... + +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.bool], + NDArray[np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[int | bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.int_ | np.bool], + NDArray[np.int_ | np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[float | int | bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.float64 | np.int_ | np.bool], + NDArray[np.float64 | np.int_ | np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[complex | float | int | bool]], + range: None | _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[Any], - NDArray[Any], + NDArray[np.complex128 | np.float64 | np.int_ | np.bool], + NDArray[np.complex128 | np.float64 | np.int_ | np.bool], ]: ... 
# NOTE: we're assuming/demanding here the `mask_func` returns diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 9d808dbb1e0d..f52ad3a41b69 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -28,6 +28,7 @@ AR_c: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] AR_LIKE_b: list[bool] +AR_LIKE_c: list[complex] assert_type(np.fliplr(AR_b), npt.NDArray[np.bool]) assert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any]) @@ -62,28 +63,84 @@ assert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating[Any]]) assert_type(np.vander(AR_c), npt.NDArray[np.complexfloating[Any, Any]]) assert_type(np.vander(AR_O), npt.NDArray[np.object_]) +assert_type( + np.histogram2d(AR_LIKE_c, AR_LIKE_c), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128 | np.float64], + npt.NDArray[np.complex128 | np.float64], + ], +) assert_type( np.histogram2d(AR_i, AR_b), tuple[ npt.NDArray[np.float64], - npt.NDArray[np.floating[Any]], - npt.NDArray[np.floating[Any]], + npt.NDArray[np.float64], + npt.NDArray[np.float64], ], ) assert_type( - np.histogram2d(AR_f, AR_f), + np.histogram2d(AR_f, AR_i), tuple[ npt.NDArray[np.float64], - npt.NDArray[np.floating[Any]], - npt.NDArray[np.floating[Any]], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.histogram2d(AR_i, AR_f), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.float64], ], ) assert_type( np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b), tuple[ npt.NDArray[np.float64], - npt.NDArray[np.complexfloating[Any, Any]], - npt.NDArray[np.complexfloating[Any, Any]], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_f, AR_c, bins=8), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_c, AR_f, bins=(8, 5)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_c, AR_i, bins=AR_u), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.uint64], + npt.NDArray[np.uint64], + ], +) +assert_type( + np.histogram2d(AR_c, AR_c, bins=(AR_u, AR_u)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.uint64], + npt.NDArray[np.uint64], + ], +) +assert_type( + np.histogram2d(AR_c, AR_c, bins=(AR_b, 8)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.bool | np.complex128], + npt.NDArray[np.bool | np.complex128], ], ) From f3f8f2c11f29eab3149b167ac1f3bd23c4d5fe8d Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 9 Aug 2024 03:45:31 +0200 Subject: [PATCH 974/980] TYP: Fix incompatible overrides in the ``numpy._typing._ufunc`` stubs --- numpy/_typing/_ufunc.pyi | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 5e52039864b7..9495321e2c20 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -17,6 +17,7 @@ from typing import ( Protocol, NoReturn, ) +from typing_extensions import LiteralString from numpy import ufunc, _CastingKind, _OrderKACF from numpy.typing import NDArray @@ -32,9 +33,9 @@ _3Tuple = tuple[_T, _T, _T] _4Tuple = tuple[_T, _T, _T, _T] _NTypes = TypeVar("_NTypes", bound=int, covariant=True) -_IDType = TypeVar("_IDType", bound=Any, covariant=True) -_NameType = TypeVar("_NameType", bound=str, covariant=True) -_Signature = TypeVar("_Signature", bound=str, 
covariant=True) +_IDType = TypeVar("_IDType", covariant=True) +_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) +_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) class _SupportsArrayUFunc(Protocol): From 44ce7e8f7b43c1045ad78eecaac0435fff491fae Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 16 Aug 2024 10:28:23 +0200 Subject: [PATCH 975/980] BUG: Fix ``PyArray_ZeroContiguousBuffer`` (resize) with struct dtypes We allow the structured dtype to return NULL for the zero fill function to indicate that a simple memset is sufficient. Also simplifies error handling a bit. The get_fill_zero_loop function must clean up on error and not return references if it returns a `NULL` loop. --- numpy/_core/src/multiarray/refcount.c | 10 ++++------ numpy/_core/tests/test_multiarray.py | 6 ++++++ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index 0da40cbdc60e..571b50372684 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -83,14 +83,16 @@ PyArray_ZeroContiguousBuffer( if (get_fill_zero_loop( NULL, descr, aligned, descr->elsize, &(zero_info.func), &(zero_info.auxdata), &flags_unused) < 0) { - goto fail; + return -1; } } else { + assert(zero_info.func == NULL); + } + if (zero_info.func == NULL) { /* the multiply here should never overflow, since we already checked if the new array size doesn't overflow */ memset(data, 0, size*stride); - NPY_traverse_info_xfree(&zero_info); return 0; } @@ -98,10 +100,6 @@ PyArray_ZeroContiguousBuffer( NULL, descr, data, size, stride, zero_info.auxdata); NPY_traverse_info_xfree(&zero_info); return res; - - fail: - NPY_traverse_info_xfree(&zero_info); - return -1; } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 441d76af9228..0bc9fea9c960 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -9174,6 +9174,12 @@ def test_resize(self): d.resize(150) assert_(old < sys.getsizeof(d)) + @pytest.mark.parametrize("dtype", ["u4,f4", "u4,O"]) + def test_resize_structured(self, dtype): + a = np.array([(0, 0.0) for i in range(5)], dtype=dtype) + a.resize(1000) + assert_array_equal(a, np.zeros(1000, dtype=dtype)) + def test_error(self): d = np.ones(100) assert_raises(TypeError, d.__sizeof__, "a") From d902c24b684e8ba5370a160a375beaa77bae5032 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 15 Aug 2024 14:54:23 -0600 Subject: [PATCH 976/980] DOC: add docs on thread safety in NumPy [skip azp][skip actions][skip cirrus] --- doc/source/reference/global_state.rst | 16 ++++++--- doc/source/reference/index.rst | 1 + doc/source/reference/thread_safety.rst | 49 ++++++++++++++++++++++++++ 3 files changed, 61 insertions(+), 5 deletions(-) create mode 100644 doc/source/reference/thread_safety.rst diff --git a/doc/source/reference/global_state.rst b/doc/source/reference/global_state.rst index e0ab1bb2a7ba..5bc512e0e9ec 100644 --- a/doc/source/reference/global_state.rst +++ b/doc/source/reference/global_state.rst @@ -4,11 +4,10 @@ Global state ************ -NumPy has a few import-time, compile-time, or runtime options -which change the global behaviour. -Most of these are related to performance or for debugging -purposes and will not be interesting to the vast majority -of users. +NumPy exposes global state in legacy APIs and a few import-time, +compile-time, or runtime options which change the global behaviour.
+Most of these are related to performance or for debugging purposes and
+will not be interesting to the vast majority of users.
 
 
 Performance-related options
@@ -71,3 +70,10 @@ and set the ``ndarray.base``.
 
 .. versionchanged:: 1.25.2
     This variable is only checked on the first import.
+
+Legacy User DTypes
+==================
+
+The number of legacy user DTypes is stored in ``NPY_NUMUSERTYPES``, a global
+variable that is exposed in the NumPy C API. This means that the legacy DType
+API is inherently not thread-safe.
diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst
index ed9641409014..02e3248953fb 100644
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -58,6 +58,7 @@ Other topics
 
    array_api
    simd/index
+   thread_safety
   global_state
   security
   distutils_status_migration
diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst
new file mode 100644
index 000000000000..df806e9e7c5f
--- /dev/null
+++ b/doc/source/reference/thread_safety.rst
@@ -0,0 +1,49 @@
+.. _thread_safety:
+
+*************
+Thread Safety
+*************
+
+NumPy supports use in a multithreaded context via the `threading` module in the
+standard library. Many NumPy operations release the GIL, so unlike many
+situations in Python, it is possible to improve parallel performance by
+exploiting multithreaded parallelism in Python.
+
+The easiest performance gains happen when each worker thread owns its own array
+or set of array objects, with no data directly shared between threads. Because
+NumPy releases the GIL for many low-level operations, threads that spend most of
+the time in low-level code will run in parallel.
+
+It is possible to share NumPy arrays between threads, but extreme care must be
+taken to avoid creating thread safety issues when mutating shared arrays. If
+two threads simultaneously read from and write to the same array, at best they
+will see inconsistent views of the same array data. It is also possible to crash
+the Python interpreter by, for example, resizing an array while another thread
+is reading from it to compute a ufunc operation.
+
+In the future, we may add locking to ndarray to make working with shared NumPy
+arrays easier, but for now we suggest focusing on read-only access of arrays
+that are shared between threads.
+
+Note that operations that *do not* release the GIL will see no performance gains
+from use of the `threading` module, and instead might be better served with
+`multiprocessing`. In particular, operations on arrays with ``dtype=object`` do
+not release the GIL.
+
+Free-threaded Python
+--------------------
+
+.. versionadded:: 2.1
+
+Starting with NumPy 2.1 and CPython 3.13, NumPy also has experimental support
+for Python runtimes with the GIL disabled. See
+https://py-free-threading.github.io for more information about installing and
+using free-threaded Python, as well as information about supporting it in
+libraries that depend on NumPy.
+
+Because free-threaded Python does not have a global interpreter lock to
+serialize access to Python objects, there are more opportunities for threads to
+mutate shared state and create thread safety issues. In addition to the
+limitations about locking of the ndarray object noted above, this also means
+that arrays with ``dtype=object`` are not protected by the GIL, creating data
+races for Python objects that are not possible outside free-threaded Python.
From 5af2e965a02137cd05706d8d395d8263f395f7c7 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Fri, 16 Aug 2024 10:52:01 -0600
Subject: [PATCH 977/980] Move NUMUSERTYPES thread safety discussion to legacy
 DType API docs

---
 doc/source/reference/c-api/array.rst     |  7 +++++++
 doc/source/reference/global_state.rst    | 21 +++++++--------------
 doc/source/user/c-info.beyond-basics.rst |  3 +++
 3 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 68fbb6ef3d66..80af4b83d172 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -1264,6 +1264,13 @@ User-defined data types
    registered (checked only by the address of the pointer), then
    return the previously-assigned type-number.
 
+   The number of user DTypes known to NumPy is stored in
+   ``NPY_NUMUSERTYPES``, a static global variable that is public in the
+   C API. Accessing this symbol is inherently *not* thread-safe. If
+   for some reason you need to use this API in a multithreaded context,
+   you will need to add your own locking; NumPy does not ensure new
+   data types can be added in a thread-safe manner.
+
 .. c:function:: int PyArray_RegisterCastFunc( \
        PyArray_Descr* descr, int totype, PyArray_VectorUnaryFunc* castfunc)
 
diff --git a/doc/source/reference/global_state.rst b/doc/source/reference/global_state.rst
index 5bc512e0e9ec..e66c86faf1b3 100644
--- a/doc/source/reference/global_state.rst
+++ b/doc/source/reference/global_state.rst
@@ -1,13 +1,13 @@
 .. _global_state:
 
-************
-Global state
-************
+****************************
+Global Configuration Options
+****************************
 
-NumPy exposes global state in legacy APIs and a few import-time,
-compile-time, or runtime options which change the global behaviour.
-Most of these are related to performance or for debugging purposes and
-will not be interesting to the vast majority of users.
+NumPy has a few import-time, compile-time, or runtime configuration
+options which change the global behaviour. Most of these are related to
+performance or for debugging purposes and will not be interesting to the
+vast majority of users.
 
 
 Performance-related options
@@ -70,10 +70,3 @@ and set the ``ndarray.base``.
 
 .. versionchanged:: 1.25.2
     This variable is only checked on the first import.
-
-Legacy User DTypes
-==================
-
-The number of legacy user DTypes is stored in ``NPY_NUMUSERTYPES``, a global
-variable that is exposed in the NumPy C API. This means that the legacy DType
-API is inherently not thread-safe.
diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst
index 697c0c045e4f..7bf793ae2e47 100644
--- a/doc/source/user/c-info.beyond-basics.rst
+++ b/doc/source/user/c-info.beyond-basics.rst
@@ -268,6 +268,9 @@ specifies your data-type. This type number should be stored and
 made available by your module so that other modules can use it to
 recognize your data-type.
 
+Note that this API is inherently thread-unsafe. See `thread_safety` for more
+details about thread safety in NumPy.
+
 Registering a casting function
 ------------------------------
 

From 395a81dee5be52b2acff817e6f16652d23181fc3 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Fri, 16 Aug 2024 10:56:02 -0600
Subject: [PATCH 978/980] DOC: reword discussion about shared arrays to
 hopefully be clearer

[skip azp][skip actions][skip cirrus]

---
 doc/source/reference/thread_safety.rst | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst
index df806e9e7c5f..84590bfac39c 100644
--- a/doc/source/reference/thread_safety.rst
+++ b/doc/source/reference/thread_safety.rst
@@ -15,15 +15,17 @@ NumPy releases the GIL for many low-level operations, threads that spend most of
 the time in low-level code will run in parallel.
 
 It is possible to share NumPy arrays between threads, but extreme care must be
-taken to avoid creating thread safety issues when mutating shared arrays. If
-two threads simultaneously read from and write to the same array, at best they
-will see inconsistent views of the same array data. It is also possible to crash
-the Python interpreter by, for example, resizing an array while another thread
-is reading from it to compute a ufunc operation.
-
-In the future, we may add locking to ndarray to make working with shared NumPy
-arrays easier, but for now we suggest focusing on read-only access of arrays
-that are shared between threads.
+taken to avoid creating thread safety issues when mutating arrays that are
+shared between multiple threads. If two threads simultaneously read from and
+write to the same array, they will at best produce inconsistent, racy results that
+are not reproducible, let alone correct. It is also possible to crash the Python
+interpreter by, for example, resizing an array while another thread is reading
+from it to compute a ufunc operation.
+
+In the future, we may add locking to ndarray to make writing multithreaded
+algorithms using NumPy arrays safer, but for now we suggest focusing on
+read-only access of arrays that are shared between threads, or adding your own
+locking if you need to combine mutation and multithreading.
 
 Note that operations that *do not* release the GIL will see no performance gains
 from use of the `threading` module, and instead might be better served with

From 85b1cab2ad5f418cca485042478622ce13a497a7 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Sat, 17 Aug 2024 10:19:56 -0600
Subject: [PATCH 979/980] BUG: Allow fitting of degree zero polynomials with
 Polynomial.fit

Backport of #25984.

For degenerate domains in Polynomial.fit (which occur when all
independent datapoints are equal) we expand the domain. This allows
fitting of degree zero polynomials.

Fixes #25982

---
 numpy/polynomial/_polybase.py             |  3 +++
 numpy/polynomial/tests/test_polynomial.py | 11 ++++++++++-
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index 9c345553eedd..65c3ff43dc32 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -1041,6 +1041,9 @@ class domain in NumPy 1.4 and ``None`` in later versions.
""" if domain is None: domain = pu.getdomain(x) + if domain[0] == domain[1]: + domain[0] -= 1 + domain[1] += 1 elif type(domain) is list and len(domain) == 0: domain = cls.domain diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index a0be94c3a6a0..162cb0a9bea0 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -5,11 +5,12 @@ from fractions import Fraction import numpy as np import numpy.polynomial.polynomial as poly +import numpy.polynomial.polyutils as pu import pickle from copy import deepcopy from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, - assert_array_equal, assert_raises_regex) + assert_array_equal, assert_raises_regex, assert_warns) def trim(x): @@ -628,6 +629,14 @@ def test_polyline(self): def test_polyline_zero(self): assert_equal(poly.polyline(3, 0), [3]) + def test_fit_degenerate_domain(self): + p = poly.Polynomial.fit([1], [2], deg=0) + assert_equal(p.coef, [2.]) + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) + assert_almost_equal(p.coef, [2.05]) + with assert_warns(pu.RankWarning): + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) + def test_result_type(self): w = np.array([-1, 1], dtype=np.float32) p = np.polynomial.Polynomial(w, domain=w, window=w) From b6f434f852e9d1ed4f7de9a3a465b38171d54a61 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 18 Aug 2024 09:44:41 -0600 Subject: [PATCH 980/980] REL: Prepare for the NumPy 2.1.0 release [wheel build] - Update 2.1.0-changelog.rst - Update 2.1.0-notes.rst - Update pyproject.toml - Delete release fragments. --- doc/changelog/2.1.0-changelog.rst | 17 ++++++++++++++++- .../upcoming_changes/26897.improvement.rst | 6 ------ .../upcoming_changes/27008.improvement.rst | 8 -------- doc/source/release/2.1.0-notes.rst | 17 +++++++++++++++++ pyproject.toml | 2 +- 5 files changed, 34 insertions(+), 16 deletions(-) delete mode 100644 doc/release/upcoming_changes/26897.improvement.rst delete mode 100644 doc/release/upcoming_changes/27008.improvement.rst diff --git a/doc/changelog/2.1.0-changelog.rst b/doc/changelog/2.1.0-changelog.rst index 99f871a97718..af7f5a3b07c7 100644 --- a/doc/changelog/2.1.0-changelog.rst +++ b/doc/changelog/2.1.0-changelog.rst @@ -8,6 +8,7 @@ names contributed a patch for the first time. * !ogidig5 + * !partev * !vahidmech + +* !h-vetinari * Aaron Meurer * Adrin Jalali + * Agriya Khetarpal @@ -118,7 +119,7 @@ names contributed a patch for the first time. Pull requests merged ==================== -A total of 455 pull requests were merged for this release. +A total of 469 pull requests were merged for this release. * `#12150 `__: ENH: When histogramming data with integer dtype, force bin width... * `#24448 `__: TST: add some tests of np.log for complex input. @@ -575,3 +576,17 @@ A total of 455 pull requests were merged for this release. * `#27162 `__: BLD: use smaller scipy-openblas builds * `#27166 `__: ENH: fix thread-unsafe C API usages * `#27173 `__: MAINT: Bump pythoncapi-compat version. +* `#27176 `__: REL: Prepare for the NumPy 2.1.0rc1 release [wheel build] +* `#27180 `__: DOC: Add release notes for #26897 +* `#27181 `__: DOC: Add release notes for #27008 +* `#27190 `__: BUILD: use a shrunken version of scipy-openblas wheels [wheel... 
+* `#27193 `__: REV: Revert undef I and document it +* `#27196 `__: BUILD: improve download script +* `#27197 `__: MAINT: update default NPY_FEATURE_VERSION after dropping py39 +* `#27200 `__: DOC: add free-threading release notes +* `#27209 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27216 `__: TYP: Fixed & improved type hints for ``numpy.histogram2d`` +* `#27217 `__: TYP: Fix incompatible overrides in the ``numpy._typing._ufunc``... +* `#27229 `__: BUG: Fix ``PyArray_ZeroContiguousBuffer`` (resize) with struct... +* `#27233 `__: DOC: add docs on thread safety in NumPy +* `#27234 `__: BUG: Allow fitting of degree zero polynomials with Polynomial.fit diff --git a/doc/release/upcoming_changes/26897.improvement.rst b/doc/release/upcoming_changes/26897.improvement.rst deleted file mode 100644 index 1b3b327711af..000000000000 --- a/doc/release/upcoming_changes/26897.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -Type hinting ``numpy.polynomial`` ---------------------------------- - -Starting from the 2.1 release, PEP 484 type annotations have been included for -the functions and convenience classes in ``numpy.polynomial`` and its -sub-packages. diff --git a/doc/release/upcoming_changes/27008.improvement.rst b/doc/release/upcoming_changes/27008.improvement.rst deleted file mode 100644 index 47e1090d9067..000000000000 --- a/doc/release/upcoming_changes/27008.improvement.rst +++ /dev/null @@ -1,8 +0,0 @@ -Improved ``numpy.dtypes`` type hints ------------------------------------- - -The type annotations for ``numpy.dtypes`` are now a better reflection of the -runtime: -The ``numpy.dtype`` type-aliases have been replaced with specialized ``dtype`` -*subtypes*, and the previously missing annotations for -``numpy.dtypes.StringDType`` have been added. diff --git a/doc/source/release/2.1.0-notes.rst b/doc/source/release/2.1.0-notes.rst index c591b29e4c24..bb9c71079062 100644 --- a/doc/source/release/2.1.0-notes.rst +++ b/doc/source/release/2.1.0-notes.rst @@ -250,6 +250,23 @@ a python ``ContextVar``, making the context manager thread and async-safe. (`gh-26846 `__) +Type hinting ``numpy.polynomial`` +--------------------------------- +Starting from the 2.1 release, PEP 484 type annotations have been included for +the functions and convenience classes in ``numpy.polynomial`` and its +sub-packages. + +(`gh-26897 `__) + +Improved ``numpy.dtypes`` type hints +------------------------------------ +The type annotations for ``numpy.dtypes`` are now a better reflection of the +runtime: The ``numpy.dtype`` type-aliases have been replaced with specialized +``dtype`` *subtypes*, and the previously missing annotations for +``numpy.dtypes.StringDType`` have been added. + +(`gh-27008 `__) + Performance improvements and changes ==================================== diff --git a/pyproject.toml b/pyproject.toml index 305db1c77ba4..6596535b0f3b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.1.0rc1" +version = "2.1.0" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"}