From d27a1f52a60cc1a68c731e0ae15af7553b358958 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 8 Aug 2024 09:13:20 +0300 Subject: [PATCH 001/101] bump scipy-openblas version --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index e134b0dae82e..1e2d5e804df3 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.27.44.3 +scipy-openblas32==0.3.27.44.4 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index f688bfb6eb3a..ebf1a7dbd4dc 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.27.44.3 -scipy-openblas64==0.3.27.44.3 +scipy-openblas32==0.3.27.44.4 +scipy-openblas64==0.3.27.44.4 From 27f1b44025079ed655d3ebc9c08652e2ffed970b Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 8 Aug 2024 09:14:03 +0300 Subject: [PATCH 002/101] update bundled licenses: reflect scipy-openblas, remove libquadmath from windows --- tools/wheels/LICENSE_linux.txt | 4 ++-- tools/wheels/LICENSE_osx.txt | 4 ++-- tools/wheels/LICENSE_win32.txt | 27 +++------------------------ 3 files changed, 7 insertions(+), 28 deletions(-) diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt index a5b5ae5c22e6..021b4b0289e7 100644 --- a/tools/wheels/LICENSE_linux.txt +++ b/tools/wheels/LICENSE_linux.txt @@ -5,7 +5,7 @@ This binary distribution of NumPy also bundles the following software: Name: OpenBLAS -Files: numpy.libs/libopenblas*.so +Files: numpy.libs/libscipy_openblas*.so Description: bundled as a dynamically linked library Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause @@ -41,7 +41,7 @@ License: BSD-3-Clause 
Name: LAPACK -Files: numpy.libs/libopenblas*.so +Files: numpy.libs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause-Attribution diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt index 1ebd5663d02c..81889131cfa7 100644 --- a/tools/wheels/LICENSE_osx.txt +++ b/tools/wheels/LICENSE_osx.txt @@ -4,7 +4,7 @@ This binary distribution of NumPy also bundles the following software: Name: OpenBLAS -Files: numpy/.dylibs/libopenblas*.so +Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled as a dynamically linked library Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause @@ -40,7 +40,7 @@ License: BSD-3-Clause Name: LAPACK -Files: numpy/.dylibs/libopenblas*.so +Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause-Attribution diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt index f8eaaf1cae25..a2ccce66fbe5 100644 --- a/tools/wheels/LICENSE_win32.txt +++ b/tools/wheels/LICENSE_win32.txt @@ -5,7 +5,7 @@ This binary distribution of NumPy also bundles the following software: Name: OpenBLAS -Files: numpy.libs\libopenblas*.dll +Files: numpy.libs\libscipy_openblas*.dll Description: bundled as a dynamically linked library Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause @@ -41,7 +41,7 @@ License: BSD-3-Clause Name: LAPACK -Files: numpy.libs\libopenblas*.dll +Files: numpy.libs\libscipy_openblas*.dll Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause-Attribution @@ -96,7 +96,7 @@ License: BSD-3-Clause-Attribution Name: GCC runtime library -Files: numpy.libs\libgfortran*.dll +Files: numpy.libs\libscipy_openblas*.dll Description: statically linked to files compiled with gcc Availability: 
https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran License: GPL-3.0-with-GCC-exception @@ -879,24 +879,3 @@ the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . -Name: libquadmath -Files: numpy.libs\libopenb*.dll -Description: statically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath -License: LGPL-2.1-or-later - - GCC Quad-Precision Math Library - Copyright (C) 2010-2019 Free Software Foundation, Inc. - Written by Francois-Xavier Coudert - - This file is part of the libquadmath library. - Libquadmath is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - Libquadmath is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. 
- https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html From 86361ae099f4d0273a820822ddf208d297fe0adf Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 8 Aug 2024 09:19:28 +0300 Subject: [PATCH 003/101] add test for issue 27036 --- numpy/linalg/tests/test_regression.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index 8cac195aa864..91051c0eca4f 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -161,3 +161,18 @@ def test_matrix_rank_rtol_argument(self, rtol): x = np.zeros((4, 3, 2)) res = np.linalg.matrix_rank(x, rtol=rtol) assert res.shape == (4,) + + def test_openblas_threading(self): + # gh-27036 + # Test whether matrix multiplication involving a large matrix always + # gives the same (correct) answer + x = np.arange(500000, dtype=np.float64) + src = np.vstack((x, -10*x)).T + matrix = np.array([[0, 1], [1, 0]]) + expected = np.vstack((-10*x, x)).T # src @ matrix + for i in range(200): + result = src @ matrix + mismatches = (~np.isclose(result, expected)).sum() + if mismatches != 0: + assert False, ("unexpected result from matmul, " + "probably due to OpenBLAS threading issues") From 0b2d77940663c408133aa27d1605a0a87471bde1 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 8 Aug 2024 20:52:47 +0200 Subject: [PATCH 004/101] BUG: Do not accidentally store dtype metadata in ``np.save`` We had logic in place to drop (most) metadata, but the change had a small bug: During saving, we were still using the one with metadata... 
Maybe doesn't quite close it, but big enough of an improvement for now, I think, so Closes gh-14142 --- numpy/lib/format.py | 2 ++ numpy/lib/tests/test_format.py | 34 ++++++++++++++++------------------ numpy/lib/tests/test_utils.py | 2 +- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 8e14dfe4bcab..a90403459848 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -271,6 +271,8 @@ def dtype_to_descr(dtype): warnings.warn("metadata on a dtype is not saved to an npy/npz. " "Use another format (such as pickle) to store it.", UserWarning, stacklevel=2) + dtype = new_dtype + if dtype.names is not None: # This is a record array. The .descr is fine. XXX: parts of the # record array with an empty name, like padding bytes, still get diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 31352864b7e2..bb262e048cba 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -998,32 +998,30 @@ def test_header_growth_axis(): assert len(fp.getvalue()) == expected_header_length -@pytest.mark.parametrize('dt, fail', [ - (np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3', - metadata={'some': 'stuff'})]}), True), - (np.dtype(int, metadata={'some': 'stuff'}), False), - (np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False), +@pytest.mark.parametrize('dt', [ + np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3', + metadata={'some': 'stuff'})]}), + np.dtype(int, metadata={'some': 'stuff'}), + np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), # recursive: metadata on the field of a dtype - (np.dtype({'names': ['a', 'b'], 'formats': [ + np.dtype({'names': ['a', 'b'], 'formats': [ float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]}) - ]}), False) + ]}), ]) @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), reason="PyPy bug in error formatting") -def 
test_metadata_dtype(dt, fail): +def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) buf = BytesIO() with assert_warns(UserWarning): np.save(buf, arr) buf.seek(0) - if fail: - with assert_raises(ValueError): - np.load(buf) - else: - arr2 = np.load(buf) - # BUG: assert_array_equal does not check metadata - from numpy.lib._utils_impl import drop_metadata - assert_array_equal(arr, arr2) - assert drop_metadata(arr.dtype) is not arr.dtype - assert drop_metadata(arr2.dtype) is arr2.dtype + + # Loading should work (metadata was stripped): + arr2 = np.load(buf) + # BUG: assert_array_equal does not check metadata + from numpy.lib._utils_impl import drop_metadata + assert_array_equal(arr, arr2) + assert drop_metadata(arr.dtype) is not arr.dtype + assert drop_metadata(arr2.dtype) is arr2.dtype diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index e2f72ac90c92..644912d941e3 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -43,7 +43,7 @@ def _compare_dtypes(dt1, dt2): assert dt_m.metadata is None assert dt_m['l1'].metadata is None assert dt_m['l1']['l2'].metadata is None - + # alignment dt = np.dtype([('x', ' Date: Thu, 8 Aug 2024 23:34:38 +0300 Subject: [PATCH 005/101] BLD: use smaller scipy-openblas builds --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 1e2d5e804df3..d2940e2d65bc 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.27.44.4 +scipy-openblas32==0.3.27.44.5 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index ebf1a7dbd4dc..965fdb8faadf 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in 
sync with ci32_requirements.txt -scipy-openblas32==0.3.27.44.4 -scipy-openblas64==0.3.27.44.4 +scipy-openblas32==0.3.27.44.5 +scipy-openblas64==0.3.27.44.5 From 37aba675c96c1852ecb420fdb47f25900a0665b1 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 8 Aug 2024 23:46:32 +0300 Subject: [PATCH 006/101] add release note --- doc/release/upcoming_changes/27147.performance.rst | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 doc/release/upcoming_changes/27147.performance.rst diff --git a/doc/release/upcoming_changes/27147.performance.rst b/doc/release/upcoming_changes/27147.performance.rst new file mode 100644 index 000000000000..2cea7780f41c --- /dev/null +++ b/doc/release/upcoming_changes/27147.performance.rst @@ -0,0 +1,8 @@ +* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on + benchmarking, there are 5 clusters of performance around these kernels: + ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. + +* OpenBLAS on windows is linked without quadmath, simplfying licensing + +* Due to a regression in OpenBLAS on windows, the performance improvements + when using multiple threads for OpenBLAS 0.3.26 were reverted. From a57e45beedb9fa52a09ea45645a46365c3c5a838 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 9 Aug 2024 15:18:59 -0600 Subject: [PATCH 007/101] ENH: fix thread-unsafe C API usages (#27145) Ref #26159 See also the CPython HOWTO on this topic: https://docs.python.org/3.13/howto/free-threading-extensions.html#freethreading-extensions-howto. The remaining usages of PyDict_GetItem and PyDict_Next are all around the fields attribute of structured dtypes. I'm pretty sure that dictionary is effectively frozen after the DType is constructed, so I don't worry about those uses. It's not straightforward to write tests for this, I'm just applying static refactorings in places where the refactoring shouldn't introduce new reference counting bugs. 
* ENH: fix thread-unsafe C API usages * ENH: use critical sections in einsum * BUG: fix error handling in loadtxt C code * revert einsum changes --- numpy/_core/src/multiarray/array_coercion.c | 16 +++++++------- numpy/_core/src/multiarray/textreading/rows.c | 21 ++++++++++++++++--- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index 69da09875bfb..0cffcc6bab22 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -6,6 +6,7 @@ #include #include "numpy/npy_3kcompat.h" +#include "npy_pycompat.h" #include "lowlevel_strided_loops.h" #include "numpy/arrayobject.h" @@ -224,24 +225,23 @@ npy_discover_dtype_from_pytype(PyTypeObject *pytype) PyObject *DType; if (pytype == &PyArray_Type) { - DType = Py_None; + DType = Py_NewRef(Py_None); } else if (pytype == &PyFloat_Type) { - DType = (PyObject *)&PyArray_PyFloatDType; + DType = Py_NewRef((PyObject *)&PyArray_PyFloatDType); } else if (pytype == &PyLong_Type) { - DType = (PyObject *)&PyArray_PyLongDType; + DType = Py_NewRef((PyObject *)&PyArray_PyLongDType); } else { - DType = PyDict_GetItem(_global_pytype_to_type_dict, - (PyObject *)pytype); + int res = PyDict_GetItemRef(_global_pytype_to_type_dict, + (PyObject *)pytype, (PyObject **)&DType); - if (DType == NULL) { - /* the python type is not known */ + if (res <= 0) { + /* the python type is not known or an error was set */ return NULL; } } - Py_INCREF(DType); assert(DType == Py_None || PyObject_TypeCheck(DType, (PyTypeObject *)&PyArrayDTypeMeta_Type)); return (PyArray_DTypeMeta *)DType; } diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index 8fe13d0d3532..4ca1cc00e9f7 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -58,13 +58,18 @@ create_conv_funcs( PyObject *key, *value; Py_ssize_t pos = 
0; + int error = 0; +#if Py_GIL_DISABLED + Py_BEGIN_CRITICAL_SECTION(converters); +#endif while (PyDict_Next(converters, &pos, &key, &value)) { Py_ssize_t column = PyNumber_AsSsize_t(key, PyExc_IndexError); if (column == -1 && PyErr_Occurred()) { PyErr_Format(PyExc_TypeError, "keys of the converters dictionary must be integers; " "got %.100R", key); - goto error; + error = 1; + break; } if (usecols != NULL) { /* @@ -92,7 +97,8 @@ create_conv_funcs( PyErr_Format(PyExc_ValueError, "converter specified for column %zd, which is invalid " "for the number of fields %zd.", column, num_fields); - goto error; + error = 1; + break; } if (column < 0) { column += num_fields; @@ -102,11 +108,20 @@ create_conv_funcs( PyErr_Format(PyExc_TypeError, "values of the converters dictionary must be callable, " "but the value associated with key %R is not", key); - goto error; + error = 1; + break; } Py_INCREF(value); conv_funcs[column] = value; } +#if Py_GIL_DISABLED + Py_END_CRITICAL_SECTION(); +#endif + + if (error) { + goto error; + } + return conv_funcs; error: From 10cca72cc1d48eb130db9475a7f475e418032e64 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 10 Aug 2024 10:08:43 -0600 Subject: [PATCH 008/101] MAINT: Bump pythoncapi-compat version. 
--- numpy/_core/src/common/pythoncapi-compat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 01341acbbef0..ea1f7f6eac63 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 01341acbbef0ca85cf2fa31b63307ddf4d9a87fb +Subproject commit ea1f7f6eac63ff401937515638252402ff33dccb From 75d34312c830b7cb05612495e9ce0ed7f162927b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 21 Jul 2024 12:34:24 -0600 Subject: [PATCH 009/101] REL: Prepare for the NumPy 2.1.0rc1 release [wheel build] - Update .mailmap - Create 2.1.0-changelog.rst - Update 2.1.0-notes.rst - Update pyproject.toml - Delete release fragments --- .mailmap | 119 ++-- doc/changelog/2.1.0-changelog.rst | 577 ++++++++++++++++++ .../upcoming_changes/12150.improvement.rst | 5 - .../upcoming_changes/26081.improvement.rst | 11 - doc/release/upcoming_changes/26103.c_api.rst | 15 - .../upcoming_changes/26268.expired.rst | 1 - doc/release/upcoming_changes/26285.change.rst | 13 - .../upcoming_changes/26285.performance.rst | 5 - .../upcoming_changes/26292.new_feature.rst | 1 - doc/release/upcoming_changes/26313.change.rst | 2 - .../upcoming_changes/26388.performance.rst | 3 - .../upcoming_changes/26452.deprecation.rst | 4 - .../upcoming_changes/26501.new_feature.rst | 2 - .../upcoming_changes/26579.new_function.rst | 6 - .../upcoming_changes/26580.new_feature.rst | 1 - .../upcoming_changes/26611.expired.rst | 2 - .../upcoming_changes/26611.new_feature.rst | 2 - .../upcoming_changes/26656.improvement.rst | 5 - .../upcoming_changes/26724.new_feature.rst | 7 - .../upcoming_changes/26750.improvement.rst | 12 - doc/release/upcoming_changes/26766.change.rst | 2 - doc/release/upcoming_changes/26842.c_api.rst | 5 - .../upcoming_changes/26846.improvement.rst | 6 - doc/release/upcoming_changes/26908.c_api.rst | 8 - 
.../upcoming_changes/26981.new_feature.rst | 9 - .../upcoming_changes/27076.deprecation.rst | 3 - doc/release/upcoming_changes/27091.change.rst | 24 - .../upcoming_changes/27147.performance.rst | 8 - doc/source/release/2.1.0-notes.rst | 301 ++++++++- pyproject.toml | 3 +- 30 files changed, 943 insertions(+), 219 deletions(-) create mode 100644 doc/changelog/2.1.0-changelog.rst delete mode 100644 doc/release/upcoming_changes/12150.improvement.rst delete mode 100644 doc/release/upcoming_changes/26081.improvement.rst delete mode 100644 doc/release/upcoming_changes/26103.c_api.rst delete mode 100644 doc/release/upcoming_changes/26268.expired.rst delete mode 100644 doc/release/upcoming_changes/26285.change.rst delete mode 100644 doc/release/upcoming_changes/26285.performance.rst delete mode 100644 doc/release/upcoming_changes/26292.new_feature.rst delete mode 100644 doc/release/upcoming_changes/26313.change.rst delete mode 100644 doc/release/upcoming_changes/26388.performance.rst delete mode 100644 doc/release/upcoming_changes/26452.deprecation.rst delete mode 100644 doc/release/upcoming_changes/26501.new_feature.rst delete mode 100644 doc/release/upcoming_changes/26579.new_function.rst delete mode 100644 doc/release/upcoming_changes/26580.new_feature.rst delete mode 100644 doc/release/upcoming_changes/26611.expired.rst delete mode 100644 doc/release/upcoming_changes/26611.new_feature.rst delete mode 100644 doc/release/upcoming_changes/26656.improvement.rst delete mode 100644 doc/release/upcoming_changes/26724.new_feature.rst delete mode 100644 doc/release/upcoming_changes/26750.improvement.rst delete mode 100644 doc/release/upcoming_changes/26766.change.rst delete mode 100644 doc/release/upcoming_changes/26842.c_api.rst delete mode 100644 doc/release/upcoming_changes/26846.improvement.rst delete mode 100644 doc/release/upcoming_changes/26908.c_api.rst delete mode 100644 doc/release/upcoming_changes/26981.new_feature.rst delete mode 100644 
doc/release/upcoming_changes/27076.deprecation.rst delete mode 100644 doc/release/upcoming_changes/27091.change.rst delete mode 100644 doc/release/upcoming_changes/27147.performance.rst diff --git a/.mailmap b/.mailmap index 143ad1c4a9b2..b073f12c416b 100644 --- a/.mailmap +++ b/.mailmap @@ -7,53 +7,55 @@ # # This file is up-to-date if the command git log --format="%aN <%aE>" | sort -u # gives no duplicates. -@8bitmp3 <19637339+8bitmp3@users.noreply.github.com> -@Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> -@DWesl <22566757+DWesl@users.noreply.github.com> -@Endolith -@GalaxySnail -@Illviljan <14371165+Illviljan@users.noreply.github.com> -@LSchroefl <65246829+LSchroefl@users.noreply.github.com> -@Lbogula -@Lisa <34400837+lyzlisa@users.noreply.github.com> -@Patrick <39380924+xamm@users.noreply.github.com> -@Scian <65375075+hoony6134@users.noreply.github.com> -@Searchingdays -@amagicmuffin <2014wcheng@gmail.com> -@code-review-doctor -@cook-1229 <70235336+cook-1229@users.noreply.github.com> -@dg3192 <113710955+dg3192@users.noreply.github.com> -@ellaella12 -@ellaella12 <120079323+ellaella12@users.noreply.github.com> -@h-vetinari -@h6197627 <44726212+h6197627@users.noreply.github.com> -@jbCodeHub -@juztamau5 -@legoffant <58195095+legoffant@users.noreply.github.com> -@liang3zy22 <35164941+liang3zy22@users.noreply.github.com> -@luzpaz -@luzpaz -@matoro -@mcp292 -@mgunyho <20118130+mgunyho@users.noreply.github.com> -@msavinash <73682349+msavinash@users.noreply.github.com> -@mykykh <49101849+mykykh@users.noreply.github.com> -@partev -@pkubaj -@pmvz -@pojaghi <36278217+pojaghi@users.noreply.github.com> -@pratiklp00 -@sfolje0 -@spacescientist -@stefan6419846 -@stefan6419846 <96178532+stefan6419846@users.noreply.github.com> -@tajbinjohn -@tautaus -@undermyumbrella1 -@xoviat <49173759+xoviat@users.noreply.github.com> -@xoviat <49173759+xoviat@users.noreply.github.com> -@yan-wyb -@yetanothercheer +!8bitmp3 <19637339+8bitmp3@users.noreply.github.com> 
+!Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> +!DWesl <22566757+DWesl@users.noreply.github.com> +!Endolith +!GalaxySnail +!Illviljan <14371165+Illviljan@users.noreply.github.com> +!LSchroefl <65246829+LSchroefl@users.noreply.github.com> +!Lbogula +!Lisa <34400837+lyzlisa@users.noreply.github.com> +!Patrick <39380924+xamm@users.noreply.github.com> +!Scian <65375075+hoony6134@users.noreply.github.com> +!Searchingdays +!amagicmuffin <2014wcheng@gmail.com> +!code-review-doctor +!cook-1229 <70235336+cook-1229@users.noreply.github.com> +!dg3192 <113710955+dg3192@users.noreply.github.com> +!ellaella12 +!ellaella12 <120079323+ellaella12@users.noreply.github.com> +!h-vetinari +!h6197627 <44726212+h6197627@users.noreply.github.com> +!jbCodeHub +!juztamau5 +!legoffant <58195095+legoffant@users.noreply.github.com> +!liang3zy22 <35164941+liang3zy22@users.noreply.github.com> +!luzpaz +!luzpaz +!matoro +!mcp292 +!mgunyho <20118130+mgunyho@users.noreply.github.com> +!msavinash <73682349+msavinash@users.noreply.github.com> +!mykykh <49101849+mykykh@users.noreply.github.com> +!ogidig5 <82846833+ogidig5@users.noreply.github.com> +!partev +!pkubaj +!pmvz +!pojaghi <36278217+pojaghi@users.noreply.github.com> +!pratiklp00 +!sfolje0 +!spacescientist +!stefan6419846 +!stefan6419846 <96178532+stefan6419846@users.noreply.github.com> +!tajbinjohn +!tautaus +!undermyumbrella1 +!vahidmech +!xoviat <49173759+xoviat@users.noreply.github.com> +!xoviat <49173759+xoviat@users.noreply.github.com> +!yan-wyb +!yetanothercheer Aaron Baecker Adrin Jalali Arun Kota @@ -64,6 +66,7 @@ Aditi Saluja <136131452+salujaditi14@users.noreply.github.com> Andrei Batomunkuev Ajay DS Ajay DS +Ajay Kumar Janapareddi Alan Fontenot Alan Fontenot <36168460+logeaux@users.noreply.github.com> Abdul Muneer @@ -117,6 +120,7 @@ Andrea Sangalli <53617841+and-sang@users.noreply.github.c Andreas Klöckner Andreas Schwab Andrei Kucharavy +Andrej Zhilenkov Andrew Lawson Anirudh Subramanian Anne Archibald 
@@ -127,9 +131,11 @@ Antoine Pitrou Anton Prosekin Anže Starič Arfy Slowy +Arnaud Ma Aron Ahmadia Arun Kota Arun Kota +Arun Pa Arun Palaniappen Arun Persaud Ashutosh Singh @@ -171,6 +177,8 @@ Bui Duc Minh <41239569+Mibu287@users.noreply.github.co Caio Agiani Carl Kleffner Carl Leake +Carlos Henrique Hermanny Moreira da Silva +Carlos Henrique Hermanny Moreira da Silva <121122527+carlosilva10260@users.noreply.github.com> Cédric Hannotier Charles Stern <62192187+cisaacstern@users.noreply.github.com> Chiara Marmo @@ -304,6 +312,7 @@ Giannis Zapantis Guillaume Peillex Jack J. Woehr Jacob M. Casey +Jakob Stevens Haas <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Jaime Fernandez Jaime Fernandez Jaime Fernandez @@ -314,6 +323,8 @@ Jake VanderPlas Jakob Jakobson Jakob Jakobson <43045863+jakobjakobson13@users.noreply.github.com> James Bourbeau +James Joseph Thomas +James Joseph Thomas quotuva James Oliver <46758370+jamesoliverh@users.noreply.github.com> James Webber Jamie Macey @@ -356,8 +367,11 @@ Joseph Fox-Rabinovitz Joshua Himmens Joyce Brum +Joren Hammudoglu Jory Klaverstijn Jory Klaverstijn <63673224+JoryKlaverstijn@users.noreply.github.com> +Julia Poo +Julia Poo <57632293+JuliaPoo@users.noreply.github.com> Julian Taylor Julian Taylor Julian Taylor @@ -371,6 +385,8 @@ Kai Striega Kasia Leszek Kasia Leszek <39829548+katleszek@users.noreply.github.com> Karan Dhir +Karthik Gangula <56480632+gangula-karthik@users.noreply.github.com> +Karthik Kaiplody Keller Meier Kenny Huynh Kevin Granados @@ -395,6 +411,7 @@ Lars Grüter Leona Taric Leona Taric <92495067+LeonaTaric@users.noreply.github.com> Leonardus Chen +Liangyu Zhang Licht Takeuchi Lorenzo Mammana Lillian Zha @@ -472,6 +489,8 @@ Michel Fruchart Miki Watanabe (渡邉 美希) Miles Cranmer +Milica Dančuk +Milica Dančuk love-bees <33499899+love-bees@users.noreply.github.com> Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> @@ -483,6 +502,8 @@ Mukulika Pahari 
<60316606+Mukulikaa@users.noreply.git Munira Alduraibi Namami Shanker Namami Shanker NamamiShanker +Nathan Goldbaum +Nathan Goldbaum Nathaniel J. Smith Naveen Arunachalam naveenarun Neil Girdhar @@ -514,6 +535,8 @@ Pat Miller patmiller Paul Ivanov Paul Ivanov Paul Jacobson +Paul Juma Otieno +Paul Juma Otieno <103896399+otieno-juma@users.noreply.github.com> Paul Reece Paul YS Lee Paul Pey Lian Lim @@ -644,6 +667,8 @@ Vrinda Narayan Vrinda Narayan Vrinda Narayan <48102157+vrindaaa@users.noreply.github.com> Wansoo Kim +Warrick Ball +Warrick Ball Warren Weckesser Warren Weckesser Weitang Li diff --git a/doc/changelog/2.1.0-changelog.rst b/doc/changelog/2.1.0-changelog.rst new file mode 100644 index 000000000000..99f871a97718 --- /dev/null +++ b/doc/changelog/2.1.0-changelog.rst @@ -0,0 +1,577 @@ + +Contributors +============ + +A total of 110 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !ogidig5 + +* !partev +* !vahidmech + +* Aaron Meurer +* Adrin Jalali + +* Agriya Khetarpal +* Ajay Kumar Janapareddi + +* Alex Herbert + +* Andras Deak +* Andrej Zhilenkov + +* Andrew Nelson +* Anne Gunn + +* Antony Lee +* Arnaud Ma + +* Arun Kannawadi + +* Arun Pa + +* Bas van Beek +* Ben Woodruff + +* Bruno Oliveira + +* Carlos Henrique Hermanny Moreira da Silva + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christian Lorentzen +* Christopher Sidebottom +* Christopher Titchen + +* Clément Robert +* Cobalt Yang + +* Devyani Chavan + +* Dimitri Papadopoulos Orfanos +* Ebigide Jude + +* Eric Xie + +* Evgeni Burovski +* Fabian Vogt + +* Francisco Sousa + +* GUAN MING + +* Gabriel Fougeron + +* Gagandeep Singh +* Giovanni Del Monte + +* Gonzalo Tornaría + +* Gonçalo Bárias + +* Hugo van Kemenade +* Jakob Stevens Haas + +* Jakob Unfried + +* James Joseph Thomas + +* Jean Lecordier + +* Joren Hammudoglu + +* Joris Van den Bossche +* Julia Poo + +* Justus Magin +* Jyn Spring 琴春 +* KIU Shueng Chuan +* 
Karthik Gangula + +* Karthik Kaiplody + +* Kevin Sheppard +* Kristoffer Pedersen + +* Leo Singer +* Liang Yan +* Liangyu Zhang + +* Lucas Colley +* Luiz Eduardo Amaral + +* Lysandros Nikolaou +* Marcel Loose + +* Marten van Kerkwijk +* Mateusz Sokół +* Matt Haberland +* Matt Thompson + +* Matthew Roeschke + +* Matthew Thompson + +* Matthias Bussonnier +* Matti Picus +* Melissa Weber Mendonça +* Milica Dančuk + +* Moritz Schreiber + +* Nathan Goldbaum +* Olivier Grisel +* Patrick J. Roddy + +* Paul Juma Otieno + +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Raquel Braunschweig + +* Robert Kern +* Rohit Goswami +* Romain Geissler + +* Ross Barnowski +* Rostan Tabet + +* Sam Morley + +* Sayed Adel +* Sean Cheah +* Sebastian Berg +* Serge Guelton +* Slobodan + +* Stefan van der Walt +* Thomas A Caswell +* Thomas Li +* Timo Röhling + +* Tsvika Shapira + +* Tuhin Sharma + +* Tyler Reddy +* Victor Eijkhout + +* Warren Weckesser +* Warrick Ball +* Xiangyi Wang + +* Yair Chuchem + +* Yang Liu + +* Yannik Wicke + +* Yevhen Amelin + +* Yuki K + +Pull requests merged +==================== + +A total of 455 pull requests were merged for this release. + +* `#12150 `__: ENH: When histogramming data with integer dtype, force bin width... +* `#24448 `__: TST: add some tests of np.log for complex input. +* `#25704 `__: DOC: quantile: correct/simplify documentation +* `#25705 `__: DOC: Add documentation explaining our promotion rules +* `#25781 `__: ENH: Convert fp32 sin/cos from C universal intrinsics to C++... 
+* `#25908 `__: ENH: Add center/ljust/rjust/zfill ufuncs for unicode and bytes +* `#25913 `__: NEP: NEP 55 updates and add @mhvk as an author +* `#25963 `__: BUG: Fix bug in numpy.pad() +* `#25964 `__: CI: fix last docbuild warnings +* `#25970 `__: MAINT: Prepare main for NumPy 2.1.0 development +* `#25971 `__: DOC: Fix a note section markup in ``dtype.rst`` +* `#25972 `__: DOC: Fix module setting of ``MaskedArray`` +* `#25974 `__: BUG: Raise error for negative-sized fixed-width dtype +* `#25975 `__: BUG: Fixes np.put receiving empty array causes endless loop +* `#25981 `__: BLD: push a tag builds a wheel +* `#25985 `__: BLD: omit pp39-macosx_arm64 from matrix +* `#25988 `__: DOC: Remove unused parameter description +* `#25990 `__: CI: clean up some unused `choco install` invocations +* `#25995 `__: CI: don't use ``fetch-tags`` in wheel build jobs +* `#25999 `__: BUG: fix kwarg handling in assert_warn [skip cirrus][skip azp] +* `#26000 `__: BUG: Filter out broken Highway platform +* `#26003 `__: MAINT: Bump pypa/cibuildwheel from 2.16.5 to 2.17.0 +* `#26005 `__: DOC: indicate stringdtype support in docstrings for string operations +* `#26006 `__: TST: remove usage of ProcessPoolExecutor in stringdtype tests +* `#26007 `__: MAINT: Remove sdist task from pavement.py +* `#26011 `__: DOC: mention the ``exceptions`` namespace in the 2.0.0 release... +* `#26012 `__: ENH: install StringDType promoter for add +* `#26014 `__: MAINT: remove the now-unused ``NPY_NO_SIGNAL`` +* `#26015 `__: MAINT: remove now-unused ``NPY_USE_C99_FORMAT`` +* `#26016 `__: MAINT: handle ``NPY_ALLOW_THREADS`` and related build option... +* `#26017 `__: MAINT: avoid use of flexible array member in public header +* `#26024 `__: BUG: raise error trying to coerce object arrays containing timedelta64('NaT')... 
+* `#26025 `__: BUG: fix reference count leak in __array__ internals +* `#26027 `__: BUG: add missing error handling in string to int cast internals +* `#26033 `__: MAINT: Remove partition and split-like functions from numpy.strings +* `#26045 `__: ENH: Optimize np.power for integer type +* `#26055 `__: ENH: Optimize np.power(x, 2) for double and float type +* `#26063 `__: MAINT,API: Const qualify some new API (mostly new DType API) +* `#26064 `__: MAINT: Make PyArrayMultiIterObject struct "smaller" +* `#26066 `__: BUG: Allow the new string dtype summation to work +* `#26067 `__: DOC: note stringdtype output support in np.strings docstrings +* `#26070 `__: DOC clarifications on debugging numpy +* `#26071 `__: BUG: fix logic error in stringdtype maximum/minimum ufunc +* `#26080 `__: BUG: adapt cython files to new complex declarations +* `#26081 `__: TYP: Make array _ShapeType bound and covariant +* `#26082 `__: ENH: Add partition/rpartition ufunc for string dtypes +* `#26083 `__: MAINT: Bump actions/cache from 4.0.1 to 4.0.2 +* `#26089 `__: TYP: Adjust typing for ``np.random.integers`` and ``np.random.randint`` +* `#26090 `__: API: Require reduce promoters to start with None to match +* `#26095 `__: MAINT: Bump actions/dependency-review-action from 4.1.3 to 4.2.3 +* `#26097 `__: DOC: Mention ``copy=True`` for ``__array__`` method in the migration... +* `#26099 `__: DOC: fix typo in doc/source/user/absolute_beginners.rst +* `#26103 `__: API: Default to hidden visibility for API tables +* `#26105 `__: MAINT: install all-string promoter for multiply +* `#26108 `__: MAINT: Remove unnecessarily defensive code from dlpack deleter +* `#26112 `__: TST: fix incorrect dtype in test +* `#26113 `__: BLD: Do not use -O3 flag when building in debug mode +* `#26116 `__: ENH: inherit numerical dtypes from abstract ones. 
+* `#26119 `__: BUG: fix reference counting error in stringdtype setup +* `#26123 `__: BUG: update pocketfft to unconditionaly disable use of aligned_alloc +* `#26125 `__: DOC: Bump pydata-sphinx-theme version +* `#26128 `__: DOC: Update absolute_beginners.rst +* `#26129 `__: MAINT: add missing noexcept clauses +* `#26130 `__: ENH: Optimize performance of np.atleast_1d +* `#26133 `__: MAINT: Bump actions/dependency-review-action from 4.2.3 to 4.2.4 +* `#26134 `__: CI, BLD: Push NumPy's Emscripten/Pyodide wheels nightly to Anaconda.org... +* `#26135 `__: BUG: masked array division should ignore all FPEs in mask calculation +* `#26136 `__: BUG: fixed datetime64[ns] conversion issue in numpy.vectorize,... +* `#26138 `__: MAINT: Bump actions/setup-python from 5.0.0 to 5.1.0 +* `#26139 `__: MAINT: Bump actions/dependency-review-action from 4.2.4 to 4.2.5 +* `#26142 `__: BUG,MAINT: Fix __array__ bugs and simplify code +* `#26147 `__: BUG: introduce PyArray_SafeCast to fix issues around stringdtype... 
+* `#26149 `__: MAINT: Escalate import warning to an import error +* `#26151 `__: BUG: Fix test_impossible_feature_enable failing without BASELINE_FEAT +* `#26155 `__: NEP: add NEP 56 mailing list resolution +* `#26160 `__: ENH: Improve performance of np.broadcast_arrays and np.broadcast_shapes +* `#26162 `__: BUG: Infinite Loop in numpy.base_repr +* `#26168 `__: DOC: mention np.lib.NumPyVersion in the 2.0 migration guide +* `#26172 `__: DOC, TST: make ``numpy.version`` officially public +* `#26174 `__: MAINT: Fix failure in routines.version.rst +* `#26182 `__: DOC: Update absolute_beginners.rst +* `#26185 `__: MAINT: Update Pyodide to 0.25.1 +* `#26187 `__: TST: Use platform.machine() for improved portability on riscv64 +* `#26189 `__: MNT: use pythoncapi_compat.h in npy_compat.h +* `#26190 `__: BUG: fix reference counting error in wrapping_method_resolve_descriptors +* `#26207 `__: TST: account for immortal objects in test_iter_refcount +* `#26210 `__: API: Readd ``np.bool_`` typing stub +* `#26212 `__: BENCH: Add benchmarks for np.power(x,2) and np.power(x,0.5) +* `#26213 `__: MNT: try updating pythoncapi-compat +* `#26215 `__: API: Enforce one copy for ``__array__`` when ``copy=True`` +* `#26219 `__: ENH: Enable RVV CPU feature detection +* `#26222 `__: MAINT: Drop Python 3.9 +* `#26227 `__: MAINT: utilize ufunc API const correctness internally +* `#26229 `__: TST: skip limited API test on nogil python build +* `#26232 `__: MAINT: fix typo in _add_newdoc_ufunc docstring +* `#26235 `__: Update numpy.any documentation example +* `#26237 `__: MAINT: Update ``array-api-tests`` job +* `#26239 `__: DOC: add versionadded for copy keyword in np.asarray docstring +* `#26241 `__: DOC: Fixup intp/uintp documentation for ssize_t/size_t changes +* `#26245 `__: DOC: Update ``__array__`` ``copy`` keyword docs +* `#26246 `__: MNT: migrate PyList_GetItem usages to PyList_GetItemRef +* `#26248 `__: MAINT,BUG: Robust string meson template substitution +* `#26251 `__: MNT: disable 
the allocator cache for nogil builds +* `#26258 `__: BLD: update to OpenBLAS 0.3.27 +* `#26260 `__: BUG: Ensure seed sequences are restored through pickling +* `#26261 `__: ENH: introduce a notion of "compatible" stringdtype instances +* `#26263 `__: MAINT: fix typo +* `#26264 `__: MAINT: fix typo in #include example +* `#26267 `__: MAINT: Update URL in nep 0014 - domain change +* `#26268 `__: API: Disallow 0D input arrays in ``nonzero`` +* `#26270 `__: BUG: ensure np.vectorize doesn't truncate fixed-width strings +* `#26273 `__: ENH: Bump Highway to HEAD and remove platform filter +* `#26274 `__: BLD: use install-tags to optionally install tests +* `#26280 `__: ENH: Speedup clip for floating point +* `#26281 `__: BUG: Workaround for Intel Compiler mask conversion bug +* `#26282 `__: MNT: replace _PyDict_GetItemStringWithError with PyDict_GetItemStringRef +* `#26284 `__: TST: run the smoke tests on more python versions +* `#26285 `__: ENH: Decrease wall time of ``ma.cov`` and ``ma.corrcoef`` +* `#26286 `__: BLD: ensure libnpymath and highway static libs use hidden visibility +* `#26292 `__: API: Add ``shape`` and ``copy`` arguments to ``numpy.reshape`` +* `#26294 `__: MNT: disable the coercion cache for the nogil build +* `#26295 `__: CI: add llvm/clang sanitizer tests +* `#26299 `__: MAINT: Pin sphinx to version 7.2.6 +* `#26302 `__: BLD: use newer openblas wheels [wheel build] +* `#26303 `__: DOC: add explanation of dtype to parameter values for np.append +* `#26304 `__: MAINT: address improper error handling and cleanup for ``spin`` +* `#26309 `__: MAINT: Bump actions/upload-artifact from 4.3.1 to 4.3.2 +* `#26311 `__: DOC: Follow-up fixes for new theme +* `#26313 `__: MAINT: Cleanup ``vecdot``'s signature, typing, and importing +* `#26317 `__: BUG: use PyArray_SafeCast in array_astype +* `#26319 `__: BUG: fix spin bench not running on Windows +* `#26320 `__: DOC: Add replacement NEP links in superseded, replaced-by fields +* `#26322 `__: DOC: Documentation and 
examples for conversion of np.timedelta64... +* `#26324 `__: BUG: Fix invalid constructor in string_fastsearch.h with C++... +* `#26325 `__: TST: Skip Cython test for editable install +* `#26329 `__: MAINT: Bump actions/upload-artifact from 4.3.2 to 4.3.3 +* `#26338 `__: MAINT: update x86-simd-sort to latest +* `#26340 `__: DOC: Added small clarification note, based on discussion in issue... +* `#26347 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.3 to 3.0.4 +* `#26348 `__: NOGIL: Make loop data cache and dispatch cache thread-safe in... +* `#26353 `__: BUG: ensure text padding ufuncs handle stringdtype nan-like nulls +* `#26354 `__: BUG: Fix rfft for even input length. +* `#26355 `__: ENH: add support for nan-like null strings in string replace +* `#26359 `__: MAINT: Simplify bugfix for even rfft +* `#26362 `__: MAINT: Bump actions/dependency-review-action from 4.2.5 to 4.3.1 +* `#26363 `__: MAINT: Bump actions/dependency-review-action from 4.3.1 to 4.3.2 +* `#26364 `__: TST: static types are now immortal in the default build too +* `#26368 `__: [NOGIL] thread local promotion state +* `#26369 `__: DOC: fix np.unique release notes [skip cirrus] +* `#26372 `__: BUG: Make sure that NumPy scalars are supported by can_cast +* `#26377 `__: TYP: Fix incorrect type hint for creating a recarray from fromrecords +* `#26378 `__: DOC: Update internal links for generator.rst and related +* `#26384 `__: BUG: Fix incorrect return type of item with length 0 from chararray.__getitem__ +* `#26385 `__: DOC: Updated remaining links in random folder +* `#26386 `__: DOC: Improve example on array broadcasting +* `#26388 `__: BUG: Use Python pickle protocol version 4 for np.save +* `#26391 `__: DOC: Add missing methods to numpy.strings docs +* `#26392 `__: BUG: support nan-like null strings in [l,r]strip +* `#26396 `__: MNT: more gracefully handle spin adding arguments to functions... 
+* `#26399 `__: DOC: Update INSTALL.rst +* `#26413 `__: DOC: Fix some typos and incorrect markups +* `#26415 `__: MAINT: updated instructions to get MachAr byte pattern +* `#26416 `__: MAINT: Bump ossf/scorecard-action from 2.3.1 to 2.3.3 +* `#26418 `__: DOC: add reference docs for NpyString C API +* `#26419 `__: MNT: clean up references to array_owned==2 case in StringDType +* `#26426 `__: TYP,TST: Bump mypy to 1.10.0 +* `#26428 `__: MAINT: Bump pypa/cibuildwheel from 2.17.0 to 2.18.0 +* `#26429 `__: TYP: npyio: loadtxt: usecols: add None type +* `#26431 `__: TST: skip test_frompyfunc_leaks in the free-threaded build +* `#26432 `__: MAINT: Add some PR prefixes to the labeler. +* `#26436 `__: BUG: fixes for three related stringdtype issues +* `#26441 `__: BUG: int32 and intc should both appear in sctypes +* `#26442 `__: DOC: Adding links to polynomial table. +* `#26443 `__: TST: temporarily pin spin to work around issue in 0.9 release +* `#26444 `__: DOC: Remove outdated authentication instructions +* `#26445 `__: TST: fix xfailed tests on pypy 7.3.16 +* `#26447 `__: TST: attempt to fix intel SDE SIMD CI +* `#26449 `__: MAINT: fix typo +* `#26452 `__: DEP: Deprecate 'fix_imports' flag in numpy.save +* `#26456 `__: ENH: improve the error raised by ``numpy.isdtype`` +* `#26463 `__: TST: add basic free-threaded CI testing +* `#26464 `__: BLD: update vendored-meson to current Meson master (1.4.99) +* `#26469 `__: MAINT: Bump github/codeql-action from 2.13.4 to 3.25.5 +* `#26471 `__: BLD: cp313 [wheel build] +* `#26474 `__: BLD: Make NumPy build reproducibly +* `#26476 `__: DOC: Skip API documentation for numpy.distutils with Python 3.12... 
+* `#26478 `__: DOC: Set default as ``-j 1`` for spin docs and move ``-W`` to SPHINXOPTS +* `#26480 `__: TYP: fix type annotation for ``newbyteorder`` +* `#26481 `__: Improve documentation of numpy.ma.filled +* `#26486 `__: MAINT: Bump github/codeql-action from 3.25.5 to 3.25.6 +* `#26487 `__: MAINT: Bump pypa/cibuildwheel from 2.18.0 to 2.18.1 +* `#26488 `__: DOC: add examples to get_printoptions +* `#26489 `__: DOC: add example to get_include +* `#26492 `__: DOC: fix rng.random example in numpy-for-matlab-users +* `#26501 `__: ENH: Implement DLPack version 1 +* `#26503 `__: TST: work around flaky test on free-threaded build +* `#26504 `__: DOC: Copy-edit numpy 2.0 migration guide. +* `#26505 `__: DOC: update the NumPy Roadmap +* `#26507 `__: MAINT: mark temp elision address cache as thread local +* `#26511 `__: MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 +* `#26512 `__: CI: enable free-threaded wheel builds [wheel build] +* `#26514 `__: MAINT: Avoid gcc compiler warning +* `#26515 `__: MAINT: Fix GCC -Wmaybe-uninitialized warning +* `#26517 `__: DOC: Add missing functions to the migration guide +* `#26519 `__: MAINT: Avoid by-pointer parameter passing for LINEARIZE_DATA_t... +* `#26520 `__: BUG: Fix handling of size=() in Generator.choice when a.ndim... +* `#26524 `__: BUG: fix incorrect error handling for dtype('a') deprecation +* `#26526 `__: BUG: fix assert in PyArry_ConcatenateArrays with StringDType +* `#26529 `__: BUG: ``PyDataMem_SetHandler`` check capsule name +* `#26531 `__: BUG: Fix entry-point of Texinfo docs +* `#26534 `__: BUG: cast missing in PyPy-specific f2py code, pin spin in CI +* `#26537 `__: BUG: Fix F77 ! 
comment handling +* `#26538 `__: DOC: Update ``gradient`` docstrings +* `#26546 `__: MAINT: Remove redundant print from bug report issue template +* `#26548 `__: BUG: Fix typo in array-wrap code that lead to memory leak +* `#26550 `__: BUG: Make Polynomial evaluation adhere to nep 50 +* `#26552 `__: BUG: Fix in1d fast-path range +* `#26558 `__: BUG: fancy indexing copy +* `#26559 `__: BUG: fix setxor1d when input arrays aren't 1D +* `#26562 `__: MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 +* `#26563 `__: BUG: Fix memory leaks found with valgrind +* `#26564 `__: CI, BLD: Upgrade to Pyodide 0.26.0 for Emscripten/Pyodide CI... +* `#26566 `__: DOC: update ufunc tutorials to use setuptools +* `#26567 `__: BUG: fix memory leaks found with valgrind (next) +* `#26568 `__: MAINT: Unpin pydata-sphinx-theme +* `#26571 `__: DOC: Added web docs for missing ma and strings routines +* `#26572 `__: ENH: Add array API inspection functions +* `#26579 `__: ENH: Add unstack() +* `#26580 `__: ENH: Add copy and device keyword to np.asanyarray to match np.asarray +* `#26582 `__: BUG: weighted nanpercentile, nanquantile and multi-dim q +* `#26585 `__: MAINT: Bump github/codeql-action from 3.25.6 to 3.25.7 +* `#26586 `__: BUG: Fix memory leaks found by valgrind +* `#26589 `__: BUG: catch invalid fixed-width dtype sizes +* `#26594 `__: DOC: Update constants.rst: fix URL redirect +* `#26597 `__: ENH: Better error message for axis=None in ``np.put_along_axis``... +* `#26599 `__: ENH: use size-zero dtype for broadcast-shapes +* `#26602 `__: TST: Re-enable int8/uint8 einsum tests +* `#26603 `__: BUG: Disallow string inputs for ``copy`` keyword in ``np.array``... +* `#26604 `__: refguide-check with pytest as a runner +* `#26605 `__: DOC: fix typos in numpy v2.0 documentation +* `#26606 `__: DOC: Update randn() to use rng.standard_normal() +* `#26607 `__: MNT: Reorganize non-constant global statics into structs +* `#26609 `__: DOC: Updated notes and examples for np.insert. 
+* `#26610 `__: BUG: np.take handle 64-bit indices on 32-bit platforms +* `#26611 `__: MNT: Remove ``set_string_function`` +* `#26614 `__: MAINT: Bump github/codeql-action from 3.25.7 to 3.25.8 +* `#26619 `__: TST: Re-enable ``test_shift_all_bits`` on clang-cl +* `#26626 `__: DOC: add ``getbufsize`` example +* `#26627 `__: DOC: add ``setbufsize`` example +* `#26628 `__: DOC: add ``matrix_transpose`` example +* `#26629 `__: DOC: add ``unique_all`` example +* `#26630 `__: DOC: add ``unique_counts`` example +* `#26631 `__: DOC: add ``unique_inverse`` example +* `#26632 `__: DOC: add ``unique_values`` example +* `#26633 `__: DOC: fix ``matrix_transpose`` doctest +* `#26634 `__: BUG: Replace dots with underscores in f2py meson backend for... +* `#26636 `__: MAINT: Bump actions/dependency-review-action from 4.3.2 to 4.3.3 +* `#26637 `__: BUG: fix incorrect randomized parameterization in bench_linalg +* `#26638 `__: MNT: use reproducible RNG sequences in benchmarks +* `#26639 `__: MNT: more benchmark cleanup +* `#26641 `__: DOC: Update 2.0 migration guide +* `#26644 `__: DOC: Added clean_dirs to spin docs to remove generated folders +* `#26645 `__: DOC: Enable web docs for numpy.trapezoid and add back links +* `#26646 `__: DOC: Update docstring for invert function +* `#26655 `__: CI: modified CI job to test editable install +* `#26658 `__: MAINT: Bump pypa/cibuildwheel from 2.18.1 to 2.19.0 +* `#26662 `__: DOC: add CI and NEP commit acronyms +* `#26664 `__: CI: build and upload free-threaded nightly wheels for macOS +* `#26667 `__: BUG: Adds asanyarray to start of linalg.cross +* `#26670 `__: MAINT: Bump github/codeql-action from 3.25.8 to 3.25.9 +* `#26672 `__: CI: upgrade FreeBSD Cirrus job from FreeBSD 13.2 to 14.0 +* `#26675 `__: CI: Use default llvm on Windows. 
+* `#26676 `__: MAINT: mark evil_global_disable_warn_O4O8_flag as thread-local +* `#26679 `__: DOC: add ``np.linalg`` examples +* `#26680 `__: remove doctesting from refguide-check, add ``spin check-tutorials`` +* `#26684 `__: MAINT: Bump pypa/cibuildwheel from 2.19.0 to 2.19.1 +* `#26685 `__: MAINT: Bump github/codeql-action from 3.25.9 to 3.25.10 +* `#26686 `__: MAINT: Add comment lost in previous PR. +* `#26691 `__: BUILD: check for scipy-doctest, remove it from requirements +* `#26692 `__: DOC: document workaround for deprecation of dim-2 inputs to ``cross`` +* `#26693 `__: BUG: allow replacement in the dispatch cache +* `#26702 `__: DOC: Added missing See Also sections in Polynomial module +* `#26703 `__: BUG: Handle ``--f77flags`` and ``--f90flags`` for ``meson`` +* `#26706 `__: TST: Skip an f2py module test on Windows +* `#26714 `__: MAINT: Update main after 2.0.0 release. +* `#26716 `__: DOC: Add clarifications np.argpartition +* `#26717 `__: DOC: Mention more error paths and try to consolidate import errors +* `#26721 `__: DOC, MAINT: Turn on version warning banner provided by PyData... +* `#26722 `__: DOC: Update roadmap a bit more +* `#26724 `__: ENH: Add Array API 2023.12 version support +* `#26737 `__: DOC: Extend release notes for #26611 +* `#26739 `__: DOC: Update NEPs statuses +* `#26741 `__: DOC: Remove mention of NaN and NAN aliases from constants +* `#26742 `__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions`` +* `#26744 `__: BUG: Fix new DTypes and new string promotion when signature is... +* `#26750 `__: ENH: Add locking to umath_linalg if no lapack is detected at... 
+* `#26760 `__: TYP: fix incorrect import in ``ma/extras.pyi`` stub +* `#26762 `__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` +* `#26766 `__: ENH: Support integer dtype inputs in rounding functions +* `#26769 `__: BUG: Quantile closest_observation to round to nearest even order +* `#26770 `__: DOC, NEP: Update NEP44 +* `#26771 `__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes +* `#26776 `__: BUG: remove numpy.f2py from excludedimports +* `#26780 `__: MAINT: use an atomic load/store and a mutex to initialize the... +* `#26788 `__: TYP: fix missing ``sys`` import in numeric.pyi +* `#26789 `__: BUG: avoid side-effect of 'include complex.h' +* `#26790 `__: DOC: Update link to Python stdlib random. +* `#26795 `__: BUG: add order to out array of ``numpy.fft`` +* `#26797 `__: BLD: Fix x86-simd-sort build failure on openBSD +* `#26799 `__: MNT: Update dlpack docs and typing stubs +* `#26802 `__: Missing meson pass-through argument +* `#26805 `__: DOC: Update 2.0 migration guide and release note +* `#26808 `__: DOC: Change selected hardlinks to NEPs to intersphinx mappings +* `#26811 `__: DOC: update notes on sign for complex numbers +* `#26812 `__: CI,TST: Fix meson tests needing gfortran [wheel build] +* `#26813 `__: TST: fix 'spin test single_test' for future versions of spin +* `#26814 `__: DOC: Add ``>>> import numpy as np`` stubs everywhere +* `#26815 `__: MAINT: Bump github/codeql-action from 3.25.10 to 3.25.11 +* `#26826 `__: DOC: remove hack to override _add_newdocs_scalars +* `#26827 `__: DOC: AI-Gen examples ctypeslib.as_ctypes_types +* `#26828 `__: DOC: AI generated examples for ma.left_shift. +* `#26829 `__: DOC: AI-Gen examples for ma.put +* `#26830 `__: DOC: AI generated examples for ma.reshape +* `#26831 `__: DOC: AI generated examples for ma.correlate. 
+* `#26833 `__: MAINT: Bump pypa/cibuildwheel from 2.19.1 to 2.19.2 +* `#26841 `__: BENCH: Missing ufunc in benchmarks +* `#26842 `__: BUILD: clean out py2 stuff from npy_3kcompat.h +* `#26846 `__: MAINT: back printoptions with a true context variable +* `#26847 `__: TYP: fix ``ufunc`` method type annotations +* `#26848 `__: TYP: include the ``|`` prefix for ``dtype`` char codes +* `#26849 `__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar`` +* `#26858 `__: TYP: Annotate type aliases as ``typing.TypeAlias`` +* `#26866 `__: MAINT: Bump actions/upload-artifact from 4.3.3 to 4.3.4 +* `#26867 `__: TYP,BUG: fix ``numpy.__dir__`` annotations +* `#26871 `__: TYP: adopt ``typing.LiteralString`` and use more of ``typing.Literal`` +* `#26872 `__: TYP: use ``types.CapsuleType`` on python>=3.13 +* `#26873 `__: TYP: improved ``numpy._array_api_info`` typing +* `#26875 `__: TYP,BUG: Replace ``numpy._typing._UnknownType`` with ``typing.Never`` +* `#26877 `__: BUG: start applying ruff/flake8-implicit-str-concat rules (ISC) +* `#26879 `__: MAINT: start applying ruff/flake8-simplify rules (SIM) +* `#26880 `__: DOC: Fix small incorrect markup +* `#26881 `__: DOC, MAINT: fix typos found by codespell +* `#26882 `__: MAINT: start applying ruff/pyupgrade rules (UP) +* `#26883 `__: BUG: Make issctype always return bool. +* `#26884 `__: MAINT: Remove a redundant import from the generated __ufunc_api.h. +* `#26889 `__: API: Add ``device`` and ``to_device`` to scalars +* `#26891 `__: DOC: Add a note that one should free the proto struct +* `#26892 `__: ENH: Allow use of clip with Python integers to always succeed +* `#26894 `__: MAINT: Bump actions/setup-node from 4.0.2 to 4.0.3 +* `#26895 `__: DOC: Change documentation copyright strings to use a dynamic... +* `#26896 `__: DOC: Change NEP hardlinks to intersphinx mappings. +* `#26897 `__: TYP: type hint ``numpy.polynomial`` +* `#26901 `__: BUG: ``np.loadtxt`` return F_CONTIGUOUS ndarray if row size is... 
+* `#26902 `__: Apply some ruff/flake8-bugbear rules (B004 and B005) +* `#26903 `__: BUG: Fix off-by-one error in amount of characters in strip +* `#26904 `__: BUG,ENH: Fix generic scalar infinite recursion issues +* `#26905 `__: API: Do not consider subclasses for NEP 50 weak promotion +* `#26906 `__: MAINT: Bump actions/setup-python from 5.1.0 to 5.1.1 +* `#26908 `__: ENH: Provide a hook for gufuncs to process core dimensions. +* `#26913 `__: MAINT: declare that NumPy's C extensions support running without... +* `#26914 `__: API: Partially revert unique with return_inverse +* `#26919 `__: BUG,MAINT: Fix utf-8 character stripping memory access +* `#26923 `__: MAINT: Bump actions/dependency-review-action from 4.3.3 to 4.3.4 +* `#26924 `__: MAINT: Bump github/codeql-action from 3.25.11 to 3.25.12 +* `#26927 `__: TYP: Transparent ``__array__`` shape-type +* `#26928 `__: TYP: Covariant ``numpy.flatiter`` type parameter +* `#26929 `__: TYP: Positional-only dunder binop method parameters +* `#26930 `__: BUG: Fix out-of-bound minimum offset for in1d table method +* `#26931 `__: DOC, BUG: Fix running full test command in docstring +* `#26934 `__: MAINT: add PyArray_ZeroContiguousBuffer helper and use it in... +* `#26935 `__: BUG: fix ``f2py`` tests to work with v2 API +* `#26937 `__: TYP,BUG: Remove ``numpy.cast`` and ``numpy.disp`` from the typing... +* `#26938 `__: TYP,BUG: Fix ``dtype`` type alias specialization issue in ``__init__.pyi`` +* `#26942 `__: TYP: Improved ``numpy.generic`` rich comparison operator type... +* `#26943 `__: TYP,BUG: Remove non-existant ``numpy.__git_version__`` in the... +* `#26946 `__: TYP: Add missing typecodes in ``numpy._core.numerictypes.typecodes`` +* `#26950 `__: MAINT: add freethreading_compatible directive to cython build +* `#26953 `__: TYP: Replace ``typing.Union`` with ``|`` in ``numpy._typing`` +* `#26954 `__: TYP: Replace ``typing.Optional[T]`` with ``T | None`` in the... 
+* `#26964 `__: DOC: Issue template for static typing +* `#26968 `__: MAINT: add a 'tests' install tag to the `numpy._core._simd` extension... +* `#26969 `__: BUG: Fix unicode strip +* `#26972 `__: BUG: Off by one in memory overlap check +* `#26975 `__: TYP: Use ``Final`` and ``LiteralString`` for the constants in... +* `#26980 `__: DOC: add sphinx-copybutton +* `#26981 `__: ENH: add support in f2py to declare gil-disabled support +* `#26983 `__: TYP,BUG: Type annotations for ``numpy.trapezoid`` +* `#26984 `__: TYP,BUG: Fix potentially unresolved typevar in ``median`` and... +* `#26985 `__: BUG: Add object cast to avoid warning with limited API +* `#26989 `__: DOC: fix ctypes example +* `#26991 `__: MAINT: mark scipy-openblas nightly tests as allowed to fail +* `#26992 `__: TYP: Covariant ``numpy.ndenumerate`` type parameter +* `#26993 `__: TYP,BUG: FIx ``numpy.ndenumerate`` annotations for ``object_``... +* `#26996 `__: ENH: Add ``__slots__`` to private (sub-)classes in ``numpy.lib._index_tricks_impl`` +* `#27002 `__: MAINT: Update main after 2.0.1 release. +* `#27008 `__: TYP,BUG: Complete type stubs for ``numpy.dtypes`` +* `#27009 `__: TST, MAINT: Loosen required test precision +* `#27010 `__: DOC: update tutorials link +* `#27011 `__: MAINT: replace PyThread_type_lock with PyMutex on Python >= 3.13.0b3 +* `#27013 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27014 `__: BUG: fix gcd inf +* `#27015 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27017 `__: DOC: Release note for feature added in gh-26908. +* `#27019 `__: TYP: improved ``numpy.array`` type hints for array-like input +* `#27025 `__: DOC: Replace np.matrix in .view() docstring example. +* `#27026 `__: DOC: fix tiny typo +* `#27027 `__: BUG: Fix simd loadable stride logic +* `#27031 `__: DOC: document 'floatmode' and 'legacy' keys from np.get_printoptions'... +* `#27034 `__: BUG: random: Fix edge case of Johnk's algorithm for the beta... 
+* `#27041 `__: MAINT: Bump github/codeql-action from 3.25.12 to 3.25.14 +* `#27043 `__: CI: unify free-threaded wheel builds with other builds +* `#27046 `__: BUG: random: prevent zipf from hanging when parameter is large. +* `#27047 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27048 `__: BUG: random: Fix long delays/hangs with zipf(a) when a near 1. +* `#27050 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27051 `__: TST: Refactor to consistently use CompilerChecker +* `#27052 `__: TST: fix issues with tests that use numpy.testing.extbuild +* `#27055 `__: MAINT: Bump ossf/scorecard-action from 2.3.3 to 2.4.0 +* `#27056 `__: MAINT: Bump github/codeql-action from 3.25.14 to 3.25.15 +* `#27057 `__: BUG: fix another cast setup in array_assign_subscript +* `#27058 `__: DOC: Add some missing examples for ``np.strings`` methods +* `#27059 `__: ENH: Disable name suggestions on some AttributeErrors +* `#27060 `__: MAINT: linalg: Simplify some linalg gufuncs. +* `#27070 `__: BUG: Bump Highway to latest master +* `#27076 `__: DEP: lib: Deprecate acceptance of float (and more) in bincount. +* `#27079 `__: MAINT: 3.9/10 cleanups +* `#27081 `__: CI: Upgrade ``array-api-tests`` +* `#27085 `__: ENH: fixes for warnings on free-threaded wheel builds +* `#27087 `__: ENH: mark the dragon4 scratch space as thread-local +* `#27090 `__: DOC: update np.shares_memory() docs +* `#27091 `__: API,BUG: Fix copyto (and ufunc) handling of scalar cast safety +* `#27094 `__: DOC: Add release note about deprecation introduced in gh-27076. +* `#27095 `__: DOC: Fix indentation of a few release notes. 
+* `#27096 `__: BUG: Complex printing tests fail on Windows ARM64 +* `#27097 `__: MAINT: Bump actions/upload-artifact from 4.3.4 to 4.3.5 +* `#27098 `__: BUG: add missing error handling in public_dtype_api.c +* `#27102 `__: DOC: Fixup promotion doc +* `#27104 `__: BUG: Fix building NumPy in FIPS mode +* `#27108 `__: DOC: remove incorrect docstring comment +* `#27110 `__: BLD: cp313 cp313t linux_aarch64 [wheel build] +* `#27112 `__: BUG: Fix repr for integer scalar subclasses +* `#27113 `__: DEV: make linter.py runnable from outside the root of the repo +* `#27114 `__: MAINT: Bump pypa/cibuildwheel from 2.19.2 to 2.20.0 +* `#27115 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27117 `__: BUG: Ensure that scalar binops prioritize __array_ufunc__ +* `#27118 `__: BLD: update vendored Meson for cross-compilation patches +* `#27123 `__: BUG: Bump Highway to latest +* `#27124 `__: MAINT: Bump github/codeql-action from 3.25.15 to 3.26.0 +* `#27125 `__: MAINT: Bump actions/upload-artifact from 4.3.5 to 4.3.6 +* `#27127 `__: BUG: Fix missing error return in copyto +* `#27144 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27149 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27162 `__: BLD: use smaller scipy-openblas builds +* `#27166 `__: ENH: fix thread-unsafe C API usages +* `#27173 `__: MAINT: Bump pythoncapi-compat version. diff --git a/doc/release/upcoming_changes/12150.improvement.rst b/doc/release/upcoming_changes/12150.improvement.rst deleted file mode 100644 index f73a6d2aaa28..000000000000 --- a/doc/release/upcoming_changes/12150.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -``histogram`` auto-binning now returns bin sizes >=1 for integer input data ---------------------------------------------------------------------------- -For integer input data, bin sizes smaller than 1 result in spurious empty -bins. 
This is now avoided when the number of bins is computed using one of the -algorithms provided by `histogram_bin_edges`. diff --git a/doc/release/upcoming_changes/26081.improvement.rst b/doc/release/upcoming_changes/26081.improvement.rst deleted file mode 100644 index bac5c197caa0..000000000000 --- a/doc/release/upcoming_changes/26081.improvement.rst +++ /dev/null @@ -1,11 +0,0 @@ -``ndarray`` shape-type parameter is now covariant and bound to ``tuple[int, ...]`` ----------------------------------------------------------------------------------- -Static typing for ``ndarray`` is a long-term effort that continues -with this change. It is a generic type with type parameters for -the shape and the data type. Previously, the shape type parameter could be -any value. This change restricts it to a tuple of ints, as one would expect -from using ``ndarray.shape``. Further, the shape-type parameter has been -changed from invariant to covariant. This change also applies to the subtypes -of ``ndarray``, e.g. ``numpy.ma.MaskedArray``. See the -`typing docs `_ -for more information. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26103.c_api.rst b/doc/release/upcoming_changes/26103.c_api.rst deleted file mode 100644 index 9d0d998e2dfc..000000000000 --- a/doc/release/upcoming_changes/26103.c_api.rst +++ /dev/null @@ -1,15 +0,0 @@ -API symbols now hidden but customizable ---------------------------------------- -NumPy now defaults to hide the API symbols it adds to allow all NumPy API -usage. -This means that by default you cannot dynamically fetch the NumPy API from -another library (this was never possible on windows). - -If you are experiencing linking errors related to ``PyArray_API`` or -``PyArray_RUNTIME_VERSION``, you can define the -:c:macro:`NPY_API_SYMBOL_ATTRIBUTE` to opt-out of this change. 
- -If you are experiencing problems due to an upstream header including NumPy, -the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before -their header and import NumPy yourself based on :ref:`including-the-c-api`. - diff --git a/doc/release/upcoming_changes/26268.expired.rst b/doc/release/upcoming_changes/26268.expired.rst deleted file mode 100644 index 932fdbfae6d7..000000000000 --- a/doc/release/upcoming_changes/26268.expired.rst +++ /dev/null @@ -1 +0,0 @@ -* Scalars and 0D arrays are disallowed for `numpy.nonzero` and `numpy.ndarray.nonzero`. diff --git a/doc/release/upcoming_changes/26285.change.rst b/doc/release/upcoming_changes/26285.change.rst deleted file mode 100644 index d652c58dc799..000000000000 --- a/doc/release/upcoming_changes/26285.change.rst +++ /dev/null @@ -1,13 +0,0 @@ -``ma.corrcoef`` may return a slightly different result ------------------------------------------------------- -A pairwise observation approach is currently used in `ma.corrcoef` to -calculate the standard deviations for each pair of variables. This has been -changed as it is being used to normalise the covariance, estimated using -`ma.cov`, which does not consider the observations for each variable in a -pairwise manner, rendering it unnecessary. The normalisation has been -replaced by the more appropriate standard deviation for each variable, -which significantly reduces the wall time, but will return slightly different -estimates of the correlation coefficients in cases where the observations -between a pair of variables are not aligned. However, it will return the same -estimates in all other cases, including returning the same correlation matrix -as `corrcoef` when using a masked array with no masked values. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/26285.performance.rst b/doc/release/upcoming_changes/26285.performance.rst deleted file mode 100644 index 79009f662a0f..000000000000 --- a/doc/release/upcoming_changes/26285.performance.rst +++ /dev/null @@ -1,5 +0,0 @@ -``ma.cov`` and ``ma.corrcoef`` are now significantly faster ------------------------------------------------------------ -The private function has been refactored along with `ma.cov` and -`ma.corrcoef`. They are now significantly faster, particularly on large, -masked arrays. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26292.new_feature.rst b/doc/release/upcoming_changes/26292.new_feature.rst deleted file mode 100644 index fc2c33571d77..000000000000 --- a/doc/release/upcoming_changes/26292.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* `numpy.reshape` and `numpy.ndarray.reshape` now support ``shape`` and ``copy`` arguments. diff --git a/doc/release/upcoming_changes/26313.change.rst b/doc/release/upcoming_changes/26313.change.rst deleted file mode 100644 index 99c8b1d879f9..000000000000 --- a/doc/release/upcoming_changes/26313.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* As `numpy.vecdot` is now a ufunc it has a less precise signature. - This is due to the limitations of ufunc's typing stub. diff --git a/doc/release/upcoming_changes/26388.performance.rst b/doc/release/upcoming_changes/26388.performance.rst deleted file mode 100644 index 2e99f9452c1e..000000000000 --- a/doc/release/upcoming_changes/26388.performance.rst +++ /dev/null @@ -1,3 +0,0 @@ -* `numpy.save` now uses pickle protocol version 4 for saving arrays with - object dtype, which allows for pickle objects larger than 4GB and improves - saving speed by about 5% for large arrays. 
diff --git a/doc/release/upcoming_changes/26452.deprecation.rst b/doc/release/upcoming_changes/26452.deprecation.rst deleted file mode 100644 index cc4a10bfafee..000000000000 --- a/doc/release/upcoming_changes/26452.deprecation.rst +++ /dev/null @@ -1,4 +0,0 @@ -* The `fix_imports` keyword argument in `numpy.save` is deprecated. Since - NumPy 1.17, `numpy.save` uses a pickle protocol that no longer supports - Python 2, and ignored `fix_imports` keyword. This keyword is kept only - for backward compatibility. It is now deprecated. diff --git a/doc/release/upcoming_changes/26501.new_feature.rst b/doc/release/upcoming_changes/26501.new_feature.rst deleted file mode 100644 index c7465925295c..000000000000 --- a/doc/release/upcoming_changes/26501.new_feature.rst +++ /dev/null @@ -1,2 +0,0 @@ -* NumPy now supports DLPack v1, support for older versions will - be deprecated in the future. diff --git a/doc/release/upcoming_changes/26579.new_function.rst b/doc/release/upcoming_changes/26579.new_function.rst deleted file mode 100644 index 168d12189323..000000000000 --- a/doc/release/upcoming_changes/26579.new_function.rst +++ /dev/null @@ -1,6 +0,0 @@ -New function `numpy.unstack` ----------------------------- - -A new function ``np.unstack(array, axis=...)`` was added, which splits -an array into a tuple of arrays along an axis. It serves as the inverse -of `numpy.stack`. diff --git a/doc/release/upcoming_changes/26580.new_feature.rst b/doc/release/upcoming_changes/26580.new_feature.rst deleted file mode 100644 index c625e9b9d8a2..000000000000 --- a/doc/release/upcoming_changes/26580.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* `numpy.asanyarray` now supports ``copy`` and ``device`` arguments, matching `numpy.asarray`. 
diff --git a/doc/release/upcoming_changes/26611.expired.rst b/doc/release/upcoming_changes/26611.expired.rst deleted file mode 100644 index 1df220d2b2a7..000000000000 --- a/doc/release/upcoming_changes/26611.expired.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``set_string_function`` internal function was removed and ``PyArray_SetStringFunction`` - was stubbed out. diff --git a/doc/release/upcoming_changes/26611.new_feature.rst b/doc/release/upcoming_changes/26611.new_feature.rst deleted file mode 100644 index 6178049cf4ed..000000000000 --- a/doc/release/upcoming_changes/26611.new_feature.rst +++ /dev/null @@ -1,2 +0,0 @@ -* `numpy.printoptions`, `numpy.get_printoptions`, and `numpy.set_printoptions` now support - a new option, ``override_repr``, for defining custom ``repr(array)`` behavior. diff --git a/doc/release/upcoming_changes/26656.improvement.rst b/doc/release/upcoming_changes/26656.improvement.rst deleted file mode 100644 index 66d7508d2738..000000000000 --- a/doc/release/upcoming_changes/26656.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -`np.quantile` with method ``closest_observation`` chooses nearest even order statistic --------------------------------------------------------------------------------------- -This changes the definition of nearest for border cases from the nearest odd -order statistic to nearest even order statistic. The numpy implementation now -matches other reference implementations. diff --git a/doc/release/upcoming_changes/26724.new_feature.rst b/doc/release/upcoming_changes/26724.new_feature.rst deleted file mode 100644 index 3c6a830728a4..000000000000 --- a/doc/release/upcoming_changes/26724.new_feature.rst +++ /dev/null @@ -1,7 +0,0 @@ -* `numpy.cumulative_sum` and `numpy.cumulative_prod` were added as Array API - compatible alternatives for `numpy.cumsum` and `numpy.cumprod`. The new functions - can include a fixed initial (zeros for ``sum`` and ones for ``prod``) in the result. 
-* `numpy.clip` now supports ``max`` and ``min`` keyword arguments which are meant - to replace ``a_min`` and ``a_max``. Also, for ``np.clip(a)`` or ``np.clip(a, None, None)`` - a copy of the input array will be returned instead of raising an error. -* `numpy.astype` now supports ``device`` argument. diff --git a/doc/release/upcoming_changes/26750.improvement.rst b/doc/release/upcoming_changes/26750.improvement.rst deleted file mode 100644 index 858061dbe48a..000000000000 --- a/doc/release/upcoming_changes/26750.improvement.rst +++ /dev/null @@ -1,12 +0,0 @@ -`lapack_lite` is now thread safe --------------------------------- - -NumPy provides a minimal low-performance version of LAPACK named ``lapack_lite`` -that can be used if no BLAS/LAPACK system is detected at build time. - -Until now, ``lapack_lite`` was not thread safe. Single-threaded use cases did -not hit any issues, but running linear algebra operations in multiple threads -could lead to errors, incorrect results, or seg faults due to data races. - -We have added a global lock, serializing access to ``lapack_lite`` in multiple -threads. diff --git a/doc/release/upcoming_changes/26766.change.rst b/doc/release/upcoming_changes/26766.change.rst deleted file mode 100644 index 923dbe816dd1..000000000000 --- a/doc/release/upcoming_changes/26766.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* `numpy.floor`, `numpy.ceil`, and `numpy.trunc` now won't perform casting - to a floating dtype for integer and boolean dtype input arrays. diff --git a/doc/release/upcoming_changes/26842.c_api.rst b/doc/release/upcoming_changes/26842.c_api.rst deleted file mode 100644 index 7e50dd385006..000000000000 --- a/doc/release/upcoming_changes/26842.c_api.rst +++ /dev/null @@ -1,5 +0,0 @@ -Many shims removed from npy_3kcompat.h --------------------------------------- -Many of the old shims and helper functions were removed from -``npy_3kcompat.h``. 
If you find yourself in need of these, vendor the previous -version of the file into your codebase. diff --git a/doc/release/upcoming_changes/26846.improvement.rst b/doc/release/upcoming_changes/26846.improvement.rst deleted file mode 100644 index ae9b72d195bf..000000000000 --- a/doc/release/upcoming_changes/26846.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -The `numpy.printoptions` context manager is now thread and async-safe ---------------------------------------------------------------------- - -In prior versions of NumPy, the printoptions were defined using a combination -of Python and C global variables. We have refactored so the state is stored in -a python ``ContextVar``, making the context manager thread and async-safe. diff --git a/doc/release/upcoming_changes/26908.c_api.rst b/doc/release/upcoming_changes/26908.c_api.rst deleted file mode 100644 index d6e43591819d..000000000000 --- a/doc/release/upcoming_changes/26908.c_api.rst +++ /dev/null @@ -1,8 +0,0 @@ -New ``PyUFuncObject`` field ``process_core_dims_func`` ------------------------------------------------------- -The field ``process_core_dims_func`` was added to the structure -``PyUFuncObject``. For generalized ufuncs, this field can be set to a -function of type ``PyUFunc_ProcessCoreDimsFunc`` that will be called when the -ufunc is called. It allows the ufunc author to check that core dimensions -satisfy additional constraints, and to set output core dimension sizes if they -have not been provided. 
diff --git a/doc/release/upcoming_changes/26981.new_feature.rst b/doc/release/upcoming_changes/26981.new_feature.rst deleted file mode 100644 index f466faeb7590..000000000000 --- a/doc/release/upcoming_changes/26981.new_feature.rst +++ /dev/null @@ -1,9 +0,0 @@ -``f2py`` can generate freethreading-compatible C extensions ------------------------------------------------------------ - -Pass ``--freethreading-compatible`` to the f2py CLI tool to produce a C -extension marked as compatible with the free threading CPython -interpreter. Doing so prevents the interpreter from re-enabling the GIL at -runtime when it imports the C extension. Note that ``f2py`` does not analyze -fortran code for thread safety, so you must verify that the wrapped fortran -code is thread safe before marking the extension as compatible. diff --git a/doc/release/upcoming_changes/27076.deprecation.rst b/doc/release/upcoming_changes/27076.deprecation.rst deleted file mode 100644 index f692b814c17d..000000000000 --- a/doc/release/upcoming_changes/27076.deprecation.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Passing non-integer inputs as the first argument of `bincount` is now - deprecated, because such inputs are silently cast to integers with no - warning about loss of precision. diff --git a/doc/release/upcoming_changes/27091.change.rst b/doc/release/upcoming_changes/27091.change.rst deleted file mode 100644 index 5b71692efabd..000000000000 --- a/doc/release/upcoming_changes/27091.change.rst +++ /dev/null @@ -1,24 +0,0 @@ -Cast-safety fixes in ``copyto`` and ``full`` --------------------------------------------- -``copyto`` now uses NEP 50 correctly and applies this to its cast safety. -Python integer to NumPy integer casts and Python float to NumPy float casts -are now considered "safe" even if assignment may fail or precision may be lost. -This means the following examples change slightly: - -* ``np.copyto(int8_arr, 1000)`` previously performed an unsafe/same-kind cast - of the Python integer. 
It will now always raise, to achieve an unsafe cast - you must pass an array or NumPy scalar. -* ``np.copyto(uint8_arr, 1000, casting="safe")`` will raise an OverflowError - rather than a TypeError due to same-kind casting. -* ``np.copyto(float32_arr, 1e300, casting="safe")`` will overflow to ``inf`` - (float32 cannot hold ``1e300``) rather raising a TypeError. - -Further, only the dtype is used when assigning NumPy scalars (or 0-d arrays), -meaning that the following behaves differently: - -* ``np.copyto(float32_arr, np.float64(3.0), casting="safe")`` raises. -* ``np.coptyo(int8_arr, np.int64(100), casting="safe")`` raises. - Previously, NumPy checked whether the 100 fits the ``int8_arr``. - -This aligns ``copyto``, ``full``, and ``full_like`` with the correct NumPy 2 -behavior. \ No newline at end of file diff --git a/doc/release/upcoming_changes/27147.performance.rst b/doc/release/upcoming_changes/27147.performance.rst deleted file mode 100644 index 2cea7780f41c..000000000000 --- a/doc/release/upcoming_changes/27147.performance.rst +++ /dev/null @@ -1,8 +0,0 @@ -* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on - benchmarking, there are 5 clusters of performance around these kernels: - ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. - -* OpenBLAS on windows is linked without quadmath, simplfying licensing - -* Due to a regression in OpenBLAS on windows, the performance improvements - when using multiple threads for OpenBLAS 0.3.26 were reverted. diff --git a/doc/source/release/2.1.0-notes.rst b/doc/source/release/2.1.0-notes.rst index d0b0b6f1b785..295115d57343 100644 --- a/doc/source/release/2.1.0-notes.rst +++ b/doc/source/release/2.1.0-notes.rst @@ -1,19 +1,302 @@ .. currentmodule:: numpy -========================== +========================= NumPy 2.1.0 Release Notes -========================== +========================= +NumPy 2.1.0 provides support for the upcoming Python 3.13 release and drops +support for Python 3.9. 
In addition to the usual bug fixes and updated Python +support, it helps get us back into our usual release cycle after the extended +development of 2.0. The highlights for this release are: -Highlights -========== +- Support for the array-api 2023.12 standard. +- Support for Python 3.13. +- Preliminary support for free threaded Python 3.13. -*We'll choose highlights for this release near the end of the release cycle.* +Python versions 3.10-3.13 are supported in this release. -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +New functions +============= -.. **Content from release note snippets in doc/release/upcoming_changes:** +New function ``numpy.unstack`` +------------------------------ + +A new function ``np.unstack(array, axis=...)`` was added, which splits +an array into a tuple of arrays along an axis. It serves as the inverse +of `numpy.stack`. + +(`gh-26579 `__) + + +Deprecations +============ + +* The ``fix_imports`` keyword argument in ``numpy.save`` is deprecated. Since + NumPy 1.17, ``numpy.save`` uses a pickle protocol that no longer supports + Python 2, and ignored ``fix_imports`` keyword. This keyword is kept only + for backward compatibility. It is now deprecated. + + (`gh-26452 `__) + +* Passing non-integer inputs as the first argument of `bincount` is now + deprecated, because such inputs are silently cast to integers with no + warning about loss of precision. + + (`gh-27076 `__) + + +Expired deprecations +==================== + +* Scalars and 0D arrays are disallowed for ``numpy.nonzero`` and ``numpy.ndarray.nonzero``. + + (`gh-26268 `__) + +* ``set_string_function`` internal function was removed and ``PyArray_SetStringFunction`` + was stubbed out. + + (`gh-26611 `__) + + +C API changes +============= + +API symbols now hidden but customizable +--------------------------------------- +NumPy now defaults to hide the API symbols it adds to allow all NumPy API +usage. 
This means that by default you cannot dynamically fetch the NumPy API +from another library (this was never possible on windows). + +If you are experiencing linking errors related to ``PyArray_API`` or +``PyArray_RUNTIME_VERSION``, you can define the +``NPY_API_SYMBOL_ATTRIBUTE`` to opt-out of this change. + +If you are experiencing problems due to an upstream header including NumPy, +the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before +their header and import NumPy yourself based on ``including-the-c-api``. + +(`gh-26103 `__) + +Many shims removed from npy_3kcompat.h +-------------------------------------- +Many of the old shims and helper functions were removed from +``npy_3kcompat.h``. If you find yourself in need of these, vendor the previous +version of the file into your codebase. + +(`gh-26842 `__) + +New ``PyUFuncObject`` field ``process_core_dims_func`` +------------------------------------------------------ +The field ``process_core_dims_func`` was added to the structure +``PyUFuncObject``. For generalized ufuncs, this field can be set to a function +of type ``PyUFunc_ProcessCoreDimsFunc`` that will be called when the ufunc is +called. It allows the ufunc author to check that core dimensions satisfy +additional constraints, and to set output core dimension sizes if they have not +been provided. + +(`gh-26908 `__) + + +New Features +============ + +* ``numpy.reshape`` and ``numpy.ndarray.reshape`` now support ``shape`` and + ``copy`` arguments. + + (`gh-26292 `__) + +* NumPy now supports DLPack v1, support for older versions will + be deprecated in the future. + + (`gh-26501 `__) + +* ``numpy.asanyarray`` now supports ``copy`` and ``device`` arguments, matching + ``numpy.asarray``. + + (`gh-26580 `__) + +* ``numpy.printoptions``, ``numpy.get_printoptions``, and + ``numpy.set_printoptions`` now support a new option, ``override_repr``, for + defining custom ``repr(array)`` behavior. 
+ + (`gh-26611 `__) + +* ``numpy.cumulative_sum`` and ``numpy.cumulative_prod`` were added as Array + API compatible alternatives for ``numpy.cumsum`` and ``numpy.cumprod``. The + new functions can include a fixed initial (zeros for ``sum`` and ones for + ``prod``) in the result. + + (`gh-26724 `__) + +* ``numpy.clip`` now supports ``max`` and ``min`` keyword arguments which are + meant to replace ``a_min`` and ``a_max``. Also, for ``np.clip(a)`` or + ``np.clip(a, None, None)`` a copy of the input array will be returned instead + of raising an error. + + (`gh-26724 `__) + +* ``numpy.astype`` now supports ``device`` argument. + + (`gh-26724 `__) + +``f2py`` can generate freethreading-compatible C extensions +----------------------------------------------------------- +Pass ``--freethreading-compatible`` to the f2py CLI tool to produce a C +extension marked as compatible with the free threading CPython +interpreter. Doing so prevents the interpreter from re-enabling the GIL at +runtime when it imports the C extension. Note that ``f2py`` does not analyze +fortran code for thread safety, so you must verify that the wrapped fortran +code is thread safe before marking the extension as compatible. + +(`gh-26981 `__) + + +Improvements +============ + +``histogram`` auto-binning now returns bin sizes >=1 for integer input data +--------------------------------------------------------------------------- +For integer input data, bin sizes smaller than 1 result in spurious empty +bins. This is now avoided when the number of bins is computed using one of the +algorithms provided by ``histogram_bin_edges``. + +(`gh-12150 `__) + +``ndarray`` shape-type parameter is now covariant and bound to ``tuple[int, ...]`` +---------------------------------------------------------------------------------- +Static typing for ``ndarray`` is a long-term effort that continues +with this change. It is a generic type with type parameters for +the shape and the data type. 
Previously, the shape type parameter could be +any value. This change restricts it to a tuple of ints, as one would expect +from using ``ndarray.shape``. Further, the shape-type parameter has been +changed from invariant to covariant. This change also applies to the subtypes +of ``ndarray``, e.g. ``numpy.ma.MaskedArray``. See the +`typing docs `_ +for more information. + +(`gh-26081 `__) + +``np.quantile`` with method ``closest_observation`` chooses nearest even order statistic +---------------------------------------------------------------------------------------- +This changes the definition of nearest for border cases from the nearest odd +order statistic to nearest even order statistic. The numpy implementation now +matches other reference implementations. + +(`gh-26656 `__) + +``lapack_lite`` is now thread safe +---------------------------------- +NumPy provides a minimal low-performance version of LAPACK named ``lapack_lite`` +that can be used if no BLAS/LAPACK system is detected at build time. + +Until now, ``lapack_lite`` was not thread safe. Single-threaded use cases did +not hit any issues, but running linear algebra operations in multiple threads +could lead to errors, incorrect results, or segfaults due to data races. + +We have added a global lock, serializing access to ``lapack_lite`` in multiple +threads. + +(`gh-26750 `__) + +The ``numpy.printoptions`` context manager is now thread and async-safe +----------------------------------------------------------------------- +In prior versions of NumPy, the printoptions were defined using a combination +of Python and C global variables. We have refactored so the state is stored in +a python ``ContextVar``, making the context manager thread and async-safe. 
+ +(`gh-26846 `__) + + +Performance improvements and changes +==================================== + +* ``numpy.save`` now uses pickle protocol version 4 for saving arrays with + object dtype, which allows for pickle objects larger than 4GB and improves + saving speed by about 5% for large arrays. + + (`gh-26388 `__) + +* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on + benchmarking, there are 5 clusters of performance around these kernels: + ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. + + (`gh-27147 `__) + +* OpenBLAS on windows is linked without quadmath, simplifying licensing + + (`gh-27147 `__) + +* Due to a regression in OpenBLAS on windows, the performance improvements when + using multiple threads for OpenBLAS 0.3.26 were reverted. + + (`gh-27147 `__) + +``ma.cov`` and ``ma.corrcoef`` are now significantly faster +----------------------------------------------------------- +The private function has been refactored along with ``ma.cov`` and +``ma.corrcoef``. They are now significantly faster, particularly on large, +masked arrays. + +(`gh-26285 `__) + + +Changes +======= + +* As ``numpy.vecdot`` is now a ufunc it has a less precise signature. + This is due to the limitations of ufunc's typing stub. + + (`gh-26313 `__) + +* ``numpy.floor``, ``numpy.ceil``, and ``numpy.trunc`` now won't perform + casting to a floating dtype for integer and boolean dtype input arrays. + + (`gh-26766 `__) + +``ma.corrcoef`` may return a slightly different result +------------------------------------------------------ +A pairwise observation approach is currently used in ``ma.corrcoef`` to +calculate the standard deviations for each pair of variables. This has been +changed as it is being used to normalise the covariance, estimated using +``ma.cov``, which does not consider the observations for each variable in a +pairwise manner, rendering it unnecessary. 
The normalisation has been replaced +by the more appropriate standard deviation for each variable, which +significantly reduces the wall time, but will return slightly different +estimates of the correlation coefficients in cases where the observations +between a pair of variables are not aligned. However, it will return the same +estimates in all other cases, including returning the same correlation matrix +as ``corrcoef`` when using a masked array with no masked values. + +(`gh-26285 `__) + +Cast-safety fixes in ``copyto`` and ``full`` +-------------------------------------------- +``copyto`` now uses NEP 50 correctly and applies this to its cast safety. +Python integer to NumPy integer casts and Python float to NumPy float casts +are now considered "safe" even if assignment may fail or precision may be lost. +This means the following examples change slightly: + +* ``np.copyto(int8_arr, 1000)`` previously performed an unsafe/same-kind cast + of the Python integer. It will now always raise, to achieve an unsafe cast + you must pass an array or NumPy scalar. + +* ``np.copyto(uint8_arr, 1000, casting="safe")`` will raise an OverflowError + rather than a TypeError due to same-kind casting. + +* ``np.copyto(float32_arr, 1e300, casting="safe")`` will overflow to ``inf`` + (float32 cannot hold ``1e300``) rather raising a TypeError. + +Further, only the dtype is used when assigning NumPy scalars (or 0-d arrays), +meaning that the following behaves differently: + +* ``np.copyto(float32_arr, np.float64(3.0), casting="safe")`` raises. + +* ``np.coptyo(int8_arr, np.int64(100), casting="safe")`` raises. + Previously, NumPy checked whether the 100 fits the ``int8_arr``. + +This aligns ``copyto``, ``full``, and ``full_like`` with the correct NumPy 2 +behavior. + +(`gh-27091 `__) -.. 
include:: notes-towncrier.rst diff --git a/pyproject.toml b/pyproject.toml index ad4673949a10..305db1c77ba4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.1.0.dev0" +version = "2.1.0rc1" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} @@ -29,6 +29,7 @@ classifiers = [ 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: Implementation :: CPython', 'Topic :: Software Development', From cbfec1b22fac9233dbb9448176caf95ef26482e0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 11 Aug 2024 01:39:36 +0200 Subject: [PATCH 010/101] DOC: Add release notes for #26897 --- doc/release/upcoming_changes/26897.improvement.rst | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 doc/release/upcoming_changes/26897.improvement.rst diff --git a/doc/release/upcoming_changes/26897.improvement.rst b/doc/release/upcoming_changes/26897.improvement.rst new file mode 100644 index 000000000000..1b3b327711af --- /dev/null +++ b/doc/release/upcoming_changes/26897.improvement.rst @@ -0,0 +1,6 @@ +Type hinting ``numpy.polynomial`` +--------------------------------- + +Starting from the 2.1 release, PEP 484 type annotations have been included for +the functions and convenience classes in ``numpy.polynomial`` and its +sub-packages. 
From 7f1cd245a163ed14e39eee27f06a99b925ac5e3f Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 11 Aug 2024 02:01:44 +0200 Subject: [PATCH 011/101] DOC: Add release notes for #27008 --- doc/release/upcoming_changes/27008.improvement.rst | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 doc/release/upcoming_changes/27008.improvement.rst diff --git a/doc/release/upcoming_changes/27008.improvement.rst b/doc/release/upcoming_changes/27008.improvement.rst new file mode 100644 index 000000000000..47e1090d9067 --- /dev/null +++ b/doc/release/upcoming_changes/27008.improvement.rst @@ -0,0 +1,8 @@ +Improved ``numpy.dtypes`` type hints +------------------------------------ + +The type annotations for ``numpy.dtypes`` are now a better reflection of the +runtime: +The ``numpy.dtype`` type-aliases have been replaced with specialized ``dtype`` +*subtypes*, and the previously missing annotations for +``numpy.dtypes.StringDType`` have been added. From 49f422b75ff4acee53490d124638fd4efebf17c7 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 12 Aug 2024 11:50:22 +0300 Subject: [PATCH 012/101] BUILD: use a shrunken version of scipy-openblas wheels [wheel build] --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index d2940e2d65bc..215bc1229930 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.27.44.5 +scipy-openblas32==0.3.27.44.6 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 965fdb8faadf..5bed94385819 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.27.44.5 -scipy-openblas64==0.3.27.44.5 
+scipy-openblas32==0.3.27.44.6 +scipy-openblas64==0.3.27.44.6 From 117da9404cffdb952362982e4eb9b5f62ca296b4 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 11 Aug 2024 13:02:30 +0200 Subject: [PATCH 013/101] REV: Revert undef I and document it This is based on what Matti wrote in gh-27105 but also adding it to the migration guide. Closes gh-27083 Co-authored-by: Matti Picus --- doc/source/numpy_2_0_migration_guide.rst | 13 ++++++++++ .../reference/c-api/types-and-structures.rst | 26 +++++++++++++++++++ numpy/_core/include/numpy/npy_common.h | 5 ---- 3 files changed, 39 insertions(+), 5 deletions(-) diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 2ff49b162fe4..55d4696a114d 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -220,6 +220,19 @@ using the NumPy types. You can still write cython code using the ``c.real`` and ``c.imag`` attributes (using the native typedefs), but you can no longer use in-place operators ``c.imag += 1`` in Cython's c++ mode. +Because NumPy 2 now includes ``complex.h`` code that uses a variable named +``I`` may see an error such as + +.. code-block::C + error: expected ‘)’ before ‘__extension__’ + double I, + +to use the name ``I`` requires an ``#undef I`` now. + +.. note:: + NumPy 2.0.1 briefly included the ``#undef I`` to help users not already + including ``complex.h``. + Changes to namespaces ===================== diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 8d57153d8803..4565e602193f 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -1611,3 +1611,29 @@ for completeness and assistance in understanding the code. ``arrayobject.h`` header. This type is not exposed to Python and could be replaced with a C-structure. 
As a Python type it takes advantage of reference- counted memory management. + + +NumPy C-API and C complex +========================= +When you use the NumPy C-API, you will have access to complex real declarations +``npy_cdouble`` and ``npy_cfloat``, which are declared in terms of the C +standard types from ``complex.h``. Unfortunately, ``complex.h`` contains +`#define I ...`` (where the actual definition depends on the compiler), which +means that any downstream user that does ``#include `` +could get ``I`` defined, and using something like declaring ``double I;`` in +their code will result in an obscure compiler error like + +.. code-block::C + error: expected ‘)’ before ‘__extension__’ + double I, + +This error can be avoided by adding:: + + #undef I + +to your code. + +.. versionchanged:: 2.0 + The inclusion of ``complex.h`` was new in NumPy 2, so that code defining + a different ``I`` may not have required the ``#undef I`` on older versions. + NumPy 2.0.1 briefly included the ``#under I`` \ No newline at end of file diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index 3132b602a7c8..79ad8ad78cb2 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -379,11 +379,6 @@ typedef struct #include -// Downstream libraries like sympy would like to use I -// see https://github.com/numpy/numpy/issues/26787 -#ifdef I -#undef I -#endif #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) typedef _Dcomplex npy_cdouble; From 87af28e59d207f3407b7d48b5451f55156a474b0 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 12 Aug 2024 08:32:33 +0300 Subject: [PATCH 014/101] BUILD: improve download script --- tools/download-wheels.py | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/tools/download-wheels.py b/tools/download-wheels.py index e5753eb2148c..54dbdf1200a8 100644 --- a/tools/download-wheels.py +++ b/tools/download-wheels.py @@ -56,15 
+56,20 @@ def get_wheel_names(version): The release version. For instance, "1.18.3". """ + ret = [] http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED") tmpl = re.compile(rf"^.*{PREFIX}-{version}{SUFFIX}") - index_url = f"{STAGING_URL}/files" - index_html = http.request("GET", index_url) - soup = BeautifulSoup(index_html.data, "html.parser") - return soup.find_all(string=tmpl) + # TODO: generalize this by searching for `showing 1 of N` and + # looping over N pages, starting from 1 + for i in range(1, 3): + index_url = f"{STAGING_URL}/files?page={i}" + index_html = http.request("GET", index_url) + soup = BeautifulSoup(index_html.data, "html.parser") + ret += soup.find_all(string=tmpl) + return ret -def download_wheels(version, wheelhouse): +def download_wheels(version, wheelhouse, test=False): """Download release wheels. The release wheels for the given NumPy version are downloaded @@ -86,8 +91,15 @@ def download_wheels(version, wheelhouse): wheel_path = os.path.join(wheelhouse, wheel_name) with open(wheel_path, "wb") as f: with http.request("GET", wheel_url, preload_content=False,) as r: - print(f"{i + 1:<4}{wheel_name}") - shutil.copyfileobj(r, f) + info = r.info() + length = int(info.get('Content-Length', '0')) + if length == 0: + length = 'unknown size' + else: + length = f"{(length / 1024 / 1024):.2f}MB" + print(f"{i + 1:<4}{wheel_name} {length}") + if not test: + shutil.copyfileobj(r, f) print(f"\nTotal files downloaded: {len(wheel_names)}") @@ -101,6 +113,10 @@ def download_wheels(version, wheelhouse): default=os.path.join(os.getcwd(), "release", "installers"), help="Directory in which to store downloaded wheels\n" "[defaults to /release/installers]") + parser.add_argument( + "-t", "--test", + action = 'store_true', + help="only list available wheels, do not download") args = parser.parse_args() @@ -110,4 +126,4 @@ def download_wheels(version, wheelhouse): f"{wheelhouse} wheelhouse directory is not present." 
" Perhaps you need to use the '-w' flag to specify one.") - download_wheels(args.version, wheelhouse) + download_wheels(args.version, wheelhouse, test=args.test) From 9bf2e00f3d96c36b20a8017965c38b547acf6b03 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Mon, 12 Aug 2024 07:40:51 +1100 Subject: [PATCH 015/101] MAINT: update default NPY_FEATURE_VERSION after dropping py39 --- numpy/_core/include/numpy/numpyconfig.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index b49d215614ac..0f2b68054527 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -121,8 +121,8 @@ /* user provided a target version, use it */ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION #else - /* Use the default (increase when dropping Python 3.9 support) */ - #define NPY_FEATURE_VERSION NPY_1_19_API_VERSION + /* Use the default (increase when dropping Python 3.10 support) */ + #define NPY_FEATURE_VERSION NPY_1_21_API_VERSION #endif /* Sanity check the (requested) feature version */ From fdf6055d0636f501a6857e04ad8bd4729a579b0a Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 12 Aug 2024 13:44:54 -0600 Subject: [PATCH 016/101] DOC: add free-threading release notes [skip azp][skip actions][skip cirrus] --- doc/source/release/2.1.0-notes.rst | 43 ++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/doc/source/release/2.1.0-notes.rst b/doc/source/release/2.1.0-notes.rst index 295115d57343..c591b29e4c24 100644 --- a/doc/source/release/2.1.0-notes.rst +++ b/doc/source/release/2.1.0-notes.rst @@ -101,6 +101,49 @@ been provided. New Features ============ +Preliminary Support for Free-Threaded CPython 3.13 +-------------------------------------------------- + +CPython 3.13 will be available as an experimental free-threaded build. 
See +https://py-free-threading.github.io, `PEP 703 +`_ and the `CPython 3.13 release notes +`_ for +more detail about free-threaded Python. + +NumPy 2.1 has preliminary support for the free-threaded build of CPython +3.13. This support was enabled by fixing a number of C thread-safety issues in +NumPy. Before NumPy 2.1, NumPy used a large number of C global static variables +to store runtime caches and other state. We have either refactored to avoid the +need for global state, converted the global state to thread-local state, or +added locking. + +Support for free-threaded Python does not mean that NumPy is thread +safe. Read-only shared access to ndarray should be safe. NumPy exposes shared +mutable state and we have not added any locking to the array object itself to +serialize access to shared state. Care must be taken in user code to avoid +races if you would like to mutate the same array in multiple threads. It is +certainly possible to crash NumPy by mutating an array simultaneously in +multiple threads, for example by calling a ufunc and the ``resize`` method +simultaneously. For now our guidance is: "don't do that". In the future we would +like to provide stronger guarantees. + +Object arrays in particular need special care, since the GIL +previously provided locking for object array access and no longer does. See +`Issue #27199 `_ for more +information about object arrays in the free-threaded build. + +If you are interested in free-threaded Python, for example because you have a +multiprocessing-based workflow that you are interested in running with Python +threads, we encourage testing and experimentation. + +If you run into problems that you suspect are because of NumPy, please `open an +issue `_, checking first if +the bug also occurs in the "regular" non-free-threaded CPython 3.13 build. Many +threading bugs can also occur in code that releases the GIL; disabling the GIL +only makes it easier to hit threading bugs. 
+ +(`gh-26157 `__) + * ``numpy.reshape`` and ``numpy.ndarray.reshape`` now support ``shape`` and ``copy`` arguments. From 3b34c0990f3d55eaf7875b9bcec42c92da94faa7 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 13 Aug 2024 16:20:31 +0200 Subject: [PATCH 017/101] BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds The value was simply hardcoded to the wrong thing in the dynamic path... --- numpy/_core/include/numpy/npy_2_compat.h | 2 +- numpy/_core/tests/examples/cython/checks.pyx | 4 ++++ numpy/_core/tests/test_cython.py | 7 +++++++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h index 80bb4088c812..e39e65aedea7 100644 --- a/numpy/_core/include/numpy/npy_2_compat.h +++ b/numpy/_core/include/numpy/npy_2_compat.h @@ -125,7 +125,7 @@ PyArray_ImportNumPyAPI(void) #define NPY_DEFAULT_INT \ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_INTP : NPY_LONG) #define NPY_RAVEL_AXIS \ - (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? -1 : 32) + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32) #define NPY_MAXARGS \ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 
64 : 32) #endif diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index b51ab128053f..c0bb1f3f5370 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -129,6 +129,10 @@ def get_default_integer(): return cnp.dtype("intp") return None +def get_ravel_axis(): + return cnp.NPY_RAVEL_AXIS + + def conv_intp(cnp.intp_t val): return val diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 26a1fafa0066..71c1a457761b 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -153,6 +153,13 @@ def test_default_int(install_temp): assert checks.get_default_integer() is np.dtype(int) + +def test_ravel_axis(install_temp): + import checks + + assert checks.get_ravel_axis() == np.iinfo("intc").min + + def test_convert_datetime64_to_datetimestruct(install_temp): # GH#21199 import checks From 898de56c5d9f088e0647ae4b25e866c36a1d8356 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Aug 2024 02:26:55 +0200 Subject: [PATCH 018/101] TYP: Fixed & improved type hints for ``numpy.histogram2d`` --- numpy/lib/_twodim_base_impl.pyi | 221 ++++++++++++++++-- .../typing/tests/data/reveal/twodim_base.pyi | 71 +++++- 2 files changed, 264 insertions(+), 28 deletions(-) diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 4096976871d7..c4690a4304bd 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -2,6 +2,7 @@ import builtins from collections.abc import Callable, Sequence from typing import ( Any, + TypeAlias, overload, TypeVar, Literal as L, @@ -16,6 +17,7 @@ from numpy import ( int_, intp, float64, + complex128, signedinteger, floating, complexfloating, @@ -29,6 +31,7 @@ from numpy._typing import ( ArrayLike, _ArrayLike, NDArray, + _SupportsArray, _SupportsArrayFunc, _ArrayLikeInt_co, _ArrayLikeFloat_co, @@ -164,44 +167,220 @@ def vander( increasing: 
bool = ..., ) -> NDArray[object_]: ... + +_Int_co: TypeAlias = np.integer[Any] | np.bool +_Float_co: TypeAlias = np.floating[Any] | _Int_co +_Number_co: TypeAlias = np.number[Any] | np.bool + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_SCT]] | Sequence[_SCT] +_ArrayLike2D: TypeAlias = ( + _SupportsArray[np.dtype[_SCT]] + | Sequence[_ArrayLike1D[_SCT]] +) + +_ArrayLike1DInt_co = ( + _SupportsArray[np.dtype[_Int_co]] + | Sequence[int | _Int_co] +) +_ArrayLike1DFloat_co = ( + _SupportsArray[np.dtype[_Float_co]] + | Sequence[float | int | _Float_co] +) +_ArrayLike2DFloat_co = ( + _SupportsArray[np.dtype[_Float_co]] + | Sequence[_ArrayLike1DFloat_co] +) +_ArrayLike1DNumber_co = ( + _SupportsArray[np.dtype[_Number_co]] + | Sequence[int | float | complex | _Number_co] +) + +_SCT_complex = TypeVar("_SCT_complex", bound=np.complexfloating[Any, Any]) +_SCT_inexact = TypeVar("_SCT_inexact", bound=np.inexact[Any]) +_SCT_number_co = TypeVar("_SCT_number_co", bound=_Number_co) + @overload -def histogram2d( # type: ignore[misc] - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, +def histogram2d( + x: _ArrayLike1D[_SCT_complex], + y: _ArrayLike1D[_SCT_complex | _Float_co], bins: int | Sequence[int] = ..., - range: None | _ArrayLikeFloat_co = ..., + range: None | _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[floating[Any]], - NDArray[floating[Any]], + NDArray[_SCT_complex], + NDArray[_SCT_complex], ]: ... 
@overload def histogram2d( - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, + x: _ArrayLike1D[_SCT_complex | _Float_co], + y: _ArrayLike1D[_SCT_complex], bins: int | Sequence[int] = ..., - range: None | _ArrayLikeFloat_co = ..., + range: None | _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[complexfloating[Any, Any]], - NDArray[complexfloating[Any, Any]], + NDArray[_SCT_complex], + NDArray[_SCT_complex], ]: ... -@overload # TODO: Sort out `bins` +@overload def histogram2d( - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - bins: Sequence[_ArrayLikeInt_co], - range: None | _ArrayLikeFloat_co = ..., + x: _ArrayLike1D[_SCT_inexact], + y: _ArrayLike1D[_SCT_inexact | _Int_co], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_inexact], + NDArray[_SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_SCT_inexact | _Int_co], + y: _ArrayLike1D[_SCT_inexact], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_inexact], + NDArray[_SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float | int], + y: _ArrayLike1DInt_co | Sequence[float | int], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[float64], + NDArray[float64], +]: ... 
+@overload +def histogram2d( + x: Sequence[complex | float | int], + y: Sequence[complex | float | int], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[complex128 | float64], + NDArray[complex128 | float64], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: _ArrayLike1D[_SCT_number_co] | Sequence[_ArrayLike1D[_SCT_number_co]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co], + NDArray[_SCT_number_co], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_SCT_inexact], + y: _ArrayLike1D[_SCT_inexact], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | _SCT_inexact], + NDArray[_SCT_number_co | _SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float | int], + y: _ArrayLike1DInt_co | Sequence[float | int], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | float64], + NDArray[_SCT_number_co | float64], +]: ... +@overload +def histogram2d( + x: Sequence[complex | float | int], + y: Sequence[complex | float | int], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | complex128 | float64], + NDArray[_SCT_number_co | complex128 | float64] , +]: ... 
+ +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.bool], + NDArray[np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[int | bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.int_ | np.bool], + NDArray[np.int_ | np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[float | int | bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.float64 | np.int_ | np.bool], + NDArray[np.float64 | np.int_ | np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[complex | float | int | bool]], + range: None | _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[Any], - NDArray[Any], + NDArray[np.complex128 | np.float64 | np.int_ | np.bool], + NDArray[np.complex128 | np.float64 | np.int_ | np.bool], ]: ... 
# NOTE: we're assuming/demanding here the `mask_func` returns diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 9d808dbb1e0d..f52ad3a41b69 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -28,6 +28,7 @@ AR_c: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] AR_LIKE_b: list[bool] +AR_LIKE_c: list[complex] assert_type(np.fliplr(AR_b), npt.NDArray[np.bool]) assert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any]) @@ -62,28 +63,84 @@ assert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating[Any]]) assert_type(np.vander(AR_c), npt.NDArray[np.complexfloating[Any, Any]]) assert_type(np.vander(AR_O), npt.NDArray[np.object_]) +assert_type( + np.histogram2d(AR_LIKE_c, AR_LIKE_c), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128 | np.float64], + npt.NDArray[np.complex128 | np.float64], + ], +) assert_type( np.histogram2d(AR_i, AR_b), tuple[ npt.NDArray[np.float64], - npt.NDArray[np.floating[Any]], - npt.NDArray[np.floating[Any]], + npt.NDArray[np.float64], + npt.NDArray[np.float64], ], ) assert_type( - np.histogram2d(AR_f, AR_f), + np.histogram2d(AR_f, AR_i), tuple[ npt.NDArray[np.float64], - npt.NDArray[np.floating[Any]], - npt.NDArray[np.floating[Any]], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.histogram2d(AR_i, AR_f), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.float64], ], ) assert_type( np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b), tuple[ npt.NDArray[np.float64], - npt.NDArray[np.complexfloating[Any, Any]], - npt.NDArray[np.complexfloating[Any, Any]], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_f, AR_c, bins=8), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_c, AR_f, bins=(8, 5)), + tuple[ + 
npt.NDArray[np.float64], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_c, AR_i, bins=AR_u), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.uint64], + npt.NDArray[np.uint64], + ], +) +assert_type( + np.histogram2d(AR_c, AR_c, bins=(AR_u, AR_u)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.uint64], + npt.NDArray[np.uint64], + ], +) +assert_type( + np.histogram2d(AR_c, AR_c, bins=(AR_b, 8)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.bool | np.complex128], + npt.NDArray[np.bool | np.complex128], ], ) From f3f8f2c11f29eab3149b167ac1f3bd23c4d5fe8d Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 9 Aug 2024 03:45:31 +0200 Subject: [PATCH 019/101] TYP: Fix incompatible overrides in the ``numpy._typing._ufunc`` stubs --- numpy/_typing/_ufunc.pyi | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 5e52039864b7..9495321e2c20 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -17,6 +17,7 @@ from typing import ( Protocol, NoReturn, ) +from typing_extensions import LiteralString from numpy import ufunc, _CastingKind, _OrderKACF from numpy.typing import NDArray @@ -32,9 +33,9 @@ _3Tuple = tuple[_T, _T, _T] _4Tuple = tuple[_T, _T, _T, _T] _NTypes = TypeVar("_NTypes", bound=int, covariant=True) -_IDType = TypeVar("_IDType", bound=Any, covariant=True) -_NameType = TypeVar("_NameType", bound=str, covariant=True) -_Signature = TypeVar("_Signature", bound=str, covariant=True) +_IDType = TypeVar("_IDType", covariant=True) +_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) +_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) class _SupportsArrayUFunc(Protocol): From 44ce7e8f7b43c1045ad78eecaac0435fff491fae Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 16 Aug 2024 10:28:23 +0200 Subject: [PATCH 020/101] BUG: Fix ``PyArray_ZeroContiguousBuffer`` (resize) with 
struct dtypes We allow the structured dtype to return NULL for the zero fill function to indicate that a simple memset is sufficient. Also simplifies error handling a bit. The get_fill_zero_loop function must clean up on error and not return references if returns a `NULL` loop. --- numpy/_core/src/multiarray/refcount.c | 10 ++++------ numpy/_core/tests/test_multiarray.py | 6 ++++++ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index 0da40cbdc60e..571b50372684 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -83,14 +83,16 @@ PyArray_ZeroContiguousBuffer( if (get_fill_zero_loop( NULL, descr, aligned, descr->elsize, &(zero_info.func), &(zero_info.auxdata), &flags_unused) < 0) { - goto fail; + return -1; } } else { + assert(zero_info.func == NULL); + } + if (zero_info.func == NULL) { /* the multiply here should never overflow, since we already checked if the new array size doesn't overflow */ memset(data, 0, size*stride); - NPY_traverse_info_xfree(&zero_info); return 0; } @@ -98,10 +100,6 @@ PyArray_ZeroContiguousBuffer( NULL, descr, data, size, stride, zero_info.auxdata); NPY_traverse_info_xfree(&zero_info); return res; - - fail: - NPY_traverse_info_xfree(&zero_info); - return -1; } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 441d76af9228..0bc9fea9c960 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -9174,6 +9174,12 @@ def test_resize(self): d.resize(150) assert_(old < sys.getsizeof(d)) + @pytest.mark.parametrize("dtype", ["u4,f4", "u4,O"]) + def test_resize_structured(self, dtype): + a = np.array([(0, 0.0) for i in range(5)], dtype=dtype) + a.resize(1000) + assert_array_equal(a, np.zeros(1000, dtype=dtype)) + def test_error(self): d = np.ones(100) assert_raises(TypeError, d.__sizeof__, "a") From 
d902c24b684e8ba5370a160a375beaa77bae5032 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 15 Aug 2024 14:54:23 -0600 Subject: [PATCH 021/101] DOC: add docs on thread safety in NumPy [skip azp][skip actions][skip cirrus] --- doc/source/reference/global_state.rst | 16 ++++++--- doc/source/reference/index.rst | 1 + doc/source/reference/thread_safety.rst | 49 ++++++++++++++++++++++++++ 3 files changed, 61 insertions(+), 5 deletions(-) create mode 100644 doc/source/reference/thread_safety.rst diff --git a/doc/source/reference/global_state.rst b/doc/source/reference/global_state.rst index e0ab1bb2a7ba..5bc512e0e9ec 100644 --- a/doc/source/reference/global_state.rst +++ b/doc/source/reference/global_state.rst @@ -4,11 +4,10 @@ Global state ************ -NumPy has a few import-time, compile-time, or runtime options -which change the global behaviour. -Most of these are related to performance or for debugging -purposes and will not be interesting to the vast majority -of users. +NumPy exposes global state in legacy APIs and a few import-time, +compile-time, or runtime options which change the global behaviour. +Most of these are related to performance or for debugging purposes and +will not be interesting to the vast majority of users. Performance-related options @@ -71,3 +70,10 @@ and set the ``ndarray.base``. .. versionchanged:: 1.25.2 This variable is only checked on the first import. + +Legacy User DTypes +================== + +The number of legacy user DTypes is stored in ``NPY_NUMUSERTPES``, a global +variable that is exposed in the NumPy C API. This means that the legacy DType +API is inherently not thread-safe. 
diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index ed9641409014..02e3248953fb 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -58,6 +58,7 @@ Other topics array_api simd/index + thread_safety global_state security distutils_status_migration diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst new file mode 100644 index 000000000000..df806e9e7c5f --- /dev/null +++ b/doc/source/reference/thread_safety.rst @@ -0,0 +1,49 @@ +.. _thread_safety: + +************* +Thread Safety +************* + +NumPy supports use in a multithreaded context via the `threading` module in the +standard library. Many NumPy operations release the GIL, so unlike many +situations in Python, it is possible to improve parallel performance by +exploiting multithreaded parallelism in Python. + +The easiest performance gains happen when each worker thread owns its own array +or set of array objects, with no data directly shared between threads. Because +NumPy releases the GIL for many low-level operations, threads that spend most of +the time in low-level code will run in parallel. + +It is possible to share NumPy arrays between threads, but extreme care must be +taken to avoid creating thread safety issues when mutating shared arrays. If +two threads simultaneously read from and write to the same array, at best they +will see inconsistent views of the same array data. It is also possible to crash +the Python interpreter by, for example, resizing an array while another thread +is reading from it to compute a ufunc operation. + +In the future, we may add locking to ndarray to make working with shared NumPy +arrays easier, but for now we suggest focusing on read-only access of arrays +that are shared between threads. + +Note that operations that *do not* release the GIL will see no performance gains +from use of the `threading` module, and instead might be better served with +`multiprocessing`. 
In particular, operations on arrays with ``dtype=object`` do +not release the GIL. + +Free-threaded Python +-------------------- + +.. versionadded:: 2.1 + +Starting with NumPy 2.1 and CPython 3.13, NumPy also has experimental support +for python runtimes with the GIL disabled. See +https://py-free-threading.github.io for more information about installing and +using free-threaded Python, as well as information about supporting it in +libraries that depend on NumPy. + +Because free-threaded Python does not have a global interpreter lock to +serialize access to Python objects, there are more opportunities for threads to +mutate shared state and create thread safety issues. In addition to the +limitations about locking of the ndarray object noted above, this also means +that arrays with ``dtype=object`` are not protected by the GIL, creating data +races for python objects that are not possible outside free-threaded python. From 5af2e965a02137cd05706d8d395d8263f395f7c7 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 16 Aug 2024 10:52:01 -0600 Subject: [PATCH 022/101] Move NUMUSERTYPES thread safety discussion to legacy DType API docs --- doc/source/reference/c-api/array.rst | 7 +++++++ doc/source/reference/global_state.rst | 21 +++++++-------------- doc/source/user/c-info.beyond-basics.rst | 3 +++ 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 68fbb6ef3d66..80af4b83d172 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1264,6 +1264,13 @@ User-defined data types registered (checked only by the address of the pointer), then return the previously-assigned type-number. + The number of user DTypes known to numpy is stored in + ``NPY_NUMUSERTYPES``, a static global variable that is public in the + C API. Accessing this symbol is inherently *not* thread-safe. 
If + for some reason you need to use this API in a multithreaded context, + you will need to add your own locking, NumPy does not ensure new + data types can be added in a thread-safe manner. + .. c:function:: int PyArray_RegisterCastFunc( \ PyArray_Descr* descr, int totype, PyArray_VectorUnaryFunc* castfunc) diff --git a/doc/source/reference/global_state.rst b/doc/source/reference/global_state.rst index 5bc512e0e9ec..e66c86faf1b3 100644 --- a/doc/source/reference/global_state.rst +++ b/doc/source/reference/global_state.rst @@ -1,13 +1,13 @@ .. _global_state: -************ -Global state -************ +**************************** +Global Configuration Options +**************************** -NumPy exposes global state in legacy APIs and a few import-time, -compile-time, or runtime options which change the global behaviour. -Most of these are related to performance or for debugging purposes and -will not be interesting to the vast majority of users. +NumPy has a few import-time, compile-time, or runtime configuration +options which change the global behaviour. Most of these are related to +performance or for debugging purposes and will not be interesting to the +vast majority of users. Performance-related options @@ -70,10 +70,3 @@ and set the ``ndarray.base``. .. versionchanged:: 1.25.2 This variable is only checked on the first import. - -Legacy User DTypes -================== - -The number of legacy user DTypes is stored in ``NPY_NUMUSERTPES``, a global -variable that is exposed in the NumPy C API. This means that the legacy DType -API is inherently not thread-safe. diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst index 697c0c045e4f..7bf793ae2e47 100644 --- a/doc/source/user/c-info.beyond-basics.rst +++ b/doc/source/user/c-info.beyond-basics.rst @@ -268,6 +268,9 @@ specifies your data-type. This type number should be stored and made available by your module so that other modules can use it to recognize your data-type. 
+Note that this API is inherently thread-unsafe. See `thread_safety` for more +details about thread safety in NumPy. + Registering a casting function ------------------------------ From 395a81dee5be52b2acff817e6f16652d23181fc3 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 16 Aug 2024 10:56:02 -0600 Subject: [PATCH 023/101] DOC: reword discussion about shared arrays to hopefully be clearer [skip azp][skip actions][skip cirrus] --- doc/source/reference/thread_safety.rst | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst index df806e9e7c5f..84590bfac39c 100644 --- a/doc/source/reference/thread_safety.rst +++ b/doc/source/reference/thread_safety.rst @@ -15,15 +15,17 @@ NumPy releases the GIL for many low-level operations, threads that spend most of the time in low-level code will run in parallel. It is possible to share NumPy arrays between threads, but extreme care must be -taken to avoid creating thread safety issues when mutating shared arrays. If -two threads simultaneously read from and write to the same array, at best they -will see inconsistent views of the same array data. It is also possible to crash -the Python interpreter by, for example, resizing an array while another thread -is reading from it to compute a ufunc operation. - -In the future, we may add locking to ndarray to make working with shared NumPy -arrays easier, but for now we suggest focusing on read-only access of arrays -that are shared between threads. +taken to avoid creating thread safety issues when mutating arrays that are +shared between multiple threads. If two threads simultaneously read from and +write to the same array, they will at best produce inconsistent, racey results that +are not reproducible, let alone correct. 
It is also possible to crash the Python +interpreter by, for example, resizing an array while another thread is reading +from it to compute a ufunc operation. + +In the future, we may add locking to ndarray to make writing multithreaded +algorithms using NumPy arrays safer, but for now we suggest focusing on +read-only access of arrays that are shared between threads, or adding your own +locking if you need to mutation and multithreading. Note that operations that *do not* release the GIL will see no performance gains from use of the `threading` module, and instead might be better served with From 85b1cab2ad5f418cca485042478622ce13a497a7 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 17 Aug 2024 10:19:56 -0600 Subject: [PATCH 024/101] BUG: Allow fitting of degree zero polynomials with Polynomial.fit Backport of #25984. For degenerate domains in Polynomial.fit (which occurs when all independent datapoints are equal) we expand the domain. This allows fitting of degree zero polynomials. Fixes #25982 --- numpy/polynomial/_polybase.py | 3 +++ numpy/polynomial/tests/test_polynomial.py | 11 ++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index 9c345553eedd..65c3ff43dc32 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -1041,6 +1041,9 @@ class domain in NumPy 1.4 and ``None`` in later versions. 
""" if domain is None: domain = pu.getdomain(x) + if domain[0] == domain[1]: + domain[0] -= 1 + domain[1] += 1 elif type(domain) is list and len(domain) == 0: domain = cls.domain diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index a0be94c3a6a0..162cb0a9bea0 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -5,11 +5,12 @@ from fractions import Fraction import numpy as np import numpy.polynomial.polynomial as poly +import numpy.polynomial.polyutils as pu import pickle from copy import deepcopy from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, - assert_array_equal, assert_raises_regex) + assert_array_equal, assert_raises_regex, assert_warns) def trim(x): @@ -628,6 +629,14 @@ def test_polyline(self): def test_polyline_zero(self): assert_equal(poly.polyline(3, 0), [3]) + def test_fit_degenerate_domain(self): + p = poly.Polynomial.fit([1], [2], deg=0) + assert_equal(p.coef, [2.]) + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) + assert_almost_equal(p.coef, [2.05]) + with assert_warns(pu.RankWarning): + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) + def test_result_type(self): w = np.array([-1, 1], dtype=np.float32) p = np.polynomial.Polynomial(w, domain=w, window=w) From b6f434f852e9d1ed4f7de9a3a465b38171d54a61 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 18 Aug 2024 09:44:41 -0600 Subject: [PATCH 025/101] REL: Prepare for the NumPy 2.1.0 release [wheel build] - Update 2.1.0-changelog.rst - Update 2.1.0-notes.rst - Update pyproject.toml - Delete release fragments. 
--- doc/changelog/2.1.0-changelog.rst | 17 ++++++++++++++++- .../upcoming_changes/26897.improvement.rst | 6 ------ .../upcoming_changes/27008.improvement.rst | 8 -------- doc/source/release/2.1.0-notes.rst | 17 +++++++++++++++++ pyproject.toml | 2 +- 5 files changed, 34 insertions(+), 16 deletions(-) delete mode 100644 doc/release/upcoming_changes/26897.improvement.rst delete mode 100644 doc/release/upcoming_changes/27008.improvement.rst diff --git a/doc/changelog/2.1.0-changelog.rst b/doc/changelog/2.1.0-changelog.rst index 99f871a97718..af7f5a3b07c7 100644 --- a/doc/changelog/2.1.0-changelog.rst +++ b/doc/changelog/2.1.0-changelog.rst @@ -8,6 +8,7 @@ names contributed a patch for the first time. * !ogidig5 + * !partev * !vahidmech + +* !h-vetinari * Aaron Meurer * Adrin Jalali + * Agriya Khetarpal @@ -118,7 +119,7 @@ names contributed a patch for the first time. Pull requests merged ==================== -A total of 455 pull requests were merged for this release. +A total of 469 pull requests were merged for this release. * `#12150 `__: ENH: When histogramming data with integer dtype, force bin width... * `#24448 `__: TST: add some tests of np.log for complex input. @@ -575,3 +576,17 @@ A total of 455 pull requests were merged for this release. * `#27162 `__: BLD: use smaller scipy-openblas builds * `#27166 `__: ENH: fix thread-unsafe C API usages * `#27173 `__: MAINT: Bump pythoncapi-compat version. +* `#27176 `__: REL: Prepare for the NumPy 2.1.0rc1 release [wheel build] +* `#27180 `__: DOC: Add release notes for #26897 +* `#27181 `__: DOC: Add release notes for #27008 +* `#27190 `__: BUILD: use a shrunken version of scipy-openblas wheels [wheel... 
+* `#27193 `__: REV: Revert undef I and document it +* `#27196 `__: BUILD: improve download script +* `#27197 `__: MAINT: update default NPY_FEATURE_VERSION after dropping py39 +* `#27200 `__: DOC: add free-threading release notes +* `#27209 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27216 `__: TYP: Fixed & improved type hints for ``numpy.histogram2d`` +* `#27217 `__: TYP: Fix incompatible overrides in the ``numpy._typing._ufunc``... +* `#27229 `__: BUG: Fix ``PyArray_ZeroContiguousBuffer`` (resize) with struct... +* `#27233 `__: DOC: add docs on thread safety in NumPy +* `#27234 `__: BUG: Allow fitting of degree zero polynomials with Polynomial.fit diff --git a/doc/release/upcoming_changes/26897.improvement.rst b/doc/release/upcoming_changes/26897.improvement.rst deleted file mode 100644 index 1b3b327711af..000000000000 --- a/doc/release/upcoming_changes/26897.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -Type hinting ``numpy.polynomial`` ---------------------------------- - -Starting from the 2.1 release, PEP 484 type annotations have been included for -the functions and convenience classes in ``numpy.polynomial`` and its -sub-packages. diff --git a/doc/release/upcoming_changes/27008.improvement.rst b/doc/release/upcoming_changes/27008.improvement.rst deleted file mode 100644 index 47e1090d9067..000000000000 --- a/doc/release/upcoming_changes/27008.improvement.rst +++ /dev/null @@ -1,8 +0,0 @@ -Improved ``numpy.dtypes`` type hints ------------------------------------- - -The type annotations for ``numpy.dtypes`` are now a better reflection of the -runtime: -The ``numpy.dtype`` type-aliases have been replaced with specialized ``dtype`` -*subtypes*, and the previously missing annotations for -``numpy.dtypes.StringDType`` have been added. 
diff --git a/doc/source/release/2.1.0-notes.rst b/doc/source/release/2.1.0-notes.rst index c591b29e4c24..bb9c71079062 100644 --- a/doc/source/release/2.1.0-notes.rst +++ b/doc/source/release/2.1.0-notes.rst @@ -250,6 +250,23 @@ a python ``ContextVar``, making the context manager thread and async-safe. (`gh-26846 `__) +Type hinting ``numpy.polynomial`` +--------------------------------- +Starting from the 2.1 release, PEP 484 type annotations have been included for +the functions and convenience classes in ``numpy.polynomial`` and its +sub-packages. + +(`gh-26897 `__) + +Improved ``numpy.dtypes`` type hints +------------------------------------ +The type annotations for ``numpy.dtypes`` are now a better reflection of the +runtime: The ``numpy.dtype`` type-aliases have been replaced with specialized +``dtype`` *subtypes*, and the previously missing annotations for +``numpy.dtypes.StringDType`` have been added. + +(`gh-27008 `__) + Performance improvements and changes ==================================== diff --git a/pyproject.toml b/pyproject.toml index 305db1c77ba4..6596535b0f3b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.1.0rc1" +version = "2.1.0" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From 38d7b0846f8d4307ee7222da70cba8fa691e25d6 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 18 Aug 2024 17:09:18 -0600 Subject: [PATCH 026/101] MAINT: prepare 2.1.x for further development - Create 2.1.1-notes.rst - Update release.rst - Update pavement.py - Update pyproject.py --- doc/source/release.rst | 1 + doc/source/release/2.1.1-notes.rst | 18 ++++++++++++++++++ pavement.py | 2 +- pyproject.toml | 2 +- 4 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 doc/source/release/2.1.1-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index cad71725fe94..97af3a958211 100644 --- 
a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.1.1 2.1.0 2.0.1 2.0.0 diff --git a/doc/source/release/2.1.1-notes.rst b/doc/source/release/2.1.1-notes.rst new file mode 100644 index 000000000000..f147c22ed0e0 --- /dev/null +++ b/doc/source/release/2.1.1-notes.rst @@ -0,0 +1,18 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.1.1 Release Notes +========================== + +NumPy 2.1.1 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.0 release. + +The Python versions supported by this release are 3.10-3.13. + +Contributors +============ + + +Pull requests merged +==================== + diff --git a/pavement.py b/pavement.py index 43dc28675eb9..a4439e32185f 100644 --- a/pavement.py +++ b/pavement.py @@ -38,7 +38,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.1.0-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.1.1-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index 6596535b0f3b..aab64e51e6f9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.1.0" +version = "2.1.1" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From 87832d4fee8fd8d3942a44983f6c6bce55731531 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 20 Aug 2024 13:54:14 -0600 Subject: [PATCH 027/101] BUG: revert unintended change in the return value of set_printoptions --- numpy/_core/arrayprint.py | 17 +++++++++++++---- numpy/_core/tests/test_arrayprint.py | 3 ++- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 4297e109ce8a..3ee4e45197b5 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -280,6 +280,15 @@ def 
set_printoptions(precision=None, threshold=None, edgeitems=None, array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ]) """ + _set_printoptions(precision, threshold, edgeitems, linewidth, suppress, nanstr, + infstr, formatter, sign, floatmode, legacy=legacy, + override_repr=override_repr) + + +def _set_printoptions(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, + infstr=None, formatter=None, sign=None, floatmode=None, + *, legacy=None, override_repr=None): new_opt = _make_options_dict(precision, threshold, edgeitems, linewidth, suppress, nanstr, infstr, sign, formatter, floatmode, legacy) @@ -293,8 +302,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, if updated_opt['legacy'] == 113: updated_opt['sign'] = '-' - token = format_options.set(updated_opt) - return token + return format_options.set(updated_opt) @set_module('numpy') @@ -378,8 +386,9 @@ def printoptions(*args, **kwargs): -------- set_printoptions, get_printoptions - """ - token = set_printoptions(*args, **kwargs) + """ + token = _set_printoptions(*args, **kwargs) + try: yield get_printoptions() finally: diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 5b0642cbb0bd..e2305c974147 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -627,8 +627,9 @@ def teardown_method(self): def test_basic(self): x = np.array([1.5, 0, 1.234567890]) assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])") - np.set_printoptions(precision=4) + ret = np.set_printoptions(precision=4) assert_equal(repr(x), "array([1.5 , 0. 
, 1.2346])") + assert ret is None def test_precision_zero(self): np.set_printoptions(precision=0) From b1adcaad0dde46dc7b7e34a51cdbe3289e4f8505 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 20 Aug 2024 13:57:12 -0600 Subject: [PATCH 028/101] MAINT: appease linter --- numpy/_core/arrayprint.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 3ee4e45197b5..fde0d7d4a162 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -280,9 +280,9 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ]) """ - _set_printoptions(precision, threshold, edgeitems, linewidth, suppress, nanstr, - infstr, formatter, sign, floatmode, legacy=legacy, - override_repr=override_repr) + _set_printoptions(precision, threshold, edgeitems, linewidth, suppress, + nanstr, infstr, formatter, sign, floatmode, + legacy=legacy, override_repr=override_repr) def _set_printoptions(precision=None, threshold=None, edgeitems=None, From 818ed35459a1ec731f40dc8dd260cad31f2432cd Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 21 Aug 2024 11:01:15 -0600 Subject: [PATCH 029/101] BUG: fix reference counting bug in __array_interface__ implementation (#27249) * BUG: fix reference counting bug in __array_interface__ implementation * MAINT: only decref if the reference is valid --- numpy/_core/src/multiarray/ctors.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 5c1a78daf0c5..c659dfa356cd 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2234,8 +2234,8 @@ PyArray_FromInterface(PyObject *origin) Py_SETREF(dtype, new_dtype); } } + Py_DECREF(descr); } - Py_DECREF(descr); } Py_CLEAR(attr); From 82cae4011bea3bdc4ca21f95273576f55fd8ca82 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 21 Aug 
2024 19:15:07 +0200 Subject: [PATCH 030/101] TST: Add regression test for missing descr in array-interface This adds a simple regression test for the missing descr for gh-27249. --- numpy/_core/tests/test_array_coercion.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index ee7b7c8d6685..c2172d40d81e 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -762,6 +762,17 @@ def __getitem__(self): with pytest.raises(error): np.array(BadSequence()) + def test_array_interface_descr_optional(self): + # The descr should be optional regresion test for gh-27249 + arr = np.ones(10, dtype="V10") + iface = arr.__array_interface__ + iface.pop("descr") + + class MyClass: + __array_interface__ = iface + + assert_array_equal(np.asarray(MyClass), arr) + class TestAsArray: """Test expected behaviors of ``asarray``.""" From 2b2909e04c73d0c366ff03448ad1b13f3deca6cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= Date: Wed, 21 Aug 2024 19:51:18 +0200 Subject: [PATCH 031/101] BUG: Fix #27256 and #27257 --- doc/release/upcoming_changes/26766.change.rst | 2 ++ numpy/_core/code_generators/ufunc_docstrings.py | 2 +- numpy/_core/fromnumeric.py | 12 ++++++------ numpy/_core/fromnumeric.pyi | 10 ++++++++-- numpy/_core/tests/test_numeric.py | 1 + numpy/lib/_ufunclike_impl.py | 12 ++++++------ 6 files changed, 24 insertions(+), 15 deletions(-) create mode 100644 doc/release/upcoming_changes/26766.change.rst diff --git a/doc/release/upcoming_changes/26766.change.rst b/doc/release/upcoming_changes/26766.change.rst new file mode 100644 index 000000000000..f9223a1d1114 --- /dev/null +++ b/doc/release/upcoming_changes/26766.change.rst @@ -0,0 +1,2 @@ +* `numpy.fix` now won't perform casting to a floating data-type for integer + and boolean data-type input arrays. 
diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 2e4d694065fb..cf000506e096 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -795,7 +795,7 @@ def add_newdoc(place, name, doc): Returns ------- y : ndarray or scalar - The ceiling of each element in `x`, with `float` dtype. + The ceiling of each element in `x`. $OUT_SCALAR_1 See Also diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 45614511ecf0..a3d8712764e0 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -207,13 +207,13 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, /, shape=None, *, newshape=None, order=None, +def _reshape_dispatcher(a, /, shape=None, order=None, *, newshape=None, copy=None): return (a,) @array_function_dispatch(_reshape_dispatcher) -def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): +def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): """ Gives a new shape to an array without changing its data. @@ -226,10 +226,6 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): an integer, then the result will be a 1-D array of that length. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. - newshape : int or tuple of ints - .. deprecated:: 2.1 - Replaced by ``shape`` argument. Retained for backward - compatibility. order : {'C', 'F', 'A'}, optional Read the elements of ``a`` using this index order, and place the elements into the reshaped array using this index order. 
'C' @@ -243,6 +239,10 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): 'A' means to read / write the elements in Fortran-like index order if ``a`` is Fortran *contiguous* in memory, C-like order otherwise. + newshape : int or tuple of ints + .. deprecated:: 2.1 + Replaced by ``shape`` argument. Retained for backward + compatibility. copy : bool, optional If ``True``, then the array data is copied. If ``None``, a copy will only be made if it's required by ``order``. For ``False`` it raises diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 0d4e30ce8101..08e791789c82 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -92,15 +92,21 @@ def take( @overload def reshape( a: _ArrayLike[_SCT], - newshape: _ShapeLike, + /, + shape: _ShapeLike = ..., order: _OrderACF = ..., + *, + newshape: _ShapeLike = ..., copy: None | bool = ..., ) -> NDArray[_SCT]: ... @overload def reshape( a: ArrayLike, - newshape: _ShapeLike, + /, + shape: _ShapeLike = ..., order: _OrderACF = ..., + *, + newshape: _ShapeLike = ..., copy: None | bool = ..., ) -> NDArray[Any]: ... diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index ee0d1bbfee1e..de1f7c71bd1a 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -184,6 +184,7 @@ def test_reshape_shape_arg(self): assert_equal(np.reshape(arr, shape), expected) assert_equal(np.reshape(arr, shape, order="C"), expected) + assert_equal(np.reshape(arr, shape, "C"), expected) assert_equal(np.reshape(arr, shape=shape), expected) assert_equal(np.reshape(arr, shape=shape, order="C"), expected) with pytest.warns(DeprecationWarning): diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 3fc5a32d33a6..3f026a2ce79c 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -21,12 +21,12 @@ def fix(x, out=None): Round to nearest integer towards zero. 
Round an array of floats element-wise to nearest integer towards zero. - The rounded values are returned as floats. + The rounded values have the same data-type as the input. Parameters ---------- x : array_like - An array of floats to be rounded + An array to be rounded out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the input broadcasts to. If not provided or None, a @@ -35,12 +35,12 @@ def fix(x, out=None): Returns ------- out : ndarray of floats - A float array with the same dimensions as the input. - If second argument is not supplied then a float array is returned + An array with the same dimensions and data-type as the input. + If second argument is not supplied then a new array is returned with the rounded values. If a second argument is supplied the result is stored there. - The return value `out` is then a reference to that array. + The return value ``out`` is then a reference to that array. See Also -------- @@ -53,7 +53,7 @@ def fix(x, out=None): >>> np.fix(3.14) 3.0 >>> np.fix(3) - 3.0 + 3 >>> np.fix([2.1, 2.9, -2.1, -2.9]) array([ 2., 2., -2., -2.]) From a1e7385c56a9906a6dc075b755cf41c3a033f65a Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Mon, 26 Aug 2024 09:57:47 +0200 Subject: [PATCH 032/101] BUG: Fix array_equal for numeric and non-numeric scalar types Backport of #27275 Mitigates #27271. The underlying issue (an array comparison returning a python bool instead of a numpy bool) is not addressed. 
The order of statements is slightly reordered, so that the if `a1 is a2:` check can be done before the calculation of `cannot_have_nan` Closes gh-27271 --- numpy/_core/numeric.py | 16 ++++++++-------- numpy/_core/tests/test_numeric.py | 7 +++++++ 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 39b3de44fabe..1f3f1c20dbd1 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -2554,17 +2554,17 @@ def array_equal(a1, a2, equal_nan=False): if a1.shape != a2.shape: return False if not equal_nan: - return builtins.bool((a1 == a2).all()) - cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) - and _dtype_cannot_hold_nan(a2.dtype)) - if cannot_have_nan: - if a1 is a2: - return True - return builtins.bool((a1 == a2).all()) + return builtins.bool((asanyarray(a1 == a2)).all()) if a1 is a2: # nan will compare equal so an array will compare equal to itself. return True + + cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) + and _dtype_cannot_hold_nan(a2.dtype)) + if cannot_have_nan: + return builtins.bool(asarray(a1 == a2).all()) + # Handling NaN values if equal_nan is True a1nan, a2nan = isnan(a1), isnan(a2) # NaN's occur at different locations @@ -2624,7 +2624,7 @@ def array_equiv(a1, a2): except Exception: return False - return builtins.bool((a1 == a2).all()) + return builtins.bool(asanyarray(a1 == a2).all()) def _astype_dispatcher(x, dtype, /, *, copy=None, device=None): diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index de1f7c71bd1a..5906922ff1bb 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -2191,6 +2191,13 @@ def test_array_equal_equal_nan(self, bx, by, equal_nan, expected): assert_(res is expected) assert_(type(res) is bool) + def test_array_equal_different_scalar_types(self): + # https://github.com/numpy/numpy/issues/27271 + a = np.array("foo") + b = np.array(1) + assert not np.array_equal(a, b) + assert 
not np.array_equiv(a, b) + def test_none_compares_elementwise(self): a = np.array([None, 1, None], dtype=object) assert_equal(a == None, [True, False, True]) From 326bc17ec19c26129e40485f8c539686efa0e289 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 26 Aug 2024 17:25:25 -0600 Subject: [PATCH 033/101] MAINT: Update main after the 2.0.2 release - Forward port 2.0.2-changelog.rst - Forward port 2.0.2-notes.rst - Update release.rst [skip azp] [skip cirrus] [skip actions] --- doc/changelog/2.0.2-changelog.rst | 45 +++++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.0.2-notes.rst | 58 ++++++++++++++++++++++++++++++ 3 files changed, 104 insertions(+) create mode 100644 doc/changelog/2.0.2-changelog.rst create mode 100644 doc/source/release/2.0.2-notes.rst diff --git a/doc/changelog/2.0.2-changelog.rst b/doc/changelog/2.0.2-changelog.rst new file mode 100644 index 000000000000..6622407dd8f6 --- /dev/null +++ b/doc/changelog/2.0.2-changelog.rst @@ -0,0 +1,45 @@ + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Bruno Oliveira + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christopher Sidebottom +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* Yair Chuchem + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#27000 `__: REL: Prepare for the NumPy 2.0.1 release [wheel build] +* `#27001 `__: MAINT: prepare 2.0.x for further development +* `#27021 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27022 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27061 `__: BUG: use proper input and output descriptor in array_assign_subscript... 
+* `#27073 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27074 `__: BUG: Bump Highway to latest master +* `#27077 `__: BUG: Off by one in memory overlap check +* `#27122 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27126 `__: BUG: Bump Highway to latest +* `#27128 `__: BUG: add missing error handling in public_dtype_api.c +* `#27129 `__: BUG: fix another cast setup in array_assign_subscript +* `#27130 `__: BUG: Fix building NumPy in FIPS mode +* `#27131 `__: BLD: update vendored Meson for cross-compilation patches +* `#27146 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27151 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27195 `__: REV: Revert undef I and document it +* `#27213 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27279 `__: BUG: Fix array_equal for numeric and non-numeric scalar types diff --git a/doc/source/release.rst b/doc/source/release.rst index 97af3a958211..fa6cb5bbcb8a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -7,6 +7,7 @@ Release notes 2.1.1 2.1.0 + 2.0.2 2.0.1 2.0.0 1.26.4 diff --git a/doc/source/release/2.0.2-notes.rst b/doc/source/release/2.0.2-notes.rst new file mode 100644 index 000000000000..ae5c26250ba7 --- /dev/null +++ b/doc/source/release/2.0.2-notes.rst @@ -0,0 +1,58 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.0.2 Release Notes +========================== + +NumPy 2.0.2 is a maintenance release that fixes bugs and regressions +discovered after the 2.0.1 release. + +The Python versions supported by this release are 3.9-3.12. + + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* Bruno Oliveira + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christopher Sidebottom +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* Yair Chuchem + + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#27000 `__: REL: Prepare for the NumPy 2.0.1 release [wheel build] +* `#27001 `__: MAINT: prepare 2.0.x for further development +* `#27021 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27022 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27061 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27073 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27074 `__: BUG: Bump Highway to latest master +* `#27077 `__: BUG: Off by one in memory overlap check +* `#27122 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27126 `__: BUG: Bump Highway to latest +* `#27128 `__: BUG: add missing error handling in public_dtype_api.c +* `#27129 `__: BUG: fix another cast setup in array_assign_subscript +* `#27130 `__: BUG: Fix building NumPy in FIPS mode +* `#27131 `__: BLD: update vendored Meson for cross-compilation patches +* `#27146 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27151 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27195 `__: REV: Revert undef I and document it +* `#27213 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27279 `__: BUG: Fix array_equal for numeric and non-numeric scalar types + From db9668dfb89ac89dec826591b7cf73fd7f47d7b5 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Tue, 27 Aug 2024 07:50:03 +1000 Subject: [PATCH 034/101] BLD: cp311- macosx_arm64 wheels [wheel build] --- tools/ci/cirrus_wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index 
f63274e5af3f..99aa6ee2b50f 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -65,7 +65,7 @@ macosx_arm64_task: matrix: - env: - CIBW_BUILD: cp310-* cp311 + CIBW_BUILD: cp310-* cp311-* - env: CIBW_BUILD: cp312-* cp313-* - env: From d4306dd2c498a5bbc74e0b26ae60b64a5a4e8c5c Mon Sep 17 00:00:00 2001 From: Maximilian Weigand Date: Fri, 26 Jul 2024 07:36:48 +0000 Subject: [PATCH 035/101] TST: Add regression test for gh-26920 --- .../two_mods_with_no_public_entities.f90 | 21 +++++++++++++ .../two_mods_with_one_public_routine.f90 | 21 +++++++++++++ numpy/f2py/tests/test_modules.py | 31 +++++++++++++++++++ 3 files changed, 73 insertions(+) create mode 100644 numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 create mode 100644 numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 diff --git a/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 new file mode 100644 index 000000000000..07adce591f35 --- /dev/null +++ b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + private mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + private :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 new file mode 100644 index 000000000000..b7fb95b010a6 --- /dev/null +++ b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + PUBLIC :: mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + 
+ module mod1 + implicit none + PUBLIC :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/numpy/f2py/tests/test_modules.py b/numpy/f2py/tests/test_modules.py index 009ae3365cd5..436e0c700017 100644 --- a/numpy/f2py/tests/test_modules.py +++ b/numpy/f2py/tests/test_modules.py @@ -5,6 +5,37 @@ from numpy.testing import IS_PYPY +@pytest.mark.slow +class TestModuleFilterPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_one_public_routine.f90" + ) + ] + # we filter the only public function mod2 + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + +@pytest.mark.slow +class TestModuleWithoutPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_no_public_entities.f90" + ) + ] + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + @pytest.mark.slow class TestModuleDocString(util.F2PyTest): sources = [util.getpath("tests", "src", "modules", "module_data_docstring.f90")] From 2a49507d02cbf0e3ce6c540a162c3771195d9b37 Mon Sep 17 00:00:00 2001 From: Maximilian Weigand Date: Fri, 26 Jul 2024 07:41:08 +0000 Subject: [PATCH 036/101] BUG: f2py: better handle filtering of public/private subroutines Don't mistake public/private declarations of F90 subroutines for variables when the corresponding subroutines are filtered by use of only:. Also, handle modules with no public variables or subroutines, caused by the filtering. Closes gh-26920. 
--- numpy/f2py/auxfuncs.py | 28 ++++++++++++++++++---------- numpy/f2py/f90mod_rules.py | 7 ++++++- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 68b56c5a640c..88a9ff552343 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -36,16 +36,15 @@ 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', 'islogicalfunction', 'islong_complex', 'islong_double', 'islong_doublefunction', 'islong_long', 'islong_longfunction', - 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired', - 'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring', - 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', - 'issubroutine', 'get_f2py_modulename', - 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char', - 'isunsigned_chararray', 'isunsigned_long_long', - 'isunsigned_long_longarray', 'isunsigned_short', - 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', - 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', - 'getuseblocks', 'process_f2cmap_dict' + 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isvariable', + 'isrequired', 'isroutine', 'isscalar', 'issigned_long_longarray', + 'isstring', 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', + 'issubroutine', 'get_f2py_modulename', 'issubroutine_wrap', 'isthreadsafe', + 'isunsigned', 'isunsigned_char', 'isunsigned_chararray', + 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', + 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', + 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', + 'process_f2cmap_dict' ] @@ -518,6 +517,15 @@ def isprivate(var): return 'attrspec' in var and 'private' in var['attrspec'] +def isvariable(var): + # heuristic to find public/private declarations of filtered subroutines + if len(var) == 1 and 'attrspec' in var and \ + var['attrspec'][0] in ('public', 
'private'): + is_var = False + else: + is_var = True + return is_var + def hasinitvalue(var): return '=' in var diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index db53beaf616b..9c52938f08da 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -110,11 +110,16 @@ def dadd(line, s=doc): notvars.append(b['name']) for n in m['vars'].keys(): var = m['vars'][n] - if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): + + if (n not in notvars and isvariable(var)) and (not l_or(isintent_hide, isprivate)(var)): onlyvars.append(n) mfargs.append(n) outmess('\t\tConstructing F90 module support for "%s"...\n' % (m['name'])) + if len(onlyvars) == 0 and len(notvars) == 1 and m['name'] in notvars: + outmess(f"\t\t\tSkipping {m['name']} since there are no public vars/func in this module...\n") + continue + if m['name'] in usenames and not contains_functions_or_subroutines: outmess(f"\t\t\tSkipping {m['name']} since it is in 'use'...\n") continue From a7cb4c473ee22eb7399a245b894137b12c26a5d2 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 2 Sep 2024 15:19:01 -0600 Subject: [PATCH 037/101] REL: Prepare for the NumPy 2.1.1 release [wheel build] - Create 2.1.1-changelog.rst. - Update 2.1.1-notes.rst. --- doc/changelog/2.1.1-changelog.rst | 30 ++++++++++++++++++++++++++++++ doc/source/release/2.1.1-notes.rst | 23 +++++++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 doc/changelog/2.1.1-changelog.rst diff --git a/doc/changelog/2.1.1-changelog.rst b/doc/changelog/2.1.1-changelog.rst new file mode 100644 index 000000000000..d18636771e1a --- /dev/null +++ b/doc/changelog/2.1.1-changelog.rst @@ -0,0 +1,30 @@ + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* Andrew Nelson +* Charles Harris +* Mateusz Sokół +* Maximilian Weigand + +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg + +Pull requests merged +==================== + +A total of 10 pull requests were merged for this release. + +* `#27236 `__: REL: Prepare for the NumPy 2.1.0 release [wheel build] +* `#27252 `__: MAINT: prepare 2.1.x for further development +* `#27259 `__: BUG: revert unintended change in the return value of set_printoptions +* `#27266 `__: BUG: fix reference counting bug in __array_interface__ implementation… +* `#27267 `__: TST: Add regression test for missing descr in array-interface +* `#27276 `__: BUG: Fix #27256 and #27257 +* `#27278 `__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27287 `__: MAINT: Update maintenance/2.1.x after the 2.0.2 release +* `#27303 `__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27304 `__: BUG: f2py: better handle filtering of public/private subroutines diff --git a/doc/source/release/2.1.1-notes.rst b/doc/source/release/2.1.1-notes.rst index f147c22ed0e0..79c63514695c 100644 --- a/doc/source/release/2.1.1-notes.rst +++ b/doc/source/release/2.1.1-notes.rst @@ -12,7 +12,30 @@ The Python versions supported by this release are 3.10-3.13. Contributors ============ +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Andrew Nelson +* Charles Harris +* Mateusz Sokół +* Maximilian Weigand + +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg Pull requests merged ==================== +A total of 10 pull requests were merged for this release. 
+ +* `#27236 `__: REL: Prepare for the NumPy 2.1.0 release [wheel build] +* `#27252 `__: MAINT: prepare 2.1.x for further development +* `#27259 `__: BUG: revert unintended change in the return value of set_printoptions +* `#27266 `__: BUG: fix reference counting bug in __array_interface__ implementation… +* `#27267 `__: TST: Add regression test for missing descr in array-interface +* `#27276 `__: BUG: Fix #27256 and #27257 +* `#27278 `__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27287 `__: MAINT: Update maintenance/2.1.x after the 2.0.2 release +* `#27303 `__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27304 `__: BUG: f2py: better handle filtering of public/private subroutines + From 25d59e351fedab6465bbac861e94b54aeb6b6381 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 3 Sep 2024 10:16:58 -0600 Subject: [PATCH 038/101] MAINT: prepare 2.1.x for further development - Create 2.1.2-notes.rst - Update pavement.py - Update pyproject.toml - Update release.rst [skip azp] [skip actions] [skip cirrus] --- doc/source/release.rst | 1 + doc/source/release/2.1.2-notes.rst | 17 +++++++++++++++++ pavement.py | 2 +- pyproject.toml | 2 +- 4 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 doc/source/release/2.1.2-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index fa6cb5bbcb8a..04444a198b02 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.1.2 2.1.1 2.1.0 2.0.2 diff --git a/doc/source/release/2.1.2-notes.rst b/doc/source/release/2.1.2-notes.rst new file mode 100644 index 000000000000..974ebdfca978 --- /dev/null +++ b/doc/source/release/2.1.2-notes.rst @@ -0,0 +1,17 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.1.2 Release Notes +========================== + +NumPy 2.1.2 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.1 release. 
+ +The Python versions supported by this release are 3.10-3.13. + +Contributors +============ + +Pull requests merged +==================== + diff --git a/pavement.py b/pavement.py index a4439e32185f..ab3a5d5ba221 100644 --- a/pavement.py +++ b/pavement.py @@ -38,7 +38,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.1.1-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.1.2-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index aab64e51e6f9..7106f5d37563 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.1.1" +version = "2.1.2" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From 14ae841a6a7b681e3feecfba2c15b1806c7ed8e6 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 13 Sep 2024 14:32:16 -0600 Subject: [PATCH 039/101] MAINT: update pythoncapi-compat submodule --- numpy/_core/src/common/pythoncapi-compat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index ea1f7f6eac63..2d18aecd7b2f 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit ea1f7f6eac63ff401937515638252402ff33dccb +Subproject commit 2d18aecd7b2f549d38a13e27b682ea4966f37bd8 From 393b08c3ad37ff80c70c7749bf0ad115b9b0587b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 13 Sep 2024 14:32:20 -0600 Subject: [PATCH 040/101] BUG: apply critical sections around populating the ufunc cache --- numpy/_core/src/common/npy_hashtable.c | 20 ------ numpy/_core/src/common/npy_hashtable.h | 7 -- numpy/_core/src/multiarray/textreading/rows.c | 5 +- numpy/_core/src/umath/dispatching.c | 67 +++++++++++-------- 4 files changed, 41 insertions(+), 58 deletions(-) diff --git 
a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.c index 5c745ba388cd..596e62cf8354 100644 --- a/numpy/_core/src/common/npy_hashtable.c +++ b/numpy/_core/src/common/npy_hashtable.c @@ -29,18 +29,6 @@ #define _NpyHASH_XXROTATE(x) ((x << 13) | (x >> 19)) /* Rotate left 13 bits */ #endif -#ifdef Py_GIL_DISABLED -#define LOCK_TABLE(tb) PyMutex_Lock(&tb->mutex) -#define UNLOCK_TABLE(tb) PyMutex_Unlock(&tb->mutex) -#define INITIALIZE_LOCK(tb) memset(&tb->mutex, 0, sizeof(PyMutex)) -#else -// the GIL serializes access to the table so no need -// for locking if it is enabled -#define LOCK_TABLE(tb) -#define UNLOCK_TABLE(tb) -#define INITIALIZE_LOCK(tb) -#endif - /* * This hashing function is basically the Python tuple hash with the type * identity hash inlined. The tuple hash itself is a reduced version of xxHash. @@ -112,8 +100,6 @@ PyArrayIdentityHash_New(int key_len) res->size = 4; /* Start with a size of 4 */ res->nelem = 0; - INITIALIZE_LOCK(res); - res->buckets = PyMem_Calloc(4 * (key_len + 1), sizeof(PyObject *)); if (res->buckets == NULL) { PyErr_NoMemory(); @@ -206,17 +192,14 @@ NPY_NO_EXPORT int PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, PyObject *const *key, PyObject *value, int replace) { - LOCK_TABLE(tb); if (value != NULL && _resize_if_necessary(tb) < 0) { /* Shrink, only if a new value is added. 
*/ - UNLOCK_TABLE(tb); return -1; } PyObject **tb_item = find_item(tb, key); if (value != NULL) { if (tb_item[0] != NULL && tb_item[0] != value && !replace) { - UNLOCK_TABLE(tb); PyErr_SetString(PyExc_RuntimeError, "Identity cache already includes an item with this key."); return -1; @@ -230,7 +213,6 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, memset(tb_item, 0, (tb->key_len + 1) * sizeof(PyObject *)); } - UNLOCK_TABLE(tb); return 0; } @@ -238,8 +220,6 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, NPY_NO_EXPORT PyObject * PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) { - LOCK_TABLE(tb); PyObject *res = find_item(tb, key)[0]; - UNLOCK_TABLE(tb); return res; } diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index 583f3d9861a6..a4252da87aff 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -13,13 +13,6 @@ typedef struct { PyObject **buckets; npy_intp size; /* current size */ npy_intp nelem; /* number of elements */ -#ifdef Py_GIL_DISABLED -#if PY_VERSION_HEX < 0x30d00b3 -#error "GIL-disabled builds require Python 3.13.0b3 or newer" -#else - PyMutex mutex; -#endif -#endif } PyArrayIdentityHash; diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index 4ca1cc00e9f7..214c5c499ad8 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -6,6 +6,7 @@ #define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/npy_3kcompat.h" +#include "npy_pycompat.h" #include "alloc.h" #include @@ -59,9 +60,7 @@ create_conv_funcs( PyObject *key, *value; Py_ssize_t pos = 0; int error = 0; -#if Py_GIL_DISABLED Py_BEGIN_CRITICAL_SECTION(converters); -#endif while (PyDict_Next(converters, &pos, &key, &value)) { Py_ssize_t column = PyNumber_AsSsize_t(key, PyExc_IndexError); if (column == -1 && PyErr_Occurred()) { @@ -114,9 
+113,7 @@ create_conv_funcs( Py_INCREF(value); conv_funcs[column] = value; } -#if Py_GIL_DISABLED Py_END_CRITICAL_SECTION(); -#endif if (error) { goto error; diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index 110e2f40ab32..e76509ad7db2 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -976,6 +976,10 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } } + int error_res = 0; + PyObject *all_dtypes; + PyArrayMethodObject *method; + Py_BEGIN_CRITICAL_SECTION((PyObject *)ufunc); int current_promotion_state = get_npy_promotion_state(); if (force_legacy_promotion && legacy_promotion_is_possible @@ -989,42 +993,51 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, int cacheable = 1; /* unused, as we modify the original `op_dtypes` */ if (legacy_promote_using_legacy_type_resolver(ufunc, ops, signature, op_dtypes, &cacheable, NPY_FALSE) < 0) { - goto handle_error; + error_res = -1; } } - /* Pause warnings and always use "new" path */ - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); - PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, - ops, signature, op_dtypes, legacy_promotion_is_possible); - set_npy_promotion_state(current_promotion_state); + PyObject *info = NULL; + if (error_res == 0) { + /* Pause warnings and always use "new" path */ + set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); + info = promote_and_get_info_and_ufuncimpl(ufunc, + ops, signature, op_dtypes, legacy_promotion_is_possible); + set_npy_promotion_state(current_promotion_state); - if (info == NULL) { - goto handle_error; + if (info == NULL) { + error_res = -1; + } } - PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); - PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); + if (error_res == 0) { + method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); + all_dtypes = PyTuple_GET_ITEM(info, 0); - /* If necessary, check if the old result would have been different */ - if 
(NPY_UNLIKELY(current_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) - && (force_legacy_promotion || promoting_pyscalars) - && npy_give_promotion_warnings()) { - PyArray_DTypeMeta *check_dtypes[NPY_MAXARGS]; - for (int i = 0; i < nargs; i++) { - check_dtypes[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM( - all_dtypes, i); - } - /* Before calling to the legacy promotion, pretend that is the state: */ - set_npy_promotion_state(NPY_USE_LEGACY_PROMOTION); - int res = legacy_promote_using_legacy_type_resolver(ufunc, - ops, signature, check_dtypes, NULL, NPY_TRUE); - /* Reset the promotion state: */ - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION_AND_WARN); - if (res < 0) { - goto handle_error; + /* If necessary, check if the old result would have been different */ + if (NPY_UNLIKELY(current_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) + && (force_legacy_promotion || promoting_pyscalars) + && npy_give_promotion_warnings()) { + PyArray_DTypeMeta *check_dtypes[NPY_MAXARGS]; + for (int i = 0; i < nargs; i++) { + check_dtypes[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM( + all_dtypes, i); + } + /* Before calling to the legacy promotion, pretend that is the state: */ + set_npy_promotion_state(NPY_USE_LEGACY_PROMOTION); + int res = legacy_promote_using_legacy_type_resolver(ufunc, + ops, signature, check_dtypes, NULL, NPY_TRUE); + /* Reset the promotion state: */ + set_npy_promotion_state(NPY_USE_WEAK_PROMOTION_AND_WARN); + if (res < 0) { + error_res = -1; + } } } + Py_END_CRITICAL_SECTION(); + if (error_res < 0) { + goto handle_error; + } /* * In certain cases (only the logical ufuncs really), the loop we found may From 84f53386bde9c994c49373075ce91353a7767bc8 Mon Sep 17 00:00:00 2001 From: gorloffslava <31761951+gorloffslava@users.noreply.github.com> Date: Mon, 9 Sep 2024 12:37:49 +0500 Subject: [PATCH 041/101] BUILD: fix missing include for std::ptrdiff_t for C++23 language mode --- numpy/_core/src/umath/string_fastsearch.h | 1 + 1 file changed, 1 insertion(+) diff 
--git a/numpy/_core/src/umath/string_fastsearch.h b/numpy/_core/src/umath/string_fastsearch.h index 61abdcb5ad19..96c1e2d30140 100644 --- a/numpy/_core/src/umath/string_fastsearch.h +++ b/numpy/_core/src/umath/string_fastsearch.h @@ -9,6 +9,7 @@ #include #include +#include #include From 270b21c61de664b63fff2f46fac519bff0d93ba8 Mon Sep 17 00:00:00 2001 From: mattip Date: Fri, 20 Sep 2024 08:14:01 +0300 Subject: [PATCH 042/101] BLD: pin setuptools to avoid breaking numpy.distutils --- environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yml b/environment.yml index 86ee1058f440..9f0e236ed74e 100644 --- a/environment.yml +++ b/environment.yml @@ -12,7 +12,7 @@ dependencies: - compilers - openblas - nomkl - - setuptools + - setuptools==65.5.1 - ninja - pkg-config - meson-python From 3ad770563bab93cdc3af50024effdda19a6b4f43 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Sep 2024 14:36:49 +0200 Subject: [PATCH 043/101] BUG: Allow unsigned shift argument for np.roll --- numpy/_core/numeric.py | 2 +- numpy/_core/tests/test_numeric.py | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 1f3f1c20dbd1..61518d5ab56f 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -1272,7 +1272,7 @@ def roll(a, shift, axis=None): "'shift' and 'axis' should be scalars or 1D sequences") shifts = {ax: 0 for ax in range(a.ndim)} for sh, ax in broadcasted: - shifts[ax] += sh + shifts[ax] += int(sh) rolls = [((slice(None), slice(None)),)] * a.ndim for ax, offset in shifts.items(): diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 5906922ff1bb..ae80aaddd4d7 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -3701,6 +3701,18 @@ def test_roll_empty(self): x = np.array([]) assert_equal(np.roll(x, 1), np.array([])) + def test_roll_unsigned_shift(self): + x = np.arange(4) + 
shift = np.uint16(2) + assert_equal(np.roll(x, shift), np.roll(x, 2)) + + shift = np.uint64(2**63+2) + assert_equal(np.roll(x, shift), np.roll(x, 2)) + + def test_roll_big_int(self): + x = np.arange(4) + assert_equal(np.roll(x, 2**100), x) + class TestRollaxis: From 3cf10a831d48bc9a7afe0e7f6114c71e24f67160 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Sun, 22 Sep 2024 16:59:24 +0100 Subject: [PATCH 044/101] BUG: Disable SVE VQSort This patch removes the SVE dispatch path for VQSort, due to it being broken with GCC 10.2.1 in the manylinux2014 image. Compiling it outside of manylinux2014 with GCC 10.5.0 appears to work correctly. I'm assuming this isn't being caught in CI due to there not being a SVE capable machine in the wheel builds? --- numpy/_core/meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index dbf1a144ed93..3d4ef36c055c 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -829,7 +829,7 @@ foreach gen_mtargets : [ 'highway_qsort.dispatch.h', 'src/npysort/highway_qsort.dispatch.cpp', use_highway ? 
[ - SVE, ASIMD, VSX2, # FIXME: disable VXE due to runtime segfault + ASIMD, VSX2, # FIXME: disable VXE due to runtime segfault ] : [] ], [ From c47ca011dd1457419b5d680c8b63fa86dea22af7 Mon Sep 17 00:00:00 2001 From: Ishankoradia <39583356+Ishankoradia@users.noreply.github.com> Date: Sat, 28 Sep 2024 22:28:59 +0530 Subject: [PATCH 045/101] BUG: fftn axis bug (#27466) * rfftn axis bug * added test on shapes and fixed the linter issue * linter length --- numpy/fft/_pocketfft.py | 2 +- numpy/fft/tests/test_pocketfft.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 2199797ad900..4edeecc075ad 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -1401,7 +1401,7 @@ def rfftn(a, s=None, axes=None, norm=None, out=None): a = asarray(a) s, axes = _cook_nd_args(a, s, axes) a = rfft(a, s[-1], axes[-1], norm, out=out) - for ii in range(len(axes)-1): + for ii in range(len(axes)-2, -1, -1): a = fft(a, s[ii], axes[ii], norm, out=out) return a diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index d1e4da2eb831..fc6592e4f4f6 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -307,6 +307,14 @@ def test_rfftn(self): np.fft.rfftn(x, norm="ortho"), atol=1e-6) assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.), np.fft.rfftn(x, norm="forward"), atol=1e-6) + # Regression test for gh-27159 + x = np.ones((2, 3)) + result = np.fft.rfftn(x, axes=(0, 0, 1), s=(10, 20, 40)) + assert result.shape == (10, 21) + expected = np.fft.fft(np.fft.fft(np.fft.rfft(x, axis=1, n=40), + axis=0, n=20), axis=0, n=10) + assert expected.shape == (10, 21) + assert_allclose(result, expected, atol=1e-6) def test_irfftn(self): x = random((30, 20, 10)) From 2760a1305eb75eeece2d3ce20a9e735b747ccfb6 Mon Sep 17 00:00:00 2001 From: Peter Hawkins Date: Mon, 30 Sep 2024 13:21:09 +0000 Subject: [PATCH 046/101] BUG: Fix extra decref of PyArray_UInt8DType. 
We didn't take a reference to this type, so we shouldn't be freeing one. This appears to have been missed by PR #25329. --- numpy/_core/src/multiarray/abstractdtypes.c | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c index 214833737792..21360a16c1b6 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -177,7 +177,6 @@ int_common_dtype(PyArray_DTypeMeta *NPY_UNUSED(cls), PyArray_DTypeMeta *other) /* This is a back-compat fallback to usually do the right thing... */ PyArray_DTypeMeta *uint8_dt = &PyArray_UInt8DType; PyArray_DTypeMeta *res = NPY_DT_CALL_common_dtype(other, uint8_dt); - Py_DECREF(uint8_dt); if (res == NULL) { PyErr_Clear(); } From 1f30c0d04f687bfbb0dc941179799313abef0a3d Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 30 Sep 2024 09:14:17 +0300 Subject: [PATCH 047/101] use PyPI not scientific-python-nightly-wheels for CI doc build --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index eb267dffd7fb..3014cb5c5074 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -56,7 +56,7 @@ jobs: . venv/bin/activate pip install --progress-bar=off -r requirements/test_requirements.txt # get newer, pre-release versions of critical packages - pip install --progress-bar=off --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple -r requirements/doc_requirements.txt + pip install --progress-bar=off --pre -r requirements/doc_requirements.txt # then install numpy HEAD, which will override the version installed above pip install . 
--config-settings=setup-args="-Dallow-noblas=true" From 3db631ea09aaa2bdb47675681f8af46e8bc04eb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Eiras?= Date: Sun, 29 Sep 2024 19:07:10 +0200 Subject: [PATCH 048/101] Make check for SVE support happen on demand and not during module import The check would invoke an external process which would slow down imports --- numpy/_core/tests/test_multiarray.py | 4 ++-- numpy/_core/tests/test_scalarmath.py | 4 ++-- numpy/testing/_private/utils.py | 17 ++++++++++------- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 0bc9fea9c960..2f7e3c574c79 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -30,7 +30,7 @@ assert_array_equal, assert_raises_regex, assert_array_almost_equal, assert_allclose, IS_PYPY, IS_WASM, IS_PYSTON, HAS_REFCOUNT, assert_array_less, runstring, temppath, suppress_warnings, break_cycles, - _SUPPORTS_SVE, assert_array_compare, + check_support_sve, assert_array_compare, ) from numpy.testing._private.utils import requires_memory, _no_tracing from numpy._core.tests._locales import CommaDecimalPointLocale @@ -10107,7 +10107,7 @@ def test_non_c_contiguous(self): assert_array_equal(x.view(' Date: Mon, 30 Sep 2024 10:21:46 -0600 Subject: [PATCH 049/101] BUG: initialize the promotion state to be weak --- numpy/_core/src/multiarray/convert_datatype.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 550d3e253868..1292a2ad2b92 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -49,7 +49,7 @@ */ NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[] = {0, 3, 5, 10, 10, 20, 20, 20, 20}; -static NPY_TLS int npy_promotion_state = NPY_USE_LEGACY_PROMOTION; +static NPY_TLS int npy_promotion_state = 
NPY_USE_WEAK_PROMOTION; NPY_NO_EXPORT int get_npy_promotion_state() { From 364efb5444fa6776936e4269c7662e59090eb919 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Oct 2024 17:14:43 +0000 Subject: [PATCH 050/101] MAINT: Bump pypa/cibuildwheel from 2.21.1 to 2.21.2 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.21.1 to 2.21.2. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/d4a2945fcc8d13f20a1b99d461b8e844d5fc6e23...f1859528322d7b29d4493ee241a167807661dfb4) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index ce034d24d2ea..b11c1e1c168f 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -166,7 +166,7 @@ jobs: echo "CIBW_BUILD_FRONTEND=pip; args: --no-build-isolation" >> "$GITHUB_ENV" - name: Build wheels - uses: pypa/cibuildwheel@bd033a44476646b606efccdd5eed92d5ea1d77ad # v2.20.0 + uses: pypa/cibuildwheel@f1859528322d7b29d4493ee241a167807661dfb4 # v2.21.2 env: CIBW_PRERELEASE_PYTHONS: True CIBW_FREE_THREADED_SUPPORT: True From 09e9cd98472c61fb3fda4735763b512f3ee772e9 Mon Sep 17 00:00:00 2001 From: "Marten H. 
van Kerkwijk" Date: Fri, 4 Oct 2024 13:22:17 -0400 Subject: [PATCH 051/101] BUG: avoid segfault on bad arguments in ndarray.__array_function__ --- numpy/_core/src/multiarray/methods.c | 9 ++++++++- numpy/_core/tests/test_overrides.py | 8 ++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 4a8e1ea4579e..2a950d6ca5d1 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1120,7 +1120,14 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw &func, &types, &args, &kwargs)) { return NULL; } - + if (!PyTuple_CheckExact(args)) { + PyErr_SetString(PyExc_TypeError, "args must be a tuple."); + return NULL; + } + if (!PyDict_CheckExact(kwargs)) { + PyErr_SetString(PyExc_TypeError, "kwargs must be a dict."); + return NULL; + } types = PySequence_Fast( types, "types argument to ndarray.__array_function__ must be iterable"); diff --git a/numpy/_core/tests/test_overrides.py b/numpy/_core/tests/test_overrides.py index 1ac2277b5de7..fabcaa10801e 100644 --- a/numpy/_core/tests/test_overrides.py +++ b/numpy/_core/tests/test_overrides.py @@ -203,6 +203,14 @@ def test_no_wrapper(self): array.__array_function__(func=func, types=(np.ndarray,), args=(array,), kwargs={}) + def test_wrong_arguments(self): + # Check our implementation guards against wrong arguments. 
+ a = np.array([1, 2]) + with pytest.raises(TypeError, match="args must be a tuple"): + a.__array_function__(np.reshape, (np.ndarray,), a, (2, 1)) + with pytest.raises(TypeError, match="kwargs must be a dict"): + a.__array_function__(np.reshape, (np.ndarray,), (a,), (2, 1)) + class TestArrayFunctionDispatch: From 6d85a24ad2acdde4399153e855f8481da10a3eb6 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 4 Oct 2024 20:31:34 -0600 Subject: [PATCH 052/101] MAINT: Pin setuptools for Python < 3.12 [wheel build] --- requirements/test_requirements.txt | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index ec7827b7e50e..5c19c3a914ec 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,8 +1,7 @@ Cython wheel==0.38.1 -#setuptools==65.5.1 ; python_version < '3.12' -#setuptools ; python_version >= '3.12' -setuptools +setuptools==65.5.1 ; python_version < '3.12' +setuptools ; python_version >= '3.12' hypothesis==6.104.1 pytest==7.4.0 pytz==2023.3.post1 From 6b9ef481661cf055d23393e3a9e6b6176149d845 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 3 Oct 2024 10:25:49 -0600 Subject: [PATCH 053/101] REL: Prepare for the NumPy 2.1.2 release [wheel build] - Create 2.1.2-changelog.rst - Update 2.1.2-notes.rst - Update .mailmap --- .mailmap | 2 ++ doc/changelog/2.1.2-changelog.rst | 38 +++++++++++++++++++++++++++++ doc/source/release/2.1.2-notes.rst | 31 +++++++++++++++++++++++ numpy/distutils/mingw32ccompiler.py | 8 +++++- tools/lint_diff.ini | 2 +- 5 files changed, 79 insertions(+), 2 deletions(-) create mode 100644 doc/changelog/2.1.2-changelog.rst diff --git a/.mailmap b/.mailmap index b073f12c416b..23a556dd9fc4 100644 --- a/.mailmap +++ b/.mailmap @@ -304,6 +304,7 @@ Imen Rajhi Inessa Pawson Irina Maria Mocan <28827042+IrinaMaria@users.noreply.github.com> Irvin Probst +Ishan Koradia <39583356+Ishankoradia@users.noreply.github.com> 
Ivan Meleshko Isabela Presedo-Floyd Ganesh Kathiresan @@ -620,6 +621,7 @@ Simon Gasse Simon Gasse Sista Seetaram Sista Seetaram <65669128+sistaseetaram@users.noreply.github.com> +Slava Gorloff <31761951+gorloffslava@users.noreply.github.com> Søren Rasmussen <47032123+sorenrasmussenai@users.noreply.github.com> Spencer Hill Srimukh Sripada diff --git a/doc/changelog/2.1.2-changelog.rst b/doc/changelog/2.1.2-changelog.rst new file mode 100644 index 000000000000..bd0f7bd2422c --- /dev/null +++ b/doc/changelog/2.1.2-changelog.rst @@ -0,0 +1,38 @@ + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Chris Sidebottom +* Ishan Koradia + +* João Eiras + +* Katie Rust + +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Slava Gorloff + + +Pull requests merged +==================== + +A total of 14 pull requests were merged for this release. + +* `#27333 `__: MAINT: prepare 2.1.x for further development +* `#27400 `__: BUG: apply critical sections around populating the dispatch cache +* `#27406 `__: BUG: Stub out get_build_msvc_version if distutils.msvccompiler... +* `#27416 `__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... +* `#27433 `__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27437 `__: BUG: Allow unsigned shift argument for np.roll +* `#27439 `__: BUG: Disable SVE VQSort +* `#27471 `__: BUG: rfftn axis bug +* `#27479 `__: BUG: Fix extra decref of PyArray_UInt8DType. +* `#27480 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... 
+* `#27481 `__: MAINT: Check for SVE support on demand +* `#27484 `__: BUG: initialize the promotion state to be weak +* `#27501 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.2 +* `#27506 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ diff --git a/doc/source/release/2.1.2-notes.rst b/doc/source/release/2.1.2-notes.rst index 974ebdfca978..1a187dbd3365 100644 --- a/doc/source/release/2.1.2-notes.rst +++ b/doc/source/release/2.1.2-notes.rst @@ -12,6 +12,37 @@ The Python versions supported by this release are 3.10-3.13. Contributors ============ +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Chris Sidebottom +* Ishan Koradia + +* João Eiras + +* Katie Rust + +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Slava Gorloff + + Pull requests merged ==================== +A total of 14 pull requests were merged for this release. + +* `#27333 `__: MAINT: prepare 2.1.x for further development +* `#27400 `__: BUG: apply critical sections around populating the dispatch cache +* `#27406 `__: BUG: Stub out get_build_msvc_version if distutils.msvccompiler... +* `#27416 `__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... +* `#27433 `__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27437 `__: BUG: Allow unsigned shift argument for np.roll +* `#27439 `__: BUG: Disable SVE VQSort +* `#27471 `__: BUG: rfftn axis bug +* `#27479 `__: BUG: Fix extra decref of PyArray_UInt8DType. +* `#27480 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... 
+* `#27481 `__: MAINT: Check for SVE support on demand +* `#27484 `__: BUG: initialize the promotion state to be weak +* `#27501 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.2 +* `#27506 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index 4763f41ad326..ac0c206f96cf 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ b/numpy/distutils/mingw32ccompiler.py @@ -24,7 +24,13 @@ import distutils.cygwinccompiler from distutils.unixccompiler import UnixCCompiler -from distutils.msvccompiler import get_build_version as get_build_msvc_version + +try: + from distutils.msvccompiler import get_build_version as get_build_msvc_version +except ImportError: + def get_build_msvc_version(): + return None + from distutils.errors import UnknownFileError from numpy.distutils.misc_util import (msvc_runtime_library, msvc_runtime_version, diff --git a/tools/lint_diff.ini b/tools/lint_diff.ini index dbebe483b4ab..810e265d4dec 100644 --- a/tools/lint_diff.ini +++ b/tools/lint_diff.ini @@ -1,5 +1,5 @@ [pycodestyle] -max_line_length = 79 +max_line_length = 88 statistics = True ignore = E121,E122,E123,E125,E126,E127,E128,E226,E241,E251,E265,E266,E302,E402,E704,E712,E721,E731,E741,W291,W293,W391,W503,W504 exclude = numpy/__config__.py,numpy/typing/tests/data,.spin/cmds.py From 40b66315cda263ab25cdbf15c6f171d4c7495d13 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 5 Oct 2024 12:59:15 -0600 Subject: [PATCH 054/101] MAINT: prepare 2.1.x for further development - Create 2.1.3-notes.rst - Update release.rst - Update pavement.py - Update pyproject.toml --- doc/source/release.rst | 1 + doc/source/release/2.1.3-notes.rst | 18 ++++++++++++++++++ pavement.py | 2 +- pyproject.toml | 2 +- 4 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 doc/source/release/2.1.3-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index 
04444a198b02..c990b7ab8076 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.1.3 2.1.2 2.1.1 2.1.0 diff --git a/doc/source/release/2.1.3-notes.rst b/doc/source/release/2.1.3-notes.rst new file mode 100644 index 000000000000..8e1b7e4d6da0 --- /dev/null +++ b/doc/source/release/2.1.3-notes.rst @@ -0,0 +1,18 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.1.3 Release Notes +========================== + +NumPy 2.1.3 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.2 release. + +The Python versions supported by this release are 3.10-3.13. + +Contributors +============ + + +Pull requests merged +==================== + diff --git a/pavement.py b/pavement.py index ab3a5d5ba221..4149f571ef28 100644 --- a/pavement.py +++ b/pavement.py @@ -38,7 +38,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.1.2-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.1.3-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index 7106f5d37563..b5782a7e4258 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.1.2" +version = "2.1.3" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From 6302a772f6e56e6b44a232fac8abd2762867beb5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 17:30:35 +0000 Subject: [PATCH 055/101] MAINT: Bump actions/cache from 4.1.0 to 4.1.1 Bumps [actions/cache](https://github.com/actions/cache) from 4.1.0 to 4.1.1. 
- [Release notes](https://github.com/actions/cache/releases) - [Commits](https://github.com/actions/cache/compare/v4.1.0...v4.1.1) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/linux_qemu.yml | 2 +- .github/workflows/macos.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index d4d6fe4a4989..c63c5b7a9f20 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -108,7 +108,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@v4.0.2 + uses: actions/cache@v4.1.1 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 164a4c6710c2..c1a5a5299ee8 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -44,7 +44,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -68,7 +68,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. 
- name: Cache conda environment - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From 3657251ec49fef7a0f5e65b382dc92c2ad663742 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Oct 2024 17:30:17 +0000 Subject: [PATCH 056/101] MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.21.2 to 2.21.3. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/f1859528322d7b29d4493ee241a167807661dfb4...7940a4c0e76eb2030e473a5f864f291f63ee879b) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index b11c1e1c168f..c2628339b530 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -166,7 +166,7 @@ jobs: echo "CIBW_BUILD_FRONTEND=pip; args: --no-build-isolation" >> "$GITHUB_ENV" - name: Build wheels - uses: pypa/cibuildwheel@f1859528322d7b29d4493ee241a167807661dfb4 # v2.21.2 + uses: pypa/cibuildwheel@7940a4c0e76eb2030e473a5f864f291f63ee879b # v2.21.3 env: CIBW_PRERELEASE_PYTHONS: True CIBW_FREE_THREADED_SUPPORT: True From 420dbdbd14e57286a5cbaffdc1dbe942349399b5 Mon Sep 17 00:00:00 2001 From: Michael Davidsaver Date: Sun, 18 Aug 2024 08:07:16 -0700 Subject: [PATCH 057/101] MSVC does not support #warning directive --- numpy/_core/include/numpy/numpyconfig.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index 0f2b68054527..95ce781b3a17 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -128,7 +128,7 @@ /* Sanity check the (requested) feature version */ #if NPY_FEATURE_VERSION > NPY_API_VERSION #error "NPY_TARGET_VERSION higher than NumPy headers!" -#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION +#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION && !defined(_WIN32) /* No support for irrelevant old targets, no need for error, but warn. */ #warning "Requested NumPy target lower than supported NumPy 1.15." 
#endif From 91e74978b59cbe2c58448b1e5d17e92067b0c4b8 Mon Sep 17 00:00:00 2001 From: Michael Davidsaver Date: Mon, 9 Sep 2024 10:59:55 -0400 Subject: [PATCH 058/101] Emit MSVC style warning --- numpy/_core/include/numpy/numpyconfig.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index 95ce781b3a17..46ecade41ada 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -128,9 +128,16 @@ /* Sanity check the (requested) feature version */ #if NPY_FEATURE_VERSION > NPY_API_VERSION #error "NPY_TARGET_VERSION higher than NumPy headers!" -#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION && !defined(_WIN32) +#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION /* No support for irrelevant old targets, no need for error, but warn. */ - #warning "Requested NumPy target lower than supported NumPy 1.15." + #ifndef _MSC_VER + #warning "Requested NumPy target lower than supported NumPy 1.15." + #else + #define _WARN___STR2__(x) #x + #define _WARN___STR1__(x) _WARN___STR2__(x) + #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + #pragma message(_WARN___LOC__"Requested NumPy target lower than supported NumPy 1.15.") + #endif #endif /* From 6756b4ee005c25a4f66ac89fdd4d86180e7e8363 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 9 Oct 2024 17:52:25 +0200 Subject: [PATCH 059/101] BUG: Fix user dtype can-cast with python scalar during promotion The can-cast code for "Python scalars" was old and did not correctly take into account possible user-dtypes with respect to NEP 50 weak promotion. To do this, we already had the necessary helper functions that go via promotion (although it took me some brooding to remember ;)). So the fix is rather simple. Actually adding CI/test for the fix is unfortunately hard as it requires such a user DType. 
--- numpy/_core/src/multiarray/convert_datatype.c | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 1292a2ad2b92..fc1cd84883b3 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -897,18 +897,29 @@ can_cast_pyscalar_scalar_to( } /* - * For all other cases we use the default dtype. + * For all other cases we need to make a bit of a dance to find the cast + * safety. We do so by finding the descriptor for the "scalar" (without + * a value; for parametric user dtypes a value may be needed eventually). */ - PyArray_Descr *from; + PyArray_DTypeMeta *from_DType; + PyArray_Descr *default_dtype; if (flags & NPY_ARRAY_WAS_PYTHON_INT) { - from = PyArray_DescrFromType(NPY_LONG); + default_dtype = PyArray_DescrNewFromType(NPY_INTP); + from_DType = &PyArray_PyLongDType; } else if (flags & NPY_ARRAY_WAS_PYTHON_FLOAT) { - from = PyArray_DescrFromType(NPY_DOUBLE); + default_dtype = PyArray_DescrNewFromType(NPY_FLOAT64); + from_DType = &PyArray_PyFloatDType; } else { - from = PyArray_DescrFromType(NPY_CDOUBLE); + default_dtype = PyArray_DescrNewFromType(NPY_COMPLEX128); + from_DType = &PyArray_PyComplexDType; } + + PyArray_Descr *from = npy_find_descr_for_scalar( + NULL, default_dtype, from_DType, NPY_DTYPE(to)); + Py_DECREF(default_dtype); + int res = PyArray_CanCastTypeTo(from, to, casting); Py_DECREF(from); return res; From 8852d7c40409ae7e2e6b9a94eb8f27368aac529c Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 14 Oct 2024 22:31:39 +0200 Subject: [PATCH 060/101] DEV: bump `python` to 3.12 in environment.yml [ci skip] --- .github/workflows/macos.yml | 2 +- environment.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index c1a5a5299ee8..d5436747c8b3 100644 --- a/.github/workflows/macos.yml +++ 
b/.github/workflows/macos.yml @@ -25,7 +25,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.11"] + python-version: ["3.12"] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 diff --git a/environment.yml b/environment.yml index 9f0e236ed74e..e0d2ccdc1117 100644 --- a/environment.yml +++ b/environment.yml @@ -7,7 +7,7 @@ name: numpy-dev channels: - conda-forge dependencies: - - python=3.11 #need to pin to avoid issues with builds + - python=3.12 # need to pin to avoid issues with builds - cython>=3.0 - compilers - openblas From bc6023c95fefa332f9510a527f605b869be3aefc Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 14 Oct 2024 13:59:03 +0200 Subject: [PATCH 061/101] BLD: update vendored Meson to 1.5.2 --- vendored-meson/meson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendored-meson/meson b/vendored-meson/meson index 6f88e485f27b..11dffde9a67f 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit 6f88e485f27bb0a41d31638f0c55055362e0b1ac +Subproject commit 11dffde9a67fe926b262dc33fff3d68f9281b159 From 1171ae269bf3a0adc1ca2e99cd691617686fa648 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 15 Oct 2024 10:39:20 +0200 Subject: [PATCH 062/101] BLD: update vendored Meson to include a fix for AIX Will be included upstream in Meson 1.6.0; cherry-picked in https://github.com/numpy/meson/pull/17. 
--- vendored-meson/meson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendored-meson/meson b/vendored-meson/meson index 11dffde9a67f..0d93515fb826 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit 11dffde9a67fe926b262dc33fff3d68f9281b159 +Subproject commit 0d93515fb826440d19707eee47fd92655fe2f166 From b5e12fe3fc5403b340d60bac9d1594e85083dcf2 Mon Sep 17 00:00:00 2001 From: Christian Lorentzen Date: Mon, 14 Oct 2024 14:26:29 +0200 Subject: [PATCH 063/101] BUG: weighted quantile for some zero weights (#27549) This PR fixed weighted quantiles (and percentiles) for a corner case: * at least one weight is zero * q=0 (0-quantile equals minimum) Then: ``` np.quantile(np.arange(3), 0, weights=[0, 0, 1], method="inverted_cdf") ``` should return 2, the minimum when neglecting zero weight values. Current main returns 0. --- numpy/lib/_function_base_impl.py | 7 +++++++ numpy/lib/tests/test_function_base.py | 11 +++++++++++ 2 files changed, 18 insertions(+) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index d90070e19e8c..840b501bacae 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4870,6 +4870,13 @@ def _quantile( # returns 2 instead of 1 because 0.4 is not binary representable. if quantiles.dtype.kind == "f": cdf = cdf.astype(quantiles.dtype) + # Weights must be non-negative, so we might have zero weights at the + # beginning leading to some leading zeros in cdf. The call to + # np.searchsorted for quantiles=0 will then pick the first element, + # but should pick the first one larger than zero. We + # therefore simply set 0 values in cdf to -1. + if np.any(cdf[0, ...] 
== 0): + cdf[cdf == 0] = -1 def find_cdf_1d(arr, cdf): indices = np.searchsorted(cdf, quantiles, side="left") diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index bc3ce6409f1c..b51564619051 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -4010,6 +4010,17 @@ def test_quantile_with_weights_and_axis(self, method): ) assert_allclose(q, q_res) + @pytest.mark.parametrize("method", methods_supporting_weights) + def test_quantile_weights_min_max(self, method): + # Test weighted quantile at 0 and 1 with leading and trailing zero + # weights. + w = [0, 0, 1, 2, 3, 0] + y = np.arange(6) + y_min = np.quantile(y, 0, weights=w, method="inverted_cdf") + y_max = np.quantile(y, 1, weights=w, method="inverted_cdf") + assert y_min == y[2] # == 2 + assert y_max == y[4] # == 4 + def test_quantile_weights_raises_negative_weights(self): y = [1, 2] w = [-0.5, 1] From 28ae9e18e7067b32a3b8d83477a2e9ec38453ed8 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 15 Oct 2024 10:37:22 -0600 Subject: [PATCH 064/101] MAINT: Use miniforge for macos conda test. 
--- .github/workflows/macos.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index d5436747c8b3..c941c46fd2bc 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -52,7 +52,7 @@ jobs: restore-keys: | ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - - name: Setup Mambaforge + - name: Setup Miniforge uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4 with: python-version: ${{ matrix.python-version }} @@ -60,7 +60,7 @@ jobs: channel-priority: true activate-environment: numpy-dev use-only-tar-bz2: false - miniforge-variant: Mambaforge + miniforge-variant: Miniforge3 miniforge-version: latest use-mamba: true @@ -119,7 +119,7 @@ jobs: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: '3.10' From 987e132f42cddbb3d6965309f06b1a4f1d17b5b3 Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 15 Oct 2024 18:41:55 +0300 Subject: [PATCH 065/101] BUILD: satisfy gcc-13 pendantic errors --- numpy/_core/src/multiarray/alloc.c | 4 +--- numpy/_core/src/umath/scalarmath.c.src | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index b7e7c9948ce1..4d9368d6be69 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -274,10 +274,8 @@ PyDataMem_RENEW(void *ptr, size_t size) void *result; assert(size != 0); + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); result = realloc(ptr, size); - if (result != ptr) { - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); - } PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); return result; } diff --git a/numpy/_core/src/umath/scalarmath.c.src 
b/numpy/_core/src/umath/scalarmath.c.src index cd28e4405b6d..3b7b65e97fab 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -1369,7 +1369,7 @@ static PyObject * */ PyObject *ret; npy_float64 arg1, arg2, other_val; - @type@ other_val_conv; + @type@ other_val_conv = 0; int is_forward; if (Py_TYPE(a) == &Py@Name@ArrType_Type) { From b1d83bb7e398d65da51f23bf2ede986876d1991b Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 15 Oct 2024 21:14:24 +0300 Subject: [PATCH 066/101] BUG: handle possible error for PyTraceMallocTrack DOC: update documentation for PyDataMem_* functions --- doc/source/reference/c-api/array.rst | 4 +-- numpy/_core/src/multiarray/alloc.c | 38 ++++++++++++++++++++++------ 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 80af4b83d172..e6f26f92cdf5 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -4099,8 +4099,8 @@ Memory management .. c:function:: char* PyDataMem_RENEW(void * ptr, size_t newbytes) - Macros to allocate, free, and reallocate memory. These macros are used - internally to create arrays. + Functions to allocate, free, and reallocate memory. These are used + internally to manage array data memory unless overridden. .. 
c:function:: npy_intp* PyDimMem_NEW(int nd) diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 4d9368d6be69..268507b2b51f 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -238,7 +238,11 @@ PyDataMem_NEW(size_t size) assert(size != 0); result = malloc(size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + free(result); + return NULL; + } return result; } @@ -251,7 +255,11 @@ PyDataMem_NEW_ZEROED(size_t nmemb, size_t size) void *result; result = calloc(nmemb, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + if (ret == -1) { + free(result); + return NULL; + } return result; } @@ -276,7 +284,11 @@ PyDataMem_RENEW(void *ptr, size_t size) assert(size != 0); PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); result = realloc(ptr, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + free(result); + return NULL; + } return result; } @@ -360,7 +372,11 @@ PyDataMem_UserNEW(size_t size, PyObject *mem_handler) } assert(size != 0); result = handler->allocator.malloc(handler->allocator.ctx, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; + } return result; } @@ -374,7 +390,11 @@ PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler) return NULL; } result = handler->allocator.calloc(handler->allocator.ctx, nmemb, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + int ret = 
PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; + } return result; } @@ -404,11 +424,13 @@ PyDataMem_UserRENEW(void *ptr, size_t size, PyObject *mem_handler) } assert(size != 0); + int ret = PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)result, size); result = handler->allocator.realloc(handler->allocator.ctx, ptr, size); - if (result != ptr) { - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; } - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); return result; } From 80c7e2af0901e99675fd578d5be056c1c1931c2f Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 15 Oct 2024 21:26:27 +0300 Subject: [PATCH 067/101] fix call to PyTraceMalloc_Untrack --- numpy/_core/src/multiarray/alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 268507b2b51f..396a7adb3148 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -424,7 +424,7 @@ PyDataMem_UserRENEW(void *ptr, size_t size, PyObject *mem_handler) } assert(size != 0); - int ret = PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); result = handler->allocator.realloc(handler->allocator.ctx, ptr, size); int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); if (ret == -1) { From 47e8771f67d131f31da2e717864a506bdad7bada Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 15 Oct 2024 12:52:33 +0200 Subject: [PATCH 068/101] BLD: start building Windows free-threaded wheels [wheel build] --- .github/workflows/wheels.yml | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 
deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index c2628339b530..e763b8d86dd4 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -76,8 +76,8 @@ jobs: # Github Actions doesn't support pairing matrix values together, let's improvise # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 buildplat: - - [ubuntu-20.04, manylinux_x86_64, ""] - - [ubuntu-20.04, musllinux_x86_64, ""] + - [ubuntu-22.04, manylinux_x86_64, ""] + - [ubuntu-22.04, musllinux_x86_64, ""] - [macos-13, macosx_x86_64, openblas] # targeting macos >= 14. Could probably build on macos-14, but it would be a cross-compile @@ -90,14 +90,10 @@ jobs: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32, ""] python: "pp310" - - buildplat: [ ubuntu-20.04, musllinux_x86_64, "" ] + - buildplat: [ ubuntu-22.04, musllinux_x86_64, "" ] python: "pp310" - buildplat: [ macos-14, macosx_arm64, accelerate ] python: "pp310" - - buildplat: [ windows-2019, win_amd64, "" ] - python: "cp313t" - - buildplat: [ windows-2019, win32, "" ] - python: "cp313t" - buildplat: [ macos13, macosx_x86_64, openblas ] python: "cp313t" @@ -130,7 +126,7 @@ jobs: if: runner.os == 'windows' # Used to push the built wheels - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: "3.x" @@ -162,6 +158,7 @@ jobs: - name: Set up free-threaded build if: matrix.python == 'cp313t' + shell: bash -el {0} run: | echo "CIBW_BUILD_FRONTEND=pip; args: --no-build-isolation" >> "$GITHUB_ENV" @@ -172,12 +169,12 @@ jobs: CIBW_FREE_THREADED_SUPPORT: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: ${{ matrix.python }}-${{ 
matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - uses: mamba-org/setup-micromamba@f8b8a1e23a26f60a44c853292711bacfd3eac822 + - uses: mamba-org/setup-micromamba@617811f69075e3fd3ae68ca64220ad065877f246 with: # for installation of anaconda-client, required for upload to # anaconda.org @@ -231,7 +228,7 @@ jobs: with: submodules: true # Used to push the built wheels - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: # Build sdist on lowest supported Python python-version: "3.10" @@ -253,7 +250,7 @@ jobs: python -mpip install twine twine check dist/* - - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: sdist path: ./dist/* From c9aed62c5e62c446dde3ad471b653a2b74fc1a4c Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 15 Oct 2024 21:34:56 +0300 Subject: [PATCH 069/101] BUILD: vendor tempita from Cython --- LICENSES_bundled.txt | 5 + numpy/_build_utils/tempita.py | 4 +- numpy/_build_utils/tempita/LICENSE.txt | 20 + numpy/_build_utils/tempita/__init__.py | 4 + numpy/_build_utils/tempita/_looper.py | 156 ++++ numpy/_build_utils/tempita/_tempita.py | 1092 ++++++++++++++++++++++++ 6 files changed, 1278 insertions(+), 3 deletions(-) create mode 100644 numpy/_build_utils/tempita/LICENSE.txt create mode 100644 numpy/_build_utils/tempita/__init__.py create mode 100644 numpy/_build_utils/tempita/_looper.py create mode 100644 numpy/_build_utils/tempita/_tempita.py diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt index 815c9a1dba33..b3d8aa8bed06 100644 --- a/LICENSES_bundled.txt +++ b/LICENSES_bundled.txt @@ -29,3 +29,8 @@ Name: spin Files: .spin/cmds.py License: BSD-3 For license text, see .spin/LICENSE + +Name: tempita +Files: numpy/_build_utils/tempita/* +License: MIT + For details, see 
numpy/_build_utils/tempita/LICENCE.txt diff --git a/numpy/_build_utils/tempita.py b/numpy/_build_utils/tempita.py index 0743b892436b..32e400f9c907 100644 --- a/numpy/_build_utils/tempita.py +++ b/numpy/_build_utils/tempita.py @@ -3,9 +3,7 @@ import os import argparse -from Cython import Tempita as tempita - -# XXX: If this import ever fails (does it really?), vendor cython.tempita +import tempita def process_tempita(fromfile, outfile=None): diff --git a/numpy/_build_utils/tempita/LICENSE.txt b/numpy/_build_utils/tempita/LICENSE.txt new file mode 100644 index 000000000000..0ba6f23c440f --- /dev/null +++ b/numpy/_build_utils/tempita/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2008 Ian Bicking and Contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/numpy/_build_utils/tempita/__init__.py b/numpy/_build_utils/tempita/__init__.py new file mode 100644 index 000000000000..41a0ce3d0efa --- /dev/null +++ b/numpy/_build_utils/tempita/__init__.py @@ -0,0 +1,4 @@ +# The original Tempita implements all of its templating code here. +# Moved it to _tempita.py to make the compilation portable. + +from ._tempita import * diff --git a/numpy/_build_utils/tempita/_looper.py b/numpy/_build_utils/tempita/_looper.py new file mode 100644 index 000000000000..4864f2949605 --- /dev/null +++ b/numpy/_build_utils/tempita/_looper.py @@ -0,0 +1,156 @@ +""" +Helper for looping over sequences, particular in templates. + +Often in a loop in a template it's handy to know what's next up, +previously up, if this is the first or last item in the sequence, etc. +These can be awkward to manage in a normal Python loop, but using the +looper you can get a better sense of the context. Use like:: + + >>> for loop, item in looper(['a', 'b', 'c']): + ... print loop.number, item + ... if not loop.last: + ... print '---' + 1 a + --- + 2 b + --- + 3 c + +""" + +basestring_ = (bytes, str) + +__all__ = ['looper'] + + +class looper: + """ + Helper for looping (particularly in templates) + + Use this like:: + + for loop, item in looper(seq): + if loop.first: + ... 
+ """ + + def __init__(self, seq): + self.seq = seq + + def __iter__(self): + return looper_iter(self.seq) + + def __repr__(self): + return '<%s for %r>' % ( + self.__class__.__name__, self.seq) + + +class looper_iter: + + def __init__(self, seq): + self.seq = list(seq) + self.pos = 0 + + def __iter__(self): + return self + + def __next__(self): + if self.pos >= len(self.seq): + raise StopIteration + result = loop_pos(self.seq, self.pos), self.seq[self.pos] + self.pos += 1 + return result + + +class loop_pos: + + def __init__(self, seq, pos): + self.seq = seq + self.pos = pos + + def __repr__(self): + return '' % ( + self.seq[self.pos], self.pos) + + def index(self): + return self.pos + index = property(index) + + def number(self): + return self.pos + 1 + number = property(number) + + def item(self): + return self.seq[self.pos] + item = property(item) + + def __next__(self): + try: + return self.seq[self.pos + 1] + except IndexError: + return None + __next__ = property(__next__) + + def previous(self): + if self.pos == 0: + return None + return self.seq[self.pos - 1] + previous = property(previous) + + def odd(self): + return not self.pos % 2 + odd = property(odd) + + def even(self): + return self.pos % 2 + even = property(even) + + def first(self): + return self.pos == 0 + first = property(first) + + def last(self): + return self.pos == len(self.seq) - 1 + last = property(last) + + def length(self): + return len(self.seq) + length = property(length) + + def first_group(self, getter=None): + """ + Returns true if this item is the start of a new group, + where groups mean that some attribute has changed. The getter + can be None (the item itself changes), an attribute name like + ``'.attr'``, a function, or a dict key or list index. 
+ """ + if self.first: + return True + return self._compare_group(self.item, self.previous, getter) + + def last_group(self, getter=None): + """ + Returns true if this item is the end of a new group, + where groups mean that some attribute has changed. The getter + can be None (the item itself changes), an attribute name like + ``'.attr'``, a function, or a dict key or list index. + """ + if self.last: + return True + return self._compare_group(self.item, self.__next__, getter) + + def _compare_group(self, item, other, getter): + if getter is None: + return item != other + elif (isinstance(getter, basestring_) + and getter.startswith('.')): + getter = getter[1:] + if getter.endswith('()'): + getter = getter[:-2] + return getattr(item, getter)() != getattr(other, getter)() + else: + return getattr(item, getter) != getattr(other, getter) + elif hasattr(getter, '__call__'): + return getter(item) != getter(other) + else: + return item[getter] != other[getter] diff --git a/numpy/_build_utils/tempita/_tempita.py b/numpy/_build_utils/tempita/_tempita.py new file mode 100644 index 000000000000..c5269f25ff39 --- /dev/null +++ b/numpy/_build_utils/tempita/_tempita.py @@ -0,0 +1,1092 @@ +""" +A small templating language + +This implements a small templating language. This language implements +if/elif/else, for/continue/break, expressions, and blocks of Python +code. The syntax is:: + + {{any expression (function calls etc)}} + {{any expression | filter}} + {{for x in y}}...{{endfor}} + {{if x}}x{{elif y}}y{{else}}z{{endif}} + {{py:x=1}} + {{py: + def foo(bar): + return 'baz' + }} + {{default var = default_value}} + {{# comment}} + +You use this with the ``Template`` class or the ``sub`` shortcut. +The ``Template`` class takes the template string and the name of +the template (for errors) and a default namespace. Then (like +``string.Template``) you can call the ``tmpl.substitute(**kw)`` +method to make a substitution (or ``tmpl.substitute(a_dict)``). 
+ +``sub(content, **kw)`` substitutes the template immediately. You +can use ``__name='tmpl.html'`` to set the name of the template. + +If there are syntax errors ``TemplateError`` will be raised. +""" + + +import re +import sys +import os +import tokenize +from io import StringIO + +from ._looper import looper + +__all__ = ['TemplateError', 'Template', 'sub', 'bunch'] + +in_re = re.compile(r'\s+in\s+') +var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I) +basestring_ = (bytes, str) + +def coerce_text(v): + if not isinstance(v, basestring_): + if hasattr(v, '__str__'): + return str(v) + else: + return bytes(v) + return v + +class TemplateError(Exception): + """Exception raised while parsing a template + """ + + def __init__(self, message, position, name=None): + Exception.__init__(self, message) + self.position = position + self.name = name + + def __str__(self): + msg = ' '.join(self.args) + if self.position: + msg = '%s at line %s column %s' % ( + msg, self.position[0], self.position[1]) + if self.name: + msg += ' in %s' % self.name + return msg + + +class _TemplateContinue(Exception): + pass + + +class _TemplateBreak(Exception): + pass + + +def get_file_template(name, from_template): + path = os.path.join(os.path.dirname(from_template.name), name) + return from_template.__class__.from_filename( + path, namespace=from_template.namespace, + get_template=from_template.get_template) + + +class Template: + + default_namespace = { + 'start_braces': '{{', + 'end_braces': '}}', + 'looper': looper, + } + + default_encoding = 'utf8' + default_inherit = None + + def __init__(self, content, name=None, namespace=None, stacklevel=None, + get_template=None, default_inherit=None, line_offset=0, + delimiters=None, delimeters=None): + self.content = content + + # set delimiters + if delimeters: + import warnings + warnings.warn( + "'delimeters' kwarg is being deprecated in favor of correctly" + " spelled 'delimiters'. 
Please adjust your code.", + DeprecationWarning + ) + if delimiters is None: + delimiters = delimeters + if delimiters is None: + delimiters = (self.default_namespace['start_braces'], + self.default_namespace['end_braces']) + else: + #assert len(delimiters) == 2 and all([isinstance(delimiter, basestring) + # for delimiter in delimiters]) + self.default_namespace = self.__class__.default_namespace.copy() + self.default_namespace['start_braces'] = delimiters[0] + self.default_namespace['end_braces'] = delimiters[1] + self.delimiters = self.delimeters = delimiters # Keep a legacy read-only copy, but don't use it. + + self._unicode = isinstance(content, str) + if name is None and stacklevel is not None: + try: + caller = sys._getframe(stacklevel) + except ValueError: + pass + else: + globals = caller.f_globals + lineno = caller.f_lineno + if '__file__' in globals: + name = globals['__file__'] + if name.endswith('.pyc') or name.endswith('.pyo'): + name = name[:-1] + elif '__name__' in globals: + name = globals['__name__'] + else: + name = '' + if lineno: + name += ':%s' % lineno + self.name = name + self._parsed = parse(content, name=name, line_offset=line_offset, delimiters=self.delimiters) + if namespace is None: + namespace = {} + self.namespace = namespace + self.get_template = get_template + if default_inherit is not None: + self.default_inherit = default_inherit + + def from_filename(cls, filename, namespace=None, encoding=None, + default_inherit=None, get_template=get_file_template): + with open(filename, 'rb') as f: + c = f.read() + if encoding: + c = c.decode(encoding) + return cls(content=c, name=filename, namespace=namespace, + default_inherit=default_inherit, get_template=get_template) + + from_filename = classmethod(from_filename) + + def __repr__(self): + return '<%s %s name=%r>' % ( + self.__class__.__name__, + hex(id(self))[2:], self.name) + + def substitute(self, *args, **kw): + if args: + if kw: + raise TypeError( + "You can only give positional *or* 
keyword arguments") + if len(args) > 1: + raise TypeError( + "You can only give one positional argument") + if not hasattr(args[0], 'items'): + raise TypeError( + "If you pass in a single argument, you must pass in a dictionary-like object (with a .items() method); you gave %r" + % (args[0],)) + kw = args[0] + ns = kw + ns['__template_name__'] = self.name + if self.namespace: + ns.update(self.namespace) + result, defs, inherit = self._interpret(ns) + if not inherit: + inherit = self.default_inherit + if inherit: + result = self._interpret_inherit(result, defs, inherit, ns) + return result + + def _interpret(self, ns): + __traceback_hide__ = True + parts = [] + defs = {} + self._interpret_codes(self._parsed, ns, out=parts, defs=defs) + if '__inherit__' in defs: + inherit = defs.pop('__inherit__') + else: + inherit = None + return ''.join(parts), defs, inherit + + def _interpret_inherit(self, body, defs, inherit_template, ns): + __traceback_hide__ = True + if not self.get_template: + raise TemplateError( + 'You cannot use inheritance without passing in get_template', + position=None, name=self.name) + templ = self.get_template(inherit_template, self) + self_ = TemplateObject(self.name) + for name, value in defs.items(): + setattr(self_, name, value) + self_.body = body + ns = ns.copy() + ns['self'] = self_ + return templ.substitute(ns) + + def _interpret_codes(self, codes, ns, out, defs): + __traceback_hide__ = True + for item in codes: + if isinstance(item, basestring_): + out.append(item) + else: + self._interpret_code(item, ns, out, defs) + + def _interpret_code(self, code, ns, out, defs): + __traceback_hide__ = True + name, pos = code[0], code[1] + if name == 'py': + self._exec(code[2], ns, pos) + elif name == 'continue': + raise _TemplateContinue() + elif name == 'break': + raise _TemplateBreak() + elif name == 'for': + vars, expr, content = code[2], code[3], code[4] + expr = self._eval(expr, ns, pos) + self._interpret_for(vars, expr, content, ns, out, defs) + 
elif name == 'cond': + parts = code[2:] + self._interpret_if(parts, ns, out, defs) + elif name == 'expr': + parts = code[2].split('|') + base = self._eval(parts[0], ns, pos) + for part in parts[1:]: + func = self._eval(part, ns, pos) + base = func(base) + out.append(self._repr(base, pos)) + elif name == 'default': + var, expr = code[2], code[3] + if var not in ns: + result = self._eval(expr, ns, pos) + ns[var] = result + elif name == 'inherit': + expr = code[2] + value = self._eval(expr, ns, pos) + defs['__inherit__'] = value + elif name == 'def': + name = code[2] + signature = code[3] + parts = code[4] + ns[name] = defs[name] = TemplateDef(self, name, signature, body=parts, ns=ns, + pos=pos) + elif name == 'comment': + return + else: + assert 0, "Unknown code: %r" % name + + def _interpret_for(self, vars, expr, content, ns, out, defs): + __traceback_hide__ = True + for item in expr: + if len(vars) == 1: + ns[vars[0]] = item + else: + if len(vars) != len(item): + raise ValueError( + 'Need %i items to unpack (got %i items)' + % (len(vars), len(item))) + for name, value in zip(vars, item): + ns[name] = value + try: + self._interpret_codes(content, ns, out, defs) + except _TemplateContinue: + continue + except _TemplateBreak: + break + + def _interpret_if(self, parts, ns, out, defs): + __traceback_hide__ = True + # @@: if/else/else gets through + for part in parts: + assert not isinstance(part, basestring_) + name, pos = part[0], part[1] + if name == 'else': + result = True + else: + result = self._eval(part[2], ns, pos) + if result: + self._interpret_codes(part[3], ns, out, defs) + break + + def _eval(self, code, ns, pos): + __traceback_hide__ = True + try: + try: + value = eval(code, self.default_namespace, ns) + except SyntaxError as e: + raise SyntaxError( + 'invalid syntax in expression: %s' % code) + return value + except Exception as e: + if getattr(e, 'args', None): + arg0 = e.args[0] + else: + arg0 = coerce_text(e) + e.args = (self._add_line_info(arg0, pos),) 
+ raise + + def _exec(self, code, ns, pos): + __traceback_hide__ = True + try: + exec(code, self.default_namespace, ns) + except Exception as e: + if e.args: + e.args = (self._add_line_info(e.args[0], pos),) + else: + e.args = (self._add_line_info(None, pos),) + raise + + def _repr(self, value, pos): + __traceback_hide__ = True + try: + if value is None: + return '' + if self._unicode: + try: + value = str(value) + except UnicodeDecodeError: + value = bytes(value) + else: + if not isinstance(value, basestring_): + value = coerce_text(value) + if (isinstance(value, str) + and self.default_encoding): + value = value.encode(self.default_encoding) + except Exception as e: + e.args = (self._add_line_info(e.args[0], pos),) + raise + else: + if self._unicode and isinstance(value, bytes): + if not self.default_encoding: + raise UnicodeDecodeError( + 'Cannot decode bytes value %r into unicode ' + '(no default_encoding provided)' % value) + try: + value = value.decode(self.default_encoding) + except UnicodeDecodeError as e: + raise UnicodeDecodeError( + e.encoding, + e.object, + e.start, + e.end, + e.reason + ' in string %r' % value) + elif not self._unicode and isinstance(value, str): + if not self.default_encoding: + raise UnicodeEncodeError( + 'Cannot encode unicode value %r into bytes ' + '(no default_encoding provided)' % value) + value = value.encode(self.default_encoding) + return value + + def _add_line_info(self, msg, pos): + msg = "%s at line %s column %s" % ( + msg, pos[0], pos[1]) + if self.name: + msg += " in file %s" % self.name + return msg + + +def sub(content, delimiters=None, **kw): + name = kw.get('__name') + delimeters = kw.pop('delimeters') if 'delimeters' in kw else None # for legacy code + tmpl = Template(content, name=name, delimiters=delimiters, delimeters=delimeters) + return tmpl.substitute(kw) + + +def paste_script_template_renderer(content, vars, filename=None): + tmpl = Template(content, name=filename) + return tmpl.substitute(vars) + + +class 
bunch(dict):
+
+    def __init__(self, **kw):
+        for name, value in kw.items():
+            setattr(self, name, value)
+
+    def __setattr__(self, name, value):
+        self[name] = value
+
+    def __getattr__(self, name):
+        try:
+            return self[name]
+        except KeyError:
+            raise AttributeError(name)
+
+    def __getitem__(self, key):
+        if 'default' in self:
+            try:
+                return dict.__getitem__(self, key)
+            except KeyError:
+                return dict.__getitem__(self, 'default')
+        else:
+            return dict.__getitem__(self, key)
+
+    def __repr__(self):
+        return '<%s %s>' % (
+            self.__class__.__name__,
+            ' '.join(['%s=%r' % (k, v) for k, v in sorted(self.items())]))
+
+
+class TemplateDef:
+    def __init__(self, template, func_name, func_signature,
+                 body, ns, pos, bound_self=None):
+        self._template = template
+        self._func_name = func_name
+        self._func_signature = func_signature
+        self._body = body
+        self._ns = ns
+        self._pos = pos
+        self._bound_self = bound_self
+
+    def __repr__(self):
+        return '<tempita function %s(%s) at %s:%s>' % (
+            self._func_name, self._func_signature,
+            self._template.name, self._pos)
+
+    def __str__(self):
+        return self()
+
+    def __call__(self, *args, **kw):
+        values = self._parse_signature(args, kw)
+        ns = self._ns.copy()
+        ns.update(values)
+        if self._bound_self is not None:
+            ns['self'] = self._bound_self
+        out = []
+        subdefs = {}
+        self._template._interpret_codes(self._body, ns, out, subdefs)
+        return ''.join(out)
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        return self.__class__(
+            self._template, self._func_name, self._func_signature,
+            self._body, self._ns, self._pos, bound_self=obj)
+
+    def _parse_signature(self, args, kw):
+        values = {}
+        sig_args, var_args, var_kw, defaults = self._func_signature
+        extra_kw = {}
+        for name, value in kw.items():
+            if not var_kw and name not in sig_args:
+                raise TypeError(
+                    'Unexpected argument %s' % name)
+            if name in sig_args:
+                values[sig_args] = value
+            else:
+                extra_kw[name] = value
+        args = list(args)
+        sig_args = list(sig_args)
+        while args:
+            while 
sig_args and sig_args[0] in values: + sig_args.pop(0) + if sig_args: + name = sig_args.pop(0) + values[name] = args.pop(0) + elif var_args: + values[var_args] = tuple(args) + break + else: + raise TypeError( + 'Extra position arguments: %s' + % ', '.join([repr(v) for v in args])) + for name, value_expr in defaults.items(): + if name not in values: + values[name] = self._template._eval( + value_expr, self._ns, self._pos) + for name in sig_args: + if name not in values: + raise TypeError( + 'Missing argument: %s' % name) + if var_kw: + values[var_kw] = extra_kw + return values + + +class TemplateObject: + + def __init__(self, name): + self.__name = name + self.get = TemplateObjectGetter(self) + + def __repr__(self): + return '<%s %s>' % (self.__class__.__name__, self.__name) + + +class TemplateObjectGetter: + + def __init__(self, template_obj): + self.__template_obj = template_obj + + def __getattr__(self, attr): + return getattr(self.__template_obj, attr, Empty) + + def __repr__(self): + return '<%s around %r>' % (self.__class__.__name__, self.__template_obj) + + +class _Empty: + def __call__(self, *args, **kw): + return self + + def __str__(self): + return '' + + def __repr__(self): + return 'Empty' + + def __unicode__(self): + return '' + + def __iter__(self): + return iter(()) + + def __bool__(self): + return False + +Empty = _Empty() +del _Empty + +############################################################ +## Lexing and Parsing +############################################################ + + +def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): + """ + Lex a string into chunks: + + >>> lex('hey') + ['hey'] + >>> lex('hey {{you}}') + ['hey ', ('you', (1, 7))] + >>> lex('hey {{') + Traceback (most recent call last): + ... + TemplateError: No }} to finish last expression at line 1 column 7 + >>> lex('hey }}') + Traceback (most recent call last): + ... 
+ TemplateError: }} outside expression at line 1 column 7 + >>> lex('hey {{ {{') + Traceback (most recent call last): + ... + TemplateError: {{ inside expression at line 1 column 10 + + """ + if delimiters is None: + delimiters = ( Template.default_namespace['start_braces'], + Template.default_namespace['end_braces'] ) + in_expr = False + chunks = [] + last = 0 + last_pos = (line_offset + 1, 1) + + token_re = re.compile(r'%s|%s' % (re.escape(delimiters[0]), + re.escape(delimiters[1]))) + for match in token_re.finditer(s): + expr = match.group(0) + pos = find_position(s, match.end(), last, last_pos) + if expr == delimiters[0] and in_expr: + raise TemplateError('%s inside expression' % delimiters[0], + position=pos, + name=name) + elif expr == delimiters[1] and not in_expr: + raise TemplateError('%s outside expression' % delimiters[1], + position=pos, + name=name) + if expr == delimiters[0]: + part = s[last:match.start()] + if part: + chunks.append(part) + in_expr = True + else: + chunks.append((s[last:match.start()], last_pos)) + in_expr = False + last = match.end() + last_pos = pos + if in_expr: + raise TemplateError('No %s to finish last expression' % delimiters[1], + name=name, position=last_pos) + part = s[last:] + if part: + chunks.append(part) + if trim_whitespace: + chunks = trim_lex(chunks) + return chunks + +statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)') +single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break'] +trail_whitespace_re = re.compile(r'\n\r?[\t ]*$') +lead_whitespace_re = re.compile(r'^[\t ]*\n') + + +def trim_lex(tokens): + r""" + Takes a lexed set of tokens, and removes whitespace when there is + a directive on a line by itself: + + >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) + >>> tokens + [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny'] + >>> trim_lex(tokens) + [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y'] + """ + last_trim = None + for i, current in 
enumerate(tokens): + if isinstance(current, basestring_): + # we don't trim this + continue + item = current[0] + if not statement_re.search(item) and item not in single_statements: + continue + if not i: + prev = '' + else: + prev = tokens[i - 1] + if i + 1 >= len(tokens): + next_chunk = '' + else: + next_chunk = tokens[i + 1] + if (not isinstance(next_chunk, basestring_) + or not isinstance(prev, basestring_)): + continue + prev_ok = not prev or trail_whitespace_re.search(prev) + if i == 1 and not prev.strip(): + prev_ok = True + if last_trim is not None and last_trim + 2 == i and not prev.strip(): + prev_ok = 'last' + if (prev_ok + and (not next_chunk or lead_whitespace_re.search(next_chunk) + or (i == len(tokens) - 2 and not next_chunk.strip()))): + if prev: + if ((i == 1 and not prev.strip()) + or prev_ok == 'last'): + tokens[i - 1] = '' + else: + m = trail_whitespace_re.search(prev) + # +1 to leave the leading \n on: + prev = prev[:m.start() + 1] + tokens[i - 1] = prev + if next_chunk: + last_trim = i + if i == len(tokens) - 2 and not next_chunk.strip(): + tokens[i + 1] = '' + else: + m = lead_whitespace_re.search(next_chunk) + next_chunk = next_chunk[m.end():] + tokens[i + 1] = next_chunk + return tokens + + +def find_position(string, index, last_index, last_pos): + """Given a string and index, return (line, column)""" + lines = string.count('\n', last_index, index) + if lines > 0: + column = index - string.rfind('\n', last_index, index) + else: + column = last_pos[1] + (index - last_index) + return (last_pos[0] + lines, column) + + +def parse(s, name=None, line_offset=0, delimiters=None): + r""" + Parses a string into a kind of AST + + >>> parse('{{x}}') + [('expr', (1, 3), 'x')] + >>> parse('foo') + ['foo'] + >>> parse('{{if x}}test{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))] + >>> parse('series->{{for x in y}}x={{x}}{{endfor}}') + ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])] + >>> parse('{{for x, y in 
z:}}{{continue}}{{endfor}}') + [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])] + >>> parse('{{py:x=1}}') + [('py', (1, 3), 'x=1')] + >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] + + Some exceptions:: + + >>> parse('{{continue}}') + Traceback (most recent call last): + ... + TemplateError: continue outside of for loop at line 1 column 3 + >>> parse('{{if x}}foo') + Traceback (most recent call last): + ... + TemplateError: No {{endif}} at line 1 column 3 + >>> parse('{{else}}') + Traceback (most recent call last): + ... + TemplateError: else outside of an if block at line 1 column 3 + >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Unexpected endif at line 1 column 25 + >>> parse('{{if}}{{endif}}') + Traceback (most recent call last): + ... + TemplateError: if with no expression at line 1 column 3 + >>> parse('{{for x y}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Bad for (no "in") in 'x y' at line 1 column 3 + >>> parse('{{py:x=1\ny=2}}') + Traceback (most recent call last): + ... 
+ TemplateError: Multi-line py blocks must start with a newline at line 1 column 3 + """ + if delimiters is None: + delimiters = ( Template.default_namespace['start_braces'], + Template.default_namespace['end_braces'] ) + tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters) + result = [] + while tokens: + next_chunk, tokens = parse_expr(tokens, name) + result.append(next_chunk) + return result + + +def parse_expr(tokens, name, context=()): + if isinstance(tokens[0], basestring_): + return tokens[0], tokens[1:] + expr, pos = tokens[0] + expr = expr.strip() + if expr.startswith('py:'): + expr = expr[3:].lstrip(' \t') + if expr.startswith('\n') or expr.startswith('\r'): + expr = expr.lstrip('\r\n') + if '\r' in expr: + expr = expr.replace('\r\n', '\n') + expr = expr.replace('\r', '') + expr += '\n' + else: + if '\n' in expr: + raise TemplateError( + 'Multi-line py blocks must start with a newline', + position=pos, name=name) + return ('py', pos, expr), tokens[1:] + elif expr in ('continue', 'break'): + if 'for' not in context: + raise TemplateError( + 'continue outside of for loop', + position=pos, name=name) + return (expr, pos), tokens[1:] + elif expr.startswith('if '): + return parse_cond(tokens, name, context) + elif (expr.startswith('elif ') + or expr == 'else'): + raise TemplateError( + '%s outside of an if block' % expr.split()[0], + position=pos, name=name) + elif expr in ('if', 'elif', 'for'): + raise TemplateError( + '%s with no expression' % expr, + position=pos, name=name) + elif expr in ('endif', 'endfor', 'enddef'): + raise TemplateError( + 'Unexpected %s' % expr, + position=pos, name=name) + elif expr.startswith('for '): + return parse_for(tokens, name, context) + elif expr.startswith('default '): + return parse_default(tokens, name, context) + elif expr.startswith('inherit '): + return parse_inherit(tokens, name, context) + elif expr.startswith('def '): + return parse_def(tokens, name, context) + elif expr.startswith('#'): + 
return ('comment', pos, tokens[0][0]), tokens[1:] + return ('expr', pos, tokens[0][0]), tokens[1:] + + +def parse_cond(tokens, name, context): + start = tokens[0][1] + pieces = [] + context = context + ('if',) + while 1: + if not tokens: + raise TemplateError( + 'Missing {{endif}}', + position=start, name=name) + if (isinstance(tokens[0], tuple) + and tokens[0][0] == 'endif'): + return ('cond', start) + tuple(pieces), tokens[1:] + next_chunk, tokens = parse_one_cond(tokens, name, context) + pieces.append(next_chunk) + + +def parse_one_cond(tokens, name, context): + (first, pos), tokens = tokens[0], tokens[1:] + content = [] + if first.endswith(':'): + first = first[:-1] + if first.startswith('if '): + part = ('if', pos, first[3:].lstrip(), content) + elif first.startswith('elif '): + part = ('elif', pos, first[5:].lstrip(), content) + elif first == 'else': + part = ('else', pos, None, content) + else: + assert 0, "Unexpected token %r at %s" % (first, pos) + while 1: + if not tokens: + raise TemplateError( + 'No {{endif}}', + position=pos, name=name) + if (isinstance(tokens[0], tuple) + and (tokens[0][0] == 'endif' + or tokens[0][0].startswith('elif ') + or tokens[0][0] == 'else')): + return part, tokens + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_for(tokens, name, context): + first, pos = tokens[0] + tokens = tokens[1:] + context = ('for',) + context + content = [] + assert first.startswith('for '), first + if first.endswith(':'): + first = first[:-1] + first = first[3:].strip() + match = in_re.search(first) + if not match: + raise TemplateError( + 'Bad for (no "in") in %r' % first, + position=pos, name=name) + vars = first[:match.start()] + if '(' in vars: + raise TemplateError( + 'You cannot have () in the variable section of a for loop (%r)' + % vars, position=pos, name=name) + vars = tuple([ + v.strip() for v in first[:match.start()].split(',') + if v.strip()]) + expr = first[match.end():] + while 1: + if 
not tokens: + raise TemplateError( + 'No {{endfor}}', + position=pos, name=name) + if (isinstance(tokens[0], tuple) + and tokens[0][0] == 'endfor'): + return ('for', pos, vars, expr, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_default(tokens, name, context): + first, pos = tokens[0] + assert first.startswith('default ') + first = first.split(None, 1)[1] + parts = first.split('=', 1) + if len(parts) == 1: + raise TemplateError( + "Expression must be {{default var=value}}; no = found in %r" % first, + position=pos, name=name) + var = parts[0].strip() + if ',' in var: + raise TemplateError( + "{{default x, y = ...}} is not supported", + position=pos, name=name) + if not var_re.search(var): + raise TemplateError( + "Not a valid variable name for {{default}}: %r" + % var, position=pos, name=name) + expr = parts[1].strip() + return ('default', pos, var, expr), tokens[1:] + + +def parse_inherit(tokens, name, context): + first, pos = tokens[0] + assert first.startswith('inherit ') + expr = first.split(None, 1)[1] + return ('inherit', pos, expr), tokens[1:] + + +def parse_def(tokens, name, context): + first, start = tokens[0] + tokens = tokens[1:] + assert first.startswith('def ') + first = first.split(None, 1)[1] + if first.endswith(':'): + first = first[:-1] + if '(' not in first: + func_name = first + sig = ((), None, None, {}) + elif not first.endswith(')'): + raise TemplateError("Function definition doesn't end with ): %s" % first, + position=start, name=name) + else: + first = first[:-1] + func_name, sig_text = first.split('(', 1) + sig = parse_signature(sig_text, name, start) + context = context + ('def',) + content = [] + while 1: + if not tokens: + raise TemplateError( + 'Missing {{enddef}}', + position=start, name=name) + if (isinstance(tokens[0], tuple) + and tokens[0][0] == 'enddef'): + return ('def', start, func_name, sig, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, 
name, context) + content.append(next_chunk) + + +def parse_signature(sig_text, name, pos): + tokens = tokenize.generate_tokens(StringIO(sig_text).readline) + sig_args = [] + var_arg = None + var_kw = None + defaults = {} + + def get_token(pos=False): + try: + tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens) + except StopIteration: + return tokenize.ENDMARKER, '' + if pos: + return tok_type, tok_string, (srow, scol), (erow, ecol) + else: + return tok_type, tok_string + while 1: + var_arg_type = None + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER: + break + if tok_type == tokenize.OP and (tok_string == '*' or tok_string == '**'): + var_arg_type = tok_string + tok_type, tok_string = get_token() + if tok_type != tokenize.NAME: + raise TemplateError('Invalid signature: (%s)' % sig_text, + position=pos, name=name) + var_name = tok_string + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','): + if var_arg_type == '*': + var_arg = var_name + elif var_arg_type == '**': + var_kw = var_name + else: + sig_args.append(var_name) + if tok_type == tokenize.ENDMARKER: + break + continue + if var_arg_type is not None: + raise TemplateError('Invalid signature: (%s)' % sig_text, + position=pos, name=name) + if tok_type == tokenize.OP and tok_string == '=': + nest_type = None + unnest_type = None + nest_count = 0 + start_pos = end_pos = None + parts = [] + while 1: + tok_type, tok_string, s, e = get_token(True) + if start_pos is None: + start_pos = s + end_pos = e + if tok_type == tokenize.ENDMARKER and nest_count: + raise TemplateError('Invalid signature: (%s)' % sig_text, + position=pos, name=name) + if (not nest_count and + (tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','))): + default_expr = isolate_expression(sig_text, start_pos, end_pos) + defaults[var_name] = default_expr + sig_args.append(var_name) + break + 
parts.append((tok_type, tok_string)) + if nest_count and tok_type == tokenize.OP and tok_string == nest_type: + nest_count += 1 + elif nest_count and tok_type == tokenize.OP and tok_string == unnest_type: + nest_count -= 1 + if not nest_count: + nest_type = unnest_type = None + elif not nest_count and tok_type == tokenize.OP and tok_string in ('(', '[', '{'): + nest_type = tok_string + nest_count = 1 + unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type] + return sig_args, var_arg, var_kw, defaults + + +def isolate_expression(string, start_pos, end_pos): + srow, scol = start_pos + srow -= 1 + erow, ecol = end_pos + erow -= 1 + lines = string.splitlines(True) + if srow == erow: + return lines[srow][scol:ecol] + parts = [lines[srow][scol:]] + parts.extend(lines[srow+1:erow]) + if erow < len(lines): + # It'll sometimes give (end_row_past_finish, 0) + parts.append(lines[erow][:ecol]) + return ''.join(parts) + +_fill_command_usage = """\ +%prog [OPTIONS] TEMPLATE arg=value + +Use py:arg=value to set a Python value; otherwise all values are +strings. 
+""" + + +def fill_command(args=None): + import sys + import optparse + import pkg_resources + import os + if args is None: + args = sys.argv[1:] + dist = pkg_resources.get_distribution('Paste') + parser = optparse.OptionParser( + version=coerce_text(dist), + usage=_fill_command_usage) + parser.add_option( + '-o', '--output', + dest='output', + metavar="FILENAME", + help="File to write output to (default stdout)") + parser.add_option( + '--env', + dest='use_env', + action='store_true', + help="Put the environment in as top-level variables") + options, args = parser.parse_args(args) + if len(args) < 1: + print('You must give a template filename') + sys.exit(2) + template_name = args[0] + args = args[1:] + vars = {} + if options.use_env: + vars.update(os.environ) + for value in args: + if '=' not in value: + print('Bad argument: %r' % value) + sys.exit(2) + name, value = value.split('=', 1) + if name.startswith('py:'): + name = name[:3] + value = eval(value) + vars[name] = value + if template_name == '-': + template_content = sys.stdin.read() + template_name = '' + else: + with open(template_name, 'rb') as f: + template_content = f.read() + template = Template(template_content, name=template_name) + result = template.substitute(vars) + if options.output: + with open(options.output, 'wb') as f: + f.write(result) + else: + sys.stdout.write(result) + +if __name__ == '__main__': + fill_command() From a308561d9e6d2e6faeee3fc964cedb24ecc3351f Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 15 Oct 2024 21:55:00 +0300 Subject: [PATCH 070/101] BUILD: reformat vendored code to make linter happy --- numpy/_build_utils/tempita/_tempita.py | 646 ++++++++++++++----------- 1 file changed, 350 insertions(+), 296 deletions(-) diff --git a/numpy/_build_utils/tempita/_tempita.py b/numpy/_build_utils/tempita/_tempita.py index c5269f25ff39..e6ab007e1921 100644 --- a/numpy/_build_utils/tempita/_tempita.py +++ b/numpy/_build_utils/tempita/_tempita.py @@ -29,7 +29,6 @@ def foo(bar): If 
there are syntax errors ``TemplateError`` will be raised. """ - import re import sys import os @@ -38,23 +37,24 @@ def foo(bar): from ._looper import looper -__all__ = ['TemplateError', 'Template', 'sub', 'bunch'] +__all__ = ["TemplateError", "Template", "sub", "bunch"] -in_re = re.compile(r'\s+in\s+') -var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I) +in_re = re.compile(r"\s+in\s+") +var_re = re.compile(r"^[a-z_][a-z0-9_]*$", re.I) basestring_ = (bytes, str) + def coerce_text(v): if not isinstance(v, basestring_): - if hasattr(v, '__str__'): + if hasattr(v, "__str__"): return str(v) else: return bytes(v) return v + class TemplateError(Exception): - """Exception raised while parsing a template - """ + """Exception raised while parsing a template""" def __init__(self, message, position, name=None): Exception.__init__(self, message) @@ -62,12 +62,11 @@ def __init__(self, message, position, name=None): self.name = name def __str__(self): - msg = ' '.join(self.args) + msg = " ".join(self.args) if self.position: - msg = '%s at line %s column %s' % ( - msg, self.position[0], self.position[1]) + msg = "%s at line %s column %s" % (msg, self.position[0], self.position[1]) if self.name: - msg += ' in %s' % self.name + msg += " in %s" % self.name return msg @@ -82,46 +81,59 @@ class _TemplateBreak(Exception): def get_file_template(name, from_template): path = os.path.join(os.path.dirname(from_template.name), name) return from_template.__class__.from_filename( - path, namespace=from_template.namespace, - get_template=from_template.get_template) + path, namespace=from_template.namespace, get_template=from_template.get_template + ) class Template: - default_namespace = { - 'start_braces': '{{', - 'end_braces': '}}', - 'looper': looper, - } + "start_braces": "{{", + "end_braces": "}}", + "looper": looper, + } - default_encoding = 'utf8' + default_encoding = "utf8" default_inherit = None - def __init__(self, content, name=None, namespace=None, stacklevel=None, - get_template=None, 
default_inherit=None, line_offset=0, - delimiters=None, delimeters=None): + def __init__( + self, + content, + name=None, + namespace=None, + stacklevel=None, + get_template=None, + default_inherit=None, + line_offset=0, + delimiters=None, + delimeters=None, + ): self.content = content # set delimiters if delimeters: import warnings + warnings.warn( "'delimeters' kwarg is being deprecated in favor of correctly" " spelled 'delimiters'. Please adjust your code.", - DeprecationWarning + DeprecationWarning, ) if delimiters is None: delimiters = delimeters if delimiters is None: - delimiters = (self.default_namespace['start_braces'], - self.default_namespace['end_braces']) + delimiters = ( + self.default_namespace["start_braces"], + self.default_namespace["end_braces"], + ) else: - #assert len(delimiters) == 2 and all([isinstance(delimiter, basestring) + # assert len(delimiters) == 2 and all([isinstance(delimiter, basestring) # for delimiter in delimiters]) self.default_namespace = self.__class__.default_namespace.copy() - self.default_namespace['start_braces'] = delimiters[0] - self.default_namespace['end_braces'] = delimiters[1] - self.delimiters = self.delimeters = delimiters # Keep a legacy read-only copy, but don't use it. + self.default_namespace["start_braces"] = delimiters[0] + self.default_namespace["end_braces"] = delimiters[1] + self.delimiters = self.delimeters = ( + delimiters # Keep a legacy read-only copy, but don't use it. 
+ ) self._unicode = isinstance(content, str) if name is None and stacklevel is not None: @@ -132,18 +144,20 @@ def __init__(self, content, name=None, namespace=None, stacklevel=None, else: globals = caller.f_globals lineno = caller.f_lineno - if '__file__' in globals: - name = globals['__file__'] - if name.endswith('.pyc') or name.endswith('.pyo'): + if "__file__" in globals: + name = globals["__file__"] + if name.endswith(".pyc") or name.endswith(".pyo"): name = name[:-1] - elif '__name__' in globals: - name = globals['__name__'] + elif "__name__" in globals: + name = globals["__name__"] else: - name = '' + name = "" if lineno: - name += ':%s' % lineno + name += ":%s" % lineno self.name = name - self._parsed = parse(content, name=name, line_offset=line_offset, delimiters=self.delimiters) + self._parsed = parse( + content, name=name, line_offset=line_offset, delimiters=self.delimiters + ) if namespace is None: namespace = {} self.namespace = namespace @@ -151,37 +165,50 @@ def __init__(self, content, name=None, namespace=None, stacklevel=None, if default_inherit is not None: self.default_inherit = default_inherit - def from_filename(cls, filename, namespace=None, encoding=None, - default_inherit=None, get_template=get_file_template): - with open(filename, 'rb') as f: + def from_filename( + cls, + filename, + namespace=None, + encoding=None, + default_inherit=None, + get_template=get_file_template, + ): + with open(filename, "rb") as f: c = f.read() if encoding: c = c.decode(encoding) - return cls(content=c, name=filename, namespace=namespace, - default_inherit=default_inherit, get_template=get_template) + return cls( + content=c, + name=filename, + namespace=namespace, + default_inherit=default_inherit, + get_template=get_template, + ) from_filename = classmethod(from_filename) def __repr__(self): - return '<%s %s name=%r>' % ( + return "<%s %s name=%r>" % ( self.__class__.__name__, - hex(id(self))[2:], self.name) + hex(id(self))[2:], + self.name, + ) def 
substitute(self, *args, **kw): if args: if kw: - raise TypeError( - "You can only give positional *or* keyword arguments") + raise TypeError("You can only give positional *or* keyword arguments") if len(args) > 1: + raise TypeError("You can only give one positional argument") + if not hasattr(args[0], "items"): raise TypeError( - "You can only give one positional argument") - if not hasattr(args[0], 'items'): - raise TypeError( - "If you pass in a single argument, you must pass in a dictionary-like object (with a .items() method); you gave %r" - % (args[0],)) + "If you pass in a single argument, you must pass in a " + "dictionary-like object (with a .items() method); you gave %r" + % (args[0],) + ) kw = args[0] ns = kw - ns['__template_name__'] = self.name + ns["__template_name__"] = self.name if self.namespace: ns.update(self.namespace) result, defs, inherit = self._interpret(ns) @@ -196,25 +223,27 @@ def _interpret(self, ns): parts = [] defs = {} self._interpret_codes(self._parsed, ns, out=parts, defs=defs) - if '__inherit__' in defs: - inherit = defs.pop('__inherit__') + if "__inherit__" in defs: + inherit = defs.pop("__inherit__") else: inherit = None - return ''.join(parts), defs, inherit + return "".join(parts), defs, inherit def _interpret_inherit(self, body, defs, inherit_template, ns): __traceback_hide__ = True if not self.get_template: raise TemplateError( - 'You cannot use inheritance without passing in get_template', - position=None, name=self.name) + "You cannot use inheritance without passing in get_template", + position=None, + name=self.name, + ) templ = self.get_template(inherit_template, self) self_ = TemplateObject(self.name) for name, value in defs.items(): setattr(self_, name, value) self_.body = body ns = ns.copy() - ns['self'] = self_ + ns["self"] = self_ return templ.substitute(ns) def _interpret_codes(self, codes, ns, out, defs): @@ -228,42 +257,43 @@ def _interpret_codes(self, codes, ns, out, defs): def _interpret_code(self, code, ns, out, 
defs): __traceback_hide__ = True name, pos = code[0], code[1] - if name == 'py': + if name == "py": self._exec(code[2], ns, pos) - elif name == 'continue': + elif name == "continue": raise _TemplateContinue() - elif name == 'break': + elif name == "break": raise _TemplateBreak() - elif name == 'for': + elif name == "for": vars, expr, content = code[2], code[3], code[4] expr = self._eval(expr, ns, pos) self._interpret_for(vars, expr, content, ns, out, defs) - elif name == 'cond': + elif name == "cond": parts = code[2:] self._interpret_if(parts, ns, out, defs) - elif name == 'expr': - parts = code[2].split('|') + elif name == "expr": + parts = code[2].split("|") base = self._eval(parts[0], ns, pos) for part in parts[1:]: func = self._eval(part, ns, pos) base = func(base) out.append(self._repr(base, pos)) - elif name == 'default': + elif name == "default": var, expr = code[2], code[3] if var not in ns: result = self._eval(expr, ns, pos) ns[var] = result - elif name == 'inherit': + elif name == "inherit": expr = code[2] value = self._eval(expr, ns, pos) - defs['__inherit__'] = value - elif name == 'def': + defs["__inherit__"] = value + elif name == "def": name = code[2] signature = code[3] parts = code[4] - ns[name] = defs[name] = TemplateDef(self, name, signature, body=parts, ns=ns, - pos=pos) - elif name == 'comment': + ns[name] = defs[name] = TemplateDef( + self, name, signature, body=parts, ns=ns, pos=pos + ) + elif name == "comment": return else: assert 0, "Unknown code: %r" % name @@ -276,8 +306,9 @@ def _interpret_for(self, vars, expr, content, ns, out, defs): else: if len(vars) != len(item): raise ValueError( - 'Need %i items to unpack (got %i items)' - % (len(vars), len(item))) + "Need %i items to unpack (got %i items)" + % (len(vars), len(item)) + ) for name, value in zip(vars, item): ns[name] = value try: @@ -293,7 +324,7 @@ def _interpret_if(self, parts, ns, out, defs): for part in parts: assert not isinstance(part, basestring_) name, pos = part[0], part[1] 
- if name == 'else': + if name == "else": result = True else: result = self._eval(part[2], ns, pos) @@ -307,11 +338,10 @@ def _eval(self, code, ns, pos): try: value = eval(code, self.default_namespace, ns) except SyntaxError as e: - raise SyntaxError( - 'invalid syntax in expression: %s' % code) + raise SyntaxError("invalid syntax in expression: %s" % code) return value except Exception as e: - if getattr(e, 'args', None): + if getattr(e, "args", None): arg0 = e.args[0] else: arg0 = coerce_text(e) @@ -333,7 +363,7 @@ def _repr(self, value, pos): __traceback_hide__ = True try: if value is None: - return '' + return "" if self._unicode: try: value = str(value) @@ -342,8 +372,7 @@ def _repr(self, value, pos): else: if not isinstance(value, basestring_): value = coerce_text(value) - if (isinstance(value, str) - and self.default_encoding): + if isinstance(value, str) and self.default_encoding: value = value.encode(self.default_encoding) except Exception as e: e.args = (self._add_line_info(e.args[0], pos),) @@ -352,8 +381,9 @@ def _repr(self, value, pos): if self._unicode and isinstance(value, bytes): if not self.default_encoding: raise UnicodeDecodeError( - 'Cannot decode bytes value %r into unicode ' - '(no default_encoding provided)' % value) + "Cannot decode bytes value %r into unicode " + "(no default_encoding provided)" % value + ) try: value = value.decode(self.default_encoding) except UnicodeDecodeError as e: @@ -362,26 +392,27 @@ def _repr(self, value, pos): e.object, e.start, e.end, - e.reason + ' in string %r' % value) + e.reason + " in string %r" % value, + ) elif not self._unicode and isinstance(value, str): if not self.default_encoding: raise UnicodeEncodeError( - 'Cannot encode unicode value %r into bytes ' - '(no default_encoding provided)' % value) + "Cannot encode unicode value %r into bytes " + "(no default_encoding provided)" % value + ) value = value.encode(self.default_encoding) return value def _add_line_info(self, msg, pos): - msg = "%s at line %s 
column %s" % ( - msg, pos[0], pos[1]) + msg = "%s at line %s column %s" % (msg, pos[0], pos[1]) if self.name: msg += " in file %s" % self.name return msg def sub(content, delimiters=None, **kw): - name = kw.get('__name') - delimeters = kw.pop('delimeters') if 'delimeters' in kw else None # for legacy code + name = kw.get("__name") + delimeters = kw.pop("delimeters") if "delimeters" in kw else None # for legacy code tmpl = Template(content, name=name, delimiters=delimiters, delimeters=delimeters) return tmpl.substitute(kw) @@ -392,7 +423,6 @@ def paste_script_template_renderer(content, vars, filename=None): class bunch(dict): - def __init__(self, **kw): for name, value in kw.items(): setattr(self, name, value) @@ -407,23 +437,25 @@ def __getattr__(self, name): raise AttributeError(name) def __getitem__(self, key): - if 'default' in self: + if "default" in self: try: return dict.__getitem__(self, key) except KeyError: - return dict.__getitem__(self, 'default') + return dict.__getitem__(self, "default") else: return dict.__getitem__(self, key) def __repr__(self): - return '<%s %s>' % ( + return "<%s %s>" % ( self.__class__.__name__, - ' '.join(['%s=%r' % (k, v) for k, v in sorted(self.items())])) + " ".join(["%s=%r" % (k, v) for k, v in sorted(self.items())]), + ) class TemplateDef: - def __init__(self, template, func_name, func_signature, - body, ns, pos, bound_self=None): + def __init__( + self, template, func_name, func_signature, body, ns, pos, bound_self=None + ): self._template = template self._func_name = func_name self._func_signature = func_signature @@ -433,9 +465,12 @@ def __init__(self, template, func_name, func_signature, self._bound_self = bound_self def __repr__(self): - return '' % ( - self._func_name, self._func_signature, - self._template.name, self._pos) + return "" % ( + self._func_name, + self._func_signature, + self._template.name, + self._pos, + ) def __str__(self): return self() @@ -445,18 +480,24 @@ def __call__(self, *args, **kw): ns = 
self._ns.copy() ns.update(values) if self._bound_self is not None: - ns['self'] = self._bound_self + ns["self"] = self._bound_self out = [] subdefs = {} self._template._interpret_codes(self._body, ns, out, subdefs) - return ''.join(out) + return "".join(out) def __get__(self, obj, type=None): if obj is None: return self return self.__class__( - self._template, self._func_name, self._func_signature, - self._body, self._ns, self._pos, bound_self=obj) + self._template, + self._func_name, + self._func_signature, + self._body, + self._ns, + self._pos, + bound_self=obj, + ) def _parse_signature(self, args, kw): values = {} @@ -464,8 +505,7 @@ def _parse_signature(self, args, kw): extra_kw = {} for name, value in kw.items(): if not var_kw and name not in sig_args: - raise TypeError( - 'Unexpected argument %s' % name) + raise TypeError("Unexpected argument %s" % name) if name in sig_args: values[sig_args] = value else: @@ -483,33 +523,29 @@ def _parse_signature(self, args, kw): break else: raise TypeError( - 'Extra position arguments: %s' - % ', '.join([repr(v) for v in args])) + "Extra position arguments: %s" % ", ".join([repr(v) for v in args]) + ) for name, value_expr in defaults.items(): if name not in values: - values[name] = self._template._eval( - value_expr, self._ns, self._pos) + values[name] = self._template._eval(value_expr, self._ns, self._pos) for name in sig_args: if name not in values: - raise TypeError( - 'Missing argument: %s' % name) + raise TypeError("Missing argument: %s" % name) if var_kw: values[var_kw] = extra_kw return values class TemplateObject: - def __init__(self, name): self.__name = name self.get = TemplateObjectGetter(self) def __repr__(self): - return '<%s %s>' % (self.__class__.__name__, self.__name) + return "<%s %s>" % (self.__class__.__name__, self.__name) class TemplateObjectGetter: - def __init__(self, template_obj): self.__template_obj = template_obj @@ -517,7 +553,7 @@ def __getattr__(self, attr): return getattr(self.__template_obj, 
attr, Empty) def __repr__(self): - return '<%s around %r>' % (self.__class__.__name__, self.__template_obj) + return "<%s around %r>" % (self.__class__.__name__, self.__template_obj) class _Empty: @@ -525,13 +561,13 @@ def __call__(self, *args, **kw): return self def __str__(self): - return '' + return "" def __repr__(self): - return 'Empty' + return "Empty" def __unicode__(self): - return '' + return "" def __iter__(self): return iter(()) @@ -539,6 +575,7 @@ def __iter__(self): def __bool__(self): return False + Empty = _Empty() del _Empty @@ -570,39 +607,45 @@ def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): """ if delimiters is None: - delimiters = ( Template.default_namespace['start_braces'], - Template.default_namespace['end_braces'] ) + delimiters = ( + Template.default_namespace["start_braces"], + Template.default_namespace["end_braces"], + ) in_expr = False chunks = [] last = 0 last_pos = (line_offset + 1, 1) - token_re = re.compile(r'%s|%s' % (re.escape(delimiters[0]), - re.escape(delimiters[1]))) + token_re = re.compile( + r"%s|%s" % (re.escape(delimiters[0]), re.escape(delimiters[1])) + ) for match in token_re.finditer(s): expr = match.group(0) pos = find_position(s, match.end(), last, last_pos) if expr == delimiters[0] and in_expr: - raise TemplateError('%s inside expression' % delimiters[0], - position=pos, - name=name) + raise TemplateError( + "%s inside expression" % delimiters[0], position=pos, name=name + ) elif expr == delimiters[1] and not in_expr: - raise TemplateError('%s outside expression' % delimiters[1], - position=pos, - name=name) + raise TemplateError( + "%s outside expression" % delimiters[1], position=pos, name=name + ) if expr == delimiters[0]: part = s[last:match.start()] if part: chunks.append(part) in_expr = True else: - chunks.append((s[last:match.start()], last_pos)) + chunks.append((s[last: match.start()], last_pos)) in_expr = False last = match.end() last_pos = pos if in_expr: - raise 
TemplateError('No %s to finish last expression' % delimiters[1], - name=name, position=last_pos) + raise TemplateError( + "No %s to finish last expression" % delimiters[1], + name=name, + position=last_pos, + ) part = s[last:] if part: chunks.append(part) @@ -610,10 +653,11 @@ def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): chunks = trim_lex(chunks) return chunks -statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)') -single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break'] -trail_whitespace_re = re.compile(r'\n\r?[\t ]*$') -lead_whitespace_re = re.compile(r'^[\t ]*\n') + +statement_re = re.compile(r"^(?:if |elif |for |def |inherit |default |py:)") +single_statements = ["else", "endif", "endfor", "enddef", "continue", "break"] +trail_whitespace_re = re.compile(r"\n\r?[\t ]*$") +lead_whitespace_re = re.compile(r"^[\t ]*\n") def trim_lex(tokens): @@ -636,37 +680,37 @@ def trim_lex(tokens): if not statement_re.search(item) and item not in single_statements: continue if not i: - prev = '' + prev = "" else: prev = tokens[i - 1] if i + 1 >= len(tokens): - next_chunk = '' + next_chunk = "" else: next_chunk = tokens[i + 1] - if (not isinstance(next_chunk, basestring_) - or not isinstance(prev, basestring_)): + if not isinstance(next_chunk, basestring_) or not isinstance(prev, basestring_): continue prev_ok = not prev or trail_whitespace_re.search(prev) if i == 1 and not prev.strip(): prev_ok = True if last_trim is not None and last_trim + 2 == i and not prev.strip(): - prev_ok = 'last' - if (prev_ok - and (not next_chunk or lead_whitespace_re.search(next_chunk) - or (i == len(tokens) - 2 and not next_chunk.strip()))): + prev_ok = "last" + if prev_ok and ( + not next_chunk + or lead_whitespace_re.search(next_chunk) + or (i == len(tokens) - 2 and not next_chunk.strip()) + ): if prev: - if ((i == 1 and not prev.strip()) - or prev_ok == 'last'): - tokens[i - 1] = '' + if (i == 1 and not prev.strip()) or 
prev_ok == "last": + tokens[i - 1] = "" else: m = trail_whitespace_re.search(prev) # +1 to leave the leading \n on: - prev = prev[:m.start() + 1] + prev = prev[: m.start() + 1] tokens[i - 1] = prev if next_chunk: last_trim = i if i == len(tokens) - 2 and not next_chunk.strip(): - tokens[i + 1] = '' + tokens[i + 1] = "" else: m = lead_whitespace_re.search(next_chunk) next_chunk = next_chunk[m.end():] @@ -676,9 +720,9 @@ def trim_lex(tokens): def find_position(string, index, last_index, last_pos): """Given a string and index, return (line, column)""" - lines = string.count('\n', last_index, index) + lines = string.count("\n", last_index, index) if lines > 0: - column = index - string.rfind('\n', last_index, index) + column = index - string.rfind("\n", last_index, index) else: column = last_pos[1] + (index - last_index) return (last_pos[0] + lines, column) @@ -701,7 +745,7 @@ def parse(s, name=None, line_offset=0, delimiters=None): >>> parse('{{py:x=1}}') [('py', (1, 3), 'x=1')] >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}') - [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] + [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] # noqa: E501 Some exceptions:: @@ -735,8 +779,10 @@ def parse(s, name=None, line_offset=0, delimiters=None): TemplateError: Multi-line py blocks must start with a newline at line 1 column 3 """ if delimiters is None: - delimiters = ( Template.default_namespace['start_braces'], - Template.default_namespace['end_braces'] ) + delimiters = ( + Template.default_namespace["start_braces"], + Template.default_namespace["end_braces"], + ) tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters) result = [] while tokens: @@ -750,66 +796,58 @@ def parse_expr(tokens, name, context=()): return tokens[0], tokens[1:] expr, pos = tokens[0] expr = expr.strip() - if expr.startswith('py:'): - expr = expr[3:].lstrip(' \t') - 
if expr.startswith('\n') or expr.startswith('\r'): - expr = expr.lstrip('\r\n') - if '\r' in expr: - expr = expr.replace('\r\n', '\n') - expr = expr.replace('\r', '') - expr += '\n' + if expr.startswith("py:"): + expr = expr[3:].lstrip(" \t") + if expr.startswith("\n") or expr.startswith("\r"): + expr = expr.lstrip("\r\n") + if "\r" in expr: + expr = expr.replace("\r\n", "\n") + expr = expr.replace("\r", "") + expr += "\n" else: - if '\n' in expr: + if "\n" in expr: raise TemplateError( - 'Multi-line py blocks must start with a newline', - position=pos, name=name) - return ('py', pos, expr), tokens[1:] - elif expr in ('continue', 'break'): - if 'for' not in context: - raise TemplateError( - 'continue outside of for loop', - position=pos, name=name) + "Multi-line py blocks must start with a newline", + position=pos, + name=name, + ) + return ("py", pos, expr), tokens[1:] + elif expr in ("continue", "break"): + if "for" not in context: + raise TemplateError("continue outside of for loop", position=pos, name=name) return (expr, pos), tokens[1:] - elif expr.startswith('if '): + elif expr.startswith("if "): return parse_cond(tokens, name, context) - elif (expr.startswith('elif ') - or expr == 'else'): - raise TemplateError( - '%s outside of an if block' % expr.split()[0], - position=pos, name=name) - elif expr in ('if', 'elif', 'for'): + elif expr.startswith("elif ") or expr == "else": raise TemplateError( - '%s with no expression' % expr, - position=pos, name=name) - elif expr in ('endif', 'endfor', 'enddef'): - raise TemplateError( - 'Unexpected %s' % expr, - position=pos, name=name) - elif expr.startswith('for '): + "%s outside of an if block" % expr.split()[0], position=pos, name=name + ) + elif expr in ("if", "elif", "for"): + raise TemplateError("%s with no expression" % expr, position=pos, name=name) + elif expr in ("endif", "endfor", "enddef"): + raise TemplateError("Unexpected %s" % expr, position=pos, name=name) + elif expr.startswith("for "): return 
parse_for(tokens, name, context) - elif expr.startswith('default '): + elif expr.startswith("default "): return parse_default(tokens, name, context) - elif expr.startswith('inherit '): + elif expr.startswith("inherit "): return parse_inherit(tokens, name, context) - elif expr.startswith('def '): + elif expr.startswith("def "): return parse_def(tokens, name, context) - elif expr.startswith('#'): - return ('comment', pos, tokens[0][0]), tokens[1:] - return ('expr', pos, tokens[0][0]), tokens[1:] + elif expr.startswith("#"): + return ("comment", pos, tokens[0][0]), tokens[1:] + return ("expr", pos, tokens[0][0]), tokens[1:] def parse_cond(tokens, name, context): start = tokens[0][1] pieces = [] - context = context + ('if',) + context = context + ("if",) while 1: if not tokens: - raise TemplateError( - 'Missing {{endif}}', - position=start, name=name) - if (isinstance(tokens[0], tuple) - and tokens[0][0] == 'endif'): - return ('cond', start) + tuple(pieces), tokens[1:] + raise TemplateError("Missing {{endif}}", position=start, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "endif": + return ("cond", start) + tuple(pieces), tokens[1:] next_chunk, tokens = parse_one_cond(tokens, name, context) pieces.append(next_chunk) @@ -817,25 +855,24 @@ def parse_cond(tokens, name, context): def parse_one_cond(tokens, name, context): (first, pos), tokens = tokens[0], tokens[1:] content = [] - if first.endswith(':'): + if first.endswith(":"): first = first[:-1] - if first.startswith('if '): - part = ('if', pos, first[3:].lstrip(), content) - elif first.startswith('elif '): - part = ('elif', pos, first[5:].lstrip(), content) - elif first == 'else': - part = ('else', pos, None, content) + if first.startswith("if "): + part = ("if", pos, first[3:].lstrip(), content) + elif first.startswith("elif "): + part = ("elif", pos, first[5:].lstrip(), content) + elif first == "else": + part = ("else", pos, None, content) else: assert 0, "Unexpected token %r at %s" % (first, pos) 
while 1: if not tokens: - raise TemplateError( - 'No {{endif}}', - position=pos, name=name) - if (isinstance(tokens[0], tuple) - and (tokens[0][0] == 'endif' - or tokens[0][0].startswith('elif ') - or tokens[0][0] == 'else')): + raise TemplateError("No {{endif}}", position=pos, name=name) + if isinstance(tokens[0], tuple) and ( + tokens[0][0] == "endif" + or tokens[0][0].startswith("elif ") + or tokens[0][0] == "else" + ): return part, tokens next_chunk, tokens = parse_expr(tokens, name, context) content.append(next_chunk) @@ -844,94 +881,93 @@ def parse_one_cond(tokens, name, context): def parse_for(tokens, name, context): first, pos = tokens[0] tokens = tokens[1:] - context = ('for',) + context + context = ("for",) + context content = [] - assert first.startswith('for '), first - if first.endswith(':'): + assert first.startswith("for "), first + if first.endswith(":"): first = first[:-1] first = first[3:].strip() match = in_re.search(first) if not match: + raise TemplateError('Bad for (no "in") in %r' % first, position=pos, name=name) + vars = first[: match.start()] + if "(" in vars: raise TemplateError( - 'Bad for (no "in") in %r' % first, - position=pos, name=name) - vars = first[:match.start()] - if '(' in vars: - raise TemplateError( - 'You cannot have () in the variable section of a for loop (%r)' - % vars, position=pos, name=name) - vars = tuple([ - v.strip() for v in first[:match.start()].split(',') - if v.strip()]) + "You cannot have () in the variable section of a for loop (%r)" % vars, + position=pos, + name=name, + ) + vars = tuple([v.strip() for v in first[: match.start()].split(",") if v.strip()]) expr = first[match.end():] while 1: if not tokens: - raise TemplateError( - 'No {{endfor}}', - position=pos, name=name) - if (isinstance(tokens[0], tuple) - and tokens[0][0] == 'endfor'): - return ('for', pos, vars, expr, content), tokens[1:] + raise TemplateError("No {{endfor}}", position=pos, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] 
== "endfor": + return ("for", pos, vars, expr, content), tokens[1:] next_chunk, tokens = parse_expr(tokens, name, context) content.append(next_chunk) def parse_default(tokens, name, context): first, pos = tokens[0] - assert first.startswith('default ') + assert first.startswith("default ") first = first.split(None, 1)[1] - parts = first.split('=', 1) + parts = first.split("=", 1) if len(parts) == 1: raise TemplateError( "Expression must be {{default var=value}}; no = found in %r" % first, - position=pos, name=name) + position=pos, + name=name, + ) var = parts[0].strip() - if ',' in var: + if "," in var: raise TemplateError( - "{{default x, y = ...}} is not supported", - position=pos, name=name) + "{{default x, y = ...}} is not supported", position=pos, name=name + ) if not var_re.search(var): raise TemplateError( - "Not a valid variable name for {{default}}: %r" - % var, position=pos, name=name) + "Not a valid variable name for {{default}}: %r" % var, + position=pos, + name=name, + ) expr = parts[1].strip() - return ('default', pos, var, expr), tokens[1:] + return ("default", pos, var, expr), tokens[1:] def parse_inherit(tokens, name, context): first, pos = tokens[0] - assert first.startswith('inherit ') + assert first.startswith("inherit ") expr = first.split(None, 1)[1] - return ('inherit', pos, expr), tokens[1:] + return ("inherit", pos, expr), tokens[1:] def parse_def(tokens, name, context): first, start = tokens[0] tokens = tokens[1:] - assert first.startswith('def ') + assert first.startswith("def ") first = first.split(None, 1)[1] - if first.endswith(':'): + if first.endswith(":"): first = first[:-1] - if '(' not in first: + if "(" not in first: func_name = first sig = ((), None, None, {}) - elif not first.endswith(')'): - raise TemplateError("Function definition doesn't end with ): %s" % first, - position=start, name=name) + elif not first.endswith(")"): + raise TemplateError( + "Function definition doesn't end with ): %s" % first, + position=start, + 
name=name, + ) else: first = first[:-1] - func_name, sig_text = first.split('(', 1) + func_name, sig_text = first.split("(", 1) sig = parse_signature(sig_text, name, start) - context = context + ('def',) + context = context + ("def",) content = [] while 1: if not tokens: - raise TemplateError( - 'Missing {{enddef}}', - position=start, name=name) - if (isinstance(tokens[0], tuple) - and tokens[0][0] == 'enddef'): - return ('def', start, func_name, sig, content), tokens[1:] + raise TemplateError("Missing {{enddef}}", position=start, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "enddef": + return ("def", start, func_name, sig, content), tokens[1:] next_chunk, tokens = parse_expr(tokens, name, context) content.append(next_chunk) @@ -947,28 +983,32 @@ def get_token(pos=False): try: tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens) except StopIteration: - return tokenize.ENDMARKER, '' + return tokenize.ENDMARKER, "" if pos: return tok_type, tok_string, (srow, scol), (erow, ecol) else: return tok_type, tok_string + while 1: var_arg_type = None tok_type, tok_string = get_token() if tok_type == tokenize.ENDMARKER: break - if tok_type == tokenize.OP and (tok_string == '*' or tok_string == '**'): + if tok_type == tokenize.OP and (tok_string == "*" or tok_string == "**"): var_arg_type = tok_string tok_type, tok_string = get_token() if tok_type != tokenize.NAME: - raise TemplateError('Invalid signature: (%s)' % sig_text, - position=pos, name=name) + raise TemplateError( + "Invalid signature: (%s)" % sig_text, position=pos, name=name + ) var_name = tok_string tok_type, tok_string = get_token() - if tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','): - if var_arg_type == '*': + if tok_type == tokenize.ENDMARKER or ( + tok_type == tokenize.OP and tok_string == "," + ): + if var_arg_type == "*": var_arg = var_name - elif var_arg_type == '**': + elif var_arg_type == "**": var_kw = var_name else: 
sig_args.append(var_name) @@ -976,9 +1016,10 @@ def get_token(pos=False): break continue if var_arg_type is not None: - raise TemplateError('Invalid signature: (%s)' % sig_text, - position=pos, name=name) - if tok_type == tokenize.OP and tok_string == '=': + raise TemplateError( + "Invalid signature: (%s)" % sig_text, position=pos, name=name + ) + if tok_type == tokenize.OP and tok_string == "=": nest_type = None unnest_type = None nest_count = 0 @@ -990,10 +1031,13 @@ def get_token(pos=False): start_pos = s end_pos = e if tok_type == tokenize.ENDMARKER and nest_count: - raise TemplateError('Invalid signature: (%s)' % sig_text, - position=pos, name=name) - if (not nest_count and - (tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','))): + raise TemplateError( + "Invalid signature: (%s)" % sig_text, position=pos, name=name + ) + if not nest_count and ( + tok_type == tokenize.ENDMARKER + or (tok_type == tokenize.OP and tok_string == ",") + ): default_expr = isolate_expression(sig_text, start_pos, end_pos) defaults[var_name] = default_expr sig_args.append(var_name) @@ -1001,14 +1045,20 @@ def get_token(pos=False): parts.append((tok_type, tok_string)) if nest_count and tok_type == tokenize.OP and tok_string == nest_type: nest_count += 1 - elif nest_count and tok_type == tokenize.OP and tok_string == unnest_type: + elif ( + nest_count and tok_type == tokenize.OP and tok_string == unnest_type + ): nest_count -= 1 if not nest_count: nest_type = unnest_type = None - elif not nest_count and tok_type == tokenize.OP and tok_string in ('(', '[', '{'): + elif ( + not nest_count + and tok_type == tokenize.OP + and tok_string in ("(", "[", "{") + ): nest_type = tok_string nest_count = 1 - unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type] + unnest_type = {"(": ")", "[": "]", "{": "}"}[nest_type] return sig_args, var_arg, var_kw, defaults @@ -1021,11 +1071,12 @@ def isolate_expression(string, start_pos, end_pos): if srow == erow: return 
lines[srow][scol:ecol] parts = [lines[srow][scol:]] - parts.extend(lines[srow+1:erow]) + parts.extend(lines[srow + 1:erow]) if erow < len(lines): # It'll sometimes give (end_row_past_finish, 0) parts.append(lines[erow][:ecol]) - return ''.join(parts) + return "".join(parts) + _fill_command_usage = """\ %prog [OPTIONS] TEMPLATE arg=value @@ -1040,25 +1091,27 @@ def fill_command(args=None): import optparse import pkg_resources import os + if args is None: args = sys.argv[1:] - dist = pkg_resources.get_distribution('Paste') - parser = optparse.OptionParser( - version=coerce_text(dist), - usage=_fill_command_usage) + dist = pkg_resources.get_distribution("Paste") + parser = optparse.OptionParser(version=coerce_text(dist), usage=_fill_command_usage) parser.add_option( - '-o', '--output', - dest='output', + "-o", + "--output", + dest="output", metavar="FILENAME", - help="File to write output to (default stdout)") + help="File to write output to (default stdout)", + ) parser.add_option( - '--env', - dest='use_env', - action='store_true', - help="Put the environment in as top-level variables") + "--env", + dest="use_env", + action="store_true", + help="Put the environment in as top-level variables", + ) options, args = parser.parse_args(args) if len(args) < 1: - print('You must give a template filename') + print("You must give a template filename") sys.exit(2) template_name = args[0] args = args[1:] @@ -1066,27 +1119,28 @@ def fill_command(args=None): if options.use_env: vars.update(os.environ) for value in args: - if '=' not in value: - print('Bad argument: %r' % value) + if "=" not in value: + print("Bad argument: %r" % value) sys.exit(2) - name, value = value.split('=', 1) - if name.startswith('py:'): + name, value = value.split("=", 1) + if name.startswith("py:"): name = name[:3] value = eval(value) vars[name] = value - if template_name == '-': + if template_name == "-": template_content = sys.stdin.read() - template_name = '' + template_name = "" else: - with 
open(template_name, 'rb') as f: + with open(template_name, "rb") as f: template_content = f.read() template = Template(template_content, name=template_name) result = template.substitute(vars) if options.output: - with open(options.output, 'wb') as f: + with open(options.output, "wb") as f: f.write(result) else: sys.stdout.write(result) -if __name__ == '__main__': + +if __name__ == "__main__": fill_command() From 67fbe24e25eb4e514faa02a945133d91668c6dbf Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 15 Oct 2024 21:58:32 +0300 Subject: [PATCH 071/101] remove deprecated mispelled delimeters kwarg --- numpy/_build_utils/tempita/_tempita.py | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/numpy/_build_utils/tempita/_tempita.py b/numpy/_build_utils/tempita/_tempita.py index e6ab007e1921..c30b6547ade6 100644 --- a/numpy/_build_utils/tempita/_tempita.py +++ b/numpy/_build_utils/tempita/_tempita.py @@ -105,21 +105,10 @@ def __init__( default_inherit=None, line_offset=0, delimiters=None, - delimeters=None, ): self.content = content # set delimiters - if delimeters: - import warnings - - warnings.warn( - "'delimeters' kwarg is being deprecated in favor of correctly" - " spelled 'delimiters'. Please adjust your code.", - DeprecationWarning, - ) - if delimiters is None: - delimiters = delimeters if delimiters is None: delimiters = ( self.default_namespace["start_braces"], @@ -131,9 +120,7 @@ def __init__( self.default_namespace = self.__class__.default_namespace.copy() self.default_namespace["start_braces"] = delimiters[0] self.default_namespace["end_braces"] = delimiters[1] - self.delimiters = self.delimeters = ( - delimiters # Keep a legacy read-only copy, but don't use it. 
- ) + self.delimiters = delimiters self._unicode = isinstance(content, str) if name is None and stacklevel is not None: @@ -412,8 +399,7 @@ def _add_line_info(self, msg, pos): def sub(content, delimiters=None, **kw): name = kw.get("__name") - delimeters = kw.pop("delimeters") if "delimeters" in kw else None # for legacy code - tmpl = Template(content, name=name, delimiters=delimiters, delimeters=delimeters) + tmpl = Template(content, name=name, delimiters=delimiters) return tmpl.substitute(kw) From 40be518472f13b0b761de34837a42fa3ba744d4f Mon Sep 17 00:00:00 2001 From: Austin <504977925@qq.com> Date: Wed, 16 Oct 2024 13:08:20 +0800 Subject: [PATCH 072/101] BUG: Fix warning "differs in levels of indirection" in npy_atomic.h with MSVC (#27557) * Fix pointer indirection warning (C4047) in npy_atomic.h for MSVC * Fix pointer indirection warning (C4047) in npy_atomic.h for MSVC * Fix atomic pointer loading with proper type casting for various architectures * Update numpy/_core/src/common/npy_atomic.h Co-authored-by: Nathan Goldbaum * Update numpy/_core/src/common/npy_atomic.h Co-authored-by: Nathan Goldbaum * Update numpy/_core/src/common/npy_atomic.h Co-authored-by: Nathan Goldbaum * Update numpy/_core/src/common/npy_atomic.h Co-authored-by: Nathan Goldbaum * Update numpy/_core/src/common/npy_atomic.h Co-authored-by: Nathan Goldbaum --------- Co-authored-by: Nathan Goldbaum --- numpy/_core/src/common/npy_atomic.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h index b92d58d583c0..5dfff57b604f 100644 --- a/numpy/_core/src/common/npy_atomic.h +++ b/numpy/_core/src/common/npy_atomic.h @@ -53,15 +53,15 @@ npy_atomic_load_ptr(const void *obj) { #elif defined(MSC_ATOMICS) #if SIZEOF_VOID_P == 8 #if defined(_M_X64) || defined(_M_IX86) - return *(volatile uint64_t *)obj; + return (void *)*(volatile uint64_t *)obj; #elif defined(_M_ARM64) - return (uint64_t)__ldar64((unsigned 
__int64 volatile *)obj); + return (void *)__ldar64((unsigned __int64 volatile *)obj); #endif #else #if defined(_M_X64) || defined(_M_IX86) - return *(volatile uint32_t *)obj; + return (void *)*(volatile uint32_t *)obj; #elif defined(_M_ARM64) - return (uint32_t)__ldar32((unsigned __int32 volatile *)obj); + return (void *)__ldar32((unsigned __int32 volatile *)obj); #endif #endif #elif defined(GCC_ATOMICS) From 119a72fd79746e044fe0fbdc1bb87b456d9d194d Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Thu, 17 Oct 2024 12:10:37 -0700 Subject: [PATCH 073/101] Update highway to latest --- numpy/_core/src/highway | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 5975f5ef76c3..a97b5d371d69 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 5975f5ef76c3e4364844d869454046f0f8420ef8 +Subproject commit a97b5d371d696564e206627a883b1341c65bd983 From bf8350239ce36cdc5ea65b4402521628e84a8056 Mon Sep 17 00:00:00 2001 From: "Benjamin A. Beasley" Date: Wed, 16 Oct 2024 13:25:35 -0400 Subject: [PATCH 074/101] BUG: Adjust numpy.i for SWIG 4.3 compatibility Replace each `SWIG_Python_AppendOutput` with `SWIG_AppendOutput`. Fixes #27578. 
--- tools/swig/numpy.i | 68 +++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i index c8c26cbcd3d6..747446648c8b 100644 --- a/tools/swig/numpy.i +++ b/tools/swig/numpy.i @@ -1989,7 +1989,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE ARGOUT_ARRAY1[ANY]) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) @@ -2018,7 +2018,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) @@ -2047,7 +2047,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) @@ -2065,7 +2065,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) @@ -2083,7 +2083,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY]) @@ -2101,7 +2101,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY]) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); 
+ $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /*****************************/ @@ -2126,7 +2126,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) @@ -2147,7 +2147,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) @@ -2169,7 +2169,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) @@ -2191,7 +2191,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) @@ -2213,7 +2213,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) @@ -2235,7 +2235,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2259,7 +2259,7 @@ void free_cap(PyObject * 
cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, @@ -2283,7 +2283,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2307,7 +2307,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, @@ -2331,7 +2331,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2356,7 +2356,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, @@ -2381,7 +2381,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2406,7 +2406,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result 
= SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, @@ -2431,7 +2431,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /*************************************/ @@ -2465,7 +2465,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEWM_ARRAY1) @@ -2495,7 +2495,7 @@ PyObject* cap = PyCapsule_New((void*)(*$2), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) @@ -2526,7 +2526,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_ARRAY2) @@ -2557,7 +2557,7 @@ PyObject* cap = PyCapsule_New((void*)(*$3), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) @@ -2588,7 +2588,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** 
ARGOUTVIEWM_FARRAY2) @@ -2619,7 +2619,7 @@ PyObject* cap = PyCapsule_New((void*)(*$3), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2652,7 +2652,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, @@ -2685,7 +2685,7 @@ PyObject* cap = PyCapsule_New((void*)(*$4), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2718,7 +2718,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, @@ -2751,7 +2751,7 @@ PyObject* cap = PyCapsule_New((void*)(*$4), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2785,7 +2785,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, @@ -2819,7 +2819,7 @@ PyObject* cap = PyCapsule_New((void*)(*$5), 
SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2853,7 +2853,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, @@ -2887,7 +2887,7 @@ PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /**************************************/ From e388737a6fc2f9c5ec60d86ef2607231905d3eb2 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar <142383124+abhishek-iitmadras@users.noreply.github.com> Date: Mon, 21 Oct 2024 23:05:14 +0530 Subject: [PATCH 075/101] Update linux_qemu.yml include the --platform flag in all docker run commands --- .github/workflows/linux_qemu.yml | 51 +++++++++++++++++++------------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index c63c5b7a9f20..d44fc365973b 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -44,22 +44,25 @@ jobs: # test_unary_spurious_fpexception is currently skipped # FIXME(@seiko2plus): Requires confirmation for the following issue: # The presence of an FP invalid exception caused by sqrt. Unsure if this is a qemu bug or not. 
- "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_unary_spurious_fpexception" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_unary_spurious_fpexception", + "arm" + ] - [ "ppc64le", "powerpc64le-linux-gnu", "ppc64le/ubuntu:22.04", "-Dallow-noblas=true", "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - ] + "ppc64le" + ] - [ "ppc64le - baseline(Power9)", "powerpc64le-linux-gnu", "ppc64le/ubuntu:22.04", "-Dallow-noblas=true -Dcpu-baseline=vsx3", "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - ] + "ppc64le" + ] - [ "s390x", "s390x-linux-gnu", @@ -68,27 +71,31 @@ jobs: # Skipping TestRationalFunctions.test_gcd_overflow test # because of a possible qemu bug that appears to be related to int64 overflow in absolute operation. # TODO(@seiko2plus): Confirm the bug and provide a minimal reproducer, then report it to upstream. - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", + "s390x" + ] - [ "s390x - baseline(Z13)", "s390x-linux-gnu", "s390x/ubuntu:22.04", "-Dallow-noblas=true -Dcpu-baseline=vx", - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", + "s390x" + ] - [ "riscv64", "riscv64-linux-gnu", "riscv64/ubuntu:22.04", "-Dallow-noblas=true", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc" - ] + "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", + "riscv64" + ] env: TOOLCHAIN_NAME: ${{ matrix.BUILD_PROP[1] }} DOCKER_CONTAINER: ${{ matrix.BUILD_PROP[2] }} MESON_OPTIONS: ${{ matrix.BUILD_PROP[3] }} RUNTIME_TEST_FILTER: ${{ matrix.BUILD_PROP[4] }} + ARCH: ${{ matrix.BUILD_PROP[5] }} TERM: 
xterm-256color name: "${{ matrix.BUILD_PROP[0] }}" @@ -117,7 +124,8 @@ jobs: - name: Creates new container if: steps.container-cache.outputs.cache-hit != 'true' run: | - docker run --name the_container --interactive -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " + docker run --platform=linux/${ARCH} --name the_container --interactive \ + -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " apt update && apt install -y cmake git python3 python-is-python3 python3-dev python3-pip && mkdir -p /lib64 && ln -s /host/lib64/ld-* /lib64/ && @@ -147,10 +155,11 @@ jobs: - name: Meson Build run: | - docker run --rm -e "TERM=xterm-256color" -v $(pwd):/numpy -v /:/host the_container \ - /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build --clean -- ${MESON_OPTIONS} - '" + docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ + -v $(pwd):/numpy -v /:/host the_container \ + /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' + cd /numpy && spin build --clean -- ${MESON_OPTIONS} + '" - name: Meson Log if: always() @@ -158,9 +167,11 @@ jobs: - name: Run Tests run: | - docker run --rm -e "TERM=xterm-256color" -v $(pwd):/numpy -v /:/host the_container \ - /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - export F90=/usr/bin/gfortran - cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" - '" + docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ + -v $(pwd):/numpy -v /:/host the_container \ + /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' + export F90=/usr/bin/gfortran + cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" + '" + From 3b6c4da807bd001fb7403a42dcb4c64bb7b286f0 Mon Sep 17 00:00:00 2001 From: Marcel Telka Date: Sun, 27 Oct 2024 00:44:23 +0200 Subject: [PATCH 076/101] BLD: Do not set __STDC_VERSION__ to zero during build The __STDC_VERSION__ set to zero prevents successful build on at least one platform - 
OpenIndiana. In addiiton, zero is not a valid value for __STDC_VERSION__ and it is unclear why the setting was added. Closes #25366. --- numpy/_core/meson.build | 1 - numpy/_core/src/common/npy_atomic.h | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 3d4ef36c055c..544af3665be7 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -681,7 +681,6 @@ c_args_common = [ # Same as NPY_CXX_FLAGS (TODO: extend for what ccompiler_opt adds) cpp_args_common = c_args_common + [ - '-D__STDC_VERSION__=0', # for compatibility with C headers ] if cc.get_argument_syntax() != 'msvc' cpp_args_common += [ diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h index 5dfff57b604f..910028dcde7c 100644 --- a/numpy/_core/src/common/npy_atomic.h +++ b/numpy/_core/src/common/npy_atomic.h @@ -9,7 +9,8 @@ #include "numpy/npy_common.h" -#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \ + && !defined(__STDC_NO_ATOMICS__) // TODO: support C++ atomics as well if this header is ever needed in C++ #include #include From 281af6f77fb5aae92b5ea3bbcef600b52a9c99da Mon Sep 17 00:00:00 2001 From: kp2pml30 Date: Tue, 29 Oct 2024 20:25:02 +0400 Subject: [PATCH 077/101] ENH: fix wasm32 runtime type error in numpy._core The error is caused by function pointer type mismatch in call.indirect --- numpy/_core/src/multiarray/dtypemeta.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 244b47250786..316a61d31da4 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -1256,22 +1256,22 @@ dtypemeta_wrap_legacy_descriptor( static PyObject * -dtypemeta_get_abstract(PyArray_DTypeMeta *self) { +dtypemeta_get_abstract(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) 
{ return PyBool_FromLong(NPY_DT_is_abstract(self)); } static PyObject * -dtypemeta_get_legacy(PyArray_DTypeMeta *self) { +dtypemeta_get_legacy(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_legacy(self)); } static PyObject * -dtypemeta_get_parametric(PyArray_DTypeMeta *self) { +dtypemeta_get_parametric(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_parametric(self)); } static PyObject * -dtypemeta_get_is_numeric(PyArray_DTypeMeta *self) { +dtypemeta_get_is_numeric(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_numeric(self)); } From f055fb97531bd89ab0d97f3a5a9f6a11dab4b6ec Mon Sep 17 00:00:00 2001 From: Peter Hawkins Date: Tue, 29 Oct 2024 18:53:12 +0000 Subject: [PATCH 078/101] BUG: Fix a reference count leak in npy_find_descr_for_scalar. The reference count for common->singleton is incremented twice, when it should only be incremented once. This leak was found when running Google's tests with NumPy 2.1.2, and appears to be a new leak as of NumPy 2.1, probably introduced in https://github.com/numpy/numpy/commit/1cb40445aaf63224b458601c1fff9a4e74b44eda. In particular, this test: https://github.com/protocolbuffers/protobuf/blob/6cb71402940c6645e49959dfc915f16f4d2e6c20/python/google/protobuf/internal/numpy/numpy_test.py#L67 runs in Py_DEBUG mode and verifies that the total reference count before and after various test cases is unchanged. The same test case has found other NumPy reference count leaks in the past and it may be sensible to add something similar to NumPy's own test suite. 
--- numpy/_core/src/multiarray/abstractdtypes.c | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c index 21360a16c1b6..ae7a8ec1506c 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -476,7 +476,6 @@ npy_find_descr_for_scalar( /* If the DType doesn't know the scalar type, guess at default. */ !NPY_DT_CALL_is_known_scalar_type(common, Py_TYPE(scalar))) { if (common->singleton != NULL) { - Py_INCREF(common->singleton); res = common->singleton; Py_INCREF(res); } From a121864b22b6fe5da2d2ae8924b58a31d1b67bf0 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 24 Oct 2024 15:43:23 -0600 Subject: [PATCH 079/101] BUG: fixes for StringDType/unicode promoters --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 140 ++++++++++++------- numpy/_core/tests/test_stringdtype.py | 50 +++++++ 2 files changed, 140 insertions(+), 50 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index ed9f62077589..6187cb9fce68 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -2595,10 +2595,17 @@ init_stringdtype_ufuncs(PyObject *umath) "find", "rfind", "index", "rindex", "count", }; - PyArray_DTypeMeta *findlike_promoter_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, - &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, - &PyArray_DefaultIntDType, + PyArray_DTypeMeta *findlike_promoter_dtypes[2][5] = { + { + &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_IntAbstractDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_IntAbstractDType, + }, }; find_like_function *findlike_functions[] = { @@ -2618,11 +2625,12 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - 
- if (add_promoter(umath, findlike_names[i], - findlike_promoter_dtypes, - 5, string_findlike_promoter) < 0) { - return -1; + for (int j=0; j<2; j++) { + if (add_promoter(umath, findlike_names[i], + findlike_promoter_dtypes[j], + 5, string_findlike_promoter) < 0) { + return -1; + } } } @@ -2636,10 +2644,17 @@ init_stringdtype_ufuncs(PyObject *umath) "startswith", "endswith", }; - PyArray_DTypeMeta *startswith_endswith_promoter_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, - &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, - &PyArray_BoolDType, + PyArray_DTypeMeta *startswith_endswith_promoter_dtypes[2][5] = { + { + &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_BoolDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_BoolDType, + }, }; static STARTPOSITION startswith_endswith_startposition[] = { @@ -2656,11 +2671,12 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - - if (add_promoter(umath, startswith_endswith_names[i], - startswith_endswith_promoter_dtypes, - 5, string_startswith_endswith_promoter) < 0) { - return -1; + for (int j=0; j<2; j++) { + if (add_promoter(umath, startswith_endswith_names[i], + startswith_endswith_promoter_dtypes[j], + 5, string_startswith_endswith_promoter) < 0) { + return -1; + } } } @@ -2732,24 +2748,38 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - PyArray_DTypeMeta *replace_promoter_pyint_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, - &PyArray_IntAbstractDType, &PyArray_StringDType, - }; - - if (add_promoter(umath, "_replace", replace_promoter_pyint_dtypes, 5, - string_replace_promoter) < 0) { - return -1; - } - - PyArray_DTypeMeta *replace_promoter_int64_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, - &PyArray_Int64DType, &PyArray_StringDType, + PyArray_DTypeMeta 
*replace_promoter_unicode_dtypes[6][5] = { + { + &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_StringDType, &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, }; - if (add_promoter(umath, "_replace", replace_promoter_int64_dtypes, 5, - string_replace_promoter) < 0) { - return -1; + for (int j=0; j<6; j++) { + if (add_promoter(umath, "_replace", replace_promoter_unicode_dtypes[j], 5, + string_replace_promoter) < 0) { + return -1; + } } PyArray_DTypeMeta *expandtabs_dtypes[] = { @@ -2767,9 +2797,9 @@ init_stringdtype_ufuncs(PyObject *umath) } PyArray_DTypeMeta *expandtabs_promoter_dtypes[] = { - &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, - &PyArray_StringDType + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_StringDType }; if (add_promoter(umath, "_expandtabs", expandtabs_promoter_dtypes, @@ -2803,7 +2833,7 @@ init_stringdtype_ufuncs(PyObject *umath) PyArray_DTypeMeta *int_promoter_dtypes[] = { &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, + &PyArray_IntAbstractDType, &PyArray_StringDType, &PyArray_StringDType, }; @@ -2814,17 +2844,27 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - PyArray_DTypeMeta *unicode_promoter_dtypes[] = { - &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, - &PyArray_UnicodeDType, - &PyArray_StringDType, + PyArray_DTypeMeta 
*unicode_promoter_dtypes[2][4] = { + { + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_UnicodeDType, + &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, + &PyArray_StringDType, + &PyArray_StringDType, + }, }; - if (add_promoter(umath, center_ljust_rjust_names[i], - unicode_promoter_dtypes, 4, - string_multiply_promoter) < 0) { - return -1; + for (int j=0; j<2; j++) { + if (add_promoter(umath, center_ljust_rjust_names[i], + unicode_promoter_dtypes[j], 4, + string_multiply_promoter) < 0) { + return -1; + } } } @@ -2840,13 +2880,13 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - PyArray_DTypeMeta *int_promoter_dtypes[] = { + PyArray_DTypeMeta *zfill_promoter_dtypes[] = { &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, + &PyArray_IntAbstractDType, &PyArray_StringDType, }; - if (add_promoter(umath, "_zfill", int_promoter_dtypes, 3, + if (add_promoter(umath, "_zfill", zfill_promoter_dtypes, 3, string_multiply_promoter) < 0) { return -1; } diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 637a195ca696..7336c4e3f750 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -996,6 +996,56 @@ def test_ufunc_multiply(dtype, string_list, other, other_dtype, use_out): other * arr +def test_findlike_promoters(): + r = "Wally" + l = "Where's Wally?" + s = np.int32(3) + e = np.int8(13) + for dtypes in [("T", "U"), ("U", "T")]: + for function, answer in [ + (np.strings.index, 8), + (np.strings.endswith, True), + ]: + assert answer == function( + np.array(l, dtype=dtypes[0]), np.array(r, dtype=dtypes[1]), s, e + ) + + +def test_strip_promoter(): + arg = "Hello!!!!" + strip_char = "!" 
+ answer = "Hello" + for dtypes in [("T", "U"), ("U", "T")]: + assert answer == np.strings.strip( + np.array(arg, dtype=dtypes[0]), np.array(strip_char, dtype=dtypes[1]) + ) + + +def test_replace_promoter(): + arg = ["Hello, planet!", "planet, Hello!"] + old = "planet" + new = "world" + answer = ["Hello, world!", "world, Hello!"] + for dtypes in itertools.product("TU", repeat=3): + if dtypes == ("U", "U", "U"): + continue + answer_arr = np.strings.replace( + np.array(arg, dtype=dtypes[0]), + np.array(old, dtype=dtypes[1]), + np.array(new, dtype=dtypes[2]), + ) + assert_array_equal(answer_arr, answer) + + +def test_center_promoter(): + arg = "Hello, planet!" + fillchar = "/" + for dtypes in [("T", "U"), ("U", "T")]: + assert "/Hello, planet!/" == np.strings.center( + np.array(arg, dtype=dtypes[0]), 16, np.array(fillchar, dtype=dtypes[1]) + ) + + DATETIME_INPUT = [ np.datetime64("1923-04-14T12:43:12"), np.datetime64("1994-06-21T14:43:15"), From a90fe7cc6e52be410321b5fffe4e85e76acedb0a Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 25 Oct 2024 15:00:05 -0600 Subject: [PATCH 080/101] BUG: fix more issues with string ufunc promotion --- numpy/_core/strings.py | 138 ++++++++++++++++++-------- numpy/_core/tests/test_stringdtype.py | 20 ++-- 2 files changed, 107 insertions(+), 51 deletions(-) diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 0820411840ea..6d6574bc8d6b 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -669,20 +669,29 @@ def center(a, width, fillchar=' '): array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype=' Date: Sat, 26 Oct 2024 13:24:41 -0600 Subject: [PATCH 081/101] BUG: substantially simplify and fix issue with justification promoter --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 41 +++++--- numpy/_core/strings.py | 104 ++++++++----------- 2 files changed, 67 insertions(+), 78 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 
6187cb9fce68..8e25b3968cfe 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1598,6 +1598,20 @@ string_expandtabs_strided_loop(PyArrayMethod_Context *context, return -1; } +static int +string_center_ljust_rjust_promoter( + PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_StringDType); + new_op_dtypes[3] = NPY_DT_NewRef(&PyArray_StringDType); + return 0; +} + static NPY_CASTING center_ljust_rjust_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), @@ -2831,20 +2845,13 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - PyArray_DTypeMeta *int_promoter_dtypes[] = { - &PyArray_StringDType, - &PyArray_IntAbstractDType, - &PyArray_StringDType, - &PyArray_StringDType, - }; - - if (add_promoter(umath, center_ljust_rjust_names[i], - int_promoter_dtypes, 4, - string_multiply_promoter) < 0) { - return -1; - } - - PyArray_DTypeMeta *unicode_promoter_dtypes[2][4] = { + PyArray_DTypeMeta *promoter_dtypes[3][4] = { + { + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_StringDType, + &PyArray_StringDType, + }, { &PyArray_StringDType, &PyArray_IntAbstractDType, @@ -2859,10 +2866,10 @@ init_stringdtype_ufuncs(PyObject *umath) }, }; - for (int j=0; j<2; j++) { + for (int j=0; j<3; j++) { if (add_promoter(umath, center_ljust_rjust_names[i], - unicode_promoter_dtypes[j], 4, - string_multiply_promoter) < 0) { + promoter_dtypes[j], 4, + string_center_ljust_rjust_promoter) < 0) { return -1; } } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 6d6574bc8d6b..4f732fdcfdbc 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -670,28 +670,26 @@ def center(a, width, fillchar=' '): """ width = 
np.asanyarray(width) + if not np.issubdtype(width.dtype, np.integer): raise TypeError(f"unsupported type {width.dtype} for operand 'width'") a = np.asanyarray(a) fillchar = np.asanyarray(fillchar) - try_out_dt = np.result_type(a, fillchar) - if try_out_dt.char == "T": - a = a.astype(try_out_dt, copy=False) - fillchar = fillchar.astype(try_out_dt, copy=False) - out = None - else: - fillchar = fillchar.astype(a.dtype, copy=False) - width = np.maximum(str_len(a), width) - out_dtype = f"{a.dtype.char}{width.max()}" - shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) - out = np.empty_like(a, shape=shape, dtype=out_dtype) - if np.any(str_len(fillchar) != 1): raise TypeError( "The fill character must be exactly one character long") + if np.result_type(a, fillchar).char == "T": + return _center(a, width, fillchar) + + fillchar = fillchar.astype(a.dtype, copy=False) + width = np.maximum(str_len(a), width) + out_dtype = f"{a.dtype.char}{width.max()}" + shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) + out = np.empty_like(a, shape=shape, dtype=out_dtype) + return _center(a, width, fillchar, out=out) @@ -742,22 +740,19 @@ def ljust(a, width, fillchar=' '): a = np.asanyarray(a) fillchar = np.asanyarray(fillchar) - try_out_dt = np.result_type(a, fillchar) - if try_out_dt.char == "T": - a = a.astype(try_out_dt, copy=False) - fillchar = fillchar.astype(try_out_dt, copy=False) - out = None - else: - fillchar = fillchar.astype(a.dtype, copy=False) - width = np.maximum(str_len(a), width) - shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) - out_dtype = f"{a.dtype.char}{width.max()}" - out = np.empty_like(a, shape=shape, dtype=out_dtype) - if np.any(str_len(fillchar) != 1): raise TypeError( "The fill character must be exactly one character long") + if np.result_type(a, fillchar).char == "T": + return _ljust(a, width, fillchar) + + fillchar = fillchar.astype(a.dtype, copy=False) + width = np.maximum(str_len(a), width) + shape = 
np.broadcast_shapes(a.shape, width.shape, fillchar.shape) + out_dtype = f"{a.dtype.char}{width.max()}" + out = np.empty_like(a, shape=shape, dtype=out_dtype) + return _ljust(a, width, fillchar, out=out) @@ -808,22 +803,19 @@ def rjust(a, width, fillchar=' '): a = np.asanyarray(a) fillchar = np.asanyarray(fillchar) - try_out_dt = np.result_type(a, fillchar) - if try_out_dt.char == "T": - a = a.astype(try_out_dt, copy=False) - fillchar = fillchar.astype(try_out_dt, copy=False) - out = None - else: - fillchar = fillchar.astype(a.dtype, copy=False) - width = np.maximum(str_len(a), width) - shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) - out_dtype = f"{a.dtype.char}{width.max()}" - out = np.empty_like(a, shape=shape, dtype=out_dtype) - if np.any(str_len(fillchar) != 1): raise TypeError( "The fill character must be exactly one character long") + if np.result_type(a, fillchar).char == "T": + return _rjust(a, width, fillchar) + + fillchar = fillchar.astype(a.dtype, copy=False) + width = np.maximum(str_len(a), width) + shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape) + out_dtype = f"{a.dtype.char}{width.max()}" + out = np.empty_like(a, shape=shape, dtype=out_dtype) + return _rjust(a, width, fillchar, out=out) @@ -1246,23 +1238,19 @@ def replace(a, old, new, count=-1): new_dtype = getattr(new, 'dtype', None) new = np.asanyarray(new) - try_out_dt = np.result_type(arr, old, new) - if try_out_dt.char == "T": - arr = a.astype(try_out_dt, copy=False) - old = old.astype(try_out_dt, copy=False) - new = new.astype(try_out_dt, copy=False) - counts = count - out = None - else: - a_dt = arr.dtype - old = old.astype(old_dtype if old_dtype else a_dt, copy=False) - new = new.astype(new_dtype if new_dtype else a_dt, copy=False) - max_int64 = np.iinfo(np.int64).max - counts = _count_ufunc(arr, old, 0, max_int64) - counts = np.where(count < 0, counts, np.minimum(counts, count)) - buffersizes = str_len(arr) + counts * (str_len(new) - str_len(old)) - 
out_dtype = f"{arr.dtype.char}{buffersizes.max()}" - out = np.empty_like(arr, shape=buffersizes.shape, dtype=out_dtype) + if np.result_type(arr, old, new).char == "T": + return _replace(arr, old, new, count) + + a_dt = arr.dtype + old = old.astype(old_dtype if old_dtype else a_dt, copy=False) + new = new.astype(new_dtype if new_dtype else a_dt, copy=False) + max_int64 = np.iinfo(np.int64).max + counts = _count_ufunc(arr, old, 0, max_int64) + counts = np.where(count < 0, counts, np.minimum(counts, count)) + buffersizes = str_len(arr) + counts * (str_len(new) - str_len(old)) + out_dtype = f"{arr.dtype.char}{buffersizes.max()}" + out = np.empty_like(arr, shape=buffersizes.shape, dtype=out_dtype) + return _replace(arr, old, new, counts, out=out) @@ -1465,10 +1453,7 @@ def partition(a, sep): a = np.asanyarray(a) sep = np.asanyarray(sep) - try_out_dt = np.result_type(a, sep) - if try_out_dt.char == "T": - a = a.astype(try_out_dt, copy=False) - sep = sep.astype(try_out_dt, copy=False) + if np.result_type(a, sep).char == "T": return _partition(a, sep) sep = sep.astype(a.dtype, copy=False) @@ -1535,10 +1520,7 @@ def rpartition(a, sep): a = np.asanyarray(a) sep = np.asanyarray(sep) - try_out_dt = np.result_type(a, sep) - if try_out_dt.char == "T": - a = a.astype(try_out_dt, copy=False) - sep = sep.astype(try_out_dt, copy=False) + if np.result_type(a, sep).char == "T": return _rpartition(a, sep) sep = sep.astype(a.dtype, copy=False) From e6b02d7ba6d8a34b169113f9874d015ef875d67e Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sat, 26 Oct 2024 13:31:21 -0600 Subject: [PATCH 082/101] DOC: add release note --- doc/release/upcoming_changes/27636.improvement.rst | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 doc/release/upcoming_changes/27636.improvement.rst diff --git a/doc/release/upcoming_changes/27636.improvement.rst b/doc/release/upcoming_changes/27636.improvement.rst new file mode 100644 index 000000000000..53c202b31197 --- /dev/null +++ 
b/doc/release/upcoming_changes/27636.improvement.rst @@ -0,0 +1,3 @@ +* Fixed a number of issues around promotion for string ufuncs with StringDType + arguments. Mixing StringDType and the fixed-width DTypes using the string + ufuncs should now generate much more uniform results. From cbda85b3551910a8e0d80b89c727b62d66aec20f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 30 Oct 2024 21:25:06 -0600 Subject: [PATCH 083/101] REL: Prepare for the NumPy 2.1.3 release [wheel build] - Create 2.1.3-changelog.rst - Update 2.1.3-notes.rst - Delete release fragments --- doc/changelog/2.1.3-changelog.rst | 49 +++++++++++++++ doc/release/upcoming_changes/26766.change.rst | 2 - .../upcoming_changes/27636.improvement.rst | 3 - doc/source/release/2.1.3-notes.rst | 63 +++++++++++++++++++ 4 files changed, 112 insertions(+), 5 deletions(-) create mode 100644 doc/changelog/2.1.3-changelog.rst delete mode 100644 doc/release/upcoming_changes/26766.change.rst delete mode 100644 doc/release/upcoming_changes/27636.improvement.rst diff --git a/doc/changelog/2.1.3-changelog.rst b/doc/changelog/2.1.3-changelog.rst new file mode 100644 index 000000000000..073bd002e7ca --- /dev/null +++ b/doc/changelog/2.1.3-changelog.rst @@ -0,0 +1,49 @@ + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar + +* Austin + +* Benjamin A. Beasley + +* Charles Harris +* Christian Lorentzen +* Marcel Telka + +* Matti Picus +* Michael Davidsaver + +* Nathan Goldbaum +* Peter Hawkins +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* dependabot[bot] +* kp2pml30 + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. 
+ +* `#27512 `__: MAINT: prepare 2.1.x for further development +* `#27537 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.1 +* `#27538 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27539 `__: MAINT: MSVC does not support #warning directive +* `#27543 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27561 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27562 `__: BLD: update vendored Meson to 1.5.2 +* `#27563 `__: BUG: weighted quantile for some zero weights (#27549) +* `#27565 `__: MAINT: Use miniforge for macos conda test. +* `#27566 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27569 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27570 `__: BLD: start building Windows free-threaded wheels [wheel build] +* `#27571 `__: BUILD: vendor tempita from Cython +* `#27574 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27592 `__: MAINT: Update Highway to latest +* `#27593 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27616 `__: BUG: Fix Linux QEMU CI workflow +* `#27668 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27669 `__: ENH: fix wasm32 runtime type error in numpy._core +* `#27672 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. +* `#27673 `__: BUG: fixes for StringDType/unicode promoters diff --git a/doc/release/upcoming_changes/26766.change.rst b/doc/release/upcoming_changes/26766.change.rst deleted file mode 100644 index f9223a1d1114..000000000000 --- a/doc/release/upcoming_changes/26766.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* `numpy.fix` now won't perform casting to a floating data-type for integer - and boolean data-type input arrays. 
diff --git a/doc/release/upcoming_changes/27636.improvement.rst b/doc/release/upcoming_changes/27636.improvement.rst deleted file mode 100644 index 53c202b31197..000000000000 --- a/doc/release/upcoming_changes/27636.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Fixed a number of issues around promotion for string ufuncs with StringDType - arguments. Mixing StringDType and the fixed-width DTypes using the string - ufuncs should now generate much more uniform results. diff --git a/doc/source/release/2.1.3-notes.rst b/doc/source/release/2.1.3-notes.rst index 8e1b7e4d6da0..cd797e0062a0 100644 --- a/doc/source/release/2.1.3-notes.rst +++ b/doc/source/release/2.1.3-notes.rst @@ -9,10 +9,73 @@ discovered after the 2.1.2 release. The Python versions supported by this release are 3.10-3.13. + +Improvements +============ + +* Fixed a number of issues around promotion for string ufuncs with StringDType + arguments. Mixing StringDType and the fixed-width DTypes using the string + ufuncs should now generate much more uniform results. + + (`gh-27636 `__) + + +Changes +======= + +* `numpy.fix` now won't perform casting to a floating data-type for integer + and boolean data-type input arrays. + + (`gh-26766 `__) + + Contributors ============ +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar + +* Austin + +* Benjamin A. Beasley + +* Charles Harris +* Christian Lorentzen +* Marcel Telka + +* Matti Picus +* Michael Davidsaver + +* Nathan Goldbaum +* Peter Hawkins +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* dependabot[bot] +* kp2pml30 + + Pull requests merged ==================== +A total of 21 pull requests were merged for this release. 
+ +* `#27512 `__: MAINT: prepare 2.1.x for further development +* `#27537 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.1 +* `#27538 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27539 `__: MAINT: MSVC does not support #warning directive +* `#27543 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27561 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27562 `__: BLD: update vendored Meson to 1.5.2 +* `#27563 `__: BUG: weighted quantile for some zero weights (#27549) +* `#27565 `__: MAINT: Use miniforge for macos conda test. +* `#27566 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27569 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27570 `__: BLD: start building Windows free-threaded wheels [wheel build] +* `#27571 `__: BUILD: vendor tempita from Cython +* `#27574 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27592 `__: MAINT: Update Highway to latest +* `#27593 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27616 `__: BUG: Fix Linux QEMU CI workflow +* `#27668 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27669 `__: ENH: fix wasm32 runtime type error in numpy._core +* `#27672 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. 
+* `#27673 `__: BUG: fixes for StringDType/unicode promoters + From 20df445936ca44eeea3cdf0440f59e896a8631d3 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 2 Nov 2024 12:09:40 -0600 Subject: [PATCH 084/101] MAINT: prepare 2.1.x for further development - Create 2.1.4-notes.rst - Update release.rst - Update pavement.py - Update pyproject.toml --- doc/source/release.rst | 1 + doc/source/release/2.1.4-notes.rst | 23 +++++++++++++++++++++++ pavement.py | 2 +- pyproject.toml | 2 +- 4 files changed, 26 insertions(+), 2 deletions(-) create mode 100644 doc/source/release/2.1.4-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index c990b7ab8076..5f3e184b9478 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.1.4 2.1.3 2.1.2 2.1.1 diff --git a/doc/source/release/2.1.4-notes.rst b/doc/source/release/2.1.4-notes.rst new file mode 100644 index 000000000000..4e9d9f1a03aa --- /dev/null +++ b/doc/source/release/2.1.4-notes.rst @@ -0,0 +1,23 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.1.4 Release Notes +========================== + +NumPy 2.1.4 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.3 release. + +The Python versions supported by this release are 3.10-3.13. + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. 
include:: notes-towncrier.rst diff --git a/pavement.py b/pavement.py index 4149f571ef28..3f59655a6a8a 100644 --- a/pavement.py +++ b/pavement.py @@ -38,7 +38,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.1.3-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.1.4-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index b5782a7e4258..1d396005aac2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.1.3" +version = "2.1.4" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From 677a6591bf0d8f769bff125b43f1baaf0456109b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 7 Nov 2024 09:18:42 -0700 Subject: [PATCH 085/101] BUG: fix incorrect output descriptor in fancy indexing --- numpy/_core/src/multiarray/mapping.c | 2 +- numpy/_core/tests/test_stringdtype.py | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 4a6c1f093769..e5a2a2de7fba 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1667,7 +1667,7 @@ array_subscript(PyArrayObject *self, PyObject *op) if (PyArray_GetDTypeTransferFunction(1, itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + PyArray_DESCR(self), PyArray_DESCR(mit->extra_op), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto finish; } diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index f087802e310b..e2290abdf1f6 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -496,14 +496,15 @@ def test_fancy_indexing(string_list): assert_array_equal(sarr, sarr[np.arange(sarr.shape[0])]) # see gh-27003 and gh-27053 - for ind in [[True, True], [0, 1], ...]: - 
for lop in [['a'*16, 'b'*16], ['', '']]: + for ind in [[True, True], [0, 1], ..., np.array([0, 1], dtype='uint8')]: + for lop in [['a'*25, 'b'*25], ['', '']]: a = np.array(lop, dtype="T") - rop = ['d'*16, 'e'*16] + assert_array_equal(a[ind], a) + rop = ['d'*25, 'e'*25] for b in [rop, np.array(rop, dtype="T")]: a[ind] = b assert_array_equal(a, b) - assert a[0] == 'd'*16 + assert a[0] == 'd'*25 def test_creation_functions(): From 8b078eee7c29211a10a69081d57d44f78ef3efb0 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 13 Nov 2024 08:20:37 -0700 Subject: [PATCH 086/101] TST: add segfaulting test --- numpy/_core/tests/test_stringdtype.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index e2290abdf1f6..ce4d70b624ae 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -495,9 +495,23 @@ def test_fancy_indexing(string_list): sarr = np.array(string_list, dtype="T") assert_array_equal(sarr, sarr[np.arange(sarr.shape[0])]) + inds = [ + [True, True], + [0, 1], + ..., + np.array([0, 1], dtype='uint8'), + ] + + lops = [ + ['a'*25, 'b'*25], + ['', ''], + ['hello', 'world'], + ['hello', 'world'*25], + ] + # see gh-27003 and gh-27053 - for ind in [[True, True], [0, 1], ..., np.array([0, 1], dtype='uint8')]: - for lop in [['a'*25, 'b'*25], ['', '']]: + for ind in inds: + for lop in lops: a = np.array(lop, dtype="T") assert_array_equal(a[ind], a) rop = ['d'*25, 'e'*25] From c35627f627469272c2dde74e54bb9359003d1650 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 13 Nov 2024 16:26:53 +0100 Subject: [PATCH 087/101] BUG: Ensure nditer always adds necessary casts (and tiny simplification) --- numpy/_core/src/multiarray/mapping.c | 10 ++++++---- numpy/_core/src/multiarray/nditer_constr.c | 6 ++++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/mapping.c 
b/numpy/_core/src/multiarray/mapping.c index e5a2a2de7fba..62204ce762f8 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -2034,7 +2034,6 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) goto fail; } - int allocated_array = 0; if (tmp_arr == NULL) { /* Fill extra op, need to swap first */ tmp_arr = mit->extra_op; @@ -2048,7 +2047,11 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) if (PyArray_CopyObject(tmp_arr, op) < 0) { goto fail; } - allocated_array = 1; + /* + * In this branch we copy directly from a newly allocated array which + * may have a new descr: + */ + descr = PyArray_DESCR(tmp_arr); } if (PyArray_MapIterCheckIndices(mit) < 0) { @@ -2096,8 +2099,7 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) // for non-REFCHK user DTypes. See gh-27057 for the prior discussion about this. if (PyArray_GetDTypeTransferFunction( 1, itemsize, itemsize, - allocated_array ? 
PyArray_DESCR(mit->extra_op) : PyArray_DESCR(self), - PyArray_DESCR(self), + descr, PyArray_DESCR(self), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } diff --git a/numpy/_core/src/multiarray/nditer_constr.c b/numpy/_core/src/multiarray/nditer_constr.c index 427dd3d876bc..ab1a540cb283 100644 --- a/numpy/_core/src/multiarray/nditer_constr.c +++ b/numpy/_core/src/multiarray/nditer_constr.c @@ -1315,8 +1315,10 @@ npyiter_check_casting(int nop, PyArrayObject **op, printf("\n"); #endif /* If the types aren't equivalent, a cast is necessary */ - if (op[iop] != NULL && !PyArray_EquivTypes(PyArray_DESCR(op[iop]), - op_dtype[iop])) { + npy_intp view_offset = NPY_MIN_INTP; + if (op[iop] != NULL && !(PyArray_SafeCast( + PyArray_DESCR(op[iop]), op_dtype[iop], &view_offset, + NPY_NO_CASTING, 1) && view_offset == 0)) { /* Check read (op -> temp) casting */ if ((op_itflags[iop] & NPY_OP_ITFLAG_READ) && !PyArray_CanCastArrayTo(op[iop], From dc345a142df02e1d1537283cec6a88ebfe2729f4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 14 Nov 2024 23:07:53 +0100 Subject: [PATCH 088/101] TYP: Fix incorrect baseclass of ``linalg.LinAlgError`` --- numpy/linalg/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 51f5f9ad4be1..8dc662a7303c 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -41,4 +41,4 @@ from numpy._pytesttester import PytestTester __all__: list[str] test: PytestTester -class LinAlgError(Exception): ... +class LinAlgError(ValueError): ... 
From 9ab12dcbce38a06f16e54c82f00baef4e5e9f7dc Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Mon, 4 Nov 2024 01:11:20 +0000 Subject: [PATCH 089/101] TST: Multiple modules in single pyf for gh-27622 --- .../upcoming_changes/27695.improvement.rst | 5 +++++ numpy/f2py/tests/src/regression/datonly.f90 | 17 +++++++++++++++++ numpy/f2py/tests/test_regression.py | 12 ++++++++++++ 3 files changed, 34 insertions(+) create mode 100644 doc/release/upcoming_changes/27695.improvement.rst create mode 100644 numpy/f2py/tests/src/regression/datonly.f90 diff --git a/doc/release/upcoming_changes/27695.improvement.rst b/doc/release/upcoming_changes/27695.improvement.rst new file mode 100644 index 000000000000..95584b6e90ce --- /dev/null +++ b/doc/release/upcoming_changes/27695.improvement.rst @@ -0,0 +1,5 @@ +``f2py`` handles multiple modules and exposes variables again +------------------------------------------------------------- +A regression has been fixed which allows F2PY users to expose variables to +Python in modules with only assignments, and also fixes situations where +multiple modules are present within a single source file. 
diff --git a/numpy/f2py/tests/src/regression/datonly.f90 b/numpy/f2py/tests/src/regression/datonly.f90 new file mode 100644 index 000000000000..67fc4aca82e3 --- /dev/null +++ b/numpy/f2py/tests/src/regression/datonly.f90 @@ -0,0 +1,17 @@ +module datonly + implicit none + integer, parameter :: max_value = 100 + real, dimension(:), allocatable :: data_array +end module datonly + +module dat + implicit none + integer, parameter :: max_= 1009 +end module dat + +subroutine simple_subroutine(ain, aout) + use dat, only: max_ + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + max_ +end subroutine simple_subroutine diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index e11ed1a0efa3..cbc81508ae42 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -24,6 +24,18 @@ def test_inout(self): assert np.allclose(x, [3, 1, 2]) +class TestDataOnlyMultiModule(util.F2PyTest): + # Check that modules without subroutines work + sources = [util.getpath("tests", "src", "regression", "datonly.f90")] + + @pytest.mark.slow + def test_mdat(self): + assert self.module.datonly.max_value == 100 + assert self.module.dat.max_ == 1009 + int_in = 5 + assert self.module.simple_subroutine(5) == 1014 + + class TestNegativeBounds(util.F2PyTest): # Check that negative bounds work correctly sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")] From 690e663a1d0a526bb4b2b08b3ed9d1ee171c97f2 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Mon, 4 Nov 2024 01:45:13 +0000 Subject: [PATCH 090/101] BUG: Handle multi-module files and common better Fixes gh-25186 gh-25337 gh-27622 --- numpy/f2py/auxfuncs.py | 2 +- numpy/f2py/f90mod_rules.py | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 88a9ff552343..77cf6ee2b167 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -44,7 +44,7 @@ 
'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', - 'process_f2cmap_dict' + 'process_f2cmap_dict', 'containscommon' ] diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index 9c52938f08da..b1cd15320657 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -97,9 +97,6 @@ def dadd(line, s=doc): usenames = getuseblocks(pymod) for m in findf90modules(pymod): - contains_functions_or_subroutines = any( - item for item in m["body"] if item["block"] in ["function", "subroutine"] - ) sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ m['name']], [] sargsp = [] @@ -120,8 +117,9 @@ def dadd(line, s=doc): outmess(f"\t\t\tSkipping {m['name']} since there are no public vars/func in this module...\n") continue - if m['name'] in usenames and not contains_functions_or_subroutines: - outmess(f"\t\t\tSkipping {m['name']} since it is in 'use'...\n") + # gh-25186 + if m['name'] in usenames and containscommon(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") continue if onlyvars: outmess('\t\t Variables: %s\n' % (' '.join(onlyvars))) From e0fa5f3a3726c94a0be872c78ae7cd381db46eff Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 18 Nov 2024 17:53:08 +0100 Subject: [PATCH 091/101] BUG: Fix repeat for strings and accumulate API logic (#27773) Backport of gh-27773 but not including fixes to allow strings in accumulation. This fixes threethings: 1. `needs_api` was wrongly overwritten, this was just a line not deleted earlier. 2. `repeat` was just broken with string dtype... I guess there was no actual test. 3. `repeat` internals passed on `cast_info` not by reference. I guess that isn't a bug, but it's weird. 
--- numpy/_core/src/multiarray/item_selection.c | 41 ++++++++++++--------- numpy/_core/src/umath/ufunc_object.c | 2 - numpy/_core/tests/test_stringdtype.py | 11 ++++++ 3 files changed, 34 insertions(+), 20 deletions(-) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 4d98ce0c350c..f3ce35f3092f 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -785,21 +785,21 @@ static NPY_GCC_OPT_3 inline int npy_fastrepeat_impl( npy_intp n_outer, npy_intp n, npy_intp nel, npy_intp chunk, npy_bool broadcast, npy_intp* counts, char* new_data, char* old_data, - npy_intp elsize, NPY_cast_info cast_info, int needs_refcounting) + npy_intp elsize, NPY_cast_info *cast_info, int needs_custom_copy) { npy_intp i, j, k; for (i = 0; i < n_outer; i++) { for (j = 0; j < n; j++) { npy_intp tmp = broadcast ? counts[0] : counts[j]; for (k = 0; k < tmp; k++) { - if (!needs_refcounting) { + if (!needs_custom_copy) { memcpy(new_data, old_data, chunk); } else { char *data[2] = {old_data, new_data}; npy_intp strides[2] = {elsize, elsize}; - if (cast_info.func(&cast_info.context, data, &nel, - strides, cast_info.auxdata) < 0) { + if (cast_info->func(&cast_info->context, data, &nel, + strides, cast_info->auxdata) < 0) { return -1; } } @@ -811,48 +811,53 @@ npy_fastrepeat_impl( return 0; } + +/* + * Helper to allow the compiler to specialize for all direct element copy + * cases (e.g. all numerical dtypes). 
+ */ static NPY_GCC_OPT_3 int npy_fastrepeat( npy_intp n_outer, npy_intp n, npy_intp nel, npy_intp chunk, npy_bool broadcast, npy_intp* counts, char* new_data, char* old_data, - npy_intp elsize, NPY_cast_info cast_info, int needs_refcounting) + npy_intp elsize, NPY_cast_info *cast_info, int needs_custom_copy) { - if (!needs_refcounting) { + if (!needs_custom_copy) { if (chunk == 1) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 2) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 4) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 8) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 16) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 32) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } } return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, elsize, - cast_info, needs_refcounting); + cast_info, needs_custom_copy); } @@ -872,7 +877,6 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) char *new_data, *old_data; NPY_cast_info cast_info; NPY_ARRAYMETHOD_FLAGS flags; - int needs_refcounting; repeats = (PyArrayObject *)PyArray_ContiguousFromAny(op, NPY_INTP, 0, 1); if (repeats == NULL) { @@ -897,7 +901,6 @@ 
PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) aop = (PyArrayObject *)ap; n = PyArray_DIM(aop, axis); NPY_cast_info_init(&cast_info); - needs_refcounting = PyDataType_REFCHK(PyArray_DESCR(aop)); if (!broadcast && PyArray_SIZE(repeats) != n) { PyErr_Format(PyExc_ValueError, @@ -947,16 +950,18 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) n_outer *= PyArray_DIMS(aop)[i]; } - if (needs_refcounting) { + int needs_custom_copy = 0; + if (PyDataType_REFCHK(PyArray_DESCR(ret))) { + needs_custom_copy = 1; if (PyArray_GetDTypeTransferFunction( - 1, elsize, elsize, PyArray_DESCR(aop), PyArray_DESCR(aop), 0, + 1, elsize, elsize, PyArray_DESCR(aop), PyArray_DESCR(ret), 0, &cast_info, &flags) < 0) { goto fail; } } if (npy_fastrepeat(n_outer, n, nel, chunk, broadcast, counts, new_data, - old_data, elsize, cast_info, needs_refcounting) < 0) { + old_data, elsize, &cast_info, needs_custom_copy) < 0) { goto fail; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 6bd02b0fec87..fa76455243dd 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -2916,8 +2916,6 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count); - needs_api = PyDataType_REFCHK(descrs[0]); - if (!needs_api) { NPY_BEGIN_THREADS_THRESHOLDED(count); } diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index ce4d70b624ae..8b3bfc26d709 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1574,6 +1574,17 @@ def test_unset_na_coercion(): arr == op +def test_repeat(string_array): + res = string_array.repeat(1000) + # Create an empty array with expanded dimension, and fill it. Then, + # reshape it to the expected result. + expected = np.empty_like(string_array, shape=string_array.shape + (1000,)) + expected[...] 
= string_array[:, np.newaxis] + expected = expected.reshape(-1) + + assert_array_equal(res, expected, strict=True) + + class TestImplementation: """Check that strings are stored in the arena when possible. From f78a1c9084d9cc3875ceb71d12b4b05f84da8f3e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 18 Nov 2024 13:31:08 -0700 Subject: [PATCH 092/101] MAINT: Update MyPy tests and requirements from main. --- .github/workflows/mypy.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 726e6b839051..f93587076493 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -47,14 +47,14 @@ jobs: matrix: os_python: - [ubuntu-latest, '3.12'] - - [windows-2019, '3.11'] - - [macos-12, '3.10'] + - [windows-latest, '3.11'] + - [macos-latest, '3.10'] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: ${{ matrix.os_python[1] }} - name: Install dependencies From 785fcebcce0e124200795148cf08ddad7bfb1191 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 20 Nov 2024 14:47:59 +0100 Subject: [PATCH 093/101] BUG: Ensure context path is taken in masked array array-wrap The unary comparison "domains" can return masked (the others can't so there is a dissonance here that doesn't really add up). This adds a force-cast to boolean, since the domain result only makes sense as boolean, but the `masked` scalar is float64... Now, why did this change in NumPy 2? It didn't... Both before and after a `TypeError` was raised (incorrectly) and the `context` path was then skipped completely (the ufunc machinery has a try/except). What changed is that I removed `__array_prepare__` because it had basically no use. 
Turns out, that in this error path (and only there) it was important because the "context" path was not taken, but `__array_prepare__` called `__array_finalize__` so it also applied the mask. For unary ufuncs, it thus neatly (but very round-about) fills the bad logic here. (For non-unary ufuncs, the "domain" functions don't return masked arrays so the path cannot happen) A reviewer with a keen eye may notice that the old code returned a masked array rather than the masked constant here. Note, however, that this is specific to the incorrectly taken no-context path, because the other path doesn't have the "return scalar" logic (which is probably also a bug, but who is counting). Closes gh-25635 --- numpy/ma/core.py | 6 +++++- numpy/ma/tests/test_core.py | 4 ++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 01eb8f9415a9..9e38bf1046d4 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -3153,7 +3153,11 @@ def __array_wrap__(self, obj, context=None, return_scalar=False): if domain is not None: # Take the domain, and make sure it's a ndarray with np.errstate(divide='ignore', invalid='ignore'): - d = filled(domain(*input_args), True) + # The result may be masked for two (unary) domains. + # That can't really be right as some domains drop + # the mask and some don't behaving differently here.
+ d = domain(*input_args).astype(bool, copy=False) + d = filled(d, True) if d.any(): # Fill the result where the domain is wrong diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 970ae2875493..ab4f46422109 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1178,6 +1178,10 @@ def test_basic_ufuncs(self): assert_equal(np.greater_equal(x, y), greater_equal(xm, ym)) assert_equal(np.conjugate(x), conjugate(xm)) + def test_basic_ufuncs_masked(self): + # Mostly regression test for gh-25635 + assert np.sqrt(np.ma.masked) is np.ma.masked + def test_count_func(self): # Tests count assert_equal(1, count(1)) From c4593073cafdcdd9f833a2dc8a13e5ee95313155 Mon Sep 17 00:00:00 2001 From: mayeut Date: Sun, 24 Nov 2024 10:43:41 +0100 Subject: [PATCH 094/101] CI: skip ninja installation in linux_qemu workflows The ninja used in the workflow is the one from the host. Skipping ninja installation in the container allows working around issues that could arise when building it from source as is currently the case with riscv64.
--- .github/workflows/linux_qemu.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index d44fc365973b..4ef74bcfa7f8 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -115,7 +115,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@v4.1.1 + uses: actions/cache@v4.1.2 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -141,7 +141,9 @@ jobs: rm -f /usr/bin/ld.bfd && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld.bfd /usr/bin/ld.bfd && rm -f /usr/bin/ninja && ln -s /host/usr/bin/ninja /usr/bin/ninja && git config --global --add safe.directory /numpy && - python -m pip install -r /numpy/requirements/build_requirements.txt && + # No need to build ninja from source, the host ninja is used for the build + grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && + python -m pip install -r /tmp/build_requirements.txt && python -m pip install pytest pytest-xdist hypothesis typing_extensions && rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " From a777588e0b4e13189dba98e8a303a0a4bca9ea70 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 24 Nov 2024 14:40:18 -0700 Subject: [PATCH 095/101] CI: Update circleci doc build from main. This also moves to spin 0.14 and updates a number of related files. 
[skip cirrus] [skip azp] --- .circleci/config.yml | 34 ++-- .github/workflows/macos.yml | 22 +- .spin/cmds.py | 304 ++++++---------------------- environment.yml | 2 +- requirements/build_requirements.txt | 2 +- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 2 +- requirements/doc_requirements.txt | 3 + 8 files changed, 103 insertions(+), 268 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 3014cb5c5074..8c2b443f1e84 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.8 + - image: cimg/python:3.11.10 working_directory: ~/repo @@ -54,29 +54,22 @@ jobs: command: | python3.11 -m venv venv . venv/bin/activate - pip install --progress-bar=off -r requirements/test_requirements.txt + pip install --progress-bar=off -r requirements/test_requirements.txt \ + -r requirements/build_requirements.txt \ + -r requirements/ci_requirements.txt # get newer, pre-release versions of critical packages pip install --progress-bar=off --pre -r requirements/doc_requirements.txt # then install numpy HEAD, which will override the version installed above - pip install . --config-settings=setup-args="-Dallow-noblas=true" - - - run: - name: create release notes - command: | - . venv/bin/activate - VERSION=$(pip show numpy | grep Version: | cut -d ' ' -f 2 | cut -c 1-5) - towncrier build --version $VERSION --yes - ./tools/ci/test_all_newsfragments_used.py + spin build --with-scipy-openblas=64 -j 2 - run: name: build devdocs w/ref warnings command: | . 
venv/bin/activate - cd doc # Don't use -q, show warning summary" - SPHINXOPTS="-W -n" make -e html - if [[ $(find build/html -type f | wc -l) -lt 1000 ]]; then - echo "doc build failed: build/html is empty" + SPHINXOPTS="-W -n" spin docs + if [[ $(find doc/build/html -type f | wc -l) -lt 1000 ]]; then + echo "doc build failed: doc/build/html is empty" exit -1 fi @@ -95,10 +88,17 @@ jobs: # destination: neps - run: - name: run refguide-check + name: check doctests command: | . venv/bin/activate - python tools/refguide_check.py -v + spin check-docs -v + spin check-tutorials -v + # Currently, this does two checks not done by check-docs: + # - validates ReST blocks (via validate_rst_syntax) + # - checks that all of a module's `__all__` is reflected in the + # module-level docstring autosummary + echo calling python3 tools/refguide_check.py -v + python3 tools/refguide_check.py -v - persist_to_workspace: root: ~/repo diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index c941c46fd2bc..62fd24a4e337 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -1,4 +1,4 @@ -name: macOS tests (meson) +name: macOS tests on: pull_request: @@ -44,7 +44,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -53,7 +53,7 @@ jobs: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - name: Setup Miniforge - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4 + uses: conda-incubator/setup-miniconda@d2e6a045a86077fb6cad6f5adf368e9076ddaa8d # v3.1.0 with: python-version: ${{ matrix.python-version }} channels: conda-forge @@ -68,7 +68,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). 
Could be # replaced by a conda-lock based approach in the future. - name: Cache conda environment - uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 @@ -103,7 +103,8 @@ jobs: accelerate: - name: Accelerate (LP64, ILP64) - ${{ matrix.build_runner[1] }} + name: Accelerate - ${{ matrix.build_runner[1] }} - ${{ matrix.version }} + # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' runs-on: ${{ matrix.build_runner[0] }} strategy: @@ -112,6 +113,7 @@ jobs: build_runner: - [ macos-13, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] + version: ["3.10", "3.13t"] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -119,15 +121,21 @@ jobs: submodules: recursive fetch-tags: true - - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 + - uses: quansight-labs/setup-python@b9ab292c751a42bcd2bb465b7fa202ea2c3f5796 # v5.3.1 with: - python-version: '3.10' + python-version: ${{ matrix.version }} - uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 if: ${{ matrix.build_runner[0] == 'macos-13' }} with: xcode-version: '14.3' + # TODO: remove cython nightly install when cython does a release + - name: Install nightly Cython + if: matrix.version == '3.13t' + run: | + pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython + - name: Install dependencies run: | pip install -r requirements/build_requirements.txt diff --git a/.spin/cmds.py b/.spin/cmds.py index 0773578de913..ee9fa38346a7 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -1,13 +1,11 @@ import os import shutil import pathlib -import shutil -import pathlib import importlib import subprocess import click -from spin import util +import spin from spin.cmds import meson @@ -38,8 +36,7 @@ def 
_get_numpy_tools(filename): "revision-range", required=True ) -@click.pass_context -def changelog(ctx, token, revision_range): +def changelog(token, revision_range): """👩 Get change log for provided revision range \b @@ -74,71 +71,20 @@ def changelog(ctx, token, revision_range): ) -@click.command() -@click.option( - "-j", "--jobs", - help="Number of parallel tasks to launch", - type=int -) -@click.option( - "--clean", is_flag=True, - help="Clean build directory before build" -) -@click.option( - "-v", "--verbose", is_flag=True, - help="Print all build output, even installation" -) @click.option( "--with-scipy-openblas", type=click.Choice(["32", "64"]), default=None, help="Build with pre-installed scipy-openblas32 or scipy-openblas64 wheel" ) -@click.argument("meson_args", nargs=-1) -@click.pass_context -def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose=False, quiet=False, *args, **kwargs): - """🔧 Build package with Meson/ninja and install - - MESON_ARGS are passed through e.g.: - - spin build -- -Dpkg_config_path=/lib64/pkgconfig - - The package is installed to build-install - - By default builds for release, to be able to use a debugger set CFLAGS - appropriately. 
For example, for linux use - - CFLAGS="-O0 -g" spin build - """ - # XXX keep in sync with upstream build +@spin.util.extend_command(spin.cmds.meson.build) +def build(*, parent_callback, with_scipy_openblas, **kwargs): if with_scipy_openblas: _config_openblas(with_scipy_openblas) - ctx.params.pop("with_scipy_openblas", None) - ctx.forward(meson.build) + parent_callback(**kwargs) -@click.command() -@click.argument("sphinx_target", default="html") -@click.option( - "--clean", is_flag=True, - default=False, - help="Clean previously built docs before building" -) -@click.option( - "--build/--no-build", - "first_build", - default=True, - help="Build numpy before generating docs", -) -@click.option( - '--jobs', '-j', - metavar='N_JOBS', - # Avoids pydata_sphinx_theme extension warning from default="auto". - default="1", - help=("Number of parallel build jobs." - "Can be set to `auto` to use all cores.") -) -@click.pass_context -def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): +@spin.util.extend_command(spin.cmds.meson.docs) +def docs(*, parent_callback, **kwargs): """📖 Build Sphinx documentation By default, SPHINXOPTS="-W", raising errors on warnings. 
@@ -159,22 +105,12 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): spin docs dist """ - meson.docs.ignore_unknown_options = True - - # See https://github.com/scientific-python/spin/pull/199 - # Can be changed when spin updates to 0.11, and moved to pyproject.toml - if clean: - clean_dirs = [ - './doc/build/', - './doc/source/reference/generated', - './doc/source/reference/random/bit_generators/generated', - './doc/source/reference/random/generated', - ] - - for target_dir in clean_dirs: - if os.path.isdir(target_dir): - print(f"Removing {target_dir!r}") - shutil.rmtree(target_dir) + kwargs['clean_dirs'] = [ + './doc/build/', + './doc/source/reference/generated', + './doc/source/reference/random/bit_generators/generated', + './doc/source/reference/random/generated', + ] # Run towncrier without staging anything for commit. This is the way to get # release notes snippets included in a local doc build. @@ -184,11 +120,14 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): with open(outfile, 'w') as f: f.write(p.stdout) - ctx.forward(meson.docs) + parent_callback(**kwargs) + + +# Override default jobs to 1 +jobs_param = next(p for p in docs.params if p.name == 'jobs') +jobs_param.default = 1 -@click.command() -@click.argument("pytest_args", nargs=-1) @click.option( "-m", "markexpr", @@ -196,101 +135,25 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): default="not slow", help="Run tests with the given markers" ) -@click.option( - "-j", - "n_jobs", - metavar='N_JOBS', - default="1", - help=("Number of parallel jobs for testing. " - "Can be set to `auto` to use all cores.") -) -@click.option( - "--tests", "-t", - metavar='TESTS', - help=(""" -Which tests to run. 
Can be a module, function, class, or method: - - \b - numpy.random - numpy.random.tests.test_generator_mt19937 - numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric - numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric::test_edge_cases - \b -""") -) -@click.option( - '--verbose', '-v', is_flag=True, default=False -) -@click.pass_context -def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): - """🔧 Run tests - - PYTEST_ARGS are passed through directly to pytest, e.g.: - - spin test -- --pdb - - To run tests on a directory or file: - - \b - spin test numpy/linalg - spin test numpy/linalg/tests/test_linalg.py - - To report the durations of the N slowest tests: - - spin test -- --durations=N - - To run tests that match a given pattern: - - \b - spin test -- -k "geometric" - spin test -- -k "geometric and not rgeometric" - +@spin.util.extend_command(spin.cmds.meson.test) +def test(*, parent_callback, pytest_args, tests, markexpr, **kwargs): + """ By default, spin will run `-m 'not slow'`. To run the full test suite, use `spin test -m full` - - For more, see `pytest --help`. 
""" # noqa: E501 if (not pytest_args) and (not tests): - pytest_args = ('numpy',) + pytest_args = ('--pyargs', 'numpy') if '-m' not in pytest_args: - if len(pytest_args) == 1 and not tests: - tests = pytest_args[0] - pytest_args = () if markexpr != "full": pytest_args = ('-m', markexpr) + pytest_args - if (n_jobs != "1") and ('-n' not in pytest_args): - pytest_args = ('-n', str(n_jobs)) + pytest_args - - if tests and not ('--pyargs' in pytest_args): - pytest_args = ('--pyargs', tests) + pytest_args - - if verbose: - pytest_args = ('-v',) + pytest_args - - ctx.params['pytest_args'] = pytest_args - - for extra_param in ('markexpr', 'n_jobs', 'tests', 'verbose'): - del ctx.params[extra_param] - ctx.forward(meson.test) + kwargs['pytest_args'] = pytest_args + parent_callback(**{'pytest_args': pytest_args, 'tests': tests, **kwargs}) -@click.command() -@click.argument("pytest_args", nargs=-1) -@click.option( - "-j", - "n_jobs", - metavar='N_JOBS', - default="1", - help=("Number of parallel jobs for testing. " - "Can be set to `auto` to use all cores.") -) -@click.option( - '--verbose', '-v', is_flag=True, default=False -) -@click.pass_context -def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): +@spin.util.extend_command(test, doc='') +def check_docs(*, parent_callback, pytest_args, **kwargs): """🔧 Run doctests of objects in the public API. 
PYTEST_ARGS are passed through directly to pytest, e.g.: @@ -327,14 +190,9 @@ def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): import scipy_doctest except ModuleNotFoundError as e: raise ModuleNotFoundError("scipy-doctest not installed") from e - if (not pytest_args): - pytest_args = ('numpy',) - - if (n_jobs != "1") and ('-n' not in pytest_args): - pytest_args = ('-n', str(n_jobs)) + pytest_args - if verbose: - pytest_args = ('-v',) + pytest_args + if (not pytest_args): + pytest_args = ('--pyargs', 'numpy') # turn doctesting on: doctest_args = ( @@ -344,39 +202,21 @@ def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): pytest_args = pytest_args + doctest_args - ctx.params['pytest_args'] = pytest_args - - for extra_param in ('n_jobs', 'verbose'): - del ctx.params[extra_param] + parent_callback(**{'pytest_args': pytest_args, **kwargs}) - ctx.forward(meson.test) - -@click.command() -@click.argument("pytest_args", nargs=-1) -@click.option( - "-j", - "n_jobs", - metavar='N_JOBS', - default="1", - help=("Number of parallel jobs for testing. " - "Can be set to `auto` to use all cores.") -) -@click.option( - '--verbose', '-v', is_flag=True, default=False -) -@click.pass_context -def check_tutorials(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): +@spin.util.extend_command(test, doc='') +def check_tutorials(*, parent_callback, pytest_args, **kwargs): """🔧 Run doctests of user-facing rst tutorials. 
- To test all tutorials in the numpy/doc/source/user/ directory, use + To test all tutorials in the numpy doc/source/user/ directory, use spin check-tutorials To run tests on a specific RST file: \b - spin check-tutorials numpy/doc/source/user/absolute-beginners.rst + spin check-tutorials doc/source/user/absolute-beginners.rst \b Note: @@ -393,20 +233,14 @@ def check_tutorials(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): # - `spin check-tutorials path/to/rst`, and # - `spin check-tutorials path/to/rst -- --durations=3` if (not pytest_args) or all(arg.startswith('-') for arg in pytest_args): - pytest_args = ('numpy/doc/source/user',) + pytest_args + pytest_args = ('doc/source/user',) + pytest_args # make all paths relative to the numpy source folder pytest_args = tuple( - str(curdir / '..' / '..' / arg) if not arg.startswith('-') else arg + str(curdir / '..' / arg) if not arg.startswith('-') else arg for arg in pytest_args ) - if (n_jobs != "1") and ('-n' not in pytest_args): - pytest_args = ('-n', str(n_jobs)) + pytest_args - - if verbose: - pytest_args = ('-v',) + pytest_args - # turn doctesting on: doctest_args = ( '--doctest-glob=*rst', @@ -414,12 +248,7 @@ def check_tutorials(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): pytest_args = pytest_args + doctest_args - ctx.params['pytest_args'] = pytest_args - - for extra_param in ('n_jobs', 'verbose'): - del ctx.params[extra_param] - - ctx.forward(meson.test) + parent_callback(**{'pytest_args': pytest_args, **kwargs}) # From scipy: benchmarks/benchmarks/common.py @@ -446,7 +275,7 @@ def _set_mem_rlimit(max_mem=None): def _commit_to_sha(commit): - p = util.run(['git', 'rev-parse', commit], output=False, echo=False) + p = spin.util.run(['git', 'rev-parse', commit], output=False, echo=False) if p.returncode != 0: raise( click.ClickException( @@ -459,10 +288,10 @@ def _commit_to_sha(commit): def _dirty_git_working_dir(): # Changes to the working directory - p0 = util.run(['git', 'diff-files', '--quiet']) 
+ p0 = spin.util.run(['git', 'diff-files', '--quiet']) # Staged changes - p1 = util.run(['git', 'diff-index', '--quiet', '--cached', 'HEAD']) + p1 = spin.util.run(['git', 'diff-index', '--quiet', '--cached', 'HEAD']) return (p0.returncode != 0 or p1.returncode != 0) @@ -487,7 +316,7 @@ def _run_asv(cmd): except (ImportError, RuntimeError): pass - util.run(cmd, cwd='benchmarks', env=env) + spin.util.run(cmd, cwd='benchmarks', env=env) @click.command() @click.option( @@ -510,7 +339,7 @@ def lint(ctx, branch, uncommitted): Examples: \b - For lint checks of your development brach with `main` or a custom branch: + For lint checks of your development branch with `main` or a custom branch: \b $ spin lint # defaults to main @@ -558,8 +387,9 @@ def lint(ctx, branch, uncommitted): required=False, nargs=-1 ) +@meson.build_dir_option @click.pass_context -def bench(ctx, tests, compare, verbose, quick, commits): +def bench(ctx, tests, compare, verbose, quick, commits, build_dir): """🏋 Run benchmarks. 
\b @@ -611,9 +441,9 @@ def bench(ctx, tests, compare, verbose, quick, commits): ) ctx.invoke(build) - meson._set_pythonpath() + meson._set_pythonpath(build_dir) - p = util.run( + p = spin.util.run( ['python', '-c', 'import numpy as np; print(np.__version__)'], cwd='benchmarks', echo=False, @@ -647,29 +477,20 @@ def bench(ctx, tests, compare, verbose, quick, commits): _run_asv(cmd_compare) -@click.command(context_settings={ - 'ignore_unknown_options': True -}) -@click.argument("python_args", metavar='', nargs=-1) -@click.pass_context -def python(ctx, python_args, *args, **kwargs): - """🐍 Launch Python shell with PYTHONPATH set - - OPTIONS are passed through directly to Python, e.g.: - - spin python -c 'import sys; print(sys.path)' - """ +@spin.util.extend_command(meson.python) +def python(*, parent_callback, **kwargs): env = os.environ env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') - ctx.forward(meson.python) + + parent_callback(**kwargs) @click.command(context_settings={ 'ignore_unknown_options': True }) @click.argument("ipython_args", metavar='', nargs=-1) -@click.pass_context -def ipython(ctx, ipython_args): +@meson.build_dir_option +def ipython(*, ipython_args, build_dir): """💻 Launch IPython shell with PYTHONPATH set OPTIONS are passed through directly to IPython, e.g.: @@ -679,16 +500,19 @@ def ipython(ctx, ipython_args): env = os.environ env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') + ctx = click.get_current_context() ctx.invoke(build) - ppath = meson._set_pythonpath() + ppath = meson._set_pythonpath(build_dir) print(f'💻 Launching IPython with PYTHONPATH="{ppath}"') + + # In spin >= 0.13.1, can replace with extended command, setting `pre_import` preimport = (r"import numpy as np; " r"print(f'\nPreimported NumPy {np.__version__} as np')") - util.run(["ipython", "--ignore-cwd", - f"--TerminalIPythonApp.exec_lines={preimport}"] + - list(ipython_args)) + spin.util.run(["ipython", "--ignore-cwd", + 
f"--TerminalIPythonApp.exec_lines={preimport}"] + + list(ipython_args)) @click.command(context_settings={"ignore_unknown_options": True}) @@ -702,6 +526,7 @@ def mypy(ctx): ctx.params['markexpr'] = 'full' ctx.forward(test) + @click.command(context_settings={ 'ignore_unknown_options': True }) @@ -747,8 +572,7 @@ def _config_openblas(blas_variant): help="NumPy version of release", required=False ) -@click.pass_context -def notes(ctx, version_override): +def notes(version_override): """🎉 Generate release notes and validate \b @@ -763,7 +587,7 @@ def notes(ctx, version_override): \b $ spin notes """ - project_config = util.get_config() + project_config = spin.util.get_config() version = version_override or project_config['project.version'] click.secho( @@ -774,7 +598,7 @@ def notes(ctx, version_override): # Check if `towncrier` is installed if not shutil.which("towncrier"): raise click.ClickException( - f"please install `towncrier` to use this command" + "please install `towncrier` to use this command" ) click.secho( @@ -783,7 +607,7 @@ def notes(ctx, version_override): ) # towncrier build --version 2.1 --yes cmd = ["towncrier", "build", "--version", version, "--yes"] - p = util.run(cmd=cmd, sys_exit=False, output=True, encoding="utf-8") + p = spin.util.run(cmd=cmd, sys_exit=False, output=True, encoding="utf-8") if p.returncode != 0: raise click.ClickException( f"`towncrier` failed returned {p.returncode} with error `{p.stderr}`" diff --git a/environment.yml b/environment.yml index e0d2ccdc1117..82f0856668c3 100644 --- a/environment.yml +++ b/environment.yml @@ -16,8 +16,8 @@ dependencies: - ninja - pkg-config - meson-python + - spin==0.13 - pip - - spin=0.8 # Unpin when spin 0.9.1 is released - ccache # For testing - pytest diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index 701867b64465..a51143a780e7 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,5 +1,5 @@ meson-python>=0.13.1 
Cython>=3.0.6 ninja -spin==0.8 +spin==0.13 build diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 215bc1229930..7b67bcb846a7 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ -spin +spin==0.13 # Keep this in sync with ci_requirements.txt scipy-openblas32==0.3.27.44.6 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 5bed94385819..3f6c2c1b77ae 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ -spin +spin==0.13 # Keep this in sync with ci32_requirements.txt scipy-openblas32==0.3.27.44.6 scipy-openblas64==0.3.27.44.6 diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index 79de7a9f0802..74ef448182af 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -16,3 +16,6 @@ pickleshare # needed to build release notes towncrier toml + +# for doctests, also needs pytz which is in test_requirements +scipy-doctest From d4be9e24e649a942ed380daed40d545baba4680d Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 4 Dec 2024 14:42:26 -0700 Subject: [PATCH 096/101] MAINT: Update submodules --- numpy/_core/src/common/pythoncapi-compat | 2 +- numpy/_core/src/highway | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 2d18aecd7b2f..0f1d42a10a3f 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 2d18aecd7b2f549d38a13e27b682ea4966f37bd8 +Subproject commit 0f1d42a10a3f594ad48894912396df31b2c2d55d diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index a97b5d371d69..68b0fdebffb1 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit a97b5d371d696564e206627a883b1341c65bd983 +Subproject commit 
68b0fdebffb14f3b8473fed1c33ce368efc431e7 From ac1398c07f41002a04470d77502be48550646bb7 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 28 Nov 2024 14:02:52 +0100 Subject: [PATCH 097/101] MAINT: Ensure correct handling for very large unicode strings In the future, we can handle these strings (in parts we already can maybe), but for now have to stick to `int` length because more of the code needs cleanup to actually use it safely. (For user dtypes this is less of a problem, although corner cases probably exist.) This adds necessary checks to avoid large unicode dtypes. --- numpy/_core/src/multiarray/common.c | 13 ++- numpy/_core/src/multiarray/convert_datatype.c | 4 + numpy/_core/src/multiarray/descriptor.c | 25 ++++-- numpy/_core/src/multiarray/dtypemeta.c | 10 ++- numpy/_core/src/umath/string_ufuncs.cpp | 9 ++ numpy/_core/tests/test_strings.py | 84 ++++++++++++++++++- 6 files changed, 130 insertions(+), 15 deletions(-) diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 655122ff7f09..ff71548842d5 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -62,7 +62,7 @@ NPY_NO_EXPORT PyArray_Descr * PyArray_DTypeFromObjectStringDiscovery( PyObject *obj, PyArray_Descr *last_dtype, int string_type) { - int itemsize; + npy_intp itemsize; if (string_type == NPY_STRING) { PyObject *temp = PyObject_Str(obj); @@ -75,6 +75,12 @@ PyArray_DTypeFromObjectStringDiscovery( if (itemsize < 0) { return NULL; } + if (itemsize > NPY_MAX_INT) { + /* We can allow this, but should audit code paths before we do. 
*/ + PyErr_SetString(PyExc_TypeError, + "string too large to store inside array."); + return NULL; + } } else if (string_type == NPY_UNICODE) { PyObject *temp = PyObject_Str(obj); @@ -86,6 +92,11 @@ PyArray_DTypeFromObjectStringDiscovery( if (itemsize < 0) { return NULL; } + if (itemsize > NPY_MAX_INT / 4) { + PyErr_SetString(PyExc_TypeError, + "string too large to store inside array."); + return NULL; + } itemsize *= 4; /* convert UCS4 codepoints to bytes */ } else { diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index fc1cd84883b3..45f410b3ff9c 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -2821,6 +2821,10 @@ cast_to_string_resolve_descriptors( return -1; } if (dtypes[1]->type_num == NPY_UNICODE) { + if (size > NPY_MAX_INT / 4) { + PyErr_SetString(PyExc_TypeError, "Result string too large."); + return -1; + } size *= 4; } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index a47a71d39196..1e8e6e337d0c 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -273,8 +273,16 @@ _convert_from_tuple(PyObject *obj, int align) if (PyDataType_ISUNSIZED(type)) { /* interpret next item as a typesize */ int itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); - - if (error_converting(itemsize)) { + if (type->type_num == NPY_UNICODE) { + if (itemsize > NPY_MAX_INT / 4) { + itemsize = -1; + } + else { + itemsize *= 4; + } + } + if (itemsize < 0) { + /* Error may or may not be set by PyIntAsInt. 
*/ PyErr_SetString(PyExc_ValueError, "invalid itemsize in generic type tuple"); Py_DECREF(type); @@ -284,12 +292,8 @@ _convert_from_tuple(PyObject *obj, int align) if (type == NULL) { return NULL; } - if (type->type_num == NPY_UNICODE) { - type->elsize = itemsize << 2; - } - else { - type->elsize = itemsize; - } + + type->elsize = itemsize; return type; } else if (type->metadata && (PyDict_Check(val) || PyDictProxy_Check(val))) { @@ -1855,7 +1859,10 @@ _convert_from_str(PyObject *obj, int align) */ case NPY_UNICODELTR: check_num = NPY_UNICODE; - elsize <<= 2; + if (elsize > (NPY_MAX_INT / 4)) { + goto fail; + } + elsize *= 4; break; case NPY_VOIDLTR: diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 316a61d31da4..6adb00d16925 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -494,12 +494,14 @@ string_discover_descr_from_pyobject( itemsize = PyUnicode_GetLength(obj); } if (itemsize != -1) { - if (cls->type_num == NPY_UNICODE) { - itemsize *= 4; - } - if (itemsize > NPY_MAX_INT) { + if (itemsize > NPY_MAX_INT || ( + cls->type_num == NPY_UNICODE && itemsize > NPY_MAX_INT / 4)) { PyErr_SetString(PyExc_TypeError, "string too large to store inside array."); + return NULL; + } + if (cls->type_num == NPY_UNICODE) { + itemsize *= 4; } PyArray_Descr *res = PyArray_DescrNewFromType(cls->type_num); if (res == NULL) { diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 2bc4ce20acd6..26fce9b61f54 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -643,6 +643,12 @@ string_addition_resolve_descriptors( PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { + /* NOTE: elsize is large enough now, but too much code still uses ints */ + if (given_descrs[0]->elsize + given_descrs[1]->elsize > NPY_MAX_INT) { + PyErr_SetString(PyExc_TypeError, "Result string too large."); + 
return _NPY_ERROR_OCCURRED_IN_CAST; + } + loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); if (loop_descrs[0] == NULL) { return _NPY_ERROR_OCCURRED_IN_CAST; @@ -650,11 +656,14 @@ string_addition_resolve_descriptors( loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]); if (loop_descrs[1] == NULL) { + Py_DECREF(loop_descrs[0]); return _NPY_ERROR_OCCURRED_IN_CAST; } loop_descrs[2] = PyArray_DescrNew(loop_descrs[0]); if (loop_descrs[2] == NULL) { + Py_DECREF(loop_descrs[0]); + Py_DECREF(loop_descrs[1]); return _NPY_ERROR_OCCURRED_IN_CAST; } loop_descrs[2]->elsize += loop_descrs[1]->elsize; diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index a94b52939b1d..e1798de917ae 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -5,7 +5,7 @@ import numpy as np from numpy.testing import assert_array_equal, assert_raises, IS_PYPY - +from numpy.testing._private.utils import requires_memory COMPARISONS = [ (operator.eq, np.equal, "=="), @@ -109,6 +109,88 @@ def test_float_to_string_cast(str_dt, float_dt): assert_array_equal(res, np.array(expected, dtype=str_dt)) +@pytest.mark.parametrize("str_dt", "US") +@pytest.mark.parametrize("size", [-1, np.iinfo(np.intc).max]) +def test_string_size_dtype_errors(str_dt, size): + if size > 0: + size = size // np.dtype(f"{str_dt}1").itemsize + 1 + + with pytest.raises(ValueError): + np.dtype((str_dt, size)) + with pytest.raises(TypeError): + np.dtype(f"{str_dt}{size}") + + +@pytest.mark.parametrize("str_dt", "US") +def test_string_size_dtype_large_repr(str_dt): + size = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + size_str = str(size) + + dtype = np.dtype((str_dt, size)) + assert size_str in dtype.str + assert size_str in str(dtype) + assert size_str in repr(dtype) + + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +def test_large_string_coercion_error(str_dt): + very_large = 
np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + try: + large_string = "A" * (very_large + 1) + except Exception: + # We may not be able to create this Python string on 32bit. + return + + class MyStr: + def __str__(self): + return large_string + + try: + # TypeError from NumPy, or OverflowError from 32bit Python. + with pytest.raises((TypeError, OverflowError)): + np.array([large_string], dtype=str_dt) + + # Same as above, but input has to be converted to a string. + with pytest.raises((TypeError, OverflowError)): + np.array([MyStr()], dtype=str_dt) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +def test_large_string_addition_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + + a = np.array(["A" * very_large], dtype=str_dt) + b = np.array("B", dtype=str_dt) + try: + with pytest.raises(TypeError): + np.add(a, b) + with pytest.raises(TypeError): + np.add(a, a) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + + +def test_large_string_cast(): + very_large = np.iinfo(np.intc).max // 4 + # Could be nice to test very large path, but it makes too many huge + # allocations right now (need non-legacy cast loops for this). + # a = np.array([], dtype=np.dtype(("S", very_large))) + # assert a.astype("U").dtype.itemsize == very_large * 4 + + a = np.array([], dtype=np.dtype(("S", very_large + 1))) + # It is not perfect but OK if this raises a MemoryError during setup + # (this happens due clunky code and/or buffer setup.) 
+ with pytest.raises((TypeError, MemoryError)): + a.astype("U") + + @pytest.mark.parametrize("dt", ["S", "U", "T"]) class TestMethods: From e00e7e4d8536fa492ef84327ea93f9615918ed69 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 3 Dec 2024 13:36:53 +0100 Subject: [PATCH 098/101] TST: Use skipif in test to signal that the test did nothing --- numpy/_core/tests/test_strings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index e1798de917ae..9fe4c2693599 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -141,7 +141,7 @@ def test_large_string_coercion_error(str_dt): large_string = "A" * (very_large + 1) except Exception: # We may not be able to create this Python string on 32bit. - return + pytest.skip("python failed to create huge string") class MyStr: def __str__(self): From 95ccab649b43bfcbd9b325399f89496134f059ea Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 4 Dec 2024 10:35:54 +0100 Subject: [PATCH 099/101] Add length information to exception Also add future proof guard, just in case we got a larger string in addition. --- numpy/_core/src/multiarray/common.c | 8 ++++---- numpy/_core/src/multiarray/convert_datatype.c | 3 ++- numpy/_core/src/umath/string_ufuncs.cpp | 14 +++++++++++--- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index ff71548842d5..9a7351e313ae 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -77,8 +77,8 @@ PyArray_DTypeFromObjectStringDiscovery( } if (itemsize > NPY_MAX_INT) { /* We can allow this, but should audit code paths before we do. 
*/ - PyErr_SetString(PyExc_TypeError, - "string too large to store inside array."); + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", itemsize); return NULL; } } @@ -93,8 +93,8 @@ PyArray_DTypeFromObjectStringDiscovery( return NULL; } if (itemsize > NPY_MAX_INT / 4) { - PyErr_SetString(PyExc_TypeError, - "string too large to store inside array."); + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", itemsize); return NULL; } itemsize *= 4; /* convert UCS4 codepoints to bytes */ diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 45f410b3ff9c..e83ef6076c39 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -2822,7 +2822,8 @@ cast_to_string_resolve_descriptors( } if (dtypes[1]->type_num == NPY_UNICODE) { if (size > NPY_MAX_INT / 4) { - PyErr_SetString(PyExc_TypeError, "Result string too large."); + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", size); return -1; } size *= 4; diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 26fce9b61f54..0e28240ee5f0 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -643,9 +643,17 @@ string_addition_resolve_descriptors( PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { - /* NOTE: elsize is large enough now, but too much code still uses ints */ - if (given_descrs[0]->elsize + given_descrs[1]->elsize > NPY_MAX_INT) { - PyErr_SetString(PyExc_TypeError, "Result string too large."); + npy_intp result_itemsize = given_descrs[0]->elsize + given_descrs[1]->elsize; + + /* NOTE: elsize can fit more than MAX_INT, but some code may still use ints */ + if (result_itemsize > NPY_MAX_INT || result_itemsize < 0) { + npy_intp length = result_itemsize; + if (given_descrs[0]->type == 
NPY_UNICODE) { + length /= 4; + } + PyErr_Format(PyExc_TypeError, + "addition result string of length %zd is too large to store inside array.", + length); return _NPY_ERROR_OCCURRED_IN_CAST; } From 805b8c50294fbcd8884b1823dfa456a747121706 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Mon, 9 Dec 2024 11:46:13 +0200 Subject: [PATCH 100/101] TEST: cleanups [skip cirrus][skip azp] (#27943) --- numpy/f2py/tests/util.py | 1 - numpy/ma/tests/test_core.py | 10 +++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 9cad71a9cf5c..61e20c13ffc8 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -59,7 +59,6 @@ def check_language(lang, code_snippet=None): return runmeson.returncode == 0 finally: shutil.rmtree(tmpdir) - return False fortran77_code = ''' diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index ab4f46422109..4744c0d1dd2e 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -23,7 +23,7 @@ import numpy._core.umath as umath from numpy.exceptions import AxisError from numpy.testing import ( - assert_raises, assert_warns, suppress_warnings, IS_WASM + assert_raises, assert_warns, suppress_warnings, IS_WASM, temppath ) from numpy.testing._private.utils import requires_memory from numpy import ndarray @@ -1015,6 +1015,14 @@ def test_object_with_array(self): mx[1].data[0] = 0. assert_(mx2[0] == 0.) + def test_maskedarray_tofile_raises_notimplementederror(self): + xm = masked_array([1, 2, 3], mask=[False, True, False]) + # Test case to check the NotImplementedError. + # It is not implemented at this point of time. We can change this in future + with temppath(suffix='.npy') as path: + with pytest.raises(NotImplementedError): + np.save(path, xm) + class TestMaskedArrayArithmetic: # Base test class for MaskedArrays. 
From ee406d06697f93750242d5ac8ec08ac2edb054bc Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 7 Dec 2024 18:36:03 -0700 Subject: [PATCH 101/101] MAINT: Fix cirrus MacOs wheel builds [wheel build] --- tools/ci/cirrus_wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index 99aa6ee2b50f..1516725593df 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -78,7 +78,7 @@ macosx_arm64_task: build_script: | brew install micromamba gfortran - micromamba shell init -s bash -p ~/micromamba + micromamba shell init -s bash --root-prefix ~/micromamba source ~/.bash_profile micromamba create -n numpydev