API: Adjust tests after removing star imports · numpy/numpy@9bbae54

Commit 9bbae54

API: Adjust tests after removing star imports
1 parent 110b953 commit 9bbae54

30 files changed: +162, -363 lines

numpy/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -203,7 +203,7 @@
     nanmedian, nanmin, nanpercentile, nanprod, nanquantile, nanstd,
     nansum, nanvar, ndenumerate, ndindex, ogrid, packbits, pad,
     percentile, piecewise, place, poly, poly1d, polyadd, polyder,
-    polydiv, polyfit, polyint, polymul, polynomial, polysub, polyval,
+    polydiv, polyfit, polyint, polymul, polysub, polyval,
     put_along_axis, quantile, r_, ravel_multi_index, real, real_if_close,
     roots, rot90, row_stack, s_, save, savetxt, savez, savez_compressed,
     select, setdiff1d, setxor1d, show_runtime, sinc, sort_complex, split,
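
For context: this hunk only drops the flat `polynomial` name from the re-export list; the `numpy.polynomial` subpackage itself is untouched. A minimal sketch, assuming the subpackage import path remains available:

    import numpy as np
    from numpy.polynomial import Polynomial

    # The subpackage is still importable even though `polynomial` is no
    # longer re-exported through this list of numpy.lib names.
    p = Polynomial.fit([0.0, 1.0, 2.0], [1.0, 3.0, 5.0], deg=1)
    print(p(3.0))  # approximately 7.0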

numpy/__init__.pyi

Lines changed: 0 additions & 3 deletions
@@ -463,14 +463,11 @@ from numpy.lib.function_base import (
     kaiser as kaiser,
     trapz as trapz,
     i0 as i0,
-    add_newdoc as add_newdoc,
-    add_docstring as add_docstring,
     meshgrid as meshgrid,
     delete as delete,
     insert as insert,
     append as append,
     interp as interp,
-    add_newdoc_ufunc as add_newdoc_ufunc,
     quantile as quantile,
 )

numpy/core/code_generators/ufunc_docstrings.py

Lines changed: 2 additions & 2 deletions
@@ -1713,7 +1713,7 @@ def add_newdoc(place, name, doc):
     False
     >>> np.isfinite(np.inf)
     False
-    >>> np.isfinite(np.NINF)
+    >>> np.isfinite(-np.inf)
     False
     >>> np.isfinite([np.log(-1.),1.,np.log(0)])
     array([False, True, False])
@@ -1765,7 +1765,7 @@ def add_newdoc(place, name, doc):
     True
     >>> np.isinf(np.nan)
     False
-    >>> np.isinf(np.NINF)
+    >>> np.isinf(-np.inf)
     True
     >>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
     array([ True, True, False, False])
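
The doctest change swaps the removed `np.NINF` alias for the literal `-np.inf`; the results are unchanged. A quick check of the updated examples:

    import numpy as np

    # Same values the updated docstrings show, written with -np.inf:
    print(np.isfinite(-np.inf))                      # False
    print(np.isinf(-np.inf))                         # True
    print(np.isinf([np.inf, -np.inf, 1.0, np.nan]))  # [ True  True False False]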

numpy/core/function_base.pyi

Lines changed: 0 additions & 1 deletion
@@ -178,7 +178,6 @@ def geomspace(
     axis: SupportsIndex = ...,
 ) -> NDArray[Any]: ...
 
-# Re-exported to `np.lib.function_base`
 def add_newdoc(
     place: str,
     obj: str,

numpy/core/src/multiarray/mapping.c

Lines changed: 1 addition & 1 deletion
@@ -294,7 +294,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
     /*
      * The choice of only unpacking `2*NPY_MAXDIMS` items is historic.
      * The longest "reasonable" index that produces a result of <= 32 dimensions
-     * is `(0,)*np.MAXDIMS + (None,)*np.MAXDIMS`. Longer indices can exist, but
+     * is `(0,)*ncu.MAXDIMS + (None,)*ncu.MAXDIMS`. Longer indices can exist, but
      * are uncommon.
      */
     PyObject *raw_indices[NPY_MAXDIMS*2];
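
The comment describes the longest "reasonable" index: one integer per existing dimension plus one `None` per new axis. A small illustration in Python, assuming the 32-dimension limit (MAXDIMS) that the comment refers to in this NumPy version:

    import numpy as np

    a = np.zeros((1,) * 32)          # array at the 32-dimension limit
    idx = (0,) * 32 + (None,) * 32   # 64 items: the 2*NPY_MAXDIMS unpacking limit
    print(a[idx].shape)              # (1, 1, ..., 1), still 32 dimensions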

numpy/core/tests/test_api.py

Lines changed: 2 additions & 1 deletion
@@ -1,6 +1,7 @@
 import sys
 
 import numpy as np
+import numpy.core.umath as ncu
 from numpy.core._rational_tests import rational
 import pytest
 from numpy.testing import (
@@ -92,7 +93,7 @@ def test_array_array():
 
     # test recursion
     nested = 1.5
-    for i in range(np.MAXDIMS):
+    for i in range(ncu.MAXDIMS):
        nested = [nested]
 
     # no error
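
`MAXDIMS` is no longer re-exported as `np.MAXDIMS`, so the test now reads it from `numpy.core.umath` (imported as `ncu` above). A condensed version of what the updated check exercises; note `numpy.core.umath` is an internal module, not public API:

    import numpy as np
    import numpy.core.umath as ncu  # internal; MAXDIMS is no longer np.MAXDIMS

    nested = 1.5
    for _ in range(ncu.MAXDIMS):
        nested = [nested]

    arr = np.array(nested)                   # coerces without error
    assert arr.shape == (1,) * ncu.MAXDIMS   # one axis per nesting level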

numpy/core/tests/test_array_coercion.py

Lines changed: 12 additions & 12 deletions
@@ -10,8 +10,8 @@
 from pytest import param
 
 import numpy as np
+import numpy.core._multiarray_umath as ncu
 from numpy.core._rational_tests import rational
-from numpy.core._multiarray_umath import _discover_array_parameters
 
 from numpy.testing import (
     assert_array_equal, assert_warns, IS_PYPY)
@@ -213,11 +213,11 @@ def test_char_special_case_deep(self):
         # Check that the character special case errors correctly if the
         # array is too deep:
         nested = ["string"]  # 2 dimensions (due to string being sequence)
-        for i in range(np.MAXDIMS - 2):
+        for i in range(ncu.MAXDIMS - 2):
             nested = [nested]
 
         arr = np.array(nested, dtype='c')
-        assert arr.shape == (1,) * (np.MAXDIMS - 1) + (6,)
+        assert arr.shape == (1,) * (ncu.MAXDIMS - 1) + (6,)
         with pytest.raises(ValueError):
             np.array([nested], dtype="c")
 
@@ -371,7 +371,7 @@ def test_default_dtype_instance(self, dtype_char):
         else:
             dtype = np.dtype(dtype_char)
 
-        discovered_dtype, _ = _discover_array_parameters([], type(dtype))
+        discovered_dtype, _ = ncu._discover_array_parameters([], type(dtype))
 
         assert discovered_dtype == dtype
         assert discovered_dtype.itemsize == dtype.itemsize
@@ -486,11 +486,11 @@ class TestNested:
     def test_nested_simple(self):
         initial = [1.2]
         nested = initial
-        for i in range(np.MAXDIMS - 1):
+        for i in range(ncu.MAXDIMS - 1):
             nested = [nested]
 
         arr = np.array(nested, dtype="float64")
-        assert arr.shape == (1,) * np.MAXDIMS
+        assert arr.shape == (1,) * ncu.MAXDIMS
         with pytest.raises(ValueError):
             np.array([nested], dtype="float64")
 
@@ -499,15 +499,15 @@ def test_nested_simple(self):
 
         arr = np.array([nested], dtype=object)
         assert arr.dtype == np.dtype("O")
-        assert arr.shape == (1,) * np.MAXDIMS
+        assert arr.shape == (1,) * ncu.MAXDIMS
         assert arr.item() is initial
 
     def test_pathological_self_containing(self):
         # Test that this also works for two nested sequences
         l = []
         l.append(l)
         arr = np.array([l, l, l], dtype=object)
-        assert arr.shape == (3,) + (1,) * (np.MAXDIMS - 1)
+        assert arr.shape == (3,) + (1,) * (ncu.MAXDIMS - 1)
 
         # Also check a ragged case:
         arr = np.array([l, [None], l], dtype=object)
@@ -525,7 +525,7 @@ def test_nested_arraylikes(self, arraylike):
         initial = arraylike(np.ones((1, 1)))
 
         nested = initial
-        for i in range(np.MAXDIMS - 1):
+        for i in range(ncu.MAXDIMS - 1):
             nested = [nested]
 
         with pytest.raises(ValueError, match=".*would exceed the maximum"):
@@ -536,7 +536,7 @@ def test_nested_arraylikes(self, arraylike):
         # (due to running out of dimensions), this is currently supported but
         # a special case which is not ideal.
         arr = np.array(nested, dtype=object)
-        assert arr.shape == (1,) * np.MAXDIMS
+        assert arr.shape == (1,) * ncu.MAXDIMS
         assert arr.item() == np.array(initial).item()
 
     @pytest.mark.parametrize("arraylike", arraylikes())
@@ -573,11 +573,11 @@ def test_array_of_different_depths(self):
         mismatch_first_dim = np.zeros((1, 2))
         mismatch_second_dim = np.zeros((3, 3))
 
-        dtype, shape = _discover_array_parameters(
+        dtype, shape = ncu._discover_array_parameters(
             [arr, mismatch_second_dim], dtype=np.dtype("O"))
         assert shape == (2, 3)
 
-        dtype, shape = _discover_array_parameters(
+        dtype, shape = ncu._discover_array_parameters(
             [arr, mismatch_first_dim], dtype=np.dtype("O"))
         assert shape == (2,)
         # The second case is currently supported because the arrays
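
`_discover_array_parameters` is a private testing hook; with the direct import gone, the tests reach it through the `ncu` module alias. A hedged sketch mirroring the calls above (private API, subject to change):

    import numpy as np
    import numpy.core._multiarray_umath as ncu  # private module used by the tests

    # Dtype and shape discovery as the updated tests invoke it:
    dt, shape = ncu._discover_array_parameters([], type(np.dtype("float64")))
    print(dt, shape)   # float64 and an empty 1-d shape are expected here

    dt, shape = ncu._discover_array_parameters(
        [np.zeros((3, 3)), np.zeros((1, 2))], dtype=np.dtype("O"))
    print(shape)       # discovery stops at the mismatching dimension, i.e. (2,)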

numpy/core/tests/test_deprecations.py

Lines changed: 0 additions & 8 deletions
@@ -617,14 +617,6 @@ class NoFinalize(np.ndarray):
 
         self.assert_deprecated(lambda: np.array(1).view(NoFinalize))
 
-class TestAxisNotMAXDIMS(_DeprecationTestCase):
-    # Deprecated 2022-01-08, NumPy 1.23
-    message = r"Using `axis=32` \(MAXDIMS\) is deprecated"
-
-    def test_deprecated(self):
-        a = np.zeros((1,)*32)
-        self.assert_deprecated(lambda: np.repeat(a, 1, axis=np.MAXDIMS))
-
 
 class TestLoadtxtParseIntsViaFloat(_DeprecationTestCase):
     # Deprecated 2022-07-03, NumPy 1.23

numpy/core/tests/test_function_base.py

Lines changed: 24 additions & 0 deletions
@@ -1,10 +1,16 @@
+import sys
+
 import pytest
+
+import numpy as np
 from numpy import (
     logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
     ndarray, sqrt, nextafter, stack, errstate
 )
+from numpy.core.function_base import add_newdoc
 from numpy.testing import (
     assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
+    IS_PYPY
 )
 
 
@@ -444,3 +450,21 @@ def test_any_step_zero_and_not_mult_inplace(self):
         assert_array_equal(y, array([[0.0, 1.0], [1.0, 1.0], [2.0, 1.0]]))
 
 
+class TestAdd_newdoc:
+
+    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+    @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
+    def test_add_doc(self):
+        # test that np.add_newdoc did attach a docstring successfully:
+        tgt = "Current flat index into the array."
+        assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
+        assert_(len(np.core.ufunc.identity.__doc__) > 300)
+        assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
+
+    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+    def test_errors_are_ignored(self):
+        prev_doc = np.core.flatiter.index.__doc__
+        # nothing changed, but error ignored, this should probably
+        # give a warning (or even error) in the future.
+        add_newdoc("numpy.core", "flatiter", ("index", "bad docstring"))
+        assert prev_doc == np.core.flatiter.index.__doc__
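
`add_newdoc` is no longer exported at the top level, so the new tests import it from `numpy.core.function_base`. A condensed version of the check added above (skipped under `-OO`, where Python strips docstrings):

    import sys

    import numpy as np
    from numpy.core.function_base import add_newdoc  # no longer np.add_newdoc

    if sys.flags.optimize != 2:
        tgt = "Current flat index into the array."
        assert np.core.flatiter.index.__doc__.startswith(tgt)

        # Errors are silently ignored: re-documenting an existing attribute
        # leaves the original docstring in place.
        prev = np.core.flatiter.index.__doc__
        add_newdoc("numpy.core", "flatiter", ("index", "bad docstring"))
        assert prev == np.core.flatiter.index.__doc__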

numpy/core/tests/test_nditer.py

Lines changed: 6 additions & 5 deletions
@@ -5,6 +5,7 @@
 import subprocess
 
 import numpy as np
+import numpy.core.umath as ncu
 import numpy.core._multiarray_tests as _multiarray_tests
 from numpy import array, arange, nditer, all
 from numpy.testing import (
@@ -3007,7 +3008,7 @@ def test_object_iter_cleanup():
     assert_raises(TypeError, lambda: np.zeros((17000, 2), dtype='f4') * None)
 
     # this more explicit code also triggers the invalid access
-    arr = np.arange(np.BUFSIZE * 10).reshape(10, -1).astype(str)
+    arr = np.arange(ncu.BUFSIZE * 10).reshape(10, -1).astype(str)
     oarr = arr.astype(object)
     oarr[:, -1] = None
     assert_raises(TypeError, lambda: np.add(oarr[:, ::-1], arr[:, ::-1]))
@@ -3216,7 +3217,7 @@ def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps):
     run e.g. with pytest-valgrind or pytest-leaks
     """
     value = 2**30 + 1  # just a random value that Python won't intern
-    arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
+    arr = np.full(int(ncu.BUFSIZE * 2.5), value).astype(in_dtype)
 
     it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
                    flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
@@ -3241,11 +3242,11 @@ def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps):
 ])
 def test_partial_iteration_error(in_dtype, buf_dtype):
     value = 123  # relies on python cache (leak-check will still find it)
-    arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
+    arr = np.full(int(ncu.BUFSIZE * 2.5), value).astype(in_dtype)
     if in_dtype == "O":
-        arr[int(np.BUFSIZE * 1.5)] = None
+        arr[int(ncu.BUFSIZE * 1.5)] = None
     else:
-        arr[int(np.BUFSIZE * 1.5)]["f0"] = None
+        arr[int(ncu.BUFSIZE * 1.5)]["f0"] = None
 
     count = sys.getrefcount(value)
 
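
`BUFSIZE` likewise moves off the top-level namespace; the buffered-iteration tests size their arrays relative to `ncu.BUFSIZE` so that the interesting element lands beyond the first buffer. A reduced sketch of that setup, again using the internal `numpy.core.umath` module:

    import numpy as np
    import numpy.core.umath as ncu  # internal; BUFSIZE is no longer np.BUFSIZE

    # An object array 2.5 buffers long, iterated with buffering enabled:
    arr = np.full(int(ncu.BUFSIZE * 2.5), 2**30 + 1).astype(object)
    it = np.nditer(arr, op_dtypes=[np.dtype("float64")],
                   flags=["buffered", "external_loop", "refs_ok"],
                   casting="unsafe")
    for chunk in it:
        pass  # each chunk is at most one buffer's worth of casted values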

numpy/core/tests/test_ufunc.py

Lines changed: 9 additions & 8 deletions
@@ -8,6 +8,7 @@
 from pytest import param
 
 import numpy as np
+import numpy.core.umath as ncu
 import numpy.core._umath_tests as umt
 import numpy.linalg._umath_linalg as uml
 import numpy.core._operand_flag_tests as opflag_tests
@@ -2699,9 +2700,9 @@ def test_ufunc_out_casterrors():
     # The following array can be added to itself as an object array, but
     # the result cannot be cast to an integer output:
     value = 123  # relies on python cache (leak-check will still find it)
-    arr = np.array([value] * int(np.BUFSIZE * 1.5) +
+    arr = np.array([value] * int(ncu.BUFSIZE * 1.5) +
                    ["string"] +
-                   [value] * int(1.5 * np.BUFSIZE), dtype=object)
+                   [value] * int(1.5 * ncu.BUFSIZE), dtype=object)
     out = np.ones(len(arr), dtype=np.intp)
 
     count = sys.getrefcount(value)
@@ -2724,24 +2725,24 @@ def test_ufunc_out_casterrors():
     assert out[-1] == 1
 
 
-@pytest.mark.parametrize("bad_offset", [0, int(np.BUFSIZE * 1.5)])
+@pytest.mark.parametrize("bad_offset", [0, int(ncu.BUFSIZE * 1.5)])
 def test_ufunc_input_casterrors(bad_offset):
     value = 123
     arr = np.array([value] * bad_offset +
                    ["string"] +
-                   [value] * int(1.5 * np.BUFSIZE), dtype=object)
+                   [value] * int(1.5 * ncu.BUFSIZE), dtype=object)
     with pytest.raises(ValueError):
         # Force cast inputs, but the buffered cast of `arr` to intp fails:
         np.add(arr, arr, dtype=np.intp, casting="unsafe")
 
 
 @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
-@pytest.mark.parametrize("bad_offset", [0, int(np.BUFSIZE * 1.5)])
+@pytest.mark.parametrize("bad_offset", [0, int(ncu.BUFSIZE * 1.5)])
 def test_ufunc_input_floatingpoint_error(bad_offset):
     value = 123
     arr = np.array([value] * bad_offset +
                    [np.nan] +
-                   [value] * int(1.5 * np.BUFSIZE))
+                   [value] * int(1.5 * ncu.BUFSIZE))
     with np.errstate(invalid="raise"), pytest.raises(FloatingPointError):
         # Force cast inputs, but the buffered cast of `arr` to intp fails:
         np.add(arr, arr, dtype=np.intp, casting="unsafe")
@@ -2757,7 +2758,7 @@ def test_trivial_loop_invalid_cast():
 
 @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
 @pytest.mark.parametrize("offset",
-        [0, np.BUFSIZE//2, int(1.5*np.BUFSIZE)])
+        [0, ncu.BUFSIZE//2, int(1.5*ncu.BUFSIZE)])
 def test_reduce_casterrors(offset):
     # Test reporting of casting errors in reductions, we test various
     # offsets to where the casting error will occur, since these may occur
@@ -2766,7 +2767,7 @@ def test_reduce_casterrors(offset):
     value = 123  # relies on python cache (leak-check will still find it)
     arr = np.array([value] * offset +
                    ["string"] +
-                   [value] * int(1.5 * np.BUFSIZE), dtype=object)
+                   [value] * int(1.5 * ncu.BUFSIZE), dtype=object)
     out = np.array(-1, dtype=np.intp)
 
     count = sys.getrefcount(value)
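
The same `np.BUFSIZE` to `ncu.BUFSIZE` substitution drives the ufunc cast-error tests: a non-castable element is parked past the first buffer so the failure happens mid-iteration. A condensed version of `test_ufunc_input_casterrors` as updated above:

    import numpy as np
    import numpy.core.umath as ncu  # internal; BUFSIZE is no longer np.BUFSIZE

    value = 123
    arr = np.array([value] * int(ncu.BUFSIZE * 1.5) + ["string"] +
                   [value] * int(1.5 * ncu.BUFSIZE), dtype=object)
    try:
        # Force-cast the inputs; the buffered cast of "string" to intp fails:
        np.add(arr, arr, dtype=np.intp, casting="unsafe")
    except ValueError:
        print("cast error raised after the first buffer, as the test expects")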

0 commit comments
