DRAFT: enabling doctestplus by bsipocz · Pull Request #23812 · numpy/numpy

DRAFT: enabling doctestplus #23812


Closed
wants to merge 16 commits
Changes from 1 commit
TMP: some fixes
bsipocz committed Dec 20, 2023
commit a4b478925e96436312932c24cc9dd07c77d3001a
28 changes: 14 additions & 14 deletions numpy/__init__.py
@@ -29,8 +29,7 @@

Use the built-in ``help`` function to view a function's docstring::

->>> help(np.sort)
-... # doctest: +SKIP
+>>> help(np.sort) # doctest: +IGNORE_OUTPUT

For some objects, ``np.info(obj)`` may provide additional help. This is
particularly true if you see the line "Help on ufunc object:" at the top
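For context on this change: the standard library's +SKIP directive skips a statement entirely, while pytest-doctestplus's +IGNORE_OUTPUT still executes it and only skips the output comparison, so the example keeps exercising the help path. A minimal sketch of the difference in a hypothetical standalone module (assumes pytest-doctestplus is installed, e.g. run via pytest --doctest-modules):

```python
# demo_directives.py -- hypothetical module, for illustration only.
# Requires pytest-doctestplus, which registers the IGNORE_OUTPUT directive.

def noisy_sum(a, b):
    """Return a + b while printing a message whose wording is not pinned down.

    >>> noisy_sum(1, 2)  # doctest: +IGNORE_OUTPUT
    this expected output is never compared, but the call above still runs
    >>> noisy_sum(3, 4)  # doctest: +SKIP
    this statement is not executed at all
    """
    print(f"summing {a} and {b}")
    return a + b
```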
@@ -169,7 +168,7 @@
where, zeros, zeros_like
)

-# NOTE: It's still under discussion whether these aliases
+# NOTE: It's still under discussion whether these aliases
# should be removed.
for ta in ["float96", "float128", "complex192", "complex256"]:
try:
@@ -184,20 +183,20 @@
histogram, histogram_bin_edges, histogramdd
)
from .lib._nanfunctions_impl import (
-nanargmax, nanargmin, nancumprod, nancumsum, nanmax, nanmean,
+nanargmax, nanargmin, nancumprod, nancumsum, nanmax, nanmean,
nanmedian, nanmin, nanpercentile, nanprod, nanquantile, nanstd,
nansum, nanvar
)
from .lib._function_base_impl import (
-select, piecewise, trim_zeros, copy, iterable, percentile, diff,
+select, piecewise, trim_zeros, copy, iterable, percentile, diff,
gradient, angle, unwrap, sort_complex, flip, rot90, extract, place,
vectorize, asarray_chkfinite, average, bincount, digitize, cov,
corrcoef, median, sinc, hamming, hanning, bartlett, blackman,
kaiser, trapz, i0, meshgrid, delete, insert, append, interp, quantile
)
from .lib._twodim_base_impl import (
-diag, diagflat, eye, fliplr, flipud, tri, triu, tril, vander,
-histogram2d, mask_indices, tril_indices, tril_indices_from,
+diag, diagflat, eye, fliplr, flipud, tri, triu, tril, vander,
+histogram2d, mask_indices, tril_indices, tril_indices_from,
triu_indices, triu_indices_from
)
from .lib._shape_base_impl import (
@@ -206,7 +205,7 @@
take_along_axis, tile, vsplit
)
from .lib._type_check_impl import (
-iscomplexobj, isrealobj, imag, iscomplex, isreal, nan_to_num, real,
+iscomplexobj, isrealobj, imag, iscomplex, isreal, nan_to_num, real,
real_if_close, typename, mintypecode, common_type
)
from .lib._arraysetops_impl import (
@@ -231,20 +230,21 @@
)
from .lib._index_tricks_impl import (
diag_indices_from, diag_indices, fill_diagonal, ndindex, ndenumerate,
-ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index,
+ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index,
index_exp
)
from . import matrixlib as _mat
from .matrixlib import (
asmatrix, bmat, matrix
)


# public submodules are imported lazily, therefore are accessible from
# __getattr__. Note that `distutils` (deprecated) and `array_api`
# (experimental label) are not added here, because `from numpy import *`
# must not raise any warnings - that's too disruptive.
__numpy_submodules__ = {
"linalg", "fft", "dtypes", "random", "polynomial", "ma",
"linalg", "fft", "dtypes", "random", "polynomial", "ma",
"exceptions", "lib", "ctypeslib", "testing", "typing",
"f2py", "test", "rec", "char", "core"
}
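For context, the lazy submodule imports this comment refers to are implemented with the module-level __getattr__ hook from PEP 562, which is only consulted when normal attribute lookup fails. A generic sketch of the pattern (illustrative only, not NumPy's actual code):

```python
# Sketch of PEP 562 lazy submodule loading (illustrative, not NumPy's code).
import importlib

_LAZY_SUBMODULES = {"linalg", "fft", "random"}

def __getattr__(name):
    # Only reached when the attribute is not already defined, so eagerly
    # imported names are unaffected and `from package import *` stays quiet.
    if name in _LAZY_SUBMODULES:
        return importlib.import_module(f"{__name__}.{name}")
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```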
@@ -388,7 +388,7 @@ def __getattr__(attr):

if attr in __former_attrs__:
raise AttributeError(__former_attrs__[attr])

if attr in __expired_attributes__:
raise AttributeError(
f"`np.{attr}` was removed in the NumPy 2.0 release. "
@@ -411,7 +411,7 @@ def __dir__():
globals().keys() | __numpy_submodules__
)
public_symbols -= {
"matrixlib", "matlib", "tests", "conftest", "version",
"matrixlib", "matlib", "tests", "conftest", "version",
"compat", "distutils", "array_api"
}
return list(public_symbols)
@@ -480,7 +480,7 @@ def _mac_os_check():
def hugepage_setup():
"""
We usually use madvise hugepages support, but on some old kernels it
-is slow and thus better avoided. Specifically kernel version 4.6
+is slow and thus better avoided. Specifically kernel version 4.6
had a bug fix which probably fixed this:
https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
"""
@@ -489,7 +489,7 @@ def hugepage_setup():
# If there is an issue with parsing the kernel version,
# set use_hugepage to 0. Usage of LooseVersion will handle
# the kernel version parsing better, but avoided since it
-will increase the import time.
+will increase the import time.
# See: #16679 for related discussion.
try:
use_hugepage = 1
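The comment above trades parsing robustness for import speed: LooseVersion would handle odd kernel release strings better but costs import time, so the version is hand-parsed. A rough sketch of that kind of check (hypothetical helper, not NumPy's actual code):

```python
import os

def _use_hugepage():
    """Best-effort sketch: enable hugepages only on kernels >= 4.6."""
    try:
        release = os.uname().release            # e.g. "5.15.0-91-generic"
        major, minor = release.split("-")[0].split(".")[:2]
        return 1 if (int(major), int(minor)) >= (4, 6) else 0
    except (AttributeError, ValueError):
        # Trouble parsing the kernel version (or no os.uname, as on
        # Windows): disable hugepages, as the comment above describes.
        return 0

use_hugepage = _use_hugepage()
```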
3 changes: 2 additions & 1 deletion numpy/_core/arrayprint.py
@@ -61,6 +61,7 @@
# str/False on the way in/out.
'legacy': sys.maxsize}


def _make_options_dict(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
sign=None, formatter=None, floatmode=None, legacy=None):
@@ -241,7 +242,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,

>>> np.set_printoptions(precision=4)
>>> np.array([1.123456789])
-[1.1235]
+array([1.1235])

Long arrays can be summarised:

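The corrected output above reflects what a doctest actually compares: a bare expression at the >>> prompt echoes the array's repr, while print(a) would show its str. A quick check, assuming NumPy is importable:

```python
import numpy as np

np.set_printoptions(precision=4)
a = np.array([1.123456789])
print(repr(a))  # array([1.1235]) -- what a bare `a` at the prompt shows
print(str(a))   # [1.1235]       -- what `print(a)` shows
```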
34 changes: 17 additions & 17 deletions numpy/_core/fromnumeric.py
@@ -775,7 +775,7 @@ def partition(a, kth, axis=-1, kind='introselect', order=None):
>>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0])
>>> p = np.partition(a, 4)
>>> p
-array([0, 1, 2, 1, 2, 5, 2, 3, 3, 6, 7, 7, 7, 7]) # may vary
+array([0, 1, 2, 1, 2, 5, 2, 3, 3, 6, 7, 7, 7, 7])

``p[4]`` is 2; all elements in ``p[:4]`` are less than or equal
to ``p[4]``, and all elements in ``p[5:]`` are greater than or
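Dropping `# may vary` pins this output exactly, even though np.partition only guarantees the kth-element invariant rather than a specific ordering. The invariant itself is easy to verify programmatically (a small sketch, assuming NumPy):

```python
import numpy as np

a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0])
p = np.partition(a, 4)
# Guaranteed: everything left of index 4 is <= p[4], everything right is >=.
assert (p[:4] <= p[4]).all()
assert (p[5:] >= p[4]).all()
```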
@@ -873,13 +873,13 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):

>>> x = np.array([3, 4, 2, 1])
>>> x[np.argpartition(x, 3)]
-array([2, 1, 3, 4]) # may vary
+array([2, 1, 3, 4])
>>> x[np.argpartition(x, (1, 3))]
-array([1, 2, 3, 4]) # may vary
+array([1, 2, 3, 4])

>>> x = [3, 4, 2, 1]
>>> np.array(x)[np.argpartition(x, 3)]
-array([2, 1, 3, 4]) # may vary
+array([2, 1, 3, 4])

Multi-dimensional array:

@@ -1034,14 +1034,14 @@ def sort(a, axis=-1, kind=None, order=None):
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
->>> np.sort(a, order='height') # doctest: +SKIP
+>>> np.sort(a, order='height') # doctest: +IGNORE_OUTPUT
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])

Sort by age, then height if ages are equal:

->>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
+>>> np.sort(a, order=['age', 'height']) # doctest: +IGNORE_OUTPUT
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
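For reference, a self-contained version of the structured sort above (assuming NumPy). Modern NumPy prints the height as 1.9 rather than the legacy 1.8999999999999999 shown here, which is one reason the output comparison is ignored:

```python
import numpy as np

dtype = [('name', 'S10'), ('height', float), ('age', int)]
values = [(b'Arthur', 1.8, 41), (b'Lancelot', 1.9, 38), (b'Galahad', 1.7, 38)]
a = np.array(values, dtype=dtype)

print(np.sort(a, order='height'))           # Galahad, Arthur, Lancelot
print(np.sort(a, order=['age', 'height']))  # Galahad, Lancelot, Arthur
```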
@@ -1585,8 +1585,8 @@ def squeeze(a, axis=None):
>>> x = np.array([[1234]])
>>> x.shape
(1, 1)
->>> np.squeeze(x)
-array(1234) # 0d array
+>>> np.squeeze(x) # results a 0d array
+array(1234)
>>> np.squeeze(x).shape
()
>>> np.squeeze(x)[()]
@@ -2547,7 +2547,7 @@ def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
>>> o=np.array(False)
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z
-(28293632, 28293632, array(True)) # may vary
+(28293632, 28293632, array(True))

"""
return _wrapreduction(a, np.logical_and, 'all', axis, None, out,
@@ -3088,8 +3088,8 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
raised on overflow. That means that, on a 32-bit platform:

>>> x = np.array([536870910, 536870910, 536870910, 536870910])
->>> np.prod(x)
-16 # may vary
+>>> np.prod(x) # doctest: +SKIP
+16
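The +SKIP hedges a genuinely platform-dependent result: with a 32-bit default integer the product wraps modulo 2**32, and 536870910 is 2**29 - 2, whose fourth power happens to wrap to 16. The quoted value can be reproduced in pure Python on any platform:

```python
# The wrapped 32-bit product quoted in the docstring, as plain arithmetic:
print((536870910 ** 4) % (2 ** 32))  # -> 16
# A 64-bit default integer wraps to a different value, hence the skip:
print((536870910 ** 4) % (2 ** 64))
```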

The product of an empty array is the neutral element 1:

@@ -3520,7 +3520,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *,
Computing the mean in float64 is more accurate:

>>> np.mean(a, dtype=np.float64)
-0.55000000074505806 # may vary
+0.55000000074505806

Specifying a where argument:

@@ -3676,7 +3676,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
-1.1180339887498949 # may vary
+1.1180339887498949
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
@@ -3693,13 +3693,13 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
Computing the standard deviation in float64 is more accurate:

>>> np.std(a, dtype=np.float64)
-0.44999999925494177 # may vary
+0.44999999925494177

Specifying a where argument:

>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.std(a)
-2.614064523559687 # may vary
+2.614064523559687
>>> np.std(a, where=[[True], [True], [False]])
2.0
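Removing `# may vary` from floating-point outputs like those above presumably leans on doctestplus's FLOAT_CMP directive, which compares floats numerically rather than by their printed digits; whether this PR enables it globally is not visible in this commit. A tiny sketch of the directive in isolation (assuming pytest-doctestplus):

```python
def add(a, b):
    """Add two floats.

    >>> add(0.1, 0.2)  # doctest: +FLOAT_CMP
    0.3
    """
    # Plain doctest would fail: repr(0.1 + 0.2) is '0.30000000000000004'.
    return a + b
```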

@@ -3881,15 +3881,15 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
Computing the variance in float64 is more accurate:

>>> np.var(a, dtype=np.float64)
-0.20249999932944759 # may vary
+0.20249999932944759
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025

Specifying a where argument:

>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.var(a)
-6.833333333333333 # may vary
+6.833333333333333
>>> np.var(a, where=[[True], [True], [False]])
4.0

10 changes: 5 additions & 5 deletions numpy/lib/_arraysetops_impl.py
@@ -266,13 +266,13 @@ def unique(ar, return_index=False, return_inverse=False,
array([1, 2, 3, 4, 6])
>>> counts
array([1, 3, 1, 1, 1])
->>> np.repeat(values, counts)
-array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved
+>>> np.repeat(values, counts) # original order not preserved
+array([1, 2, 2, 2, 3, 4, 6])

"""
ar = np.asanyarray(ar)
if axis is None:
-ret = _unique1d(ar, return_index, return_inverse, return_counts,
+ret = _unique1d(ar, return_index, return_inverse, return_counts,
equal_nan=equal_nan)
return _unpack_tuple(ret)

@@ -871,11 +871,11 @@ def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
# However, here we set the requirement that by default
# the intermediate array can only be 6x
# the combined memory allocation of the original
-# arrays. See discussion on
+# arrays. See discussion on
# https://github.com/numpy/numpy/pull/12065.

if (
-range_safe_from_overflow and
+range_safe_from_overflow and
(below_memory_constraint or kind == 'table')
):

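For context on the condition above: the 'table' kind builds a lookup array spanning the integer range of the inputs, so it is only chosen automatically when that range stays within 6x the combined size of the original arrays, per the linked discussion. A rough illustration of the arithmetic (not NumPy's actual code; the exact range computation is an assumption here):

```python
import numpy as np

# Would the 'table' strategy fit the 6x memory budget for these inputs?
ar1 = np.array([1, 5, 9])
ar2 = np.array([2, 4, 8, 1000])
range_size = int(ar2.max()) - int(ar2.min()) + 1  # lookup-table length: 999
budget = 6 * (ar1.size + ar2.size)                # 6x combined size: 42
print(range_size <= budget)                       # False -> use sort instead
```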
2 changes: 2 additions & 0 deletions numpy/lib/_datasource.py
@@ -41,6 +41,8 @@

_open = open

+__doctest_skip__ = ['DataSource']
+

def _check_mode(mode, encoding, newline):
"""Check mode and that encoding and newline are compatible.
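__doctest_skip__ is a pytest-doctestplus hook: a module-level list of name patterns whose doctests are skipped without being executed, a good fit for DataSource since its docstring examples involve remote URLs and local paths. A minimal sketch in a hypothetical module (assuming pytest-doctestplus):

```python
# mymodule.py -- hypothetical, for illustration only.
# With pytest-doctestplus enabled, doctests of any object whose name
# matches an entry below are skipped without being executed.
__doctest_skip__ = ['fetch_remote', 'Cache.*']

def fetch_remote(url):
    """Download a resource.

    >>> fetch_remote('https://example.com/data.csv')  # skipped: needs network
    '...contents...'
    """

class Cache:
    def clear(self):
        """
        >>> Cache().clear()  # skipped via the 'Cache.*' wildcard
        """
```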