BUG: Fixed an issue wherein certain `nan<x>` functions could fail for object arrays by BvB93 · Pull Request #19821 · numpy/numpy

BUG: Fixed an issue wherein certain `nan<x>` functions could fail for object arrays #19821


Merged: 5 commits, Sep 5, 2021
24 changes: 20 additions & 4 deletions numpy/lib/nanfunctions.py
@@ -160,8 +160,12 @@ def _remove_nan_1d(arr1d, overwrite_input=False):
True if `res` can be modified in place, given the constraint on the
input
"""
if arr1d.dtype == object:
# object arrays do not support `isnan` (gh-9009), so make a guess
c = np.not_equal(arr1d, arr1d, dtype=bool)
Member: Clever.
else:
c = np.isnan(arr1d)

c = np.isnan(arr1d)
s = np.nonzero(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning,
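
The `np.not_equal(arr1d, arr1d, dtype=bool)` line above works because NaN is the only common value that compares unequal to itself, while `np.isnan` raises a `TypeError` for object arrays (gh-9009). A minimal standalone sketch of the idea, separate from the library code:

```python
import numpy as np

# Object array mixing Python floats and NaN; np.isnan would raise TypeError here.
arr1d = np.array([1.0, float("nan"), 3, float("nan")], dtype=object)

# NaN != NaN, so an element-wise self-comparison flags the NaN positions.
mask = np.not_equal(arr1d, arr1d, dtype=bool)
print(mask)          # [False  True False  True]
print(arr1d[~mask])  # [1.0 3] -- the non-NaN elements survive
```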
@@ -214,7 +218,11 @@ def _divide_by_count(a, b, out=None):
return np.divide(a, b, out=out, casting='unsafe')
else:
if out is None:
return a.dtype.type(a / b)
# Precaution against reduced object arrays
try:
return a.dtype.type(a / b)
except AttributeError:
Member: Is this faster than checking for the attribute as you do below?

Member (author): During a quick local test it shaves off about 100 ns (~400 vs ~300 ns) for the non-object case. Would you feel this performance gain is worthwhile enough to stick to the current try/except block?

Member: I can't see it making a difference in the big picture. There might be a small difference in clarity, but with one-liners that is debatable ...

return a / b
else:
# This is questionable, but currently a numpy scalar can
# be output to a zero dimensional array.
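
For context on the try/except above: a reduction over an object array can return a plain Python scalar, which has no `.dtype` attribute, so `a.dtype.type(a / b)` raises `AttributeError`. A standalone sketch contrasting the two approaches discussed in the review thread; the function names here are illustrative, not NumPy API:

```python
import numpy as np

def divide_eafp(a, b):
    # Mirrors the diff: assume `a` has a dtype, fall back when it does not
    # (e.g. a plain Python float coming from an object-array reduction).
    try:
        return a.dtype.type(a / b)
    except AttributeError:
        return a / b

def divide_lbyl(a, b):
    # Alternative raised in review: check the attribute up front.
    if hasattr(a, "dtype"):
        return a.dtype.type(a / b)
    return a / b

x = np.array([1.0, 2.0, 3.0], dtype=object).sum()  # plain Python float
y = np.array([1.0, 2.0, 3.0]).sum()                # numpy float64 scalar
print(type(x), divide_eafp(x, 2), divide_lbyl(x, 2))
print(type(y), divide_eafp(y, 2), divide_lbyl(y, 2))
```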
@@ -1551,7 +1559,13 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):

# Compute variance.
var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
if var.ndim < cnt.ndim:

# Precaution against reduced object arrays
try:
var_ndim = var.ndim
except AttributeError:
var_ndim = np.ndim(var)
if var_ndim < cnt.ndim:
# Subclasses of ndarray may ignore keepdims, so check here.
cnt = cnt.squeeze(axis)
dof = cnt - ddof
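
The same object-array caveat applies here: reducing an object array can produce a plain Python scalar with no `.ndim` attribute, whereas `np.ndim` accepts both arrays and scalars. A small illustrative check (not library code):

```python
import numpy as np

var_obj = np.array([1.0, 4.0, 9.0], dtype=object).sum()  # Python float, no .ndim
var_arr = np.array([[1.0, 4.0, 9.0]]).sum(axis=1)        # ndarray, has .ndim

print(hasattr(var_obj, "ndim"), np.ndim(var_obj))  # False 0
print(hasattr(var_arr, "ndim"), np.ndim(var_arr))  # True 1
```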
@@ -1671,6 +1685,8 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
keepdims=keepdims)
if isinstance(var, np.ndarray):
std = np.sqrt(var, out=var)
else:
elif hasattr(var, 'dtype'):
std = var.dtype.type(np.sqrt(var))
else:
std = np.sqrt(var)
return std
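
The corrected branching in `nanstd` distinguishes three possible results from the `nanvar` call: an ndarray, a NumPy scalar that carries a `dtype`, and a plain Python scalar produced by an object-array reduction. A rough standalone sketch of that dispatch, with illustrative inputs:

```python
import numpy as np

def sqrt_like_nanstd(var):
    # Sketch of the three branches: ndarray, dtype-carrying scalar, plain scalar.
    if isinstance(var, np.ndarray):
        return np.sqrt(var, out=var)         # in-place square root
    elif hasattr(var, "dtype"):
        return var.dtype.type(np.sqrt(var))  # preserve the numpy scalar dtype
    else:
        return np.sqrt(var)                  # e.g. Python float from an object array

print(sqrt_like_nanstd(np.array([4.0, 9.0])))  # ndarray branch
print(sqrt_like_nanstd(np.float32(4.0)))       # numpy scalar branch
print(sqrt_like_nanstd(4.0))                   # plain-scalar branch -> 2.0
```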
140 changes: 69 additions & 71 deletions numpy/lib/tests/test_nanfunctions.py
@@ -231,79 +231,77 @@ class MyNDArray(np.ndarray):
assert_(res.shape == ())


class TestNanFunctions_IntTypes:

int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
@pytest.mark.parametrize(
"dtype",
np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O",
Member: np.typecodes['AllFloat'][1:] would remove 'e', but is not quite as robust.

Member (author): No worries, as of fffcb6e the tests seem to work fine with np.float16.

)
class TestNanFunctions_NumberTypes:

mat = np.array([127, 39, 93, 87, 46])

def integer_arrays(self):
for dtype in self.int_types:
yield self.mat.astype(dtype)

def test_nanmin(self):
tgt = np.min(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmin(mat), tgt)

def test_nanmax(self):
tgt = np.max(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmax(mat), tgt)

def test_nanargmin(self):
tgt = np.argmin(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmin(mat), tgt)

def test_nanargmax(self):
tgt = np.argmax(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmax(mat), tgt)

def test_nansum(self):
tgt = np.sum(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nansum(mat), tgt)

def test_nanprod(self):
tgt = np.prod(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanprod(mat), tgt)

def test_nancumsum(self):
tgt = np.cumsum(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nancumsum(mat), tgt)

def test_nancumprod(self):
tgt = np.cumprod(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nancumprod(mat), tgt)

def test_nanmean(self):
tgt = np.mean(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmean(mat), tgt)

def test_nanvar(self):
tgt = np.var(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat), tgt)

tgt = np.var(mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat, ddof=1), tgt)

def test_nanstd(self):
tgt = np.std(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat), tgt)

tgt = np.std(self.mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat, ddof=1), tgt)
mat.setflags(write=False)

nanfuncs = {
np.nanmin: np.min,
np.nanmax: np.max,
np.nanargmin: np.argmin,
np.nanargmax: np.argmax,
np.nansum: np.sum,
np.nanprod: np.prod,
np.nancumsum: np.cumsum,
np.nancumprod: np.cumprod,
np.nanmean: np.mean,
np.nanmedian: np.median,
np.nanvar: np.var,
np.nanstd: np.std,
}
nanfunc_ids = [i.__name__ for i in nanfuncs]

@pytest.mark.parametrize("nanfunc,func", nanfuncs.items(), ids=nanfunc_ids)
def test_nanfunc(self, dtype, nanfunc, func):
if nanfunc is np.nanprod and dtype == "e":
pytest.xfail(reason="overflow encountered in reduce")

mat = self.mat.astype(dtype)
tgt = func(mat)
out = nanfunc(mat)

assert_almost_equal(out, tgt)
if dtype == "O":
assert type(out) is type(tgt)
else:
assert out.dtype == tgt.dtype

@pytest.mark.parametrize(
"nanfunc,func",
[(np.nanquantile, np.quantile), (np.nanpercentile, np.percentile)],
ids=["nanquantile", "nanpercentile"],
)
def test_nanfunc_q(self, dtype, nanfunc, func):
mat = self.mat.astype(dtype)
tgt = func(mat, q=1)
out = nanfunc(mat, q=1)

assert_almost_equal(out, tgt)
if dtype == "O":
assert type(out) is type(tgt)
else:
assert out.dtype == tgt.dtype

@pytest.mark.parametrize(
"nanfunc,func",
[(np.nanvar, np.var), (np.nanstd, np.std)],
ids=["nanvar", "nanstd"],
)
def test_nanfunc_ddof(self, dtype, nanfunc, func):
mat = self.mat.astype(dtype)
tgt = func(mat, ddof=1)
out = nanfunc(mat, ddof=1)

assert_almost_equal(out, tgt)
if dtype == "O":
assert type(out) is type(tgt)
else:
assert out.dtype == tgt.dtype
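
As an aside on the dtype parametrization used throughout this class: `np.typecodes` maps category names to strings of one-character dtype codes, and the review suggestion of `np.typecodes['AllFloat'][1:]` works because "e" (float16) is the first code in that string. A small inspection snippet (the exact strings may vary across NumPy versions):

```python
import numpy as np

# Each character is a one-letter dtype code, e.g. "d" is float64, "e" is float16.
print(np.typecodes["AllInteger"])
print(np.typecodes["AllFloat"])      # includes "e" (float16), hence the nanprod xfail
print(np.typecodes["AllFloat"][1:])  # the review alternative, dropping "e"
```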


class SharedNanFunctionsTestsMixin: