WIP: Actually try deprecating non-tuple nd-indices by seberg · Pull Request #4434 · numpy/numpy · GitHub
[go: up one dir, main page]

Skip to content

WIP: Actually try deprecating non-tuple nd-indices #4434

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions doc/release/1.11.0-notes.rst
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,13 @@ DeprecationWarning to error
FutureWarning to changed behavior
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Multidimensional indexing with anything but a base class tuple is
deprecated. This means that code such as ``arr[[slice(None)]]`` has to
be changed to ``arr[tuple([slice(None)])]``. This is necessary to avoid
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I would write: arr[(slice(None),)] or simply arr[slice(None),]

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hehe, yeah, though that would somewhat defeat the purpose ;). should make the example longer... Anyway, doesn't look like 1.11 change.
Hmmm, reading my last comment, part of me wonders why the masked arrays are always so tricky when doing indexing changed.

EDIT: should make the list explicit.

the ambiguity of expressions such as ``arr[[[0, 1], [0, 1]]]`` which
currently are interpreted as ``arr[[0, 1], [0, 1]]`` using heuristics.
Future or deprecation warnings are given depending on the index.

* In ``np.lib.split`` an empty array in the result always had dimension
``(0,)`` no matter the dimensions of the array being split. This
has been changed so that the dimensions will be preserved. A
Expand Down
45 changes: 40 additions & 5 deletions numpy/core/src/multiarray/mapping.c
Original file line number Diff line number Diff line change
Expand Up @@ -200,6 +200,10 @@ prepare_index(PyArrayObject *self, PyObject *index,
* or newaxis, Ellipsis or other arrays or sequences
* embedded, are considered equivalent to an indexing
* tuple. (`a[[[1,2], [3,4]]] == a[[1,2], [3,4]]`)
*
* Use make_tuple = 1 to denote a non-base class tuple
* Use make_tuple = 2 for indices that could be valid non-converted
* use make_tuple = 3 to denote a conversion that will be an error
*/

if (PyTuple_Check(index)) {
Expand All @@ -221,18 +225,49 @@ prepare_index(PyArrayObject *self, PyObject *index,
make_tuple = 0;
break;
}
if (PyArray_Check(tmp_obj) || PySequence_Check(tmp_obj)
|| PySlice_Check(tmp_obj) || tmp_obj == Py_Ellipsis
|| tmp_obj == Py_None) {
make_tuple = 1;
if (PyArray_Check(tmp_obj) || PySequence_Check(tmp_obj)) {
make_tuple = 2;
}
else if (PySlice_Check(tmp_obj) || (tmp_obj == Py_Ellipsis) ||
(tmp_obj == Py_None)) {
make_tuple = 3;
Py_DECREF(tmp_obj);
break;
}
}
Py_DECREF(tmp_obj);
}

if (make_tuple) {
/* We want to interpret it as a tuple, so make it one */
if (make_tuple == 2) {
if (DEPRECATE_FUTUREWARNING(
"using a non-tuple sequence for multidimensional "
"indexing is deprecated; use `arr[tuple(seq)]` "
"instead of `arr[seq]`. In the future this will be "
"interpreted as an array index.") < 0) {
return -1;
}
}
else if (make_tuple == 1) {
if (DEPRECATE_FUTUREWARNING(
"multi dimensional indexing tuple was not base class. "
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there a good reason to deprecate this? Allowing subclasses of tuple doesn't seem unreasonable, as there's still no ambiguity

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No, probably not, I probably wasn't sure and just thought I will go with the extreme version and make it less extreme from there ;).

"Using a non-tuple sequence for multidimensional "
"indexing is deprecated, use `arr[tuple(seq)]` "
"instead of `arr[seq]`. In the future this will be "
"interpreted as an array index.") < 0) {
return -1;
}
}
else {
if (DEPRECATE(
"using a non-tuple sequence for multidimensional "
"indexing is deprecated; use `arr[tuple(seq)]` "
"instead of `arr[seq]`. In the future this indexing "
"operation will be an error.") < 0) {
return -1;
}
}

index = PySequence_Tuple(index);
if (index == NULL) {
return -1;
Expand Down
27 changes: 27 additions & 0 deletions numpy/core/tests/test_deprecations.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,33 @@ def test_operator_deprecation(self):
self.assert_deprecated(operator.sub, args=(generic, generic))


class TestNonTupleNDIndexDeprecation(object):
def test_basic(self):
a = np.zeros((5, 5))
with warnings.catch_warnings():
warnings.filterwarnings('always')
assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
assert_warns(DeprecationWarning, a.__getitem__, [slice(None)])

warnings.filterwarnings('error')
assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
assert_raises(DeprecationWarning, a.__getitem__, [slice(None)])

# a a[[0, 1]] always was advanced indexing, so no error/warning
a[[0, 1]]

def test_not_exact_tuple(self):
a = np.zeros((5, 5))
class TupleSubclass(tuple):
pass

with warnings.catch_warnings():
warnings.filterwarnings('always')
assert_warns(FutureWarning, a.__getitem__, TupleSubclass((1, 2)))
warnings.filterwarnings('error')
assert_raises(FutureWarning, a.__getitem__, TupleSubclass((1, 2)))


class TestRankDeprecation(_DeprecationTestCase):
"""Test that np.rank is deprecated. The function should simply be
removed. The VisibleDeprecationWarning may become unnecessary.
Expand Down
2 changes: 1 addition & 1 deletion numpy/core/tests/test_memmap.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ def test_arithmetic_drops_references(self):
def test_indexing_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = fp[[(1, 2), (2, 3)]]
tmp = fp[((1, 2), (2, 3))]
if isinstance(tmp, memmap):
assert tmp._mmap is not fp._mmap

Expand Down
4 changes: 2 additions & 2 deletions numpy/fft/fftpack.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,13 +67,13 @@ def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
a = a[tuple(index)]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
z[tuple(index)] = a
a = z

if axis != -1:
Expand Down
6 changes: 3 additions & 3 deletions numpy/lib/arraypad.py
Original file line number Diff line number Diff line change
Expand Up @@ -1363,9 +1363,9 @@ def pad(array, pad_width, mode, **kwargs):
# Create a new padded array
rank = list(range(len(narray.shape)))
total_dim_increase = [np.sum(pad_width[i]) for i in rank]
offset_slices = [slice(pad_width[i][0],
pad_width[i][0] + narray.shape[i])
for i in rank]
offset_slices = tuple(
slice(pad_width[i][0], pad_width[i][0] + narray.shape[i])
for i in rank)
new_shape = np.array(narray.shape) + total_dim_increase
newmat = np.zeros(new_shape, narray.dtype)

Expand Down
52 changes: 28 additions & 24 deletions numpy/lib/function_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -647,7 +647,7 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
ni[i], ni[j] = ni[j], ni[i]

# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
core = D * (slice(1, -1),)
hist = hist[core]

# Normalize if normed is True
Expand Down Expand Up @@ -1242,11 +1242,11 @@ def gradient(f, *varargs, **kwargs):
# Needs to keep the specific units, can't be a general unit
otype = f.dtype

# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
# Convert datetime64 data into timedelta. Make dummy variable `y`
# that is a view of timedelta if the data is datetime64, otherwise
# just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
y = f.view(otype)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What happened here?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

there is some weird problems in gradient that made it work only if it did the wrong, fancy, indexing. I guess this maybe somehow fixed/worked around it, it is possible that the commit message explains more, but likely not.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Would not rule out that other datetime fixes in the meantime may have made this better, or that changing things at another place would be more correct.

else:
y = f

Expand All @@ -1266,19 +1266,19 @@ def gradient(f, *varargs, **kwargs):
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])/2.0

slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])

slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])

# Numerical differentiation: 2nd order edges, 2nd order interior
else:
Expand All @@ -1289,21 +1289,23 @@ def gradient(f, *varargs, **kwargs):
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])/2.0

slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
out[tuple(slice1)] = -(3.0*y[tuple(slice2)] - 4.0*y[tuple(slice3)] +
y[tuple(slice4)]) / 2.0

slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
out[tuple(slice1)] = (3.0*y[tuple(slice2)] - 4.0*y[tuple(slice3)] +
y[tuple(slice4)]) / 2.0

# divide by step size
out /= dx[i]
Expand Down Expand Up @@ -1601,6 +1603,7 @@ def unwrap(p, discont=pi, axis=-1):
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
slice1 = tuple(slice1)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
Expand Down Expand Up @@ -3344,6 +3347,7 @@ def _median(a, axis=None, out=None, overwrite_input=False):
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
indexer = tuple(indexer)

# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact) and sz > 0:
Expand Down Expand Up @@ -3713,12 +3717,12 @@ def trapz(y, x=None, dx=1.0, axis=-1):
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis)
return ret


Expand Down Expand Up @@ -4009,15 +4013,15 @@ def delete(arr, obj, axis=None):
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
new[tuple(slobj)] = arr[tuple(slobj)]
# copy end chunck
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
new[tuple(slobj)] = arr[tuple(slobj2)]
# copy middle pieces
if step == 1:
pass
Expand All @@ -4027,9 +4031,9 @@ def delete(arr, obj, axis=None):
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
arr = arr[tuple(slobj2)]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
new[tuple(slobj)] = arr[tuple(slobj2)]
if wrap:
return wrap(new)
else:
Expand All @@ -4056,11 +4060,11 @@ def delete(arr, obj, axis=None):
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
new[tuple(slobj)] = arr[tuple(slobj)]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
new[tuple(slobj)] = arr[tuple(slobj2)]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
Expand Down Expand Up @@ -4092,7 +4096,7 @@ def delete(arr, obj, axis=None):

keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
new = arr[tuple(slobj)]

if wrap:
return wrap(new)
Expand Down Expand Up @@ -4267,13 +4271,13 @@ def insert(arr, obj, values, axis=None):
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
new[tuple(slobj)] = arr[tuple(slobj)]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
new[tuple(slobj)] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
new[tuple(slobj)] = arr[tuple(slobj2)]
if wrap:
return wrap(new)
return new
Expand Down Expand Up @@ -4302,8 +4306,8 @@ def insert(arr, obj, values, axis=None):
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
new[tuple(slobj)] = values
new[tuple(slobj2)] = arr

if wrap:
return wrap(new)
Expand Down
5 changes: 5 additions & 0 deletions numpy/ma/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -5347,8 +5347,11 @@ def sort(self, axis=-1, kind='quicksort', order=None,
idx = np.meshgrid(*[np.arange(x) for x in self.shape], sparse=True,
indexing='ij')
idx[axis] = sidx
idx = tuple(idx)

tmp_mask = self._mask[idx].flat
tmp_data = self._data[idx].flat

self._data.flat = tmp_data
self._mask.flat = tmp_mask
return
Expand Down Expand Up @@ -6424,7 +6427,9 @@ def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None
indx = np.meshgrid(*[np.arange(x) for x in a.shape], sparse=True,
indexing='ij')
indx[axis] = sindx
indx = tuple(indx)
return a[indx]

sort.__doc__ = MaskedArray.sort.__doc__


Expand Down
Loading
0