ENH: `__array_function__` for `np.einsum` and `np.block` by shoyer · Pull Request #12163 · numpy/numpy · GitHub

ENH: __array_function__ for np.einsum and np.block #12163


Merged · 6 commits · Oct 26, 2018
21 changes: 21 additions & 0 deletions numpy/core/einsumfunc.py
@@ -9,6 +9,7 @@
from numpy.compat import basestring
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asanyarray, tensordot
from numpy.core.overrides import array_function_dispatch

__all__ = ['einsum', 'einsum_path']

@@ -689,6 +690,17 @@ def _parse_einsum_input(operands):
    return (input_subscripts, output_subscript, operands)


def _einsum_path_dispatcher(*operands, **kwargs):
    # NOTE: technically, we should only dispatch on array-like arguments, not
    # subscripts (given as strings). But separating operands into
    # arrays/subscripts is a little tricky/slow (given einsum's two supported
    # signatures), so as a practical shortcut we dispatch on everything.
    # Strings will be ignored for dispatching since they don't define
    # __array_function__.
    return operands


@array_function_dispatch(_einsum_path_dispatcher)
def einsum_path(*operands, **kwargs):
"""
einsum_path(subscripts, *operands, optimize='greedy')
@@ -980,7 +992,16 @@ def einsum_path(*operands, **kwargs):
    return (path, path_print)


def _einsum_dispatcher(*operands, **kwargs):
    # Arguably we dispatch on more arguments than we really should; see note in
    # _einsum_path_dispatcher for why.
    for op in operands:
        yield op
    yield kwargs.get('out')


# Rewrite einsum to handle different cases
@array_function_dispatch(_einsum_dispatcher)
def einsum(*operands, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
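To illustrate what these dispatchers enable, here is a minimal sketch, not part of the PR, of a duck array that opts into the __array_function__ protocol and has np.einsum routed to it. The class name MyEinsumArray is purely illustrative, and the example assumes a NumPy version where the protocol is active by default (1.17 or later). Note that the subscript string travels through the dispatcher but never affects dispatch, because str does not define __array_function__.

import numpy as np

class MyEinsumArray(object):
    # Illustrative duck array, not from the PR.
    def __init__(self, data):
        self.data = np.asarray(data)

    def __array_function__(self, func, types, args, kwargs):
        # A real implementation should return NotImplemented for unknown
        # types; this sketch simply unwraps itself and defers to NumPy.
        unwrapped = [a.data if isinstance(a, MyEinsumArray) else a
                     for a in args]
        print("dispatched:", func.__name__)
        return func(*unwrapped, **kwargs)

a = MyEinsumArray(np.arange(6).reshape(2, 3))
b = MyEinsumArray(np.arange(12).reshape(3, 4))
np.einsum('ij,jk->ik', a, b)  # prints "dispatched: einsum"

Because _einsum_dispatcher also yields kwargs.get('out'), passing a duck array as the out= argument triggers dispatch in the same way.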
14 changes: 13 additions & 1 deletion numpy/core/shape_base.py
@@ -631,7 +631,19 @@ def _block(arrays, max_depth, result_ndim, depth=0):
    return _atleast_nd(arrays, result_ndim)


# TODO: support array_function_dispatch
def _block_dispatcher(arrays):
    # Use type(...) is list to match the behavior of np.block(), which special
    # cases list specifically rather than allowing for generic iterables or
    # tuple. Also, we know that list.__array_function__ will never exist.
    if type(arrays) is list:
        for subarrays in arrays:
            for subarray in _block_dispatcher(subarrays):
                yield subarray
    else:
        yield arrays

Review discussion on the type(arrays) is list check:

Contributor: I'm not sure if we'll be breaking backward compatibility by doing this. Maybe isinstance(arrays, collections.Iterable) and not hasattr(arrays, '__array_function__')?

Member Author (shoyer): My reasoning for picking type(arrays) is list is that it matches np.block(), which hard codes list (not even tuple). And we know that list.__array_function__ will never exist.

Contributor: Okay, in that case ignore my comment. I was worried about the case where we might be inadvertently supporting other kinds of containers in the function but not the dispatcher. A comment would be nice, in any case.

Member Author (shoyer): Yes, a comment is definitely a good idea here. Done!


@array_function_dispatch(_block_dispatcher)
def block(arrays):
"""
Assemble an nd-array from nested lists of blocks.
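As a quick sketch of the behavior discussed in the review thread above, here is an illustrative duck array, not from the PR, that takes over np.block (again assuming NumPy 1.17 or later, where __array_function__ is enabled by default). Because _block_dispatcher recurses through nested lists, a duck array placed anywhere in the nesting is enough to trigger dispatch:

import numpy as np

class BlockAware(object):
    # Illustrative duck array, not from the PR.
    def __array_function__(self, func, types, args, kwargs):
        if func is np.block:
            return "handled by BlockAware"
        return NotImplemented

leaf = BlockAware()
# _block_dispatcher walks the nested lists, finds the BlockAware leaves,
# and NumPy routes the whole call to BlockAware.__array_function__.
print(np.block([[leaf, leaf], [leaf, leaf]]))  # -> handled by BlockAware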
17 changes: 15 additions & 2 deletions numpy/core/tests/test_shape_base.py
@@ -6,8 +6,7 @@
    array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
    newaxis, concatenate, stack
    )

from numpy.core.shape_base import (_block_setup,
from numpy.core.shape_base import (_block_dispatcher, _block_setup,
                                   _block_concatenate, _block_slicing)
from numpy.testing import (
    assert_, assert_raises, assert_array_equal, assert_equal,
@@ -677,3 +676,17 @@ def test_block_memory_order(self, block):

        assert block(b_c).flags['C_CONTIGUOUS']
        assert block(b_f).flags['F_CONTIGUOUS']


def test_block_dispatcher():
    class ArrayLike(object):
        pass
    a = ArrayLike()
    b = ArrayLike()
    c = ArrayLike()
    assert_equal(list(_block_dispatcher(a)), [a])
    assert_equal(list(_block_dispatcher([a])), [a])
    assert_equal(list(_block_dispatcher([a, b])), [a, b])
    assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
    # don't recurse into non-lists
    assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
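Conversely, ordinary inputs are unaffected: with plain ndarrays, the dispatcher still walks the nested lists, but none of the yielded arguments define __array_function__, so np.block falls through to its default implementation. A one-line sanity check (assuming a NumPy version with the protocol active):

import numpy as np

assert np.block([[np.ones((2, 2)), np.zeros((2, 2))]]).shape == (2, 4)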