8000 Implement `np.diff` for single order differences by soulitzer · Pull Request #50569 · pytorch/pytorch · GitHub
[go: up one dir, main page]

Skip to content

Implement np.diff for single order differences #50569

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 15 commits into from
Closed
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Remove hacky wrapper, improve docs
  • Loading branch information
soulitzer committed Jan 21, 2021
commit 84f28c4c784bc1e8e31f356ca64a1ead781441f7
54 changes: 28 additions & 26 deletions aten/src/ATen/native/ReduceOps.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -413,15 +413,15 @@ Tensor cummaxmin_backward(const Tensor& grad, const Tensor& input, const Tensor&
return result.scatter_add_(dim, indices, grad);
}

static Tensor prepend_append_on_dim(const Tensor& self, const Tensor& prepend, const Tensor& append, int64_t dim) {
// Prepends and/or appends to self along a given dimension using cat
TORCH_INTERNAL_ASSERT(prepend.defined() || append.defined(), "either prepend or append must be defined");
if (!prepend.defined() && append.defined()) {
return at::cat({self, append}, dim);
} else if (prepend.defined() && !append.defined()) {
return at::cat({prepend, self}, dim);
static Tensor prepend_append_on_dim(const Tensor& self, const c10::optional<Tensor>& prepend, const c10::optional<Tensor>& append, int64_t dim) {
// Helper for diff that handles prepending and appending when at least one is present
TORCH_INTERNAL_ASSERT(prepend.has_value() || append.has_value(), "either prepend or append must be has_value");
if (!prepend.has_value() && append.has_value()) {
return at::cat({self, append.value()}, dim);
} else if (prepend.has_value() && !append.has_value()) {
return at::cat({prepend.value(), self}, dim);
} else {
return at::cat({prepend, self, append}, dim);
return at::cat({prepend.value(), self, append.value()}, dim);
}
}

Expand All @@ -448,8 +448,8 @@ static inline Tensor diff_helper(const Tensor& self, int64_t n, int64_t dim) {
return at::narrow(self, dim, 1, out_len) - at::narrow(self, dim, 0, out_len);
}

Tensor diff_tensor_tensor(const Tensor& self, int64_t n, int64_t dim, const Tensor& prepend, const Tensor& append) {
if (!prepend.defined() && !append.defined()) {
Tensor diff_tensor_tensor(const Tensor& self, int64_t n, int64_t dim, const c10::optional<Tensor>& prepend, const c10::optional<Tensor>& append) {
if (!prepend.has_value() && !append.has_value()) {
return diff_helper(self, n, dim);
} else {
auto a = prepend_append_on_dim(self, prepend, append, dim);
Expand All @@ -465,46 +465,48 @@ static inline Tensor& diff_out_helper(const Tensor& self, int64_t n, int64_t dim
return at::sub_out(result, at::narrow(self, dim, 1, out_len), at::narrow(self, dim, 0, out_len));
}

Tensor& diff_tensor_tensor_out(Tensor& result, const Tensor& self, int64_t n, int64_t dim, const Tensor& prepend, const Tensor& append) {
if (!prepend.defined() && !append.defined()) {
Tensor& diff_tensor_tensor_out(const Tensor& self, int64_t n, int64_t dim, const c10::optional<Tensor>& prepend, const c10::optional<Tensor>& append, Tensor& result) {
if (!prepend.has_value() && !append.has_value()) {
return diff_out_helper(self, n, dim, result);
} else {
auto a = prepend_append_on_dim(self, prepend, append, dim);
return diff_out_helper(a, n, dim, result);
}
}

// Broadcasts a scalar to the shape of a tensor, and then narrow the size along dim to be one
static Tensor diff_broadcast_scalar(Scalar scalar, const Tensor& tensor, int64_t dim) {
// Helper for diff to handle when prepend/append are scalars
return at::scalar_tensor(scalar, tensor.options()).broadcast_to(tensor.sizes()).narrow(dim, 0, 1);
}

Tensor diff_scalar_scalar(const Tensor& self, int64_t n, int64_t dim, c10::optional<Scalar> prepend, c10::optional<Scalar> append) {
return diff_tensor_tensor(self, n, dim,
prepend.has_value() ? diff_broadcast_scalar(prepend.value(), self, dim) : Tensor{},
append.has_value() ? diff_broadcast_scalar(append.value(), self, dim) : Tensor{});
prepend.has_value() ? c10::optional<Tensor>(diff_broadcast_scalar(prepend.value(), self, dim)) : c10::nullopt,
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@smessmer be nice to have a .map() polyfill for these, any plans? :)

append.has_value() ? c10::optional<Tensor>(diff_broadcast_scalar(append.value(), self, dim)) : c10::nullopt);
}

Tensor diff_scalar_tensor(const Tensor& self, int64_t n, int64_t dim, c10::optional<Scalar> prepend, const Tensor& append) {
return diff_tensor_tensor(self, n, dim, prepend.has_value() ? diff_broadcast_scalar(prepend.value(), self, dim) : Tensor{}, append);
Tensor diff_scalar_tensor(const Tensor& self, int64_t n, int64_t dim, c10::optional<Scalar> prepend, const c10::optional<Tensor>& append) {
return diff_tensor_tensor(self, n, dim, prepend.has_value() ? c10::optional<Tensor>(diff_broadcast_scalar(prepend.value(), self, dim)) : c10::nullopt, append);
}

Tensor diff_tensor_scalar(const Tensor& self, int64_t n, int64_t dim, const Tensor& prepend, c10::optional<Scalar> append) {
return diff_tensor_tensor(self, n, dim, prepend, append.has_value() ? diff_broadcast_scalar(append.value(), self, dim) : Tensor{});
Tensor diff_tensor_scalar(const Tensor& self, int64_t n, int64_t dim, const c10::optional<Tensor>& prepend, c10::optional<Scalar> append) {
return diff_tensor_tensor(self, n, dim, prepend, append.has_value() ? c10::optional<Tensor>(diff_broadcast_scalar(append.value(), self, dim)) : c10::nullopt);
}

Tensor& diff_scalar_scalar_out(const Tensor& self, int64_t n, int64_t dim, c10::optional<Scalar> prepend, c10::optional<Scalar> append, Tensor& result) {
return diff_tensor_tensor_out(result, self, n, dim,
prepend.has_value() ? diff_broadcast_scalar(prepend.value(), self, dim) : Tensor{},
append.has_value() ? diff_broadcast_scalar(append.value(), self, dim) : Tensor{});
return diff_tensor_tensor_out(self, n, dim,
prepend.has_value() ? c10::optional<Tensor>(diff_broadcast_scalar(prepend.value(), self, dim)) : c10::nullopt,
append.has_value() ? c10::optional<Tensor>(diff_broadcast_scalar(append.value(), self, dim)) : c10::nullopt, result);
}

Tensor& diff_scalar_tensor_out(Tensor& result, const Tensor& self, int64_t n, int64_t dim, c10::optional<Scalar> prepend, const Tensor& append) {
return diff_tensor_tensor_out(result, self, n, dim, prepend.has_value() ? diff_broadcast_scalar(prepend.value(), self, dim) : Tensor{}, append);
Tensor& diff_scalar_tensor_out(const Tensor& self, int64_t n, int64_t dim, c10::optional<Scalar> prepend, const c10::optional<Tensor>& append, Tensor& result) {
return diff_tensor_tensor_out(self, n, dim,
prepend.has_value() ? c10::optional<Tensor>(diff_broadcast_scalar(prepend.value(), self, dim)) : c10::nullopt, append, result);
}

Tensor& diff_tensor_scalar_out(Tensor& result, const Tensor& self, int64_t n, int64_t dim, const Tensor& prepend, c10::optional<Scalar> append) {
return diff_tensor_tensor_out(result, self, n, dim, prepend, append.has_value() ? diff_broadcast_scalar(append.value(), self, dim) : Tensor{});
Tensor& diff_tensor_scalar_out(const Tensor& self, int64_t n, int64_t dim, const c10::optional<Tensor>& prepend, c10::optional<Scalar> append, Tensor& result) {
return diff_tensor_tensor_out(self, n, dim, prepend,
append.has_value() ? c10::optional<Tensor>(diff_broadcast_scalar(append.value(), self, dim)) : c10::nullopt, result);
}

// ALL REDUCE #################################################################
Expand Down
6 changes: 0 additions & 6 deletions aten/src/ATen/native/native_functions.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1374,20 +1374,17 @@
variants: method

- func: diff.Tensor_Tensor(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
variants: function, method
dispatch:
Math: diff_tensor_tensor

- func: diff.Tensor_Scalar(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Scalar? append=None) -> Tensor
use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
cpp_no_default_args: ['n', 'dim', 'prepend', 'append']
variants: function, method
dispatch:
Math: diff_tensor_scalar

- func: diff.Scalar_Tensor(Tensor self, int n=1, int dim=-1, Scalar? prepend=None, Tensor? append=None) -> Tensor
use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
cpp_no_default_args: ['n', 'dim', 'prepend', 'append']
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can "append" not have a default here? I think prepend being scalar is enough to disambiguate the other overloads.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah yeah, good catch. But looks like from the comments above, we might end up just landing only the Tensor versions for now.

variants: function, method
dispatch:
Expand All @@ -1400,20 +1397,17 @@
Math: diff_scalar_scalar

- func: diff.Tensor_Tensor_out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
variants: function
dispatch:
Math: diff_tensor_tensor_out

- func: diff.Tensor_Scalar_out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Scalar? append=None, *, Tensor(a!) out) -> Tensor(a!)
use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
cpp_no_default_args: ['n', 'dim', 'prepend', 'append']
variants: function
dispatch:
Math: diff_tensor_scalar_out

- func: diff.Scalar_Tensor_out(Tensor self, int n=1, int dim=-1, Scalar? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
cpp_no_default_args: ['n', 'dim', 'prepend', 'append']
variants: function
dispatch:
Expand Down
9 changes: 9 additions & 0 deletions torch/_torch_docs.py
Original file line number Diff line number Diff line change
Expand Up @@ -2648,6 +2648,15 @@ def merge_dicts(*dicts):

Args:
input (Tensor): the tensor to compute the differences on
n (int, optional): the number of times to recursively compute difference
dim (int, optional): the dimension with respect to compute the difference.
Default is the last dimension.
prepend, append (Tensor or Scalar, optional): values to prepend or append to
:attr:`input` along :attr:`dim` before computing the difference. Scalar
values are expanded to tensors with size 1 along :attr:`dim` and the shape of
:attr:`input` along all other dimensions. Otherwise, its dimension must be
equivalent to that of input, and its shape must match input's shape except on
:attr:`dim`.

Keyword args:
{out}
Expand Down
22 changes: 11 additions & 11 deletions torch/testing/_internal/common_methods_invocations.py
Original file line number Diff line number Diff line change
Expand Up @@ -517,20 +517,20 @@ def sample_inputs_diff(op_info, device, dtype, requires_grad):
((S, S, S), 1, None, None),
((S, S, S), 1, (S, 1, S), (S, 1, S)),)

sample_inputs = tuple()
sample_inputs = []
for size, dim, size_prepend, size_append in test_cases:
args = (make_tensor(size, device, dtype,
low=None, high=None,
requires_grad=requires_grad), 1, dim)
args += (make_tensor(size_prepend, device, dtype,
low=None, high=None,
requires_grad=requires_grad),) if size_prepend else (None,)
args += (make_tensor(size_append, device, dtype,
low=None, high=None,
requires_grad=requires_grad),) if size_append else (None,)
sample_inputs += (SampleInput(args),)

return sample_inputs
requires_grad=requires_grad), 1, dim,
make_tensor(size_prepend, device, dtype,
low=None, high=None,
requires_grad=requires_grad) if size_prepend else None,
make_tensor(size_append, device, dtype,
low=None, high=None,
requires_grad=requires_grad) if size_append else None)
sample_inputs += [SampleInput(args)]

return tuple(sample_inputs)

def sample_inputs_index_select(op_info, device, dtype, requires_grad):
return (SampleInput((make_tensor((S, S, S), device, dtype,
Expand Down
0