Implement np.diff for single order differences #50569
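For context, a minimal sketch of the behavior this PR adds, assuming a signature of the form torch.diff(input, n=1, dim=-1, prepend=None, append=None) that mirrors np.diff, with only n=1 supported per this PR (tensor values below are illustrative, not taken from the PR):

import torch
import numpy as np

t = torch.tensor([1, 3, 6, 10])

# First-order difference along the last dim: out[i] = t[i + 1] - t[i]
print(torch.diff(t))                              # tensor([2, 3, 4])

# prepend/append are concatenated along dim before differencing
print(torch.diff(t, prepend=torch.tensor([0])))   # tensor([1, 2, 3, 4])

# Matches the NumPy reference for n=1
print(np.diff(t.numpy(), prepend=0))              # [1 2 3 4]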
Changes from all commits: 5e99c87, 1deba99, 3afa2bc, 84f28c4, c948f1d, abe47a0, 19bb4d4, ca2fc91, 1e99f18, c86c91a, 49a17ad, de86880, c301c8e, 04d4936, ad276dc
@@ -469,6 +469,7 @@ Other Operations
 diag_embed
 diagflat
 diagonal
+diff
 einsum
 flatten
 flip
@@ -23,7 +23,7 @@
     do_test_dtypes, IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, slowTest,
     skipCUDAMemoryLeakCheckIf, BytesIOContext,
     skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
-    wrapDeterministicFlagAPITest, DeterministicGuard)
+    wrapDeterministicFlagAPITest, DeterministicGuard, make_tensor)
 from multiprocessing.reduction import ForkingPickler
 from torch.testing._internal.common_device_type import (
     instantiate_device_type_tests,
@@ -4077,6 +4077,87 @@ def logcumsumexp(a, axis):
                'expected scalar_type Double but found Float'):
            torch.logcumsumexp(b, axis, out=inplace_out)

    def _test_diff_numpy(self, t, dims=None):
        # Helper for test_diff to compare with NumPy reference implementation
        def to_np(t):
            if t.dtype == torch.bfloat16:
                return t.to(dtype=torch.float, device="cpu").numpy()
            else:
                return t.cpu().numpy()

        for dim in dims if dims else range(t.dim()):
            prepend = t.narrow(dim, 0, 1)
            append = t.narrow(dim, 0, 1)
            np_t = to_np(t)

            # test when prepend and append's size along dim is 1
            actual = torch.diff(t, dim=dim, prepend=prepend, append=append)
            expected = torch.from_numpy(np.diff(np_t, axis=dim, prepend=to_np(prepend), append=to_np(append)))
            self.assertEqual(actual, expected.to(t.dtype))

            # test when prepend and append's size along dim != 1
            actual = torch.diff(t, dim=dim, prepend=t, append=t)
            expected = torch.from_numpy(np.diff(np_t, axis=dim, prepend=np_t, append=np_t))
            self.assertEqual(actual, expected.to(t.dtype))

    # All tensors appear contiguous on XLA
    @onlyOnCPUAndCUDA
    @dtypes(*torch.testing.get_all_dtypes())
    def test_diff_noncontig(self, device, dtype):
        shapes = (
            (1,),
            (1, 5),
            (3, 5),
            (1, 5, 1),
            (2, 3, 5))

        for shape in shapes:
            contig = make_tensor(shape, device, dtype, low=-9, high=9)

            non_contig = torch.empty(shape + (2, 2), device=device, dtype=dtype)[..., 0]
            non_contig = non_contig.select(-1, -1)
            non_contig.copy_(contig)
            self.assertTrue(not non_contig.is_contiguous() or shape == (1,))

            self._test_diff_numpy(non_contig)

    # RngNormal not implemented for type f16 for XLA
    @dtypes(*torch.testing.get_all_dtypes(include_half=False))
    @dtypesIfCPU(*torch.testing.get_all_dtypes())
    @dtypesIfCUDA(*torch.testing.get_all_dtypes())
    def test_diff(self, device, dtype):
        shapes = (
            (1,),
            (1, 5),
            (3, 5),
            (1, 5, 1),
            (2, 3, 5))

        for shape in shapes:
            contig = make_tensor(shape, device, dtype, low=-9, high=9)
            self._test_diff_numpy(contig)

        t = torch.ones(2, 3)

        with self.assertRaisesRegex(
                RuntimeError, 'diff expects prepend or append to be the same dimension as input'):
            invalid_prepend = torch.tensor([1, 2, 3], device=device, dtype=dtype)
            t.diff(dim=0, prepend=invalid_prepend)

        with self.assertRaisesRegex(
                RuntimeError, 'diff expects the shape of tensor to prepend or append to match that of input'):
            invalid_prepend = torch.tensor([[0, 1]], device=device, dtype=dtype)
            t.diff(dim=0, prepend=invalid_prepend)

        with self.assertRaisesRegex(
                RuntimeError, 'diff only supports n = 1 currently'):
            torch.diff(t, n=2)

        with self.assertRaisesRegex(
                RuntimeError, 'diff expects input to be at least one-dimensional'):
            scalar = torch.tensor(2, device=device, dtype=dtype)
            torch.diff(scalar)

    def _test_large_cum_fn_helper(self, x, fn):
        x_cpu = x.cpu().float()
        expected = fn(x_cpu)

Reviewer comment (on the non_contig assertion in test_diff_noncontig): Cool testing of both contiguous and discontiguous use cases.
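As an aside, a small standalone sketch of why the construction in test_diff_noncontig yields a non-contiguous view (the shapes and stride values shown are illustrative, computed for this particular layout):

import torch

shape = (3, 5)
# Allocate extra trailing dims, then index/select them away so the result
# is a strided view into a larger buffer rather than a densely packed tensor.
buf = torch.empty(shape + (2, 2))
non_contig = buf[..., 0].select(-1, -1)

print(non_contig.shape)            # torch.Size([3, 5])
print(non_contig.stride())         # (20, 4) -- not the contiguous (5, 1)
print(non_contig.is_contiguous())  # False; a (1,)-shaped tensor built this way
                                   # still reports True, hence the `or shape == (1,)`
                                   # escape hatch in the test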
@@ -604,6 +604,33 @@ def sample_inputs_gather(op_info, device, dtype, requires_grad):
                    0, torch.tensor(0, dtype=torch.int64, device=device))),
    )

def sample_inputs_diff(op_info, device, dtype, requires_grad):
    test_cases = (
        ((1,), 0, None, None),
        ((S,), 0, None, None),
        ((S, 1), 0, None, None),
        ((S, 1), 1, None, None),
        ((S, S), 0, None, None),
        ((S, S), 1, None, None),
        ((S, S), 0, (1, S), (2, S)),
        ((S, S), 0, None, (2, S)),
        ((S, S, S), 1, None, None),
        ((S, S, S), 1, (S, 1, S), (S, 1, S)),)

    sample_inputs = []
    for size, dim, size_prepend, size_append in test_cases:
        args = (make_tensor(size, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad), 1, dim,
                make_tensor(size_prepend, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad) if size_prepend else None,
                make_tensor(size_append, device, dtype,
                            low=None, high=None,
                            requires_grad=requires_grad) if size_append else None)
        sample_inputs += [SampleInput(args)]

    return tuple(sample_inputs)

def sample_inputs_index_select(op_info, device, dtype, requires_grad):
    return (SampleInput((make_tensor((S, S, S), device, dtype,
@@ -1179,6 +1206,11 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
            SkipInfo('TestCommon', 'test_variant_consistency_jit',
                     device_type='cuda', dtypes=[torch.float16]),
        )),
+    OpInfo('diff',
+           op=torch.diff,
+           dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
+           sample_inputs_func=sample_inputs_diff,
+           test_inplace_grad=False),
     UnaryUfuncInfo('exp',
                    ref=np_unary_ufunc_integer_promotion_wrapper(np.exp),
                    dtypes=all_types_and_complex_and(torch.bool, torch.half),

Reviewer comment (on the new OpInfo entry): Not for this PR, but the test for inplace grads should be skipped automatically if the op doesn't have an inplace variant.
@@ -2005,6 +2037,9 @@ def __len__(self):
def ident(x):
    return x

# Do NOT add to this list. Method tests are being DEPRECATED and replaced by OpInfos.
# See https://github.com/pytorch/pytorch/wiki/Writing-tests-in-PyTorch-1.8
#
# (
#   method name,
#   input size/constructing fn,

Reviewer comment (on the deprecation note): Thanks!
Reviewer comment: cc @rgommers for a possible issue with NumPy's signature. In np.diff, both prepend and append default to "no value", which is represented by:
https://github.com/numpy/numpy/blob/1de46fe2feee9d3c500a83331ac9b75af5aef947/numpy/_globals.py#L57
The comment there indicates the sentinel is intended for "deprecated keywords." However, these defaults appear to come from the PR that added the kwargs: numpy/numpy#8206. @rgommers, it seems the NumPy signature would ideally have prepend=None and append=None?
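As a quick local check of those defaults (a sketch, assuming a NumPy version in which np.diff already has the prepend/append keywords):

import numpy as np

help(np.diff)        # docstring signature: diff(a, n=1, axis=-1, prepend=<no value>, append=<no value>)
print(np._NoValue)   # <no value> -- the module-level sentinel defined in numpy/_globals.py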
Reply: That comment is a little misleading, or at least incomplete. _NoValue is mainly used when keywords are added to a function and the function forwards its keywords somewhere. See for example np.sum, which forwards to the .sum method of its input (which can be a torch.Tensor). Adding keywords to the NumPy function that are not yet present in all other/downstream libraries implementing the same functionality would then break previously working code. I'll go fix the code comment. Wherever you see _NoValue, just read None.

The PR gives an example, not reproduced here, that would not work with prepend=None for object arrays; a little far-fetched, but unfortunately it works. Object arrays are very annoying.
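To illustrate the general point about _NoValue versus None, here is a hypothetical, simplified sketch (not NumPy's actual implementation; the helper name forward_diff_kwargs is made up): with None as the default, an omitted argument cannot be distinguished from an explicitly passed None, which is itself a legitimate value for object arrays.

import numpy as np

_NoValue = np._NoValue  # NumPy's "argument was not supplied" sentinel

def forward_diff_kwargs(a, prepend=_NoValue, append=_NoValue):
    # Only forward keywords the caller actually supplied, so array-likes
    # whose own diff() predates prepend/append keep working unchanged.
    kwargs = {}
    if prepend is not _NoValue:
        kwargs['prepend'] = prepend
    if append is not _NoValue:
        kwargs['append'] = append
    return np.diff(np.asarray(a), **kwargs)

x = np.arange(5)
print(forward_diff_kwargs(x))              # [1 1 1 1] -- nothing forwarded
print(forward_diff_kwargs(x, prepend=0))   # [0 1 1 1 1]

# With a None default, forward_diff_kwargs(x, prepend=None) would be
# indistinguishable from omitting prepend, even though None can be a
# meaningful value to prepend to an object array.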