Fix signature of torch.sparse_coo_tensor() by ILCSFNO · Pull Request #152681 · pytorch/pytorch · GitHub
[go: up one dir, main page]

Skip to content

Fix signature of torch.sparse_coo_tensor() #152681

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 5 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 6 additions & 5 deletions aten/src/ATen/native/sparse/SparseTensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -356,13 +356,14 @@ Tensor sparse_coo_tensor(const Tensor& indices, const Tensor& values_,
computed_sizes[static_cast<size_t>(sparse_dim + d)] = values.size(d + 1);
}

return at::_sparse_coo_tensor_with_dims_and_tensors(
sparse_dim,
dense_dim,
computed_sizes,
return at::native::_sparse_coo_tensor_unsafe(
indices,
values,
values.options().layout(kSparse),
computed_sizes,
optTypeMetaToScalarType(options.dtype_opt()),
options.layout_opt(),
options.device_opt(),
options.pinned_memory_opt(),
is_coalesced);
}

Expand Down
22 changes: 13 additions & 9 deletions test/test_sparse.py
Original file line number Diff line number Diff line change
Expand Up @@ -440,18 +440,22 @@ def test_ctor_is_coalesced_with_gradcheck(self, device, dtype, coalesced):
self.assertEqual(t.is_coalesced(), coalesced)

def func(indices, values, shape, is_coalesced):
s = torch.sparse_coo_tensor(indices, values, shape, check_invariants=True, is_coalesced=is_coalesced)
if shape is None:
s = torch.sparse_coo_tensor(indices, values, check_invariants=True, is_coalesced=is_coalesced)
else:
s = torch.sparse_coo_tensor(indices, values, shape, check_invariants=True, is_coalesced=is_coalesced)
self.assertEqual(s.is_coalesced(), is_coalesced)
return s.to_dense(masked_grad=False)

if coalesced:
torch.autograd.gradcheck(func, (t._indices(), t._values().requires_grad_(True), t.shape, False))
torch.autograd.gradcheck(func, (t._indices(), t._values().requires_grad_(True), t.shape, True))
else:
torch.autograd.gradcheck(func, (t._indices(), t._values().requires_grad_(True), t.shape, False))
with self.assertRaisesRegex(RuntimeError,
"cannot set is_coalesced to true if indices correspond to uncoalesced COO tensor"):
torch.autograd.gradcheck(func, (t._indices(), t._values().requires_grad_(True), t.shape, True))
for shape in {t.shape, None}:
if coalesced:
torch.autograd.gradcheck(func, (t._indices(), t._values().requires_grad_(True), shape, False))
torch.autograd.gradcheck(func, (t._indices(), t._values().requires_grad_(True), shape, True))
else:
torch.autograd.gradcheck(func, (t._indices(), t._values().requires_grad_(True), shape, False))
with self.assertRaisesRegex(RuntimeError,
"cannot set is_coalesced to true if indices correspond to uncoalesced COO tensor"):
torch.autograd.gradcheck(func, (t._indices(), t._values().requires_grad_(True), shape, True))

@dtypes(*floating_and_complex_types_and(torch.float16, torch.bfloat16))
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupport triggers assertion error")
Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/autograd/python_torch_functions_manual.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -221,7 +221,7 @@ static PyObject* THPVariable_sparse_coo_tensor(
PyObject* kwargs) {
HANDLE_TH_ERRORS
static PythonArgParser parser({
"sparse_coo_tensor(PyObject* indices, PyObject* values, *, ScalarType dtype=None, Device? device=None, bool pin_memory=False, bool requires_grad=False, bool check_invariants=None)",
"sparse_coo_tensor(PyObject* indices, PyObject* values, *, ScalarType dtype=None, Device? device=None, bool pin_memory=False, bool requires_grad=False, bool check_invariants=None, bool is_coalesced=None)",
"sparse_coo_tensor(PyObject* indices, PyObject* values, IntArrayRef size, *, ScalarType dtype=None, Device? device=None, bool pin_memory=False, bool requires_grad=False, bool check_invariants=None, bool is_coalesced=None)",
"sparse_coo_tensor(IntArrayRef size, *, ScalarType dtype=None, Device? device=None, bool requires_grad=False, bool check_invariants=None)",
});
Expand Down
4 changes: 3 additions & 1 deletion torch/csrc/utils/tensor_new.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1157,6 +1157,7 @@ Tensor sparse_coo_tensor_ctor(
ARG_PIN_MEMORY,
ARG_REQUIRES_GRAD,
ARG_CHECK_INVARIANTS,
ARG_IS_COALESCED,
ARGS_COUNT
};
enum {
Expand Down Expand Up @@ -1218,7 +1219,8 @@ Tensor sparse_coo_tensor_ctor(
return at::sparse_coo_tensor(
indices,
values,
values.options().layout(at::kSparse).pinned_memory(pin_memory))
values.options().layout(at::kSparse).pinned_memory(pin_memory),
r.toBoolOptional(ARG_IS_COALESCED))
.set_requires_grad(r.toBool(ARG_REQUIRES_GRAD));
} else if (r.idx == 1) {
bool pin_memory = r.toBool(ARG_PIN_MEMORY1);
Expand Down
Loading
0