|
7283 | 7283 |
|
7284 | 7284 | - func: _sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor |
7285 | 7285 | dispatch: |
7286 | | - SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_sparse |
| 7286 | + SparseCPU, SparseCUDA, SparseMPS, SparseMeta, Meta: new_with_dims_sparse |
7287 | 7287 | autogen: _sparse_coo_tensor_with_dims.out |
7288 | 7288 |
|
7289 | 7289 | - func: _sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor |
7290 | 7290 | dispatch: |
7291 | | - SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_and_tensor_sparse_symint |
| 7291 | + SparseCPU, SparseCUDA, SparseMPS, SparseMeta, Meta: new_with_dims_and_tensor_sparse_symint |
7292 | 7292 | autogen: _sparse_coo_tensor_with_dims_and_tensors.out |
7293 | 7293 |
|
7294 | 7294 | - func: sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) |
7295 | 7295 | use_const_ref_for_mutable_tensors: True |
7296 | 7296 | variants: method |
7297 | 7297 | dispatch: |
7298 | | - SparseCPU, SparseCUDA, SparseMeta: sparse_resize_ |
| 7298 | + SparseCPU, SparseCUDA, SparseMPS, SparseMeta: sparse_resize_ |
7299 | 7299 | autogen: sparse_resize, sparse_resize.out |
7300 | 7300 |
|
7301 | 7301 | - func: sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) |
7302 | 7302 | use_const_ref_for_mutable_tensors: True |
7303 | 7303 | variants: method |
7304 | 7304 | dispatch: |
7305 | | - SparseCPU, SparseCUDA, SparseMeta: sparse_resize_and_clear_ |
| 7305 | + SparseCPU, SparseCUDA, SparseMPS, SparseMeta: sparse_resize_and_clear_ |
7306 | 7306 | autogen: sparse_resize_and_clear, sparse_resize_and_clear.out |
7307 | 7307 |
|
7308 | 7308 | - func: sparse_mask(Tensor self, Tensor mask) -> Tensor |
|
7338 | 7338 | - func: sparse_dim(Tensor self) -> int |
7339 | 7339 | variants: method |
7340 | 7340 | dispatch: |
7341 | | - SparseCPU, SparseCUDA, SparseMeta: sparse_dim_sparse |
7342 | | - SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: sparse_dim_sparse_csr |
| 7341 | + SparseCPU, SparseCUDA, SparseMPS, SparseMeta: sparse_dim_sparse |
| 7342 | + SparseCsrCPU, SparseCsrCUDA, SparseCsrMPS, SparseCsrMeta: sparse_dim_sparse_csr |
7343 | 7343 | CompositeExplicitAutograd: sparse_dim_default |
7344 | 7344 | device_check: NoCheck |
7345 | 7345 | device_guard: False |
|
7372 | 7372 | - func: _nnz(Tensor self) -> int |
7373 | 7373 | variants: method |
7374 | 7374 | dispatch: |
7375 | | - SparseCPU, SparseCUDA, SparseMeta: _nnz_sparse |
7376 | | - SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: _nnz_sparse_csr |
| 7375 | + SparseCPU, SparseCUDA, SparseMPS, SparseMeta: _nnz_sparse |
| 7376 | + SparseCsrCPU, SparseCsrCUDA, SparseCsrMPS, SparseCsrMeta: _nnz_sparse_csr |
7377 | 7377 | device_check: NoCheck |
7378 | 7378 | device_guard: False |
7379 | 7379 |
|
|
7394 | 7394 | - func: is_coalesced(Tensor self) -> bool |
7395 | 7395 | variants: method |
7396 | 7396 | dispatch: |
7397 | | - SparseCPU, SparseCUDA, SparseMeta: is_coalesced_sparse |
| 7397 | + SparseCPU, SparseCUDA, SparseMPS, SparseMeta: is_coalesced_sparse |
7398 | 7398 | CompositeExplicitAutograd: is_coalesced_default |
7399 | 7399 | device_check: NoCheck |
7400 | 7400 | device_guard: False |
7401 | 7401 |
|
7402 | 7402 | - func: _indices(Tensor(a) self) -> Tensor(a) |
7403 | 7403 | variants: method |
7404 | 7404 | dispatch: |
7405 | | - SparseCPU, SparseCUDA, SparseMeta: _indices_sparse |
| 7405 | + SparseCPU, SparseCUDA, SparseMPS, SparseMeta: _indices_sparse |
7406 | 7406 | device_check: NoCheck |
7407 | 7407 | device_guard: False |
7408 | 7408 |
|
7409 | 7409 | - func: _values(Tensor(a) self) -> Tensor(a) |
7410 | 7410 | variants: method |
7411 | 7411 | dispatch: |
7412 | | - SparseCPU, SparseCUDA, SparseMeta: _values_sparse |
| 7412 | + SparseCPU, SparseCUDA, SparseMPS, SparseMeta: _values_sparse |
7413 | 7413 | device_check: NoCheck |
7414 | 7414 | device_guard: False |
7415 | 7415 |
|
|
7419 | 7419 | - func: _coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!) |
7420 | 7420 | variants: method |
7421 | 7421 | dispatch: |
7422 | | - SparseCPU, SparseCUDA, SparseMeta: _coalesced_sparse_ |
| 7422 | + SparseCPU, SparseCUDA, SparseMPS, SparseMeta: _coalesced_sparse_ |
7423 | 7423 | device_check: NoCheck |
7424 | 7424 | device_guard: False |
7425 | 7425 | autogen: _coalesced, _coalesced.out |
|
7508 | 7508 | - func: _to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor |
7509 | 7509 | variants: method |
7510 | 7510 | dispatch: |
7511 | | - CPU, CUDA: dense_to_sparse |
7512 | | - SparseCPU, SparseCUDA: sparse_coo_to_sparse |
7513 | | - SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: sparse_compressed_to_sparse |
| 7511 | + CPU, CUDA, MPS: dense_to_sparse |
| 7512 | + SparseCPU, SparseCUDA, SparseMPS: sparse_coo_to_sparse |
| 7513 | + SparseCsrCPU, SparseCsrCUDA, SparseCsrMPS, SparseCsrMeta: sparse_compressed_to_sparse |
7514 | 7514 | autogen: _to_sparse.sparse_dim_out |
7515 | 7515 |
|
7516 | 7516 | - func: to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor |
|
7520 | 7520 | - func: _to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor |
7521 | 7521 | variants: method |
7522 | 7522 | dispatch: |
7523 | | - CPU, CUDA: dense_to_sparse |
7524 | | - SparseCPU, SparseCUDA: sparse_coo_to_sparse |
| 7523 | + CPU, CUDA, MPS: dense_to_sparse |
| 7524 | + SparseCPU, SparseCUDA, SparseMPS: sparse_coo_to_sparse |
7525 | | - SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: sparse_compressed_to_sparse |
| 7525 | + SparseCsrCPU, SparseCsrCUDA, SparseCsrMPS, SparseCsrMeta: sparse_compressed_to_sparse |
7526 | 7526 | autogen: _to_sparse.out |
7527 | 7527 |
|
|
0 commit comments