@@ -10370,7 +10370,8 @@ def error_inputs_mean(op_info, device, **kwargs):
                    supports_forward_ad=True,
                    supports_fwgrad_bwgrad=True,
                    dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
-                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+                   dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
+                   backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
                    assert_autodiffed=True,
                    decorators=[
                        DecorateInfo(
@@ -10391,6 +10392,18 @@ def error_inputs_mean(op_info, device, **kwargs):
                                     active_if=IS_WINDOWS),
                        DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
                                     'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
+                       # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
+                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency',
+                                    dtypes=(torch.chalf,)),
+                       # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
+                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace',
+                                    dtypes=(torch.chalf,)),
+                       # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
+                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out',
+                                    dtypes=(torch.chalf,)),
+                       # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf'
+                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary',
+                                    dtypes=(torch.chalf,)),
                    )),
     # NOTE: derivative for inplace asinh is not implemented
     UnaryUfuncInfo('asinh',
@@ -14773,7 +14786,11 @@ def error_inputs_mean(op_info, device, **kwargs):
     UnaryUfuncInfo('sin',
                    ref=np.sin,
                    dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
-                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+                   dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
+                   # TODO: Add torch.chalf backward dtype support. Currently, we get:
+                   # AssertionError: The supported dtypes for sin on device type cuda are incorrect!
+                   # The following dtypes did not work in backward but are listed by the OpInfo: {torch.complex32}.
+                   backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
                    assert_autodiffed=True,
                    handles_large_floats=False,
                    supports_sparse=True,
@@ -14790,6 +14807,18 @@ def error_inputs_mean(op_info, device, **kwargs):
                                     dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS),
                        DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
                                     'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
+                       # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
+                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency',
+                                    dtypes=(torch.chalf,)),
+                       # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
+                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace',
+                                    dtypes=(torch.chalf,)),
+                       # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
+                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out',
+                                    dtypes=(torch.chalf,)),
+                       # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf'
+                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary',
+                                    dtypes=(torch.chalf,)),
                    ),
                    decorators=(precisionOverride({torch.bfloat16: 1e-2}),)),
     UnaryUfuncInfo('sinc',
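
Note on the DecorateInfo entries above: each one attaches a decorator (here unittest.expectedFailure) to a specific test class, test name, and dtype combination, so only the torch.chalf variants of the sparse CSR tests are marked as expected failures. The sketch below is a minimal, self-contained approximation of that matching logic, assuming a simplified model of the OpInfo machinery; DecorateInfoSketch, maybe_decorate, and the plain-string dtype tags are hypothetical stand-ins, not PyTorch's actual classes.

import unittest

class DecorateInfoSketch:
    """Holds a decorator plus the (test class, test name, dtypes) filters
    that determine which test variants it applies to."""
    def __init__(self, decorator, cls_name=None, test_name=None, *, dtypes=None):
        self.decorator = decorator      # e.g. unittest.expectedFailure
        self.cls_name = cls_name        # test class to match; None matches all
        self.test_name = test_name      # test method to match; None matches all
        self.dtypes = dtypes            # dtypes to match; None matches all

    def is_active(self, cls_name, test_name, dtype):
        # The decorator fires only when every provided filter matches.
        return ((self.cls_name is None or self.cls_name == cls_name)
                and (self.test_name is None or self.test_name == test_name)
                and (self.dtypes is None or dtype in self.dtypes))

# Mirrors the entries added in the diff: the sparse CSR consistency test is
# expected to fail for complex32 until "nonzero_cuda" gains ComplexHalf support.
skips = (
    DecorateInfoSketch(unittest.expectedFailure,
                       'TestSparseCSR', 'test_sparse_csr_consistency',
                       dtypes=('complex32',)),
)

def maybe_decorate(test_fn, cls_name, test_name, dtype):
    # Wrap test_fn with every decorator whose filters match this variant;
    # non-matching variants run unmodified.
    for info in skips:
        if info.is_active(cls_name, test_name, dtype):
            test_fn = info.decorator(test_fn)
    return test_fn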