@@ -10341,7 +10341,7 @@ def error_inputs_mean(op_info, device, **kwargs):
10341
10341
aliases=('arctan', ),
10342
10342
ref=np.arctan,
10343
10343
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
10344
- dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
10344
+ dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
10345
10345
assert_autodiffed=True,
10346
10346
supports_forward_ad=True,
10347
10347
supports_fwgrad_bwgrad=True,
@@ -10367,6 +10367,18 @@ def error_inputs_mean(op_info, device, **kwargs):
10367
10367
active_if=IS_WINDOWS),
10368
10368
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
10369
10369
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
10370
+ # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
10371
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency',
10372
+ dtypes=(torch.chalf,)),
10373
+ # same reason as above
10374
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace',
10375
+ dtypes=(torch.chalf,)),
10376
+ # same reason as above
10377
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out',
10378
+ dtypes=(torch.chalf,)),
10379
+ # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf'
10380
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary',
10381
+ dtypes=(torch.chalf,)),
10370
10382
)),
10371
10383
BinaryUfuncInfo('atan2',
10372
10384
aliases=('arctan2',),
@@ -14995,7 +15007,8 @@ def error_inputs_mean(op_info, device, **kwargs):
14995
15007
UnaryUfuncInfo('tan',
14996
15008
ref=np.tan,
14997
15009
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
14998
- dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
15010
+ dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
15011
+ backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
14999
15012
assert_autodiffed=True,
15000
15013
supports_forward_ad=True,
15001
15014
supports_fwgrad_bwgrad=True,
@@ -15019,6 +15032,18 @@ def error_inputs_mean(op_info, device, **kwargs):
15019
15032
active_if=TEST_WITH_ROCM),
15020
15033
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
15021
15034
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
15035
+ # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
15036
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency',
15037
+ dtypes=(torch.chalf,)),
15038
+ # same reason as above
15039
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace',
15040
+ dtypes=(torch.chalf,)),
15041
+ # same reason as above
15042
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out',
15043
+ dtypes=(torch.chalf,)),
15044
+ # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf'
15045
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary',
15046
+ dtypes=(torch.chalf,)),
15022
15047
),
15023
15048
# tan(pi/2 * odd_number) is nan
15024
15049
reference_numerics_filter=NumericsFilter(
0 commit comments