@@ -11168,7 +11168,7 @@ def error_inputs_mean(op_info, device, **kwargs):
UnaryUfuncInfo('cos',
ref=np.cos,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
- dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
supports_forward_ad=True,
@@ -11185,11 +11185,17 @@ def error_inputs_mean(op_info, device, **kwargs):
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
+ # AssertionError: Tensor-likes are not close!
+ # Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed)
+ # Greatest relative difference: nan at index (700,) (up to 0.001 allowed)
+ DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large',
+ device_type='cuda',
+ dtypes=(torch.chalf,), active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('cosh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
- dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
@@ -11209,6 +11215,12 @@ def error_inputs_mean(op_info, device, **kwargs):
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
+ # AssertionError: Tensor-likes are not close!
+ # Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed)
+ # Greatest relative difference: nan at index (6000,) (up to 0.001 allowed)
+ DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large',
+ device_type='cuda',
+ dtypes=(torch.chalf,), active_if=IS_WINDOWS),
)),
OpInfo('cov',
dtypes=all_types_and_complex_and(torch.bfloat16),
@@ -15268,10 +15280,6 @@ def error_inputs_mean(op_info, device, **kwargs):
ref=np.sin,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
- # TODO: Add torch.chalf backward dtype support. Currently, we get:
- # AssertionError: The supported dtypes for sin on device type cuda are incorrect!
- # The following dtypes did not work in backward but are listed by the OpInfo: {torch.complex32}.
- backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
supports_sparse=True,
@@ -15320,7 +15328,7 @@ def error_inputs_mean(op_info, device, **kwargs):
UnaryUfuncInfo('sinh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
- dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
@@ -15341,6 +15349,18 @@ def error_inputs_mean(op_info, device, **kwargs):
device_type='cpu', dtypes=[torch.int8]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
+ # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency',
+ dtypes=(torch.chalf,)),
+ # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace',
+ dtypes=(torch.chalf,)),
+ # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out',
+ dtypes=(torch.chalf,)),
+ # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf'
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary',
+ dtypes=(torch.chalf,)),
)),
UnaryUfuncInfo('sign',
ref=reference_sign,
@@ -15668,7 +15688,7 @@ def error_inputs_mean(op_info, device, **kwargs):
aliases=('nn.functional.tanh',),
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
- dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
+ dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
@@ -15687,6 +15707,18 @@ def error_inputs_mean(op_info, device, **kwargs):
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
+ # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency',
+ dtypes=(torch.chalf,)),
+ # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace',
+ dtypes=(torch.chalf,)),
+ # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out',
+ dtypes=(torch.chalf,)),
+ # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf'
+ DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary',
+ dtypes=(torch.chalf,)),
),
# tan(j * pi/2 * odd_number) is nan
reference_numerics_filter=NumericsFilter(
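A minimal sketch of how the chalf (torch.complex32) coverage enabled by this diff can be spot-checked manually, assuming a CUDA device and a build that includes this change; the tolerances are illustrative and not taken from the OpInfo entries:

```python
# Hedged sketch: spot-check complex32 (chalf) support for the unary ufuncs touched
# by this diff by comparing against a complex64 reference. Assumes a CUDA build
# with this change applied; tolerances are illustrative only.
import torch

x = torch.randn(64, dtype=torch.cfloat, device="cuda").to(torch.chalf)
for fn in (torch.cos, torch.cosh, torch.sinh, torch.tanh):
    out = fn(x)                    # runs in complex32 on CUDA
    ref = fn(x.to(torch.cfloat))   # complex64 reference
    torch.testing.assert_close(out.to(torch.cfloat), ref, rtol=1e-2, atol=1e-2)
```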