@@ -4209,10 +4209,13 @@ def wrap_test_class(orig_cls):
         ):
             dct[name] = unittest.expectedFailure
         elif name.startswith("test_"):
+            backend = lookup_backend(name)
+            if not HAS_CUDA and backend == "inductor":
+                continue
             ctxs = [
                 compiled_autograd._enable(
                     make_compiler_fn(
-                        backend=lookup_backend(name),
+                        backend=backend,
                         fullgraph=name not in known_graph_breaks_tests,
                     )
                 ),
@@ -4305,6 +4308,8 @@ def wrap_test_class(orig_cls):
     "test_full_backward_hook_double_backward",  # _pack_with_none
     "test_grad_mode_restored_reentrant",  # assertTrue
     "test_multi_grad_any_hooks",  # register_multi_grad_hook
+    "test_saved_variable_packing_unpacking_did_not_save_original_with_hooks",  # register_hooks
+    "test_graph_save_on_cpu",  # dynamo disabled
 }
 
 test_contexts = {
@@ -4370,19 +4375,7 @@ def wrap_test_class(orig_cls):
     "test_to_sparse_backward",  # Out of bounds: frame_state_entry.stride[i] is None
     "test_custom_function_non_tensor_inputs_outputs",  # gradient batching rule not implemented for aten::sym_size.int
     "test_setitem",  # CopySlices accuracy error
-    "test_save_on_cpu_and_checkpoint",  # https://github.com/pytorch/pytorch/issues/147565
-    "test_checkpoint_detects_non_determinism",  # different error
-    "test_checkpointing_non_reentrant_autocast_cpu",  # saved != recompute
-    "test_checkpointing_non_reentrant_autocast_gpu",  # saved != recompute
     "test_checkpointing_without_reentrant_saved_object_identity",  # same as https://github.com/pytorch/pytorch/issues/136193
-    "test_saved_variable_packing_unpacking_did_not_save_original_with_hooks",  # register_hooks multiple times
-    "test_saved_variable_saved_original_inplace_detach",  # RuntimeError not raised
-    "test_access_saved_tensor_twice_without_recomputation_works",  # saved != recompute
-    "test_checkpointing_without_reentrant_dataparallel",  # https://github.com/pytorch/pytorch/issues/127115
-    "test_checkpointing",  # takes very very long
-    "test_checkpointing_without_reentrant_input_requires_grad_False",  # takes very very long
-    "test_checkpointing_without_reentrant_input_requires_grad_True",  # takes very very long
-    "test_checkpointing_without_reentrant_memory_savings",  # takes very very long
     "test_dtensor_different_gradient_placement",  # Dynamo failed to run FX node with fake tensors
     "test_dtensor_noncontiguous_output",  # Dynamo failed to run FX node with fake tensors
     "test_dtensor_partial_placement_graph_output",  # Dynamo failed to run FX node with fake tensors