@@ -4206,10 +4206,13 @@ def wrap_test_class(orig_cls):
         ):
             dct[name] = unittest.expectedFailure
         elif name.startswith("test_"):
+            backend = lookup_backend(name)
+            if not HAS_CUDA and backend == "inductor":
+                continue
             ctxs = [
                 compiled_autograd._enable(
                     make_compiler_fn(
-                        backend=lookup_backend(name),
+                        backend=backend,
                         fullgraph=name not in known_graph_breaks_tests,
                     )
                 ),
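Hoisting the backend lookup above the context construction lets CUDA-less machines skip inductor-backed tests outright instead of wrapping tests that cannot run. For orientation, a minimal sketch of how a name-to-backend lookup like lookup_backend can be derived from the backend-keyed dict edited in the later hunks (the dict name, contents, and defaults here are illustrative assumptions, not the real helper):

    # Hypothetical sketch -- not the real PyTorch helper. Assumes a dict
    # `tests_by_backend` mapping backend name -> set of test names, shaped
    # like the backend-keyed dict edited in the hunks below.
    tests_by_backend = {
        "eager": {"test_setitem"},
        "aot_eager": {"test_grad_batched_grad"},
        "inductor": set(),
    }

    def lookup_backend(test_name):
        # Tests pinned to a weaker backend run there; everything else
        # defaults to the full inductor backend.
        for backend in ("eager", "aot_eager", "inductor"):
            if test_name in tests_by_backend[backend]:
                return backend
        return "inductor"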
@@ -4302,6 +4305,8 @@ def wrap_test_class(orig_cls):
     "test_full_backward_hook_double_backward",  # _pack_with_none
     "test_grad_mode_restored_reentrant",  # assertTrue
     "test_multi_grad_any_hooks",  # register_multi_grad_hook
+    "test_saved_variable_packing_unpacking_did_not_save_original_with_hooks",  # register_hooks
+    "test_graph_save_on_cpu",  # dynamo disabled
 }

 test_contexts = {
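Names added to this set are rerouted through unittest.expectedFailure by the loop in the first hunk. A self-contained illustration of that marker's semantics (toy test, not from the suite):

    import unittest

    class Demo(unittest.TestCase):
        # A test marked this way may fail without failing the suite; if it
        # unexpectedly passes, the runner reports an "unexpected success",
        # which makes the run fail.
        @unittest.expectedFailure
        def test_known_failing(self):
            self.assertEqual(1, 2)

    if __name__ == "__main__":
        unittest.main()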
@@ -4351,37 +4356,27 @@ def wrap_test_class(orig_cls):
         "test_custom_autograd_no_early_free",  # batched gradients
         "test_lobpcg",  # NaNs
         # Uncategorized
+        "test_autograd_simple_views_python",  # gradient is None
+        "test_function_returns_undefined_tensor",  # gradient is None
+        "test_input_buffer_accum",  # add(sparse, dense) not supported
     },
     "eager": {  # will be run without torch.compiling the CA graph
         "test_setup_context_when_forward_has_default_args",  # autograd.Function with class methods
         "test_accumulate_grad_tensor_reference",  # Out of bounds: frame_state_entry.stride[i] is None
         "test_custom_function_exception",  # torch.no_grad(), torch._dynamo.exc.Unsupported: missing: WITH_EXCEPT_START
         "test_to_sparse_backward",  # Out of bounds: frame_state_entry.stride[i] is None
-        "test_autograd_simple_views_python",  # gradient is None
-        "test_function_returns_undefined_tensor",  # gradient is None
         "test_naughty_autograd_function_stashing_ctx",  # bytecode issue
         "test_unrelated_inputs",  # gradient batching rule not implemented for aten::sym_size.int
         "test_custom_function_non_tensor_inputs_outputs",  # gradient batching rule not implemented for aten::sym_size.int
         "test_return_duplicate",  # gradient batching rule not implemented for aten::sym_size.int
         "test_return_duplicate_inplace",  # gradient batching rule not implemented for aten::sym_size.int
         "test_setitem",  # CopySlices accuracy error
-        "test_save_on_cpu_and_checkpoint",  # https://github.com/pytorch/pytorch/issues/147565
-        "test_checkpoint_detects_non_determinism",  # different error
-        "test_checkpointing_non_reentrant_autocast_cpu",  # saved != recompute
-        "test_checkpointing_non_reentrant_autocast_gpu",  # saved != recompute
         "test_checkpointing_without_reentrant_saved_object_identity",  # same as https://github.com/pytorch/pytorch/issues/136193
-        "test_saved_variable_packing_unpacking_did_not_save_original_with_hooks",  # register_hooks multiple times
-        "test_saved_variable_saved_original_inplace_detach",  # RuntimeError not raised
-        "test_access_saved_tensor_twice_without_recomputation_works",  # saved != recompute
-        "test_checkpointing_without_reentrant_dataparallel",  # https://github.com/pytorch/pytorch/issues/127115
-        "test_checkpointing",  # takes very very long
-        "test_checkpointing_without_reentrant_input_requires_grad_False",  # takes very very long
-        "test_checkpointing_without_reentrant_input_requires_grad_True",  # takes very very long
-        "test_checkpointing_without_reentrant_memory_savings",  # takes very very long
         "test_dtensor_different_gradient_placement",  # Dynamo failed to run FX node with fake tensors
         "test_dtensor_noncontiguous_output",  # Dynamo failed to run FX node with fake tensors
         "test_dtensor_partial_placement_graph_output",  # Dynamo failed to run FX node with fake tensors
         "test_unwrap_async_collective_tensor_tangent",  # AttributeError: 'PlainTensorMeta' object has no attribute 'attrs'
+        "test_graph_save_on_cpu",  # PGO strides check out of bounds
     },
     "aot_eager": {  # will be run with torch.compile(backend="eager")
         # Category: FakeTensor
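The bucket moves above change which backend a test compiles under. As a rough sketch, assuming make_compiler_fn ultimately defers to torch.compile with the chosen backend (the real factory accepts more options than shown here):

    import torch

    # Rough sketch, assuming make_compiler_fn wraps torch.compile with the
    # chosen backend; the actual PyTorch factory carries additional options.
    def make_compiler_fn(backend="inductor", fullgraph=True):
        def compiler_fn(gm):
            # gm is the compiled-autograd FX graph handed over at backward time.
            return torch.compile(gm, backend=backend, fullgraph=fullgraph)
        return compiler_fn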
@@ -4390,10 +4385,7 @@ def wrap_test_class(orig_cls):
         "test_grad_batched_grad",  # torch._subclasses.fake_tensor.UnsupportedFakeTensorException: meta converter nyi
         "test_scalar_grad_mixed_device",  # Fake Tensors aren't propagating device properly for 0-dim grads
     },
-    "inductor": {  # will be run with torch.compile(backend="aot_eager")
-        "test_input_buffer_accum",  # does not support sparse_grad=True: https://github.com/pytorch/pytorch/issues/120267
-        "test_graph_save_on_cpu",  # does not support pin_memory: https://github.com/pytorch/pytorch/issues/134173
-    },
+    "inductor": {},  # will be run with torch.compile(backend="aot_eager")
     # tests not present in this dict will be run with torch.compile(backend="inductor")
 }
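Downstream, wrap_test_class is applied to an existing autograd TestCase to produce a compiled-autograd variant; a hedged usage sketch with a stand-in test class (class and method names hypothetical):

    import unittest
    import torch

    # Stand-in TestCase: wrap_test_class is the function modified in this
    # diff. It copies each test_* method, marks known failures with
    # unittest.expectedFailure, and runs the rest under compiled autograd
    # with a per-test backend.
    class ToyAutogradTests(unittest.TestCase):
        def test_add_backward(self):
            x = torch.ones(3, requires_grad=True)
            (x + 1).sum().backward()
            self.assertTrue(torch.equal(x.grad, torch.ones(3)))

    ToyAutogradTestsCompiled = wrap_test_class(ToyAutogradTests)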