Update · pytorch/pytorch@b4f550f

Commit b4f550f

Update
[ghstack-poisoned]

2 parents: 196ea58 + fd31402
1 file changed: +17 −1 lines

test/inductor/test_compiled_autograd.py (17 additions, 1 deletion)
@@ -43,7 +43,6 @@
     scoped_load_inline,
     skipIfWindows,
 )
-from torch.testing._internal.distributed.fake_pg import FakeStore
 from torch.testing._internal.hop_db import hop_db
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CPU, HAS_CUDA, HAS_GPU
 from torch.testing._internal.logging_utils import logs_to_string
@@ -4164,7 +4163,13 @@ def aot_eager():
         first, second, third, fourth = fn(eager(), aot_eager())
         self.assertIsNone(third)
 
+    @unittest.skipIf(
+        not torch.distributed.is_available(),
+        "FakePG relies on distributed build",
+    )
     def test_ddp_cpp_reducer_error(self):
+        from torch.testing._internal.distributed.fake_pg import FakeStore
+
         store = FakeStore()
         dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)
         try:
@@ -4184,8 +4189,14 @@ def test_ddp_cpp_reducer_error(self):
         finally:
             dist.destroy_process_group()
 
+    @unittest.skipIf(
+        not torch.distributed.is_available(),
+        "FakePG relies on distributed build",
+    )
     @config.patch(optimize_ddp="python_reducer")
     def test_ddp_python_reducer(self):
+        from torch.testing._internal.distributed.fake_pg import FakeStore
+
         store = FakeStore()
         dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)
         try:
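Both DDP tests now follow the same guard pattern: skip the test when torch is built without distributed support, and defer the FakeStore import into the test body so that merely importing the test module never fails on such builds. Below is a minimal, self-contained sketch of that pattern, assuming a plain unittest runner; the test class name and the world-size assertion are illustrative, not part of this commit.

import unittest

import torch
import torch.distributed as dist


class FakePGGuardSketch(unittest.TestCase):
    @unittest.skipIf(
        not torch.distributed.is_available(),
        "FakePG relies on distributed build",
    )
    def test_fake_pg_roundtrip(self):
        # Lazy import: loading fake_pg registers the "fake" backend, and
        # keeping the import inside the test means non-distributed builds
        # can still import this module; only this guarded test is skipped.
        from torch.testing._internal.distributed.fake_pg import FakeStore

        store = FakeStore()
        dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)
        try:
            # The fake process group reports the configured world size
            # without spawning any real workers.
            self.assertEqual(dist.get_world_size(), 2)
        finally:
            dist.destroy_process_group()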
@@ -4440,6 +4451,11 @@ def wrap_test_class(orig_cls):
        "test_graph_save_on_cpu",  # torch.save should no-op and be recorded in the graph
        "test_saving_variable_to_disk",  # torch.save should no-op and be recorded in the graph
        "test_nested_checkpoint_early_stop_False",  # AOT backward higher order gradients
+        # Slow tests, these tests are close to CI timeout if we try to torch.compile them
+        "test_checkpointing",
+        "test_checkpointing_without_reentrant_memory_savings",
+        "test_checkpointing_without_reentrant_input_requires_grad_True",
+        "test_checkpointing_without_reentrant_input_requires_grad_False",
    },
    "aot_eager": {  # will be run with torch.compile(backend="eager")
        # Category: FakeTensor
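For context, the four checkpointing tests above are added to a per-backend skip set that wrap_test_class consults; this diff does not show how that set is consumed, so the following is only a hedged sketch of one way such a denylist can be applied when wrapping a test class (wrap_test_class_sketch and its skip reason are hypothetical names, not PyTorch API).

import unittest

# Skip set mirroring the slow-test entries added in this commit.
SLOW_UNDER_COMPILE = {
    "test_checkpointing",
    "test_checkpointing_without_reentrant_memory_savings",
    "test_checkpointing_without_reentrant_input_requires_grad_True",
    "test_checkpointing_without_reentrant_input_requires_grad_False",
}


def wrap_test_class_sketch(orig_cls, skipped=frozenset(SLOW_UNDER_COMPILE)):
    # Build a subclass whose denylisted tests are replaced by skips;
    # all other tests are inherited and run unchanged.
    overrides = {}
    for name in dir(orig_cls):
        if name.startswith("test_") and name in skipped:
            overrides[name] = unittest.skip(
                "close to CI timeout under torch.compile"
            )(getattr(orig_cls, name))
    return type(orig_cls.__name__ + "Compiled", (orig_cls,), overrides)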
