8000 Remove dynamo supported check for Windows. by stellaraccident · Pull Request #111313 · pytorch/pytorch · GitHub
[go: up one dir, main page]

Skip to content

Remove dynamo supported check for Windows. #111313

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions test/export/test_export.py
Original file line number Diff line number Diff line change
Expand Up @@ -2494,6 +2494,7 @@ def forward(self, x, y, z):
)
self.assertEqual(ep.module()(*inputs), m3(*inputs))

@unittest.skipIf(torchdynamo.is_win32(), "no compile support for windows")
def test_export_then_compile_tensor_ctor(self):
class M(torch.nn.Module):
def forward(self, scores, mask):
Expand Down
2 changes: 0 additions & 2 deletions test/functorch/test_eager_transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -4789,8 +4789,6 @@ def predict(params_and_buffers, x):
actual = opt_fn(params_and_buffers, x)
self.assertEqual(actual, expected)

# torch.compile is not supported on Windows
@expectedFailureIf(IS_WINDOWS)
@torch._dynamo.config.patch(suppress_errors=False)
@torch._dynamo.config.patch(capture_func_transforms=True)
@skipIfTorchDynamo("Do not test torch.compile on top of torch.compile")
Expand Down
9 changes: 9 additions & 0 deletions torch/_dynamo/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
import sys

import torch
from . import convert_frame, eval_frame, resume_execution
from .backends.registry import list_backends, lookup_backend, register_backend
Expand Down Expand Up @@ -89,3 +91,10 @@ def reset_code_caches() -> None:
if code:
reset_code(code)
code_context.clear()


def is_win32():
    """Return True when running on native Windows.

    Thin predicate over ``sys.platform`` used elsewhere in dynamo to gate
    features (e.g. the inductor backend) that are not yet supported on
    Windows.
    """
    # The comparison already yields a bool; no need for an if/else ladder.
    return sys.platform == "win32"
5 changes: 4 additions & 1 deletion torch/_dynamo/backends/inductor.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,13 @@
# mypy: ignore-errors

from torch._dynamo import register_backend
from torch._dynamo import is_win32, register_backend


@register_backend
def inductor(*args, **kwargs):
if is_win32():
raise RuntimeError("Windows not yet supported for inductor")

# do import here to avoid loading inductor into memory when it is not used
from torch._inductor.compile_fx import compile_fx

Expand Down
6 changes: 3 additions & 3 deletions torch/_dynamo/convert_frame.py
Original file line number Diff line number Diff line change
Expand Up @@ -307,9 +307,9 @@ def _convert_frame_assert(
return None
if code.co_name == "<genexpr>" and code.co_filename.endswith(
(
"transformers/file_utils.py",
"transformers/utils/generic.py",
"diffusers/utils/outputs.py",
"transformers/file_utils.py".replace("/", os.sep),
"transformers/utils/generic.py".replace("/", os.sep),
"diffusers/utils/outputs.py".replace("/", os.sep),
)
):
# not needed, but cleans up torchbench error stats
Expand Down
6 changes: 3 additions & 3 deletions torch/_dynamo/eval_frame.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,9 @@ def revert():
DONT_WRAP_FILES = {
# For tracing into fx modules
inspect.getsourcefile(GraphModule),
join(dirname(dirname(__file__)), "onnx/_internal/fx/dynamo_graph_extractor.py"),
join(
dirname(dirname(__file__)), "onnx/_internal/fx/dynamo_graph_extractor.py"
).replace("/", os.sep),
}


Expand Down Expand Up @@ -592,8 +594,6 @@ def __call__(self, fn):


def check_if_dynamo_supported():
if sys.platform == "win32":
raise RuntimeError("Windows not yet supported for torch.compile")
if sys.version_info >= (3, 12):
raise RuntimeError("Python 3.12+ not yet supported for torch.compile")

Expand Down
10 changes: 5 additions & 5 deletions torch/_dynamo/trace_rules.py
Original file line number Diff line number Diff line change
Expand Up @@ -3195,15 +3195,15 @@ def _module_dir(m: types.ModuleType):
def get_legacy_mod_inlinelist():
    # Build the set of filesystem path prefixes for the modules named in
    # LEGACY_MOD_INLINELIST: each dotted module name (minus the leading
    # "torch.") is converted to a relative path and appended to the
    # installed torch directory.
    # NOTE(review): this span is a unified-diff rendering — the first
    # `inlinelist.add` line is the pre-change version ("/" separator) and
    # the second is the post-change version (os.sep, for Windows path
    # correctness); only one of the two exists in the actual source file.
    inlinelist = set()
    for m in LEGACY_MOD_INLINELIST:
        inlinelist.add(_module_dir(torch) + m[len("torch.") :].replace(".", "/"))
        inlinelist.add(_module_dir(torch) + m[len("torch.") :].replace(".", os.sep))
    return inlinelist


@functools.lru_cache(None)
def get_mod_inlinelist():
    # Cached builder of filesystem path prefixes for modules in
    # MOD_INLINELIST, mirroring get_legacy_mod_inlinelist.
    # NOTE(review): this span is a unified-diff rendering — the first
    # `inlinelist.add` line is the pre-change version ("/" separator) and
    # the second is the post-change version (os.sep, for Windows path
    # correctness); only one of the two exists in the actual source file.
    inlinelist = set()
    for m in MOD_INLINELIST:
        inlinelist.add(_module_dir(torch) + m[len("torch.") :].replace(".", "/"))
        inlinelist.add(_module_dir(torch) + m[len("torch.") :].replace(".", os.sep))
    return inlinelist


Expand All @@ -3221,9 +3221,9 @@ def get_mod_inlinelist():
# Skip fbcode paths(including torch.package paths) containing
# one of the following strings.
FBCODE_SKIP_DIRS = {
"torchrec/distributed",
"torchrec/fb/distributed",
"caffe2/torch/fb/sparsenn/pooled_embeddings_modules.py",
"torchrec/distributed".replace("/", os.sep),
"torchrec/fb/distributed".replace("/", os.sep),
"caffe2/torch/fb/sparsenn/pooled_embeddings_modules.py".replace("/", os.sep),
}
FBCODE_SKIP_DIRS_RE = re.compile(f".*({'|'.join(map(re.escape, FBCODE_SKIP_DIRS))})")

Expand Down
3 changes: 2 additions & 1 deletion torch/_dynamo/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -2140,9 +2140,10 @@ def build_checkpoint_variable(**options):


def is_compile_supported(device_type):
from . import is_win32
from .eval_frame import is_dynamo_supported

compile_supported = is_dynamo_supported()
compile_supported = is_dynamo_supported() and not is_win32()
if device_type == "cpu":
pass
elif device_type == "cuda" and compile_supported:
Expand Down
0