[dynamo] wrap GraphModule exceptions in dynamo-wrapped tests by williamwen42 · Pull Request #126341 · pytorch/pytorch · GitHub
[dynamo] wrap GraphModule exceptions in dynamo-wrapped tests #126341

Closed
wants to merge 6 commits
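Most of this PR applies the xfailIfTorchDynamo decorator from torch.testing._internal.common_utils to tests expected to fail when the suite runs dynamo-wrapped (PYTORCH_TEST_WITH_DYNAMO=1), plus a few skipIfTorchDynamo uses and one class-level skip in test_schema_check.py. A minimal sketch of what such a decorator can look like, assuming the standard unittest expectedFailure mechanism (the real definition in common_utils.py may differ):

import unittest

from torch.testing._internal.common_utils import TEST_WITH_TORCHDYNAMO


def xfailIfTorchDynamo_sketch(fn):
    # Mark the test as an expected failure only when the suite is run under
    # dynamo (PYTORCH_TEST_WITH_DYNAMO=1); otherwise leave it untouched.
    return unittest.expectedFailure(fn) if TEST_WITH_TORCHDYNAMO else fn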
3 changes: 3 additions & 0 deletions test/dynamo/test_backends.py
@@ -106,6 +106,9 @@ def _check_backend_works(self, backend, options=None):
def test_eager(self):
self._check_backend_works("eager")

def test_eager_noexcept(self):
self._check_backend_works("eager_noexcept")

@_force_skip_lazy_graph_module()
def test_torchscript(self):
self._check_backend_works("ts")
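The new test_eager_noexcept case above exercises an eager_noexcept backend through _check_backend_works. Outside the test harness, a backend registered under that name would be reachable through the regular torch.compile entry point; a minimal usage sketch, assuming the backend behaves like the eager backend apart from its exception handling (that behavioral detail is an assumption based on the PR title, not something this diff states):

import torch


def fn(x):
    return torch.sin(x) + x


# Compile with the backend the new test exercises; it is assumed here to run
# the captured graph eagerly, with the "noexcept" part referring to how
# exceptions raised inside the generated GraphModule are handled.
opt_fn = torch.compile(fn, backend="eager_noexcept")
print(opt_fn(torch.randn(4)))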
4 changes: 4 additions & 0 deletions test/functorch/test_aotdispatch.py
@@ -71,6 +71,7 @@
skipIfRocm,
skipIfTorchDynamo,
TestCase,
xfailIfTorchDynamo,
)
from torch.testing._internal.hop_db import hop_db
from torch.testing._internal.optests import (
@@ -576,6 +577,9 @@ def forward(self, primals_1, primals_2):

# This is a (hopefully) extremely rare case that is difficult to handle,
# so we ban it.
# https://github.com/pytorch/pytorch/issues/126236
# https://github.com/pytorch/pytorch/pull/126113
@xfailIfTorchDynamo
def test_set__and_data_mutation_bad(self):
def f(a):
a_view = a.view(-1)
19 changes: 19 additions & 0 deletions test/functorch/test_control_flow.py
@@ -26,6 +26,7 @@
skipIfTorchDynamo,
TEST_WITH_TORCHDYNAMO,
TestCase,
xfailIfTorchDynamo,
)


@@ -1021,6 +1022,8 @@ def f(x):
graph_module = make_fx(torch.func.functionalize(f))(*example_inputs)
self.assertEqual(graph_module(*example_inputs), f(*example_inputs))

# https://github.com/pytorch/pytorch/issues/126988
@xfailIfTorchDynamo
def test_cond_functionalized_input_mutation_on_true_branch(self):
def true_fn(x):
view_x = x.view(x.shape)
@@ -1046,6 +1049,8 @@ def f(x):
):
make_fx(torch.func.functionalize(f))(*example_inputs)

# https://github.com/pytorch/pytorch/issues/126988
@xfailIfTorchDynamo
def test_cond_functionalized_input_mutation_on_false_branch(self):
def true_fn(x):
return x.sin().sum()
@@ -1071,6 +1076,8 @@ def f(x):
):
make_fx(torch.func.functionalize(f))(*example_inputs)

# https://github.com/pytorch/pytorch/issues/126988
@xfailIfTorchDynamo
def test_cond_functionalized_output_alias_input(self):
def true_fn(x):
return x
@@ -1098,6 +1105,8 @@ def f(x):
):
make_fx(torch.func.functionalize(f))(*example_inputs)

# https://github.com/pytorch/pytorch/issues/126988
@xfailIfTorchDynamo
def test_cond_functionalized_nested_input_mutation(self):
def true_true_fn(x):
x.add_(4)
@@ -1129,6 +1138,8 @@ def f(x):
):
make_fx(torch.func.functionalize(f))(*example_inputs)

# https://github.com/pytorch/pytorch/issues/126988
@xfailIfTorchDynamo
def test_cond_functionalized_nested_input_mutation_with_aot_func(self):
def true_true_fn(x):
x.add_(4)
@@ -1180,6 +1191,8 @@ def wrapper(*args, **kwargs):
):
make_fx(f_wrapper(f))(example_input_func)

# https://github.com/pytorch/pytorch/issues/126988
@xfailIfTorchDynamo
def test_cond_functionalized_input_aliasing_with_aot_func(self):
def true_fn(x):
return x
@@ -1810,6 +1823,8 @@ def wrapper(*args, **kwargs):

self.assertEqual(gm(*example_inputs), f(*example_inputs))

# https://github.com/pytorch/pytorch/issues/126988
@xfailIfTorchDynamo
def test_map_functionalized_arg_mutation(self):
def map_fn(x, y):
y.add_(4)
@@ -1825,6 +1840,8 @@ def f(xs, y):
):
functional_f(*example_inputs)

# https://github.com/pytorch/pytorch/issues/126988
@xfailIfTorchDynamo
def test_map_functionalized_elem_mutation(self):
def map_fn(x, y):
x.add_(4)
@@ -1860,6 +1877,8 @@ def f(x, y):
# Ensure no error is thrown when not running backward
f(*example_inputs)

# https://github.com/pytorch/pytorch/issues/126988
@xfailIfTorchDynamo
def test_map_functionalized_elem_alias(self):
def map_fn(x):
x.view(x.shape)
3 changes: 3 additions & 0 deletions test/functorch/test_eager_transforms.py
@@ -77,6 +77,7 @@
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
xfailIfTorchDynamo,
)

from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten
@@ -2340,6 +2341,8 @@ def f(x):
)(x)
self.assertEqual(actual, expected)

# https://github.com/pytorch/pytorch/issues/127036
@xfailIfTorchDynamo
@parametrize("_preallocate_and_copy", (True, False))
def test_chunk_jacrev_chunksize_one(self, device, _preallocate_and_copy):
# With chunk_size=1, we shouldn't `vmap` and hence not be limited
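The truncated comment above refers to the chunk_size argument of torch.func.jacrev: with chunk_size=1 the Jacobian rows are computed one at a time rather than batched through vmap, which is why the test is not limited by vmap constraints. For context only, a small standalone sketch of that argument (ordinary torch.func usage, not code from this PR):

import torch
from torch.func import jacrev

x = torch.randn(5)

# chunk_size=1 computes one row of the Jacobian at a time instead of
# batching all rows through vmap.
jac = jacrev(torch.sin, chunk_size=1)(x)
print(jac.shape)  # torch.Size([5, 5]); diagonal Jacobian of elementwise sin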
5 changes: 5 additions & 0 deletions test/test_autograd.py
@@ -75,6 +75,7 @@
skipIfTorchDynamo,
slowTest,
TestCase,
xfailIfTorchDynamo,
)
from torch.utils._mode_utils import no_dispatch
from torch.utils._python_dispatch import TorchDispatchMode
@@ -6980,6 +6981,8 @@ def test_checkpointing_without_reentrant_correct_grad(self):
self.assertEqual(b_grad, c_grad)
self.assertEqual(b_grad, d_grad)

# PYTORCH_TEST_WITH_DYNAMO=1 test fails on CI but can't repro locally
@skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127115")
def test_checkpointing_without_reentrant_dataparallel(self):
"""
Verifies gradient correctness when checkpoint without reentrant autograd
@@ -7037,6 +7040,8 @@ def hook(grad):
# should only call hook once
self.assertEqual(count, 1)

# https://github.com/pytorch/pytorch/issues/127115
@xfailIfTorchDynamo
def test_checkpointing_without_reentrant_arbitrary_input_output(self):
"""
Ensures checkpointing without reentrant autograd works with functions
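Both checkpointing tests above target the non-reentrant code path. For readers unfamiliar with it, a minimal standalone sketch of that path (plain torch.utils.checkpoint usage, unrelated to the dynamo changes in this PR):

import torch
from torch.utils.checkpoint import checkpoint

x = torch.randn(4, requires_grad=True)

# use_reentrant=False selects the non-reentrant implementation these tests
# cover; activations are recomputed during backward instead of being stored.
y = checkpoint(lambda t: t.sin().sum(), x, use_reentrant=False)
y.backward()
print(x.grad)  # cos(x)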
5 changes: 5 additions & 0 deletions test/test_binary_ufuncs.py
@@ -65,6 +65,7 @@
TEST_SCIPY,
TestCase,
torch_to_numpy_dtype_dict,
xfailIfTorchDynamo,
)

if TEST_SCIPY:
@@ -1236,6 +1237,8 @@ def binary_check_input_output_mem_overlap(self, op, device, expected_failure=Fal
expected_failure=expected_failure,
)

# https://github.com/pytorch/pytorch/issues/126474
@xfailIfTorchDynamo
@dtypes(torch.double)
def test_binary_op_mem_overlap(self, device, dtype):
ops = [
@@ -3691,6 +3694,8 @@ def test_addsub_half_tensor(self, device):
actual = op(x, y, alpha=alpha)
self.assertTrue(not (actual.isnan() or actual.isinf()))

# https://github.com/pytorch/pytorch/issues/127003
@xfailIfTorchDynamo
def test_sub_typing(self, device):
m1 = torch.tensor(
[True, False, False, True, False, False], dtype=torch.bool, device=device
1 change: 1 addition & 0 deletions test/test_custom_ops.py
@@ -199,6 +199,7 @@ def foo_impl(x):
):
torch.library.opcheck(op, (x,), {})

@skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug")
def test_incorrect_abstract_impl(self, device):
lib = self.lib()
lib.define("foo(Tensor x) -> Tensor")
3 changes: 3 additions & 0 deletions test/test_indexing.py
@@ -29,6 +29,7 @@
skipIfTorchDynamo,
TEST_CUDA,
TestCase,
xfailIfTorchDynamo,
)


@@ -1785,6 +1786,8 @@ def test_boolean_indexing_onedim(self, device):
a[b] = 1.0
self.assertEqual(a, tensor([[1.0, 1.0, 1.0]], device=device))

# https://github.com/pytorch/pytorch/issues/127003
@xfailIfTorchDynamo
def test_boolean_assignment_value_mismatch(self, device):
# A boolean assignment should fail when the shape of the values
# cannot be broadcast to the subscription. (see also gh-3458)
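The comment above states the rule the test checks. A small standalone illustration of that indexing behavior (ordinary PyTorch semantics, shown only for context):

import torch

a = torch.zeros(2, 3)
mask = torch.tensor([True, False])  # selects one row, so values must broadcast to (1, 3)

a[mask] = torch.ones(3)             # broadcasts fine
try:
    a[mask] = torch.ones(4, 4)      # (4, 4) cannot broadcast to (1, 3)
except RuntimeError as e:
    print("rejected:", e)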
5 changes: 4 additions & 1 deletion test/test_linalg.py
@@ -18,7 +18,8 @@
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest,
xfailIfTorchDynamo)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
@@ -802,6 +803,8 @@ def test_addr_integral(self, device, dtype):
# when beta is not zero
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=2)

# https://github.com/pytorch/pytorch/issues/127043
@xfailIfTorchDynamo
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_addr_float_and_complex(self, device, dtype):
6 changes: 5 additions & 1 deletion test/test_mkldnn.py
@@ -21,7 +21,7 @@
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
@@ -807,6 +807,8 @@ def test_max_pool2d_stride_none(self):

self.assertEqual(y1, y2.to_dense())

# https://github.com/pytorch/pytorch/issues/127111
@xfailIfTorchDynamo
def test_max_pool_unsupported(self):
# OneDNN does not support dilation in max_pooling; it will be available in v2.0.
N = torch.randint(3, 10, (1,)).item()
@@ -1159,6 +1161,8 @@ def test_0_dimension_tensor(self):
out_mkldnn = mkldnn_utils.to_mkldnn(m)(x)
self.assertEqual(out_eager, out_mkldnn)

# https://github.com/pytorch/pytorch/issues/127111
@xfailIfTorchDynamo
def test_view(self):
x = torch.randn(3, 4, 5, dtype=torch.float32).to_mkldnn()
self.assertRaisesRegex(RuntimeError,
4 changes: 3 additions & 1 deletion test/test_module_tracker.py
@@ -3,11 +3,13 @@
from copy import copy

import torch
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.module_tracker import ModuleTracker


class TestModuleTracker(TestCase):
# "https://github.com/pytorch/pytorch/issues/127112
@xfailIfTorchDynamo
def test_module_hierarchy(self):
seen_fw = []
seen_bw = []
7 changes: 6 additions & 1 deletion test/test_schema_check.py
@@ -6,7 +6,7 @@
from torch.utils._pytree import tree_map
import unittest

from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.common_utils import run_tests, TEST_WITH_TORCHDYNAMO
from torch.fx.operator_schemas import normalize_function
from torch._subclasses.schema_check_mode import SchemaCheckMode
from torch.utils._python_dispatch import TorchDispatchMode
@@ -94,6 +94,11 @@ def wrap(e):

# Tests various schema checking functionalities.
class TestSchemaCheck(JitTestCase):
def setUp(self):
if TEST_WITH_TORCHDYNAMO:
self.skipTest("SchemaCheckMode is ignored by dynamo")
super().setUp()

# Tests that SchemaCheckMode records operator order with grad
def test_schema_check_mode_operator_order(self):
with SchemaCheckMode() as schema_check:
13 changes: 11 additions & 2 deletions test/test_tensor_creation_ops.py
@@ -17,7 +17,8 @@
TestCase, run_tests, do_test_empty_full, TEST_WITH_ROCM, suppress_warnings,
torch_to_numpy_dtype_dict, numpy_to_torch_dtype_dict, slowTest,
set_default_dtype, set_default_tensor_type,
TEST_SCIPY, IS_MACOS, IS_PPC, IS_JETSON, IS_WINDOWS, parametrize, skipIfTorchDynamo)
TEST_SCIPY, IS_MACOS, IS_PPC, IS_JETSON, IS_WINDOWS, parametrize, skipIfTorchDynamo,
xfailIfTorchDynamo)
from torch.testing._internal.common_device_type import (
expectedFailureMeta, instantiate_device_type_tests, deviceCountAtLeast, onlyNativeDeviceTypes,
onlyCPU, largeTensorTest, precisionOverride, dtypes,
@@ -1582,6 +1583,8 @@ def test_random_bool(self, device):
self.assertEqual(t.max(), True)
self.assertTrue(0.4 < (t.eq(True)).to(torch.int).sum().item() / size < 0.6)

# https://github.com/pytorch/pytorch/issues/126834
@xfailIfTorchDynamo
def test_random_from_to_bool(self, device):
size = 2000

@@ -1661,7 +1664,9 @@ def test_random_full_range(self, device, dtype):

# NB: uint64 is broken because its max value is not representable in
# int64_t, but this is what random expects
@dtypes(*all_types_and(torch.bfloat16, torch.half, torch.uint16, torch.uint32))
# https://github.com/pytorch/pytorch/issues/126834
@xfailIfTorchDynamo
@dtypes(*all_types_and(torch.bfloat16, torch.half, torch.uint16, torch.uint32))
def test_random_from_to(self, device, dtype):
size = 2000
alpha = 0.1
@@ -1750,6 +1755,8 @@ def test_random_from_to(self, device, dtype):
lambda: t.random_(from_, to_)
)

# https://github.com/pytorch/pytorch/issues/126834
@xfailIfTorchDynamo
@dtypes(*all_types_and(torch.bfloat16, torch.half, torch.uint16, torch.uint32))
def test_random_to(self, device, dtype):
size = 2000
@@ -3350,6 +3357,8 @@ def test_normal_std_error(self, device):
with self.assertRaisesRegex(RuntimeError, r'normal expects all elements of std >= 0.0'):
torch.normal(input, std)

# https://github.com/pytorch/pytorch/issues/126834
@xfailIfTorchDynamo
@dtypes(torch.float, torch.double, torch.half)
@dtypesIfCUDA(torch.float, torch.double, torch.half, torch.bfloat16)
def test_uniform_from_to(self, device, dtype):
5 changes: 4 additions & 1 deletion test/test_torch.py
@@ -41,7 +41,7 @@
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO)
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
@@ -4376,6 +4376,9 @@ def test_nullary_op_mem_overlap(self, device):
getattr(x, op)(*args)

# FIXME: move to an elementwise ternary test suite and make this an OpInfo test
# https://github.com/pytorch/pytorch/issues/126474
@xfailIfTorchDynamo
@skipIfTorchInductor("https://github.com/pytorch/pytorch/issues/126474")
@dtypes(torch.double)
def test_ternary_op_mem_overlap(self, device, dtype):
if device == "cpu" and TEST_WITH_TORCHINDUCTOR:
5 changes: 4 additions & 1 deletion test/test_type_promotion.py
@@ -8,7 +8,8 @@

from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo)
numpy_to_torch_dtype_dict, skipIfTorchDynamo,
xfailIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
@@ -43,6 +44,8 @@ class TestTypePromotion(TestCase):
# `int+float -> float` but `int.add_(float)` is rejected as an error.
# Promoting inplace would require re-allocating and copying the memory of the
# tensor data, since element size could change.
# https://github.com/pytorch/pytorch/issues/127049
@xfailIfTorchDynamo
@float_double_default_dtype
def test_inplace(self, device):
int_tensor = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
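The comment at the top of this hunk explains the rule the test checks: out-of-place ops promote (int + float gives float), while in-place variants are rejected because the result could no longer fit the existing storage dtype. A short standalone illustration (standard type-promotion behavior, not part of this diff):

import torch

int_tensor = torch.ones(3, dtype=torch.int32)
float_tensor = torch.ones(3, dtype=torch.float32)

print((int_tensor + float_tensor).dtype)  # torch.float32, out-of-place promotes

try:
    int_tensor.add_(float_tensor)         # in-place would have to change dtype
except RuntimeError as e:
    print("in-place promotion rejected:", e)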
3 changes: 3 additions & 0 deletions test/test_unary_ufuncs.py
@@ -21,6 +21,7 @@
IS_WINDOWS,
gradcheck,
is_iterable_of_tensors,
xfailIfTorchDynamo,
)
from torch.testing._internal.common_methods_invocations import (
unary_ufuncs,
@@ -788,6 +789,8 @@ def _test(op, output, input):
_test(op, data[0:sz], data[1 : sz + 1])

# TODO: run on non-native device types
# https://github.com/pytorch/pytorch/issues/126474
@xfailIfTorchDynamo
@dtypes(torch.double)
def test_unary_out_op_mem_overlap(self, device, dtype):
sz = 3