[dynamo] wrap GraphModule exceptions in dynamo-wrapped tests (#126341) · pytorch/pytorch@5359af0

Commit 5359af0

williamwen42 authored and pytorchmergebot committed
[dynamo] wrap GraphModule exceptions in dynamo-wrapped tests (#126341)
Better approach to #126197 to catch issues like #125568.

Pull Request resolved: #126341
Approved by: https://github.com/anijain2305, https://github.com/jansel
1 parent cdf2133 commit 5359af0

18 files changed: +101 -9 lines
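Most of the hunks below apply one pattern: tests that fail only when the suite runs dynamo-wrapped (PYTORCH_TEST_WITH_DYNAMO=1) are annotated with xfailIfTorchDynamo plus a comment linking the tracking issue, while tests whose behavior under dynamo is expected are skipped with skipIfTorchDynamo and a reason string. The following is a minimal sketch of that pattern, not part of this commit; the class name, test names, and issue number are hypothetical, and the decorators are imported from torch.testing._internal.common_utils exactly as the import hunks below do.

from torch.testing._internal.common_utils import (
    TestCase,
    run_tests,
    skipIfTorchDynamo,
    xfailIfTorchDynamo,
)


class TestExample(TestCase):  # hypothetical test class
    # Known failure under PYTORCH_TEST_WITH_DYNAMO=1; link the tracking issue
    # so the xfail can be removed once the issue is fixed.
    # https://github.com/pytorch/pytorch/issues/NNNNNN  (hypothetical)
    @xfailIfTorchDynamo
    def test_breaks_when_dynamo_wrapped(self):
        ...

    # Skip outright when the dynamo-wrapped behavior is expected, not a bug.
    @skipIfTorchDynamo("Expected to fail under dynamo; not a bug")
    def test_expected_to_differ_under_dynamo(self):
        ...


if __name__ == "__main__":
    run_tests()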

test/dynamo/test_backends.py

Lines changed: 3 additions & 0 deletions
@@ -106,6 +106,9 @@ def _check_backend_works(self, backend, options=None):
     def test_eager(self):
         self._check_backend_works("eager")

+    def test_eager_noexcept(self):
+        self._check_backend_works("eager_noexcept")
+
     @_force_skip_lazy_graph_module()
     def test_torchscript(self):
         self._check_backend_works("ts")
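The new test above exercises an "eager_noexcept" backend string through _check_backend_works. A hedged usage sketch, assuming only what the test implies: that "eager_noexcept" is registered as a dynamo backend name usable with torch.compile and, like "eager", is intended for debugging rather than performance.

import torch


def fn(x):
    return torch.sin(x) + 1


# The backend is selected by name, the same way the test above passes it to
# _check_backend_works; its exact semantics are defined by this PR.
compiled_fn = torch.compile(fn, backend="eager_noexcept")
print(compiled_fn(torch.randn(4)))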

test/functorch/test_aotdispatch.py

Lines changed: 4 additions & 0 deletions
@@ -71,6 +71,7 @@
     skipIfRocm,
     skipIfTorchDynamo,
     TestCase,
+    xfailIfTorchDynamo,
 )
 from torch.testing._internal.hop_db import hop_db
 from torch.testing._internal.optests import (
@@ -576,6 +577,9 @@ def forward(self, primals_1, primals_2):

     # This is a (hopefully) extremely rare case that is difficult to handle,
     # so we ban it.
+    # https://github.com/pytorch/pytorch/issues/126236
+    # https://github.com/pytorch/pytorch/pull/126113
+    @xfailIfTorchDynamo
     def test_set__and_data_mutation_bad(self):
         def f(a):
             a_view = a.view(-1)

test/functorch/test_control_flow.py

Lines changed: 19 additions & 0 deletions
@@ -26,6 +26,7 @@
     skipIfTorchDynamo,
     TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xfailIfTorchDynamo,
 )


@@ -1021,6 +1022,8 @@ def f(x):
         graph_module = make_fx(torch.func.functionalize(f))(*example_inputs)
         self.assertEqual(graph_module(*example_inputs), f(*example_inputs))

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_cond_functionalized_input_mutation_on_true_branch(self):
         def true_fn(x):
             view_x = x.view(x.shape)
@@ -1046,6 +1049,8 @@ def f(x):
         ):
             make_fx(torch.func.functionalize(f))(*example_inputs)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_cond_functionalized_input_mutation_on_false_branch(self):
         def true_fn(x):
             return x.sin().sum()
@@ -1071,6 +1076,8 @@ def f(x):
         ):
             make_fx(torch.func.functionalize(f))(*example_inputs)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_cond_functionalized_output_alias_input(self):
         def true_fn(x):
             return x
@@ -1098,6 +1105,8 @@ def f(x):
         ):
             make_fx(torch.func.functionalize(f))(*example_inputs)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_cond_functionalized_nested_input_mutation(self):
         def true_true_fn(x):
             x.add_(4)
@@ -1129,6 +1138,8 @@ def f(x):
         ):
             make_fx(torch.func.functionalize(f))(*example_inputs)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_cond_functionalized_nested_input_mutation_with_aot_func(self):
         def true_true_fn(x):
             x.add_(4)
@@ -1180,6 +1191,8 @@ def wrapper(*args, **kwargs):
         ):
             make_fx(f_wrapper(f))(example_input_func)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_cond_functionalized_input_aliasing_with_aot_func(self):
         def true_fn(x):
             return x
@@ -1810,6 +1823,8 @@ def wrapper(*args, **kwargs):

         self.assertEqual(gm(*example_inputs), f(*example_inputs))

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_map_functionalized_arg_mutation(self):
         def map_fn(x, y):
             y.add_(4)
@@ -1825,6 +1840,8 @@ def f(xs, y):
         ):
             functional_f(*example_inputs)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_map_functionalized_elem_mutation(self):
         def map_fn(x, y):
             x.add_(4)
@@ -1860,6 +1877,8 @@ def f(x, y):
         # Ensure no error is thrown when not running backward
         f(*example_inputs)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_map_functionalized_elem_alias(self):
         def map_fn(x):
             x.view(x.shape)

test/functorch/test_eager_transforms.py

Lines changed: 3 additions & 0 deletions
@@ -77,6 +77,7 @@
     subtest,
     TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xfailIfTorchDynamo,
 )

 from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten
@@ -2340,6 +2341,8 @@ def f(x):
         )(x)
         self.assertEqual(actual, expected)

+    # https://github.com/pytorch/pytorch/issues/127036
+    @xfailIfTorchDynamo
     @parametrize("_preallocate_and_copy", (True, False))
     def test_chunk_jacrev_chunksize_one(self, device, _preallocate_and_copy):
         # With chunk_size=1, we shouldn't `vmap` and hence not be limited

test/test_autograd.py

Lines changed: 5 additions & 0 deletions
@@ -75,6 +75,7 @@
     skipIfTorchDynamo,
     slowTest,
     TestCase,
+    xfailIfTorchDynamo,
 )
 from torch.utils._mode_utils import no_dispatch
 from torch.utils._python_dispatch import TorchDispatchMode
@@ -6980,6 +6981,8 @@ def test_checkpointing_without_reentrant_correct_grad(self):
         self.assertEqual(b_grad, c_grad)
         self.assertEqual(b_grad, d_grad)

+    # PYTORCH_TEST_WITH_DYNAMO=1 test fails on CI but can't repro locally
+    @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127115")
     def test_checkpointing_without_reentrant_dataparallel(self):
         """
         Verifies gradient correctness when checkpoint without reentrant autograd
@@ -7037,6 +7040,8 @@ def hook(grad):
         # should only call hook once
         self.assertEqual(count, 1)

+    # https://github.com/pytorch/pytorch/issues/127115
+    @xfailIfTorchDynamo
     def test_checkpointing_without_reentrant_arbitrary_input_output(self):
         """
         Ensures checkpointing without reentrant autograd works with functions

test/test_binary_ufuncs.py

Lines changed: 5 additions & 0 deletions
@@ -65,6 +65,7 @@
     TEST_SCIPY,
     TestCase,
     torch_to_numpy_dtype_dict,
+    xfailIfTorchDynamo,
 )

 if TEST_SCIPY:
@@ -1236,6 +1237,8 @@ def binary_check_input_output_mem_overlap(self, op, device, expected_failure=Fal
             expected_failure=expected_failure,
         )

+    # https://github.com/pytorch/pytorch/issues/126474
+    @xfailIfTorchDynamo
     @dtypes(torch.double)
     def test_binary_op_mem_overlap(self, device, dtype):
         ops = [
@@ -3691,6 +3694,8 @@ def test_addsub_half_tensor(self, device):
             actual = op(x, y, alpha=alpha)
             self.assertTrue(not (actual.isnan() or actual.isinf()))

+    # https://github.com/pytorch/pytorch/issues/127003
+    @xfailIfTorchDynamo
     def test_sub_typing(self, device):
         m1 = torch.tensor(
             [True, False, False, True, False, False], dtype=torch.bool, device=device

test/test_custom_ops.py

Lines changed: 1 addition & 0 deletions
@@ -199,6 +199,7 @@ def foo_impl(x):
         ):
             torch.library.opcheck(op, (x,), {})

+    @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug")
     def test_incorrect_abstract_impl(self, device):
         lib = self.lib()
         lib.define("foo(Tensor x) -> Tensor")

test/test_indexing.py

Lines changed: 3 additions & 0 deletions
@@ -29,6 +29,7 @@
     skipIfTorchDynamo,
     TEST_CUDA,
     TestCase,
+    xfailIfTorchDynamo,
 )


@@ -1785,6 +1786,8 @@ def test_boolean_indexing_onedim(self, device):
         a[b] = 1.0
         self.assertEqual(a, tensor([[1.0, 1.0, 1.0]], device=device))

+    # https://github.com/pytorch/pytorch/issues/127003
+    @xfailIfTorchDynamo
     def test_boolean_assignment_value_mismatch(self, device):
         # A boolean assignment should fail when the shape of the values
         # cannot be broadcast to the subscription. (see also gh-3458)

test/test_linalg.py

Lines changed: 4 additions & 1 deletion
@@ -18,7 +18,8 @@
     TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
     make_fullrank_matrices_with_distinct_singular_values,
     freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
-    setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
+    setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest,
+    xfailIfTorchDynamo)
 from torch.testing._internal.common_device_type import \
     (instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
     onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
@@ -802,6 +803,8 @@ def test_addr_integral(self, device, dtype):
         # when beta is not zero
         self._test_addr_vs_numpy(device, dtype, beta=2, alpha=2)

+    # https://github.com/pytorch/pytorch/issues/127043
+    @xfailIfTorchDynamo
     @precisionOverride({torch.bfloat16: 1e-1})
     @dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
     def test_addr_float_and_complex(self, device, dtype):

test/test_mkldnn.py

Lines changed: 5 additions & 1 deletion
@@ -21,7 +21,7 @@
 from torch.utils import mkldnn as mkldnn_utils
 from torch.testing._internal.common_utils import TestCase, \
     run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
-    skipIfTorchDynamo
+    skipIfTorchDynamo, xfailIfTorchDynamo
 from torch.testing._internal.common_device_type import (
     instantiate_device_type_tests,
     dtypes,
@@ -807,6 +807,8 @@ def test_max_pool2d_stride_none(self):

         self.assertEqual(y1, y2.to_dense())

+    # https://github.com/pytorch/pytorch/issues/127111
+    @xfailIfTorchDynamo
     def test_max_pool_unsupported(self):
         # OneDNN not support dilation max_pooling, will be avilabled in v2.0.
         N = torch.randint(3, 10, (1,)).item()
@@ -1159,6 +1161,8 @@ def test_0_dimension_tensor(self):
         out_mkldnn = mkldnn_utils.to_mkldnn(m)(x)
         self.assertEqual(out_eager, out_mkldnn)

+    # https://github.com/pytorch/pytorch/issues/127111
+    @xfailIfTorchDynamo
     def test_view(self):
         x = torch.randn(3, 4, 5, dtype=torch.float32).to_mkldnn()
         self.assertRaisesRegex(RuntimeError,

0 commit comments