[Break XPU] generalize newly introduced device bias code in Inductor UT. · pytorch/pytorch@75c71ab · GitHub

Commit 75c71ab

etaf authored and pytorchmergebot committed
[Break XPU] generalize newly introduced device bias code in Inductor UT. (#151926)
Pull Request resolved: #151926
Approved by: https://github.com/EikanWang, https://github.com/jansel
1 parent d70490e commit 75c71ab
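
The change follows the usual Inductor test convention for removing device bias: instead of hard-coding device="cuda", a test asks torch.testing._internal.inductor_utils for GPU_TYPE, which resolves to the device-type string of whichever GPU backend the build supports (e.g. "cuda" or "xpu"), so the same test exercises both NVIDIA and Intel devices. A minimal sketch of that pattern (the tensor shape is illustrative, not taken from the diff):

    import torch
    from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU

    # GPU_TYPE is the device-type string for the available GPU backend
    # (e.g. "cuda" or "xpu"); HAS_GPU gates the code when no GPU exists.
    if HAS_GPU:
        t = torch.rand(5, device=GPU_TYPE)  # rather than device="cuda"
        assert t.device.type == GPU_TYPE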

File tree: 4 files changed, +15 -8 lines changed

test/higher_order_ops/test_invoke_subgraph.py (3 additions, 3 deletions)

@@ -20,7 +20,7 @@
     TEST_WITH_CROSSREF,
     TestCase,
 )
-from torch.testing._internal.inductor_utils import HAS_GPU
+from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU
 from torch.testing._internal.triton_utils import requires_cuda, requires_gpu


@@ -1407,8 +1407,8 @@ def fn(x, y):
             z = gn(x, y)
             return gn(z, y)

-        t1 = torch.rand(5, device="cuda")
-        t2 = torch.rand(5, device="cuda")
+        t1 = torch.rand(5, device=GPU_TYPE)
+        t2 = torch.rand(5, device=GPU_TYPE)

         ref = fn(t1, t2)
         backend = AotEagerAndRecordGraphs()
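
This hunk pairs GPU_TYPE with the requires_gpu decorator already imported from triton_utils, which is the usual way to keep a GPU test body free of vendor-specific device strings. A hedged sketch of that pairing, with a made-up test class and body for illustration:

    import torch
    from torch.testing._internal.common_utils import TestCase, run_tests
    from torch.testing._internal.inductor_utils import GPU_TYPE
    from torch.testing._internal.triton_utils import requires_gpu


    class DeviceAgnosticExample(TestCase):  # hypothetical test class, not from the diff
        @requires_gpu  # skips when neither CUDA nor XPU is available
        def test_rand_add(self):
            t1 = torch.rand(5, device=GPU_TYPE)
            t2 = torch.rand(5, device=GPU_TYPE)
            self.assertEqual((t1 + t2).device.type, GPU_TYPE)


    if __name__ == "__main__":
        run_tests()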

test/inductor/test_aot_inductor.py (1 addition, 1 deletion)

@@ -3838,7 +3838,7 @@ def forward(self, x):
         aot_inductor_module = torch._inductor.aoti_load_package(package_path)
         aot_inductor_module(x)
-        x_casted = x.to("cuda")
+        x_casted = x.to(GPU_TYPE)
         with self.assertRaisesRegex(Exception, ""):
             aot_inductor_module(x_casted)
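
For context, the surrounding test loads an AOTInductor package, calls it with the original input, and then asserts that feeding an input cast to the GPU device raises. A rough, hedged sketch of that flow: only aoti_load_package appears in the hunk; the packaging call, module, and shapes below are assumptions for illustration, and exact signatures can differ across PyTorch versions.

    import torch
    import torch._inductor
    from torch.export import export


    class Small(torch.nn.Module):  # stand-in module, not the one from the test
        def forward(self, x):
            return x + 1


    x = torch.randn(4)  # CPU input; the package is presumably compiled for this device
    ep = export(Small(), (x,))
    package_path = torch._inductor.aoti_compile_and_package(ep)
    aot_inductor_module = torch._inductor.aoti_load_package(package_path)
    aot_inductor_module(x)  # same device as compilation: works

    # Calling the loaded module with a tensor on a different device (here
    # x.to(GPU_TYPE) rather than a hard-coded x.to("cuda")) is expected to
    # raise, which is what the test asserts with assertRaisesRegex.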

test/inductor/test_compile_subprocess.py (9 additions, 4 deletions)

@@ -16,7 +16,7 @@
 from torch._inductor.compile_fx import _InProcessFxCompile, FxCompile, FxCompileMode
 from torch._inductor.test_case import TestCase
 from torch.testing._internal.common_utils import TEST_WITH_ASAN
-from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CPU, HAS_GPU
+from torch.testing._internal.inductor_utils import GPU_TYPE, RUN_CPU, RUN_GPU


 # Make the helper files in test/ importable

@@ -37,6 +37,11 @@
 test_failures = {
     # TypeError: cannot pickle 'generator' object
     "test_layer_norm": TestFailure(("cpu", "cuda"), is_skip=True),
+    "test_remove_noop_slice": TestFailure(("xpu"), is_skip=True),
+    "test_remove_noop_slice1": TestFailure(("xpu"), is_skip=True),
+    "test_remove_noop_slice_scatter": TestFailure(("xpu"), is_skip=True),
+    "test_remove_noop_view_default": TestFailure(("xpu"), is_skip=True),
+    "test_remove_noop_view_dtype": TestFailure(("xpu"), is_skip=True),
 }


@@ -127,7 +132,7 @@ def model_add(x, y):
         self.assertEqual(_AsyncFxCompile._stat_bg_finished, 1)


-if HAS_CPU:
+if RUN_CPU:

     class CpuTests(TestSubprocess):
         common = check_model

@@ -137,7 +142,7 @@ class CpuTests(TestSubprocess):
         inductor.test_torchinductor.CommonTemplate, CpuTests, "cpu", test_failures
     )

-if HAS_GPU and not TEST_WITH_ASAN:
+if RUN_GPU and not TEST_WITH_ASAN:

     class GPUTests(TestSubprocess):
         common = check_model_gpu

@@ -151,5 +156,5 @@ class GPUTests(TestSubprocess):
 if __name__ == "__main__":
     from torch._inductor.test_case import run_tests

-    if HAS_CPU or HAS_GPU:
+    if RUN_CPU or RUN_GPU:
         run_tests(needs="filelock")
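
The new test_failures entries and the RUN_CPU / RUN_GPU gates serve the same goal: the shared CommonTemplate tests are copied onto per-device classes (CpuTests, GPUTests), and individual cases that are known-broken on one backend are skipped by name for that device suffix. A hedged, self-contained sketch of that skip-table idea; the TestFailure class and maybe_skip helper below are stand-ins, not the helpers from test_torchinductor.py:

    import unittest
    from dataclasses import dataclass


    @dataclass
    class TestFailure:  # stand-in with the same shape as the Inductor helper
        suffixes: tuple
        is_skip: bool = False


    test_failures = {
        # Skip this case when the suite is instantiated for the "xpu" suffix.
        "test_remove_noop_slice": TestFailure(("xpu",), is_skip=True),
    }


    def maybe_skip(test_name: str, device: str):
        """Return a decorator that skips test_name on device if it is listed."""
        failure = test_failures.get(test_name)
        if failure is not None and failure.is_skip and device in failure.suffixes:
            return unittest.skip(f"{test_name} is skipped on {device}")
        return lambda fn: fn

In the real suite the consuming side is the copy_tests call visible in the hunk, which reads the table while stamping CommonTemplate onto CpuTests and GPUTests.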

test/inductor/test_torchinductor_strided_blocks.py (2 additions, 0 deletions)

@@ -15,6 +15,7 @@
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
+    skipIfXpu,
     subtest,
 )
 from torch.testing._internal.inductor_utils import (

@@ -542,6 +543,7 @@ def test_dynamic_shapes_pointwise(self, nd_tiling: bool, num_block_pointers: int
             (False, 0),  # We can't infer that the load is a power of 2.
         ],
     )
+    @skipIfXpu(msg="Remove this after Intel triton issue #4000 resolved.")
     def test_dynamic_shapes_reduction(self, with_tiling: bool, num_block_pointers: int):
         """
         Test a reduction kernel with dynamic shapes.
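
skipIfXpu from common_utils is the finer-grained tool used here: only the one parametrized reduction test is skipped on XPU, while the rest of the file keeps running. A small usage sketch (the class and test body are made up for illustration):

    from torch.testing._internal.common_utils import TestCase, run_tests, skipIfXpu


    class StridedBlockExample(TestCase):  # hypothetical class, for illustration only
        @skipIfXpu(msg="Remove this after Intel triton issue #4000 resolved.")
        def test_runs_everywhere_but_xpu(self):
            # On CUDA or CPU builds this runs normally; on an XPU build the
            # decorator reports the case as skipped with the message above.
            self.assertTrue(True)


    if __name__ == "__main__":
        run_tests()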
