[ARM] Enable some additional Aarch64 unit tests by robert-hardwick · Pull Request #146895 · pytorch/pytorch

Closed
4 changes: 2 additions & 2 deletions .ci/pytorch/test.sh
@@ -1438,7 +1438,7 @@ test_executorch() {
 test_linux_aarch64() {
   python test/run_test.py --include test_modules test_mkldnn test_mkldnn_fusion test_openmp test_torch test_dynamic_shapes \
     test_transformers test_multiprocessing test_numpy_interop test_autograd test_binary_ufuncs test_complex test_spectral_ops \
-    test_foreach test_reductions test_unary_ufuncs test_tensor_creation_ops \
+    test_foreach test_reductions test_unary_ufuncs test_tensor_creation_ops nn/test_convolution \
     --shard "$SHARD_NUMBER" "$NUM_TEST_SHARDS" --verbose

   # Dynamo tests
@@ -1457,7 +1457,7 @@ test_linux_aarch64() {
     inductor/test_split_cat_fx_passes inductor/test_standalone_compile inductor/test_torchinductor \
     inductor/test_torchinductor_codegen_dynamic_shapes inductor/test_torchinductor_dynamic_shapes inductor/test_memory \
     inductor/test_triton_cpu_backend inductor/test_triton_extension_backend inductor/test_mkldnn_pattern_matcher inductor/test_cpu_cpp_wrapper \
-    --shard "$SHARD_NUMBER" "$NUM_TEST_SHARDS" --verbose
+    inductor/test_fused_attention inductor/test_cpu_select_algorithm --shard "$SHARD_NUMBER" "$NUM_TEST_SHARDS" --verbose
 }

 if ! [[ "${BUILD_ENVIRONMENT}" == *libtorch* || "${BUILD_ENVIRONMENT}" == *-bazel-* ]]; then
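To verify one of the newly enabled suites locally, the same entry point the CI script calls can be invoked directly. A minimal sketch, assuming a PyTorch source checkout with a built torch, run from the repository root:

```python
# Runs one of the suites this PR adds to the Aarch64 CI list (nn/test_convolution),
# mirroring the run_test.py invocation in .ci/pytorch/test.sh above.
import subprocess

subprocess.run(
    ["python", "test/run_test.py", "--include", "nn/test_convolution", "--verbose"],
    check=True,  # raise CalledProcessError if the suite fails
)
```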
11 changes: 11 additions & 0 deletions test/inductor/test_cpu_select_algorithm.py
@@ -24,10 +24,13 @@
     _calculate_dynamic_per_channel_qparams,
 )
 from torch.testing._internal.common_utils import (
+    IS_ARM64,
     IS_MACOS,
     parametrize,
     skipIfWindows,
     TEST_MKL,
+    TEST_MKLDNN_BF16,
+    xfailIfAarch64,
 )


@@ -921,6 +924,10 @@ def forward(self, view_368):
         self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
         self.assertEqual(counters["inductor"]["cpp_epilogue_fusion_counter"], 2)

+    # Issue for Aarch64 non-bf16 failure: https://github.com/pytorch/pytorch/issues/147104
+    @unittest.skipIf(
+        IS_ARM64 and not TEST_MKLDNN_BF16, "Test fails on Aarch64 without bf16 hardware support"
+    )
     @inductor_config.patch({"freezing": True})
     @patches
     @torch.no_grad
@@ -1008,6 +1015,8 @@ def forward(self, view_12, input_ids, view_9):
         self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
         self.assertEqual(counters["inductor"]["cpp_epilogue_fusion_counter"], 1)

+    # xfail on Aarch64 due to https://github.com/pytorch/pytorch/issues/146915
+    @xfailIfAarch64
     @inductor_config.patch({"freezing": True})
     @patches
     @torch.no_grad
@@ -1365,6 +1374,8 @@ def forward(self, x):
         self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 2)
         self.assertEqual(counters["inductor"]["cpp_epilogue_fusion_counter"], 0)

+    # xfail on Aarch64 due to https://github.com/pytorch/pytorch/issues/146914
+    @xfailIfAarch64
     @inductor_config.patch({"freezing": True})
     @patches
     @torch.no_grad
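The two gates used in this file behave differently, which is why both appear: a hard skip never executes the test, while an expected failure keeps executing it, so a fix shows up as an unexpected success and the tracking issue can be closed. A minimal sketch of the pattern as applied above (test bodies elided; class and method names here are illustrative):

```python
import unittest

from torch.testing._internal.common_utils import (
    IS_ARM64,
    TEST_MKLDNN_BF16,
    xfailIfAarch64,
)


class GatingSketch(unittest.TestCase):
    # Hard skip: never runs on Aarch64 machines lacking bf16 support.
    @unittest.skipIf(IS_ARM64 and not TEST_MKLDNN_BF16, "needs bf16 on Aarch64")
    def test_needs_bf16(self):
        ...

    # Expected failure: still runs on Aarch64; a pass is reported as an
    # unexpected success, flagging that the tracking issue may be fixed.
    @xfailIfAarch64
    def test_known_regression(self):
        ...
```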
4 changes: 4 additions & 0 deletions test/nn/test_convolution.py
@@ -48,6 +48,8 @@
     GRADCHECK_NONDET_TOL,
     gradgradcheck,
     instantiate_parametrized_tests,
+    IS_ARM64,
+    IS_LINUX,
     MACOS_VERSION,
     parametrize as parametrize_test,
     run_tests,
@@ -658,6 +660,8 @@ def test_ConvTranspose2d_output_size(self):
         else:
             self.assertRaises(ValueError, lambda: m(i, (h, w)))

+    # skip test on Aarch64 until https://github.com/pytorch/pytorch/issues/146857 is fixed
+    @unittest.skipIf(IS_LINUX and IS_ARM64, "Test segfaults on Aarch64")
     def test_ConvTranspose2d_output_size_downsample_upsample(self):
         b, c, hid_c = 2, 3, 2
         for h in range(13, 24):
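For context, the skipped test exercises ConvTranspose2d's output_size argument, which disambiguates among the several output shapes a strided transposed convolution can legally produce. An illustrative standalone example (the shapes are illustrative, not the test's own values):

```python
import torch
import torch.nn as nn

# With stride=2, a 13x13 input can legally map to a 27x27 or 28x28 output;
# passing output_size to forward() selects one explicitly.
m = nn.ConvTranspose2d(3, 3, kernel_size=3, stride=2)
x = torch.randn(2, 3, 13, 13)
y = m(x, output_size=(28, 28))
print(y.shape)  # torch.Size([2, 3, 28, 28])
```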
3 changes: 3 additions & 0 deletions torch/testing/_internal/common_utils.py
@@ -1415,6 +1415,7 @@ def is_privateuse1_backend_available():
 TEST_SCIPY = _check_module_exists('scipy')
 TEST_MKL = torch.backends.mkl.is_available()
 TEST_ACL = torch.backends.mkldnn.is_available() and torch.ops.mkldnn._is_mkldnn_acl_supported()
+TEST_MKLDNN_BF16 = torch.backends.mkldnn.is_available() and torch.ops.mkldnn._is_mkldnn_bf16_supported()
 TEST_MPS = torch.backends.mps.is_available()
 MACOS_VERSION = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
 TEST_XPU = torch.xpu.is_available()
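TEST_MKLDNN_BF16 caches this capability probe once at import time, alongside the existing TEST_ACL flag. The same check can be run interactively; a sketch assuming a torch build that exposes the private mkldnn ops used above:

```python
import torch

# Mirrors the definition of TEST_MKLDNN_BF16 added in this hunk.
if torch.backends.mkldnn.is_available():
    print("mkldnn bf16 supported:", torch.ops.mkldnn._is_mkldnn_bf16_supported())
else:
    print("mkldnn is not available in this build")
```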
@@ -1590,6 +1591,8 @@ def xpassIfTorchDynamo_np(func):
         return unittest.skip("skipping numpy 2.0+ dynamo-wrapped test")(func)
     return func if TEST_WITH_TORCHDYNAMO else unittest.expectedFailure(func)

+def xfailIfAarch64(func):
+    return unittest.expectedFailure(func) if IS_ARM64 else func

 def xfailIfACL(func):
     return unittest.expectedFailure(func) if TEST_ACL else func
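xfailIfAarch64 is a thin conditional wrapper over unittest.expectedFailure, matching the existing xfailIfACL helper. A minimal standalone demonstration of the underlying unittest semantics:

```python
import unittest


class Demo(unittest.TestCase):
    @unittest.expectedFailure
    def test_xfail(self):
        self.assertEqual(1, 2)  # counted as an expected failure, not an error


if __name__ == "__main__":
    unittest.main()  # summary line reads: OK (expected failures=1)
```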