From 82e4c500ff8dc502b372da81c0e6947a3bbd924f Mon Sep 17 00:00:00 2001
From: Jonathan Deakin
Date: Thu, 12 Dec 2024 16:07:36 +0000
Subject: [PATCH] Enable AArch64 CI scripts to be used for local dev

- Allow user to specify a custom Arm Compute Library directory with the
  ACL_SOURCE_DIR environment variable, which is then built rather than
  checking out a clean copy (see the usage sketch after the diff)
- Remove `setup.py clean` in build. The CI environment should be clean
  already; removing this enables incremental rebuilds
- Use all cores for building ComputeLibrary
- Remove restriction of building with MAX_JOBS=5 on CPU backend

Mostly a port of https://github.com/pytorch/builder/pull/2028 with the
conda part removed, because aarch64_ci_setup.sh has changed and can now
handle being called twice.

Co-authored-by: David Svantesson-Yeung
Co-authored-by: Fadi Arafeh
---
 .ci/aarch64_linux/aarch64_wheel_ci_build.py | 40 +++++++++++----------
 1 file changed, 22 insertions(+), 18 deletions(-)

diff --git a/.ci/aarch64_linux/aarch64_wheel_ci_build.py b/.ci/aarch64_linux/aarch64_wheel_ci_build.py
index 1cce2836974dcf..1a53d626c0be9e 100755
--- a/.ci/aarch64_linux/aarch64_wheel_ci_build.py
+++ b/.ci/aarch64_linux/aarch64_wheel_ci_build.py
@@ -31,27 +31,29 @@ def build_ArmComputeLibrary() -> None:
         "build=native",
     ]
     acl_install_dir = "/acl"
-    acl_checkout_dir = "ComputeLibrary"
-    os.makedirs(acl_install_dir)
-    check_call(
-        [
-            "git",
-            "clone",
-            "https://github.com/ARM-software/ComputeLibrary.git",
-            "-b",
-            "v25.02",
-            "--depth",
-            "1",
-            "--shallow-submodules",
-        ]
-    )
+    acl_checkout_dir = os.getenv("ACL_SOURCE_DIR", "ComputeLibrary")
+    if os.path.isdir(acl_install_dir):
+        shutil.rmtree(acl_install_dir)
+    if not os.path.isdir(acl_checkout_dir) or not len(os.listdir(acl_checkout_dir)):
+        check_call(
+            [
+                "git",
+                "clone",
+                "https://github.com/ARM-software/ComputeLibrary.git",
+                "-b",
+                "v25.02",
+                "--depth",
+                "1",
+                "--shallow-submodules",
+            ]
+        )
 
     check_call(
-        ["scons", "Werror=1", "-j8", f"build_dir=/{acl_install_dir}/build"]
+        ["scons", "Werror=1", f"-j{os.cpu_count()}"]
         + acl_build_flags,
         cwd=acl_checkout_dir,
     )
-    for d in ["arm_compute", "include", "utils", "support", "src"]:
+    for d in ["arm_compute", "include", "utils", "support", "src", "build"]:
         shutil.copytree(f"{acl_checkout_dir}/{d}", f"{acl_install_dir}/{d}")
 
 
@@ -194,8 +196,10 @@ def parse_arguments():
     ).decode()
 
     print("Building PyTorch wheel")
-    build_vars = "MAX_JOBS=5 CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
-    os.system("cd /pytorch; python setup.py clean")
+    build_vars = "CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
+    # MAX_JOBS=5 is not required for CPU backend (see commit 465d98b)
+    if enable_cuda:
+        build_vars = "MAX_JOBS=5 " + build_vars
 
     override_package_version = os.getenv("OVERRIDE_PACKAGE_VERSION")
     desired_cuda = os.getenv("DESIRED_CUDA")
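
Usage sketch (illustrative, not part of the patch): with the ACL_SOURCE_DIR
hook above, a local developer could point the build at an existing
ComputeLibrary checkout instead of letting the script clone a fresh copy.
The checkout path below is an assumed example, and any command-line flags
are whatever parse_arguments() in the script accepts for your setup.

    import os
    import subprocess
    import sys

    # Reuse an existing local ComputeLibrary checkout; if the directory is
    # missing or empty, the script falls back to cloning v25.02 as before.
    os.environ["ACL_SOURCE_DIR"] = os.path.expanduser("~/src/ComputeLibrary")

    # Run the CI build script from the pytorch checkout root, adding
    # whatever flags parse_arguments() expects in your environment.
    subprocess.run(
        [sys.executable, ".ci/aarch64_linux/aarch64_wheel_ci_build.py"],
        check=True,
    )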