Enable AArch64 CI scripts to be used for local dev · pytorch/pytorch@82e4c50 · GitHub
[go: up one dir, main page]

Skip to content

Commit 82e4c50

Browse files
jondea, davsva01, fadara01
authored and committed
Enable AArch64 CI scripts to be used for local dev
- Allow user to specify a custom Arm Compute Library directory with the ACL_SOURCE_DIR environment variable, which is then built rather than checking out a clean copy - Remove `setup.py clean` in build. The CI environment should be clean already, removing this enables incremental rebuilds - Use all cores for building ComputeLibrary - Remove restriction of building with MAX_JOBS=5 on CPU backend Mostly a port of pytorch/builder#2028 with the conda part removed, because aarch64_ci_setup.sh has changed and can now handle being called twice. Co-authored-by: David Svantesson-Yeung <David.Svantesson-Yeung@arm.com> Co-authored-by: Fadi Arafeh <Fadi.Arafeh@arm.com>
1 parent 7482eb2 commit 82e4c50

File tree

1 file changed

+22
-18
lines changed

1 file changed

+22
-18
lines changed

.ci/aarch64_linux/aarch64_wheel_ci_build.py

+22-18
Original file line numberDiff line numberDiff line change
@@ -31,27 +31,29 @@ def build_ArmComputeLibrary() -> None:
3131
"build=native",
3232
]
3333
acl_install_dir = "/acl"
34-
acl_checkout_dir = "ComputeLibrary"
35-
os.makedirs(acl_install_dir)
36-
check_call(
37-
[
38-
"git",
39-
"clone",
40-
"https://github.com/ARM-software/ComputeLibrary.git",
41-
"-b",
42-
"v25.02",
43-
"--depth",
44-
"1",
45-
"--shallow-submodules",
46-
]
47-
)
34+
acl_checkout_dir = os.getenv("ACL_SOURCE_DIR", "ComputeLibrary")
35+
if os.path.isdir(acl_install_dir):
36+
shutil.rmtree(acl_install_dir)
37+
if not os.path.isdir(acl_checkout_dir) or not len(os.listdir(acl_checkout_dir)):
38+
check_call(
39+
[
40+
"git",
41+
"clone",
42+
"https://github.com/ARM-software/ComputeLibrary.git",
43+
"-b",
44+
"v25.02",
45+
"--depth",
46+
"1",
47+
"--shallow-submodules",
48+
]
49+
)
4850

4951
check_call(
50-
["scons", "Werror=1", "-j8", f"build_dir=/{acl_install_dir}/build"]
52+
["scons", "Werror=1", f"-j{os.cpu_count()}"]
5153
+ acl_build_flags,
5254
cwd=acl_checkout_dir,
5355
)
54-
for d in ["arm_compute", "include", "utils", "support", "src"]:
56+
for d in ["arm_compute", "include", "utils", "support", "src", "build"]:
5557
shutil.copytree(f"{acl_checkout_dir}/{d}", f"{acl_install_dir}/{d}")
5658

5759

@@ -194,8 +196,10 @@ def parse_arguments():
194196
).decode()
195197

196198
print("Building PyTorch wheel")
197-
build_vars = "MAX_JOBS=5 CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
198-
os.system("cd /pytorch; python setup.py clean")
199+
build_vars = "CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
200+
# MAX_JOB=5 is not required for CPU backend (see commit 465d98b)
201+
if enable_cuda:
202+
build_vars = "MAX_JOBS=5 " + build_vars
199203

200204
override_package_version = os.getenv("OVERRIDE_PACKAGE_VERSION")
201205
desired_cuda = os.getenv("DESIRED_CUDA")

0 commit comments

Comments
 (0)
0