fix: set LLAMA_METAL_EMBED_LIBRARY=on on MacOS arm64 (#1289) · asusevski/llama-cpp-python@740f3f3 · GitHub

Commit 740f3f3

fix: set LLAMA_METAL_EMBED_LIBRARY=on on MacOS arm64 (abetlen#1289)
1 parent f7decc9 commit 740f3f3

File tree

1 file changed: +5 -0 lines changed

CMakeLists.txt

Lines changed: 5 additions & 0 deletions
@@ -17,6 +17,11 @@ if (LLAMA_BUILD)
         set(LLAMA_FMA "Off" CACHE BOOL "llama: enable FMA" FORCE)
         set(LLAMA_F16C "Off" CACHE BOOL "llama: enable F16C" FORCE)
     endif()
+
+    if (APPLE AND CMAKE_SYSTEM_PROCESSOR MATCHES "arm64")
+        set(LLAMA_METAL_EMBED_LIBRARY "On" CACHE BOOL "llama: embed metal library" FORCE)
+    endif()
+
     add_subdirectory(vendor/llama.cpp)
     install(
         TARGETS llama
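
For context, LLAMA_METAL_EMBED_LIBRARY is an upstream llama.cpp build option that embeds the Metal shader library into the compiled binary, so Apple Silicon builds do not need to locate the separate Metal shader source at runtime. The cache variable is forced before add_subdirectory(vendor/llama.cpp) so the vendored project picks it up. The sketch below is not part of this commit; it only illustrates that ordering, and the message() line is added here purely for verification.

# Sketch only: force the embed option before the vendored llama.cpp is configured,
# then report the value the subproject will see. Assumes an Apple arm64 host.
if (APPLE AND CMAKE_SYSTEM_PROCESSOR MATCHES "arm64")
    set(LLAMA_METAL_EMBED_LIBRARY "On" CACHE BOOL "llama: embed metal library" FORCE)
endif()
message(STATUS "LLAMA_METAL_EMBED_LIBRARY = ${LLAMA_METAL_EMBED_LIBRARY}")
add_subdirectory(vendor/llama.cpp)

With this change in the project's CMakeLists.txt, wheels built from source on Apple Silicon should enable the option automatically; previously a user would have had to pass it by hand, for example through the CMAKE_ARGS environment variable that llama-cpp-python uses to forward options to CMake during a pip install.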

0 commit comments
