@@ -6,6 +6,10 @@ option(LLAMA_BUILD "Build llama.cpp shared library and install alongside python
 option(LLAVA_BUILD "Build llava shared library and install alongside python package" ON)
 
 function(llama_cpp_python_install_target target)
+    if(NOT TARGET ${target})
+        return()
+    endif()
+
     install(
         TARGETS ${target}
         LIBRARY DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib
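
The guard added above turns the install helper into a no-op whenever the named target was never created, so callers no longer need to know which backends the vendored llama.cpp build actually configured. A minimal standalone sketch of the same pattern, assuming nothing beyond stock CMake (the function and target names here are illustrative, not part of this patch):

    # Guard pattern: install a target only if the build created it.
    function(install_if_built target)
        if(NOT TARGET ${target})
            return()  # target was never created; skip silently
        endif()
        install(TARGETS ${target} LIBRARY DESTINATION lib)
    endfunction()

    install_if_built(ggml-metal)  # no-op unless the Metal backend was built
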
@@ -75,21 +79,21 @@ if (LLAMA_BUILD)
     add_subdirectory(vendor/llama.cpp)
     llama_cpp_python_install_target(llama)
     llama_cpp_python_install_target(ggml)
-    llama_cpp_python_install_target(ggml-cpu)
-    llama_cpp_python_install_target(ggml-base)
-    llama_cpp_python_install_target(ggml-amx)
-
-    if (GGML_METAL)
-        llama_cpp_python_install_target(ggml-metal)
-    endif()
 
-    if (GGML_CUDA)
-        llama_cpp_python_install_target(ggml-cuda)
-    endif()
+    llama_cpp_python_install_target(ggml-base)
 
-    if (GGML_VULKAN)
-        llama_cpp_python_install_target(ggml-vulkan)
-    endif()
+    llama_cpp_python_install_target(ggml-amx)
+    llama_cpp_python_install_target(ggml-blas)
+    llama_cpp_python_install_target(ggml-cann)
+    llama_cpp_python_install_target(ggml-cpu)
+    llama_cpp_python_install_target(ggml-cuda)
+    llama_cpp_python_install_target(ggml-hip)
+    llama_cpp_python_install_target(ggml-kompute)
+    llama_cpp_python_install_target(ggml-metal)
+    llama_cpp_python_install_target(ggml-musa)
+    llama_cpp_python_install_target(ggml-rpc)
+    llama_cpp_python_install_target(ggml-sycl)
+    llama_cpp_python_install_target(ggml-vulkan)
 
 
     # Workaround for Windows + CUDA https://github.com/abetlen/llama-cpp-python/issues/563
     if (WIN32)
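
The effect of this hunk is that installs are no longer gated on configure-time flags; the helper's TARGET check decides instead. A hedged before/after sketch of the pattern, using CUDA as the example backend:

    # Before: install gated on the backend's configure flag.
    if (GGML_CUDA)
        llama_cpp_python_install_target(ggml-cuda)
    endif()

    # After: list every known backend unconditionally; the helper
    # returns early when the target was never created.
    llama_cpp_python_install_target(ggml-cuda)

Keying on TARGET existence tracks what the vendored build actually produced, whereas a flag such as GGML_CUDA can be ON while the corresponding library target is absent or renamed upstream.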