8000 feat: Update llama.cpp · lmbelo/llama-cpp-python@cafa33e · GitHub
[go: up one dir, main page]

Skip to content

Commit cafa33e

Browse files
committed
feat: Update llama.cpp
1 parent e712cff commit cafa33e

File tree

2 files changed

+16
-2
lines changed

2 files changed

+16
-2
lines changed

CMakeLists.txt

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,21 @@ if (LLAMA_BUILD)
7575
add_subdirectory(vendor/llama.cpp)
7676
llama_cpp_python_install_target(llama)
7777
llama_cpp_python_install_target(ggml)
78-
78+
llama_cpp_python_install_target(ggml-cpu)
79+
llama_cpp_python_install_target(ggml-base)
80+
81+
if (GGML_METAL)
82+
llama_cpp_python_install_target(ggml-metal)
83+
endif()
84+
85+
if (GGML_CUDA)
86+
llama_cpp_python_install_target(ggml-cuda)
87+
endif()
88+
89+
if (GGML_VULKAN)
90+
llama_cpp_python_install_target(ggml-vulkan)
91+
endif()
92+
7993
# Workaround for Windows + CUDA https://github.com/abetlen/llama-cpp-python/issues/563
8094
if (WIN32)
8195
install(

vendor/llama.cpp

0 commit comments

Comments (0)
0