8000 llama : print hint when loading a model when no backends are loaded (… · robbiemu/llama.cpp@5364ae4 · GitHub
[go: up one dir, main page]

Skip to content

Commit 5364ae4

Browse files
authored
llama : print hint when loading a model when no backends are loaded (ggml-org#13589)
1 parent 7c07ac2 commit 5364ae4

File tree

1 file changed

+5
-0
lines changed

1 file changed

+5
-0
lines changed

src/llama.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -140,6 +140,11 @@ static struct llama_model * llama_model_load_from_file_impl(
140140
struct llama_model_params params) {
141141
ggml_time_init();
142142

143+
if (!params.vocab_only && ggml_backend_reg_count() == 0) {
144+
LLAMA_LOG_ERROR("%s: no backends are loaded. hint: use ggml_backend_load() or ggml_backend_load_all() to load a backend before calling this function\n", __func__);
145+
return nullptr;
146+
}
147+
143148
unsigned cur_percentage = 0;
144149
if (params.progress_callback == NULL) {
145150
params.progress_callback_user_data = &cur_percentage;

0 commit comments

Comments
 (0)
0