llama_load_model_from_file
1 parent 231123e commit 8be7d67
llama_cpp/llama_cpp.py
@@ -367,7 +367,10 @@ def llama_backend_free():
 def llama_load_model_from_file(
     path_model: bytes, params: llama_context_params
 ) -> llama_model_p:
-    return _lib.llama_load_model_from_file(path_model, params)
+    result = _lib.llama_load_model_from_file(path_model, params)
+    if result is None:
+        raise Exception(f"Failed to load model from {path_model}")
+    return result
 
 
 _lib.llama_load_model_from_file.argtypes = [c_char_p, llama_context_params]
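
For context, a hedged sketch of how this change surfaces to callers of the low-level binding: a failed load now raises instead of handing back a NULL llama_model_p. The model path below is hypothetical, and the setup assumes the module's llama_context_default_params helper.

import llama_cpp

params = llama_cpp.llama_context_default_params()
try:
    # A nonexistent path makes the underlying C call return NULL; with this
    # patch the wrapper raises instead of returning a null pointer.
    model = llama_cpp.llama_load_model_from_file(b"./models/nonexistent.bin", params)
except Exception as err:
    print(f"Model load failed: {err}")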