8000
We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 07f0f3a · commit 401309d — Copy full SHA for 401309d
llama_cpp/llama_cpp.py
@@ -423,10 +423,7 @@ def llama_backend_free():
423
def llama_load_model_from_file(
    path_model: bytes, params: llama_context_params
) -> llama_model_p:
    """Load a llama model from *path_model* via the native library.

    Args:
        path_model: Filesystem path to the model file, already encoded as bytes
            (ctypes marshals it as ``c_char_p``).
        params: Context parameters forwarded to the C API.

    Returns:
        An opaque ``llama_model_p`` pointer to the loaded model.

    Raises:
        Exception: If the native loader returns NULL (ctypes surfaces a NULL
            pointer as ``None``); raising here fails fast instead of letting
            the NULL pointer segfault at first use.
    """
    result = _lib.llama_load_model_from_file(path_model, params)
    if result is None:
        raise Exception(f"Failed to load model from {path_model}")
    return result
430
431
432
# Declare the C signature so ctypes marshals the arguments correctly
# (const char *path_model, struct llama_context_params params).
_lib.llama_load_model_from_file.argtypes = [c_char_p, llama_context_params]
0 commit comments