fix: Raise exceptions when llama model or context fails to load · meymchen/llama-cpp-python@dd22010 · GitHub
[go: up one dir, main page]

Skip to content

Commit dd22010

Browse files
committed
fix: Raise exceptions when llama model or context fails to load
1 parent 3632241 commit dd22010

File tree

Expand file tree

1 file changed

+6
-0
lines changed

1 file changed

+6
-0
lines changed

llama_cpp/_internals.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,9 @@ def __init__(
5151
self.path_model.encode("utf-8"), self.params
5252
)
5353

54+
if self.model is None:
55+
raise ValueError(f"Failed to load model from file: {path_model}")
56+
5457
def __del__(self):
5558
if self.model is not None and self._llama_free_model is not None:
5659
self._llama_free_model(self.model)
@@ -258,6 +261,9 @@ def __init__(
258261
self.model.model, self.params
259262
)
260263

264+
if self.ctx is None:
265+
raise ValueError("Failed to create llama_context")
266+
261267
def __del__(self):
262268
if self.ctx is not None and self._llama_free is not None:
263269
self._llama_free(self.ctx)

0 commit comments

Comments (0)
0