8000 Fix Llama.close didn't free lora adapter by jkawamoto · Pull Request #1679 · abetlen/llama-cpp-python · GitHub

Merged · 1 commit · Aug 15, 2024
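Before this change, calling `close()` on a `Llama` constructed with a `lora_path` released the model and context but left the LoRA adapter allocated until `__del__` ran. With the fix, the adapter is registered on the same cleanup stack as the other native resources, so an explicit `close()` frees it too. A hedged usage sketch (the .gguf paths are placeholders, not from the PR):

    # Hypothetical usage; file paths below are placeholders.
    from llama_cpp import Llama

    llm = Llama(model_path="./model.gguf", lora_path="./adapter.gguf")
    try:
        out = llm("Q: What is 2 + 2? A:", max_tokens=8)
        print(out["choices"][0]["text"])
    finally:
        llm.close()  # after this PR, this also frees the LoRA adapter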
llama_cpp/llama.py — 19 changes: 11 additions & 8 deletions
@@ -198,6 +198,7 @@ def __init__(
             A Llama instance.
         """
         self.verbose = verbose
+        self._stack = contextlib.ExitStack()

         set_verbose(verbose)

@@ -365,8 +366,6 @@ def __init__(
         if not os.path.exists(model_path):
             raise ValueError(f"Model path does not exist: {model_path}")

-        self._stack = contextlib.ExitStack()
-
         self._model = self._stack.enter_context(
             contextlib.closing(
                 _LlamaModel(
@@ -420,6 +419,15 @@ def __init__(
                 raise RuntimeError(
                     f"Failed to initialize LoRA adapter from lora path: {self.lora_path}"
                 )
+
+            def free_lora_adapter():
+                if self._lora_adapter is None:
+                    return
+                llama_cpp.llama_lora_adapter_free(self._lora_adapter)
+                self._lora_adapter = None
+
+            self._stack.callback(free_lora_adapter)
+
             assert self._ctx.ctx is not None
             if llama_cpp.llama_lora_adapter_set(
                 self._ctx.ctx, self._lora_adapter, self.lora_scale
@@ -2085,14 +2093,9 @@ def pooling_type(self) -> str:

     def close(self) -> None:
         """Explicitly free the model from memory."""
-        if hasattr(self,'_stack'):
-            if self._stack is not None:
-                self._stack.close()
+        self._stack.close()

     def __del__(self) -> None:
-        if hasattr(self,'_lora_adapter'):
-            if self._lora_adapter is not None:
-                llama_cpp.llama_lora_adapter_free(self._lora_adapter)
+        self.close()

     @staticmethod
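The fix centers on `contextlib.ExitStack` from the standard library: resources acquired in `__init__` are registered on a single stack, and `close()` unwinds it in LIFO order. A minimal, self-contained sketch of the same pattern, using stand-in names (`Resource`, `Holder`) rather than the library's own classes:

    import contextlib

    class Resource:
        """Stand-in for a native handle such as a loaded model or LoRA adapter."""
        def __init__(self, name: str):
            self.name = name
            print(f"acquired {self.name}")

        def close(self) -> None:
            print(f"freed {self.name}")

    class Holder:
        def __init__(self):
            self._stack = contextlib.ExitStack()
            # enter_context(closing(...)) frees the resource when the stack closes.
            self._model = self._stack.enter_context(
                contextlib.closing(Resource("model"))
            )
            self._adapter = Resource("lora adapter")

            # A callback on the same stack runs on close() as well,
            # mirroring how the PR frees the LoRA adapter.
            def free_adapter():
                if self._adapter is None:
                    return
                self._adapter.close()
                self._adapter = None

            self._stack.callback(free_adapter)

        def close(self) -> None:
            # Releases everything registered above, in reverse (LIFO) order.
            self._stack.close()

        def __del__(self) -> None:
            self.close()

    h = Holder()
    h.close()  # prints: freed lora adapter, then freed model

Registering cleanup at acquisition time is what lets the new `close()` be a one-liner: once everything lives on the stack, the old `hasattr`/`is not None` guards become unnecessary, which is presumably why the commit also moves the `ExitStack` creation to the top of `__init__`, so `self._stack` exists before anything that might need cleanup.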