Update llama.cpp · coderonion/llama-cpp-python@fa92740 · GitHub

Commit fa92740

Update llama.cpp

1 parent dfe8608 · commit fa92740

File tree

2 files changed: +7 -1 lines changed

llama_cpp/llama_cpp.py

Lines changed: 6 additions & 0 deletions
@@ -40,6 +40,7 @@ class llama_token_data(Structure):
 
 llama_token_data_p = POINTER(llama_token_data)
 
+llama_progress_callback = ctypes.CFUNCTYPE(None, c_double, c_void_p)
 
 class llama_context_params(Structure):
     _fields_ = [
@@ -54,6 +55,11 @@ class llama_context_params(Structure):
         ("vocab_only", c_bool), # only load the vocabulary, no weights
         ("use_mlock", c_bool), # force system to keep model in RAM
         ("embedding", c_bool), # embedding mode only
+
+        # called with a progress value between 0 and 1, pass NULL to disable
+        ("progress_callback", llama_progress_callback),
+        # context pointer passed to the progress callback
+        ("progress_callback_user_data", c_void_p),
     ]
 
vendor/llama.cpp (submodule pointer updated: 1 addition & 1 deletion)
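
For illustration only, a minimal sketch (not part of this commit) of how the new progress fields might be used from Python. It assumes the binding also exposes llama_context_default_params(); the report_progress name is hypothetical. With ctypes, the wrapped callback object must stay referenced for as long as the native loader may invoke it.

from llama_cpp import llama_cpp

# Wrap a Python function in the new callback type. Keep this object
# referenced (e.g. at module level) so it is not garbage-collected
# while model loading is still running.
@llama_cpp.llama_progress_callback
def report_progress(progress, user_data):
    # progress arrives as a Python float between 0 and 1
    print(f"load progress: {progress * 100:.0f}%")

params = llama_cpp.llama_context_default_params()  # assumed helper in this binding
params.progress_callback = report_progress
params.progress_callback_user_data = None  # NULL: nothing extra passed to the callback

The params struct would then be passed to the usual model-loading call (llama_init_from_file in the llama.cpp of this era) exactly as before; loading without a callback is unchanged, since NULL disables the reporting.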

0 commit comments