8000 Update llama.cpp · lidanger/llama-cpp-python@9339929 · GitHub
[go: up one dir, main page]

Skip to content

Commit 9339929

Browse files
committed
Update llama.cpp
1 parent cbd26fd commit 9339929

File tree

2 files changed

+9
-1
lines changed

2 files changed

+9
-1
lines changed

llama_cpp/llama_cpp.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -120,6 +120,8 @@ class llama_context_params(Structure):
# Model-file quantization types. Values mirror the `llama_ftype` enum in
# llama.cpp's llama.h; each marks how most tensors are quantized
# ("except 1d tensors" per upstream).
LLAMA_FTYPE_MOSTLY_Q4_2 = ctypes.c_int(5)  # except 1d tensors
LLAMA_FTYPE_MOSTYL_Q4_3 = ctypes.c_int(6)  # except 1d tensors
LLAMA_FTYPE_MOSTYL_Q8_0 = ctypes.c_int(7)  # except 1d tensors
LLAMA_FTYPE_MOSTYL_Q5_0 = ctypes.c_int(8)  # except 1d tensors
LLAMA_FTYPE_MOSTYL_Q5_1 = ctypes.c_int(9)  # except 1d tensors

# NOTE(review): "MOSTYL" above is a typo for "MOSTLY" (it is inconsistent with
# LLAMA_FTYPE_MOSTLY_Q4_2). The misspelled names are public API, so they are
# kept for backward compatibility; correctly-spelled aliases are provided so
# new code does not have to propagate the typo.
LLAMA_FTYPE_MOSTLY_Q4_3 = LLAMA_FTYPE_MOSTYL_Q4_3
LLAMA_FTYPE_MOSTLY_Q8_0 = LLAMA_FTYPE_MOSTYL_Q8_0
LLAMA_FTYPE_MOSTLY_Q5_0 = LLAMA_FTYPE_MOSTYL_Q5_0
LLAMA_FTYPE_MOSTLY_Q5_1 = LLAMA_FTYPE_MOSTYL_Q5_1

124126
# Functions
125127

@@ -210,6 +212,12 @@ def llama_get_kv_cache_token_count(ctx: llama_context_p) -> c_int:
210212
_lib.llama_get_kv_cache_token_count.argtypes = [llama_context_p]
211213
_lib.llama_get_kv_cache_token_count.restype = c_int
212214

def llama_set_rng_seed(ctx: llama_context_p, seed: c_int):
    """Set the current RNG seed on the given llama context.

    Thin ctypes wrapper over the native ``llama_set_rng_seed``; the
    native call returns nothing, so this returns ``None``.
    """
    return _lib.llama_set_rng_seed(ctx, seed)


_lib.llama_set_rng_seed.argtypes = [llama_context_p, c_int]
_lib.llama_set_rng_seed.restype = None
213221

214222
# Returns the size in bytes of the state (rng, logits, embedding and kv_cache)
215223
def llama_get_state_size(ctx: llama_context_p) -> c_size_t:

vendor/llama.cpp

0 commit comments

Comments
 (0)
0