Formatting · coderonion/llama-cpp-python@c928e0a · GitHub
[go: up one dir, main page]

Skip to content

Commit c928e0a

Browse files
committed
Formatting
1 parent 8d9560e commit c928e0a

File tree

1 file changed

+0
-2
lines changed

1 file changed

+0
-2
lines changed

llama_cpp/llama_cpp.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,6 @@ class llama_context_params(Structure):
4747
("n_ctx", c_int), # text context
4848
("n_parts", c_int), # -1 for default
4949
("seed", c_int), # RNG seed, 0 for random
50-
5150
("f16_kv", c_bool), # use fp16 for KV cache
5251
(
5352
"logits_all",
@@ -56,7 +55,6 @@ class llama_context_params(Structure):
5655
("vocab_only", c_bool), # only load the vocabulary, no weights
5756
("use_mlock", c_bool), # force system to keep model in RAM
5857
("embedding", c_bool), # embedding mode only
59-
6058
# called with a progress value between 0 and 1, pass NULL to disable
6159
("progress_callback", llama_progress_callback),
6260
# context pointer passed to the progress callback

0 commit comments

Comments (0)