add hf_tokenizer_path in server ModelSettings · notwa/llama-cpp-python@9594d5c · GitHub
[go: up one dir, main page]

Skip to content

Commit 9594d5c

Browse files
committed
add hf_tokenizer_path in server ModelSettings
1 parent ebb4ec0 commit 9594d5c

File tree

2 files changed

+7
-0
lines changed

2 files changed

+7
-0
lines changed

llama_cpp/server/model.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -119,6 +119,8 @@ def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama:
119119
use_mmap=settings.use_mmap,
120120
use_mlock=settings.use_mlock,
121121
kv_overrides=kv_overrides,
122+
# Tokenizer Params (optionally for Functionary function calling)
123+
hf_tokenizer_path=settings.hf_tokenizer_path,
122124
# Context Params
123125
seed=settings.seed,
124126
n_ctx=settings.n_ctx,

llama_cpp/server/settings.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,11 @@ class ModelSettings(BaseSettings):
5656
default=None,
5757
description="List of model kv overrides in the format key=type:value where type is one of (bool, int, float). Valid true values are (true, TRUE, 1), otherwise false.",
5858
)
59+
# Tokenizer Params
60+
hf_tokenizer_path: Optional[str] = Field(
61+
default=None,
62+
description="Override llama.cpp tokenizer with HF AutoTokenizer from this path if provided.",
63+
)
5964
# Context Params
6065
seed: int = Field(
6166
default=llama_cpp.LLAMA_DEFAULT_SEED, description="Random seed. -1 for random."

0 commit comments

Comments (0)