Use hf_pretrained_model_name_or_path for tokenizer · notwa/llama-cpp-python@a79743b · GitHub
[go: up one dir, main page]

Skip to content

Commit a79743b

Browse files
committed
Use hf_pretrained_model_name_or_path for tokenizer
1 parent 3657cba commit a79743b

File tree

2 files changed

+1
-8
lines changed

2 files changed

+1
-8
lines changed

llama_cpp/server/model.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama:
9292
json.load(open(settings.hf_tokenizer_config_path))
9393
)
9494
)
95-
95+
9696
tokenizer: Optional[llama_cpp.BaseLlamaTokenizer] = None
9797
if settings.hf_pretrained_model_name_or_path is not None:
9898
tokenizer = llama_cpp.LlamaHFTokenizer.from_pretrained(settings.hf_pretrained_model_name_or_path)
@@ -130,8 +130,6 @@ def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama:
130130
use_mmap=settings.use_mmap,
131131
use_mlock=settings.use_mlock,
132132
kv_overrides=kv_overrides,
133-
# Tokenizer Params (optionally for Functionary function calling)
134-
hf_tokenizer_path=settings.hf_tokenizer_path,
135133
# Context Params
136134
seed=settings.seed,
137135
n_ctx=settings.n_ctx,

llama_cpp/server/settings.py

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -56,11 +56,6 @@ class ModelSettings(BaseSettings):
5656
default=None,
5757
description="List of model kv overrides in the format key=type:value where type is one of (bool, int, float). Valid true values are (true, TRUE, 1), otherwise false.",
5858
)
59-
# Tokenizer Params
60-
hf_tokenizer_path: Optional[str] = Field(
61-
default=None,
62-
description="Override llama.cpp tokenizer with HF AutoTokenizer from this path if provided.",
63-
)
6459
# Context Params
6560
seed: int = Field(
6661
default=llama_cpp.LLAMA_DEFAULT_SEED, description="Random seed. -1 for random."

0 commit comments

Comments (0)
0