debug · themrzmaster/llama-cpp-python@0391dd9 · GitHub

Commit 0391dd9

debug
1 parent 454b5e3 commit 0391dd9

File tree

1 file changed: +5 -3 lines changed

llama_cpp/llama_chat_format.py

Lines changed: 5 additions & 3 deletions
@@ -2470,6 +2470,7 @@ def base_function_calling(
         tool_calls=True,
         add_generation_prompt=True,
     )
+    print(prompt)
     completion_or_chunks = llama.create_completion(
         prompt=prompt,
         temperature=temperature,
@@ -2495,6 +2496,7 @@ def base_function_calling(
     )
     completion: llama_types.CreateCompletionResponse = completion_or_chunks  # type: ignore
     text = completion["choices"][0]["text"]
+    print(text)
     if "message" in text:
         return _convert_completion_to_chat(
             llama.create_completion(
@@ -3117,9 +3119,9 @@ def mixtral_function_calling(
         mirostat_eta=mirostat_eta,
         model=model,
         logits_processor=logits_processor,
-        # grammar=llama_grammar.LlamaGrammar.from_string(
-        #     initial_gbnf_tool_grammar, verbose=llama.verbose
-        # ),
+        grammar=llama_grammar.LlamaGrammar.from_string(
+            initial_gbnf_tool_grammar, verbose=llama.verbose
+        ),
     )
     completion: llama_types.CreateCompletionResponse = completion_or_chunks  # type: ignore
     text = completion["choices"][0]["text"]
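
For context on the last hunk, which re-enables the previously commented-out grammar: in llama-cpp-python, a GBNF grammar compiled with LlamaGrammar.from_string can be passed to create_completion so that sampling is restricted to strings the grammar accepts. A minimal sketch follows; the model path and the toy grammar string are illustrative assumptions, not taken from this commit (initial_gbnf_tool_grammar in the diff plays the role of the grammar string here):

    from llama_cpp import Llama
    from llama_cpp.llama_grammar import LlamaGrammar

    # Toy GBNF grammar (assumption, for illustration only): constrain the model
    # to emit exactly one of two dispatch tokens -- the same kind of decision
    # the tool-calling handler makes with initial_gbnf_tool_grammar.
    gbnf = 'root ::= "functions" | "message"'

    grammar = LlamaGrammar.from_string(gbnf, verbose=False)

    # Hypothetical model path; any GGUF model is loaded the same way.
    llama = Llama(model_path="./mixtral-8x7b-instruct.Q4_K_M.gguf", verbose=False)

    completion = llama.create_completion(
        prompt="Should this turn call a tool or answer directly? Decision:",
        max_tokens=8,
        temperature=0.0,
        grammar=grammar,  # sampling is limited to strings the grammar accepts
    )
    print(completion["choices"][0]["text"])  # prints "functions" or "message"

With the grammar active (the + lines in the last hunk), the first completion is guaranteed to match the expected tool-dispatch format rather than free-form text; the print(prompt) and print(text) additions in the earlier hunks are consistent with the commit message "debug", inspecting what the model receives and produces around that constraint.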

0 commit comments