up · themrzmaster/llama-cpp-python@c2407e6 · GitHub

Commit c2407e6

committed: up

1 parent 4a896a1 · commit c2407e6

File tree

1 file changed: 6 additions, 4 deletions


llama_cpp/llama_chat_format.py

Lines changed: 6 additions & 4 deletions
@@ -2500,8 +2500,7 @@ def base_function_calling(
         text = completion["choices"][0]["text"]
         print(text)
         if "message" in text:
-            return _convert_completion_to_chat(
-                llama.create_completion(
+            message_output = llama.create_completion(
                 prompt=prompt + "message:\n",
                 temperature=temperature,
                 top_p=top_p,
@@ -2524,8 +2523,11 @@ def base_function_calling(
                 # grammar=llama_grammar.LlamaGrammar.from_string(
                 #     follow_up_gbnf_tool_grammar, verbose=llama.verbose
                 # ),
-            ),stream=stream
-            )
+            )
+            text: llama_types.CreateCompletionResponse = message_output  # type: ignore
+            # fallback
+            if not text["choices"][0]["text"].startswith("functions."):
+                return _convert_completion_to_chat(message_output,stream=stream)
 
         # One or more function calls
         tool_name = text[len("functions.") :].replace(":", "")
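In effect, the commit stops returning the "message" completion unconditionally: it captures the result in message_output, and only converts it to a chat response when the generated text does not start with "functions."; otherwise execution falls through to the tool-call handling below. A minimal sketch of that control flow, with the sampling parameters elided (llama, prompt, stream, and _convert_completion_to_chat are taken from the diff; the standalone wrapper function and the generated variable are purely illustrative):

    def message_or_tool_call(llama, prompt, stream, _convert_completion_to_chat):
        # Run the follow-up completion once and keep a handle on the result,
        # instead of returning it unconditionally as the old code did.
        message_output = llama.create_completion(
            prompt=prompt + "message:\n",
            # temperature, top_p, grammar, ... elided; see the diff for the full list
        )
        # Fallback: only treat the output as a plain chat message when the
        # model did not actually emit a "functions." tool call.
        generated = message_output["choices"][0]["text"]
        if not generated.startswith("functions."):
            return _convert_completion_to_chat(message_output, stream=stream)
        # Otherwise fall through to the tool-call branch, which parses the
        # tool name out of the "functions.<name>:" prefix.
        tool_name = generated[len("functions."):].replace(":", "")
        return tool_name

Capturing the completion before converting it is what makes the fallback possible: the caller can inspect the raw text for a tool-call prefix before committing to a chat-format return.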
