8000 prompt · themrzmaster/llama-cpp-python@94f9975 · GitHub
[Navigation: go up one directory | main page]

Skip to content

Commit 94f9975

Browse files
committed
prompt
1 parent 003fa3b commit 94f9975

File tree

1 file changed

+4
-3
lines changed

1 file changed

+4
-3
lines changed

llama_cpp/llama_chat_format.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2498,8 +2498,8 @@ def base_function_calling(
24982498
text = completion["choices"][0]["text"]
24992499
print(text)
25002500
if "message" in text:
2501-
#return _convert_completion_to_chat(
2502-
msg_completion_or_chunks = llama.create_completion(
2501+
return _convert_completion_to_chat(
2502+
llama.create_completion(
25032503
prompt=prompt + "message:\n",
25042504
temperature=temperature,
25052505
top_p=top_p,
@@ -2522,7 +2522,8 @@ def base_function_calling(
25222522
# grammar=llama_grammar.LlamaGrammar.from_string(
25232523
# follow_up_gbnf_tool_grammar, verbose=llama.verbose
25242524
# ),
2525-
)
2525+
),stream=stream
2526+
)
25262527

25272528
# One or more function calls
25282529
tool_name = text[len("functions.") :].replace(":", "")

0 commit comments

Comments (0)