prompt · themrzmaster/llama-cpp-python@003fa3b
Commit 003fa3b: prompt

1 parent a9e221e

1 file changed: +4 additions, -6 deletions

llama_cpp/llama_chat_format.py (4 additions, 6 deletions)
@@ -2498,8 +2498,8 @@ def base_function_calling(
         text = completion["choices"][0]["text"]
         print(text)
         if "message" in text:
-            return _convert_completion_to_chat(
-                llama.create_completion(
+            #return _convert_completion_to_chat(
+            msg_completion_or_chunks = llama.create_completion(
                     prompt=prompt + "message:\n",
                     temperature=temperature,
                     top_p=top_p,
@@ -2522,9 +2522,7 @@ def base_function_calling(
                     # grammar=llama_grammar.LlamaGrammar.from_string(
                     #     follow_up_gbnf_tool_grammar, verbose=llama.verbose
                     # ),
-                ),
-            stream=stream,
-            )
+            )
 
         # One or more function calls
         tool_name = text[len("functions.") :].replace(":", "")
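
The net effect of these two hunks: instead of immediately wrapping the completion and returning, the function now binds the result of llama.create_completion(...) to msg_completion_or_chunks (the old return _convert_completion_to_chat(...) wrapper is commented out, and its trailing stream=stream argument goes away with the extra closing parenthesis). The commit does not show how the variable is consumed; a minimal sketch of one plausible follow-up, reusing the module's existing _convert_completion_to_chat(completion_or_chunks, stream=...) helper, might look like this. Everything after the assignment is an assumption, not part of this commit:

# Sketch only; the code after the assignment is assumed, not from this commit.
msg_completion_or_chunks = llama.create_completion(
    prompt=prompt + "message:\n",
    temperature=temperature,
    top_p=top_p,
    # ... remaining sampling arguments from the unchanged context lines ...
)
# Plausible continuation: inspect or post-process the raw completion (or the
# chunk iterator, when streaming) before handing it to the module's existing
# converter, which turns a text completion into a chat-style response.
return _convert_completion_to_chat(msg_completion_or_chunks, stream=stream)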
@@ -2809,7 +2807,7 @@ def vicuna_function_calling(
         "\nfunctions.{{ tool.function.name }}:\n"
         "{{ tool.function.parameters | tojson }}"
         "\n{% endfor %}"
-        "\n\nYou can respond to users messages with either a single message or multiple function calls, never both. Prioritize function calls over messages, when applicable."
+        "\n\nYou can respond to users messages with either a single message or multiple function calls, never both. If function calls are used, they must be the first part of the response."
         "\n\nTo respond with a message begin the message with 'message:', use the following format:"
         "\n\nmessage:"
         "\n<message>"
