fix: missing generation_prompt in chatml-function-calling · thiner/llama-cpp-python@68fb71b · GitHub

Commit 68fb71b

fix: missing generation_prompt in chatml-function-calling

1 parent 4b0e332 · commit 68fb71b

File tree

1 file changed: +4 −0 lines changed


llama_cpp/llama_chat_format.py

Lines changed: 4 additions & 0 deletions
@@ -2088,6 +2088,7 @@ def chatml_function_calling(
         "{% endif %}"
         "{% endif %}"
         "{% endfor %}"
+        "{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
     )
     template_renderer = jinja2.Environment(
         loader=jinja2.BaseLoader(),
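
For context, here is a minimal sketch of what the added template line does when rendered. The fragment below is illustrative only: it keeps just the message loop and the new generation-prompt conditional, omitting the tool-calling branches of the real function_calling_template.

import jinja2

# Trimmed stand-in for the ChatML function-calling template; only the
# message loop and the newly added generation-prompt line are kept.
template = (
    "{% for message in messages %}"
    "<|im_start|>{{ message.role }}\n{{ message.content }}<|im_end|>\n"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
)

renderer = jinja2.Environment(loader=jinja2.BaseLoader()).from_string(template)
messages = [{"role": "user", "content": "Hi"}]

# Without the flag, the prompt ends after the user's message and the model
# is never cued to open an assistant turn.
print(renderer.render(messages=messages, add_generation_prompt=False))

# With the flag, the prompt ends with '<|im_start|>assistant\n', which is
# the behavior this commit restores.
print(renderer.render(messages=messages, add_generation_prompt=True))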
@@ -2130,6 +2131,7 @@ def chatml_function_calling(
         messages=messages,
         tools=[],
         tool_calls=None,
+        add_generation_prompt=True,
     )
     if response_format is not None and response_format["type"] == "json_object":
         try:
@@ -2363,6 +2365,7 @@ def _stream_response_to_function_stream(
         messages=messages,
         tools=tools,
         tool_calls=True,
+        add_generation_prompt=True,
     )
     prompt += f"functions.{tool_name}:\n"
     try:
@@ -2420,6 +2423,7 @@ def _stream_response_to_function_stream(
         messages=messages,
         tools=tools,
         tool_calls=True,
+        add_generation_prompt=True,
     )
     completion_or_chunks = llama.create_completion(
         prompt=prompt,
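
Taken together, the four hunks make every render site opt in to the generation prompt. A rough sketch of the user-visible effect through the high-level API (the model path is a placeholder; any ChatML-tuned GGUF model would do):

from llama_cpp import Llama

# Placeholder path; substitute a real GGUF model file.
llama = Llama(
    model_path="./model.gguf",
    chat_format="chatml-function-calling",
)

# Before this fix, the handler's prompt ended after the last message, so the
# model could simply continue the user's turn. With add_generation_prompt=True
# the prompt now ends with '<|im_start|>assistant\n' and generation starts
# inside a fresh assistant turn.
response = llama.create_chat_completion(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
)
print(response["choices"][0]["message"]["content"])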
