Bugfix · Redwa/llama-cpp-python@02f9fb8 · GitHub

Commit 02f9fb8

Bugfix
1 parent 3cd67c7 commit 02f9fb8

File tree

1 file changed: +1 -4 lines changed


llama_cpp/llama.py

Lines changed: 1 addition & 4 deletions
@@ -695,10 +695,7 @@ def create_chat_completion(
         Returns:
             Generated chat completion or a stream of chat completion chunks.
         """
-        stop = stop if not None else []
-        instructions = """Complete the following chat conversation between the user and the assistant. System messages should be strictly followed as additional instructions."""
-        chat_history = "\n".join(
-            f'{message["role"]} {message.get("user", "")}: {message["content"]}'
+        stop = stop if stop is not None else []
             for message in messages
         )
         PROMPT = f" \n\n### Instructions:{instructions}\n\n### Inputs:{chat_history}\n\n### Response:\nassistant: "
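
The one-line change fixes a classic conditional-expression slip: in Python, "not None" always evaluates to True, so "stop if not None else []" unconditionally returns stop and the [] default is never applied. A minimal standalone sketch of the difference (a toy snippet, not code from the repository):

    # "not None" is always True, so the broken form never falls back to [].
    stop = None

    broken = stop if not None else []         # condition is True -> always returns stop
    fixed = stop if stop is not None else []  # tests stop itself -> applies the default

    print(broken)  # None -- the [] default is unreachable
    print(fixed)   # []   -- None is correctly replaced

With the corrected line, a caller who passes stop=None gets an empty list of stop sequences instead of None flowing into the completion logic further down.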
