Add Llama-3 chat format · abetlen/llama-cpp-python@f114963 · GitHub
[go: up one dir, main page]

Skip to content

Commit f114963

Browse files
committed
Add Llama-3 chat format
1 parent 0281214 commit f114963

File tree

1 file changed

+20
-0
lines changed

1 file changed

+20
-0
lines changed

llama_cpp/llama_chat_format.py

Lines changed: 20 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -920,6 +920,26 @@ def format_llama2(
920920
return ChatFormatterResponse(prompt=_prompt)
921921

922922

923+
# Chat format for Llama-3 models, see more details at:
# https://github.com/meta-llama/llama3/blob/main/llama/tokenizer.py#L202-L229
@register_chat_format("llama-3")
def format_llama3(
    messages: List[llama_types.ChatCompletionRequestMessage],
    **kwargs: Any,
) -> ChatFormatterResponse:
    """Render chat messages into the Llama-3 prompt template.

    Each turn is framed by role header tokens and terminated with
    ``<|eot_id|>``; an open assistant turn is left at the end for the
    model to complete.
    """
    # Every role shares the same header shape, differing only in the
    # role name between the header-id tokens.
    header = "<|start_header_id|>{role}<|end_header_id|>\n\n"
    _roles = {role: header.format(role=role) for role in ("system", "user", "assistant")}
    _begin_token = "<|begin_of_text|>"
    _sep = "<|eot_id|>"
    # Map incoming messages onto their role headers, then append an
    # unanswered assistant turn so generation starts there.
    _messages = _map_roles(messages, _roles)
    _messages.append((_roles["assistant"], None))
    _prompt = _format_no_colon_single(_begin_token, _messages, _sep)
    return ChatFormatterResponse(prompt=_prompt, stop=_sep)
941+
942+
923943
@register_chat_format("alpaca")
924944
def format_alpaca(
925945
messages: List[llama_types.ChatCompletionRequestMessage],

0 commit comments

Comments (0)
0