fix: Pass raise_exception and add_generation_prompt to jinja2 chat template · coderonion/llama-cpp-python@078cca0 · GitHub
[go: up one dir, main page]

Skip to content

Commit 078cca0

Browse files
committed
fix: Pass raise_exception and add_generation_prompt to jinja2 chat template
1 parent 4114947 commit 078cca0

File tree

1 file changed

+9
-8
lines changed

1 file changed

+9
-8
lines changed

llama_cpp/llama_chat_format.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -185,16 +185,17 @@ def __call__(
185185
messages: List[llama_types.ChatCompletionRequestMessage],
186186
**kwargs: Any,
187187
) -> ChatFormatterResponse:
188-
if self.add_generation_prompt:
189-
messages = [
190-
*messages,
191-
llama_types.ChatCompletionRequestAssistantMessage(
192-
role="assistant", content=""
193-
),
194-
]
188+
def raise_exception(message: str):
189+
raise ValueError(message)
190+
195191
prompt = self._environment.render(
196-
messages=messages, eos_token=self.eos_token, bos_token=self.bos_token
192+
messages=messages,
193+
eos_token=self.eos_token,
194+
bos_token=self.bos_token,
195+
raise_exception=raise_exception,
196+
add_generation_prompt=self.add_generation_prompt
197197
)
198+
198199
return ChatFormatterResponse(prompt=prompt, stop=[self.eos_token])
199200

200201
def to_chat_handler(self) -> LlamaChatCompletionHandler:

0 commit comments

Comments
 (0)