2 files changed: +62 −0

llama_cpp/llama_chat_format.py
@@ -2635,6 +2635,54 @@ class Llava16ChatHandler(Llava15ChatHandler):
         "{% endif %}"
     )
 
+class NanoLlavaChatHandler(Llava15ChatHandler):
+    # Prompt Format
+    # The model follows the ChatML standard, however without \n at the end of <|im_end|>:
+
+    # <|im_start|>system
+    # Answer the question<|im_end|><|im_start|>user
+    # <image>
+    # What is the picture about?<|im_end|><|im_start|>assistant
+
+    CHAT_FORMAT = (
+        "{% for message in messages %}"
+        # System message
+        "{% if message.role == 'system' %}"
+        "<|im_start|>system\n"
+        "{{ message.content }}"
+        "<|im_end|>"
+        "{% endif %}"
+        # User message
+        "{% if message.role == 'user' %}"
+        "<|im_start|>user\n"
+        "{% if message.content is string %}"
+        "{{ message.content }}"
+        "{% endif %}"
+        "{% if message.content is iterable %}"
+        "{% for content in message.content %}"
+        "{% if content.type == 'text' %}"
+        "{{ content.text }}"
+        "{% endif %}"
+        "{% if content.type == 'image_url' %}"
+        "{{ content.image_url }}"
+        "{% endif %}"
+        "{% endfor %}"
+        "{% endif %}"
+        "<|im_end|>"
+        "{% endif %}"
+        # Assistant message
+        "{% if message.role == 'assistant' %}"
+        "<|im_start|>assistant\n"
+        "{{ message.content }}"
+        "<|im_end|>"
+        "{% endif %}"
+        "{% endfor %}"
+        # Generation prompt
+        "{% if add_generation_prompt %}"
+        "<|im_start|>assistant\n"
+        "{% endif %}"
+    )
+
 
 @register_chat_completion_handler("chatml-function-calling")
 def chatml_function_calling(
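
For reviewers, a minimal rendering sketch (not part of the diff; assumes this branch of llama-cpp-python plus jinja2 are installed, and the sample messages are purely illustrative) showing that the new CHAT_FORMAT emits ChatML with no newline after <|im_end|>:

# Render the new template standalone to inspect the prompt it produces.
from jinja2 import Template

from llama_cpp.llama_chat_format import NanoLlavaChatHandler

prompt = Template(NanoLlavaChatHandler.CHAT_FORMAT).render(
    messages=[
        {"role": "system", "content": "Answer the question"},
        {
            "role": "user",
            "content": [
                # Placeholder image; in the real handler the rendered image_url
                # text is replaced by image embeddings before evaluation.
                {"type": "image_url", "image_url": "data:image/png;base64,..."},
                {"type": "text", "text": "What is the picture about?"},
            ],
        },
    ],
    add_generation_prompt=True,
)
print(prompt)
# Expected shape (note: no newline after <|im_end|>):
# <|im_start|>system
# Answer the question<|im_end|><|im_start|>user
# data:image/png;base64,...What is the picture about?<|im_end|><|im_start|>assistant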
llama_cpp/server/model.py
@@ -98,6 +98,20 @@ def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama:
            chat_handler = llama_cpp.llama_chat_format.Llava16ChatHandler(
                clip_model_path=settings.clip_model_path, verbose=settings.verbose
            )
+        elif settings.chat_format == "nanollava":
+            assert settings.clip_model_path is not None, "clip model not found"
+            if settings.hf_model_repo_id is not None:
+                chat_handler = (
+                    llama_cpp.llama_chat_format.NanoLlavaChatHandler.from_pretrained(
+                        repo_id=settings.hf_model_repo_id,
+                        filename=settings.clip_model_path,
+                        verbose=settings.verbose,
+                    )
+                )
+            else:
+                chat_handler = llama_cpp.llama_chat_format.NanoLlavaChatHandler(
+                    clip_model_path=settings.clip_model_path, verbose=settings.verbose
+                )
         elif settings.chat_format == "moondream":
            assert settings.clip_model_path is not None, "clip model not found"
            if settings.hf_model_repo_id is not None:
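
A usage sketch for the new handler outside the server path (model and clip paths below are placeholders; the message format mirrors the existing Llava handlers):

# Load a NanoLLaVA model with the new chat handler and run one
# image-plus-text chat completion.
from llama_cpp import Llama
from llama_cpp.llama_chat_format import NanoLlavaChatHandler

chat_handler = NanoLlavaChatHandler(
    clip_model_path="./models/nanollava/mmproj-model-f16.gguf",  # placeholder path
)
llm = Llama(
    model_path="./models/nanollava/nanollava-q4_k_m.gguf",  # placeholder path
    chat_handler=chat_handler,
    n_ctx=2048,  # room for image embeddings plus the prompt
)
response = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "Answer the question"},
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
                {"type": "text", "text": "What is the picture about?"},
            ],
        },
    ],
)
print(response["choices"][0]["message"]["content"])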