1 file changed: +5 −1 lines changed

@@ -233,7 +233,11 @@ Then you'll need to use a custom chat handler to load the clip model and process
 >>> from llama_cpp import Llama
 >>> from llama_cpp.llama_chat_format import Llava15ChatHandler
 >>> chat_handler = Llava15ChatHandler(clip_model_path="path/to/llava/mmproj.bin")
->>> llm = Llama(model_path="./path/to/llava/llama-model.gguf", chat_handler=chat_handler)
+>>> llm = Llama(
+      model_path="./path/to/llava/llama-model.gguf",
+      chat_handler=chat_handler,
+      n_ctx=2048,  # n_ctx should be increased to accommodate the image embedding
+)
 >>> llm.create_chat_completion(
     messages = [
         {"role": "system", "content": "You are an assistant who perfectly describes images."},