2 files changed: +64 -0 lines changed

@@ -2539,6 +2539,56 @@ def from_pretrained(
            **kwargs,
        )

+class ObsidianChatHandler(Llava15ChatHandler):
+    # Prompt Format
+    # The model follows the ChatML format, but with ### as the separator:
+
+    # <|im_start|>user
+    # What is this sign about?\n<image>
+    # ###
+    # <|im_start|>assistant
+    # The sign is about bullying, and it is placed on a black background with a red background.
+    # ###
+
+    CHAT_FORMAT = (
+        "{% for message in messages %}"
+        # System message
+        "{% if message.role == 'system' %}"
+        "<|im_start|>system\n"
+        "{{ message.content }}\n"
+        "###\n"
+        "{% endif %}"
+        # User message
+        "{% if message.role == 'user' %}"
+        "<|im_start|>user\n"
+        "{% if message.content is string %}"
+        "{{ message.content }}"
+        "{% endif %}"
+        "{% if message.content is iterable %}"
+        "{% for content in message.content %}"
+        "{% if content.type == 'text' %}"
+        "{{ content.text }}"
+        "{% endif %}"
+        "{% if content.type == 'image_url' %}"
+        "{{ content.image_url }}"
+        "{% endif %}"
+        "{% endfor %}"
+        "{% endif %}"
+        "###\n"
+        "{% endif %}"
+        # Assistant message
+        "{% if message.role == 'assistant' %}"
+        "<|im_start|>assistant\n"
+        "{{ message.content }}"
+        "###\n"
+        "{% endif %}"
+        "{% endfor %}"
+        # Generation prompt
+        "{% if add_generation_prompt %}"
+        "<|im_start|>assistant\n"
+        "{% endif %}"
+    )
+
class MoondreamChatHanlder(Llava15ChatHandler):
    # Chat Format:
    # f"<image>\n\n{chat_history}Question: {question}\n\nAnswer:"
@@ -84,6 +84,20 @@ def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama:
            chat_handler = llama_cpp.llama_chat_format.Llava15ChatHandler(
                clip_model_path=settings.clip_model_path, verbose=settings.verbose
            )
+    elif settings.chat_format == "obsidian":
+        assert settings.clip_model_path is not None, "clip model not found"
+        if settings.hf_model_repo_id is not None:
+            chat_handler = (
+                llama_cpp.llama_chat_format.ObsidianChatHandler.from_pretrained(
+                    repo_id=settings.hf_model_repo_id,
+                    filename=settings.clip_model_path,
+                    verbose=settings.verbose,
+                )
+            )
+        else:
+            chat_handler = llama_cpp.llama_chat_format.ObsidianChatHandler(
+                clip_model_path=settings.clip_model_path, verbose=settings.verbose
+            )
    elif settings.chat_format == "llava-1-6":
        assert settings.clip_model_path is not None, "clip model not found"
        if settings.hf_model_repo_id is not None:
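With the server wiring above, the new format can be selected from the command line (a sketch; the model and projector paths are placeholders):

python3 -m llama_cpp.server \
  --model ./obsidian-3b-q5_k.gguf \
  --chat_format obsidian \
  --clip_model_path ./obsidian-mmproj-f16.gguf

Alternatively, when hf_model_repo_id is set, clip_model_path is treated as the projector filename inside that repo, matching the from_pretrained branch in the diff.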