
Commit 8f09d42

Add Obsidian support
1 parent 8324ee0 commit 8f09d42

File tree

llama_cpp/llama_chat_format.py
llama_cpp/server/model.py

2 files changed: +64 −0 lines changed


llama_cpp/llama_chat_format.py

Lines changed: 50 additions & 0 deletions
@@ -2539,6 +2539,56 @@ def from_pretrained(
         **kwargs,
     )
 
+class ObsidianChatHandler(Llava15ChatHandler):
+    # Prompt Format
+    # The model follows the ChatML format, but with ### as the separator.
+
+    # <|im_start|>user
+    # What is this sign about?\n<image>
+    # ###
+    # <|im_start|>assistant
+    # The sign is about bullying, and it is placed on a black background with a red background.
+    # ###
+
+    CHAT_FORMAT = (
+        "{% for message in messages %}"
+        # System message
+        "{% if message.role == 'system' %}"
+        "<|im_start|>system\n"
+        "{{ message.content }}\n"
+        "###\n"
+        "{% endif %}"
+        # User message
+        "{% if message.role == 'user' %}"
+        "<|im_start|>user\n"
+        "{% if message.content is string %}"
+        "{{ message.content }}"
+        "{% endif %}"
+        "{% if message.content is iterable %}"
+        "{% for content in message.content %}"
+        "{% if content.type == 'text' %}"
+        "{{ content.text }}"
+        "{% endif %}"
+        "{% if content.type == 'image_url' %}"
+        "{{ content.image_url }}"
+        "{% endif %}"
+        "{% endfor %}"
+        "{% endif %}"
+        "###\n"
+        "{% endif %}"
+        # Assistant message
+        "{% if message.role == 'assistant' %}"
+        "<|im_start|>assistant\n"
+        "{{ message.content }}"
+        "###\n"
+        "{% endif %}"
+        "{% endfor %}"
+        # Generation prompt
+        "{% if add_generation_prompt %}"
+        "<|im_start|>assistant\n"
+        "{% endif %}"
+    )
+
 class MoondreamChatHanlder(Llava15ChatHandler):
     # Chat Format:
     # f"<image>\n\n{chat_history}Question: {question}\n\nAnswer:"

llama_cpp/server/model.py

Lines changed: 14 additions & 0 deletions
@@ -84,6 +84,20 @@ def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama:
         chat_handler = llama_cpp.llama_chat_format.Llava15ChatHandler(
             clip_model_path=settings.clip_model_path, verbose=settings.verbose
         )
+    elif settings.chat_format == "obsidian":
+        assert settings.clip_model_path is not None, "clip model not found"
+        if settings.hf_model_repo_id is not None:
+            chat_handler = (
+                llama_cpp.llama_chat_format.ObsidianChatHandler.from_pretrained(
+                    repo_id=settings.hf_model_repo_id,
+                    filename=settings.clip_model_path,
+                    verbose=settings.verbose,
+                )
+            )
+        else:
+            chat_handler = llama_cpp.llama_chat_format.ObsidianChatHandler(
+                clip_model_path=settings.clip_model_path, verbose=settings.verbose
+            )
     elif settings.chat_format == "llava-1-6":
         assert settings.clip_model_path is not None, "clip model not found"
         if settings.hf_model_repo_id is not None:
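
Similarly, a sketch of exercising the new branch through the server's model settings. This assumes ModelSettings is importable from llama_cpp.server.settings and that load_llama_from_model_settings is the LlamaProxy static method named in the hunk header above, as in contemporary llama-cpp-python releases; file paths are placeholders.

# Sketch only: route model loading through the new "obsidian" branch.
# Import locations are assumptions, not confirmed by this commit;
# file paths are placeholders.
from llama_cpp.server.model import LlamaProxy
from llama_cpp.server.settings import ModelSettings

settings = ModelSettings(
    model="./obsidian-3b-q6.gguf",             # placeholder model path
    chat_format="obsidian",                    # routes to the new elif branch
    clip_model_path="./obsidian-mmproj.gguf",  # required by the assert above
)
llm = LlamaProxy.load_llama_from_model_settings(settings)

Since the server exposes each ModelSettings field as a CLI flag, the equivalent launch would presumably be python3 -m llama_cpp.server --model <model.gguf> --chat_format obsidian --clip_model_path <mmproj.gguf>.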
