Commit 20e0967

Add Llava1.6 support

Parent: 0e182be

2 files changed (65 additions & 0 deletions)

llama_cpp/llama_chat_format.py (51 additions & 0 deletions)
@@ -2584,6 +2584,57 @@ class MoondreamChatHanlder(Llava15ChatHandler):
         "{% endif %}"
     )
 
+class Llava16ChatHandler(Llava15ChatHandler):
+    DEFAULT_SYSTEM_MESSAGE = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. "
+
+    # Example prompt
+    # "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: <image>\nWhat is shown in this image? ASSISTANT:"
+
+    CHAT_FORMAT = (
+        "{% for message in messages %}"
+        "{% if message.role == 'system' %}"
+        "{{ message.content }}"
+        "{% endif %}"
+        "{% if message.role == 'user' %}"
+        "{% if message.content is iterable %}"
+        "{% for content in message.content %}"
+
+        # <image>
+        "{% if content.type == 'image_url' %}"
+        "{% if content.image_url is string %}"
+        "{{ content.image_url }}\n"
+        "{% endif %}"
+        "{% if content.image_url is mapping %}"
+        "{{ content.image_url.url }}\n"
+        "{% endif %}"
+        "{% endif %}"
+
+        # Question:
+        "{% if content.type == 'text' %}"
+        "{{ content.text }}"
+        "{% endif %}"
+        "{% endfor %}"
+        "{% endif %}"
+
+        # Question:
+        "{% if message.content is string %}"
+        "{{ message.content }}"
+        "{% endif %}"
+
+        "{% endif %}"
+
+        # Answer:
+        "{% if message.role == 'assistant' %}"
+        "{{ message.content }}"
+        "{% endif %}"
+        "{% endfor %}"
+
+        # Generation prompt
+        "{% if add_generation_prompt %}"
+        "Answer:"
+        "{% endif %}"
+    )
+
 
 @register_chat_completion_handler("chatml-function-calling")
 def chatml_function_calling(
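Not part of the commit: a minimal usage sketch of the new handler through the library's high-level chat API, mirroring how the existing Llava15ChatHandler is used. The model path, projector path, and image URL below are hypothetical placeholders.

# A minimal sketch, not part of the commit: using Llava16ChatHandler with
# the high-level API. Paths and the image URL are hypothetical.
from llama_cpp import Llama
from llama_cpp.llama_chat_format import Llava16ChatHandler

chat_handler = Llava16ChatHandler(
    clip_model_path="models/mmproj-model-f16.gguf"  # hypothetical CLIP projector path
)
llm = Llama(
    model_path="models/llava-v1.6-mistral-7b.Q4_K_M.gguf",  # hypothetical model path
    chat_handler=chat_handler,
    n_ctx=4096,  # extra context to leave room for the image embeddings
)
response = llm.create_chat_completion(
    messages=[
        {
            "role": "user",
            # Matches the CHAT_FORMAT above: an 'image_url' part (string or
            # mapping form) followed by a 'text' part with the question.
            "content": [
                {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
                {"type": "text", "text": "What is shown in this image?"},
            ],
        },
    ],
)
print(response["choices"][0]["message"]["content"])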

llama_cpp/server/model.py (14 additions & 0 deletions)
@@ -84,6 +84,20 @@ def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama:
         chat_handler = llama_cpp.llama_chat_format.Llava15ChatHandler(
             clip_model_path=settings.clip_model_path, verbose=settings.verbose
         )
+    elif settings.chat_format == "llava-1-6":
+        assert settings.clip_model_path is not None, "clip model not found"
+        if settings.hf_model_repo_id is not None:
+            chat_handler = (
+                llama_cpp.llama_chat_format.Llava16ChatHandler.from_pretrained(
+                    repo_id=settings.hf_model_repo_id,
+                    filename=settings.clip_model_path,
+                    verbose=settings.verbose,
+                )
+            )
+        else:
+            chat_handler = llama_cpp.llama_chat_format.Llava16ChatHandler(
+                clip_model_path=settings.clip_model_path, verbose=settings.verbose
+            )
     elif settings.chat_format == "moondream":
         assert settings.clip_model_path is not None, "clip model not found"
         if settings.hf_model_repo_id is not None:
