Commit 64008aa

Fix typo

1 parent f70326f commit 64008aa


llama_cpp/server/model.py

1 file changed: +2 -2 (2 additions & 2 deletions)
@@ -116,14 +116,14 @@ def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama:
         assert settings.clip_model_path is not None, "clip model not found"
         if settings.hf_model_repo_id is not None:
             chat_handler = (
-                llama_cpp.llama_chat_format.MoondreamChatHanlder.from_pretrained(
+                llama_cpp.llama_chat_format.MoondreamChatHandler.from_pretrained(
                     repo_id=settings.hf_model_repo_id,
                     filename=settings.clip_model_path,
                     verbose=settings.verbose,
                 )
             )
         else:
-            chat_handler = llama_cpp.llama_chat_format.MoondreamChatHanlder(
+            chat_handler = llama_cpp.llama_chat_format.MoondreamChatHandler(
                 clip_model_path=settings.clip_model_path, verbose=settings.verbose
             )
     elif settings.chat_format == "nanollava":
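For context, MoondreamChatHandler is the multimodal chat handler that pairs a moondream text model with its CLIP projector. Below is a minimal sketch of using it directly, following the same from_pretrained pattern as the fixed code above; the repo id, filename globs, and image URL are illustrative assumptions, not part of this commit:

    import llama_cpp
    from llama_cpp.llama_chat_format import MoondreamChatHandler

    # Fetch the CLIP projector weights from the Hub and build the handler
    # (repo id and filename patterns are assumptions for illustration).
    chat_handler = MoondreamChatHandler.from_pretrained(
        repo_id="vikhyatk/moondream2",
        filename="*mmproj*",
    )

    # Load the text model with the handler attached; n_ctx is raised to
    # leave room for the image embedding tokens.
    llm = llama_cpp.Llama.from_pretrained(
        repo_id="vikhyatk/moondream2",
        filename="*text-model*",
        chat_handler=chat_handler,
        n_ctx=2048,
    )

    # Ask a question about an image (hypothetical URL).
    response = llm.create_chat_completion(
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Describe this image."},
                    {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
                ],
            }
        ]
    )
    print(response["choices"][0]["message"]["content"])

With the typo in place, both branches above would have raised AttributeError at startup for moondream models, since llama_cpp.llama_chat_format has no MoondreamChatHanlder attribute.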

0 commit comments
