Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Appearance settings

Commit 8383a9e

Browse files
committed
fix: llava this function takes at least 4 arguments (0 given)
1 parent 3411178 commit 8383a9e
Copy full SHA for 8383a9e

File tree

Expand file tree / Collapse file tree

1 file changed

+9
-9
lines changed
Filter options
Expand file tree / Collapse file tree

1 file changed

+9
-9
lines changed

‎llama_cpp/llama_chat_format.py

Copy file name to clipboard · Expand all lines: llama_cpp/llama_chat_format.py
+9 −9 · Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1840,7 +1840,7 @@ def create_completion(stop):
18401840
class Llava15ChatHandler:
18411841
_clip_free = None
18421842

1843-
def __init__(self, clip_model_path: str, verbose: bool = False):
1843+
def __init__(self, clip_model_path: str, verbose: bool = False):
18441844
import llama_cpp.llava_cpp as llava_cpp
18451845

18461846
self._llava_cpp = llava_cpp
@@ -1957,21 +1957,21 @@ def __call__(
19571957
with suppress_stdout_stderr(disable=self.verbose):
19581958
embed = (
19591959
self._llava_cpp.llava_image_embed_make_with_bytes(
1960-
ctx_clip=self.clip_ctx,
1961-
n_threads=llama.context_params.n_threads,
1962-
image_bytes=c_ubyte_ptr,
1963-
image_bytes_length=len(image_bytes),
1960+
self.clip_ctx,
1961+
llama.context_params.n_threads,
1962+
c_ubyte_ptr,
1963+
length=len(image_bytes),
19641964
)
19651965
)
19661966
try:
19671967
n_past = ctypes.c_int(llama.n_tokens)
19681968
n_past_p = ctypes.pointer(n_past)
19691969
with suppress_stdout_stderr(disable=self.verbose):
19701970
self._llava_cpp.llava_eval_image_embed(
1971-
ctx_llama=llama.ctx,
1972-
embed=embed,
1973-
n_batch=llama.n_batch,
1974-
n_past=n_past_p,
1971+
llama.ctx,
1972+
embed,
1973+
llama.n_batch,
1974+
n_past_p,
19751975
)
19761976
assert llama.n_ctx() >= n_past.value
19771977
llama.n_tokens = n_past.value

0 commit comments

Comments
0 (0)
Morty Proxy This is a proxified and sanitized view of the page, visit original site.