Commit 1d5f534

feat: Update llama.cpp
1 parent c9dfad4 commit 1d5f534

Showing 2 changed files with 30 additions and 3 deletions.

‎llama_cpp/llama_cpp.py

29 additions & 2 deletions
@@ -222,6 +222,7 @@
 # LLAMA_VOCAB_PRE_TYPE_EXAONE = 25,
 # LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26,
 # LLAMA_VOCAB_PRE_TYPE_MINERVA = 27,
+# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28,
 # };
 LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0
 LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1
@@ -251,6 +252,7 @@
 LLAMA_VOCAB_PRE_TYPE_EXAONE = 25
 LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26
 LLAMA_VOCAB_PRE_TYPE_MINERVA = 27
+LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28


 # // note: these values should be synchronized with ggml_rope
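These two hunks add the new DeepSeek-V3 pre-tokenizer id as a plain module-level constant, mirroring the llama.cpp enum shown in the comment block. A trivial sanity check against this commit (a sketch, assuming the package is importable):

import llama_cpp.llama_cpp as llama_cpp

# Mirrors llama.cpp's LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28
assert llama_cpp.LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM == 28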
@@ -1090,9 +1092,10 @@ def llama_backend_free():
     ...


-# LLAMA_API struct llama_model * llama_load_model_from_file(
+# DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file(
 #                          const char * path_model,
-#                          struct llama_model_params params);
+#                          struct llama_model_params params),
+#            "use llama_model_load_from_file instead");
 @ctypes_function(
     "llama_load_model_from_file",
     [ctypes.c_char_p, llama_model_params],
@@ -1104,6 +1107,20 @@ def llama_load_model_from_file(
     ...


+# LLAMA_API struct llama_model * llama_model_load_from_file(
+#                          const char * path_model,
+#                          struct llama_model_params params);
+@ctypes_function(
+    "llama_model_load_from_file",
+    [ctypes.c_char_p, llama_model_params],
+    llama_model_p_ctypes,
+)
+def llama_model_load_from_file(
+    path_model: bytes, params: llama_model_params, /
+) -> Optional[llama_model_p]:
+    ...
+
+
 # LLAMA_API void llama_free_model(struct llama_model * model);
 @ctypes_function(
     "llama_free_model",
@@ -1114,6 +1131,16 @@ def llama_free_model(model: llama_model_p, /):
     ...


+# LLAMA_API void llama_model_free(struct llama_model * model);
+@ctypes_function(
+    "llama_model_free",
+    [llama_model_p_ctypes],
+    None,
+)
+def llama_model_free(model: llama_model_p, /):
+    ...
+
+
 # LLAMA_API struct llama_context * llama_new_context_with_model(
 #     struct llama_model * model,
 #     struct llama_context_params params);
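Taken together, these hunks deprecate llama_load_model_from_file in favor of the new llama_model_load_from_file/llama_model_free pair. A minimal usage sketch of the new bindings (the model path is a placeholder; llama_backend_init, llama_backend_free, and llama_model_default_params are existing bindings elsewhere in llama_cpp.py):

import llama_cpp.llama_cpp as llama_cpp

llama_cpp.llama_backend_init()
params = llama_cpp.llama_model_default_params()

# New-style load; per the binding's Optional[llama_model_p] return
# type, a failed load comes back as None rather than raising
model = llama_cpp.llama_model_load_from_file(b"/path/to/model.gguf", params)
if model is not None:
    # ... create a context, run inference, etc. ...
    llama_cpp.llama_model_free(model)  # replaces the old llama_free_model

llama_cpp.llama_backend_free()

The old names remain bound for now, so existing callers keep working while the DEPRECATED annotation steers new code to the renamed API.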

‎vendor/llama.cpp

1 addition & 1 deletion (submodule pointer bumped to the newer llama.cpp commit)
