Commit 3fbda71

Fix mlock_supported and mmap_supported return type
1 parent 5a3413e

File tree: 2 files changed, +10 −6 lines changed

llama_cpp/llama_cpp.py

8 additions & 4 deletions
@@ -157,15 +157,15 @@ def llama_context_default_params() -> llama_context_params:
 _lib.llama_context_default_params.restype = llama_context_params


-def llama_mmap_supported() -> c_bool:
+def llama_mmap_supported() -> bool:
     return _lib.llama_mmap_supported()


 _lib.llama_mmap_supported.argtypes = []
 _lib.llama_mmap_supported.restype = c_bool


-def llama_mlock_supported() -> c_bool:
+def llama_mlock_supported() -> bool:
     return _lib.llama_mlock_supported()


@@ -387,7 +387,9 @@ def llama_n_embd(ctx: llama_context_p) -> c_int:
 # Can be mutated in order to change the probabilities of the next token
 # Rows: n_tokens
 # Cols: n_vocab
-def llama_get_logits(ctx: llama_context_p): # type: (...) -> Array[float] # type: ignore
+def llama_get_logits(
+    ctx: llama_context_p,
+): # type: (...) -> Array[float] # type: ignore
     return _lib.llama_get_logits(ctx)


@@ -397,7 +399,9 @@ def llama_get_logits(ctx: llama_context_p): # type: (...) -> Array[float] # typ

 # Get the embeddings for the input
 # shape: [n_embd] (1-dimensional)
-def llama_get_embeddings(ctx: llama_context_p): # type: (...) -> Array[float] # type: ignore
+def llama_get_embeddings(
+    ctx: llama_context_p,
+): # type: (...) -> Array[float] # type: ignore
     return _lib.llama_get_embeddings(ctx)
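The c_bool → bool change above reflects how ctypes treats simple restypes: when restype is set to a simple type such as c_bool, ctypes converts the foreign function's result into a native Python value on every call, so these wrappers have always returned a plain bool at runtime and the old -> c_bool annotations never matched. A minimal standalone sketch of that conversion, using libc's isalpha rather than llama.cpp (assumption: a C library is findable on this system):

# ctypes converts simple restypes (c_bool, c_int, ...) to native Python
# values, which is why llama_mmap_supported() yields a plain bool.
import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))
libc.isalpha.argtypes = [ctypes.c_int]
libc.isalpha.restype = ctypes.c_bool  # same restype the bindings declare

result = libc.isalpha(ord("a"))
print(type(result), result)  # <class 'bool'> True -- no .value attribute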
llama_cpp/server/app.py

2 additions & 2 deletions
@@ -27,11 +27,11 @@ class Settings(BaseSettings):
     )
     f16_kv: bool = Field(default=True, description="Whether to use f16 key/value.")
     use_mlock: bool = Field(
-        default=bool(llama_cpp.llama_mlock_supported().value),
+        default=llama_cpp.llama_mlock_supported(),
         description="Use mlock.",
     )
     use_mmap: bool = Field(
-        default=bool(llama_cpp.llama_mmap_supported().value),
+        default=llama_cpp.llama_mmap_supported(),
         description="Use mmap.",
     )
     embedding: bool = Field(default=True, description="Whether to use embeddings.")
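The dropped .value access in app.py follows from the same conversion: because llama_mlock_supported() and llama_mmap_supported() already return a plain Python bool, the old defaults called .value on a bool, which would raise AttributeError the moment the Settings class body was evaluated. A short sketch of the before/after behavior (assumption: llama_cpp from this commit imports and its shared library loads):

import llama_cpp

supported = llama_cpp.llama_mlock_supported()
assert isinstance(supported, bool)  # plain bool, per the c_bool restype

# Old default: bool(llama_cpp.llama_mlock_supported().value)
# -> AttributeError: 'bool' object has no attribute 'value'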

0 commit comments
