We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 1372e4f · commit b70b6a8 (Copy full SHA for b70b6a8)
llama_cpp/llama_cpp.py
@@ -252,6 +252,7 @@ class llama_token_data_array(Structure):
252
# bool use_mmap; // use mmap if possible
253
# bool use_mlock; // force system to keep model in RAM
254
# bool embedding; // embedding mode only
255
+# bool numa; // optimizations that help on some systems with non-uniform memory access
256
# };
257
class llama_context_params(Structure):
258
_fields_ = [
@@ -273,6 +274,7 @@ class llama_context_params(Structure):
273
274
("use_mmap", c_bool),
275
("use_mlock", c_bool),
276
("embedding", c_bool),
277
+ ("numa", c_bool),
278
]
279
280
0 commit comments