We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent a1b2d5c commit 142fe7e — Copy full SHA for 142fe7e
llama_cpp/llama.py
@@ -207,6 +207,7 @@ def __init__(
207
n_ctx: int = 512,
208
n_parts: int = -1,
209
n_gpu_layers: int = 0,
210
+ tensor_split: List[float] = [0],
211
seed: int = 1337,
212
f16_kv: bool = True,
213
logits_all: bool = False,
@@ -254,6 +255,7 @@ def __init__(
254
255
self.params = llama_cpp.llama_context_default_params()
256
self.params.n_ctx = n_ctx
257
self.params.n_gpu_layers = n_gpu_layers
258
+ self.params.tensor_split = tensor_split
259
self.params.seed = seed
260
self.params.f16_kv = f16_kv
261
self.params.logits_all = logits_all
0 commit comments