@@ -71,28 +71,27 @@ extern "C" {
 
     typedef void (*llama_progress_callback)(float progress, void *ctx);
 
-    struct llama_context_params {
+    struct llama_context_params {
+        int seed;                              // RNG seed, -1 for random
         int n_ctx;                             // text context
         int n_batch;                           // prompt processing batch size
         int n_gpu_layers;                      // number of layers to store in VRAM
         int main_gpu;                          // the GPU that is used for scratch and small tensors
         float tensor_split[LLAMA_MAX_DEVICES]; // how to split layers across multiple GPUs
-        bool low_vram;                         // if true, reduce VRAM usage at the cost of performance
-        int seed;                              // RNG seed, -1 for random
+        // called with a progress value between 0 and 1, pass NULL to disable
+        llama_progress_callback progress_callback;
+        // context pointer passed to the progress callback
+        void * progress_callback_user_data;
 
+        // Keep the booleans together to avoid misalignment during copy-by-value.
+        bool low_vram;                         // if true, reduce VRAM usage at the cost of performance
         bool f16_kv;                           // use fp16 for KV cache
         bool logits_all;                       // the llama_eval() call computes all logits, not just the last one
         bool vocab_only;                       // only load the vocabulary, no weights
         bool use_mmap;                         // use mmap if possible
         bool use_mlock;                        // force system to keep model in RAM
         bool embedding;                        // embedding mode only
-
-        // called with a progress value between 0 and 1, pass NULL to disable
-        llama_progress_callback progress_callback;
-        // context pointer passed to the progress callback
-        void * progress_callback_user_data;
     };
-
     // model file types
     enum llama_ftype {
         LLAMA_FTYPE_ALL_F32 = 0,
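For reference, a minimal sketch of how the reordered fields are consumed. It assumes the same-era loader API (llama_context_default_params(), llama_init_from_file(), llama_free()); the model path and progress formatting are placeholders.

#include <stdio.h>

#include "llama.h"

// Matches the llama_progress_callback typedef above: called with a
// progress value between 0 and 1; ctx is the user-data pointer set below.
static void on_progress(float progress, void * ctx) {
    const char * tag = (const char *) ctx;
    fprintf(stderr, "\r%s: %3.0f%%", tag, progress * 100.0f);
}

int main(void) {
    static char tag[] = "loading"; // passed through to on_progress unchanged

    struct llama_context_params params = llama_context_default_params();

    params.seed                        = -1;          // -1 for random
    params.n_ctx                       = 2048;
    params.progress_callback           = on_progress; // pass NULL to disable
    params.progress_callback_user_data = tag;

    // params is copied by value here, which is why the field layout
    // (and the grouped booleans) matters across the C ABI boundary.
    struct llama_context * ctx = llama_init_from_file("model.bin", params);
    if (ctx == NULL) {
        fprintf(stderr, "\nfailed to load model\n");
        return 1;
    }

    fprintf(stderr, "\n");
    llama_free(ctx);
    return 0;
}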
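And a small compile-time check, not part of this PR, that bindings mirroring the struct could use to catch this kind of reordering. It relies only on C11 static_assert plus the guarantees that the first member sits at offset 0 and later members have strictly increasing offsets.

#include <assert.h>  // static_assert (C11)
#include <stddef.h>  // offsetof

#include "llama.h"

// seed is now the first member, so the C standard pins it at offset 0.
static_assert(offsetof(struct llama_context_params, seed) == 0,
              "seed is expected to be the first field after this change");

// The callback pair moved ahead of the booleans; members are laid out in
// declaration order, so this check fails if the reordering is reverted.
static_assert(offsetof(struct llama_context_params, progress_callback) <
              offsetof(struct llama_context_params, low_vram),
              "progress_callback should precede the grouped booleans");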