@@ -149,8 +149,15 @@ class CreateCompletionRequest(BaseModel):
         description="The number of logprobs to generate. If None, no logprobs are generated."
     )

-    # ignored, but marked as required for the sake of compatibility with openai's api
-    model: str = model_field
+    # ignored or currently unsupported
+    model: Optional[str] = model_field
+    n: Optional[int] = 1
+    logprobs: Optional[int] = Field(None)
+    presence_penalty: Optional[float] = 0
+    frequency_penalty: Optional[float] = 0
+    best_of: Optional[int] = 1
+    logit_bias: Optional[Dict[str, float]] = Field(None)
+    user: Optional[str] = Field(None)

     # llama.cpp specific parameters
     top_k: int = top_k_field
@@ -190,11 +197,11 @@ def create_completion(


 class CreateEmbeddingRequest(BaseModel):
-    # ignored, but marked as required for the sake of compatibility with openai's api
-    model: str = model_field
+    model: Optional[str] = model_field
     input: str = Field(
         description="The input to embed."
     )
+    user: Optional[str]

     class Config:
         schema_extra = {
@@ -235,8 +242,13 @@ class CreateChatCompletionRequest(BaseModel):
     stop: Optional[List[str]] = stop_field
     stream: bool = stream_field

-    # ignored, but marked as required for the sake of compatibility with openai's api
-    model: str = model_field
+    # ignored or currently unsupported
+    model: Optional[str] = model_field
+    n: Optional[int] = 1
+    presence_penalty: Optional[float] = 0
+    frequency_penalty: Optional[float] = 0
+    logit_bias: Optional[Dict[str, float]] = Field(None)
+    user: Optional[str] = Field(None)

     # llama.cpp specific parameters
     top_k: int = top_k_field
0 commit comments