Add logprobs return in ChatCompletionResponse #1311

Merged: 6 commits, Mar 31, 2024

llama_cpp/llama.py: 1 addition, 0 deletions
@@ -1653,6 +1653,7 @@ def create_chat_completion(
top_k=top_k,
min_p=min_p,
typical_p=typical_p,
logprobs=top_logprobs if logprobs else None,
stream=stream,
stop=stop,
seed=seed,
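
A minimal sketch of how the new plumbing is exercised from the high-level API, assuming create_chat_completion gains matching logprobs/top_logprobs keyword arguments as the hunk above implies; the model path is a placeholder, not part of this PR:

```python
# Sketch only: placeholder model path; parameter names follow the hunk above.
from llama_cpp import Llama

llm = Llama(model_path="./models/model.gguf")  # placeholder path

response = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"},
    ],
    logprobs=True,    # ask for logprobs in the chat response
    top_logprobs=10,  # forwarded to the underlying completion as `logprobs`
)

# With this change the choice carries a "logprobs" entry next to "message".
print(response["choices"][0]["logprobs"])
```
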
llama_cpp/llama_chat_format.py: 5 additions, 0 deletions
@@ -231,6 +231,7 @@ def _convert_text_completion_to_chat(
"role": "assistant",
"content": completion["choices"][0]["text"],
},
"logprobs": completion["choices"][0]["logprobs"],
"finish_reason": completion["choices"][0]["finish_reason"],
}
],
@@ -254,6 +255,7 @@ def _convert_text_completion_chunks_to_chat(
"delta": {
"role": "assistant",
},
"logprobs": None,
"finish_reason": None,
}
],
@@ -273,6 +275,7 @@ def _convert_text_completion_chunks_to_chat(
if chunk["choices"][0]["finish_reason"] is None
else {}
),
"logprobs": chunk["choices"][0]["logprobs"],
"finish_reason": chunk["choices"][0]["finish_reason"],
}
],
@@ -487,6 +490,7 @@ def chat_completion_handler(
temperature: float = 0.2,
top_p: float = 0.95,
top_k: int = 40,
logprobs: int = 0,
min_p: float = 0.05,
typical_p: float = 1.0,
stream: bool = False,
@@ -576,6 +580,7 @@ def chat_completion_handler(
top_k=top_k,
min_p=min_p,
typical_p=typical_p,
logprobs=logprobs,
stream=stream,
stop=stop,
seed=seed,
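
For orientation, the value copied into the chat choice above is the completion-style logprobs payload. An illustrative example of its shape, with invented values, where the field names follow the library's CompletionLogprobs TypedDict:

```python
# Invented values, shown only to illustrate the structure that
# _convert_text_completion_to_chat copies verbatim into the chat choice.
example_logprobs = {
    "tokens": [" Paris", "."],
    "text_offset": [0, 6],
    "token_logprobs": [-0.01, -0.2],
    "top_logprobs": [
        {" Paris": -0.01, " Lyon": -5.3},
        {".": -0.2, "!": -2.1},
    ],
}
```
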
llama_cpp/llama_types.py: 1 addition, 0 deletions
@@ -84,6 +84,7 @@ class ChatCompletionFunction(TypedDict):
class ChatCompletionResponseChoice(TypedDict):
index: int
message: "ChatCompletionResponseMessage"
logprobs: Optional[CompletionLogprobs]
finish_reason: Optional[str]


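
A short sketch of what the type change buys downstream code; the helper function is hypothetical, not part of the PR:

```python
# Hypothetical helper: with the new field, a type checker knows each chat
# choice may carry a completion-style logprobs payload (or None).
from typing import Optional

from llama_cpp.llama_types import ChatCompletionResponseChoice, CompletionLogprobs


def choice_logprobs(choice: ChatCompletionResponseChoice) -> Optional[CompletionLogprobs]:
    # None when the request did not ask for logprobs.
    return choice["logprobs"]
```
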
llama_cpp/server/app.py: 12 additions, 0 deletions
@@ -405,6 +405,18 @@ async def create_chat_completion(
}
},
},
"logprobs": {
"summary": "Logprobs",
"value": {
"model": "gpt-3.5-turbo",
"messages": [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is the capital of France?"},
],
"logprobs": True,
"top_logprobs": 10
},
},
}
),
llama_proxy: LlamaProxy = Depends(get_llama_proxy),
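
One way to exercise the documented "logprobs" example against a locally running server over its OpenAI-compatible endpoint; the base URL is an assumption, not part of the PR:

```python
# Illustrative HTTP request mirroring the new "logprobs" OpenAPI example above;
# the base URL assumes a llama-cpp-python server running locally.
import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What is the capital of France?"},
        ],
        "logprobs": True,
        "top_logprobs": 10,
    },
    timeout=60,
)

# With this PR each choice carries a "logprobs" object instead of omitting it.
print(resp.json()["choices"][0]["logprobs"])
```
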
llama_cpp/server/types.py: 9 additions, 1 deletion
@@ -130,7 +130,6 @@ class CreateCompletionRequest(BaseModel):
presence_penalty: Optional[float] = presence_penalty_field
frequency_penalty: Optional[float] = frequency_penalty_field
logit_bias: Optional[Dict[str, float]] = Field(None)
logprobs: Optional[int] = Field(None)
seed: Optional[int] = Field(None)

# ignored or currently unsupported
@@ -209,6 +208,15 @@ class CreateChatCompletionRequest(BaseModel):
default=None,
description="The maximum number of tokens to generate. Defaults to inf",
)
logprobs: Optional[bool] = Field(
default=False,
description="Whether to output the logprobs or not. Default is False"
)
top_logprobs: Optional[int] = Field(
default=None,
ge=0,
description="The number of logprobs to generate. If None, no logprobs are generated. Requires logprobs to be set to True.",
)
temperature: float = temperature_field
top_p: float = top_p_field
min_p: float = min_p_field
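
A minimal sketch of the two new request fields as the server's Pydantic model sees them, assuming messages is the only required field; constructing the model directly here is just for illustration:

```python
# Sketch: validating the new fields directly on the request model.
from llama_cpp.server.types import CreateChatCompletionRequest

req = CreateChatCompletionRequest(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    logprobs=True,    # enables logprobs in the response
    top_logprobs=10,  # must be >= 0; only honored when logprobs is True
)
print(req.logprobs, req.top_logprobs)  # True 10
```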