We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 6208751 · commit 83b2be6 — Copy full SHA for 83b2be6
llama_cpp/llama.py
@@ -672,12 +672,12 @@ def _convert_text_completion_chunks_to_chat(
672
def create_chat_completion(
673
self,
674
messages: List[ChatCompletionMessage],
675
- temperature: float = 0.8,
+ temperature: float = 0.2,
676
top_p: float = 0.95,
677
top_k: int = 40,
678
stream: bool = False,
679
stop: Optional[List[str]] = [],
680
- max_tokens: int = 128,
+ max_tokens: int = 256,
681
repeat_penalty: float = 1.1,
682
) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
683
"""Generate a chat completion from a list of messages.
0 commit comments