1 parent e811a81 commit d99a6ba
llama_cpp/llama.py
@@ -413,8 +413,8 @@ def __init__(
         eos_token_id = self.token_eos()
         bos_token_id = self.token_bos()

-        eos_token = self._model.token_get_text(eos_token_id)
-        bos_token = self._model.token_get_text(bos_token_id)
+        eos_token = self._model.token_get_text(eos_token_id) if eos_token_id != -1 else ""
+        bos_token = self._model.token_get_text(bos_token_id) if bos_token_id != -1 else ""

         # Unfortunately the llama.cpp API does not return metadata arrays, so we can't get template names from tokenizer.chat_templates
         template_choices = dict((name[10:], template) for name, template in self.metadata.items() if name.startswith("tokenizer.chat_template."))
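Context for the change (an inference from the diff, not stated in the commit): the guard suggests that token_eos() / token_bos() return -1 when the model's vocabulary defines no EOS/BOS token, so passing -1 on to token_get_text would look up an invalid id. A minimal sketch of the guarded lookup, with a hypothetical `model` standing in for self._model:

    # Minimal sketch, assuming -1 is the "no such token" sentinel returned by
    # token_eos()/token_bos(), as the guard in this commit implies.
    def special_token_text(model, token_id: int) -> str:
        """Return the token's text, or "" when the id is the -1 sentinel."""
        return model.token_get_text(token_id) if token_id != -1 else ""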