Commit d99a6ba

fix: segfault for models without eos / bos tokens. Closes abetlen#1463
1 parent e811a81

1 file changed: +2 / -2 lines

llama_cpp/llama.py (2 additions, 2 deletions)

@@ -413,8 +413,8 @@ def __init__(
         eos_token_id = self.token_eos()
         bos_token_id = self.token_bos()

-        eos_token = self._model.token_get_text(eos_token_id)
-        bos_token = self._model.token_get_text(bos_token_id)
+        eos_token = self._model.token_get_text(eos_token_id) if eos_token_id != -1 else ""
+        bos_token = self._model.token_get_text(bos_token_id) if bos_token_id != -1 else ""

         # Unfortunately the llama.cpp API does not return metadata arrays, so we can't get template names from tokenizer.chat_templates
         template_choices = dict((name[10:], template) for name, template in self.metadata.items() if name.startswith("tokenizer.chat_template."))
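
For context on why the guard works: when a GGUF model defines no EOS or BOS token, llama.cpp's token_eos() / token_bos() return the sentinel -1 (as the added `!= -1` checks in the diff reflect), and forwarding -1 to the native token-text lookup indexes the vocabulary out of bounds, which is the reported segfault. Below is a minimal, hypothetical sketch of the same guarded-lookup pattern; safe_token_text and the bare `model` argument are names assumed for illustration, not part of the llama-cpp-python API.

    # Hypothetical helper (not in llama-cpp-python) showing the fix's pattern.
    def safe_token_text(model, token_id: int) -> str:
        # llama.cpp reports a missing special token as -1; passing -1 through
        # to the native token-text lookup is what caused the segfault.
        if token_id == -1:
            return ""
        return model.token_get_text(token_id)

    # Usage mirroring the patched lines in Llama.__init__:
    #   eos_token = safe_token_text(self._model, self.token_eos())
    #   bos_token = safe_token_text(self._model, self.token_bos())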
