Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit 6b018e0

Browse files
committed
misc: Improve llava error messages
1 parent a6457ba commit 6b018e0

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

llama_cpp/llama_chat_format.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -2642,13 +2642,13 @@ def embed_image_bytes(image_bytes: bytes):
26422642
if type_ == "text":
26432643
tokens = llama.tokenize(value.encode("utf8"), add_bos=False, special=True)
26442644
if llama.n_tokens + len(tokens) > llama.n_ctx():
2645-
raise ValueError("Prompt exceeds n_ctx") # TODO: Fix
2645+
raise ValueError(f"Prompt exceeds n_ctx: {llama.n_tokens + len(tokens)} > {llama.n_ctx()}")
26462646
llama.eval(tokens)
26472647
else:
26482648
image_bytes = self.load_image(value)
26492649
embed = embed_image_bytes(image_bytes)
26502650
if llama.n_tokens + embed.contents.n_image_pos > llama.n_ctx():
2651-
raise ValueError("Prompt exceeds n_ctx") # TODO: Fix
2651+
raise ValueError(f"Prompt exceeds n_ctx: {llama.n_tokens + embed.contents.n_image_pos} > {llama.n_ctx()}")
26522652
n_past = ctypes.c_int(llama.n_tokens)
26532653
n_past_p = ctypes.pointer(n_past)
26542654
with suppress_stdout_stderr(disable=self.verbose):

0 commit comments

Comments
 (0)