Commit 7180535

Use python warnings module
1 parent 9f14fd2 commit 7180535

1 file changed: +5 −1 lines changed

llama_cpp/llama.py

+5 −1
@@ -8,6 +8,7 @@
 import ctypes
 import typing
 import fnmatch
+import warnings
 import multiprocessing
 
 from typing import (
@@ -1020,7 +1021,10 @@ def _create_completion(
         model_name: str = model if model is not None else self.model_path
 
         if prompt_tokens[:2] == [self.token_bos()] * 2:
-            print(f'*** WARNING: Detected duplicate leading "{self._model.token_get_text(self.token_bos())}" in prompt, this will likely reduce response quality, consider removing it...', file=sys.stderr)
+            warnings.warn(
+                f'Detected duplicate leading "{self._model.token_get_text(self.token_bos())}" in prompt, this will likely reduce response quality, consider removing it...',
+                RuntimeWarning,
+            )
 
         # NOTE: This likely doesn't work correctly for the first token in the prompt
         # because of the extra space added to the start of the prompt_tokens
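Switching from a bare print to warnings.warn lets callers manage this message through the standard warnings machinery instead of scraping stderr, and with Python's default filters a RuntimeWarning is shown only once per call site rather than on every completion. A minimal sketch of how downstream code might handle it (the message pattern is copied from the diff above; nothing below is part of the llama_cpp API):

import warnings

# Silence the duplicate-BOS warning entirely
# (the "message" argument is a regex matched against the start of the warning text):
warnings.filterwarnings(
    "ignore",
    message="Detected duplicate leading",
    category=RuntimeWarning,
)

# Or, in a test suite, escalate it to an error so a malformed prompt fails fast:
# warnings.simplefilter("error", RuntimeWarning)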
