Commit 08f2bb3

fix(minor): Minor ruff fixes
1 parent c1ae815 commit 08f2bb3

3 files changed (+11, -7 lines)


llama_cpp/_internals.py

Lines changed: 1 addition & 0 deletions
@@ -4,6 +4,7 @@
 import ctypes
 
 from typing import (
+    Dict,
     List,
     Optional,
     Sequence,
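
The added Dict import most likely resolves an undefined-name diagnostic (ruff's F821) for a Dict[...] annotation used elsewhere in the module. A minimal standalone sketch of the pattern, with illustrative names rather than the module's actual annotations:

    from typing import Dict, List

    # Without Dict in the typing import, this annotation is an undefined name (F821).
    def token_counts(texts: List[str]) -> Dict[str, int]:
        # Stand-in body: map each string to its length.
        return {t: len(t) for t in texts}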

llama_cpp/llama.py

Lines changed: 3 additions & 5 deletions
@@ -11,10 +11,11 @@
 import warnings
 import contextlib
 import multiprocessing
-from types import TracebackType
 
 from typing import (
+    Any,
     List,
+    Literal,
     Optional,
     Union,
     Generator,
@@ -23,14 +24,11 @@
     Deque,
     Callable,
     Dict,
-    Type,
 )
 from collections import deque
 from pathlib import Path
 
 
-from llama_cpp.llama_types import List
-
 from .llama_types import *
 from .llama_grammar import LlamaGrammar
 from .llama_cache import (
@@ -901,7 +899,7 @@ def embed(
         pooling_type = self.pooling_type()
         logits_all = pooling_type == llama_cpp.LLAMA_POOLING_TYPE_NONE
 
-        if self.context_params.embeddings == False:
+        if self.context_params.embeddings is False:
             raise RuntimeError(
                 "Llama model must be created with embedding=True to call this method"
             )
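
The embed() change above replaces == False with is False, the fix ruff suggests for E712 (comparison to a boolean literal); the deleted imports (TracebackType, Type, and the shadowing from llama_cpp.llama_types import List) look like companion unused-import cleanups (F401). A minimal standalone sketch of why the forms differ:

    embeddings = False

    if embeddings == False:  # flagged by E712; uses __eq__, so 0 and 0.0 also compare equal
        pass
    if embeddings is False:  # identity test against the False singleton, as in this commit
        pass
    if not embeddings:       # most idiomatic, but matches any falsy value, not just False
        pass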

llama_cpp/llama_chat_format.py

Lines changed: 7 additions & 2 deletions
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import os
+import sys
 import json
 import ctypes
 import dataclasses
@@ -627,6 +628,8 @@ def chat_completion_handler(
             json.dumps(schema), verbose=llama.verbose
         )
     except Exception as e:
+        if llama.verbose:
+            print(str(e), file=sys.stderr)
         grammar = llama_grammar.LlamaGrammar.from_string(
             llama_grammar.JSON_GBNF, verbose=llama.verbose
         )
@@ -1611,12 +1614,12 @@ def message_to_str(msg: llama_types.ChatCompletionRequestMessage):
         function_call = completion_text.split(".")[-1][:-1]
         new_prompt = prompt + completion_text + stop
     elif isinstance(function_call, str) and function_call != "none":
-        new_prompt = prompt + f":\n"
+        new_prompt = prompt + ":\n"
     elif isinstance(function_call, dict):
         new_prompt = prompt + f" to=functions.{function_call['name']}:\n"
         function_call = function_call["name"]
     else:
-        new_prompt = prompt + f":\n"
+        new_prompt = prompt + ":\n"
 
     function_body = None
     for function in functions or []:
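
The two rewritten lines above drop an f-prefix from strings that contain no placeholders, which ruff reports as F541 (f-string without any placeholders). The prefix changes nothing at runtime; the literals are identical:

    prompt = "assistant"
    assert prompt + f":\n" == prompt + ":\n"  # byte-for-byte equal; the f-prefix is inert here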

@@ -2871,6 +2874,8 @@ def embed_image_bytes(image_bytes: bytes):
             json.dumps(schema), verbose=llama.verbose
         )
     except Exception as e:
+        if llama.verbose:
+            print(str(e), file=sys.stderr)
         grammar = llama_grammar.LlamaGrammar.from_string(
             llama_grammar.JSON_GBNF, verbose=llama.verbose
         )
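
Both except branches in this file now print the schema-to-grammar failure to stderr (when verbose) before falling back to the generic JSON grammar, instead of swallowing it silently. A minimal standalone sketch of the pattern; parse_schema and fallback_grammar are hypothetical stand-ins, not the library's API:

    import sys

    def parse_schema(schema: str):
        # Hypothetical strict conversion that can reject a schema.
        raise ValueError(f"unsupported schema: {schema!r}")

    def fallback_grammar():
        # Hypothetical permissive fallback, standing in for JSON_GBNF.
        return "json-gbnf"

    def build_grammar(schema: str, verbose: bool):
        try:
            return parse_schema(schema)
        except Exception as e:
            if verbose:
                # Mirror the commit: report the failure to stderr rather than hiding it.
                print(str(e), file=sys.stderr)
            return fallback_grammar()

    grammar = build_grammar("{oops}", verbose=True)  # prints the error, then falls back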
