@@ -26,8 +26,9 @@
 from google.generativeai.client import get_default_discuss_async_client
 from google.generativeai import string_utils
 from google.generativeai.types import discuss_types
+from google.generativeai.types import helper_types
 from google.generativeai.types import model_types
-from google.generativeai.types import safety_types
+from google.generativeai.types import palm_safety_types
 
 
 def _make_message(content: discuss_types.MessageOptions) -> glm.Message:
@@ -316,7 +317,7 @@ def chat(
     top_k: float | None = None,
     prompt: discuss_types.MessagePromptOptions | None = None,
     client: glm.DiscussServiceClient | None = None,
-    request_options: dict[str, Any] | None = None,
+    request_options: helper_types.RequestOptionsType | None = None,
 ) -> discuss_types.ChatResponse:
     """Calls the API and returns a `types.ChatResponse` containing the response.
 
@@ -416,7 +417,7 @@ async def chat_async(
     top_k: float | None = None,
     prompt: discuss_types.MessagePromptOptions | None = None,
     client: glm.DiscussServiceAsyncClient | None = None,
-    request_options: dict[str, Any] | None = None,
+    request_options: helper_types.RequestOptionsType | None = None,
 ) -> discuss_types.ChatResponse:
     request = _make_generate_message_request(
         model=model,
@@ -469,7 +470,7 @@ def last(self, message: discuss_types.MessageOptions):
     def reply(
         self,
         message: discuss_types.MessageOptions,
-        request_options: dict[str, Any] | None = None,
+        request_options: helper_types.RequestOptionsType | None = None,
     ) -> discuss_types.ChatResponse:
         if isinstance(self._client, glm.DiscussServiceAsyncClient):
             raise TypeError(f"reply can't be called on an async client, use reply_async instead.")
@@ -521,7 +522,7 @@ def _build_chat_response(
     response = type(response).to_dict(response)
     response.pop("messages")
 
-    response["filters"] = safety_types.convert_filters_to_enums(response["filters"])
+    response["filters"] = palm_safety_types.convert_filters_to_enums(response["filters"])
 
     if response["candidates"]:
         last = response["candidates"][0]
@@ -537,7 +538,7 @@ def _build_chat_response(
 def _generate_response(
     request: glm.GenerateMessageRequest,
     client: glm.DiscussServiceClient | None = None,
-    request_options: dict[str, Any] | None = None,
+    request_options: helper_types.RequestOptionsType | None = None,
 ) -> ChatResponse:
     if request_options is None:
         request_options = {}
@@ -553,7 +554,7 @@ def _generate_response(
 async def _generate_response_async(
     request: glm.GenerateMessageRequest,
     client: glm.DiscussServiceAsyncClient | None = None,
-    request_options: dict[str, Any] | None = None,
+    request_options: helper_types.RequestOptionsType | None = None,
 ) -> ChatResponse:
     if request_options is None:
         request_options = {}
@@ -574,7 +575,7 @@ def count_message_tokens(
     messages: discuss_types.MessagesOptions | None = None,
     model: model_types.AnyModelNameOptions = DEFAULT_DISCUSS_MODEL,
     client: glm.DiscussServiceAsyncClient | None = None,
-    request_options: dict[str, Any] | None = None,
+    request_options: helper_types.RequestOptionsType | None = None,
 ) -> discuss_types.TokenCount:
     model = model_types.make_model_name(model)
     prompt = _make_message_prompt(prompt, context=context, examples=examples, messages=messages)
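
Across these hunks the change is uniform: every entry point that previously took request_options: dict[str, Any] now annotates it as helper_types.RequestOptionsType, and the old safety_types module is referenced under its new palm_safety_types name. Below is a minimal usage sketch, not part of the diff, written under the assumption that RequestOptionsType admits either a plain dict or a helper_types.RequestOptions value with timeout/retry fields, and that a PaLM chat model and a valid API key are available:

import google.generativeai as palm
from google.generativeai.types import helper_types

palm.configure(api_key="...")  # assumes a valid API key

# A plain dict should still satisfy the widened annotation...
response = palm.chat(
    messages=["Hello"],
    request_options={"timeout": 120},  # assumed key: seconds before the RPC times out
)

# ...as should the explicit RequestOptions form, here on the reply()
# path that this diff also retypes.
response = response.reply(
    "Tell me a joke",
    request_options=helper_types.RequestOptions(timeout=120),
)
print(response.last)

Because RequestOptionsType is a shared alias rather than a bare dict annotation, existing dict-based callers keep working while type checkers gain one precise, centrally defined target for the request options across chat, chat_async, reply, and count_message_tokens.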