From 759fdad552c5bb0cf50cab39205c0739b81e0c70 Mon Sep 17 00:00:00 2001
From: Andy963
Date: Fri, 15 Dec 2023 16:15:42 +0800
Subject: [PATCH] fix TypeError in count_tokens & count_tokens_async

The generated GAPIC client declares count_tokens (in part) as:

    async def count_tokens(
        self,
        request: Optional[Union[generative_service.CountTokensRequest, dict]] = None,
        *,
        model: Optional[str] = None,
        contents: Optional[MutableSequence[content.Content]] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, str]] = (),

Because model and contents are keyword-only, calling the method with
positional arguments instead of model=model, contents=contents raises:

    TypeError: count_tokens() takes from 1 to 2 positional arguments but 3 were given
---
 google/generativeai/generative_models.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/google/generativeai/generative_models.py b/google/generativeai/generative_models.py
index d2b02b1d8..f1725815b 100644
--- a/google/generativeai/generative_models.py
+++ b/google/generativeai/generative_models.py
@@ -275,13 +275,13 @@ def count_tokens(
         self, contents: content_types.ContentsType
     ) -> glm.CountTokensResponse:
         contents = content_types.to_contents(contents)
-        return self._client.count_tokens(self.model_name, contents)
+        return self._client.count_tokens(model=self.model_name, contents=contents)
 
     async def count_tokens_async(
         self, contents: content_types.ContentsType
     ) -> glm.CountTokensResponse:
         contents = content_types.to_contents(contents)
-        return await self._client.count_tokens(self.model_name, contents)
+        return await self._client.count_tokens(model=self.model_name, contents=contents)
 
     # fmt: on
     def start_chat(
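
Note (not part of the patch): a minimal sketch of why the original positional
call fails. FakeClient, its toy token counting, and the "models/gemini-pro"
model name are hypothetical stand-ins, kept only to show how the bare * in the
generated signature makes model and contents keyword-only.

    from typing import MutableSequence, Optional


    class FakeClient:
        # Hypothetical stand-in for the generated GAPIC service client,
        # reduced to the part that matters: only `request` may be positional.
        def count_tokens(
            self,
            request: Optional[dict] = None,
            *,  # parameters after the bare * are keyword-only
            model: Optional[str] = None,
            contents: Optional[MutableSequence[str]] = None,
        ) -> int:
            # Toy count; the real client sends a CountTokens RPC instead.
            return sum(len(c.split()) for c in contents or [])


    client = FakeClient()

    # Keyword arguments match the signature, as in the patched wrapper:
    print(client.count_tokens(model="models/gemini-pro", contents=["hello world"]))

    # Positional arguments can bind only to `request`, so the extra one is
    # rejected, reproducing the error quoted in the commit message:
    try:
        client.count_tokens("models/gemini-pro", ["hello world"])
    except TypeError as err:
        print(err)  # ... takes from 1 to 2 positional arguments but 3 were given

Passing model= and contents= as keywords keeps the wrapper compatible with this
signature without having to construct a CountTokensRequest explicitly.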