From 0a9012840a372df0e9c8c270861b327a5cee66cd Mon Sep 17 00:00:00 2001
From: rajveer43
Date: Mon, 28 Aug 2023 20:25:13 +0530
Subject: [PATCH 1/7] update model.py

---
 google/generativeai/models.py | 43 +++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/google/generativeai/models.py b/google/generativeai/models.py
index 498e5566b..1b78fb1c3 100644
--- a/google/generativeai/models.py
+++ b/google/generativeai/models.py
@@ -35,6 +35,21 @@ def get_model(name: str, *, client=None) -> model_types.Model:
 
 
 class ModelsIterable(model_types.ModelsIterable):
+    """
+    An iterable class to traverse through a list of models.
+
+    This class allows you to iterate over a list of models, fetching them in pages
+    if necessary based on the provided page_size and page_token.
+
+    Args:
+        page_size (int): The number of models to fetch per page.
+        page_token (str | None): Token representing the current page. Pass None for the first page.
+        models (List[model_types.Model]): List of models to iterate through.
+        client (glm.ModelServiceClient | None): An optional client for model service.
+
+    Returns:
+        ModelsIterable: An iterable object that allows iterating through the models.
+    """
     def __init__(
         self,
         *,
@@ -49,12 +64,25 @@ def __init__(
         self._client = client
 
     def __iter__(self):
+        """
+        Returns an iterator over the models.
+
+        Yields:
+            model_types.Model: A model object from the iterable.
+        """
         while self:
             page = self._models
             yield from page
             self = self._next_page()
 
     def _next_page(self):
+        """
+        Fetches the next page of models based on the page token.
+
+        Returns:
+            ModelsIterable | None: The next iterable object with the next page of models,
+            or None if there are no more pages.
+        """
         if not self._page_token:
             return None
         return _list_models(
@@ -63,6 +91,21 @@ def _next_page(self):
 
 
 def _list_models(page_size, page_token, client):
+    """
+    Fetches a page of models using the provided client and pagination tokens.
+
+    This function queries the client to retrieve a page of models based on the given
+    page_size and page_token. It then processes the response and returns an iterable
+    object to traverse through the models.
+
+    Args:
+        page_size (int): The number of models to fetch per page.
+        page_token (str): Token representing the current page.
+        client (glm.ModelServiceClient): The client to communicate with the model service.
+
+    Returns:
+        ModelsIterable: An iterable object containing the fetched models and pagination info.
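+
+    Example (an illustrative sketch, not part of the library; assumes an
+    authenticated `glm.ModelServiceClient` is available as `client`):
+
+        >>> for m in _list_models(page_size=50, page_token=None, client=client):
+        ...     print(m.name)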
+ """ result = client.list_models(page_size=page_size, page_token=page_token) result = result._response result = type(result).to_dict(result) From 1490bd9e3423f6859259aad999294cf47bc73d17 Mon Sep 17 00:00:00 2001 From: rajveer43 Date: Mon, 28 Aug 2023 20:48:04 +0530 Subject: [PATCH 2/7] more --- google/generativeai/discuss.py | 63 ++++++++++++++++++++++++++++++++++ google/generativeai/text.py | 55 +++++++++++++++++++++++++++++ 2 files changed, 118 insertions(+) diff --git a/google/generativeai/discuss.py b/google/generativeai/discuss.py index 60bcbba95..49c8a3d5b 100644 --- a/google/generativeai/discuss.py +++ b/google/generativeai/discuss.py @@ -30,6 +30,7 @@ def _make_message(content: discuss_types.MessageOptions) -> glm.Message: + """Creates a glm.Message object from the provided content.""" if isinstance(content, glm.Message): return content if isinstance(content, str): @@ -39,6 +40,20 @@ def _make_message(content: discuss_types.MessageOptions) -> glm.Message: def _make_messages(messages: discuss_types.MessagesOptions) -> List[glm.Message]: + """ + Creates a list of glm.Message objects from the provided messages. + + This function takes a variety of message content inputs, such as strings, dictionaries, + or glm.Message objects, and creates a list of glm.Message objects. It ensures that + the authors of the messages alternate appropriately. If authors are not provided, + default authors are assigned based on their position in the list. + + Args: + messages (discuss_types.MessagesOptions): The messages to convert. + + Returns: + List[glm.Message]: A list of glm.Message objects with alternating authors if needed. + """ if isinstance(messages, (str, dict, glm.Message)): messages = [_make_message(messages)] else: @@ -71,6 +86,7 @@ def _make_messages(messages: discuss_types.MessagesOptions) -> List[glm.Message] def _make_example(item: discuss_types.ExampleOptions) -> glm.Example: + """Creates a glm.Example object from the provided item.""" if isinstance(item, glm.Example): return item @@ -91,6 +107,19 @@ def _make_example(item: discuss_types.ExampleOptions) -> glm.Example: def _make_examples_from_flat( examples: List[discuss_types.MessageOptions], ) -> List[glm.Example]: + """ + Creates a list of glm.Example objects from a list of message options. + + This function takes a list of message options and pairs them into glm.Example objects. + The input examples must be in pairs to create valid examples. It raises a ValueError + if the provided list of examples is not of even length. + + Args: + examples (List[discuss_types.MessageOptions]): The list of message options. + + Returns: + List[glm.Example]: A list of glm.Example objects created from the provided message pairs. + """ if len(examples) % 2 != 0: raise ValueError( textwrap.dedent( @@ -116,6 +145,19 @@ def _make_examples_from_flat( def _make_examples(examples: discuss_types.ExamplesOptions) -> List[glm.Example]: + """ + Creates a list of glm.Example objects from the provided examples. + + This function takes various types of example content inputs and creates a list + of glm.Example objects. It handles the conversion of different input types and ensures + the appropriate structure for creating valid examples. + + Args: + examples (discuss_types.ExamplesOptions): The examples to convert. + + Returns: + List[glm.Example]: A list of glm.Example objects created from the provided examples. 
+ """ if isinstance(examples, glm.Example): return [examples] @@ -155,6 +197,24 @@ def _make_message_prompt_dict( examples: discuss_types.ExamplesOptions | None = None, messages: discuss_types.MessagesOptions | None = None, ) -> glm.MessagePrompt: + """ + Creates a glm.MessagePrompt object from the provided prompt components. + + This function constructs a glm.MessagePrompt object using the provided context, examples, + or messages. It ensures the proper structure and handling of the input components. + + Args: + prompt (discuss_types.MessagePromptOptions, optional): The complete prompt components. + Defaults to None. + context (str | None, optional): The context for the prompt. Defaults to None. + examples (discuss_types.ExamplesOptions | None, optional): The examples for the prompt. + Defaults to None. + messages (discuss_types.MessagesOptions | None, optional): The messages for the prompt. + Defaults to None. + + Returns: + glm.MessagePrompt: A glm.MessagePrompt object created from the provided prompt components. + """ if prompt is None: prompt = dict( context=context, @@ -201,6 +261,7 @@ def _make_message_prompt( examples: discuss_types.ExamplesOptions | None = None, messages: discuss_types.MessagesOptions | None = None, ) -> glm.MessagePrompt: + """Creates a glm.MessagePrompt object from the provided prompt components.""" prompt = _make_message_prompt_dict( prompt=prompt, context=context, examples=examples, messages=messages ) @@ -219,6 +280,7 @@ def _make_generate_message_request( top_k: float | None = None, prompt: discuss_types.MessagePromptOptions | None = None, ) -> glm.GenerateMessageRequest: + """Creates a glm.GenerateMessageRequest object for generating messages.""" model = model_types.make_model_name(model) prompt = _make_message_prompt( @@ -236,6 +298,7 @@ def _make_generate_message_request( def set_doc(doc): + """A decorator to set the docstring of a function.""" def inner(f): f.__doc__ = doc return f diff --git a/google/generativeai/text.py b/google/generativeai/text.py index 051bfa2f9..d6855ea98 100644 --- a/google/generativeai/text.py +++ b/google/generativeai/text.py @@ -29,6 +29,18 @@ def _make_text_prompt(prompt: str | dict[str, str]) -> glm.TextPrompt: + """ + Creates a TextPrompt object based on the provided prompt input. + + Args: + prompt (str | dict[str, str]): The prompt input, either a string or a dictionary. + + Returns: + glm.TextPrompt: A TextPrompt object containing the prompt text. + + Raises: + TypeError: If the provided prompt is neither a string nor a dictionary. + """ if isinstance(prompt, str): return glm.TextPrompt(text=prompt) elif isinstance(prompt, dict): @@ -49,6 +61,34 @@ def _make_generate_text_request( safety_settings: safety_types.SafetySettingOptions | None = None, stop_sequences: str | Iterable[str] | None = None, ) -> glm.GenerateTextRequest: + """ + Creates a GenerateTextRequest object based on the provided parameters. + + This function generates a glm.GenerateTextRequest object with the specified + parameters. It prepares the input parameters and creates a request that can be + used for generating text using the chosen model. + + Args: + model (model_types.ModelNameOptions, optional): The model to use for text generation. + Defaults to DEFAULT_TEXT_MODEL. + prompt (str | None, optional): The prompt for text generation. Defaults to None. + temperature (float | None, optional): The temperature for randomness in generation. + Defaults to None. + candidate_count (int | None, optional): The number of candidates to consider. 
+            Defaults to None.
+        max_output_tokens (int | None, optional): The maximum number of output tokens.
+            Defaults to None.
+        top_p (float | None, optional): The nucleus sampling probability threshold.
+            Defaults to None.
+        top_k (int | None, optional): The top-k sampling parameter. Defaults to None.
+        safety_settings (safety_types.SafetySettingOptions | None, optional): Safety settings
+            for generated text. Defaults to None.
+        stop_sequences (str | Iterable[str] | None, optional): Stop sequences to halt
+            text generation. Can be a string or iterable of strings. Defaults to None.
+
+    Returns:
+        glm.GenerateTextRequest: A GenerateTextRequest object configured with the specified parameters.
+    """
     model = model_types.make_model_name(model)
     prompt = _make_text_prompt(prompt=prompt)
     safety_settings = safety_types.normalize_safety_settings(safety_settings)
@@ -155,6 +195,21 @@ def __init__(self, **kwargs):

 def _generate_response(
     request: glm.GenerateTextRequest, client: glm.TextServiceClient = None
 ) -> Completion:
+    """
+    Generates a response using the provided GenerateTextRequest and client.
+
+    This function utilizes the provided client to generate a response based on the
+    given GenerateTextRequest. It then processes the response and returns a Completion
+    object containing the generated text and associated information.
+
+    Args:
+        request (glm.GenerateTextRequest): The text generation request.
+        client (glm.TextServiceClient, optional): The client to use for text generation.
+            Defaults to None, in which case the default text client is used.
+
+    Returns:
+        Completion: A Completion object with the generated text and response information.
+    """
     if client is None:
         client = get_default_text_client()

From 7f7f4fe69605ae3e57fb9ea4e92d2baa3d2a2901 Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Mon, 28 Aug 2023 10:07:45 -0700
Subject: [PATCH 3/7] Add back-ticks, remove some redundant types.

---
 google/generativeai/discuss.py | 53 +++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 26 deletions(-)

diff --git a/google/generativeai/discuss.py b/google/generativeai/discuss.py
index 49c8a3d5b..3936e5467 100644
--- a/google/generativeai/discuss.py
+++ b/google/generativeai/discuss.py
@@ -30,7 +30,7 @@


 def _make_message(content: discuss_types.MessageOptions) -> glm.Message:
-    """Creates a glm.Message object from the provided content."""
+    """Creates a `glm.Message` object from the provided content."""
     if isinstance(content, glm.Message):
         return content
     if isinstance(content, str):
@@ -44,15 +44,15 @@ def _make_messages(messages: discuss_types.MessagesOptions) -> List[glm.Message]
     Creates a list of glm.Message objects from the provided messages.

     This function takes a variety of message content inputs, such as strings, dictionaries,
-    or glm.Message objects, and creates a list of glm.Message objects. It ensures that
+    or `glm.Message` objects, and creates a list of `glm.Message` objects. It ensures that
     the authors of the messages alternate appropriately. If authors are not provided,
     default authors are assigned based on their position in the list.

     Args:
-        messages (discuss_types.MessagesOptions): The messages to convert.
+        messages: The messages to convert.

     Returns:
-        List[glm.Message]: A list of glm.Message objects with alternating authors if needed.
+        A list of `glm.Message` objects with alternating authors if needed.
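+
+    Example (an illustrative sketch; per the description above, plain
+    strings are given positional default authors "0" and "1"):
+
+        >>> msgs = _make_messages(["hello", "hi there", "how are you?"])
+        >>> [m.author for m in msgs]
+        ['0', '1', '0']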
""" if isinstance(messages, (str, dict, glm.Message)): messages = [_make_message(messages)] @@ -86,7 +86,7 @@ def _make_messages(messages: discuss_types.MessagesOptions) -> List[glm.Message] def _make_example(item: discuss_types.ExampleOptions) -> glm.Example: - """Creates a glm.Example object from the provided item.""" + """Creates a `glm.Example` object from the provided item.""" if isinstance(item, glm.Example): return item @@ -108,17 +108,19 @@ def _make_examples_from_flat( examples: List[discuss_types.MessageOptions], ) -> List[glm.Example]: """ - Creates a list of glm.Example objects from a list of message options. + Creates a list of `glm.Example` objects from a list of message options. - This function takes a list of message options and pairs them into glm.Example objects. - The input examples must be in pairs to create valid examples. It raises a ValueError - if the provided list of examples is not of even length. + This function takes a list of message options and pairs them into `glm.Example` objects. + The input examples must be in pairs to create valid examples. Args: - examples (List[discuss_types.MessageOptions]): The list of message options. + examples: The list of message options. Returns: - List[glm.Example]: A list of glm.Example objects created from the provided message pairs. + A list of glm.Example objects created from the provided message pairs. + + Raises: + ValueError: If the provided list of examples is not of even length. """ if len(examples) % 2 != 0: raise ValueError( @@ -146,17 +148,17 @@ def _make_examples_from_flat( def _make_examples(examples: discuss_types.ExamplesOptions) -> List[glm.Example]: """ - Creates a list of glm.Example objects from the provided examples. + Creates a list of `glm.Example` objects from the provided examples. This function takes various types of example content inputs and creates a list - of glm.Example objects. It handles the conversion of different input types and ensures + of `glm.Example` objects. It handles the conversion of different input types and ensures the appropriate structure for creating valid examples. Args: - examples (discuss_types.ExamplesOptions): The examples to convert. + examples: The examples to convert. Returns: - List[glm.Example]: A list of glm.Example objects created from the provided examples. + A list of glm.Example objects created from the provided examples. """ if isinstance(examples, glm.Example): return [examples] @@ -198,22 +200,21 @@ def _make_message_prompt_dict( messages: discuss_types.MessagesOptions | None = None, ) -> glm.MessagePrompt: """ - Creates a glm.MessagePrompt object from the provided prompt components. + Creates a `glm.MessagePrompt` object from the provided prompt components. + + This function constructs a `glm.MessagePrompt` object using the provided `context`, `examples`, + or `messages`. It ensures the proper structure and handling of the input components. - This function constructs a glm.MessagePrompt object using the provided context, examples, - or messages. It ensures the proper structure and handling of the input components. + Either pass a `prompt` or it's component `context`, `examples`, `messages`. Args: - prompt (discuss_types.MessagePromptOptions, optional): The complete prompt components. - Defaults to None. - context (str | None, optional): The context for the prompt. Defaults to None. - examples (discuss_types.ExamplesOptions | None, optional): The examples for the prompt. - Defaults to None. 
-        messages (discuss_types.MessagesOptions | None, optional): The messages for the prompt.
-            Defaults to None.
+        prompt: The complete prompt components.
+        context: The context for the prompt.
+        examples: The examples for the prompt.
+        messages: The messages for the prompt.

     Returns:
-        glm.MessagePrompt: A glm.MessagePrompt object created from the provided prompt components.
+        A `glm.MessagePrompt` object created from the provided prompt components.
     """
     if prompt is None:
         prompt = dict(

From 49e378ca37475f3e45f1a9036432b6c94bde98ee Mon Sep 17 00:00:00 2001
From: rajveer43
Date: Tue, 29 Aug 2023 21:59:01 +0530
Subject: [PATCH 4/7] update model.py 1

---
 google/generativeai/models.py | 29 +++++++++++------------------
 1 file changed, 11 insertions(+), 18 deletions(-)

diff --git a/google/generativeai/models.py b/google/generativeai/models.py
index 1b78fb1c3..6b9f650dc 100644
--- a/google/generativeai/models.py
+++ b/google/generativeai/models.py
@@ -15,7 +15,7 @@
 from __future__ import annotations

 import re
-from typing import Optional, List
+from typing import Optional, List, Iterable

 import google.ai.generativelanguage as glm
 from google.generativeai.client import get_default_model_client
@@ -42,13 +42,13 @@ class ModelsIterable(model_types.ModelsIterable):
     if necessary based on the provided page_size and page_token.

     Args:
-        page_size (int): The number of models to fetch per page.
+        page_size (int): The number of `models` to fetch per page.
         page_token (str | None): Token representing the current page. Pass None for the first page.
         models (List[model_types.Model]): List of models to iterate through.
         client (glm.ModelServiceClient | None): An optional client for model service.

     Returns:
-        ModelsIterable: An iterable object that allows iterating through the models.
+        An `ModelsIterable` iterable object that allows iterating through the models.
     """
     def __init__(
         self,
@@ -63,25 +63,18 @@ def __init__(
         self._models = models
         self._client = client

-    def __iter__(self):
+    def __iter__(self) -> Iterable[model_types.Model]:
         """
         Returns an iterator over the models.
-
-        Yields:
-            model_types.Model: A model object from the iterable.
         """
         while self:
             page = self._models
             yield from page
             self = self._next_page()

-    def _next_page(self):
+    def _next_page(self) -> ModelsIterable | None:
         """
         Fetches the next page of models based on the page token.
-
-        Returns:
-            ModelsIterable | None: The next iterable object with the next page of models,
-            or None if there are no more pages.
         """
         if not self._page_token:
             return None
         return _list_models(
@@ -90,21 +83,21 @@ def _next_page(self):
         )


-def _list_models(page_size, page_token, client):
+def _list_models(page_size, page_token, client) -> ModelsIterable:
     """
     Fetches a page of models using the provided client and pagination tokens.

-    This function queries the client to retrieve a page of models based on the given
-    page_size and page_token. It then processes the response and returns an iterable
+    This function queries the `client` to retrieve a page of models based on the given
+    `page_size` and `page_token`. It then processes the response and returns an iterable
     object to traverse through the models.

     Args:
-        page_size (int): The number of models to fetch per page.
+        page_size (int): How many `types.Models` to fetch per page (api call).
         page_token (str): Token representing the current page.
-        client (glm.ModelServiceClient): The client to communicate with the model service.
+        client (`glm.ModelServiceClient`): The client to communicate with the model service.
     Returns:
-        ModelsIterable: An iterable object containing the fetched models and pagination info.
+        An iterable `ModelsIterable` object containing the fetched models and pagination info.
     """
     result = client.list_models(page_size=page_size, page_token=page_token)
     result = result._response
     result = type(result).to_dict(result)

From fb5368091c3b3e15dd1dd17df29cb0d87606056e Mon Sep 17 00:00:00 2001
From: rajveer43
Date: Tue, 29 Aug 2023 22:05:07 +0530
Subject: [PATCH 5/7] formatting

---
 google/generativeai/discuss.py |  3 ++-
 google/generativeai/models.py  | 17 +++++++++--------
 google/generativeai/text.py    |  2 +-
 3 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/google/generativeai/discuss.py b/google/generativeai/discuss.py
index 3936e5467..957d6618a 100644
--- a/google/generativeai/discuss.py
+++ b/google/generativeai/discuss.py
@@ -111,7 +111,7 @@ def _make_examples_from_flat(
     Creates a list of `glm.Example` objects from a list of message options.

     This function takes a list of message options and pairs them into `glm.Example` objects.
-    The input examples must be in pairs to create valid examples. 
+    The input examples must be in pairs to create valid examples.

     Args:
         examples: The list of message options.
@@ -300,6 +300,7 @@ def _make_generate_message_request(

 def set_doc(doc):
     """A decorator to set the docstring of a function."""
+
     def inner(f):
         f.__doc__ = doc
         return f

diff --git a/google/generativeai/models.py b/google/generativeai/models.py
index 6b9f650dc..13d21f69d 100644
--- a/google/generativeai/models.py
+++ b/google/generativeai/models.py
@@ -39,17 +39,18 @@ class ModelsIterable(model_types.ModelsIterable):
     An iterable class to traverse through a list of models.

     This class allows you to iterate over a list of models, fetching them in pages
-    if necessary based on the provided page_size and page_token.
+    if necessary based on the provided `page_size` and `page_token`.

     Args:
-        page_size (int): The number of `models` to fetch per page.
-        page_token (str | None): Token representing the current page. Pass None for the first page.
-        models (List[model_types.Model]): List of models to iterate through.
-        client (glm.ModelServiceClient | None): An optional client for model service.
+        `page_size` (int): The number of `models` to fetch per page.
+        `page_token` (str | None): Token representing the current page. Pass None for the first page.
+        `models` (List[model_types.Model]): List of models to iterate through.
+        `client` (glm.ModelServiceClient | None): An optional client for model service.

     Returns:
         An `ModelsIterable` iterable object that allows iterating through the models.
     """
+
     def __init__(
         self,
         *,
@@ -92,9 +93,9 @@ def _list_models(page_size, page_token, client) -> ModelsIterable:
     object to traverse through the models.

     Args:
-        page_size (int): How many `types.Models` to fetch per page (api call).
-        page_token (str): Token representing the current page.
-        client (`glm.ModelServiceClient`): The client to communicate with the model service.
+        `page_size` (int): How many `types.Models` to fetch per page (api call).
+        `page_token`` (str): Token representing the current page.
+        `client` (`glm.ModelServiceClient`): The client to communicate with the model service.

     Returns:
         An iterable `ModelsIterable` object containing the fetched models and pagination info.
diff --git a/google/generativeai/text.py b/google/generativeai/text.py
index d6855ea98..5cbf51017 100644
--- a/google/generativeai/text.py
+++ b/google/generativeai/text.py
@@ -37,7 +37,7 @@ def _make_text_prompt(prompt: str | dict[str, str]) -> glm.TextPrompt:

     Returns:
         glm.TextPrompt: A TextPrompt object containing the prompt text.
-    
+
     Raises:
         TypeError: If the provided prompt is neither a string nor a dictionary.
     """

From 0ed880c97fbd8af580dfa6e1fd5c9aa608303b9f Mon Sep 17 00:00:00 2001
From: rajveer43
Date: Tue, 29 Aug 2023 22:37:30 +0530
Subject: [PATCH 6/7] update text.py

---
 google/generativeai/text.py | 55 ++++++++++++++++++-------------------
 1 file changed, 27 insertions(+), 28 deletions(-)

diff --git a/google/generativeai/text.py b/google/generativeai/text.py
index 5cbf51017..aa186c4d1 100644
--- a/google/generativeai/text.py
+++ b/google/generativeai/text.py
@@ -62,32 +62,35 @@ def _make_generate_text_request(
     stop_sequences: str | Iterable[str] | None = None,
 ) -> glm.GenerateTextRequest:
     """
-    Creates a GenerateTextRequest object based on the provided parameters.
+    Creates a `glm.GenerateTextRequest` object based on the provided parameters.

-    This function generates a glm.GenerateTextRequest object with the specified
+    This function generates a `glm.GenerateTextRequest` object with the specified
     parameters. It prepares the input parameters and creates a request that can be
     used for generating text using the chosen model.

     Args:
-        model (model_types.ModelNameOptions, optional): The model to use for text generation.
-            Defaults to DEFAULT_TEXT_MODEL.
-        prompt (str | None, optional): The prompt for text generation. Defaults to None.
-        temperature (float | None, optional): The temperature for randomness in generation.
-            Defaults to None.
-        candidate_count (int | None, optional): The number of candidates to consider.
-            Defaults to None.
-        max_output_tokens (int | None, optional): The maximum number of output tokens.
-            Defaults to None.
-        top_p (float | None, optional): The nucleus sampling probability threshold.
-            Defaults to None.
-        top_k (int | None, optional): The top-k sampling parameter. Defaults to None.
-        safety_settings (safety_types.SafetySettingOptions | None, optional): Safety settings
-            for generated text. Defaults to None.
-        stop_sequences (str | Iterable[str] | None, optional): Stop sequences to halt
-            text generation. Can be a string or iterable of strings. Defaults to None.
+        `model` (`model_types.ModelNameOptions`, optional):
+            The model to use for text generation.
+        `prompt` (str | None, optional):
+            The prompt for text generation. Defaults to None.
+        `temperature` (float | None, optional):
+            The temperature for randomness in generation. Defaults to None.
+        `candidate_count` (int | None, optional):
+            The number of candidates to consider. Defaults to None.
+        `max_output_tokens` (int | None, optional):
+            The maximum number of output tokens. Defaults to None.
+        `top_p` (float | None, optional):
+            The nucleus sampling probability threshold. Defaults to None.
+        `top_k` (int | None, optional):
+            The top-k sampling parameter. Defaults to None.
+        `safety_settings` (`safety_types.SafetySettingOptions` | None, optional):
+            Safety settings for generated text. Defaults to None.
+        `stop_sequences` (str | Iterable[str] | None, optional):
+            Stop sequences to halt text generation.
+            Can be a string or iterable of strings. Defaults to None.

     Returns:
-        glm.GenerateTextRequest: A GenerateTextRequest object configured with the specified parameters.
+        `glm.GenerateTextRequest`: A `GenerateTextRequest` object configured with the specified parameters.
     """
     model = model_types.make_model_name(model)
     prompt = _make_text_prompt(prompt=prompt)
     safety_settings = safety_types.normalize_safety_settings(safety_settings)
@@ -196,19 +199,15 @@ def _generate_response(
     request: glm.GenerateTextRequest, client: glm.TextServiceClient = None
 ) -> Completion:
     """
-    Generates a response using the provided GenerateTextRequest and client.
-
-    This function utilizes the provided client to generate a response based on the
-    given GenerateTextRequest. It then processes the response and returns a Completion
-    object containing the generated text and associated information.
+    Generates a response using the provided `glm.GenerateTextRequest` and client.

     Args:
-        request (glm.GenerateTextRequest): The text generation request.
-        client (glm.TextServiceClient, optional): The client to use for text generation.
-            Defaults to None, in which case the default text client is used.
+        `request` (`glm.GenerateTextRequest`): The text generation request.
+        `client` (`glm.TextServiceClient`, optional):
+            The client to use for text generation. Defaults to None, in which case the default text client is used.

     Returns:
-        Completion: A Completion object with the generated text and response information.
+        `Completion`: A `Completion` object with the generated text and response information.
     """
     if client is None:
         client = get_default_text_client()

From 627e85169b3fb25b854ca8828f68773ff9793c12 Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Wed, 30 Aug 2023 11:00:03 -0700
Subject: [PATCH 7/7] Formatting fixes

---
 google/generativeai/discuss.py | 18 ++++++++--------
 google/generativeai/models.py  | 24 +++++++++++----------
 google/generativeai/text.py    | 39 +++++++++++++--------------------
 tests/test_models.py           |  1 +
 4 files changed, 38 insertions(+), 44 deletions(-)

diff --git a/google/generativeai/discuss.py b/google/generativeai/discuss.py
index 957d6618a..1ada73036 100644
--- a/google/generativeai/discuss.py
+++ b/google/generativeai/discuss.py
@@ -41,7 +41,7 @@ def _make_message(content: discuss_types.MessageOptions) -> glm.Message:

 def _make_messages(messages: discuss_types.MessagesOptions) -> List[glm.Message]:
     """
-    Creates a list of glm.Message objects from the provided messages.
+    Creates a list of `glm.Message` objects from the provided messages.

     This function takes a variety of message content inputs, such as strings, dictionaries,
     or `glm.Message` objects, and creates a list of `glm.Message` objects. It ensures that
@@ -52,7 +52,7 @@
         messages: The messages to convert.

     Returns:
-        A list of `glm.Message` objects with alternating authors if needed.
+        A list of `glm.Message` objects with alternating authors.
     """
     if isinstance(messages, (str, dict, glm.Message)):
         messages = [_make_message(messages)]
@@ -110,14 +110,14 @@
     """
     Creates a list of `glm.Example` objects from a list of message options.

-    This function takes a list of message options and pairs them into `glm.Example` objects.
-    The input examples must be in pairs to create valid examples.
+    This function takes a list of `discuss_types.MessageOptions` and pairs them into
+    `glm.Example` objects. The input examples must be in pairs to create valid examples.

     Args:
-        examples: The list of message options.
+        examples: The list of `discuss_types.MessageOptions`.

     Returns:
-        A list of glm.Example objects created from the provided message pairs.
+        A list of `glm.Example` objects created by pairing up the provided messages.

     Raises:
         ValueError: If the provided list of examples is not of even length.
     """
     if len(examples) % 2 != 0:
         raise ValueError(
@@ -158,7 +158,7 @@ def _make_examples(examples: discuss_types.ExamplesOptions) -> List[glm.Example]
         examples: The examples to convert.

     Returns:
-        A list of glm.Example objects created from the provided examples.
+        A list of `glm.Example` objects created from the provided examples.
     """
     if isinstance(examples, glm.Example):
         return [examples]
@@ -262,7 +262,7 @@ def _make_message_prompt(
     examples: discuss_types.ExamplesOptions | None = None,
     messages: discuss_types.MessagesOptions | None = None,
 ) -> glm.MessagePrompt:
-    """Creates a glm.MessagePrompt object from the provided prompt components."""
+    """Creates a `glm.MessagePrompt` object from the provided prompt components."""
     prompt = _make_message_prompt_dict(
         prompt=prompt, context=context, examples=examples, messages=messages
     )
@@ -281,7 +281,7 @@ def _make_generate_message_request(
     top_k: float | None = None,
     prompt: discuss_types.MessagePromptOptions | None = None,
 ) -> glm.GenerateMessageRequest:
-    """Creates a glm.GenerateMessageRequest object for generating messages."""
+    """Creates a `glm.GenerateMessageRequest` object for generating messages."""
     model = model_types.make_model_name(model)

     prompt = _make_message_prompt(

diff --git a/google/generativeai/models.py b/google/generativeai/models.py
index 13d21f69d..b725cc034 100644
--- a/google/generativeai/models.py
+++ b/google/generativeai/models.py
@@ -15,7 +15,7 @@
 from __future__ import annotations

 import re
-from typing import Optional, List, Iterable
+from typing import Optional, List, Iterator

 import google.ai.generativelanguage as glm
 from google.generativeai.client import get_default_model_client
@@ -42,13 +42,13 @@ class ModelsIterable(model_types.ModelsIterable):
     if necessary based on the provided `page_size` and `page_token`.

     Args:
-        `page_size` (int): The number of `models` to fetch per page.
-        `page_token` (str | None): Token representing the current page. Pass None for the first page.
-        `models` (List[model_types.Model]): List of models to iterate through.
-        `client` (glm.ModelServiceClient | None): An optional client for model service.
+        page_size: The number of `models` to fetch per page.
+        page_token: Token representing the current page. Pass `None` for the first page.
+        models: List of models to iterate through.
+        client: An optional client for the model service.

     Returns:
-        An `ModelsIterable` iterable object that allows iterating through the models.
+        A `ModelsIterable` iterable object that allows iterating through the models.
     """

     def __init__(
@@ -64,7 +64,7 @@
         self._models = models
         self._client = client

-    def __iter__(self) -> Iterable[model_types.Model]:
+    def __iter__(self) -> Iterator[model_types.Model]:
         """
         Returns an iterator over the models.
         """
@@ -84,7 +84,9 @@
         )


-def _list_models(page_size, page_token, client) -> ModelsIterable:
+def _list_models(
+    page_size: int, page_token: str | None, client: glm.ModelServiceClient
+) -> ModelsIterable:
     """
     Fetches a page of models using the provided client and pagination tokens.

@@ -93,9 +95,9 @@
     object to traverse through the models.

     Args:
-        `page_size` (int): How many `types.Models` to fetch per page (api call).
-        `page_token`` (str): Token representing the current page.
-        `client` (`glm.ModelServiceClient`): The client to communicate with the model service.
+        page_size: How many `types.Models` to fetch per page (api call).
+        page_token: Token representing the current page.
+        client: The client to communicate with the model service.

     Returns:
         An iterable `ModelsIterable` object containing the fetched models and pagination info.

diff --git a/google/generativeai/text.py b/google/generativeai/text.py
index aa186c4d1..d9362ce7f 100644
--- a/google/generativeai/text.py
+++ b/google/generativeai/text.py
@@ -30,10 +30,10 @@

 def _make_text_prompt(prompt: str | dict[str, str]) -> glm.TextPrompt:
     """
-    Creates a TextPrompt object based on the provided prompt input.
+    Creates a `glm.TextPrompt` object based on the provided prompt input.

     Args:
-        prompt (str | dict[str, str]): The prompt input, either a string or a dictionary.
+        prompt: The prompt input, either a string or a dictionary.

     Returns:
         glm.TextPrompt: A TextPrompt object containing the prompt text.
@@ -69,25 +69,16 @@ def _make_generate_text_request(
     used for generating text using the chosen model.

     Args:
-        `model` (`model_types.ModelNameOptions`, optional):
-            The model to use for text generation.
-        `prompt` (str | None, optional):
-            The prompt for text generation. Defaults to None.
-        `temperature` (float | None, optional):
-            The temperature for randomness in generation. Defaults to None.
-        `candidate_count` (int | None, optional):
-            The number of candidates to consider. Defaults to None.
-        `max_output_tokens` (int | None, optional):
-            The maximum number of output tokens. Defaults to None.
-        `top_p` (float | None, optional):
-            The nucleus sampling probability threshold. Defaults to None.
-        `top_k` (int | None, optional):
-            The top-k sampling parameter. Defaults to None.
-        `safety_settings` (`safety_types.SafetySettingOptions` | None, optional):
-            Safety settings for generated text. Defaults to None.
-        `stop_sequences` (str | Iterable[str] | None, optional):
-            Stop sequences to halt text generation.
-            Can be a string or iterable of strings. Defaults to None.
+        model: The model to use for text generation.
+        prompt: The prompt for text generation. Defaults to None.
+        temperature: The temperature for randomness in generation. Defaults to None.
+        candidate_count: The number of candidates to consider. Defaults to None.
+        max_output_tokens: The maximum number of output tokens. Defaults to None.
+        top_p: The nucleus sampling probability threshold. Defaults to None.
+        top_k: The top-k sampling parameter. Defaults to None.
+        safety_settings: Safety settings for generated text. Defaults to None.
+        stop_sequences: Stop sequences to halt text generation. Can be a string
+            or iterable of strings. Defaults to None.

     Returns:
         `glm.GenerateTextRequest`: A `GenerateTextRequest` object configured with the specified parameters.
@@ -202,9 +193,9 @@ def _generate_response(
     Generates a response using the provided `glm.GenerateTextRequest` and client.

     Args:
-        `request` (`glm.GenerateTextRequest`): The text generation request.
-        `client` (`glm.TextServiceClient`, optional):
-            The client to use for text generation. Defaults to None, in which case the default text client is used.
+        request: The text generation request.
+        client: The client to use for text generation. Defaults to None, in which
+            case the default text client is used.

     Returns:
         `Completion`: A `Completion` object with the generated text and response information.
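+
+    Example (an illustrative sketch; assumes `google.generativeai.configure`
+    has been called with a valid API key, so the default client resolves):
+
+        >>> request = _make_generate_text_request(prompt="Why is the sky blue?")
+        >>> completion = _generate_response(request)
+        >>> print(completion.result)  # text of the top candidate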
diff --git a/tests/test_models.py b/tests/test_models.py
index bfa829d3b..12e5366c4 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -17,6 +17,7 @@
 from absl.testing import absltest

 import google.ai.generativelanguage as glm
+
 from google.ai.generativelanguage_v1beta2.types import model
 from google.generativeai import models
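For orientation, the helpers documented above sit behind the package's public
entry points. A minimal end-to-end sketch of the documented flow (illustrative
only; assumes a valid PaLM API key):

    import google.generativeai as palm

    palm.configure(api_key="...")  # assumed credential setup

    # Listing models; pagination is handled by ModelsIterable internally.
    for m in palm.list_models(page_size=50):
        print(m.name)

    # generate_text() builds a glm.GenerateTextRequest via the helpers above.
    completion = palm.generate_text(prompt="Write a two-line poem about tides.")
    print(completion.result)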