diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 6dcd9ab15..19574ff0a 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -11,5 +11,5 @@ jobs: pull-requests: write runs-on: ubuntu-latest steps: - - uses: actions/labeler@v4 + - uses: actions/labeler@ac9175f8a1f3625fd0d4fb234536d26811351594 # v4 if: ${{ github.event.pull_request.draft == false }} diff --git a/.github/workflows/remove-issue-labels.yml b/.github/workflows/remove-issue-labels.yml index 43f43dd13..79783430d 100644 --- a/.github/workflows/remove-issue-labels.yml +++ b/.github/workflows/remove-issue-labels.yml @@ -11,7 +11,7 @@ jobs: issues: write runs-on: ubuntu-latest steps: - - uses: actions-ecosystem/action-remove-labels@v1 + - uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1 with: labels: | status:triaged diff --git a/.github/workflows/remove-pr-labels.yml b/.github/workflows/remove-pr-labels.yml index 3aed6ced7..64aeeaa64 100644 --- a/.github/workflows/remove-pr-labels.yml +++ b/.github/workflows/remove-pr-labels.yml @@ -11,7 +11,7 @@ jobs: pull-requests: write runs-on: ubuntu-latest steps: - - uses: actions-ecosystem/action-remove-labels@v1 + - uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1 with: labels: | status:awaiting review diff --git a/.github/workflows/samples.yaml b/.github/workflows/samples.yaml index 04f2611aa..8a3dc6462 100644 --- a/.github/workflows/samples.yaml +++ b/.github/workflows/samples.yaml @@ -10,11 +10,11 @@ jobs: steps: - name: Checkout Code - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 - name: Get Changed Files id: changed_files - uses: tj-actions/changed-files@v44 + uses: tj-actions/changed-files@2f7c5bfce28377bc069a65ba478de0a74aa0ca32 # v44 with: files: | samples/*.py @@ -49,11 +49,11 @@ jobs: steps: - name: Checkout Code - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 - name: Get Changed Files id: changed_files - uses: tj-actions/changed-files@v44 + uses: tj-actions/changed-files@2f7c5bfce28377bc069a65ba478de0a74aa0ca32 # v44 with: files: | samples/rest/*.sh diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index df7d6c186..1f7710e5a 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -19,7 +19,7 @@ jobs: pull-requests: write steps: - - uses: actions/stale@v5 + - uses: actions/stale@f7176fd3007623b69d27091f9b9d4ab7995f0a06 # v5 with: repo-token: ${{ secrets.GITHUB_TOKEN }} days-before-issue-stale: 14 diff --git a/.github/workflows/test_pr.yaml b/.github/workflows/test_pr.yaml index 362a53e49..35f7c8fea 100644 --- a/.github/workflows/test_pr.yaml +++ b/.github/workflows/test_pr.yaml @@ -19,8 +19,8 @@ jobs: name: Test Py3.12 runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 + - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4 with: python-version: '3.12' - name: Run tests @@ -32,8 +32,8 @@ jobs: name: Test Py3.11 runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 + - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4 with: python-version: '3.11' - name: Run tests @@ -45,8 +45,8 @@ jobs: name: Test Py3.10 runs-on: ubuntu-latest 
    steps:
-    - uses: actions/checkout@v3
-    - uses: actions/setup-python@v4
+    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+    - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4
      with:
        python-version: '3.10'
    - name: Run tests
@@ -58,8 +58,8 @@ jobs:
    name: Test Py3.9
    runs-on: ubuntu-latest
    steps:
-    - uses: actions/checkout@v3
-    - uses: actions/setup-python@v4
+    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+    - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4
      with:
        python-version: '3.9'
    - name: Run tests
@@ -71,8 +71,8 @@ jobs:
    name: pytype 3.11
    runs-on: ubuntu-latest
    steps:
-    - uses: actions/checkout@v3
-    - uses: actions/setup-python@v4
+    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+    - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4
      with:
        python-version: '3.11'
    - name: Run pytype
@@ -86,8 +86,8 @@ jobs:
    name: Check format with black
    runs-on: ubuntu-latest
    steps:
-    - uses: actions/checkout@v3
-    - uses: actions/setup-python@v4
+    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+    - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4
      with:
        python-version: '3.11'
    - name: Check format
diff --git a/.github/workflows/user-input.yml b/.github/workflows/user-input.yml
new file mode 100644
index 000000000..0aa0d6512
--- /dev/null
+++ b/.github/workflows/user-input.yml
@@ -0,0 +1,16 @@
+name: Manage awaiting user response
+
+on:
+  issue_comment:
+    types: [created]
+  pull_request_review_comment:
+    types: [created]
+
+jobs:
+  remove_label:
+    runs-on: ubuntu-latest
+    if: "contains(github.event.issue.labels.*.name, 'status: awaiting user response')"
+    steps:
+      - uses: actions-ecosystem/action-remove-labels@v1
+        with:
+          labels: "status: awaiting user response"
diff --git a/README.md b/README.md
index f05edb77b..e31c5ffbb 100644
--- a/README.md
+++ b/README.md
@@ -1,51 +1,26 @@
-# Google AI Python SDK for the Gemini API
+# [Deprecated] Google AI Python SDK for the Gemini API
-[![PyPI version](https://badge.fury.io/py/google-generativeai.svg)](https://badge.fury.io/py/google-generativeai)
-![Python support](https://img.shields.io/pypi/pyversions/google-generativeai)
-![PyPI - Downloads](https://img.shields.io/pypi/dd/google-generativeai)
+With Gemini 2.0, we took the chance to create a single unified SDK for all developers who want to use Google's GenAI models (Gemini, Veo, Imagen, etc). As part of that process, we took all of the feedback from this SDK and what developers like about other SDKs in the ecosystem to create the [Google Gen AI SDK](https://github.com/googleapis/python-genai).
-The Google AI Python SDK is the easiest way for Python developers to build with the Gemini API. The Gemini API gives you access to Gemini [models](https://ai.google.dev/models/gemini) created by [Google DeepMind](https://deepmind.google/technologies/gemini/#introduction). Gemini models are built from the ground up to be multimodal, so you can reason seamlessly across text, images, and code.
+The full migration guide from the old SDK to new SDK is available in the [Gemini API docs](https://ai.google.dev/gemini-api/docs/migrate).
-## Get started with the Gemini API
-1. Go to [Google AI Studio](https://aistudio.google.com/).
-2. Login with your Google account.
-3. [Create](https://aistudio.google.com/app/apikey) an API key.
-4. Try a Python SDK [quickstart](https://github.com/google-gemini/gemini-api-cookbook/blob/main/quickstarts/Prompting.ipynb) in the [Gemini API Cookbook](https://github.com/google-gemini/gemini-api-cookbook/).
-5. For detailed instructions, try the
-[Python SDK tutorial](https://ai.google.dev/tutorials/python_quickstart) on [ai.google.dev](https://ai.google.dev).
+The Gemini API docs are fully updated to show examples of the new Google Gen AI SDK. We know how disruptive an SDK change can be and don't take this change lightly, but our goal is to create an extremely simple and clear path for developers to build with our models so it felt necessary to make this change.
-## Usage example
-See the [Gemini API Cookbook](https://github.com/google-gemini/gemini-api-cookbook/) or [ai.google.dev](https://ai.google.dev) for complete code.
+Thank you for building with Gemini and [let us know](https://discuss.ai.google.dev/c/gemini-api/4) if you need any help!
-1. Install from [PyPI](https://pypi.org/project/google-generativeai).
+**Please be advised that this repository is now considered legacy.** For the latest features, performance improvements, and active development, we strongly recommend migrating to the official **[Google Generative AI SDK for Python](https://github.com/googleapis/python-genai)**.
-`pip install -U google-generativeai`
+**Support Plan for this Repository:**
-2. Import the SDK and configure your API key.
+* **Limited Maintenance:** Development is now restricted to **critical bug fixes only**. No new features will be added.
+* **Purpose:** This limited support aims to provide stability for users while they transition to the new SDK.
+* **End-of-Life Date:** All support for this repository (including bug fixes) will permanently end on **August 31st, 2025**.
-```python
-import google.generativeai as genai
-import os
+We encourage all users to begin planning their migration to the [Google Generative AI SDK](https://github.com/googleapis/python-genai) to ensure continued access to the latest capabilities and support.
-genai.configure(api_key=os.environ["GEMINI_API_KEY"])
-```
-
-3. Create a model and run a prompt.
-
-```python
-model = genai.GenerativeModel('gemini-1.5-flash')
-response = model.generate_content("The opposite of hot is")
-print(response.text)
-```
-
-## Documentation
-
-See the [Gemini API Cookbook](https://github.com/google-gemini/gemini-api-cookbook/) or [ai.google.dev](https://ai.google.dev) for complete documentation.
-
-## Contributing
-
-See [Contributing](https://github.com/google/generative-ai-python/blob/main/CONTRIBUTING.md) for more information on contributing to the Google AI Python SDK.
-
-## License
-
-The contents of this repository are licensed under the [Apache License, version 2.0](http://www.apache.org/licenses/LICENSE-2.0).
+
diff --git a/docs/api/google/generativeai/protos/GenerationConfig.md b/docs/api/google/generativeai/protos/GenerationConfig.md
index 87d1a63f8..5a068c968 100644
--- a/docs/api/google/generativeai/protos/GenerationConfig.md
+++ b/docs/api/google/generativeai/protos/GenerationConfig.md
@@ -242,7 +242,7 @@ been seen in the respponse so far.
 A positive penalty will discourage the use of tokens that
 have already been used, proportional to the number of times
 the token has been used: The more a token is used, the more
-dificult it is for the model to use that token again
+difficult it is for the model to use that token again
 increasing the vocabulary of responses.
Caution: A *negative* penalty will encourage the model to diff --git a/google/generativeai/client.py b/google/generativeai/client.py index c9c5c8c5b..113592594 100644 --- a/google/generativeai/client.py +++ b/google/generativeai/client.py @@ -185,12 +185,12 @@ def configure( "Invalid configuration: Please set either `api_key` or `client_options['api_key']`, but not both." ) else: - if api_key is None: + if not api_key: # If no key is provided explicitly, attempt to load one from the # environment. api_key = os.getenv("GEMINI_API_KEY") - if api_key is None: + if not api_key: # If the GEMINI_API_KEY doesn't exist, attempt to load the # GOOGLE_API_KEY from the environment. api_key = os.getenv("GOOGLE_API_KEY") diff --git a/google/generativeai/notebook/command_utils.py b/google/generativeai/notebook/command_utils.py index 355592c21..f4432c0c2 100644 --- a/google/generativeai/notebook/command_utils.py +++ b/google/generativeai/notebook/command_utils.py @@ -106,7 +106,7 @@ def create_llm_function( def _convert_simple_compare_fn( - name_and_simple_fn: tuple[str, Callable[[str, str], Any]] + name_and_simple_fn: tuple[str, Callable[[str, str], Any]], ) -> tuple[str, llm_function.CompareFn]: simple_fn = name_and_simple_fn[1] new_fn = lambda x, y: simple_fn(x.result_value(), y.result_value()) diff --git a/google/generativeai/notebook/lib/llm_function.py b/google/generativeai/notebook/lib/llm_function.py index c3eb7b52d..c4f379828 100644 --- a/google/generativeai/notebook/lib/llm_function.py +++ b/google/generativeai/notebook/lib/llm_function.py @@ -64,7 +64,7 @@ def _convert_compare_fn_to_batch_add_fn( llmfn_output_row.LLMFnOutputRowView, ], Any, - ] + ], ) -> llmfn_post_process.LLMCompareFnPostProcessBatchAddFn: """Vectorize a single-row-based comparison function.""" @@ -74,7 +74,7 @@ def _fn( llmfn_output_row.LLMFnOutputRowView, llmfn_output_row.LLMFnOutputRowView, ] - ] + ], ) -> Sequence[Any]: return [fn(lhs, rhs) for lhs, rhs in lhs_and_rhs_rows] diff --git a/google/generativeai/responder.py b/google/generativeai/responder.py index dd388c6a6..70a58cf5b 100644 --- a/google/generativeai/responder.py +++ b/google/generativeai/responder.py @@ -23,6 +23,7 @@ import pydantic from google.generativeai import protos +from google.generativeai.types import content_types Type = protos.Type @@ -89,52 +90,36 @@ def _generate_schema( """ if descriptions is None: descriptions = {} - if required is None: - required = [] defaults = dict(inspect.signature(f).parameters) - fields_dict = { - name: ( - # 1. We infer the argument type here: use Any rather than None so - # it will not try to auto-infer the type based on the default value. - (param.annotation if param.annotation != inspect.Parameter.empty else Any), - pydantic.Field( - # 2. We do not support default values for now. - # default=( - # param.default if param.default != inspect.Parameter.empty - # else None - # ), - # 3. We support user-provided descriptions. - description=descriptions.get(name, None), - ), - ) - for name, param in defaults.items() - # We do not support *args or **kwargs - if param.kind - in ( + + fields_dict = {} + for name, param in defaults.items(): + if param.kind in ( inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.POSITIONAL_ONLY, - ) - } - parameters = pydantic.create_model(f.__name__, **fields_dict).model_json_schema() - # Postprocessing - # 4. 
Suppress unnecessary title generation: - # * https://github.com/pydantic/pydantic/issues/1051 - # * http://cl/586221780 - parameters.pop("title", None) - for name, function_arg in parameters.get("properties", {}).items(): - function_arg.pop("title", None) - annotation = defaults[name].annotation - # 5. Nullable fields: - # * https://github.com/pydantic/pydantic/issues/1270 - # * https://stackoverflow.com/a/58841311 - # * https://github.com/pydantic/pydantic/discussions/4872 - if typing.get_origin(annotation) is typing.Union and type(None) in typing.get_args( - annotation ): - function_arg["nullable"] = True + # We do not support default values for now. + # default=( + # param.default if param.default != inspect.Parameter.empty + # else None + # ), + field = pydantic.Field( + # We support user-provided descriptions. + description=descriptions.get(name, None) + ) + + # 1. We infer the argument type here: use Any rather than None so + # it will not try to auto-infer the type based on the default value. + if param.annotation != inspect.Parameter.empty: + fields_dict[name] = param.annotation, field + else: + fields_dict[name] = Any, field + + parameters = _build_schema(f.__name__, fields_dict) + # 6. Annotate required fields. - if required: + if required is not None: # We use the user-provided "required" fields if specified. parameters["required"] = required else: @@ -152,10 +137,138 @@ def _generate_schema( ) ) ] - schema = dict(name=f.__name__, description=f.__doc__, parameters=parameters) + schema = dict(name=f.__name__, description=f.__doc__) + if parameters["properties"]: + schema["parameters"] = parameters + return schema +def _build_schema(fname, fields_dict): + parameters = pydantic.create_model(fname, **fields_dict).model_json_schema() + defs = parameters.pop("$defs", {}) + # flatten the defs + for name, value in defs.items(): + unpack_defs(value, defs) + unpack_defs(parameters, defs) + + # 5. Nullable fields: + # * https://github.com/pydantic/pydantic/issues/1270 + # * https://stackoverflow.com/a/58841311 + # * https://github.com/pydantic/pydantic/discussions/4872 + convert_to_nullable(parameters) + add_object_type(parameters) + # Postprocessing + # 4. 
Suppress unnecessary title generation: + # * https://github.com/pydantic/pydantic/issues/1051 + # * http://cl/586221780 + strip_titles(parameters) + strip_additional_properties(parameters) + return parameters + + +def unpack_defs(schema, defs): + properties = schema.get("properties", None) + if properties is None: + return + + for name, value in properties.items(): + ref_key = value.get("$ref", None) + if ref_key is not None: + ref = defs[ref_key.split("defs/")[-1]] + unpack_defs(ref, defs) + properties[name] = ref + continue + + anyof = value.get("anyOf", None) + if anyof is not None: + for i, atype in enumerate(anyof): + ref_key = atype.get("$ref", None) + if ref_key is not None: + ref = defs[ref_key.split("defs/")[-1]] + unpack_defs(ref, defs) + anyof[i] = ref + continue + + items = value.get("items", None) + if items is not None: + ref_key = items.get("$ref", None) + if ref_key is not None: + ref = defs[ref_key.split("defs/")[-1]] + unpack_defs(ref, defs) + value["items"] = ref + continue + + +def strip_titles(schema): + title = schema.pop("title", None) + + properties = schema.get("properties", None) + if properties is not None: + for name, value in properties.items(): + strip_titles(value) + + items = schema.get("items", None) + if items is not None: + strip_titles(items) + + +def strip_additional_properties(schema): + schema.pop("additionalProperties", None) + + properties = schema.get("properties", None) + if properties is not None: + for name, value in properties.items(): + strip_additional_properties(value) + + items = schema.get("items", None) + if items is not None: + strip_additional_properties(items) + + +def add_object_type(schema): + properties = schema.get("properties", None) + if properties is not None: + schema.pop("required", None) + schema["type"] = "object" + for name, value in properties.items(): + add_object_type(value) + + items = schema.get("items", None) + if items is not None: + add_object_type(items) + + +def convert_to_nullable(schema): + anyof = schema.pop("anyOf", None) + if anyof is not None: + if len(anyof) != 2: + raise ValueError( + "Invalid input: Type Unions are not supported, except for `Optional` types. " + "Please provide an `Optional` type or a non-Union type." + ) + a, b = anyof + if a == {"type": "null"}: + schema.update(b) + elif b == {"type": "null"}: + schema.update(a) + else: + raise ValueError( + "Invalid input: Type Unions are not supported, except for `Optional` types. " + "Please provide an `Optional` type or a non-Union type." 
+ ) + schema["nullable"] = True + + properties = schema.get("properties", None) + if properties is not None: + for name, value in properties.items(): + convert_to_nullable(value) + + items = schema.get("items", None) + if items is not None: + convert_to_nullable(items) + + def _rename_schema_fields(schema: dict[str, Any]): if schema is None: return schema diff --git a/google/generativeai/types/content_types.py b/google/generativeai/types/content_types.py index f3db610e1..80f60d2b2 100644 --- a/google/generativeai/types/content_types.py +++ b/google/generativeai/types/content_types.py @@ -435,6 +435,7 @@ def _build_schema(fname, fields_dict): # * https://github.com/pydantic/pydantic/issues/1051 # * http://cl/586221780 strip_titles(parameters) + strip_additional_properties(parameters) return parameters @@ -484,6 +485,19 @@ def strip_titles(schema): strip_titles(items) +def strip_additional_properties(schema): + schema.pop("additionalProperties", None) + + properties = schema.get("properties", None) + if properties is not None: + for name, value in properties.items(): + strip_additional_properties(value) + + items = schema.get("items", None) + if items is not None: + strip_additional_properties(items) + + def add_object_type(schema): properties = schema.get("properties", None) if properties is not None: diff --git a/google/generativeai/types/generation_types.py b/google/generativeai/types/generation_types.py index 42571bbe7..5a2012e64 100644 --- a/google/generativeai/types/generation_types.py +++ b/google/generativeai/types/generation_types.py @@ -532,7 +532,7 @@ def text(self): texts.extend([f"```{outcome_result}", part.code_execution_result.output, "```"]) continue - part_type = protos.Part.pb(part).whichOneof("data") + part_type = protos.Part.pb(part).WhichOneof("data") raise ValueError(f"Could not convert `part.{part_type}` to text.") return "\n".join(texts) diff --git a/google/generativeai/types/model_types.py b/google/generativeai/types/model_types.py index ff66d6339..631e44d33 100644 --- a/google/generativeai/types/model_types.py +++ b/google/generativeai/types/model_types.py @@ -355,7 +355,10 @@ def make_model_name(name: AnyModelNameOptions): if isinstance(name, (Model, protos.Model, TunedModel, protos.TunedModel)): name = name.name # pytype: disable=attribute-error elif isinstance(name, str): - name = name + if "/" not in name: + name = "models/" + name + else: + name = name else: raise TypeError( "Invalid input type. Expected one of the following types: `str`, `Model`, or `TunedModel`." diff --git a/google/generativeai/version.py b/google/generativeai/version.py index b5271a21d..6df9e6f74 100644 --- a/google/generativeai/version.py +++ b/google/generativeai/version.py @@ -14,4 +14,4 @@ # limitations under the License. 
from __future__ import annotations -__version__ = "0.8.4" +__version__ = "0.8.5" diff --git a/samples/rest/cache.sh b/samples/rest/cache.sh index 218b5e4b1..8df687886 100644 --- a/samples/rest/cache.sh +++ b/samples/rest/cache.sh @@ -34,7 +34,7 @@ echo '{ "ttl": "300s" }' > request.json -curl -X POST "https://generativelanguage.googleapis.com/v1beta/cachedContents?key=$GOOGLE_API_KEY" \ +curl -X POST "https://generativelanguage.googleapis.com/v1beta/cachedContents?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -d @request.json \ > cache.json @@ -43,7 +43,7 @@ CACHE_NAME=$(cat cache.json | grep '"name":' | cut -d '"' -f 4 | head -n 1) echo "[START cache_generate_content]" # [START cache_generate_content] -curl -X POST "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash-001:generateContent?key=$GOOGLE_API_KEY" \ +curl -X POST "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash-001:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -d '{ "contents": [ @@ -62,22 +62,22 @@ rm a11.txt request.json echo "[START cache_list]" # [START cache_list] -curl "https://generativelanguage.googleapis.com/v1beta/cachedContents?key=$GOOGLE_API_KEY" +curl "https://generativelanguage.googleapis.com/v1beta/cachedContents?key=$GEMINI_API_KEY" # [END cache_list] echo "[START cache_get]" # [START cache_get] -curl "https://generativelanguage.googleapis.com/v1beta/$CACHE_NAME?key=$GOOGLE_API_KEY" +curl "https://generativelanguage.googleapis.com/v1beta/$CACHE_NAME?key=$GEMINI_API_KEY" # [END cache_get] echo "[START cache_update]" # [START cache_update] -curl -X PATCH "https://generativelanguage.googleapis.com/v1beta/$CACHE_NAME?key=$GOOGLE_API_KEY" \ +curl -X PATCH "https://generativelanguage.googleapis.com/v1beta/$CACHE_NAME?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -d '{"ttl": "600s"}' # [END cache_update] echo "[START cache_delete]" # [START cache_delete] -curl -X DELETE "https://generativelanguage.googleapis.com/v1beta/$CACHE_NAME?key=$GOOGLE_API_KEY" -# [END cache_delete] \ No newline at end of file +curl -X DELETE "https://generativelanguage.googleapis.com/v1beta/$CACHE_NAME?key=$GEMINI_API_KEY" +# [END cache_delete] diff --git a/samples/rest/chat.sh b/samples/rest/chat.sh index 78e6f9917..0243e4152 100644 --- a/samples/rest/chat.sh +++ b/samples/rest/chat.sh @@ -5,7 +5,7 @@ MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party) echo "[START chat]" # [START chat] -curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY \ +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -25,7 +25,7 @@ curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:ge echo "[START chat_streaming]" # [START chat_streaming] -curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=$GOOGLE_API_KEY \ +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -53,7 +53,7 @@ else B64FLAGS="-w0" fi -curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=$GOOGLE_API_KEY \ +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY \ -H 
'Content-Type: application/json' \ -X POST \ -d '{ @@ -90,4 +90,4 @@ curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:st } ] }' 2> /dev/null | grep "text" -# [END chat_streaming_with_images] \ No newline at end of file +# [END chat_streaming_with_images] diff --git a/samples/rest/code_execution.sh b/samples/rest/code_execution.sh index 44fbf679c..f134e728f 100644 --- a/samples/rest/code_execution.sh +++ b/samples/rest/code_execution.sh @@ -2,7 +2,7 @@ set -eu echo "[START code_execution_basic]" # [START code_execution_basic] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -d ' {"tools": [{"code_execution": {}}], "contents": { @@ -16,7 +16,7 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:g echo "[START code_execution_chat]" # [START code_execution_chat] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -d '{"tools": [{"code_execution": {}}], "contents": [ diff --git a/samples/rest/configure_model_parameters.sh b/samples/rest/configure_model_parameters.sh index bd8d9d4c6..2e6e31b35 100644 --- a/samples/rest/configure_model_parameters.sh +++ b/samples/rest/configure_model_parameters.sh @@ -2,21 +2,15 @@ set -eu echo "[START configure_model_parameters]" # [START configure_model_parameters] -curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY \ +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY \ -H 'Content-Type: application/json' \ -X POST \ -d '{ "contents": [{ "parts":[ - {"text": "Write a story about a magic backpack."} + {"text": "Explain how AI works"} ] }], - "safetySettings": [ - { - "category": "HARM_CATEGORY_DANGEROUS_CONTENT", - "threshold": "BLOCK_ONLY_HIGH" - } - ], "generationConfig": { "stopSequences": [ "Title" diff --git a/samples/rest/controlled_generation.sh b/samples/rest/controlled_generation.sh index 533870649..352b435de 100644 --- a/samples/rest/controlled_generation.sh +++ b/samples/rest/controlled_generation.sh @@ -2,7 +2,7 @@ set -eu echo "json_controlled_generation" # [START json_controlled_generation] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -d '{ "contents": [{ @@ -27,7 +27,7 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:g echo "json_no_schema" # [START json_no_schema] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -d '{ "contents": [{ diff --git a/samples/rest/count_tokens.sh b/samples/rest/count_tokens.sh index e69fd6d1c..3c6be6719 100644 --- a/samples/rest/count_tokens.sh +++ b/samples/rest/count_tokens.sh @@ -19,14 +19,14 @@ fi echo 
"[START tokens_context_window]" # [START tokens_context_window] -curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro?key=$GOOGLE_API_KEY > model.json +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro?key=$GEMINI_API_KEY > model.json jq .inputTokenLimit model.json jq .outputTokenLimit model.json # [END tokens_context_window] echo "[START tokens_text_only]" # [START tokens_text_only] -curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY \ +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -40,7 +40,7 @@ curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:co echo "[START tokens_chat]" # [START tokens_chat] -curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY \ +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -57,7 +57,7 @@ curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:co echo "[START tokens_multimodal_image_inline]" # [START tokens_multimodal_image_inline] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -285,4 +285,4 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-lat ' > tools_output.json jq .usageMetadata.totalTokenCount tools_output.json -# [END tokens_tools] \ No newline at end of file +# [END tokens_tools] diff --git a/samples/rest/embed.sh b/samples/rest/embed.sh index 26fa11d44..49f6ddfcb 100644 --- a/samples/rest/embed.sh +++ b/samples/rest/embed.sh @@ -2,7 +2,7 @@ set -eu echo "[START embed_content]" # [START embed_content] -curl "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:embedContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:embedContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -d '{"model": "models/text-embedding-004", "content": { @@ -12,7 +12,7 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004 echo "[START batch_embed_contents]" # [START batch_embed_contents] -curl "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:batchEmbedContents?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:batchEmbedContents?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -d '{"requests": [{ "model": "models/text-embedding-004", @@ -29,4 +29,4 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004 "content": { "parts":[{ "text": "How does the brain work?"}]}, }, ]}' 2> /dev/null | grep -C 5 values -# [END batch_embed_contents] \ No newline at end of file +# [END batch_embed_contents] diff --git a/samples/rest/files.sh b/samples/rest/files.sh index 8f292c4f6..dd72db3b1 100644 --- a/samples/rest/files.sh +++ b/samples/rest/files.sh @@ -22,7 +22,7 @@ tmp_header_file=upload-header.tmp # Initial resumable request defining metadata. # The upload url is in the response headers dump them to a file. 
-curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ +curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \ -D upload-header.tmp \ -H "X-Goog-Upload-Protocol: resumable" \ -H "X-Goog-Upload-Command: start" \ @@ -45,7 +45,7 @@ file_uri=$(jq ".file.uri" file_info.json) echo file_uri=$file_uri # Now generate content using that file -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -75,7 +75,7 @@ echo file_uri=$file_uri echo "[START files_delete]" # [START files_delete] -curl --request "DELETE" https://generativelanguage.googleapis.com/v1beta/files/$name?key=$GOOGLE_API_KEY +curl --request "DELETE" https://generativelanguage.googleapis.com/v1beta/files/$name?key=$GEMINI_API_KEY # [END files_delete] # [END files_create_text] @@ -90,7 +90,7 @@ tmp_header_file=upload-header.tmp # Initial resumable request defining metadata. # The upload url is in the response headers dump them to a file. -curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ +curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \ -D upload-header.tmp \ -H "X-Goog-Upload-Protocol: resumable" \ -H "X-Goog-Upload-Command: start" \ @@ -113,7 +113,7 @@ file_uri=$(jq ".file.uri" file_info.json) echo file_uri=$file_uri # Now generate content using that file -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -143,7 +143,7 @@ tmp_header_file=upload-header.tmp # Initial resumable request defining metadata. # The upload url is in the response headers dump them to a file. -curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ +curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \ -D upload-header.tmp \ -H "X-Goog-Upload-Protocol: resumable" \ -H "X-Goog-Upload-Command: start" \ @@ -166,7 +166,7 @@ file_uri=$(jq ".file.uri" file_info.json) echo file_uri=$file_uri # Now generate content using that file -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -191,7 +191,7 @@ DISPLAY_NAME=VIDEO_PATH # Initial resumable request defining metadata. # The upload url is in the response headers dump them to a file. -curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ +curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \ -D upload-header.tmp \ -H "X-Goog-Upload-Protocol: resumable" \ -H "X-Goog-Upload-Command: start" \ @@ -227,7 +227,7 @@ do done # Now generate content using that file -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -252,7 +252,7 @@ tmp_header_file=upload-header.tmp # Initial resumable request defining metadata. # The upload url is in the response headers dump them to a file. 
-curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ +curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \ -D upload-header.tmp \ -H "X-Goog-Upload-Protocol: resumable" \ -H "X-Goog-Upload-Command: start" \ @@ -296,5 +296,5 @@ echo "[START files_list]" # [START files_list] echo "My files: " -curl "https://generativelanguage.googleapis.com/v1beta/files?key=$GOOGLE_API_KEY" -# [END files_list] \ No newline at end of file +curl "https://generativelanguage.googleapis.com/v1beta/files?key=$GEMINI_API_KEY" +# [END files_list] diff --git a/samples/rest/function_calling.sh b/samples/rest/function_calling.sh index f88641e81..a0e0fa28d 100644 --- a/samples/rest/function_calling.sh +++ b/samples/rest/function_calling.sh @@ -8,8 +8,7 @@ cat > tools.json << EOF "function_declarations": [ { "name": "enable_lights", - "description": "Turn on the lighting system.", - "parameters": { "type": "object" } + "description": "Turn on the lighting system." }, { "name": "set_light_color", @@ -29,14 +28,13 @@ cat > tools.json << EOF }, { "name": "stop_lights", - "description": "Turn off the lighting system.", - "parameters": { "type": "object" } + "description": "Turn off the lighting system." } ] } EOF -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -d @<(echo ' { @@ -45,16 +43,16 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-lat "text": "You are a helpful lighting system bot. You can turn lights on and off, and you can set the color. Do not perform any other tasks." } }, - "tools": ['$(source "$tools")'], + "tools": ['$(cat tools.json)'], "tool_config": { - "function_calling_config": {"mode": "none"} + "function_calling_config": {"mode": "auto"} }, "contents": { "role": "user", "parts": { - "text": "What can you do?" + "text": "Turn on the lights please." } } } diff --git a/samples/rest/models.sh b/samples/rest/models.sh index a03d5585b..465d627d8 100644 --- a/samples/rest/models.sh +++ b/samples/rest/models.sh @@ -2,10 +2,10 @@ set -eu echo "[START models_list]" # [START models_list] -curl https://generativelanguage.googleapis.com/v1beta/models?key=$GOOGLE_API_KEY +curl https://generativelanguage.googleapis.com/v1beta/models?key=$GEMINI_API_KEY # [END models_list] echo "[START models_get]" # [START models_get] -curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash?key=$GOOGLE_API_KEY +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash?key=$GEMINI_API_KEY # [END models_get] diff --git a/samples/rest/safety_settings.sh b/samples/rest/safety_settings.sh index 713d25c06..a087307db 100644 --- a/samples/rest/safety_settings.sh +++ b/samples/rest/safety_settings.sh @@ -10,7 +10,7 @@ echo "[START safety_settings]" "parts":[{ "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! 
Write a ironic phrase about them.'"}]}]}' > request.json - curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ + curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d @request.json 2> /dev/null @@ -27,7 +27,7 @@ echo "[START safety_settings_multi]" "parts":[{ "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write a ironic phrase about them.'"}]}]}' > request.json - curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ + curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d @request.json 2> /dev/null diff --git a/samples/rest/system_instruction.sh b/samples/rest/system_instruction.sh index 1e4c36d6c..44f77ea04 100644 --- a/samples/rest/system_instruction.sh +++ b/samples/rest/system_instruction.sh @@ -2,7 +2,7 @@ set -eu echo "[START system_instruction]" # [START system_instruction] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -d '{ "system_instruction": { "parts": @@ -10,4 +10,4 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:g "contents": { "parts": { "text": "Hello there"}}}' -# [END system_instruction] \ No newline at end of file +# [END system_instruction] diff --git a/samples/rest/text_generation.sh b/samples/rest/text_generation.sh index 8cfadd688..fc21e7d00 100755 --- a/samples/rest/text_generation.sh +++ b/samples/rest/text_generation.sh @@ -19,7 +19,7 @@ BASE_URL="https://generativelanguage.googleapis.com" echo "[START text_gen_text_only_prompt]" # [START text_gen_text_only_prompt] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -31,7 +31,7 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:g echo "[START text_gen_text_only_prompt_streaming]" # [START text_gen_text_only_prompt_streaming] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=${GOOGLE_API_KEY}" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=${GEMINI_API_KEY}" \ -H 'Content-Type: application/json' \ --no-buffer \ -d '{ "contents":[{"parts":[{"text": "Write a story about a magic backpack."}]}]}' @@ -64,7 +64,7 @@ cat > "$TEMP_JSON" << EOF } EOF -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d "@$TEMP_JSON" 2> /dev/null @@ -88,7 +88,7 @@ cat > "$TEMP_JSON" << EOF } EOF -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=$GOOGLE_API_KEY" \ +curl 
"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d "@$TEMP_JSON" 2> /dev/null @@ -129,7 +129,7 @@ cat > "$TEMP_JSON" << EOF EOF # Make the API request using the JSON file -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d "@$TEMP_JSON" 2> /dev/null > response.json @@ -162,7 +162,7 @@ cat > "$TEMP_JSON" << EOF EOF # Make the API request using the JSON file -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d "@$TEMP_JSON" 2> /dev/null > response.json @@ -181,7 +181,7 @@ tmp_header_file=upload-header.tmp # Initial resumable request defining metadata. # The upload url is in the response headers dump them to a file. -curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ +curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \ -D upload-header.tmp \ -H "X-Goog-Upload-Protocol: resumable" \ -H "X-Goog-Upload-Command: start" \ @@ -203,7 +203,7 @@ curl "${upload_url}" \ file_uri=$(jq ".file.uri" file_info.json) echo file_uri=$file_uri -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -231,7 +231,7 @@ tmp_header_file=upload-header.tmp # Initial resumable request defining metadata. # The upload url is in the response headers dump them to a file. -curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ +curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \ -D upload-header.tmp \ -H "X-Goog-Upload-Protocol: resumable" \ -H "X-Goog-Upload-Command: start" \ @@ -253,7 +253,7 @@ curl "${upload_url}" \ file_uri=$(jq ".file.uri" file_info.json) echo file_uri=$file_uri -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -277,7 +277,7 @@ DISPLAY_NAME=VIDEO # Initial resumable request defining metadata. # The upload url is in the response headers dump them to a file. -curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ +curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \ -D "${tmp_header_file}" \ -H "X-Goog-Upload-Protocol: resumable" \ -H "X-Goog-Upload-Command: start" \ @@ -314,7 +314,7 @@ do state=$(jq ".file.state" file_info.json) done -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -340,7 +340,7 @@ DISPLAY_NAME=VIDEO_PATH # Initial resumable request defining metadata. 
# The upload url is in the response headers dump them to a file. -curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ +curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \ -D upload-header.tmp \ -H "X-Goog-Upload-Protocol: resumable" \ -H "X-Goog-Upload-Command: start" \ @@ -374,7 +374,7 @@ do state=$(jq ".file.state" file_info.json) done -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -401,7 +401,7 @@ tmp_header_file=upload-header.tmp # Initial resumable request defining metadata. # The upload url is in the response headers dump them to a file. -curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ +curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \ -D upload-header.tmp \ -H "X-Goog-Upload-Protocol: resumable" \ -H "X-Goog-Upload-Command: start" \ @@ -424,7 +424,7 @@ file_uri=$(jq ".file.uri" file_info.json) echo file_uri=$file_uri # Now generate content using that file -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -453,7 +453,7 @@ tmp_header_file=upload-header.tmp # Initial resumable request defining metadata. # The upload url is in the response headers dump them to a file. -curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ +curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \ -D upload-header.tmp \ -H "X-Goog-Upload-Protocol: resumable" \ -H "X-Goog-Upload-Command: start" \ @@ -476,7 +476,7 @@ file_uri=$(jq ".file.uri" file_info.json) echo file_uri=$file_uri # Now generate content using that file -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -489,4 +489,4 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:s cat response.json echo -# [END text_gen_multimodal_pdf_streaming] \ No newline at end of file +# [END text_gen_multimodal_pdf_streaming] diff --git a/samples/rest/tuned_models.sh b/samples/rest/tuned_models.sh index 5594734f6..0e32f97a0 100644 --- a/samples/rest/tuned_models.sh +++ b/samples/rest/tuned_models.sh @@ -2,7 +2,7 @@ set -eu echo "[START tuned_models_create]" # [START tuned_models_create] -curl -X POST "https://generativelanguage.googleapis.com/v1beta/tunedModels?key=$GOOGLE_API_KEY" \ +curl -X POST "https://generativelanguage.googleapis.com/v1beta/tunedModels?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ -d ' { @@ -77,7 +77,7 @@ tuning_done=false while [[ "$tuning_done" != "true" ]]; do sleep 5 - curl -X GET "https://generativelanguage.googleapis.com/v1/${operation}?key=$GOOGLE_API_KEY" \ + curl -X GET "https://generativelanguage.googleapis.com/v1/${operation}?key=$GEMINI_API_KEY" \ -H 'Content-Type: application/json' \ 2> /dev/null > tuning_operation.json @@ -90,7 +90,7 @@ done # Or get the TunedModel and check it's state. The model is ready to use if the state is active. 
modelname=$(cat tunemodel.json | jq ".metadata.tunedModel" | tr -d '"') -curl -X GET https://generativelanguage.googleapis.com/v1beta/${modelname}?key=$GOOGLE_API_KEY \ +curl -X GET https://generativelanguage.googleapis.com/v1beta/${modelname}?key=$GEMINI_API_KEY \ -H 'Content-Type: application/json' > tuned_model.json cat tuned_model.json | jq ".state" @@ -99,7 +99,7 @@ cat tuned_model.json | jq ".state" echo "[START tuned_models_generate_content]" # [START tuned_models_generate_content] -curl -X POST https://generativelanguage.googleapis.com/v1beta/$modelname:generateContent?key=$GOOGLE_API_KEY \ +curl -X POST https://generativelanguage.googleapis.com/v1beta/$modelname:generateContent?key=$GEMINI_API_KEY \ -H 'Content-Type: application/json' \ -d '{ "contents": [{ @@ -112,7 +112,7 @@ curl -X POST https://generativelanguage.googleapis.com/v1beta/$modelname:generat echo "[START tuned_models_get]" # [START tuned_models_get] -curl -X GET https://generativelanguage.googleapis.com/v1beta/${modelname}?key=$GOOGLE_API_KEY \ +curl -X GET https://generativelanguage.googleapis.com/v1beta/${modelname}?key=$GEMINI_API_KEY \ -H 'Content-Type: application/json' | grep state # [END tuned_models_get] @@ -130,7 +130,7 @@ jq .tunedModels[].name < tuned_models.json page_token=$(jq .nextPageToken < tuned_models.json | tr -d '"') if [[ "$page_token" != "null"" ]]; then -curl -X GET https://generativelanguage.googleapis.com/v1beta/tunedModels?page_size=5\&page_token=${page_token}?key=$GOOGLE_API_KEY \ +curl -X GET https://generativelanguage.googleapis.com/v1beta/tunedModels?page_size=5\&page_token=${page_token}?key=$GEMINI_API_KEY \ -H "Content-Type: application/json" > tuned_models2.json jq .tunedModels[].name < tuned_models.json fi @@ -138,6 +138,6 @@ fi echo "[START tuned_models_delete]" # [START tuned_models_delete] -curl -X DELETE https://generativelanguage.googleapis.com/v1beta/${modelname}?key=$GOOGLE_API_KEY \ +curl -X DELETE https://generativelanguage.googleapis.com/v1beta/${modelname}?key=$GEMINI_API_KEY \ -H 'Content-Type: application/json' -# [END tuned_models_delete] \ No newline at end of file +# [END tuned_models_delete] diff --git a/samples/tuned_models.py b/samples/tuned_models.py index 970919115..df12903ac 100644 --- a/samples/tuned_models.py +++ b/samples/tuned_models.py @@ -22,7 +22,9 @@ class UnitTests(absltest.TestCase): - def test_tuned_models_create(self): + @classmethod + def setUpClass(cls): + # Code to run once before all tests in the class # [START tuned_models_create] import google.generativeai as genai @@ -53,7 +55,7 @@ def test_tuned_models_create(self): # You can use a tuned model here too. 
Set `source_model="tunedModels/..."` display_name="increment", source_model=base_model, - epoch_count=20, + epoch_count=5, batch_size=4, learning_rate=0.001, training_data=training_data, @@ -62,22 +64,24 @@ def test_tuned_models_create(self): for status in operation.wait_bar(): time.sleep(10) - result = operation.result() - print(result) + tuned_model = operation.result() + print(tuned_model) # # You can plot the loss curve with: # snapshots = pd.DataFrame(result.tuning_task.snapshots) # sns.lineplot(data=snapshots, x='epoch', y='mean_loss') - model = genai.GenerativeModel(model_name=result.name) + model = genai.GenerativeModel(model_name=tuned_model.name) result = model.generate_content("III") print(result.text) # IV # [END tuned_models_create] + cls.tuned_model_name = tuned_model_name = tuned_model.name + def test_tuned_models_generate_content(self): # [START tuned_models_generate_content] import google.generativeai as genai - model = genai.GenerativeModel(model_name="tunedModels/my-increment-model") + model = genai.GenerativeModel(model_name=self.tuned_model_name) result = model.generate_content("III") print(result.text) # "IV" # [END tuned_models_generate_content] @@ -86,7 +90,7 @@ def test_tuned_models_get(self): # [START tuned_models_get] import google.generativeai as genai - model_info = genai.get_model("tunedModels/my-increment-model") + model_info = genai.get_model(self.tuned_model_name) print(model_info) # [END tuned_models_get] @@ -100,6 +104,7 @@ def test_tuned_models_list(self): def test_tuned_models_delete(self): import time + import google.generativeai as genai base_model = "models/gemini-1.5-flash-001-tuning" training_data = samples / "increment_tuning_data.json" @@ -109,7 +114,7 @@ def test_tuned_models_delete(self): # You can use a tuned model here too. 
Set `source_model="tunedModels/..."` display_name="increment", source_model=base_model, - epoch_count=20, + epoch_count=5, batch_size=4, learning_rate=0.001, training_data=training_data, @@ -135,7 +140,7 @@ def test_tuned_models_permissions_create(self): # [START tuned_models_permissions_create] import google.generativeai as genai - model_info = genai.get_model("tunedModels/my-increment-model") + model_info = genai.get_model(self.tuned_model_name) # [START_EXCLUDE] for p in model_info.permissions.list(): if p.role.name != "OWNER": @@ -161,7 +166,7 @@ def test_tuned_models_permissions_list(self): # [START tuned_models_permissions_list] import google.generativeai as genai - model_info = genai.get_model("tunedModels/my-increment-model") + model_info = genai.get_model(self.tuned_model_name) # [START_EXCLUDE] for p in model_info.permissions.list(): @@ -190,7 +195,7 @@ def test_tuned_models_permissions_get(self): # [START tuned_models_permissions_get] import google.generativeai as genai - model_info = genai.get_model("tunedModels/my-increment-model") + model_info = genai.get_model(self.tuned_model_name) # [START_EXCLUDE] for p in model_info.permissions.list(): @@ -214,7 +219,7 @@ def test_tuned_models_permissions_update(self): # [START tuned_models_permissions_update] import google.generativeai as genai - model_info = genai.get_model("tunedModels/my-increment-model") + model_info = genai.get_model(self.tuned_model_name) # [START_EXCLUDE] for p in model_info.permissions.list(): @@ -235,7 +240,7 @@ def test_tuned_models_permission_delete(self): # [START tuned_models_permissions_delete] import google.generativeai as genai - model_info = genai.get_model("tunedModels/my-increment-model") + model_info = genai.get_model(self.tuned_model_name) # [START_EXCLUDE] for p in model_info.permissions.list(): if p.role.name != "OWNER": diff --git a/setup.py b/setup.py index ee7160eaf..d8ab792a4 100644 --- a/setup.py +++ b/setup.py @@ -36,10 +36,7 @@ def get_version(): version = get_version() -if version[0] == "0": - release_status = "Development Status :: 4 - Beta" -else: - release_status = "Development Status :: 5 - Production/Stable" +release_status = "Development Status :: 7 - Inactive" dependencies = [ "google-ai-generativelanguage==0.6.15", @@ -86,6 +83,7 @@ def get_version(): "Programming Language :: Python :: 3.10", # Colab "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Operating System :: OS Independent", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Typing :: Typed", diff --git a/tests/notebook/lib/test_llm_function.py b/tests/notebook/lib/test_llm_function.py index 896e49c88..008ed7a38 100644 --- a/tests/notebook/lib/test_llm_function.py +++ b/tests/notebook/lib/test_llm_function.py @@ -393,7 +393,7 @@ def _is_length_greater_than(lhs: Mapping[str, Any], rhs: Mapping[str, Any]) -> b # Batch-based comparison function for post-processing. 
def _sum_of_lengths( - rows: Sequence[tuple[Mapping[str, Any], Mapping[str, Any]]] + rows: Sequence[tuple[Mapping[str, Any], Mapping[str, Any]]], ) -> Sequence[int]: return [lhs["length"] + rhs["length"] for lhs, rhs in rows] diff --git a/tests/test_client.py b/tests/test_client.py index 9162c3d75..e6e4acfd4 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -29,6 +29,18 @@ def test_api_key_passed_via_client_options(self): client_opts = client._client_manager.client_config["client_options"] self.assertEqual(client_opts.api_key, "AIzA_client_opts") + @mock.patch.dict(os.environ, {"GEMINI_API_KEY": "AIzA_env"}) + def test_api_key_from_environment(self): + # Default to API key loaded from environment. + client.configure() + client_opts = client._client_manager.client_config["client_options"] + self.assertEqual(client_opts.api_key, "AIzA_env") + + # But not when a key is provided explicitly. + client.configure(api_key="AIzA_client") + client_opts = client._client_manager.client_config["client_options"] + self.assertEqual(client_opts.api_key, "AIzA_client") + @mock.patch.dict(os.environ, {"GOOGLE_API_KEY": "AIzA_env"}) def test_api_key_from_environment(self): # Default to API key loaded from environment. @@ -41,6 +53,30 @@ def test_api_key_from_environment(self): client_opts = client._client_manager.client_config["client_options"] self.assertEqual(client_opts.api_key, "AIzA_client") + @mock.patch.dict(os.environ, {"GEMINI_API_KEY": "", "GOOGLE_API_KEY": "AIzA_env"}) + def test_empty_gemini_api_key_doesnt_shadow(self): + # Default to API key loaded from environment. + client.configure() + client_opts = client._client_manager.client_config["client_options"] + self.assertEqual(client_opts.api_key, "AIzA_env") + + # But not when a key is provided explicitly. + client.configure(api_key="AIzA_client") + client_opts = client._client_manager.client_config["client_options"] + self.assertEqual(client_opts.api_key, "AIzA_client") + + @mock.patch.dict(os.environ, {"GEMINI_API_KEY": "", "GOOGLE_API_KEY": "AIzA_env"}) + def test_empty_google_api_key_doesnt_shadow(self): + # Default to API key loaded from environment. + client.configure() + client_opts = client._client_manager.client_config["client_options"] + self.assertEqual(client_opts.api_key, "AIzA_env") + + # But not when a key is provided explicitly. + client.configure(api_key="AIzA_client") + client_opts = client._client_manager.client_config["client_options"] + self.assertEqual(client_opts.api_key, "AIzA_client") + def test_api_key_cannot_be_set_twice(self): client_opts = client_options.ClientOptions(api_key="AIzA_client_opts") diff --git a/tests/test_models.py b/tests/test_models.py index c7cd1dbcd..6f10f9123 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -169,14 +169,6 @@ def test_max_temperature(self): model = models.get_base_model(name) self.assertEqual(max_temperature, model.max_temperature) - @parameterized.named_parameters( - ["simple", "mystery-bison-001"], - ["model-instance", protos.Model(name="how?-bison-001")], - ) - def test_fail_with_unscoped_model_name(self, name): - with self.assertRaises(ValueError): - model = models.get_model(name) - def test_list_models(self): # The low level lib wraps the response in an iterable, so this is a fair test. 
diff --git a/tests/test_models.py b/tests/test_models.py
index c7cd1dbcd..6f10f9123 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -169,14 +169,6 @@ def test_max_temperature(self):
         model = models.get_base_model(name)
         self.assertEqual(max_temperature, model.max_temperature)

-    @parameterized.named_parameters(
-        ["simple", "mystery-bison-001"],
-        ["model-instance", protos.Model(name="how?-bison-001")],
-    )
-    def test_fail_with_unscoped_model_name(self, name):
-        with self.assertRaises(ValueError):
-            model = models.get_model(name)
-
     def test_list_models(self):
         # The low level lib wraps the response in an iterable, so this is a fair test.
         self.responses = {
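The deleted test_models.py case asserted that `models.get_model()` raises `ValueError` for names without a `models/` or `tunedModels/` scope; removing it suggests bare names are no longer rejected at this layer. For reference, the kind of scoping check the old test exercised looks roughly like the helper below, which is illustrative only and not the library's code:

```python
def check_model_name_is_scoped(name: str) -> str:
    """Illustrative check: accept only scoped resource names.

    The removed test passed bare names like "mystery-bison-001" and
    expected a ValueError, which is the behaviour sketched here.
    """
    if name.startswith("models/") or name.startswith("tunedModels/"):
        return name
    raise ValueError(
        f"Model names must start with 'models/' or 'tunedModels/': got {name!r}"
    )
```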
diff --git a/tests/test_responder.py b/tests/test_responder.py
deleted file mode 100644
index d2818da8a..000000000
--- a/tests/test_responder.py
+++ /dev/null
@@ -1,255 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import pathlib
-from typing import Any
-
-from absl.testing import absltest
-from absl.testing import parameterized
-from google.generativeai import protos
-from google.generativeai import responder
-
-
-HERE = pathlib.Path(__file__).parent
-TEST_PNG_PATH = HERE / "test_img.png"
-TEST_PNG_URL = "https://storage.googleapis.com/generativeai-downloads/data/test_img.png"
-TEST_PNG_DATA = TEST_PNG_PATH.read_bytes()
-
-TEST_JPG_PATH = HERE / "test_img.jpg"
-TEST_JPG_URL = "https://storage.googleapis.com/generativeai-downloads/data/test_img.jpg"
-TEST_JPG_DATA = TEST_JPG_PATH.read_bytes()
-
-
-# simple test function
-def datetime():
-    "Returns the current UTC date and time."
-
-
-class UnitTests(parameterized.TestCase):
-    @parameterized.named_parameters(
-        [
-            "FunctionLibrary",
-            responder.FunctionLibrary(
-                tools=protos.Tool(
-                    function_declarations=[
-                        protos.FunctionDeclaration(
-                            name="datetime", description="Returns the current UTC date and time."
-                        )
-                    ]
-                )
-            ),
-        ],
-        [
-            "IterableTool-Tool",
-            [
-                responder.Tool(
-                    function_declarations=[
-                        protos.FunctionDeclaration(
-                            name="datetime", description="Returns the current UTC date and time."
-                        )
-                    ]
-                )
-            ],
-        ],
-        [
-            "IterableTool-protos.Tool",
-            [
-                protos.Tool(
-                    function_declarations=[
-                        protos.FunctionDeclaration(
-                            name="datetime",
-                            description="Returns the current UTC date and time.",
-                        )
-                    ]
-                )
-            ],
-        ],
-        [
-            "IterableTool-ToolDict",
-            [
-                dict(
-                    function_declarations=[
-                        dict(
-                            name="datetime",
-                            description="Returns the current UTC date and time.",
-                        )
-                    ]
-                )
-            ],
-        ],
-        [
-            "IterableTool-IterableFD",
-            [
-                [
-                    protos.FunctionDeclaration(
-                        name="datetime",
-                        description="Returns the current UTC date and time.",
-                    )
-                ]
-            ],
-        ],
-        [
-            "IterableTool-FD",
-            [
-                protos.FunctionDeclaration(
-                    name="datetime",
-                    description="Returns the current UTC date and time.",
-                )
-            ],
-        ],
-        [
-            "Tool",
-            responder.Tool(
-                function_declarations=[
-                    protos.FunctionDeclaration(
-                        name="datetime", description="Returns the current UTC date and time."
-                    )
-                ]
-            ),
-        ],
-        [
-            "protos.Tool",
-            protos.Tool(
-                function_declarations=[
-                    protos.FunctionDeclaration(
-                        name="datetime", description="Returns the current UTC date and time."
-                    )
-                ]
-            ),
-        ],
-        [
-            "ToolDict",
-            dict(
-                function_declarations=[
-                    dict(name="datetime", description="Returns the current UTC date and time.")
-                ]
-            ),
-        ],
-        [
-            "IterableFD-FD",
-            [
-                responder.FunctionDeclaration(
-                    name="datetime", description="Returns the current UTC date and time."
-                )
-            ],
-        ],
-        [
-            "IterableFD-CFD",
-            [
-                responder.CallableFunctionDeclaration(
-                    name="datetime",
-                    description="Returns the current UTC date and time.",
-                    function=datetime,
-                )
-            ],
-        ],
-        [
-            "IterableFD-dict",
-            [dict(name="datetime", description="Returns the current UTC date and time.")],
-        ],
-        ["IterableFD-Callable", [datetime]],
-        [
-            "FD",
-            responder.FunctionDeclaration(
-                name="datetime", description="Returns the current UTC date and time."
-            ),
-        ],
-        [
-            "CFD",
-            responder.CallableFunctionDeclaration(
-                name="datetime",
-                description="Returns the current UTC date and time.",
-                function=datetime,
-            ),
-        ],
-        [
-            "protos.FD",
-            protos.FunctionDeclaration(
-                name="datetime", description="Returns the current UTC date and time."
-            ),
-        ],
-        ["dict", dict(name="datetime", description="Returns the current UTC date and time.")],
-        ["Callable", datetime],
-    )
-    def test_to_tools(self, tools):
-        function_library = responder.to_function_library(tools)
-        if function_library is None:
-            raise ValueError("This shouldn't happen")
-        tools = function_library.to_proto()
-
-        tools = type(tools[0]).to_dict(tools[0])
-        tools["function_declarations"][0].pop("parameters", None)
-
-        expected = dict(
-            function_declarations=[
-                dict(name="datetime", description="Returns the current UTC date and time.")
-            ]
-        )
-
-        self.assertEqual(tools, expected)
-
-    def test_two_fun_is_one_tool(self):
-        def a():
-            pass
-
-        def b():
-            pass
-
-        function_library = responder.to_function_library([a, b])
-        if function_library is None:
-            raise ValueError("This shouldn't happen")
-        tools = function_library.to_proto()
-
-        self.assertLen(tools, 1)
-        self.assertLen(tools[0].function_declarations, 2)
-
-    @parameterized.named_parameters(
-        ["int", int, protos.Schema(type=protos.Type.INTEGER)],
-        ["float", float, protos.Schema(type=protos.Type.NUMBER)],
-        ["str", str, protos.Schema(type=protos.Type.STRING)],
-        [
-            "list",
-            list[str],
-            protos.Schema(
-                type=protos.Type.ARRAY,
-                items=protos.Schema(type=protos.Type.STRING),
-            ),
-        ],
-        [
-            "list-list-int",
-            list[list[int]],
-            protos.Schema(
-                type=protos.Type.ARRAY,
-                items=protos.Schema(
-                    protos.Schema(
-                        type=protos.Type.ARRAY,
-                        items=protos.Schema(type=protos.Type.INTEGER),
-                    ),
-                ),
-            ),
-        ],
-        ["dict", dict, protos.Schema(type=protos.Type.OBJECT)],
-        ["dict-str-any", dict[str, Any], protos.Schema(type=protos.Type.OBJECT)],
-    )
-    def test_auto_schema(self, annotation, expected):
-        def fun(a: annotation):
-            pass
-
-        cfd = responder.FunctionDeclaration.from_function(fun)
-        got = cfd.parameters.properties["a"]
-        self.assertEqual(got, expected)
-
-
-if __name__ == "__main__":
-    absltest.main()
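The deleted test_responder.py above covered the responder module's normalization of many tool shapes (callables, dicts, protos, `Tool` wrappers, and lists of these) into a single `protos.Tool`, plus automatic schema inference from Python type annotations. For reference, the pattern those tests exercised looked roughly like the sketch below; it only uses calls that appear verbatim in the deleted file and describes the behaviour of earlier releases, not of the replacement SDK:

```python
from google.generativeai import protos, responder


# A plain callable: the name comes from __name__, the description from the
# docstring, and the parameter schema from the type annotations.
def add(a: int, b: int):
    "Returns the sum of a and b."


# Annotation -> Schema inference, as the deleted test_auto_schema asserted.
decl = responder.FunctionDeclaration.from_function(add)
assert decl.parameters.properties["a"] == protos.Schema(type=protos.Type.INTEGER)

# Any of the shapes in the deleted test_to_tools normalize to one Tool proto.
library = responder.to_function_library([add])
print(library.to_proto())  # a single protos.Tool with one FunctionDeclaration
```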