From cd648a55781c42cf7b5558c438b56e2b9fb064cb Mon Sep 17 00:00:00 2001
From: Valeriy Burlaka
Date: Tue, 10 Dec 2024 15:37:07 +0100
Subject: [PATCH 1/4] feat(vertex-ai): multimodal embeddings api: convert markdown

---
 .../embeddings/multimodal_example_syntax.py | 41 +++++++++++++++++++
 .../embeddings/test_embeddings_examples.py  |  7 ++++
 2 files changed, 48 insertions(+)
 create mode 100644 generative_ai/embeddings/multimodal_example_syntax.py

diff --git a/generative_ai/embeddings/multimodal_example_syntax.py b/generative_ai/embeddings/multimodal_example_syntax.py
new file mode 100644
index 00000000000..ee15b133851
--- /dev/null
+++ b/generative_ai/embeddings/multimodal_example_syntax.py
@@ -0,0 +1,41 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import vertexai
+
+from vertexai.vision_models import MultiModalEmbeddingResponse
+
+PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")
+
+vertexai.init(project=PROJECT_ID, location="us-central1")
+
+
+def create_embeddings() -> MultiModalEmbeddingResponse:
+    # [START generativeaionvertexai_multimodal_embedding_example_syntax]
+    from vertexai.vision_models import MultiModalEmbeddingModel
+
+    model = MultiModalEmbeddingModel.from_pretrained("multimodalembedding@001")
+    embeddings = model.get_embeddings(
+        contextual_text="Cars on Highway",
+        # image=...,
+        # video=...,
+    )
+    # [END generativeaionvertexai_multimodal_embedding_example_syntax]
+    return embeddings
+
+
+if __name__ == "__main__":
+    create_embeddings()
diff --git a/generative_ai/embeddings/test_embeddings_examples.py b/generative_ai/embeddings/test_embeddings_examples.py
index afa350e50db..37e57b7dedd 100644
--- a/generative_ai/embeddings/test_embeddings_examples.py
+++ b/generative_ai/embeddings/test_embeddings_examples.py
@@ -30,6 +30,7 @@
 import generate_embeddings_with_lower_dimension
 import model_tuning_example
 import multimodal_example
+import multimodal_example_syntax
 import multimodal_image_example
 import multimodal_video_example
 
@@ -78,6 +79,12 @@ def test_generate_embeddings_with_lower_dimension() -> None:
     assert len(embeddings.text_embedding) == 128
 
 
+@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
+def test_create_embeddings() -> None:
+    embeddings = multimodal_example_syntax.create_embeddings()
+    assert embeddings is not None
+
+
 @backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
 def test_text_embed_text() -> None:
     embeddings = document_retrieval_example.embed_text()

From 7124e91aec33c8574c3c2f63b5268d717a2f1ea3 Mon Sep 17 00:00:00 2001
From: Valeriy Burlaka
Date: Tue, 10 Dec 2024 16:04:01 +0100
Subject: [PATCH 2/4] feat(vertex-ai): text embeddings api: convert markdown

---
 generative_ai/embeddings/test_embeddings_examples.py |  7 ++++
 generative_ai/embeddings/text_example_syntax.py      | 37 +++++++++++++++++++
 2 files changed, 44 insertions(+)
 create mode 100644 generative_ai/embeddings/text_example_syntax.py

diff --git a/generative_ai/embeddings/test_embeddings_examples.py b/generative_ai/embeddings/test_embeddings_examples.py
index 37e57b7dedd..704c69e1517 100644
--- a/generative_ai/embeddings/test_embeddings_examples.py
+++ b/generative_ai/embeddings/test_embeddings_examples.py
@@ -33,6 +33,7 @@
 import multimodal_example_syntax
 import multimodal_image_example
 import multimodal_video_example
+import text_example_syntax
 
 
 @backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
@@ -85,6 +86,12 @@ def test_create_embeddings() -> None:
     assert embeddings is not None
 
 
+@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
+def test_create_text_embeddings() -> None:
+    embeddings = text_example_syntax.create_embeddings()
+    assert embeddings is not None
+
+
 @backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
 def test_text_embed_text() -> None:
     embeddings = document_retrieval_example.embed_text()
diff --git a/generative_ai/embeddings/text_example_syntax.py b/generative_ai/embeddings/text_example_syntax.py
new file mode 100644
index 00000000000..f95ec1c6afe
--- /dev/null
+++ b/generative_ai/embeddings/text_example_syntax.py
@@ -0,0 +1,37 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import vertexai
+
+from vertexai.language_models import TextEmbedding
+
+PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")
+
+vertexai.init(project=PROJECT_ID, location="us-central1")
+
+
+def create_embeddings() -> TextEmbedding:
+    # [START generativeaionvertexai_text_embedding_example_syntax]
+    from vertexai.language_models import TextEmbeddingModel
+
+    model = TextEmbeddingModel.from_pretrained("text-embedding-005")
+    embeddings = model.get_embeddings(["Cars on a highway", "Traffic lights"])
+    # [END generativeaionvertexai_text_embedding_example_syntax]
+    return embeddings
+
+
+if __name__ == "__main__":
+    create_embeddings()
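
Note (not part of the series): a minimal sketch of how the two new embeddings samples from patches 1-2 could be smoke-checked by hand. It assumes the snippet is run from generative_ai/embeddings/ with GOOGLE_CLOUD_PROJECT exported and application-default credentials available; the 1408-value figure is the documented default dimensionality for multimodalembedding@001.

    # Hypothetical local smoke check; not added by these patches.
    import multimodal_example_syntax
    import text_example_syntax

    # MultiModalEmbeddingResponse.text_embedding is a plain list of floats
    # (1408 values by default for multimodalembedding@001).
    multimodal_response = multimodal_example_syntax.create_embeddings()
    print(len(multimodal_response.text_embedding))

    # TextEmbeddingModel.get_embeddings() returns one TextEmbedding per input string.
    text_embeddings = text_example_syntax.create_embeddings()
    for embedding in text_embeddings:
        print(len(embedding.values))
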
From 2f9ff5d4c1b5fb08085350be920ffaeb604bb855 Mon Sep 17 00:00:00 2001
From: Valeriy Burlaka
Date: Tue, 10 Dec 2024 16:43:45 +0100
Subject: [PATCH 3/4] feat(vertex-ai): inference api: convert markdown

---
 generative_ai/inference/example_syntax.py     | 56 +++++++++++++++++++
 generative_ai/inference/inference_api_test.py |  6 ++
 2 files changed, 62 insertions(+)
 create mode 100644 generative_ai/inference/example_syntax.py

diff --git a/generative_ai/inference/example_syntax.py b/generative_ai/inference/example_syntax.py
new file mode 100644
index 00000000000..1bab09918c3
--- /dev/null
+++ b/generative_ai/inference/example_syntax.py
@@ -0,0 +1,56 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import vertexai
+
+from vertexai.language_models import TextEmbedding
+
+PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")
+
+vertexai.init(project=PROJECT_ID, location="us-central1")
+
+
+def generate_response() -> TextEmbedding:
+    # [START generativeaionvertexai_example_syntax]
+    from vertexai.generative_models import GenerationConfig, GenerativeModel
+
+    gemini_model = GenerativeModel("gemini-1.5-flash-002")
+    generation_config = GenerationConfig(
+        # See https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/content-generation-parameters
+    )
+    safety_settings = {
+        # See https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters
+    }
+    model_response = gemini_model.generate_content(
+        "...prompt content...",
+        generation_config=generation_config,
+        safety_settings=safety_settings,
+    )
+    # [END generativeaionvertexai_example_syntax]
+    # [START generativeaionvertexai_example_syntax_streaming]
+    model_response = gemini_model.generate_content(
+        "...prompt content...",
+        generation_config=generation_config,
+        safety_settings=safety_settings,
+        stream=True,
+    )
+    # [END generativeaionvertexai_example_syntax_streaming]
+
+    return model_response
+
+
+if __name__ == "__main__":
+    generate_response()
diff --git a/generative_ai/inference/inference_api_test.py b/generative_ai/inference/inference_api_test.py
index b3c1f238a9d..1a2c3bc4ff9 100644
--- a/generative_ai/inference/inference_api_test.py
+++ b/generative_ai/inference/inference_api_test.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import example_syntax
 import non_stream_multimodality_basic
 import non_stream_text_basic
 import stream_multimodality_basic
@@ -36,3 +37,8 @@ def test_stream_text_basic() -> None:
 def test_stream_multi_modality_basic() -> None:
     responses = stream_multimodality_basic.generate_content()
     assert responses
+
+
+def test_generate_response() -> None:
+    response = example_syntax.generate_response()
+    assert response is not None
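
Note (not part of the series): generate_response() in patch 3 returns the value of the stream=True call, so the caller receives an iterable of partial responses rather than a single response object. A sketch of consuming it, assuming it is run from generative_ai/inference/ with the same project and credential setup the samples themselves require:

    # Hypothetical consumer for the streaming result; not added by this patch.
    import example_syntax

    for chunk in example_syntax.generate_response():
        # Each chunk is a partial GenerationResponse; chunk.text exposes the
        # text of that chunk when it contains one.
        print(chunk.text, end="")
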
From 24d53c6ee1309e0ded3df150069f9e92b867aabc Mon Sep 17 00:00:00 2001
From: Valeriy Burlaka
Date: Tue, 10 Dec 2024 16:44:29 +0100
Subject: [PATCH 4/4] chore: rename the test file for inference api according to devrel standard

---
 .../inference/{inference_api_test.py => test_inference.py} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename generative_ai/inference/{inference_api_test.py => test_inference.py} (100%)

diff --git a/generative_ai/inference/inference_api_test.py b/generative_ai/inference/test_inference.py
similarity index 100%
rename from generative_ai/inference/inference_api_test.py
rename to generative_ai/inference/test_inference.py