Thanks to visit codestin.com
Credit goes to github.com

Skip to content

add thin client for azure #15

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 2 commits into
base: v1
Choose a base branch
from
Draft

add thin client for azure #15

wants to merge 2 commits into from

Conversation

kristapratico
Copy link
Collaborator

@kristapratico kristapratico commented Oct 9, 2023

Usage

Chat completions

# Example: chat completion against an Azure OpenAI deployment using
# Azure Active Directory authentication (DefaultAzureCredential).
import os
from openai.azure import AzureOpenAIClient
from azure.identity import DefaultAzureCredential

# The Azure OpenAI resource endpoint, e.g. https://<resource>.openai.azure.com
endpoint = os.environ["AZURE_OPENAI_ENDPOINT"]
# Name of the model deployment created in the Azure OpenAI resource.
deployment_id = "gpt-35-turbo-16k"

client = AzureOpenAIClient(
    credential=DefaultAzureCredential(),
    base_url=f"{endpoint}/openai/deployments/{deployment_id}/",
)

completion = client.chat.completions.create(
    model=deployment_id,
    messages=[
        {
            "role": "user",
            "content": "How do I output all files in a directory using Python?",
        },
    ]
)
# model_dump() returns the response as a dict; model_dump_json() would
# return a JSON *string*, which cannot be subscripted like a dict.
# Also note the response model itself is not subscriptable, so we index
# into the dumped dict rather than into `completion`.
response = completion.model_dump()  # Access response as dict
print(response["choices"][0]["message"]["content"])

Chat completion extensions

# Example: chat completions with Azure-specific extensions ("bring your
# own data" via Azure Cognitive Search), authenticated with AAD.
import os
from openai.azure import AzureOpenAIClient
from azure.identity import DefaultAzureCredential

endpoint = os.environ["AZURE_OPENAI_ENDPOINT"]
deployment_id = "gpt-35-turbo-16k"

# The /extensions route enables Azure-only request/response properties.
client = AzureOpenAIClient(
    credential=DefaultAzureCredential(),
    base_url=f"{endpoint}/openai/deployments/{deployment_id}/extensions",
)

# Data-source configuration for the extensions API.
search_source = {
    "type": "AzureCognitiveSearch",
    "parameters": {
        "endpoint": os.environ["AZURE_SEARCH_ENDPOINT"],
        "key": os.environ["AZURE_SEARCH_KEY"],
        "indexName": os.environ["AZURE_SEARCH_INDEX"],
    },
}

completion = client.chat.completions.create(
    model=deployment_id,
    messages=[
        {
            "role": "user",
            "content": "How is Azure machine learning different than Azure OpenAI?",
        },
    ],
    # Azure-only request body properties are passed through extra_body.
    extra_body={"dataSources": [search_source]},
)

# Access response as dict (example above) or access by attribute with
# Azure-only properties stored in `model_extra`.
assert completion.id
assert completion.object == "extensions.chat.completion"
assert completion.created
assert completion.model
assert len(completion.choices) == 1

choice = completion.choices[0]
assert choice.finish_reason
assert choice.index is not None
assert choice.message.content
assert choice.message.role

# Azure-only "context" payload is exposed via model_extra.
context_message = choice.message.model_extra["context"]["messages"][0]
assert context_message["role"] == "tool"
assert context_message["content"]

Completions

# Example: legacy (non-chat) completions using API-key authentication.
import os
from openai.azure import AzureOpenAIClient

endpoint = os.environ["AZURE_OPENAI_ENDPOINT"]
api_key = os.environ["AZURE_OPENAI_KEY"]
deployment_id = "text-davinci-003"

client = AzureOpenAIClient(
    api_key=api_key,
    base_url=f"{endpoint}/openai/deployments/{deployment_id}/",
)

completion = client.completions.create(
    model=deployment_id,
    prompt="How do I bake a chocolate cake?",
)

assert completion.id
assert completion.object == "text_completion"
assert completion.created
assert completion.model

usage = completion.usage
assert usage.completion_tokens is not None
assert usage.prompt_tokens is not None
assert usage.total_tokens == usage.completion_tokens + usage.prompt_tokens

assert len(completion.choices) == 1
first_choice = completion.choices[0]
assert first_choice.finish_reason
assert first_choice.index is not None
assert first_choice.text

# Azure-only content-filter annotations are carried in model_extra.
prompt_filter_result = completion.model_extra["prompt_filter_results"][0]["content_filter_results"]
for category in ("hate", "self_harm", "sexual", "violence"):
    assert prompt_filter_result[category]["filtered"] is False
    assert prompt_filter_result[category]["severity"] == "safe"

Embeddings

# Example: embeddings using API-key authentication.
import os
from openai.azure import AzureOpenAIClient

endpoint = os.environ["AZURE_OPENAI_ENDPOINT"]
api_key = os.environ["AZURE_OPENAI_KEY"]
deployment_id = "text-embedding-ada-002"

client = AzureOpenAIClient(
    api_key=api_key,
    base_url=f"{endpoint}/openai/deployments/{deployment_id}/",
)

embeddings = client.embeddings.create(
    model=deployment_id,
    input="hello world"
)
# Fixed NameError: the asserts previously referenced an undefined name
# `embedding`; the response object is bound to `embeddings` above.
assert embeddings.object == "list"
assert embeddings.model
assert embeddings.usage.prompt_tokens is not None
assert embeddings.usage.total_tokens is not None
assert len(embeddings.data) == 1
assert embeddings.data[0].object == "embedding"
assert embeddings.data[0].index is not None
assert len(embeddings.data[0].embedding) > 0

Audio

# Example: audio transcription (Whisper) using API-key authentication.
import os
from openai.azure import AzureOpenAIClient

endpoint = os.environ["AZURE_OPENAI_ENDPOINT"]
api_key = os.environ["AZURE_OPENAI_KEY"]
deployment_id = "whisper-1"

client = AzureOpenAIClient(
    api_key=api_key,
    base_url=f"{endpoint}/openai/deployments/{deployment_id}/",
)

# Use a context manager so the file handle is closed deterministically
# (the original leaked the handle via open(...).read() with no close).
with open("hello.m4a", "rb") as audio_file:
    audio = client.audio.transcriptions.create(
        model=deployment_id,
        file=audio_file.read()
    )
# Fixed NameError: the assert previously referenced an undefined name
# `result`; the transcription response is bound to `audio` above.
assert audio.text == "Hello."

DALL·E

Not supported by this client.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

Successfully merging this pull request may close these issues.

1 participant