diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 34349b74..c35a470c 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -2,12 +2,15 @@ name: test on: pull_request: + paths: + - 'examples/**' + - '**/README.md' jobs: test: strategy: matrix: - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 diff --git a/README.md b/README.md index 14dfddae..454c1595 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,12 @@ The Ollama Python library provides the easiest way to integrate Python 3.8+ projects with [Ollama](https://github.com/ollama/ollama). +## Prerequisites + +- [Ollama](https://ollama.com/download) should be installed and running +- Pull a model to use with the library: `ollama pull ` e.g. `ollama pull llama3.2` + - See [Ollama.com](https://ollama.com/search) for more information on the models available. + ## Install ```sh @@ -11,25 +17,34 @@ pip install ollama ## Usage ```python -import ollama -response = ollama.chat(model='llama3.1', messages=[ +from ollama import chat +from ollama import ChatResponse + +response: ChatResponse = chat(model='llama3.2', messages=[ { 'role': 'user', 'content': 'Why is the sky blue?', }, ]) print(response['message']['content']) +# or access fields directly from the response object +print(response.message.content) ``` +See [_types.py](ollama/_types.py) for more information on the response types. + ## Streaming responses -Response streaming can be enabled by setting `stream=True`, modifying function calls to return a Python generator where each part is an object in the stream. +Response streaming can be enabled by setting `stream=True`. + +> [!NOTE] +> Streaming Tool/Function calling is not yet supported. ```python -import ollama +from ollama import chat -stream = ollama.chat( - model='llama3.1', +stream = chat( + model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}], stream=True, ) @@ -38,6 +53,54 @@ for chunk in stream: print(chunk['message']['content'], end='', flush=True) ``` +## Custom client +A custom client can be created by instantiating `Client` or `AsyncClient` from `ollama`. + +All extra keyword arguments are passed into the [`httpx.Client`](https://www.python-httpx.org/api/#client). + +```python +from ollama import Client +client = Client( + host='http://localhost:11434', + headers={'x-some-header': 'some-value'} +) +response = client.chat(model='llama3.2', messages=[ + { + 'role': 'user', + 'content': 'Why is the sky blue?', + }, +]) +``` + +## Async client + +The `AsyncClient` class is used to make asynchronous requests. It can be configured with the same fields as the `Client` class. 
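Editorial aside, not part of the diff: the constructor fields shown for `Client` above apply to `AsyncClient` as well, and extra keyword arguments are likewise forwarded to the underlying `httpx.AsyncClient`. A minimal sketch, assuming a local Ollama server and the `llama3.2` model:

```python
import asyncio

from ollama import AsyncClient


async def main():
  # Same configuration surface as Client; `timeout` is forwarded to httpx.AsyncClient
  client = AsyncClient(
    host='http://localhost:11434',
    headers={'x-some-header': 'some-value'},
    timeout=30,
  )
  response = await client.chat(
    model='llama3.2',
    messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
  )
  print(response.message.content)


asyncio.run(main())
```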
+ +```python +import asyncio +from ollama import AsyncClient + +async def chat(): + message = {'role': 'user', 'content': 'Why is the sky blue?'} + response = await AsyncClient().chat(model='llama3.2', messages=[message]) + +asyncio.run(chat()) +``` + +Setting `stream=True` modifies functions to return a Python asynchronous generator: + +```python +import asyncio +from ollama import AsyncClient + +async def chat(): + message = {'role': 'user', 'content': 'Why is the sky blue?'} + async for part in await AsyncClient().chat(model='llama3.2', messages=[message], stream=True): + print(part['message']['content'], end='', flush=True) + +asyncio.run(chat()) +``` + ## API The Ollama Python library's API is designed around the [Ollama REST API](https://github.com/ollama/ollama/blob/main/docs/api.md) @@ -45,13 +108,13 @@ The Ollama Python library's API is designed around the [Ollama REST API](https:/ ### Chat ```python -ollama.chat(model='llama3.1', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}]) +ollama.chat(model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}]) ``` ### Generate ```python -ollama.generate(model='llama3.1', prompt='Why is the sky blue?') +ollama.generate(model='llama3.2', prompt='Why is the sky blue?') ``` ### List @@ -63,14 +126,14 @@ ollama.list() ### Show ```python -ollama.show('llama3.1') +ollama.show('llama3.2') ``` ### Create ```python modelfile=''' -FROM llama3.1 +FROM llama3.2 SYSTEM You are mario from super mario bros. ''' @@ -80,83 +143,45 @@ ollama.create(model='example', modelfile=modelfile) ### Copy ```python -ollama.copy('llama3.1', 'user/llama3.1') +ollama.copy('llama3.2', 'user/llama3.2') ``` ### Delete ```python -ollama.delete('llama3.1') +ollama.delete('llama3.2') ``` ### Pull ```python -ollama.pull('llama3.1') +ollama.pull('llama3.2') ``` ### Push ```python -ollama.push('user/llama3.1') -``` - -### Embeddings - -```python -ollama.embeddings(model='llama3.1', prompt='The sky is blue because of rayleigh scattering') +ollama.push('user/llama3.2') ``` -### Ps +### Embed ```python -ollama.ps() +ollama.embed(model='llama3.2', input='The sky is blue because of rayleigh scattering') ``` -## Custom client - -A custom client can be created with the following fields: - -- `host`: The Ollama host to connect to -- `timeout`: The timeout for requests +### Embed (batch) ```python -from ollama import Client -client = Client(host='http://localhost:11434') -response = client.chat(model='llama3.1', messages=[ - { - 'role': 'user', - 'content': 'Why is the sky blue?', - }, -]) +ollama.embed(model='llama3.2', input=['The sky is blue because of rayleigh scattering', 'Grass is green because of chlorophyll']) ``` -## Async client +### Ps ```python -import asyncio -from ollama import AsyncClient - -async def chat(): - message = {'role': 'user', 'content': 'Why is the sky blue?'} - response = await AsyncClient().chat(model='llama3.1', messages=[message]) - -asyncio.run(chat()) +ollama.ps() ``` -Setting `stream=True` modifies functions to return a Python asynchronous generator: - -```python -import asyncio -from ollama import AsyncClient - -async def chat(): - message = {'role': 'user', 'content': 'Why is the sky blue?'} - async for part in await AsyncClient().chat(model='llama3.1', messages=[message], stream=True): - print(part['message']['content'], end='', flush=True) - -asyncio.run(chat()) -``` ## Errors diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 00000000..a455c602 --- /dev/null +++ b/examples/README.md @@ 
-0,0 +1,57 @@ +# Running Examples + +Run the examples in this directory with: +```sh +# Run example +python3 examples/<example>.py +``` + +### Chat - Chat with a model +- [chat.py](chat.py) +- [async-chat.py](async-chat.py) +- [chat-stream.py](chat-stream.py) - Streamed outputs +- [chat-with-history.py](chat-with-history.py) - Chat with model and maintain history of the conversation + + +### Generate - Generate text with a model +- [generate.py](generate.py) +- [async-generate.py](async-generate.py) +- [generate-stream.py](generate-stream.py) - Streamed outputs +- [fill-in-middle.py](fill-in-middle.py) - Given a prefix and suffix, fill in the middle + + +### Tools/Function Calling - Call a function with a model +- [tools.py](tools.py) - Simple example of Tools/Function Calling +- [async-tools.py](async-tools.py) + + +### Multimodal with Images - Chat with a multimodal (image chat) model +- [multimodal-chat.py](multimodal-chat.py) +- [multimodal-generate.py](multimodal-generate.py) + + +### Ollama List - List all downloaded models and their properties +- [list.py](list.py) + + +### Ollama ps - Show model status with CPU/GPU usage +- [ps.py](ps.py) + + +### Ollama Pull - Pull a model from Ollama +Requirement: `pip install tqdm` +- [pull.py](pull.py) + + +### Ollama Create - Create a model from a Modelfile +```sh +python create.py +``` +- [create.py](create.py) + +See [ollama/docs/modelfile.md](https://github.com/ollama/ollama/blob/main/docs/modelfile.md) for more information on the Modelfile format. + + +### Ollama Embed - Generate embeddings with a model +- [embed.py](embed.py) + diff --git a/examples/async-chat-stream/README.md b/examples/async-chat-stream/README.md deleted file mode 100644 index 611295a6..00000000 --- a/examples/async-chat-stream/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# async-chat-stream - -This example demonstrates how to create a conversation history using an asynchronous Ollama client and the chat endpoint. The streaming response is outputted to `stdout` as well as a TTS if enabled with `--speak` and available. Supported TTS are `say` on macOS and `espeak` on Linux. diff --git a/examples/async-chat-stream/main.py b/examples/async-chat-stream/main.py deleted file mode 100644 index 65047767..00000000 --- a/examples/async-chat-stream/main.py +++ /dev/null @@ -1,59 +0,0 @@ -import shutil -import asyncio -import argparse - -import ollama - - -async def speak(speaker, content): - if speaker: - p = await asyncio.create_subprocess_exec(speaker, content) - await p.communicate() - - -async def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--speak', default=False, action='store_true') - args = parser.parse_args() - - speaker = None - if not args.speak: - ...
- elif say := shutil.which('say'): - speaker = say - elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): - speaker = espeak - - client = ollama.AsyncClient() - - messages = [] - - while True: - if content_in := input('>>> '): - messages.append({'role': 'user', 'content': content_in}) - - content_out = '' - message = {'role': 'assistant', 'content': ''} - async for response in await client.chat(model='mistral', messages=messages, stream=True): - if response['done']: - messages.append(message) - - content = response['message']['content'] - print(content, end='', flush=True) - - content_out += content - if content in ['.', '!', '?', '\n']: - await speak(speaker, content_out) - content_out = '' - - message['content'] += content - - if content_out: - await speak(speaker, content_out) - print() - - -try: - asyncio.run(main()) -except (KeyboardInterrupt, EOFError): - ... diff --git a/examples/async-chat.py b/examples/async-chat.py new file mode 100644 index 00000000..81a50d9a --- /dev/null +++ b/examples/async-chat.py @@ -0,0 +1,19 @@ +import asyncio +from ollama import AsyncClient + + +async def main(): + messages = [ + { + 'role': 'user', + 'content': 'Why is the sky blue?', + }, + ] + + client = AsyncClient() + response = await client.chat('llama3.2', messages=messages) + print(response['message']['content']) + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/examples/async-generate.py b/examples/async-generate.py new file mode 100644 index 00000000..0097af16 --- /dev/null +++ b/examples/async-generate.py @@ -0,0 +1,15 @@ +import asyncio +import ollama + + +async def main(): + client = ollama.AsyncClient() + response = await client.generate('llama3.2', 'Why is the sky blue?') + print(response['response']) + + +if __name__ == '__main__': + try: + asyncio.run(main()) + except KeyboardInterrupt: + print('\nGoodbye!') diff --git a/examples/async-tools.py b/examples/async-tools.py new file mode 100644 index 00000000..07b3c4a8 --- /dev/null +++ b/examples/async-tools.py @@ -0,0 +1,78 @@ +import asyncio +from ollama import ChatResponse +import ollama + + +def add_two_numbers(a: int, b: int) -> int: + """ + Add two numbers + + Args: + a (int): The first number + b (int): The second number + + Returns: + int: The sum of the two numbers + """ + return a + b + + +def subtract_two_numbers(a: int, b: int) -> int: + """ + Subtract two numbers + """ + return a - b + + +# Tools can still be manually defined and passed into chat +subtract_two_numbers_tool = { + 'type': 'function', + 'function': { + 'name': 'subtract_two_numbers', + 'description': 'Subtract two numbers', + 'parameters': { + 'type': 'object', + 'required': ['a', 'b'], + 'properties': { + 'a': {'type': 'integer', 'description': 'The first number'}, + 'b': {'type': 'integer', 'description': 'The second number'}, + }, + }, + }, +} + + +async def main(): + client = ollama.AsyncClient() + + prompt = 'What is three plus one?' 
+ print('Prompt:', prompt) + + available_functions = { + 'add_two_numbers': add_two_numbers, + 'subtract_two_numbers': subtract_two_numbers, + } + + response: ChatResponse = await client.chat( + 'llama3.1', + messages=[{'role': 'user', 'content': prompt}], + tools=[add_two_numbers, subtract_two_numbers_tool], + ) + + if response.message.tool_calls: + # There may be multiple tool calls in the response + for tool in response.message.tool_calls: + # Ensure the function is available, and then call it + if function_to_call := available_functions.get(tool.function.name): + print('Calling function:', tool.function.name) + print('Arguments:', tool.function.arguments) + print('Function output:', function_to_call(**tool.function.arguments)) + else: + print('Function', tool.function.name, 'not found') + + +if __name__ == '__main__': + try: + asyncio.run(main()) + except KeyboardInterrupt: + print('\nGoodbye!') diff --git a/examples/chat-stream/main.py b/examples/chat-stream.py similarity index 68% rename from examples/chat-stream/main.py rename to examples/chat-stream.py index 2a573466..cccab01a 100644 --- a/examples/chat-stream/main.py +++ b/examples/chat-stream.py @@ -8,8 +8,7 @@ }, ] -for part in chat('mistral', messages=messages, stream=True): +for part in chat('llama3.2', messages=messages, stream=True): print(part['message']['content'], end='', flush=True) -# end with a newline print() diff --git a/examples/chat-with-history.py b/examples/chat-with-history.py new file mode 100644 index 00000000..e98d15f6 --- /dev/null +++ b/examples/chat-with-history.py @@ -0,0 +1,38 @@ +from ollama import chat + + +messages = [ + { + 'role': 'user', + 'content': 'Why is the sky blue?', + }, + { + 'role': 'assistant', + 'content': "The sky is blue because of the way the Earth's atmosphere scatters sunlight.", + }, + { + 'role': 'user', + 'content': 'What is the weather in Tokyo?', + }, + { + 'role': 'assistant', + 'content': 'The weather in Tokyo is typically warm and humid during the summer months, with temperatures often exceeding 30°C (86°F). The city experiences a rainy season from June to September, with heavy rainfall and occasional typhoons. Winter is mild, with temperatures rarely dropping below freezing. 
The city is known for its high-tech and vibrant culture, with many popular tourist attractions such as the Tokyo Tower, Senso-ji Temple, and the bustling Shibuya district.', + }, +] + +while True: + user_input = input('Chat with history: ') + response = chat( + 'llama3.2', + messages=messages + + [ + {'role': 'user', 'content': user_input}, + ], + ) + + # Add the response to the messages to maintain the history + messages += [ + {'role': 'user', 'content': user_input}, + {'role': 'assistant', 'content': response.message.content}, + ] + print(response.message.content + '\n') diff --git a/examples/chat/main.py b/examples/chat.py similarity index 75% rename from examples/chat/main.py rename to examples/chat.py index 90c5f90a..2a30f8a2 100644 --- a/examples/chat/main.py +++ b/examples/chat.py @@ -1,6 +1,5 @@ from ollama import chat - messages = [ { 'role': 'user', @@ -8,5 +7,5 @@ }, ] -response = chat('mistral', messages=messages) +response = chat('llama3.2', messages=messages) print(response['message']['content']) diff --git a/examples/create.py b/examples/create.py new file mode 100644 index 00000000..d4b5b1f3 --- /dev/null +++ b/examples/create.py @@ -0,0 +1,30 @@ +import sys + +from ollama import create + + +args = sys.argv[1:] +if len(args) == 2: + # create from local file + path = args[1] +else: + print('usage: python create.py <model> <path>') + sys.exit(1) + +# TODO: update to real Modelfile values +modelfile = f""" +FROM {path} +""" +example_modelfile = """ +FROM llama3.2 +# sets the temperature to 1 [higher is more creative, lower is more coherent] +PARAMETER temperature 1 +# sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token +PARAMETER num_ctx 4096 + +# sets a custom system message to specify the behavior of the chat assistant +SYSTEM You are Mario from super mario bros, acting as an assistant.
+""" + +for response in create(model=args[0], modelfile=modelfile, stream=True): + print(response['status']) diff --git a/examples/create/main.py b/examples/create/main.py deleted file mode 100644 index 0a1161d9..00000000 --- a/examples/create/main.py +++ /dev/null @@ -1,20 +0,0 @@ -import sys - -from ollama import create - - -args = sys.argv[1:] -if len(args) == 2: - # create from local file - path = args[1] -else: - print('usage: python main.py ') - sys.exit(1) - -# TODO: update to real Modelfile values -modelfile = f""" -FROM {path} -""" - -for response in create(model=args[0], modelfile=modelfile, stream=True): - print(response['status']) diff --git a/examples/embed.py b/examples/embed.py new file mode 100644 index 00000000..5af145ea --- /dev/null +++ b/examples/embed.py @@ -0,0 +1,4 @@ +from ollama import embed + +response = embed(model='llama3.2', input='Hello, world!') +print(response['embeddings']) diff --git a/examples/fill-in-middle/main.py b/examples/fill-in-middle.py similarity index 100% rename from examples/fill-in-middle/main.py rename to examples/fill-in-middle.py diff --git a/examples/generate-stream/main.py b/examples/generate-stream.py similarity index 51% rename from examples/generate-stream/main.py rename to examples/generate-stream.py index a24b4106..10b7dc76 100644 --- a/examples/generate-stream/main.py +++ b/examples/generate-stream.py @@ -1,5 +1,5 @@ from ollama import generate -for part in generate('mistral', 'Why is the sky blue?', stream=True): +for part in generate('llama3.2', 'Why is the sky blue?', stream=True): print(part['response'], end='', flush=True) diff --git a/examples/generate/main.py b/examples/generate.py similarity index 50% rename from examples/generate/main.py rename to examples/generate.py index e39e2950..1a2311dc 100644 --- a/examples/generate/main.py +++ b/examples/generate.py @@ -1,5 +1,5 @@ from ollama import generate -response = generate('mistral', 'Why is the sky blue?') +response = generate('llama3.2', 'Why is the sky blue?') print(response['response']) diff --git a/examples/list.py b/examples/list.py new file mode 100644 index 00000000..32d45257 --- /dev/null +++ b/examples/list.py @@ -0,0 +1,14 @@ +from ollama import list +from ollama import ListResponse + +response: ListResponse = list() + +for model in response.models: + print('Name:', model.model) + print(' Size (MB):', f'{(model.size.real / 1024 / 1024):.2f}') + if model.details: + print(' Format:', model.details.format) + print(' Family:', model.details.family) + print(' Parameter Size:', model.details.parameter_size) + print(' Quantization Level:', model.details.quantization_level) + print('\n') diff --git a/examples/multimodal-chat.py b/examples/multimodal-chat.py new file mode 100644 index 00000000..8aff9f46 --- /dev/null +++ b/examples/multimodal-chat.py @@ -0,0 +1,23 @@ +from ollama import chat +# from pathlib import Path + +# Pass in the path to the image +path = input('Please enter the path to the image: ') + +# You can also pass in base64 encoded image data +# img = base64.b64encode(Path(path).read_bytes()).decode() +# or the raw bytes +# img = Path(path).read_bytes() + +response = chat( + model='llama3.2-vision', + messages=[ + { + 'role': 'user', + 'content': 'What is in this image? 
Be concise.', + 'images': [path], + } + ], +) + +print(response.message.content) diff --git a/examples/multimodal/main.py b/examples/multimodal-generate.py similarity index 100% rename from examples/multimodal/main.py rename to examples/multimodal-generate.py diff --git a/examples/ps.py b/examples/ps.py new file mode 100644 index 00000000..34d5230a --- /dev/null +++ b/examples/ps.py @@ -0,0 +1,27 @@ +from ollama import ps, pull, chat +from ollama import ProcessResponse + +# Ensure at least one model is loaded +response = pull('llama3.2', stream=True) +progress_states = set() +for progress in response: + if progress.get('status') in progress_states: + continue + progress_states.add(progress.get('status')) + print(progress.get('status')) + +print('\n') + +print('Waiting for model to load... \n') +chat(model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}]) + + +response: ProcessResponse = ps() +for model in response.models: + print('Model: ', model.model) + print(' Digest: ', model.digest) + print(' Expires at: ', model.expires_at) + print(' Size: ', model.size) + print(' Size vram: ', model.size_vram) + print(' Details: ', model.details) + print('\n') diff --git a/examples/ps/main.py b/examples/ps/main.py deleted file mode 100644 index 822d09aa..00000000 --- a/examples/ps/main.py +++ /dev/null @@ -1,31 +0,0 @@ -from ollama import ps, pull, chat - -response = pull('mistral', stream=True) -progress_states = set() -for progress in response: - if progress.get('status') in progress_states: - continue - progress_states.add(progress.get('status')) - print(progress.get('status')) - -print('\n') - -response = chat('mistral', messages=[{'role': 'user', 'content': 'Hello!'}]) -print(response['message']['content']) - -print('\n') - -response = ps() - -name = response['models'][0]['name'] -size = response['models'][0]['size'] -size_vram = response['models'][0]['size_vram'] - -if size == size_vram: - print(f'{name}: 100% GPU') -elif not size_vram: - print(f'{name}: 100% CPU') -else: - size_cpu = size - size_vram - cpu_percent = round(size_cpu / size * 100) - print(f'{name}: {cpu_percent}% CPU/{100 - cpu_percent}% GPU') diff --git a/examples/pull-progress/README.md b/examples/pull-progress/README.md deleted file mode 100644 index 8a44f60c..00000000 --- a/examples/pull-progress/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# pull-progress - -This example emulates `ollama pull` using the Python library and [`tqdm`](https://tqdm.github.io/). 
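Editorial aside, not part of the diff: the deleted `examples/ps/main.py` above computed a CPU/GPU memory split that the new `ps.py` no longer prints. A minimal sketch of the same calculation against the typed `ProcessResponse` (field names taken from the new `ps.py`; assumes a model is already loaded):

```python
from ollama import ps
from ollama import ProcessResponse

response: ProcessResponse = ps()
for model in response.models:
  # size is the total memory used by the loaded model; size_vram is the portion held in GPU memory
  if model.size == model.size_vram:
    print(f'{model.model}: 100% GPU')
  elif not model.size_vram:
    print(f'{model.model}: 100% CPU')
  else:
    cpu_percent = round((model.size - model.size_vram) / model.size * 100)
    print(f'{model.model}: {cpu_percent}% CPU / {100 - cpu_percent}% GPU')
```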
- -## Setup - -```shell -pip install -r requirements.txt -``` diff --git a/examples/pull-progress/requirements.txt b/examples/pull-progress/requirements.txt deleted file mode 100644 index ae3df91e..00000000 --- a/examples/pull-progress/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -tqdm==4.66.1 diff --git a/examples/pull-progress/main.py b/examples/pull.py similarity index 92% rename from examples/pull-progress/main.py rename to examples/pull.py index 89b2f3a1..e24f2e94 100644 --- a/examples/pull-progress/main.py +++ b/examples/pull.py @@ -3,7 +3,7 @@ current_digest, bars = '', {} -for progress in pull('mistral', stream=True): +for progress in pull('llama3.2', stream=True): digest = progress.get('digest', '') if digest != current_digest and current_digest in bars: bars[current_digest].close() diff --git a/examples/tools.py b/examples/tools.py new file mode 100644 index 00000000..6151cd96 --- /dev/null +++ b/examples/tools.py @@ -0,0 +1,66 @@ +from ollama import chat +from ollama import ChatResponse + + +def add_two_numbers(a: int, b: int) -> int: + """ + Add two numbers + + Args: + a (int): The first number + b (int): The second number + + Returns: + int: The sum of the two numbers + """ + return a + b + + +def subtract_two_numbers(a: int, b: int) -> int: + """ + Subtract two numbers + """ + return a - b + + +# Tools can still be manually defined and passed into chat +subtract_two_numbers_tool = { + 'type': 'function', + 'function': { + 'name': 'subtract_two_numbers', + 'description': 'Subtract two numbers', + 'parameters': { + 'type': 'object', + 'required': ['a', 'b'], + 'properties': { + 'a': {'type': 'integer', 'description': 'The first number'}, + 'b': {'type': 'integer', 'description': 'The second number'}, + }, + }, + }, +} + +prompt = 'What is three plus one?' +print('Prompt:', prompt) + +available_functions = { + 'add_two_numbers': add_two_numbers, + 'subtract_two_numbers': subtract_two_numbers, +} + +response: ChatResponse = chat( + 'llama3.1', + messages=[{'role': 'user', 'content': prompt}], + tools=[add_two_numbers, subtract_two_numbers_tool], +) + +if response.message.tool_calls: + # There may be multiple tool calls in the response + for tool in response.message.tool_calls: + # Ensure the function is available, and then call it + if function_to_call := available_functions.get(tool.function.name): + print('Calling function:', tool.function.name) + print('Arguments:', tool.function.arguments) + print('Function output:', function_to_call(**tool.function.arguments)) + else: + print('Function', tool.function.name, 'not found') diff --git a/examples/tools/README.md b/examples/tools/README.md deleted file mode 100644 index 85ca5dd8..00000000 --- a/examples/tools/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# tools - -This example demonstrates how to utilize tool calls with an asynchronous Ollama client and the chat endpoint. 
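Editorial aside, not part of the diff: the removed `examples/tools/main.py` below also demonstrated the second half of the tool-calling loop, feeding the function result back to the model as a `'tool'` message and asking for a final answer. A minimal sketch of that round trip redone against the new typed API (the `'tool'` message shape is carried over from the removed example rather than defined in this PR):

```python
from ollama import chat


def add_two_numbers(a: int, b: int) -> int:
  """
  Add two numbers

  Args:
    a (int): The first number
    b (int): The second number

  Returns:
    int: The sum of the two numbers
  """
  return a + b


messages = [{'role': 'user', 'content': 'What is three plus one?'}]
response = chat('llama3.1', messages=messages, tools=[add_two_numbers])

# Record the assistant turn, execute each requested tool, and append the outputs
messages.append(response.message)
for tool in response.message.tool_calls or []:
  result = add_two_numbers(**tool.function.arguments)
  messages.append({'role': 'tool', 'content': str(result)})

# Second call: the model can now phrase a final answer using the tool output
final = chat('llama3.1', messages=messages)
print(final.message.content)
```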
diff --git a/examples/tools/main.py b/examples/tools/main.py deleted file mode 100644 index 133b2384..00000000 --- a/examples/tools/main.py +++ /dev/null @@ -1,87 +0,0 @@ -import json -import ollama -import asyncio - - -# Simulates an API call to get flight times -# In a real application, this would fetch data from a live database or API -def get_flight_times(departure: str, arrival: str) -> str: - flights = { - 'NYC-LAX': {'departure': '08:00 AM', 'arrival': '11:30 AM', 'duration': '5h 30m'}, - 'LAX-NYC': {'departure': '02:00 PM', 'arrival': '10:30 PM', 'duration': '5h 30m'}, - 'LHR-JFK': {'departure': '10:00 AM', 'arrival': '01:00 PM', 'duration': '8h 00m'}, - 'JFK-LHR': {'departure': '09:00 PM', 'arrival': '09:00 AM', 'duration': '7h 00m'}, - 'CDG-DXB': {'departure': '11:00 AM', 'arrival': '08:00 PM', 'duration': '6h 00m'}, - 'DXB-CDG': {'departure': '03:00 AM', 'arrival': '07:30 AM', 'duration': '7h 30m'}, - } - - key = f'{departure}-{arrival}'.upper() - return json.dumps(flights.get(key, {'error': 'Flight not found'})) - - -async def run(model: str): - client = ollama.AsyncClient() - # Initialize conversation with a user query - messages = [{'role': 'user', 'content': 'What is the flight time from New York (NYC) to Los Angeles (LAX)?'}] - - # First API call: Send the query and function description to the model - response = await client.chat( - model=model, - messages=messages, - tools=[ - { - 'type': 'function', - 'function': { - 'name': 'get_flight_times', - 'description': 'Get the flight times between two cities', - 'parameters': { - 'type': 'object', - 'properties': { - 'departure': { - 'type': 'string', - 'description': 'The departure city (airport code)', - }, - 'arrival': { - 'type': 'string', - 'description': 'The arrival city (airport code)', - }, - }, - 'required': ['departure', 'arrival'], - }, - }, - }, - ], - ) - - # Add the model's response to the conversation history - messages.append(response['message']) - - # Check if the model decided to use the provided function - if not response['message'].get('tool_calls'): - print("The model didn't use the function. 
Its response was:") - print(response['message']['content']) - return - - # Process function calls made by the model - if response['message'].get('tool_calls'): - available_functions = { - 'get_flight_times': get_flight_times, - } - for tool in response['message']['tool_calls']: - function_to_call = available_functions[tool['function']['name']] - function_response = function_to_call(tool['function']['arguments']['departure'], tool['function']['arguments']['arrival']) - # Add function response to the conversation - messages.append( - { - 'role': 'tool', - 'content': function_response, - } - ) - - # Second API call: Get final response from the model - final_response = await client.chat(model=model, messages=messages) - print(final_response['message']['content']) - - -# Run the async function -asyncio.run(run('mistral')) diff --git a/ollama/__init__.py b/ollama/__init__.py index c452f710..23d736a8 100644 --- a/ollama/__init__.py +++ b/ollama/__init__.py @@ -1,10 +1,17 @@ from ollama._client import Client, AsyncClient from ollama._types import ( + Options, + Message, + Tool, GenerateResponse, ChatResponse, + EmbedResponse, + EmbeddingsResponse, + StatusResponse, ProgressResponse, - Message, - Options, + ListResponse, + ShowResponse, + ProcessResponse, RequestError, ResponseError, ) @@ -12,25 +19,20 @@ __all__ = [ 'Client', 'AsyncClient', + 'Options', + 'Message', + 'Tool', 'GenerateResponse', 'ChatResponse', + 'EmbedResponse', + 'EmbeddingsResponse', + 'StatusResponse', 'ProgressResponse', - 'Message', - 'Options', + 'ListResponse', + 'ShowResponse', + 'ProcessResponse', 'RequestError', 'ResponseError', - 'generate', - 'chat', - 'embed', - 'embeddings', - 'pull', - 'push', - 'create', - 'delete', - 'list', - 'copy', - 'show', - 'ps', ] _client = Client() diff --git a/ollama/_client.py b/ollama/_client.py index ec9acb90..548f3432 100644 --- a/ollama/_client.py +++ b/ollama/_client.py @@ -1,21 +1,31 @@ -import ipaddress import os import io import json -import httpx -import binascii import platform +import ipaddress import urllib.parse from os import PathLike from pathlib import Path -from copy import deepcopy from hashlib import sha256 -from base64 import b64encode, b64decode -from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal, overload +from typing import ( + Any, + Callable, + Literal, + Mapping, + Optional, + Sequence, + Type, + TypeVar, + Union, + overload, +) import sys + +from ollama._utils import convert_function_to_tool + if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: @@ -28,7 +38,38 @@ except metadata.PackageNotFoundError: __version__ = '0.0.0' -from ollama._types import Message, Options, RequestError, ResponseError, Tool +import httpx + +from ollama._types import ( + ChatRequest, + ChatResponse, + CreateRequest, + CopyRequest, + DeleteRequest, + EmbedRequest, + EmbedResponse, + EmbeddingsRequest, + EmbeddingsResponse, + GenerateRequest, + GenerateResponse, + Image, + ListResponse, + Message, + Options, + ProcessResponse, + ProgressResponse, + PullRequest, + PushRequest, + RequestError, + ResponseError, + ShowRequest, + ShowResponse, + StatusResponse, + Tool, +) + + +T = TypeVar('T') class BaseClient: @@ -38,6 +79,7 @@ def __init__( host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, + headers: Optional[Mapping[str, str]] = None, **kwargs, ) -> None: """ @@ -48,16 +90,20 @@ def __init__( `kwargs` are passed to the httpx client. 
""" - headers = kwargs.pop('headers', {}) - headers['Content-Type'] = 'application/json' - headers['Accept'] = 'application/json' - headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' - self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, - headers=headers, + # Lowercase all headers to ensure override + headers={ + k.lower(): v + for k, v in { + **(headers or {}), + 'Content-Type': 'application/json', + 'Accept': 'application/json', + 'User-Agent': f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}', + }.items() + }, **kwargs, ) @@ -66,37 +112,67 @@ class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) - def _request(self, method: str, url: str, **kwargs) -> httpx.Response: - response = self._client.request(method, url, **kwargs) - + def _request_raw(self, *args, **kwargs): + r = self._client.request(*args, **kwargs) try: - response.raise_for_status() + r.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None + return r - return response + @overload + def _request( + self, + cls: Type[T], + *args, + stream: Literal[False] = False, + **kwargs, + ) -> T: ... - def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: - with self._client.stream(method, url, **kwargs) as r: - try: - r.raise_for_status() - except httpx.HTTPStatusError as e: - e.response.read() - raise ResponseError(e.response.text, e.response.status_code) from None + @overload + def _request( + self, + cls: Type[T], + *args, + stream: Literal[True] = True, + **kwargs, + ) -> Iterator[T]: ... - for line in r.iter_lines(): - partial = json.loads(line) - if e := partial.get('error'): - raise ResponseError(e) - yield partial + @overload + def _request( + self, + cls: Type[T], + *args, + stream: bool = False, + **kwargs, + ) -> Union[T, Iterator[T]]: ... - def _request_stream( + def _request( self, + cls: Type[T], *args, stream: bool = False, **kwargs, - ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: - return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() + ) -> Union[T, Iterator[T]]: + if stream: + + def inner(): + with self._client.stream(*args, **kwargs) as r: + try: + r.raise_for_status() + except httpx.HTTPStatusError as e: + e.response.read() + raise ResponseError(e.response.text, e.response.status_code) from None + + for line in r.iter_lines(): + part = json.loads(line) + if err := part.get('error'): + raise ResponseError(err) + yield cls(**part) + + return inner() + + return cls(**self._request_raw(*args, **kwargs).json()) @overload def generate( @@ -104,16 +180,17 @@ def generate( model: str = '', prompt: str = '', suffix: str = '', + *, system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: Literal[False] = False, raw: bool = False, - format: Literal['', 'json'] = '', - images: Optional[Sequence[AnyStr]] = None, - options: Optional[Options] = None, + format: Optional[Literal['', 'json']] = None, + images: Optional[Sequence[Union[str, bytes]]] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Mapping[str, Any]: ... + ) -> GenerateResponse: ... 
@overload def generate( @@ -121,32 +198,34 @@ def generate( model: str = '', prompt: str = '', suffix: str = '', + *, system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: Literal[True] = True, raw: bool = False, - format: Literal['', 'json'] = '', - images: Optional[Sequence[AnyStr]] = None, - options: Optional[Options] = None, + format: Optional[Literal['', 'json']] = None, + images: Optional[Sequence[Union[str, bytes]]] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Iterator[Mapping[str, Any]]: ... + ) -> Iterator[GenerateResponse]: ... def generate( self, model: str = '', - prompt: str = '', - suffix: str = '', - system: str = '', - template: str = '', + prompt: Optional[str] = None, + suffix: Optional[str] = None, + *, + system: Optional[str] = None, + template: Optional[str] = None, context: Optional[Sequence[int]] = None, stream: bool = False, - raw: bool = False, - format: Literal['', 'json'] = '', - images: Optional[Sequence[AnyStr]] = None, - options: Optional[Options] = None, + raw: Optional[bool] = None, + format: Optional[Literal['', 'json']] = None, + images: Optional[Sequence[Union[str, bytes]]] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: + ) -> Union[GenerateResponse, Iterator[GenerateResponse]]: """ Create a response using the requested model. @@ -157,26 +236,24 @@ def generate( Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ - if not model: - raise RequestError('must provide a model') - - return self._request_stream( + return self._request( + GenerateResponse, 'POST', '/api/generate', - json={ - 'model': model, - 'prompt': prompt, - 'suffix': suffix, - 'system': system, - 'template': template, - 'context': context or [], - 'stream': stream, - 'raw': raw, - 'images': [_encode_image(image) for image in images or []], - 'format': format, - 'options': options or {}, - 'keep_alive': keep_alive, - }, + json=GenerateRequest( + model=model, + prompt=prompt, + suffix=suffix, + system=system, + template=template, + context=context, + stream=stream, + raw=raw, + format=format, + images=[Image(value=image) for image in images] if images else None, + options=options, + keep_alive=keep_alive, + ).model_dump(exclude_none=True), stream=stream, ) @@ -184,39 +261,66 @@ def generate( def chat( self, model: str = '', - messages: Optional[Sequence[Message]] = None, - tools: Optional[Sequence[Tool]] = None, + messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, + *, + tools: Optional[Sequence[Union[Mapping[str, Any], Tool]]] = None, stream: Literal[False] = False, - format: Literal['', 'json'] = '', - options: Optional[Options] = None, + format: Optional[Literal['', 'json']] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Mapping[str, Any]: ... + ) -> ChatResponse: ... 
@overload def chat( self, model: str = '', - messages: Optional[Sequence[Message]] = None, - tools: Optional[Sequence[Tool]] = None, + messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, + *, + tools: Optional[Sequence[Union[Mapping[str, Any], Tool]]] = None, stream: Literal[True] = True, - format: Literal['', 'json'] = '', - options: Optional[Options] = None, + format: Optional[Literal['', 'json']] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Iterator[Mapping[str, Any]]: ... + ) -> Iterator[ChatResponse]: ... def chat( self, model: str = '', - messages: Optional[Sequence[Message]] = None, - tools: Optional[Sequence[Tool]] = None, + messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, + *, + tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None, stream: bool = False, - format: Literal['', 'json'] = '', - options: Optional[Options] = None, + format: Optional[Literal['', 'json']] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: + ) -> Union[ChatResponse, Iterator[ChatResponse]]: """ Create a chat response using the requested model. + Args: + tools: + A JSON schema as a dict, an Ollama Tool or a Python Function. + Python functions need to follow Google style docstrings to be converted to an Ollama Tool. + For more information, see: https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings + stream: Whether to stream the response. + format: The format of the response. + + Example: + def add_two_numbers(a: int, b: int) -> int: + ''' + Add two numbers together. + + Args: + a: First number to add + b: Second number to add + + Returns: + int: The sum of a and b + ''' + return a + b + + client.chat(model='llama3.2', tools=[add_two_numbers], messages=[...]) + Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. @@ -224,106 +328,104 @@ def chat( Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" - if not model: - raise RequestError('must provide a model') - - messages = deepcopy(messages) - - for message in messages or []: - if images := message.get('images'): - message['images'] = [_encode_image(image) for image in images] - - return self._request_stream( + return self._request( + ChatResponse, 'POST', '/api/chat', - json={ - 'model': model, - 'messages': messages, - 'tools': tools or [], - 'stream': stream, - 'format': format, - 'options': options or {}, - 'keep_alive': keep_alive, - }, + json=ChatRequest( + model=model, + messages=[message for message in _copy_messages(messages)], + tools=[tool for tool in _copy_tools(tools)], + stream=stream, + format=format, + options=options, + keep_alive=keep_alive, + ).model_dump(exclude_none=True), stream=stream, ) def embed( self, model: str = '', - input: Union[str, Sequence[AnyStr]] = '', - truncate: bool = True, - options: Optional[Options] = None, + input: Union[str, Sequence[str]] = '', + truncate: Optional[bool] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Mapping[str, Any]: - if not model: - raise RequestError('must provide a model') - + ) -> EmbedResponse: return self._request( + EmbedResponse, 'POST', '/api/embed', - json={ - 'model': model, - 'input': input, - 'truncate': truncate, - 'options': options or {}, - 'keep_alive': keep_alive, - }, - ).json() + json=EmbedRequest( + model=model, + input=input, + truncate=truncate, + options=options, + keep_alive=keep_alive, + ).model_dump(exclude_none=True), + ) def embeddings( self, model: str = '', - prompt: str = '', - options: Optional[Options] = None, + prompt: Optional[str] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Mapping[str, Sequence[float]]: + ) -> EmbeddingsResponse: + """ + Deprecated in favor of `embed`. + """ return self._request( + EmbeddingsResponse, 'POST', '/api/embeddings', - json={ - 'model': model, - 'prompt': prompt, - 'options': options or {}, - 'keep_alive': keep_alive, - }, - ).json() + json=EmbeddingsRequest( + model=model, + prompt=prompt, + options=options, + keep_alive=keep_alive, + ).model_dump(exclude_none=True), + ) @overload def pull( self, model: str, + *, insecure: bool = False, stream: Literal[False] = False, - ) -> Mapping[str, Any]: ... + ) -> ProgressResponse: ... @overload def pull( self, model: str, + *, insecure: bool = False, stream: Literal[True] = True, - ) -> Iterator[Mapping[str, Any]]: ... + ) -> Iterator[ProgressResponse]: ... def pull( self, model: str, + *, insecure: bool = False, stream: bool = False, - ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: + ) -> Union[ProgressResponse, Iterator[ProgressResponse]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ - return self._request_stream( + return self._request( + ProgressResponse, 'POST', '/api/pull', - json={ - 'name': model, - 'insecure': insecure, - 'stream': stream, - }, + json=PullRequest( + model=model, + insecure=insecure, + stream=stream, + ).model_dump(exclude_none=True), stream=stream, ) @@ -331,37 +433,41 @@ def pull( def push( self, model: str, + *, insecure: bool = False, stream: Literal[False] = False, - ) -> Mapping[str, Any]: ... + ) -> ProgressResponse: ... 
@overload def push( self, model: str, + *, insecure: bool = False, stream: Literal[True] = True, - ) -> Iterator[Mapping[str, Any]]: ... + ) -> Iterator[ProgressResponse]: ... def push( self, model: str, + *, insecure: bool = False, stream: bool = False, - ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: + ) -> Union[ProgressResponse, Iterator[ProgressResponse]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ - return self._request_stream( + return self._request( + ProgressResponse, 'POST', '/api/push', - json={ - 'name': model, - 'insecure': insecure, - 'stream': stream, - }, + json=PushRequest( + model=model, + insecure=insecure, + stream=stream, + ).model_dump(exclude_none=True), stream=stream, ) @@ -371,9 +477,10 @@ def create( model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, + *, quantize: Optional[str] = None, stream: Literal[False] = False, - ) -> Mapping[str, Any]: ... + ) -> ProgressResponse: ... @overload def create( @@ -381,18 +488,20 @@ def create( model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, + *, quantize: Optional[str] = None, stream: Literal[True] = True, - ) -> Iterator[Mapping[str, Any]]: ... + ) -> Iterator[ProgressResponse]: ... def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, + *, quantize: Optional[str] = None, stream: bool = False, - ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: + ) -> Union[ProgressResponse, Iterator[ProgressResponse]]: """ Raises `ResponseError` if the request could not be fulfilled. @@ -405,15 +514,16 @@ def create( else: raise RequestError('must provide either path or modelfile') - return self._request_stream( + return self._request( + ProgressResponse, 'POST', '/api/create', - json={ - 'name': model, - 'modelfile': modelfile, - 'stream': stream, - 'quantize': quantize, - }, + json=CreateRequest( + model=model, + modelfile=modelfile, + stream=stream, + quantize=quantize, + ).model_dump(exclude_none=True), stream=stream, ) @@ -446,77 +556,126 @@ def _create_blob(self, path: Union[str, Path]) -> str: digest = f'sha256:{sha256sum.hexdigest()}' - try: - self._request('HEAD', f'/api/blobs/{digest}') - except ResponseError as e: - if e.status_code != 404: - raise - - with open(path, 'rb') as r: - self._request('POST', f'/api/blobs/{digest}', content=r) + with open(path, 'rb') as r: + self._request_raw('POST', f'/api/blobs/sha256:{digest}', content=r) return digest - def delete(self, model: str) -> Mapping[str, Any]: - response = self._request('DELETE', '/api/delete', json={'name': model}) - return {'status': 'success' if response.status_code == 200 else 'error'} + def list(self) -> ListResponse: + return self._request( + ListResponse, + 'GET', + '/api/tags', + ) - def list(self) -> Mapping[str, Any]: - return self._request('GET', '/api/tags').json() + def delete(self, model: str) -> StatusResponse: + r = self._request_raw( + 'DELETE', + '/api/delete', + json=DeleteRequest( + model=model, + ).model_dump(exclude_none=True), + ) + return StatusResponse( + status='success' if r.status_code == 200 else 'error', + ) - def copy(self, source: str, destination: str) -> Mapping[str, Any]: - response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) - return {'status': 'success' if response.status_code == 200 else 'error'} + def 
copy(self, source: str, destination: str) -> StatusResponse: + r = self._request_raw( + 'POST', + '/api/copy', + json=CopyRequest( + source=source, + destination=destination, + ).model_dump(exclude_none=True), + ) + return StatusResponse( + status='success' if r.status_code == 200 else 'error', + ) - def show(self, model: str) -> Mapping[str, Any]: - return self._request('POST', '/api/show', json={'name': model}).json() + def show(self, model: str) -> ShowResponse: + return self._request( + ShowResponse, + 'POST', + '/api/show', + json=ShowRequest( + model=model, + ).model_dump(exclude_none=True), + ) - def ps(self) -> Mapping[str, Any]: - return self._request('GET', '/api/ps').json() + def ps(self) -> ProcessResponse: + return self._request( + ProcessResponse, + 'GET', + '/api/ps', + ) class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) - async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: - response = await self._client.request(method, url, **kwargs) - + async def _request_raw(self, *args, **kwargs): + r = await self._client.request(*args, **kwargs) try: - response.raise_for_status() + r.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None + return r - return response - - async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: - async def inner(): - async with self._client.stream(method, url, **kwargs) as r: - try: - r.raise_for_status() - except httpx.HTTPStatusError as e: - await e.response.aread() - raise ResponseError(e.response.text, e.response.status_code) from None + @overload + async def _request( + self, + cls: Type[T], + *args, + stream: Literal[False] = False, + **kwargs, + ) -> T: ... - async for line in r.aiter_lines(): - partial = json.loads(line) - if e := partial.get('error'): - raise ResponseError(e) - yield partial + @overload + async def _request( + self, + cls: Type[T], + *args, + stream: Literal[True] = True, + **kwargs, + ) -> AsyncIterator[T]: ... - return inner() + @overload + async def _request( + self, + cls: Type[T], + *args, + stream: bool = False, + **kwargs, + ) -> Union[T, AsyncIterator[T]]: ... 
- async def _request_stream( + async def _request( self, + cls: Type[T], *args, stream: bool = False, **kwargs, - ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: + ) -> Union[T, AsyncIterator[T]]: if stream: - return await self._stream(*args, **kwargs) - response = await self._request(*args, **kwargs) - return response.json() + async def inner(): + async with self._client.stream(*args, **kwargs) as r: + try: + r.raise_for_status() + except httpx.HTTPStatusError as e: + await e.response.aread() + raise ResponseError(e.response.text, e.response.status_code) from None + + async for line in r.aiter_lines(): + part = json.loads(line) + if err := part.get('error'): + raise ResponseError(err) + yield cls(**part) + + return inner() + + return cls(**(await self._request_raw(*args, **kwargs)).json()) @overload async def generate( @@ -524,16 +683,17 @@ async def generate( model: str = '', prompt: str = '', suffix: str = '', + *, system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: Literal[False] = False, raw: bool = False, - format: Literal['', 'json'] = '', - images: Optional[Sequence[AnyStr]] = None, - options: Optional[Options] = None, + format: Optional[Literal['', 'json']] = None, + images: Optional[Sequence[Union[str, bytes]]] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Mapping[str, Any]: ... + ) -> GenerateResponse: ... @overload async def generate( @@ -541,32 +701,34 @@ async def generate( model: str = '', prompt: str = '', suffix: str = '', + *, system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: Literal[True] = True, raw: bool = False, - format: Literal['', 'json'] = '', - images: Optional[Sequence[AnyStr]] = None, - options: Optional[Options] = None, + format: Optional[Literal['', 'json']] = None, + images: Optional[Sequence[Union[str, bytes]]] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> AsyncIterator[Mapping[str, Any]]: ... + ) -> AsyncIterator[GenerateResponse]: ... async def generate( self, model: str = '', - prompt: str = '', - suffix: str = '', - system: str = '', - template: str = '', + prompt: Optional[str] = None, + suffix: Optional[str] = None, + *, + system: Optional[str] = None, + template: Optional[str] = None, context: Optional[Sequence[int]] = None, stream: bool = False, - raw: bool = False, - format: Literal['', 'json'] = '', - images: Optional[Sequence[AnyStr]] = None, - options: Optional[Options] = None, + raw: Optional[bool] = None, + format: Optional[Literal['', 'json']] = None, + images: Optional[Sequence[Union[str, bytes]]] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: + ) -> Union[GenerateResponse, AsyncIterator[GenerateResponse]]: """ Create a response using the requested model. @@ -576,26 +738,24 @@ async def generate( Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. 
""" - if not model: - raise RequestError('must provide a model') - - return await self._request_stream( + return await self._request( + GenerateResponse, 'POST', '/api/generate', - json={ - 'model': model, - 'prompt': prompt, - 'suffix': suffix, - 'system': system, - 'template': template, - 'context': context or [], - 'stream': stream, - 'raw': raw, - 'images': [_encode_image(image) for image in images or []], - 'format': format, - 'options': options or {}, - 'keep_alive': keep_alive, - }, + json=GenerateRequest( + model=model, + prompt=prompt, + suffix=suffix, + system=system, + template=template, + context=context, + stream=stream, + raw=raw, + format=format, + images=[Image(value=image) for image in images] if images else None, + options=options, + keep_alive=keep_alive, + ).model_dump(exclude_none=True), stream=stream, ) @@ -603,149 +763,171 @@ async def generate( async def chat( self, model: str = '', - messages: Optional[Sequence[Message]] = None, - tools: Optional[Sequence[Tool]] = None, + messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, + *, + tools: Optional[Sequence[Union[Mapping[str, Any], Tool]]] = None, stream: Literal[False] = False, - format: Literal['', 'json'] = '', - options: Optional[Options] = None, + format: Optional[Literal['', 'json']] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Mapping[str, Any]: ... + ) -> ChatResponse: ... @overload async def chat( self, model: str = '', - messages: Optional[Sequence[Message]] = None, - tools: Optional[Sequence[Tool]] = None, + messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, + *, + tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None, stream: Literal[True] = True, - format: Literal['', 'json'] = '', - options: Optional[Options] = None, + format: Optional[Literal['', 'json']] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> AsyncIterator[Mapping[str, Any]]: ... + ) -> AsyncIterator[ChatResponse]: ... async def chat( self, model: str = '', - messages: Optional[Sequence[Message]] = None, - tools: Optional[Sequence[Tool]] = None, + messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None, + *, + tools: Optional[Sequence[Union[Mapping[str, Any], Tool]]] = None, stream: bool = False, - format: Literal['', 'json'] = '', - options: Optional[Options] = None, + format: Optional[Literal['', 'json']] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: + ) -> Union[ChatResponse, AsyncIterator[ChatResponse]]: """ Create a chat response using the requested model. + Args: + tools: + A JSON schema as a dict, an Ollama Tool or a Python Function. + Python functions need to follow Google style docstrings to be converted to an Ollama Tool. + For more information, see: https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings + stream: Whether to stream the response. + format: The format of the response. + + Example: + def add_two_numbers(a: int, b: int) -> int: + ''' + Add two numbers together. + + Args: + a: First number to add + b: Second number to add + + Returns: + int: The sum of a and b + ''' + return a + b + + await client.chat(model='llama3.2', tools=[add_two_numbers], messages=[...]) + Raises `RequestError` if a model is not provided. 
Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ - if not model: - raise RequestError('must provide a model') - messages = deepcopy(messages) - - for message in messages or []: - if images := message.get('images'): - message['images'] = [_encode_image(image) for image in images] - - return await self._request_stream( + return await self._request( + ChatResponse, 'POST', '/api/chat', - json={ - 'model': model, - 'messages': messages, - 'tools': tools or [], - 'stream': stream, - 'format': format, - 'options': options or {}, - 'keep_alive': keep_alive, - }, + json=ChatRequest( + model=model, + messages=[message for message in _copy_messages(messages)], + tools=[tool for tool in _copy_tools(tools)], + stream=stream, + format=format, + options=options, + keep_alive=keep_alive, + ).model_dump(exclude_none=True), stream=stream, ) async def embed( self, model: str = '', - input: Union[str, Sequence[AnyStr]] = '', - truncate: bool = True, - options: Optional[Options] = None, + input: Union[str, Sequence[str]] = '', + truncate: Optional[bool] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Mapping[str, Any]: - if not model: - raise RequestError('must provide a model') - - response = await self._request( + ) -> EmbedResponse: + return await self._request( + EmbedResponse, 'POST', '/api/embed', - json={ - 'model': model, - 'input': input, - 'truncate': truncate, - 'options': options or {}, - 'keep_alive': keep_alive, - }, + json=EmbedRequest( + model=model, + input=input, + truncate=truncate, + options=options, + keep_alive=keep_alive, + ).model_dump(exclude_none=True), ) - return response.json() - async def embeddings( self, model: str = '', - prompt: str = '', - options: Optional[Options] = None, + prompt: Optional[str] = None, + options: Optional[Union[Mapping[str, Any], Options]] = None, keep_alive: Optional[Union[float, str]] = None, - ) -> Mapping[str, Sequence[float]]: - response = await self._request( + ) -> EmbeddingsResponse: + """ + Deprecated in favor of `embed`. + """ + return await self._request( + EmbeddingsResponse, 'POST', '/api/embeddings', - json={ - 'model': model, - 'prompt': prompt, - 'options': options or {}, - 'keep_alive': keep_alive, - }, + json=EmbeddingsRequest( + model=model, + prompt=prompt, + options=options, + keep_alive=keep_alive, + ).model_dump(exclude_none=True), ) - return response.json() - @overload async def pull( self, model: str, + *, insecure: bool = False, stream: Literal[False] = False, - ) -> Mapping[str, Any]: ... + ) -> ProgressResponse: ... @overload async def pull( self, model: str, + *, insecure: bool = False, stream: Literal[True] = True, - ) -> AsyncIterator[Mapping[str, Any]]: ... + ) -> AsyncIterator[ProgressResponse]: ... async def pull( self, model: str, + *, insecure: bool = False, stream: bool = False, - ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: + ) -> Union[ProgressResponse, AsyncIterator[ProgressResponse]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" - return await self._request_stream( + return await self._request( + ProgressResponse, 'POST', '/api/pull', - json={ - 'name': model, - 'insecure': insecure, - 'stream': stream, - }, + json=PullRequest( + model=model, + insecure=insecure, + stream=stream, + ).model_dump(exclude_none=True), stream=stream, ) @@ -753,37 +935,41 @@ async def pull( async def push( self, model: str, + *, insecure: bool = False, stream: Literal[False] = False, - ) -> Mapping[str, Any]: ... + ) -> ProgressResponse: ... @overload async def push( self, model: str, + *, insecure: bool = False, stream: Literal[True] = True, - ) -> AsyncIterator[Mapping[str, Any]]: ... + ) -> AsyncIterator[ProgressResponse]: ... async def push( self, model: str, + *, insecure: bool = False, stream: bool = False, - ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: + ) -> Union[ProgressResponse, AsyncIterator[ProgressResponse]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ - return await self._request_stream( + return await self._request( + ProgressResponse, 'POST', '/api/push', - json={ - 'name': model, - 'insecure': insecure, - 'stream': stream, - }, + json=PushRequest( + model=model, + insecure=insecure, + stream=stream, + ).model_dump(exclude_none=True), stream=stream, ) @@ -793,9 +979,10 @@ async def create( model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, + *, quantize: Optional[str] = None, stream: Literal[False] = False, - ) -> Mapping[str, Any]: ... + ) -> ProgressResponse: ... @overload async def create( @@ -803,18 +990,20 @@ async def create( model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, + *, quantize: Optional[str] = None, stream: Literal[True] = True, - ) -> AsyncIterator[Mapping[str, Any]]: ... + ) -> AsyncIterator[ProgressResponse]: ... async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, + *, quantize: Optional[str] = None, stream: bool = False, - ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: + ) -> Union[ProgressResponse, AsyncIterator[ProgressResponse]]: """ Raises `ResponseError` if the request could not be fulfilled. 
@@ -827,15 +1016,16 @@ async def create( else: raise RequestError('must provide either path or modelfile') - return await self._request_stream( + return await self._request( + ProgressResponse, 'POST', '/api/create', - json={ - 'name': model, - 'modelfile': modelfile, - 'stream': stream, - 'quantize': quantize, - }, + json=CreateRequest( + model=model, + modelfile=modelfile, + stream=stream, + quantize=quantize, + ).model_dump(exclude_none=True), stream=stream, ) @@ -868,74 +1058,78 @@ async def _create_blob(self, path: Union[str, Path]) -> str: digest = f'sha256:{sha256sum.hexdigest()}' - try: - await self._request('HEAD', f'/api/blobs/{digest}') - except ResponseError as e: - if e.status_code != 404: - raise - - async def upload_bytes(): - with open(path, 'rb') as r: - while True: - chunk = r.read(32 * 1024) - if not chunk: - break - yield chunk + async def upload_bytes(): + with open(path, 'rb') as r: + while True: + chunk = r.read(32 * 1024) + if not chunk: + break + yield chunk - await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) + await self._request_raw('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest - async def delete(self, model: str) -> Mapping[str, Any]: - response = await self._request('DELETE', '/api/delete', json={'name': model}) - return {'status': 'success' if response.status_code == 200 else 'error'} - - async def list(self) -> Mapping[str, Any]: - response = await self._request('GET', '/api/tags') - return response.json() - - async def copy(self, source: str, destination: str) -> Mapping[str, Any]: - response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) - return {'status': 'success' if response.status_code == 200 else 'error'} + async def list(self) -> ListResponse: + return await self._request( + ListResponse, + 'GET', + '/api/tags', + ) - async def show(self, model: str) -> Mapping[str, Any]: - response = await self._request('POST', '/api/show', json={'name': model}) - return response.json() + async def delete(self, model: str) -> StatusResponse: + r = await self._request_raw( + 'DELETE', + '/api/delete', + json=DeleteRequest( + model=model, + ).model_dump(exclude_none=True), + ) + return StatusResponse( + status='success' if r.status_code == 200 else 'error', + ) - async def ps(self) -> Mapping[str, Any]: - response = await self._request('GET', '/api/ps') - return response.json() + async def copy(self, source: str, destination: str) -> StatusResponse: + r = await self._request_raw( + 'POST', + '/api/copy', + json=CopyRequest( + source=source, + destination=destination, + ).model_dump(exclude_none=True), + ) + return StatusResponse( + status='success' if r.status_code == 200 else 'error', + ) + async def show(self, model: str) -> ShowResponse: + return await self._request( + ShowResponse, + 'POST', + '/api/show', + json=ShowRequest( + model=model, + ).model_dump(exclude_none=True), + ) -def _encode_image(image) -> str: - """ - >>> _encode_image(b'ollama') - 'b2xsYW1h' - >>> _encode_image(io.BytesIO(b'ollama')) - 'b2xsYW1h' - >>> _encode_image('LICENSE') - 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' - >>> _encode_image(Path('LICENSE')) - 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' - >>> _encode_image('YWJj') - 'YWJj' - >>> _encode_image(b'YWJj') - 'YWJj' - """ + async def ps(self) -> ProcessResponse: + return await self._request( + ProcessResponse, + 'GET', + '/api/ps', + ) - if p := _as_path(image): - return b64encode(p.read_bytes()).decode('utf-8') - try: - b64decode(image, validate=True) - return image if isinstance(image, str) else image.decode('utf-8') - except (binascii.Error, TypeError): - ... 
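The removed `_encode_image` helper (and its doctests above) is superseded by the `Image` pydantic model added to `ollama/_types.py` further down, which handles paths, raw bytes, and already-encoded base64 strings at serialization time. A rough sketch of that behaviour (an illustrative sketch, not part of the patch; `photo.png` is a hypothetical filename):

```python
from ollama._types import Image

# Raw bytes are base64-encoded when the model is serialized,
# matching the old _encode_image(b'ollama') doctest.
print(Image(value=b'ollama').model_dump())   # 'b2xsYW1h'

# Strings that already decode as base64 pass through unchanged.
print(Image(value='b2xsYW1h').model_dump())  # 'b2xsYW1h'

# A string that looks like an image file but does not exist on disk
# raises at serialization time instead of being passed along silently.
Image(value='photo.png').model_dump()        # ValueError: File photo.png does not exist
```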
+def _copy_messages(messages: Optional[Sequence[Union[Mapping[str, Any], Message]]]) -> Iterator[Message]: + for message in messages or []: + yield Message.model_validate( + {k: [Image(value=image) for image in v] if k == 'images' else v for k, v in dict(message).items() if v}, + ) - if b := _as_bytesio(image): - return b64encode(b.read()).decode('utf-8') - raise RequestError('image must be bytes, path-like object, or file-like object') +def _copy_tools(tools: Optional[Sequence[Union[Mapping[str, Any], Tool, Callable]]] = None) -> Iterator[Tool]: + for unprocessed_tool in tools or []: + yield convert_function_to_tool(unprocessed_tool) if callable(unprocessed_tool) else Tool.model_validate(unprocessed_tool) def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: @@ -948,14 +1142,6 @@ def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: return None -def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: - if isinstance(s, io.BytesIO): - return s - elif isinstance(s, bytes): - return io.BytesIO(s) - return None - - def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) @@ -1033,9 +1219,9 @@ def _parse_host(host: Optional[str]) -> str: host = split.hostname or '127.0.0.1' port = split.port or port - # Fix missing square brackets for IPv6 from urlsplit try: if isinstance(ipaddress.ip_address(host), ipaddress.IPv6Address): + # Fix missing square brackets for IPv6 from urlsplit host = f'[{host}]' except ValueError: ... diff --git a/ollama/_types.py b/ollama/_types.py index 7bdcbe49..bcf88969 100644 --- a/ollama/_types.py +++ b/ollama/_types.py @@ -1,43 +1,165 @@ import json -from typing import Any, TypedDict, Sequence, Literal, Mapping +from base64 import b64decode, b64encode +from pathlib import Path +from datetime import datetime +from typing import Any, Mapping, Optional, Union, Sequence -import sys +from typing_extensions import Annotated, Literal -if sys.version_info < (3, 11): - from typing_extensions import NotRequired -else: - from typing import NotRequired +from pydantic import ( + BaseModel, + ByteSize, + ConfigDict, + Field, + model_serializer, +) -class BaseGenerateResponse(TypedDict): - model: str +class SubscriptableBaseModel(BaseModel): + def __getitem__(self, key: str) -> Any: + return getattr(self, key) + + def __setitem__(self, key: str, value: Any) -> None: + setattr(self, key, value) + + def __contains__(self, key: str) -> bool: + return hasattr(self, key) + + def get(self, key: str, default: Any = None) -> Any: + return getattr(self, key, default) + + +class Options(SubscriptableBaseModel): + # load time options + numa: Optional[bool] = None + num_ctx: Optional[int] = None + num_batch: Optional[int] = None + num_gpu: Optional[int] = None + main_gpu: Optional[int] = None + low_vram: Optional[bool] = None + f16_kv: Optional[bool] = None + logits_all: Optional[bool] = None + vocab_only: Optional[bool] = None + use_mmap: Optional[bool] = None + use_mlock: Optional[bool] = None + embedding_only: Optional[bool] = None + num_thread: Optional[int] = None + + # runtime options + num_keep: Optional[int] = None + seed: Optional[int] = None + num_predict: Optional[int] = None + top_k: Optional[int] = None + top_p: Optional[float] = None + tfs_z: Optional[float] = None + typical_p: Optional[float] = None + repeat_last_n: Optional[int] = None + temperature: Optional[float] = None + repeat_penalty: Optional[float] = None + presence_penalty: Optional[float] = None + frequency_penalty: Optional[float] = None + mirostat: Optional[int] = None + 
mirostat_tau: Optional[float] = None + mirostat_eta: Optional[float] = None + penalize_newline: Optional[bool] = None + stop: Optional[Sequence[str]] = None + + +class BaseRequest(SubscriptableBaseModel): + model: Annotated[str, Field(min_length=1)] + 'Model to use for the request.' + + +class BaseStreamableRequest(BaseRequest): + stream: Optional[bool] = None + 'Stream response.' + + +class BaseGenerateRequest(BaseStreamableRequest): + options: Optional[Union[Mapping[str, Any], Options]] = None + 'Options to use for the request.' + + format: Optional[Literal['', 'json']] = None + 'Format of the response.' + + keep_alive: Optional[Union[float, str]] = None + 'Keep model alive for the specified duration.' + + +class Image(BaseModel): + value: Union[str, bytes, Path] + + @model_serializer + def serialize_model(self): + if isinstance(self.value, (Path, bytes)): + return b64encode(self.value.read_bytes() if isinstance(self.value, Path) else self.value).decode() + + if isinstance(self.value, str): + if Path(self.value).exists(): + return b64encode(Path(self.value).read_bytes()).decode() + + if self.value.split('.')[-1] in ('png', 'jpg', 'jpeg', 'webp'): + raise ValueError(f'File {self.value} does not exist') + + try: + # Try to decode to check if it's already base64 + b64decode(self.value) + return self.value + except Exception: + raise ValueError('Invalid image data, expected base64 string or path to image file') from Exception + + +class GenerateRequest(BaseGenerateRequest): + prompt: Optional[str] = None + 'Prompt to generate response from.' + + suffix: Optional[str] = None + 'Suffix to append to the response.' + + system: Optional[str] = None + 'System prompt to prepend to the prompt.' + + template: Optional[str] = None + 'Template to use for the response.' + + context: Optional[Sequence[int]] = None + 'Tokenized history to use for the response.' + + raw: Optional[bool] = None + + images: Optional[Sequence[Image]] = None + 'Image data for multimodal models.' + + +class BaseGenerateResponse(SubscriptableBaseModel): + model: Optional[str] = None 'Model used to generate response.' - created_at: str + created_at: Optional[str] = None 'Time when the request was created.' - done: bool + done: Optional[bool] = None 'True if response is complete, otherwise False. Useful for streaming to detect the final response.' - done_reason: str + done_reason: Optional[str] = None 'Reason for completion. Only present when done is True.' - total_duration: int + total_duration: Optional[int] = None 'Total duration in nanoseconds.' - load_duration: int + load_duration: Optional[int] = None 'Load duration in nanoseconds.' - prompt_eval_count: int + prompt_eval_count: Optional[int] = None 'Number of tokens evaluated in the prompt.' - prompt_eval_duration: int + prompt_eval_duration: Optional[int] = None 'Duration of evaluating the prompt in nanoseconds.' - eval_count: int + eval_count: Optional[int] = None 'Number of tokens evaluated in inference.' - eval_duration: int + eval_duration: Optional[int] = None 'Duration of evaluating inference in nanoseconds.' @@ -49,43 +171,22 @@ class GenerateResponse(BaseGenerateResponse): response: str 'Response content. When streaming, this contains a fragment of the response.' - context: Sequence[int] + context: Optional[Sequence[int]] = None 'Tokenized history up to the point of the response.' -class ToolCallFunction(TypedDict): - """ - Tool call function. - """ - - name: str - 'Name of the function.' - - arguments: NotRequired[Mapping[str, Any]] - 'Arguments of the function.' 
- - -class ToolCall(TypedDict): - """ - Model tool calls. - """ - - function: ToolCallFunction - 'Function to be called.' - - -class Message(TypedDict): +class Message(SubscriptableBaseModel): """ Chat message. """ role: Literal['user', 'assistant', 'system', 'tool'] - "Assumed role of the message. Response messages always has role 'assistant' or 'tool'." + "Assumed role of the message. Response messages have role 'assistant' or 'tool'." - content: NotRequired[str] + content: Optional[str] = None 'Content of the message. Response messages contains message fragments when streaming.' - images: NotRequired[Sequence[Any]] + images: Optional[Sequence[Image]] = None """ Optional list of image data for multimodal models. @@ -97,33 +198,61 @@ class Message(TypedDict): Valid image formats depend on the model. See the model card for more information. """ - tool_calls: NotRequired[Sequence[ToolCall]] + class ToolCall(SubscriptableBaseModel): + """ + Model tool calls. + """ + + class Function(SubscriptableBaseModel): + """ + Tool call function. + """ + + name: str + 'Name of the function.' + + arguments: Mapping[str, Any] + 'Arguments of the function.' + + function: Function + 'Function to be called.' + + tool_calls: Optional[Sequence[ToolCall]] = None """ Tools calls to be made by the model. """ -class Property(TypedDict): - type: str - description: str - enum: NotRequired[Sequence[str]] # `enum` is optional and can be a list of strings +class Tool(SubscriptableBaseModel): + type: Optional[Literal['function']] = 'function' + + class Function(SubscriptableBaseModel): + name: Optional[str] = None + description: Optional[str] = None + + class Parameters(SubscriptableBaseModel): + type: Optional[Literal['object']] = 'object' + required: Optional[Sequence[str]] = None + + class Property(SubscriptableBaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + type: Optional[str] = None + description: Optional[str] = None + properties: Optional[Mapping[str, Property]] = None -class Parameters(TypedDict): - type: str - required: Sequence[str] - properties: Mapping[str, Property] + parameters: Optional[Parameters] = None + function: Optional[Function] = None -class ToolFunction(TypedDict): - name: str - description: str - parameters: Parameters +class ChatRequest(BaseGenerateRequest): + messages: Optional[Sequence[Union[Mapping[str, Any], Message]]] = None + 'Messages to chat with.' -class Tool(TypedDict): - type: str - function: ToolFunction + tools: Optional[Sequence[Tool]] = None + 'Tools to use for the chat.' class ChatResponse(BaseGenerateResponse): @@ -135,47 +264,157 @@ class ChatResponse(BaseGenerateResponse): 'Response message.' -class ProgressResponse(TypedDict): - status: str - completed: int - total: int - digest: str +class EmbedRequest(BaseRequest): + input: Union[str, Sequence[str]] + 'Input text to embed.' + truncate: Optional[bool] = None + 'Truncate the input to the maximum token length.' -class Options(TypedDict, total=False): - # load time options - numa: bool - num_ctx: int - num_batch: int - num_gpu: int - main_gpu: int - low_vram: bool - f16_kv: bool - logits_all: bool - vocab_only: bool - use_mmap: bool - use_mlock: bool - embedding_only: bool - num_thread: int + options: Optional[Union[Mapping[str, Any], Options]] = None + 'Options to use for the request.'
- # runtime options - num_keep: int - seed: int - num_predict: int - top_k: int - top_p: float - tfs_z: float - typical_p: float - repeat_last_n: int - temperature: float - repeat_penalty: float - presence_penalty: float - frequency_penalty: float - mirostat: int - mirostat_tau: float - mirostat_eta: float - penalize_newline: bool - stop: Sequence[str] + keep_alive: Optional[Union[float, str]] = None + + +class EmbedResponse(BaseGenerateResponse): + """ + Response returned by embed requests. + """ + + embeddings: Sequence[Sequence[float]] + 'Embeddings of the inputs.' + + +class EmbeddingsRequest(BaseRequest): + prompt: Optional[str] = None + 'Prompt to generate embeddings from.' + + options: Optional[Union[Mapping[str, Any], Options]] = None + 'Options to use for the request.' + + keep_alive: Optional[Union[float, str]] = None + + +class EmbeddingsResponse(SubscriptableBaseModel): + """ + Response returned by embeddings requests. + """ + + embedding: Sequence[float] + 'Embedding of the prompt.' + + +class PullRequest(BaseStreamableRequest): + """ + Request to pull the model. + """ + + insecure: Optional[bool] = None + 'Allow insecure (HTTP) connections.' + + +class PushRequest(BaseStreamableRequest): + """ + Request to push the model. + """ + + insecure: Optional[bool] = None + 'Allow insecure (HTTP) connections.' + + +class CreateRequest(BaseStreamableRequest): + """ + Request to create a new model. + """ + + modelfile: Optional[str] = None + + quantize: Optional[str] = None + + +class ModelDetails(SubscriptableBaseModel): + parent_model: Optional[str] = None + format: Optional[str] = None + family: Optional[str] = None + families: Optional[Sequence[str]] = None + parameter_size: Optional[str] = None + quantization_level: Optional[str] = None + + +class ListResponse(SubscriptableBaseModel): + class Model(SubscriptableBaseModel): + model: Optional[str] = None + modified_at: Optional[datetime] = None + digest: Optional[str] = None + size: Optional[ByteSize] = None + details: Optional[ModelDetails] = None + + models: Sequence[Model] + 'List of models.' + + +class DeleteRequest(BaseRequest): + """ + Request to delete a model. + """ + + +class CopyRequest(BaseModel): + """ + Request to copy a model. + """ + + source: str + 'Source model to copy.' + + destination: str + 'Destination model to copy to.' + + +class StatusResponse(SubscriptableBaseModel): + status: Optional[str] = None + + +class ProgressResponse(StatusResponse): + completed: Optional[int] = None + total: Optional[int] = None + digest: Optional[str] = None + + +class ShowRequest(BaseRequest): + """ + Request to show model information.
+ """ + + +class ShowResponse(SubscriptableBaseModel): + modified_at: Optional[datetime] = None + + template: Optional[str] = None + + modelfile: Optional[str] = None + + license: Optional[str] = None + + details: Optional[ModelDetails] = None + + modelinfo: Optional[Mapping[str, Any]] = Field(alias='model_info') + + parameters: Optional[str] = None + + +class ProcessResponse(SubscriptableBaseModel): + class Model(SubscriptableBaseModel): + model: Optional[str] = None + name: Optional[str] = None + digest: Optional[str] = None + expires_at: Optional[datetime] = None + size: Optional[ByteSize] = None + size_vram: Optional[ByteSize] = None + details: Optional[ModelDetails] = None + + models: Sequence[Model] class RequestError(Exception): diff --git a/ollama/_utils.py b/ollama/_utils.py new file mode 100644 index 00000000..c0b67c99 --- /dev/null +++ b/ollama/_utils.py @@ -0,0 +1,87 @@ +from __future__ import annotations +from collections import defaultdict +import inspect +from typing import Callable, Union +import re + +import pydantic +from ollama._types import Tool + + +def _parse_docstring(doc_string: Union[str, None]) -> dict[str, str]: + parsed_docstring = defaultdict(str) + if not doc_string: + return parsed_docstring + + key = hash(doc_string) + for line in doc_string.splitlines(): + lowered_line = line.lower().strip() + if lowered_line.startswith('args:'): + key = 'args' + elif lowered_line.startswith('returns:') or lowered_line.startswith('yields:') or lowered_line.startswith('raises:'): + key = '_' + + else: + # maybe change to a list and join later + parsed_docstring[key] += f'{line.strip()}\n' + + last_key = None + for line in parsed_docstring['args'].splitlines(): + line = line.strip() + if ':' in line: + # Split the line on either: + # 1. A parenthetical expression like (integer) - captured in group 1 + # 2. A colon : + # Followed by optional whitespace. Only split on first occurrence. 
+ parts = re.split(r'(?:\(([^)]*)\)|:)\s*', line, maxsplit=1) + + arg_name = parts[0].strip() + last_key = arg_name + + # Get the description - will be in parts[1] if parenthetical or parts[-1] if after colon + arg_description = parts[-1].strip() + if len(parts) > 2 and parts[1]: # Has parenthetical content + arg_description = parts[-1].split(':', 1)[-1].strip() + + parsed_docstring[last_key] = arg_description + + elif last_key and line: + parsed_docstring[last_key] += ' ' + line + + return parsed_docstring + + +def convert_function_to_tool(func: Callable) -> Tool: + doc_string_hash = hash(inspect.getdoc(func)) + parsed_docstring = _parse_docstring(inspect.getdoc(func)) + schema = type( + func.__name__, + (pydantic.BaseModel,), + { + '__annotations__': {k: v.annotation if v.annotation != inspect._empty else str for k, v in inspect.signature(func).parameters.items()}, + '__signature__': inspect.signature(func), + '__doc__': parsed_docstring[doc_string_hash], + }, + ).model_json_schema() + + for k, v in schema.get('properties', {}).items(): + # If type is missing, the default is string + types = {t.get('type', 'string') for t in v.get('anyOf')} if 'anyOf' in v else {v.get('type', 'string')} + if 'null' in types: + schema['required'].remove(k) + types.discard('null') + + schema['properties'][k] = { + 'description': parsed_docstring[k], + 'type': ', '.join(types), + } + + tool = Tool( + function=Tool.Function( + name=func.__name__, + description=schema.get('description', ''), + parameters=Tool.Function.Parameters(**schema), + ) + ) + + return Tool.model_validate(tool) diff --git a/poetry.lock b/poetry.lock index 483a2032..a08f7a07 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,14 +1,28 @@ # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
+[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + [[package]] name = "anyio" -version = "4.3.0" +version = "4.5.2" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, - {file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, + {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, + {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, ] [package.dependencies] @@ -18,19 +32,19 @@ sniffio = ">=1.1" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "certifi" -version = "2024.2.2" +version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, - {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, ] [[package]] @@ -46,63 +60,83 @@ files = [ [[package]] name = "coverage" -version = "7.4.4" +version = "7.6.1" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0be5efd5127542ef31f165de269f77560d6cdef525fffa446de6f7e9186cfb2"}, - {file = "coverage-7.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ccd341521be3d1b3daeb41960ae94a5e87abe2f46f17224ba5d6f2b8398016cf"}, - {file = "coverage-7.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fa497a8ab37784fbb20ab699c246053ac294d13fc7eb40ec007a5043ec91f8"}, - {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1a93009cb80730c9bca5d6d4665494b725b6e8e157c1cb7f2db5b4b122ea562"}, - {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:690db6517f09336559dc0b5f55342df62370a48f5469fabf502db2c6d1cffcd2"}, - {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:09c3255458533cb76ef55da8cc49ffab9e33f083739c8bd4f58e79fecfe288f7"}, - {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8ce1415194b4a6bd0cdcc3a1dfbf58b63f910dcb7330fe15bdff542c56949f87"}, - {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b91cbc4b195444e7e258ba27ac33769c41b94967919f10037e6355e998af255c"}, - {file = "coverage-7.4.4-cp310-cp310-win32.whl", hash = "sha256:598825b51b81c808cb6f078dcb972f96af96b078faa47af7dfcdf282835baa8d"}, - {file = "coverage-7.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:09ef9199ed6653989ebbcaacc9b62b514bb63ea2f90256e71fea3ed74bd8ff6f"}, - {file = "coverage-7.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f9f50e7ef2a71e2fae92774c99170eb8304e3fdf9c8c3c7ae9bab3e7229c5cf"}, - {file = "coverage-7.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:623512f8ba53c422fcfb2ce68362c97945095b864cda94a92edbaf5994201083"}, - {file = "coverage-7.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0513b9508b93da4e1716744ef6ebc507aff016ba115ffe8ecff744d1322a7b63"}, - {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40209e141059b9370a2657c9b15607815359ab3ef9918f0196b6fccce8d3230f"}, - {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a2b2b78c78293782fd3767d53e6474582f62443d0504b1554370bde86cc8227"}, - {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:73bfb9c09951125d06ee473bed216e2c3742f530fc5acc1383883125de76d9cd"}, - {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f384c3cc76aeedce208643697fb3e8437604b512255de6d18dae3f27655a384"}, - {file = 
"coverage-7.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:54eb8d1bf7cacfbf2a3186019bcf01d11c666bd495ed18717162f7eb1e9dd00b"}, - {file = "coverage-7.4.4-cp311-cp311-win32.whl", hash = "sha256:cac99918c7bba15302a2d81f0312c08054a3359eaa1929c7e4b26ebe41e9b286"}, - {file = "coverage-7.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:b14706df8b2de49869ae03a5ccbc211f4041750cd4a66f698df89d44f4bd30ec"}, - {file = "coverage-7.4.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:201bef2eea65e0e9c56343115ba3814e896afe6d36ffd37bab783261db430f76"}, - {file = "coverage-7.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:41c9c5f3de16b903b610d09650e5e27adbfa7f500302718c9ffd1c12cf9d6818"}, - {file = "coverage-7.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d898fe162d26929b5960e4e138651f7427048e72c853607f2b200909794ed978"}, - {file = "coverage-7.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ea79bb50e805cd6ac058dfa3b5c8f6c040cb87fe83de10845857f5535d1db70"}, - {file = "coverage-7.4.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce4b94265ca988c3f8e479e741693d143026632672e3ff924f25fab50518dd51"}, - {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:00838a35b882694afda09f85e469c96367daa3f3f2b097d846a7216993d37f4c"}, - {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fdfafb32984684eb03c2d83e1e51f64f0906b11e64482df3c5db936ce3839d48"}, - {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:69eb372f7e2ece89f14751fbcbe470295d73ed41ecd37ca36ed2eb47512a6ab9"}, - {file = "coverage-7.4.4-cp312-cp312-win32.whl", hash = "sha256:137eb07173141545e07403cca94ab625cc1cc6bc4c1e97b6e3846270e7e1fea0"}, - {file = "coverage-7.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:d71eec7d83298f1af3326ce0ff1d0ea83c7cb98f72b577097f9083b20bdaf05e"}, - {file = "coverage-7.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d5ae728ff3b5401cc320d792866987e7e7e880e6ebd24433b70a33b643bb0384"}, - {file = "coverage-7.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cc4f1358cb0c78edef3ed237ef2c86056206bb8d9140e73b6b89fbcfcbdd40e1"}, - {file = "coverage-7.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8130a2aa2acb8788e0b56938786c33c7c98562697bf9f4c7d6e8e5e3a0501e4a"}, - {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf271892d13e43bc2b51e6908ec9a6a5094a4df1d8af0bfc360088ee6c684409"}, - {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4cdc86d54b5da0df6d3d3a2f0b710949286094c3a6700c21e9015932b81447e"}, - {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ae71e7ddb7a413dd60052e90528f2f65270aad4b509563af6d03d53e979feafd"}, - {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:38dd60d7bf242c4ed5b38e094baf6401faa114fc09e9e6632374388a404f98e7"}, - {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa5b1c1bfc28384f1f53b69a023d789f72b2e0ab1b3787aae16992a7ca21056c"}, - {file = "coverage-7.4.4-cp38-cp38-win32.whl", hash = "sha256:dfa8fe35a0bb90382837b238fff375de15f0dcdb9ae68ff85f7a63649c98527e"}, - {file = "coverage-7.4.4-cp38-cp38-win_amd64.whl", hash = 
"sha256:b2991665420a803495e0b90a79233c1433d6ed77ef282e8e152a324bbbc5e0c8"}, - {file = "coverage-7.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b799445b9f7ee8bf299cfaed6f5b226c0037b74886a4e11515e569b36fe310d"}, - {file = "coverage-7.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b4d33f418f46362995f1e9d4f3a35a1b6322cb959c31d88ae56b0298e1c22357"}, - {file = "coverage-7.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aadacf9a2f407a4688d700e4ebab33a7e2e408f2ca04dbf4aef17585389eff3e"}, - {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c95949560050d04d46b919301826525597f07b33beba6187d04fa64d47ac82e"}, - {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff7687ca3d7028d8a5f0ebae95a6e4827c5616b31a4ee1192bdfde697db110d4"}, - {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5fc1de20b2d4a061b3df27ab9b7c7111e9a710f10dc2b84d33a4ab25065994ec"}, - {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c74880fc64d4958159fbd537a091d2a585448a8f8508bf248d72112723974cbd"}, - {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:742a76a12aa45b44d236815d282b03cfb1de3b4323f3e4ec933acfae08e54ade"}, - {file = "coverage-7.4.4-cp39-cp39-win32.whl", hash = "sha256:d89d7b2974cae412400e88f35d86af72208e1ede1a541954af5d944a8ba46c57"}, - {file = "coverage-7.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:9ca28a302acb19b6af89e90f33ee3e1906961f94b54ea37de6737b7ca9d8827c"}, - {file = "coverage-7.4.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677"}, - {file = "coverage-7.4.4.tar.gz", hash = "sha256:c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, + {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, + {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash 
= "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, + {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, + {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, + {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, + {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, + {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, + {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, + {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, + {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, + {file = 
"coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, + {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, + {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, + {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, + {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, + {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, + {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, ] [package.dependencies] @@ -113,13 +147,13 @@ toml = ["tomli"] [[package]] name = "exceptiongroup" -version = "1.2.0" +version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] [package.extras] @@ -138,13 +172,13 @@ files = [ [[package]] name 
= "httpcore" -version = "1.0.4" +version = "1.0.6" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.4-py3-none-any.whl", hash = "sha256:ac418c1db41bade2ad53ae2f3834a3a0f5ae76b56cf5aa497d2d033384fc7d73"}, - {file = "httpcore-1.0.4.tar.gz", hash = "sha256:cb2839ccfcba0d2d3c1131d3c3e26dfc327326fbe7a5dc0dbfe9f6c9151bb022"}, + {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, + {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, ] [package.dependencies] @@ -155,17 +189,17 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.25.0)"] +trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" -version = "0.27.0" +version = "0.27.2" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, - {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, ] [package.dependencies] @@ -180,18 +214,22 @@ brotli = ["brotli", "brotlicffi"] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "idna" -version = "3.6" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" files = [ - {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "iniconfig" version = "2.0.0" @@ -274,13 +312,13 @@ files = [ [[package]] name = "packaging" -version = "24.0" +version = "24.1" description = "Core utilities for Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, - {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] [[package]] @@ -395,15 +433,139 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "pydantic" +version = "2.9.2" +description = "Data validation using Python type hints" +optional = false 
+python-versions = ">=3.8" +files = [ + {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, + {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.23.4" +typing-extensions = [ + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, +] + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] + +[[package]] +name = "pydantic-core" +version = "2.23.4" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, + {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, + {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, + {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, + {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, + {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, + {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, + {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, + {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, + {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, + {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = 
"sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, + {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, + {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, + {file = 
"pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, + {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + [[package]] name = "pytest" -version = "8.3.2" +version = "8.3.3" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, - {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, + {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, + {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, ] [package.dependencies] @@ -469,29 +631,29 @@ Werkzeug = ">=2.0.0" [[package]] name = "ruff" -version = "0.6.3" +version = "0.7.4" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.3-py3-none-linux_armv6l.whl", hash = "sha256:97f58fda4e309382ad30ede7f30e2791d70dd29ea17f41970119f55bdb7a45c3"}, - {file = "ruff-0.6.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:3b061e49b5cf3a297b4d1c27ac5587954ccb4ff601160d3d6b2f70b1622194dc"}, - {file = "ruff-0.6.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:34e2824a13bb8c668c71c1760a6ac7d795ccbd8d38ff4a0d8471fdb15de910b1"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bddfbb8d63c460f4b4128b6a506e7052bad4d6f3ff607ebbb41b0aa19c2770d1"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ced3eeb44df75353e08ab3b6a9e113b5f3f996bea48d4f7c027bc528ba87b672"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47021dff5445d549be954eb275156dfd7c37222acc1e8014311badcb9b4ec8c1"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7d7bd20dc07cebd68cc8bc7b3f5ada6d637f42d947c85264f94b0d1cd9d87384"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:500f166d03fc6d0e61c8e40a3ff853fa8a43d938f5d14c183c612df1b0d6c58a"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:42844ff678f9b976366b262fa2d1d1a3fe76f6e145bd92c84e27d172e3c34500"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70452a10eb2d66549de8e75f89ae82462159855e983ddff91bc0bce6511d0470"}, - {file = "ruff-0.6.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:65a533235ed55f767d1fc62193a21cbf9e3329cf26d427b800fdeacfb77d296f"}, - {file = "ruff-0.6.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2e2c23cef30dc3cbe9cc5d04f2899e7f5e478c40d2e0a633513ad081f7361b5"}, - {file = "ruff-0.6.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d8a136aa7d228975a6aee3dd8bea9b28e2b43e9444aa678fb62aeb1956ff2351"}, - {file = "ruff-0.6.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:f92fe93bc72e262b7b3f2bba9879897e2d58a989b4714ba6a5a7273e842ad2f8"}, - {file = "ruff-0.6.3-py3-none-win32.whl", hash = "sha256:7a62d3b5b0d7f9143d94893f8ba43aa5a5c51a0ffc4a401aa97a81ed76930521"}, - {file = "ruff-0.6.3-py3-none-win_amd64.whl", hash = "sha256:746af39356fee2b89aada06c7376e1aa274a23493d7016059c3a72e3b296befb"}, - {file = "ruff-0.6.3-py3-none-win_arm64.whl", hash = "sha256:14a9528a8b70ccc7a847637c29e56fd1f9183a9db743bbc5b8e0c4ad60592a82"}, - {file = "ruff-0.6.3.tar.gz", hash = "sha256:183b99e9edd1ef63be34a3b51fee0a9f4ab95add123dbf89a71f7b1f0c991983"}, + {file = "ruff-0.7.4-py3-none-linux_armv6l.whl", hash = "sha256:a4919925e7684a3f18e18243cd6bea7cfb8e968a6eaa8437971f681b7ec51478"}, + {file = "ruff-0.7.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:cfb365c135b830778dda8c04fb7d4280ed0b984e1aec27f574445231e20d6c63"}, + {file = "ruff-0.7.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:63a569b36bc66fbadec5beaa539dd81e0527cb258b94e29e0531ce41bacc1f20"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d06218747d361d06fd2fdac734e7fa92df36df93035db3dc2ad7aa9852cb109"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0cea28d0944f74ebc33e9f934238f15c758841f9f5edd180b5315c203293452"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:80094ecd4793c68b2571b128f91754d60f692d64bc0d7272ec9197fdd09bf9ea"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:997512325c6620d1c4c2b15db49ef59543ef9cd0f4aa8065ec2ae5103cedc7e7"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00b4cf3a6b5fad6d1a66e7574d78956bbd09abfd6c8a997798f01f5da3d46a05"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7dbdc7d8274e1422722933d1edddfdc65b4336abf0b16dfcb9dedd6e6a517d06"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e92dfb5f00eaedb1501b2f906ccabfd67b2355bdf117fea9719fc99ac2145bc"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3bd726099f277d735dc38900b6a8d6cf070f80828877941983a57bca1cd92172"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2e32829c429dd081ee5ba39aef436603e5b22335c3d3fff013cd585806a6486a"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:662a63b4971807623f6f90c1fb664613f67cc182dc4d991471c23c541fee62dd"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:876f5e09eaae3eb76814c1d3b68879891d6fde4824c015d48e7a7da4cf066a3a"}, + {file = "ruff-0.7.4-py3-none-win32.whl", hash = "sha256:75c53f54904be42dd52a548728a5b572344b50d9b2873d13a3f8c5e3b91f5cac"}, + {file = "ruff-0.7.4-py3-none-win_amd64.whl", hash = "sha256:745775c7b39f914238ed1f1b0bebed0b9155a17cd8bc0b08d3c87e4703b990d6"}, + {file = "ruff-0.7.4-py3-none-win_arm64.whl", hash = "sha256:11bff065102c3ae9d3ea4dc9ecdfe5a5171349cdd0787c1fc64761212fc9cf1f"}, + {file = "ruff-0.7.4.tar.gz", hash = "sha256:cd12e35031f5af6b9b93715d8c4f40360070b2041f81273d0527683d5708fce2"}, ] [[package]] @@ -507,35 +669,35 @@ files = [ [[package]] name = "tomli" -version = "2.0.1" +version = "2.0.2" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, + {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] [[package]] name = "typing-extensions" -version = "4.10.0" +version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, - {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] [[package]] name = "werkzeug" -version = "3.0.1" +version = "3.0.6" description = "The comprehensive WSGI web application library." 
optional = false python-versions = ">=3.8" files = [ - {file = "werkzeug-3.0.1-py3-none-any.whl", hash = "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"}, - {file = "werkzeug-3.0.1.tar.gz", hash = "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc"}, + {file = "werkzeug-3.0.6-py3-none-any.whl", hash = "sha256:1bc0c2310d2fbb07b1dd1105eba2f7af72f322e1e455f2f93c993bee8c8a5f17"}, + {file = "werkzeug-3.0.6.tar.gz", hash = "sha256:a8dd59d4de28ca70471a34cba79bed5f7ef2e036a76b3ab0835474246eb41f8d"}, ] [package.dependencies] @@ -547,4 +709,4 @@ watchdog = ["watchdog (>=2.3)"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "e36516c932ab9dd7497acc0c3d55ab2c963004595efe97c2bc80854687c32c1e" +content-hash = "61443e0ce98d3e24a45da6f9c890699fc44fe98cd191b0eb38e6b59093e8149d" diff --git a/pyproject.toml b/pyproject.toml index 3adf10f3..afafd2fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,12 +5,13 @@ description = "The official Python client for Ollama." authors = ["Ollama "] license = "MIT" readme = "README.md" -homepage = "https://ollama.ai" -repository = "https://github.com/jmorganca/ollama-python" +homepage = "https://ollama.com" +repository = "https://github.com/ollama/ollama-python" [tool.poetry.dependencies] python = "^3.8" httpx = "^0.27.0" +pydantic = "^2.9.0" [tool.poetry.group.dev.dependencies] pytest = ">=7.4.3,<9.0.0" @@ -18,7 +19,7 @@ pytest-asyncio = ">=0.23.2,<0.25.0" pytest-cov = ">=4.1,<6.0" pytest-httpserver = "^1.0.8" pillow = "^10.2.0" -ruff = ">=0.1.8,<0.7.0" +ruff = ">=0.1.8,<0.8.0" [build-system] requires = ["poetry-core"] diff --git a/requirements.txt b/requirements.txt index f1dde1f1..c7bfb080 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,27 +1,123 @@ -anyio==4.3.0 ; python_version >= "3.8" and python_version < "4.0" \ - --hash=sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8 \ - --hash=sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6 -certifi==2024.2.2 ; python_version >= "3.8" and python_version < "4.0" \ - --hash=sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f \ - --hash=sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1 -exceptiongroup==1.2.0 ; python_version >= "3.8" and python_version < "3.11" \ - --hash=sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14 \ - --hash=sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68 +annotated-types==0.7.0 ; python_version >= "3.8" and python_version < "4.0" \ + --hash=sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 \ + --hash=sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89 +anyio==4.5.2 ; python_version >= "3.8" and python_version < "4.0" \ + --hash=sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b \ + --hash=sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f +certifi==2024.8.30 ; python_version >= "3.8" and python_version < "4.0" \ + --hash=sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 \ + --hash=sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9 +exceptiongroup==1.2.2 ; python_version >= "3.8" and python_version < "3.11" \ + --hash=sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b \ + --hash=sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc h11==0.14.0 ; python_version >= "3.8" and python_version < "4.0" \ 
--hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \ --hash=sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761 -httpcore==1.0.4 ; python_version >= "3.8" and python_version < "4.0" \ - --hash=sha256:ac418c1db41bade2ad53ae2f3834a3a0f5ae76b56cf5aa497d2d033384fc7d73 \ - --hash=sha256:cb2839ccfcba0d2d3c1131d3c3e26dfc327326fbe7a5dc0dbfe9f6c9151bb022 -httpx==0.27.0 ; python_version >= "3.8" and python_version < "4.0" \ - --hash=sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5 \ - --hash=sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5 -idna==3.6 ; python_version >= "3.8" and python_version < "4.0" \ - --hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \ - --hash=sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f +httpcore==1.0.6 ; python_version >= "3.8" and python_version < "4.0" \ + --hash=sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f \ + --hash=sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f +httpx==0.27.2 ; python_version >= "3.8" and python_version < "4.0" \ + --hash=sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0 \ + --hash=sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2 +idna==3.10 ; python_version >= "3.8" and python_version < "4.0" \ + --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ + --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 +pydantic-core==2.23.4 ; python_version >= "3.8" and python_version < "4.0" \ + --hash=sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36 \ + --hash=sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05 \ + --hash=sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071 \ + --hash=sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327 \ + --hash=sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c \ + --hash=sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36 \ + --hash=sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29 \ + --hash=sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744 \ + --hash=sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d \ + --hash=sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec \ + --hash=sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e \ + --hash=sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e \ + --hash=sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577 \ + --hash=sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232 \ + --hash=sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863 \ + --hash=sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6 \ + --hash=sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368 \ + --hash=sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480 \ + --hash=sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2 \ + --hash=sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2 \ + --hash=sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6 \ + --hash=sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769 \ + --hash=sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d \ + 
--hash=sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2 \ + --hash=sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84 \ + --hash=sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166 \ + --hash=sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271 \ + --hash=sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5 \ + --hash=sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb \ + --hash=sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13 \ + --hash=sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323 \ + --hash=sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556 \ + --hash=sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665 \ + --hash=sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef \ + --hash=sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb \ + --hash=sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119 \ + --hash=sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126 \ + --hash=sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510 \ + --hash=sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b \ + --hash=sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87 \ + --hash=sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f \ + --hash=sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc \ + --hash=sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8 \ + --hash=sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21 \ + --hash=sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f \ + --hash=sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6 \ + --hash=sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658 \ + --hash=sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b \ + --hash=sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3 \ + --hash=sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb \ + --hash=sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59 \ + --hash=sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24 \ + --hash=sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9 \ + --hash=sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3 \ + --hash=sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd \ + --hash=sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753 \ + --hash=sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55 \ + --hash=sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad \ + --hash=sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a \ + --hash=sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605 \ + --hash=sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e \ + --hash=sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b \ + --hash=sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433 \ + --hash=sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8 \ + --hash=sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07 \ + 
--hash=sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728 \ + --hash=sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0 \ + --hash=sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327 \ + --hash=sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555 \ + --hash=sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64 \ + --hash=sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6 \ + --hash=sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea \ + --hash=sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b \ + --hash=sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df \ + --hash=sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e \ + --hash=sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd \ + --hash=sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068 \ + --hash=sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3 \ + --hash=sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040 \ + --hash=sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12 \ + --hash=sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916 \ + --hash=sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f \ + --hash=sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f \ + --hash=sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801 \ + --hash=sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231 \ + --hash=sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5 \ + --hash=sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8 \ + --hash=sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee \ + --hash=sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607 +pydantic==2.9.2 ; python_version >= "3.8" and python_version < "4.0" \ + --hash=sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f \ + --hash=sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12 sniffio==1.3.1 ; python_version >= "3.8" and python_version < "4.0" \ --hash=sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2 \ --hash=sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc -typing-extensions==4.10.0 ; python_version >= "3.8" and python_version < "3.11" \ - --hash=sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475 \ - --hash=sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb +typing-extensions==4.12.2 ; python_version >= "3.8" and python_version < "4.0" \ + --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ + --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 diff --git a/tests/test_client.py b/tests/test_client.py index 0b062f5d..fbd01bda 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,6 +1,7 @@ import os import io import json +from pydantic import ValidationError import pytest import tempfile from pathlib import Path @@ -8,7 +9,7 @@ from werkzeug.wrappers import Request, Response from PIL import Image -from ollama._client import Client, AsyncClient +from ollama._client import Client, AsyncClient, _copy_tools class PrefixPattern(URIPattern): @@ -28,9 +29,6 @@ def test_client_chat(httpserver: HTTPServer): 'messages': [{'role': 'user', 
'content': 'Why is the sky blue?'}], 'tools': [], 'stream': False, - 'format': '', - 'options': {}, - 'keep_alive': None, }, ).respond_with_json( { @@ -76,9 +74,6 @@ def generate(): 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 'tools': [], 'stream': True, - 'format': '', - 'options': {}, - 'keep_alive': None, }, ).respond_with_handler(stream_handler) @@ -106,9 +101,6 @@ def test_client_chat_images(httpserver: HTTPServer): ], 'tools': [], 'stream': False, - 'format': '', - 'options': {}, - 'keep_alive': None, }, ).respond_with_json( { @@ -137,16 +129,7 @@ def test_client_generate(httpserver: HTTPServer): json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', - 'suffix': '', - 'system': '', - 'template': '', - 'context': [], 'stream': False, - 'raw': False, - 'images': [], - 'format': '', - 'options': {}, - 'keep_alive': None, }, ).respond_with_json( { @@ -183,16 +166,7 @@ def generate(): json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', - 'suffix': '', - 'system': '', - 'template': '', - 'context': [], 'stream': True, - 'raw': False, - 'images': [], - 'format': '', - 'options': {}, - 'keep_alive': None, }, ).respond_with_handler(stream_handler) @@ -212,16 +186,8 @@ def test_client_generate_images(httpserver: HTTPServer): json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', - 'suffix': '', - 'system': '', - 'template': '', - 'context': [], 'stream': False, - 'raw': False, 'images': ['iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGNgYGAAAAAEAAH2FzhVAAAAAElFTkSuQmCC'], - 'format': '', - 'options': {}, - 'keep_alive': None, }, ).respond_with_json( { @@ -244,15 +210,11 @@ def test_client_pull(httpserver: HTTPServer): '/api/pull', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'insecure': False, 'stream': False, }, - ).respond_with_json( - { - 'status': 'success', - } - ) + ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) response = client.pull('dummy') @@ -274,7 +236,7 @@ def generate(): '/api/pull', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'insecure': False, 'stream': True, }, @@ -293,15 +255,15 @@ def test_client_push(httpserver: HTTPServer): '/api/push', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'insecure': False, 'stream': False, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) response = client.push('dummy') - assert isinstance(response, dict) + assert response['status'] == 'success' def test_client_push_stream(httpserver: HTTPServer): @@ -317,7 +279,7 @@ def generate(): '/api/push', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'insecure': False, 'stream': True, }, @@ -332,17 +294,16 @@ def generate(): def test_client_create_path(httpserver: HTTPServer): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=200)) + httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) httpserver.expect_ordered_request( '/api/create', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'modelfile': 'FROM @sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n', 'stream': False, - 'quantize': None, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) @@ -352,21 +313,20 @@ def test_client_create_path(httpserver: HTTPServer): modelfile.flush() response = 
client.create('dummy', path=modelfile.name) - assert isinstance(response, dict) + assert response['status'] == 'success' def test_client_create_path_relative(httpserver: HTTPServer): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=200)) + httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) httpserver.expect_ordered_request( '/api/create', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'modelfile': 'FROM @sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n', 'stream': False, - 'quantize': None, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) @@ -376,7 +336,7 @@ def test_client_create_path_relative(httpserver: HTTPServer): modelfile.flush() response = client.create('dummy', path=modelfile.name) - assert isinstance(response, dict) + assert response['status'] == 'success' @pytest.fixture @@ -389,17 +349,16 @@ def userhomedir(): def test_client_create_path_user_home(httpserver: HTTPServer, userhomedir): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=200)) + httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) httpserver.expect_ordered_request( '/api/create', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'modelfile': 'FROM @sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n', 'stream': False, - 'quantize': None, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) @@ -409,36 +368,35 @@ def test_client_create_path_user_home(httpserver: HTTPServer, userhomedir): modelfile.flush() response = client.create('dummy', path=modelfile.name) - assert isinstance(response, dict) + assert response['status'] == 'success' def test_client_create_modelfile(httpserver: HTTPServer): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=200)) + httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) httpserver.expect_ordered_request( '/api/create', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'modelfile': 'FROM @sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n', 'stream': False, - 'quantize': None, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) with tempfile.NamedTemporaryFile() as blob: response = client.create('dummy', modelfile=f'FROM {blob.name}') - assert isinstance(response, dict) + assert response['status'] == 'success' def test_client_create_modelfile_roundtrip(httpserver: HTTPServer): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=200)) + httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) httpserver.expect_ordered_request( '/api/create', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'modelfile': '''FROM @sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 TEMPLATE """[INST] <>{{.System}}<> {{.Prompt}} [/INST]""" @@ -452,9 +410,8 @@ def test_client_create_modelfile_roundtrip(httpserver: HTTPServer): PARAMETER stop <> 
PARAMETER stop <>''', 'stream': False, - 'quantize': None, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) @@ -478,7 +435,7 @@ def test_client_create_modelfile_roundtrip(httpserver: HTTPServer): ] ), ) - assert isinstance(response, dict) + assert response['status'] == 'success' def test_client_create_from_library(httpserver: HTTPServer): @@ -486,21 +443,19 @@ def test_client_create_from_library(httpserver: HTTPServer): '/api/create', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'modelfile': 'FROM llama2', 'stream': False, - 'quantize': None, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = Client(httpserver.url_for('/')) response = client.create('dummy', modelfile='FROM llama2') - assert isinstance(response, dict) + assert response['status'] == 'success' def test_client_create_blob(httpserver: HTTPServer): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=404)) httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=201)) client = Client(httpserver.url_for('/')) @@ -511,7 +466,7 @@ def test_client_create_blob(httpserver: HTTPServer): def test_client_create_blob_exists(httpserver: HTTPServer): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=200)) + httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) client = Client(httpserver.url_for('/')) @@ -520,6 +475,20 @@ def test_client_create_blob_exists(httpserver: HTTPServer): assert response == 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' +def test_client_delete(httpserver: HTTPServer): + httpserver.expect_ordered_request(PrefixPattern('/api/delete'), method='DELETE').respond_with_response(Response(status=200)) + client = Client(httpserver.url_for('/api/delete')) + response = client.delete('dummy') + assert response['status'] == 'success' + + +def test_client_copy(httpserver: HTTPServer): + httpserver.expect_ordered_request(PrefixPattern('/api/copy'), method='POST').respond_with_response(Response(status=200)) + client = Client(httpserver.url_for('/api/copy')) + response = client.copy('dum', 'dummer') + assert response['status'] == 'success' + + @pytest.mark.asyncio async def test_async_client_chat(httpserver: HTTPServer): httpserver.expect_ordered_request( @@ -530,15 +499,22 @@ async def test_async_client_chat(httpserver: HTTPServer): 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 'tools': [], 'stream': False, - 'format': '', - 'options': {}, - 'keep_alive': None, }, - ).respond_with_json({}) + ).respond_with_json( + { + 'model': 'dummy', + 'message': { + 'role': 'assistant', + 'content': "I don't know.", + }, + } + ) client = AsyncClient(httpserver.url_for('/')) response = await client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}]) - assert isinstance(response, dict) + assert response['model'] == 'dummy' + assert response['message']['role'] == 'assistant' + assert response['message']['content'] == "I don't know." 
@pytest.mark.asyncio @@ -569,9 +545,6 @@ def generate(): 'messages': [{'role': 'user', 'content': 'Why is the sky blue?'}], 'tools': [], 'stream': True, - 'format': '', - 'options': {}, - 'keep_alive': None, }, ).respond_with_handler(stream_handler) @@ -600,18 +573,25 @@ async def test_async_client_chat_images(httpserver: HTTPServer): ], 'tools': [], 'stream': False, - 'format': '', - 'options': {}, - 'keep_alive': None, }, - ).respond_with_json({}) + ).respond_with_json( + { + 'model': 'dummy', + 'message': { + 'role': 'assistant', + 'content': "I don't know.", + }, + } + ) client = AsyncClient(httpserver.url_for('/')) with io.BytesIO() as b: Image.new('RGB', (1, 1)).save(b, 'PNG') response = await client.chat('dummy', messages=[{'role': 'user', 'content': 'Why is the sky blue?', 'images': [b.getvalue()]}]) - assert isinstance(response, dict) + assert response['model'] == 'dummy' + assert response['message']['role'] == 'assistant' + assert response['message']['content'] == "I don't know." @pytest.mark.asyncio @@ -622,22 +602,19 @@ async def test_async_client_generate(httpserver: HTTPServer): json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', - 'suffix': '', - 'system': '', - 'template': '', - 'context': [], 'stream': False, - 'raw': False, - 'images': [], - 'format': '', - 'options': {}, - 'keep_alive': None, }, - ).respond_with_json({}) + ).respond_with_json( + { + 'model': 'dummy', + 'response': 'Because it is.', + } + ) client = AsyncClient(httpserver.url_for('/')) response = await client.generate('dummy', 'Why is the sky blue?') - assert isinstance(response, dict) + assert response['model'] == 'dummy' + assert response['response'] == 'Because it is.' @pytest.mark.asyncio @@ -663,16 +640,7 @@ def generate(): json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', - 'suffix': '', - 'system': '', - 'template': '', - 'context': [], 'stream': True, - 'raw': False, - 'images': [], - 'format': '', - 'options': {}, - 'keep_alive': None, }, ).respond_with_handler(stream_handler) @@ -693,25 +661,23 @@ async def test_async_client_generate_images(httpserver: HTTPServer): json={ 'model': 'dummy', 'prompt': 'Why is the sky blue?', - 'suffix': '', - 'system': '', - 'template': '', - 'context': [], 'stream': False, - 'raw': False, 'images': ['iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGNgYGAAAAAEAAH2FzhVAAAAAElFTkSuQmCC'], - 'format': '', - 'options': {}, - 'keep_alive': None, }, - ).respond_with_json({}) + ).respond_with_json( + { + 'model': 'dummy', + 'response': 'Because it is.', + } + ) client = AsyncClient(httpserver.url_for('/')) with tempfile.NamedTemporaryFile() as temp: Image.new('RGB', (1, 1)).save(temp, 'PNG') response = await client.generate('dummy', 'Why is the sky blue?', images=[temp.name]) - assert isinstance(response, dict) + assert response['model'] == 'dummy' + assert response['response'] == 'Because it is.' 
@pytest.mark.asyncio @@ -720,15 +686,15 @@ async def test_async_client_pull(httpserver: HTTPServer): '/api/pull', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'insecure': False, 'stream': False, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) response = await client.pull('dummy') - assert isinstance(response, dict) + assert response['status'] == 'success' @pytest.mark.asyncio @@ -747,7 +713,7 @@ def generate(): '/api/pull', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'insecure': False, 'stream': True, }, @@ -767,15 +733,15 @@ async def test_async_client_push(httpserver: HTTPServer): '/api/push', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'insecure': False, 'stream': False, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) response = await client.push('dummy') - assert isinstance(response, dict) + assert response['status'] == 'success' @pytest.mark.asyncio @@ -792,7 +758,7 @@ def generate(): '/api/push', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'insecure': False, 'stream': True, }, @@ -808,17 +774,16 @@ def generate(): @pytest.mark.asyncio async def test_async_client_create_path(httpserver: HTTPServer): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=200)) + httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) httpserver.expect_ordered_request( '/api/create', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'modelfile': 'FROM @sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n', 'stream': False, - 'quantize': None, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) @@ -828,22 +793,21 @@ async def test_async_client_create_path(httpserver: HTTPServer): modelfile.flush() response = await client.create('dummy', path=modelfile.name) - assert isinstance(response, dict) + assert response['status'] == 'success' @pytest.mark.asyncio async def test_async_client_create_path_relative(httpserver: HTTPServer): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=200)) + httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) httpserver.expect_ordered_request( '/api/create', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'modelfile': 'FROM @sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n', 'stream': False, - 'quantize': None, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) @@ -853,22 +817,21 @@ async def test_async_client_create_path_relative(httpserver: HTTPServer): modelfile.flush() response = await client.create('dummy', path=modelfile.name) - assert isinstance(response, dict) + assert response['status'] == 'success' @pytest.mark.asyncio async def test_async_client_create_path_user_home(httpserver: HTTPServer, userhomedir): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=200)) + httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) httpserver.expect_ordered_request( 
'/api/create', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'modelfile': 'FROM @sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n', 'stream': False, - 'quantize': None, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) @@ -878,38 +841,37 @@ async def test_async_client_create_path_user_home(httpserver: HTTPServer, userho modelfile.flush() response = await client.create('dummy', path=modelfile.name) - assert isinstance(response, dict) + assert response['status'] == 'success' @pytest.mark.asyncio async def test_async_client_create_modelfile(httpserver: HTTPServer): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=200)) + httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) httpserver.expect_ordered_request( '/api/create', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'modelfile': 'FROM @sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n', 'stream': False, - 'quantize': None, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) with tempfile.NamedTemporaryFile() as blob: response = await client.create('dummy', modelfile=f'FROM {blob.name}') - assert isinstance(response, dict) + assert response['status'] == 'success' @pytest.mark.asyncio async def test_async_client_create_modelfile_roundtrip(httpserver: HTTPServer): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=200)) + httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) httpserver.expect_ordered_request( '/api/create', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'modelfile': '''FROM @sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 TEMPLATE """[INST] <>{{.System}}<> {{.Prompt}} [/INST]""" @@ -923,9 +885,8 @@ async def test_async_client_create_modelfile_roundtrip(httpserver: HTTPServer): PARAMETER stop <> PARAMETER stop <>''', 'stream': False, - 'quantize': None, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) @@ -949,7 +910,7 @@ async def test_async_client_create_modelfile_roundtrip(httpserver: HTTPServer): ] ), ) - assert isinstance(response, dict) + assert response['status'] == 'success' @pytest.mark.asyncio @@ -958,22 +919,20 @@ async def test_async_client_create_from_library(httpserver: HTTPServer): '/api/create', method='POST', json={ - 'name': 'dummy', + 'model': 'dummy', 'modelfile': 'FROM llama2', 'stream': False, - 'quantize': None, }, - ).respond_with_json({}) + ).respond_with_json({'status': 'success'}) client = AsyncClient(httpserver.url_for('/')) response = await client.create('dummy', modelfile='FROM llama2') - assert isinstance(response, dict) + assert response['status'] == 'success' @pytest.mark.asyncio async def test_async_client_create_blob(httpserver: HTTPServer): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=404)) httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=201)) client = AsyncClient(httpserver.url_for('/')) @@ -985,10 +944,95 @@ async def test_async_client_create_blob(httpserver: 
HTTPServer): @pytest.mark.asyncio async def test_async_client_create_blob_exists(httpserver: HTTPServer): - httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='HEAD').respond_with_response(Response(status=200)) + httpserver.expect_ordered_request(PrefixPattern('/api/blobs/'), method='POST').respond_with_response(Response(status=200)) client = AsyncClient(httpserver.url_for('/')) with tempfile.NamedTemporaryFile() as blob: response = await client._create_blob(blob.name) assert response == 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + + +@pytest.mark.asyncio +async def test_async_client_delete(httpserver: HTTPServer): + httpserver.expect_ordered_request(PrefixPattern('/api/delete'), method='DELETE').respond_with_response(Response(status=200)) + client = AsyncClient(httpserver.url_for('/api/delete')) + response = await client.delete('dummy') + assert response['status'] == 'success' + + +@pytest.mark.asyncio +async def test_async_client_copy(httpserver: HTTPServer): + httpserver.expect_ordered_request(PrefixPattern('/api/copy'), method='POST').respond_with_response(Response(status=200)) + client = AsyncClient(httpserver.url_for('/api/copy')) + response = await client.copy('dum', 'dummer') + assert response['status'] == 'success' + + +def test_headers(): + client = Client() + assert client._client.headers['content-type'] == 'application/json' + assert client._client.headers['accept'] == 'application/json' + assert client._client.headers['user-agent'].startswith('ollama-python/') + + client = Client( + headers={ + 'X-Custom': 'value', + 'Content-Type': 'text/plain', + } + ) + assert client._client.headers['x-custom'] == 'value' + assert client._client.headers['content-type'] == 'application/json' + + +def test_copy_tools(): + def func1(x: int) -> str: + """Simple function 1. + Args: + x (integer): A number + """ + pass + + def func2(y: str) -> int: + """Simple function 2. 
+ Args: + y (string): A string + """ + pass + + # Test with list of functions + tools = list(_copy_tools([func1, func2])) + assert len(tools) == 2 + assert tools[0].function.name == 'func1' + assert tools[1].function.name == 'func2' + + # Test with empty input + assert list(_copy_tools()) == [] + assert list(_copy_tools(None)) == [] + assert list(_copy_tools([])) == [] + + # Test with mix of functions and tool dicts + tool_dict = { + 'type': 'function', + 'function': { + 'name': 'test', + 'description': 'Test function', + 'parameters': { + 'type': 'object', + 'properties': {'x': {'type': 'string', 'description': 'A string'}}, + 'required': ['x'], + }, + }, + } + + tools = list(_copy_tools([func1, tool_dict])) + assert len(tools) == 2 + assert tools[0].function.name == 'func1' + assert tools[1].function.name == 'test' + + +def test_tool_validation(): + # Raises ValidationError when used as it is a generator + with pytest.raises(ValidationError): + invalid_tool = {'type': 'invalid_type', 'function': {'name': 'test'}} + list(_copy_tools([invalid_tool])) diff --git a/tests/test_type_serialization.py b/tests/test_type_serialization.py new file mode 100644 index 00000000..e3e8268c --- /dev/null +++ b/tests/test_type_serialization.py @@ -0,0 +1,48 @@ +from base64 import b64encode +from pathlib import Path + +import pytest +from ollama._types import Image +import tempfile + + +def test_image_serialization_bytes(): + image_bytes = b'test image bytes' + encoded_string = b64encode(image_bytes).decode() + img = Image(value=image_bytes) + assert img.model_dump() == encoded_string + + +def test_image_serialization_base64_string(): + b64_str = 'dGVzdCBiYXNlNjQgc3RyaW5n' + img = Image(value=b64_str) + assert img.model_dump() == b64_str # Should return as-is if valid base64 + + +def test_image_serialization_plain_string(): + img = Image(value='not a path or base64') + assert img.model_dump() == 'not a path or base64' # Should return as-is + + +def test_image_serialization_path(): + with tempfile.NamedTemporaryFile() as temp_file: + temp_file.write(b'test file content') + temp_file.flush() + img = Image(value=Path(temp_file.name)) + assert img.model_dump() == b64encode(b'test file content').decode() + + +def test_image_serialization_string_path(): + with tempfile.NamedTemporaryFile() as temp_file: + temp_file.write(b'test file content') + temp_file.flush() + img = Image(value=temp_file.name) + assert img.model_dump() == b64encode(b'test file content').decode() + + with pytest.raises(ValueError): + img = Image(value='some_path/that/does/not/exist.png') + img.model_dump() + + with pytest.raises(ValueError): + img = Image(value='not an image') + img.model_dump() diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 00000000..9fb1e3b2 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,270 @@ +import json +import sys +from typing import Dict, List, Mapping, Sequence, Set, Tuple, Union + + +from ollama._utils import convert_function_to_tool + + +def test_function_to_tool_conversion(): + def add_numbers(x: int, y: Union[int, None] = None) -> int: + """Add two numbers together. + args: + x (integer): The first number + y (integer, optional): The second number + + Returns: + integer: The sum of x and y + """ + return x + y + + tool = convert_function_to_tool(add_numbers).model_dump() + + assert tool['type'] == 'function' + assert tool['function']['name'] == 'add_numbers' + assert tool['function']['description'] == 'Add two numbers together.' 
+ assert tool['function']['parameters']['type'] == 'object' + assert tool['function']['parameters']['properties']['x']['type'] == 'integer' + assert tool['function']['parameters']['properties']['x']['description'] == 'The first number' + assert tool['function']['parameters']['required'] == ['x'] + + +def test_function_with_no_args(): + def simple_func(): + """ + A simple function with no arguments. + Args: + None + Returns: + None + """ + pass + + tool = convert_function_to_tool(simple_func).model_dump() + assert tool['function']['name'] == 'simple_func' + assert tool['function']['description'] == 'A simple function with no arguments.' + assert tool['function']['parameters']['properties'] == {} + + +def test_function_with_all_types(): + if sys.version_info >= (3, 10): + + def all_types( + x: int, + y: str, + z: list[int], + w: dict[str, int], + v: int | str | None, + ) -> int | dict[str, int] | str | list[int] | None: + """ + A function with all types. + Args: + x (integer): The first number + y (string): The second number + z (array): The third number + w (object): The fourth number + v (integer | string | None): The fifth number + """ + pass + else: + + def all_types( + x: int, + y: str, + z: Sequence, + w: Mapping[str, int], + d: Dict[str, int], + s: Set[int], + t: Tuple[int, str], + l: List[int], # noqa: E741 + o: Union[int, None], + ) -> Union[Mapping[str, int], str, None]: + """ + A function with all types. + Args: + x (integer): The first number + y (string): The second number + z (array): The third number + w (object): The fourth number + d (object): The fifth number + s (array): The sixth number + t (array): The seventh number + l (array): The eighth number + o (integer | None): The ninth number + """ + pass + + tool_json = convert_function_to_tool(all_types).model_dump_json() + tool = json.loads(tool_json) + assert tool['function']['parameters']['properties']['x']['type'] == 'integer' + assert tool['function']['parameters']['properties']['y']['type'] == 'string' + + if sys.version_info >= (3, 10): + assert tool['function']['parameters']['properties']['z']['type'] == 'array' + assert tool['function']['parameters']['properties']['w']['type'] == 'object' + assert set(x.strip().strip("'") for x in tool['function']['parameters']['properties']['v']['type'].removeprefix('[').removesuffix(']').split(',')) == {'string', 'integer'} + assert tool['function']['parameters']['properties']['v']['type'] != 'null' + assert tool['function']['parameters']['required'] == ['x', 'y', 'z', 'w'] + else: + assert tool['function']['parameters']['properties']['z']['type'] == 'array' + assert tool['function']['parameters']['properties']['w']['type'] == 'object' + assert tool['function']['parameters']['properties']['d']['type'] == 'object' + assert tool['function']['parameters']['properties']['s']['type'] == 'array' + assert tool['function']['parameters']['properties']['t']['type'] == 'array' + assert tool['function']['parameters']['properties']['l']['type'] == 'array' + assert tool['function']['parameters']['properties']['o']['type'] == 'integer' + assert tool['function']['parameters']['properties']['o']['type'] != 'null' + assert tool['function']['parameters']['required'] == ['x', 'y', 'z', 'w', 'd', 's', 't', 'l'] + + +def test_function_docstring_parsing(): + from typing import List, Dict, Any + + def func_with_complex_docs(x: int, y: List[str]) -> Dict[str, Any]: + """ + Test function with complex docstring. 
+ + Args: + x (integer): A number + with multiple lines + y (array of string): A list + with multiple lines + + Returns: + object: A dictionary + with multiple lines + """ + pass + + tool = convert_function_to_tool(func_with_complex_docs).model_dump() + assert tool['function']['description'] == 'Test function with complex docstring.' + assert tool['function']['parameters']['properties']['x']['description'] == 'A number with multiple lines' + assert tool['function']['parameters']['properties']['y']['description'] == 'A list with multiple lines' + + +def test_skewed_docstring_parsing(): + def add_two_numbers(x: int, y: int) -> int: + """ + Add two numbers together. + Args: + x (integer): : The first number + + + + + y (integer ): The second number + Returns: + integer: The sum of x and y + """ + pass + + tool = convert_function_to_tool(add_two_numbers).model_dump() + assert tool['function']['parameters']['properties']['x']['description'] == ': The first number' + assert tool['function']['parameters']['properties']['y']['description'] == 'The second number' + + +def test_function_with_no_docstring(): + def no_docstring(): + pass + + def no_docstring_with_args(x: int, y: int): + pass + + tool = convert_function_to_tool(no_docstring).model_dump() + assert tool['function']['description'] == '' + + tool = convert_function_to_tool(no_docstring_with_args).model_dump() + assert tool['function']['description'] == '' + assert tool['function']['parameters']['properties']['x']['description'] == '' + assert tool['function']['parameters']['properties']['y']['description'] == '' + + +def test_function_with_only_description(): + def only_description(): + """ + A function with only a description. + """ + pass + + tool = convert_function_to_tool(only_description).model_dump() + assert tool['function']['description'] == 'A function with only a description.' + assert tool['function']['parameters'] == {'type': 'object', 'properties': {}, 'required': None} + + def only_description_with_args(x: int, y: int): + """ + A function with only a description. + """ + pass + + tool = convert_function_to_tool(only_description_with_args).model_dump() + assert tool['function']['description'] == 'A function with only a description.' + assert tool['function']['parameters'] == { + 'type': 'object', + 'properties': { + 'x': {'type': 'integer', 'description': ''}, + 'y': {'type': 'integer', 'description': ''}, + }, + 'required': ['x', 'y'], + } + + +def test_function_with_yields(): + def function_with_yields(x: int, y: int): + """ + A function with yields section. + + Args: + x: the first number + y: the second number + + Yields: + The sum of x and y + """ + pass + + tool = convert_function_to_tool(function_with_yields).model_dump() + assert tool['function']['description'] == 'A function with yields section.' + assert tool['function']['parameters']['properties']['x']['description'] == 'the first number' + assert tool['function']['parameters']['properties']['y']['description'] == 'the second number' + + +def test_function_with_no_types(): + def no_types(a, b): + """ + A function with no types. + """ + pass + + tool = convert_function_to_tool(no_types).model_dump() + assert tool['function']['parameters']['properties']['a']['type'] == 'string' + assert tool['function']['parameters']['properties']['b']['type'] == 'string' + + +def test_function_with_parentheses(): + def func_with_parentheses(a: int, b: int) -> int: + """ + A function with parentheses. 
+ Args: + a: First (:thing) number to add + b: Second number to add + Returns: + int: The sum of a and b + """ + pass + + def func_with_parentheses_and_args(a: int, b: int): + """ + A function with parentheses and args. + Args: + a(integer) : First (:thing) number to add + b(integer) :Second number to add + """ + pass + + tool = convert_function_to_tool(func_with_parentheses).model_dump() + assert tool['function']['parameters']['properties']['a']['description'] == 'First (:thing) number to add' + assert tool['function']['parameters']['properties']['b']['description'] == 'Second number to add' + + tool = convert_function_to_tool(func_with_parentheses_and_args).model_dump() + assert tool['function']['parameters']['properties']['a']['description'] == 'First (:thing) number to add' + assert tool['function']['parameters']['properties']['b']['description'] == 'Second number to add'
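# The utilities exercised above back the tools support in chat(): a plain Python
# function with a typed signature and a docstring can be passed directly via the
# `tools` parameter, and the library converts it to a tool schema for the model.
# A minimal usage sketch, not part of this diff, assuming a running Ollama server
# and a pulled llama3.2 model:

from ollama import chat

def add_two_numbers(x: int, y: int) -> int:
  """
  Add two numbers together.

  Args:
    x (integer): The first number
    y (integer): The second number

  Returns:
    integer: The sum of x and y
  """
  return x + y

response = chat(
  model='llama3.2',
  messages=[{'role': 'user', 'content': 'What is three plus one?'}],
  tools=[add_two_numbers],  # converted internally, as the tests above exercise
)
print(response.message.tool_calls)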