Prompts support #876

Merged 1 commit on Jun 16, 2025

79 changes: 79 additions & 0 deletions examples/basic/prompt_template.py
@@ -0,0 +1,79 @@
import argparse
import asyncio
import random

from agents import Agent, GenerateDynamicPromptData, Runner

"""
NOTE: This example will not work out of the box, because the default prompt ID will not be available
in your project.

To use it, please:
1. Go to https://platform.openai.com/playground/prompts
2. Create a new prompt variable, `poem_style`.
3. Create a system prompt with the content:
```
Write a poem in {{poem_style}}
```
4. Run the example with the `--prompt-id` flag.
"""

DEFAULT_PROMPT_ID = "pmpt_6850729e8ba481939fd439e058c69ee004afaa19c520b78b"


class DynamicContext:
    def __init__(self, prompt_id: str):
        self.prompt_id = prompt_id
        self.poem_style = random.choice(["limerick", "haiku", "ballad"])
        print(f"[debug] DynamicContext initialized with poem_style: {self.poem_style}")


async def _get_dynamic_prompt(data: GenerateDynamicPromptData):
    ctx: DynamicContext = data.context.context
    return {
        "id": ctx.prompt_id,
        "version": "1",
        "variables": {
            "poem_style": ctx.poem_style,
        },
    }


async def dynamic_prompt(prompt_id: str):
    context = DynamicContext(prompt_id)

    agent = Agent(
        name="Assistant",
        prompt=_get_dynamic_prompt,
    )

    result = await Runner.run(agent, "Tell me about recursion in programming.", context=context)
    print(result.final_output)


async def static_prompt(prompt_id: str):
    agent = Agent(
        name="Assistant",
        prompt={
            "id": prompt_id,
            "version": "1",
            "variables": {
                "poem_style": "limerick",
            },
        },
    )

    result = await Runner.run(agent, "Tell me about recursion in programming.")
    print(result.final_output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dynamic", action="store_true")
    parser.add_argument("--prompt-id", type=str, default=DEFAULT_PROMPT_ID)
    args = parser.parse_args()

    if args.dynamic:
        asyncio.run(dynamic_prompt(args.prompt_id))
    else:
        asyncio.run(static_prompt(args.prompt_id))
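
For context on the setup steps in the docstring above: the system prompt stored in the playground is a template, and the `variables` mapping supplied through `Agent.prompt` is substituted into its `{{...}}` placeholders by the Responses API. A rough local illustration of that substitution (the real templating happens server-side; this helper is hypothetical and not part of the SDK):

```python
def render_prompt(template: str, variables: dict[str, str]) -> str:
    """Mimic the server-side substitution of {{variable}} placeholders (illustration only)."""
    for name, value in variables.items():
        template = template.replace("{{" + name + "}}", value)
    return template


print(render_prompt("Write a poem in {{poem_style}}", {"poem_style": "haiku"}))
# -> Write a poem in haiku
```
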
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -7,7 +7,7 @@ requires-python = ">=3.9"
license = "MIT"
authors = [{ name = "OpenAI", email = "[email protected]" }]
dependencies = [
    "openai>=1.81.0",
    "openai>=1.87.0",
    "pydantic>=2.10, <3",
    "griffe>=1.5.6, <2",
    "typing-extensions>=4.12.2, <5",
4 changes: 4 additions & 0 deletions src/agents/__init__.py
@@ -45,6 +45,7 @@
from .models.openai_chatcompletions import OpenAIChatCompletionsModel
from .models.openai_provider import OpenAIProvider
from .models.openai_responses import OpenAIResponsesModel
from .prompts import DynamicPromptFunction, GenerateDynamicPromptData, Prompt
from .repl import run_demo_loop
from .result import RunResult, RunResultStreaming
from .run import RunConfig, Runner
@@ -178,6 +179,9 @@ def enable_verbose_stdout_logging():
    "AgentsException",
    "InputGuardrailTripwireTriggered",
    "OutputGuardrailTripwireTriggered",
    "DynamicPromptFunction",
    "GenerateDynamicPromptData",
    "Prompt",
    "MaxTurnsExceeded",
    "ModelBehaviorError",
    "UserError",
14 changes: 14 additions & 0 deletions src/agents/agent.py
@@ -7,6 +7,7 @@
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, cast

from openai.types.responses.response_prompt_param import ResponsePromptParam
from typing_extensions import NotRequired, TypeAlias, TypedDict

from .agent_output import AgentOutputSchemaBase
@@ -17,6 +18,7 @@
from .mcp import MCPUtil
from .model_settings import ModelSettings
from .models.interface import Model
from .prompts import DynamicPromptFunction, Prompt, PromptUtil
from .run_context import RunContextWrapper, TContext
from .tool import FunctionTool, FunctionToolResult, Tool, function_tool
from .util import _transforms
@@ -95,6 +97,12 @@ class Agent(Generic[TContext]):
    return a string.
    """

    prompt: Prompt | DynamicPromptFunction | None = None
    """A prompt object (or a function that returns a Prompt). Prompts allow you to dynamically
    configure the instructions, tools and other config for an agent outside of your code. Only
    usable with OpenAI models, using the Responses API.
    """

    handoff_description: str | None = None
    """A description of the agent. This is used when the agent is used as a handoff, so that an
    LLM knows what it does and when to invoke it.
@@ -242,6 +250,12 @@ async def get_system_prompt(self, run_context: RunContextWrapper[TContext]) -> s

        return None

    async def get_prompt(
        self, run_context: RunContextWrapper[TContext]
    ) -> ResponsePromptParam | None:
        """Get the prompt for the agent."""
        return await PromptUtil.to_model_input(self.prompt, run_context, self)

    async def get_mcp_tools(self) -> list[Tool]:
        """Fetches the available tools from the MCP servers."""
        convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False)
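
The new `prompt` field accepts either a `Prompt` dict or a `DynamicPromptFunction`, as the docstring above describes and as the example added in this PR exercises. A condensed sketch of both forms (the `pmpt_...` ID is a placeholder; per the docstring, only OpenAI models using the Responses API honor the prompt):

```python
from agents import Agent, GenerateDynamicPromptData

# Static form: a fixed prompt ID, version, and variables.
static_agent = Agent(
    name="Assistant",
    prompt={"id": "pmpt_...", "version": "1", "variables": {"poem_style": "haiku"}},
)


# Dynamic form: an async function that resolves the prompt per run,
# e.g. based on the run context carried in `data`.
async def resolve_prompt(data: GenerateDynamicPromptData):
    return {"id": "pmpt_...", "version": "1", "variables": {"poem_style": "limerick"}}


dynamic_agent = Agent(name="Assistant", prompt=resolve_prompt)
```
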
8 changes: 7 additions & 1 deletion src/agents/extensions/models/litellm_model.py
@@ -71,6 +71,7 @@ async def get_response(
handoffs: list[Handoff],
tracing: ModelTracing,
previous_response_id: str | None,
prompt: Any | None = None,
) -> ModelResponse:
with generation_span(
model=str(self.model),
@@ -88,6 +89,7 @@
span_generation,
tracing,
stream=False,
prompt=prompt,
)

assert isinstance(response.choices[0], litellm.types.utils.Choices)
@@ -153,8 +155,8 @@ async def stream_response(
output_schema: AgentOutputSchemaBase | None,
handoffs: list[Handoff],
tracing: ModelTracing,
*,
previous_response_id: str | None,
prompt: Any | None = None,
) -> AsyncIterator[TResponseStreamEvent]:
with generation_span(
model=str(self.model),
@@ -172,6 +174,7 @@
span_generation,
tracing,
stream=True,
prompt=prompt,
)

final_response: Response | None = None
@@ -202,6 +205,7 @@ async def _fetch_response(
span: Span[GenerationSpanData],
tracing: ModelTracing,
stream: Literal[True],
prompt: Any | None = None,
) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...

@overload
@@ -216,6 +220,7 @@ async def _fetch_response(
span: Span[GenerationSpanData],
tracing: ModelTracing,
stream: Literal[False],
prompt: Any | None = None,
) -> litellm.types.utils.ModelResponse: ...

async def _fetch_response(
@@ -229,6 +234,7 @@
span: Span[GenerationSpanData],
tracing: ModelTracing,
stream: bool = False,
prompt: Any | None = None,
) -> litellm.types.utils.ModelResponse | tuple[Response, AsyncStream[ChatCompletionChunk]]:
converted_messages = Converter.items_to_messages(input)

6 changes: 6 additions & 0 deletions src/agents/models/interface.py
@@ -5,6 +5,8 @@
from collections.abc import AsyncIterator
from typing import TYPE_CHECKING

from openai.types.responses.response_prompt_param import ResponsePromptParam

from ..agent_output import AgentOutputSchemaBase
from ..handoffs import Handoff
from ..items import ModelResponse, TResponseInputItem, TResponseStreamEvent
@@ -46,6 +48,7 @@ async def get_response(
tracing: ModelTracing,
*,
previous_response_id: str | None,
prompt: ResponsePromptParam | None,
) -> ModelResponse:
"""Get a response from the model.

@@ -59,6 +62,7 @@
tracing: Tracing configuration.
previous_response_id: the ID of the previous response. Generally not used by the model,
except for the OpenAI Responses API.
prompt: The prompt config to use for the model.

Returns:
The full model response.
@@ -77,6 +81,7 @@ def stream_response(
tracing: ModelTracing,
*,
previous_response_id: str | None,
prompt: ResponsePromptParam | None,
) -> AsyncIterator[TResponseStreamEvent]:
"""Stream a response from the model.

@@ -90,6 +95,7 @@
tracing: Tracing configuration.
previous_response_id: the ID of the previous response. Generally not used by the model,
except for the OpenAI Responses API.
prompt: The prompt config to use for the model.

Returns:
An iterator of response stream events, in OpenAI Responses format.
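
One implication of the signature changes above: any custom `Model` implementation outside this repository now needs to accept the keyword-only `prompt` argument on both `get_response` and `stream_response`, even if it ignores it. A rough, hypothetical forwarding wrapper (not part of this PR) sketching how an older model could be adapted:

```python
from collections.abc import AsyncIterator
from typing import Any


class PromptAgnosticModel:
    """Wraps a model written before the `prompt` parameter existed and drops it."""

    def __init__(self, inner: Any) -> None:
        self.inner = inner

    async def get_response(self, *args: Any, prompt: Any = None, **kwargs: Any) -> Any:
        # `prompt` is only meaningful for the OpenAI Responses API, so it is dropped here.
        return await self.inner.get_response(*args, **kwargs)

    def stream_response(self, *args: Any, prompt: Any = None, **kwargs: Any) -> AsyncIterator[Any]:
        return self.inner.stream_response(*args, **kwargs)
```
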
9 changes: 8 additions & 1 deletion src/agents/models/openai_chatcompletions.py
@@ -9,6 +9,7 @@
from openai.types import ChatModel
from openai.types.chat import ChatCompletion, ChatCompletionChunk
from openai.types.responses import Response
from openai.types.responses.response_prompt_param import ResponsePromptParam
from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

from .. import _debug
@@ -53,6 +54,7 @@ async def get_response(
handoffs: list[Handoff],
tracing: ModelTracing,
previous_response_id: str | None,
prompt: ResponsePromptParam | None = None,
) -> ModelResponse:
with generation_span(
model=str(self.model),
@@ -69,6 +71,7 @@
span_generation,
tracing,
stream=False,
prompt=prompt,
)

first_choice = response.choices[0]
@@ -136,8 +139,8 @@ async def stream_response(
output_schema: AgentOutputSchemaBase | None,
handoffs: list[Handoff],
tracing: ModelTracing,
*,
previous_response_id: str | None,
prompt: ResponsePromptParam | None = None,
) -> AsyncIterator[TResponseStreamEvent]:
"""
Yields a partial message as it is generated, as well as the usage information.
@@ -157,6 +160,7 @@
span_generation,
tracing,
stream=True,
prompt=prompt,
)

final_response: Response | None = None
@@ -187,6 +191,7 @@ async def _fetch_response(
span: Span[GenerationSpanData],
tracing: ModelTracing,
stream: Literal[True],
prompt: ResponsePromptParam | None = None,
) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...

@overload
Expand All @@ -201,6 +206,7 @@ async def _fetch_response(
span: Span[GenerationSpanData],
tracing: ModelTracing,
stream: Literal[False],
prompt: ResponsePromptParam | None = None,
) -> ChatCompletion: ...

async def _fetch_response(
Expand All @@ -214,6 +220,7 @@ async def _fetch_response(
span: Span[GenerationSpanData],
tracing: ModelTracing,
stream: bool = False,
prompt: ResponsePromptParam | None = None,
) -> ChatCompletion | tuple[Response, AsyncStream[ChatCompletionChunk]]:
converted_messages = Converter.items_to_messages(input)

9 changes: 9 additions & 0 deletions src/agents/models/openai_responses.py
@@ -17,6 +17,7 @@
WebSearchToolParam,
response_create_params,
)
from openai.types.responses.response_prompt_param import ResponsePromptParam

from .. import _debug
from ..agent_output import AgentOutputSchemaBase
@@ -74,6 +75,7 @@ async def get_response(
handoffs: list[Handoff],
tracing: ModelTracing,
previous_response_id: str | None,
prompt: ResponsePromptParam | None = None,
) -> ModelResponse:
with response_span(disabled=tracing.is_disabled()) as span_response:
try:
@@ -86,6 +88,7 @@
handoffs,
previous_response_id,
stream=False,
prompt=prompt,
)

if _debug.DONT_LOG_MODEL_DATA:
@@ -141,6 +144,7 @@ async def stream_response(
handoffs: list[Handoff],
tracing: ModelTracing,
previous_response_id: str | None,
prompt: ResponsePromptParam | None = None,
) -> AsyncIterator[ResponseStreamEvent]:
"""
Yields a partial message as it is generated, as well as the usage information.
@@ -156,6 +160,7 @@
handoffs,
previous_response_id,
stream=True,
prompt=prompt,
)

final_response: Response | None = None
@@ -192,6 +197,7 @@ async def _fetch_response(
handoffs: list[Handoff],
previous_response_id: str | None,
stream: Literal[True],
prompt: ResponsePromptParam | None = None,
) -> AsyncStream[ResponseStreamEvent]: ...

@overload
@@ -205,6 +211,7 @@ async def _fetch_response(
handoffs: list[Handoff],
previous_response_id: str | None,
stream: Literal[False],
prompt: ResponsePromptParam | None = None,
) -> Response: ...

async def _fetch_response(
@@ -217,6 +224,7 @@
handoffs: list[Handoff],
previous_response_id: str | None,
stream: Literal[True] | Literal[False] = False,
prompt: ResponsePromptParam | None = None,
) -> Response | AsyncStream[ResponseStreamEvent]:
list_input = ItemHelpers.input_to_new_input_list(input)

@@ -252,6 +260,7 @@ async def _fetch_response(
input=list_input,
include=converted_tools.includes,
tools=converted_tools.tools,
prompt=self._non_null_or_not_given(prompt),
temperature=self._non_null_or_not_given(model_settings.temperature),
top_p=self._non_null_or_not_given(model_settings.top_p),
truncation=self._non_null_or_not_given(model_settings.truncation),