Thanks to visit codestin.com
Credit goes to github.com

Skip to content

Commit fcafae1

Browse files
authored
Add reasoning content - fix on openai#494 (openai#871)
my personal update on openai#494 Key changes: - Updated `ResponseReasoningItem` to use the correct structure with `id`, `summary`, and `type="reasoning"` fields - Fixed `ResponseReasoningSummaryPartAddedEvent` to include `summary_index` parameter and use a dictionary for the `part` parameter - Fixed `ResponseReasoningSummaryTextDeltaEvent` to include `summary_index` parameter - Updated `ResponseReasoningSummaryPartDoneEvent` to include `summary_index` and use a dictionary for `part` - Changed how the `Summary` object is accessed in tests (using `.text` property instead of subscript notation) - Updated `chatcmpl_converter.py` to use the `Summary` class instead of a dictionary for better handling Fixes openai#494
1 parent 27912d4 commit fcafae1

File tree

6 files changed

+643
-15
lines changed

6 files changed

+643
-15
lines changed
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
"""
2+
Examples demonstrating how to use models that provide reasoning content.
3+
"""

examples/reasoning_content/main.py

Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,124 @@
1+
"""
2+
Example demonstrating how to use the reasoning content feature with models that support it.
3+
4+
Some models, like deepseek-reasoner, provide a reasoning_content field in addition to the regular content.
5+
This example shows how to access and use this reasoning content from both streaming and non-streaming responses.
6+
7+
To run this example, you need to:
8+
1. Set your OPENAI_API_KEY environment variable
9+
2. Use a model that supports reasoning content (e.g., deepseek-reasoner)
10+
"""
11+
12+
import asyncio
13+
import os
14+
from typing import Any, cast
15+
16+
from agents import ModelSettings
17+
from agents.models.interface import ModelTracing
18+
from agents.models.openai_provider import OpenAIProvider
19+
from agents.types import ResponseOutputRefusal, ResponseOutputText # type: ignore
20+
21+
MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "deepseek-reasoner"
22+
23+
24+
async def stream_with_reasoning_content():
    """
    Stream a response from a model that exposes reasoning content.

    Reasoning deltas and regular text deltas arrive as distinct event
    types; each is echoed in its own terminal color and accumulated into
    its own buffer, then both buffers are printed at the end.
    """
    model = OpenAIProvider().get_model(MODEL_NAME)

    print("\n=== Streaming Example ===")
    print("Prompt: Write a haiku about recursion in programming")

    reasoning_buf = ""
    text_buf = ""

    stream = model.stream_response(
        system_instructions="You are a helpful assistant that writes creative content.",
        input="Write a haiku about recursion in programming",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
        prompt=None,
    )
    async for event in stream:
        if event.type == "response.reasoning_summary_text.delta":
            # Yellow: reasoning content
            print(f"\033[33m{event.delta}\033[0m", end="", flush=True)
            reasoning_buf += event.delta
        elif event.type == "response.output_text.delta":
            # Green: regular content
            print(f"\033[32m{event.delta}\033[0m", end="", flush=True)
            text_buf += event.delta

    print("\n\nReasoning Content:")
    print(reasoning_buf)
    print("\nRegular Content:")
    print(text_buf)
    print("\n")
63+
64+
65+
async def get_response_with_reasoning_content():
    """
    Fetch a complete (non-streaming) response from a model that provides
    reasoning content.

    The reasoning appears as a separate item with ``type == "reasoning"``
    in the response output, alongside the regular ``"message"`` item.
    """
    provider = OpenAIProvider()
    model = provider.get_model(MODEL_NAME)

    print("\n=== Non-streaming Example ===")
    print("Prompt: Explain the concept of recursion in programming")

    response = await model.get_response(
        system_instructions="You are a helpful assistant that explains technical concepts clearly.",
        input="Explain the concept of recursion in programming",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
        prompt=None,
    )

    # Extract reasoning content and regular content from the response
    reasoning_content = None
    regular_content = None

    for item in response.output:
        item_type = getattr(item, "type", None)
        if item_type == "reasoning":
            # Guard against an empty summary list: a reasoning item with no
            # summary entries would otherwise raise IndexError here.
            if item.summary:
                reasoning_content = item.summary[0].text
        elif item_type == "message":
            if item.content and len(item.content) > 0:
                content_item = item.content[0]
                if isinstance(content_item, ResponseOutputText):
                    regular_content = content_item.text
                elif isinstance(content_item, ResponseOutputRefusal):
                    refusal_item = cast(Any, content_item)
                    regular_content = refusal_item.refusal

    print("\nReasoning Content:")
    print(reasoning_content or "No reasoning content provided")

    print("\nRegular Content:")
    print(regular_content or "No regular content provided")

    print("\n")
111+
112+
113+
async def main():
    """Run both examples, printing a helpful hint if the model lacks support."""
    try:
        await stream_with_reasoning_content()
        await get_response_with_reasoning_content()
    except Exception as exc:
        # Reasoning content is model-dependent; surface the failure with guidance.
        print(f"Error: {exc}")
        print("\nNote: This example requires a model that supports reasoning content.")
        print("You may need to use a specific model like deepseek-reasoner or similar.")


if __name__ == "__main__":
    asyncio.run(main())
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
"""
2+
Example demonstrating how to use the reasoning content feature with the Runner API.
3+
4+
This example shows how to extract and use reasoning content from responses when using
5+
the Runner API, which is the most common way users interact with the Agents library.
6+
7+
To run this example, you need to:
8+
1. Set your OPENAI_API_KEY environment variable
9+
2. Use a model that supports reasoning content (e.g., deepseek-reasoner)
10+
"""
11+
12+
import asyncio
13+
import os
14+
from typing import Any
15+
16+
from agents import Agent, Runner, trace
17+
from agents.items import ReasoningItem
18+
19+
MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "deepseek-reasoner"
20+
21+
22+
async def main():
    """
    Demonstrate reasoning-content extraction through the Runner API.

    Runs one non-streaming and one streaming example against an agent
    backed by a reasoning-capable model (e.g. deepseek-reasoner).
    """
    print(f"Using model: {MODEL_NAME}")

    # Create an agent with a model that supports reasoning content
    agent = Agent(
        name="Reasoning Agent",
        instructions="You are a helpful assistant that explains your reasoning step by step.",
        model=MODEL_NAME,
    )

    # Example 1: Non-streaming response
    with trace("Reasoning Content - Non-streaming"):
        print("\n=== Example 1: Non-streaming response ===")
        result = await Runner.run(
            agent, "What is the square root of 841? Please explain your reasoning."
        )

        # RunResult has no `response` attribute; generated items live in
        # `new_items`. A ReasoningItem is a wrapper whose `raw_item` holds
        # the raw reasoning item with the `summary` list.
        reasoning_content = None
        for item in result.new_items:
            if isinstance(item, ReasoningItem):
                raw: Any = item.raw_item
                if raw.summary:  # guard: summary list may be empty
                    reasoning_content = raw.summary[0].text
                break

        print("\nReasoning Content:")
        print(reasoning_content or "No reasoning content provided")

        print("\nFinal Output:")
        print(result.final_output)

    # Example 2: Streaming response
    with trace("Reasoning Content - Streaming"):
        print("\n=== Example 2: Streaming response ===")
        print("\nStreaming response:")

        # Buffers to collect reasoning and regular content
        reasoning_buffer = ""
        content_buffer = ""

        stream = Runner.run_streamed(agent, "What is 15 x 27? Please explain your reasoning.")

        # RunResultStreaming is not itself async-iterable; consume events
        # via stream_events(). Reasoning and answer deltas arrive as raw
        # response events with distinct `type` strings.
        async for event in stream.stream_events():
            if event.type != "raw_response_event":
                continue
            data: Any = event.data
            if data.type == "response.reasoning_summary_text.delta":
                reasoning_buffer += data.delta
                # Yellow for reasoning
                print(f"\033[33m{data.delta}\033[0m", end="", flush=True)
            elif data.type == "response.output_text.delta":
                content_buffer += data.delta
                # Green for regular content
                print(f"\033[32m{data.delta}\033[0m", end="", flush=True)

        print("\n\nCollected Reasoning Content:")
        print(reasoning_buffer)

        print("\nCollected Final Answer:")
        print(content_buffer)


if __name__ == "__main__":
    asyncio.run(main())

src/agents/models/chatcmpl_converter.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,8 +33,10 @@
3333
ResponseOutputMessageParam,
3434
ResponseOutputRefusal,
3535
ResponseOutputText,
36+
ResponseReasoningItem,
3637
)
3738
from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
39+
from openai.types.responses.response_reasoning_item import Summary
3840

3941
from ..agent_output import AgentOutputSchemaBase
4042
from ..exceptions import AgentsException, UserError
@@ -85,6 +87,16 @@ def convert_response_format(
8587
def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
8688
items: list[TResponseOutputItem] = []
8789

90+
# Handle reasoning content if available
91+
if hasattr(message, "reasoning_content") and message.reasoning_content:
92+
items.append(
93+
ResponseReasoningItem(
94+
id=FAKE_RESPONSES_ID,
95+
summary=[Summary(text=message.reasoning_content, type="summary_text")],
96+
type="reasoning",
97+
)
98+
)
99+
88100
message_item = ResponseOutputMessage(
89101
id=FAKE_RESPONSES_ID,
90102
content=[],

0 commit comments

Comments
 (0)