 from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
 from openai.types import ChatModel
-from openai.types.chat import ChatCompletion, ChatCompletionChunk
+from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
+from openai.types.chat.chat_completion import Choice
 from openai.types.responses import Response
 from openai.types.responses.response_prompt_param import ResponsePromptParam
 from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
@@ -74,8 +75,11 @@ async def get_response(
             prompt=prompt,
         )

-        first_choice = response.choices[0]
-        message = first_choice.message
+        message: ChatCompletionMessage | None = None
+        first_choice: Choice | None = None
+        if response.choices and len(response.choices) > 0:
+            first_choice = response.choices[0]
+            message = first_choice.message

         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Received model response")
@@ -86,10 +90,8 @@ async def get_response(
                 json.dumps(message.model_dump(), indent=2),
             )
         else:
-            logger.debug(
-                "LLM resp had no message. finish_reason: %s",
-                first_choice.finish_reason,
-            )
+            finish_reason = first_choice.finish_reason if first_choice else "-"
+            logger.debug(f"LLM resp had no message. finish_reason: {finish_reason}")

         usage = (
             Usage(
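
Note on the guard: the old code indexed `response.choices[0]` unconditionally, which raises IndexError whenever the API returns an empty `choices` list; the diff makes both `first_choice` and `message` optional instead. A minimal standalone sketch of the same defensive pattern, using only the `openai` types imported above (`first_message` is a hypothetical helper for illustration, not part of this diff):

    from openai.types.chat import ChatCompletion, ChatCompletionMessage

    def first_message(response: ChatCompletion) -> ChatCompletionMessage | None:
        # An empty `choices` list would make `response.choices[0]` raise
        # IndexError, so check before indexing and fall back to None,
        # mirroring the guarded access in the diff above.
        if response.choices and len(response.choices) > 0:
            return response.choices[0].message
        return None

With `first_choice` allowed to be None, the logging branch likewise falls back to "-" rather than reading `finish_reason` from a choice that does not exist.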