From 37b6c7ecdec4bb8449662afaa4621dbd6ccad4af Mon Sep 17 00:00:00 2001
From: Dominik Kundel
Date: Fri, 6 Jun 2025 11:33:04 -0700
Subject: [PATCH] chore(openai): add more accurate debug logging

---
 .changeset/clever-eels-lay.md                 |  5 ++
 .../src/openaiChatCompletionsModel.ts         | 65 +++++++++----------
 .../agents-openai/src/openaiResponsesModel.ts | 56 +++++++---------
 3 files changed, 58 insertions(+), 68 deletions(-)
 create mode 100644 .changeset/clever-eels-lay.md

diff --git a/.changeset/clever-eels-lay.md b/.changeset/clever-eels-lay.md
new file mode 100644
index 0000000..3cf122c
--- /dev/null
+++ b/.changeset/clever-eels-lay.md
@@ -0,0 +1,5 @@
+---
+'@openai/agents-openai': patch
+---
+
+chore(openai): add more accurate debug logging
diff --git a/packages/agents-openai/src/openaiChatCompletionsModel.ts b/packages/agents-openai/src/openaiChatCompletionsModel.ts
index a9b7c77..74177de 100644
--- a/packages/agents-openai/src/openaiChatCompletionsModel.ts
+++ b/packages/agents-openai/src/openaiChatCompletionsModel.ts
@@ -245,20 +245,6 @@ export class OpenAIChatCompletionsModel implements Model {
       parallelToolCalls = request.modelSettings.parallelToolCalls;
     }
 
-    if (logger.dontLogModelData) {
-      logger.debug('Calling LLM');
-    } else {
-      logger.debug(
-        [
-          `Calling LLM ${this.#model} with input:`,
-          JSON.stringify(request.input, null, 2),
-          `Tools: ${JSON.stringify(tools, null, 2)}`,
-          `Stream: ${stream}`,
-          `Response format: ${JSON.stringify(responseFormat, null, 2)}`,
-        ].join('\n'),
-      );
-    }
-
     const messages = itemsToMessages(request.input);
     if (request.systemInstructions) {
       messages.unshift({
@@ -271,28 +257,35 @@
       span.spanData.input = messages;
     }
 
-    const completion = await this.#client.chat.completions.create(
-      {
-        model: this.#model,
-        messages,
-        tools,
-        temperature: request.modelSettings.temperature,
-        top_p: request.modelSettings.topP,
-        frequency_penalty: request.modelSettings.frequencyPenalty,
-        presence_penalty: request.modelSettings.presencePenalty,
-        max_tokens: request.modelSettings.maxTokens,
-        tool_choice: convertToolChoice(request.modelSettings.toolChoice),
-        response_format: responseFormat,
-        parallel_tool_calls: parallelToolCalls,
-        stream,
-        store: request.modelSettings.store,
-        ...request.modelSettings.providerData,
-      },
-      {
-        headers: HEADERS,
-        signal: request.signal,
-      },
-    );
+    const requestData = {
+      model: this.#model,
+      messages,
+      tools,
+      temperature: request.modelSettings.temperature,
+      top_p: request.modelSettings.topP,
+      frequency_penalty: request.modelSettings.frequencyPenalty,
+      presence_penalty: request.modelSettings.presencePenalty,
+      max_tokens: request.modelSettings.maxTokens,
+      tool_choice: convertToolChoice(request.modelSettings.toolChoice),
+      response_format: responseFormat,
+      parallel_tool_calls: parallelToolCalls,
+      stream,
+      store: request.modelSettings.store,
+      ...request.modelSettings.providerData,
+    };
+
+    if (logger.dontLogModelData) {
+      logger.debug('Calling LLM');
+    } else {
+      logger.debug(
+        `Calling LLM. Request data: ${JSON.stringify(requestData, null, 2)}`,
+      );
+    }
+
+    const completion = await this.#client.chat.completions.create(requestData, {
+      headers: HEADERS,
+      signal: request.signal,
+    });
 
     if (logger.dontLogModelData) {
       logger.debug('Response received');
diff --git a/packages/agents-openai/src/openaiResponsesModel.ts b/packages/agents-openai/src/openaiResponsesModel.ts
index 4b00944..d676761 100644
--- a/packages/agents-openai/src/openaiResponsesModel.ts
+++ b/packages/agents-openai/src/openaiResponsesModel.ts
@@ -640,45 +640,37 @@
       parallelToolCalls = request.modelSettings.parallelToolCalls;
     }
 
+    const requestData = {
+      instructions: request.systemInstructions,
+      model: this.#model,
+      input,
+      include,
+      tools,
+      previous_response_id: request.previousResponseId,
+      temperature: request.modelSettings.temperature,
+      top_p: request.modelSettings.topP,
+      truncation: request.modelSettings.truncation,
+      max_output_tokens: request.modelSettings.maxTokens,
+      tool_choice: toolChoice as ToolChoiceOptions,
+      parallel_tool_calls: parallelToolCalls,
+      stream,
+      text: responseFormat,
+      store: request.modelSettings.store,
+      ...request.modelSettings.providerData,
+    };
+
     if (logger.dontLogModelData) {
       logger.debug('Calling LLM');
     } else {
       logger.debug(
-        [
-          `Calling LLM ${this.#model} with input:`,
-          JSON.stringify(request.input, null, 2),
-          `Tools: ${JSON.stringify(tools, null, 2)}`,
-          `Stream: ${stream}`,
-          `Tool choice: ${toolChoice}`,
-          `Response format: ${JSON.stringify(responseFormat, null, 2)}`,
-        ].join('\n'),
+        `Calling LLM. Request data: ${JSON.stringify(requestData, null, 2)}`,
       );
     }
 
-    const response = await this.#client.responses.create(
-      {
-        instructions: request.systemInstructions,
-        model: this.#model,
-        input,
-        include,
-        tools,
-        previous_response_id: request.previousResponseId,
-        temperature: request.modelSettings.temperature,
-        top_p: request.modelSettings.topP,
-        truncation: request.modelSettings.truncation,
-        max_output_tokens: request.modelSettings.maxTokens,
-        tool_choice: toolChoice as ToolChoiceOptions,
-        parallel_tool_calls: parallelToolCalls,
-        stream,
-        text: responseFormat,
-        store: request.modelSettings.store,
-        ...request.modelSettings.providerData,
-      },
-      {
-        headers: HEADERS,
-        signal: request.signal,
-      },
-    );
+    const response = await this.#client.responses.create(requestData, {
+      headers: HEADERS,
+      signal: request.signal,
+    });
 
     if (logger.dontLogModelData) {
       logger.debug('Response received');