in codex-cli/src/utils/responses.ts [319:443]
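/**
 * Converts a non-streaming Chat Completions result into a Responses-API-shaped
 * payload: the first choice's assistant message becomes the output items
 * (function_call entries and/or a single output_text), and the merged
 * transcript is recorded so a follow-up request can chain onto this response
 * via `previous_response_id`.
 *
 * @example
 * // Hypothetical caller; `openai`, `model`, `messages`, and `responseInput`
 * // are assumed to exist in the surrounding scope.
 * const completion = await openai.chat.completions.create({ model, messages });
 * const response = await nonStreamResponses(responseInput, completion);
 */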
async function nonStreamResponses(
input: ResponseCreateInput,
completion: OpenAI.Chat.Completions.ChatCompletion,
): Promise<ResponseOutput> {
const fullMessages = getFullMessages(input);
try {
const chatResponse = completion;
if (!("choices" in chatResponse) || chatResponse.choices.length === 0) {
throw new Error("No choices in chat completion response");
}
const assistantMessage = chatResponse.choices?.[0]?.message;
if (!assistantMessage) {
throw new Error("No assistant message in chat completion response");
}
    // Build the Responses-shaped output: synthesize ids, then translate the
    // assistant message into content items.
const responseId = generateId("resp");
const outputItemId = generateId("msg");
const outputContent: Array<ResponseContentOutput> = [];
// Check if the response contains tool calls
    const hasFunctionCalls = (assistantMessage.tool_calls?.length ?? 0) > 0;
if (hasFunctionCalls && assistantMessage.tool_calls) {
for (const toolCall of assistantMessage.tool_calls) {
if (toolCall.type === "function") {
outputContent.push({
type: "function_call",
call_id: toolCall.id,
name: toolCall.function.name,
arguments: toolCall.function.arguments,
});
}
}
}
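    // Any plain-text content becomes a single output_text item.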
if (assistantMessage.content) {
outputContent.push({
type: "output_text",
text: assistantMessage.content,
annotations: [],
});
}
    // Assemble the response object; status signals whether tool output is
    // still required before the turn can complete.
const responseOutput = {
id: responseId,
object: "response",
created_at: Math.floor(Date.now() / 1000),
status: hasFunctionCalls ? "requires_action" : "completed",
error: null,
incomplete_details: null,
instructions: null,
max_output_tokens: null,
model: chatResponse.model,
output: [
{
type: "message",
id: outputItemId,
status: "completed",
role: "assistant",
content: outputContent,
},
],
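      // Remaining fields echo the request or fall back to Responses defaults.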
parallel_tool_calls: input.parallel_tool_calls ?? false,
previous_response_id: input.previous_response_id ?? null,
reasoning: null,
temperature: input.temperature,
text: { format: { type: "text" } },
tool_choice: input.tool_choice ?? "auto",
tools: input.tools ?? [],
top_p: input.top_p,
truncation: input.truncation ?? "disabled",
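      // Chat usage maps onto the Responses usage shape; per-category detail
      // counts are not reported by Chat Completions, so they default to zero.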
usage: chatResponse.usage
? {
input_tokens: chatResponse.usage.prompt_tokens,
input_tokens_details: { cached_tokens: 0 },
output_tokens: chatResponse.usage.completion_tokens,
output_tokens_details: { reasoning_tokens: 0 },
total_tokens: chatResponse.usage.total_tokens,
}
: undefined,
user: input.user ?? undefined,
metadata: input.metadata ?? {},
      // Convenience aggregate of the text output.
      output_text: assistantMessage.content ?? "",
} as ResponseOutput;
    // Surface pending tool calls as a required_action, mirroring the
    // "submit_tool_outputs" shape: the caller runs the tools and submits the
    // results. ResponseOutput has no such field, so widen the type locally.
    if (hasFunctionCalls && assistantMessage.tool_calls) {
      type ResponseWithAction = Partial<ResponseOutput> & {
        required_action: unknown;
      };
      (responseOutput as ResponseWithAction).required_action = {
type: "submit_tool_outputs",
submit_tool_outputs: {
tool_calls: assistantMessage.tool_calls.map((toolCall) => ({
id: toolCall.id,
type: toolCall.type,
function: {
name: toolCall.function.name,
arguments: toolCall.function.arguments,
},
})),
},
};
}
    // Persist the merged transcript under the new response id so a follow-up
    // request can resume this conversation via previous_response_id.
const newHistory = [...fullMessages, assistantMessage];
conversationHistories.set(responseId, {
previous_response_id: input.previous_response_id ?? null,
messages: newHistory,
});
return responseOutput;
} catch (error) {
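    // Normalize anything thrown above into a single descriptive Error.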
const errorMessage = error instanceof Error ? error.message : String(error);
throw new Error(`Failed to process chat completion: ${errorMessage}`);
}
}