in codex-cli/src/utils/agent/agent-loop.ts [337:439]
  private async handleFunctionCall(
    item: ResponseFunctionToolCall,
  ): Promise<Array<ResponseInputItem>> {
    // If the agent has been canceled in the meantime we should not perform any
    // additional work. Returning an empty array ensures that we neither execute
    // the requested tool call nor enqueue any follow‑up input items. This keeps
    // the cancellation semantics intuitive for users – once they interrupt a
    // task no further actions related to that task should be taken.
    if (this.canceled) {
      return [];
    }

    // ---------------------------------------------------------------------
    // Normalise the function‑call item into a consistent shape regardless of
    // whether it originated from the `/responses` or the `/chat/completions`
    // endpoint – their JSON differs slightly.
    // ---------------------------------------------------------------------

    const isChatStyle =
      // The chat endpoint nests function details under a `function` key.
      // We conservatively treat the presence of this field as a signal that
      // we are dealing with the chat format.
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (item as any).function != null;

    const name: string | undefined = isChatStyle
      ? // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (item as any).function?.name
      : // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (item as any).name;

    const rawArguments: string | undefined = isChatStyle
      ? // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (item as any).function?.arguments
      : // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (item as any).arguments;
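
    // For illustration only (shapes paraphrased from the two wire formats, not
    // copied from the SDK types): a `/responses` function call arrives roughly as
    //   { type: "function_call", name: "shell", arguments: "{…}", call_id: "call_abc" }
    // while a `/chat/completions` tool call nests the details:
    //   { id: "call_abc", type: "function", function: { name: "shell", arguments: "{…}" } }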

    // The OpenAI "function_call" item may have either `call_id` (responses
    // endpoint) or `id` (chat endpoint). Prefer `call_id` if present but fall
    // back to `id` to remain compatible.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const callId: string = (item as any).call_id ?? (item as any).id;
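
    // `parseToolCallArguments` is expected to JSON-parse the raw string and
    // return the exec input – roughly `{ cmd, workdir?, timeoutInMillis? }`
    // (see the helper itself for the exact shape) – or `undefined` when the
    // payload is malformed, which the `args == null` guard below catches.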
    const args = parseToolCallArguments(rawArguments ?? "{}");
    log(
      `handleFunctionCall(): name=${
        name ?? "undefined"
      } callId=${callId} args=${rawArguments}`,
    );

    if (args == null) {
      const outputItem: ResponseInputItem.FunctionCallOutput = {
        type: "function_call_output",
        // Use the normalised `callId` here as well so chat-style items (which
        // only carry `id`) still echo back a defined identifier.
        call_id: callId,
        output: `invalid arguments: ${rawArguments}`,
      };
      return [outputItem];
    }

    const outputItem: ResponseInputItem.FunctionCallOutput = {
      type: "function_call_output",
      // `call_id` is mandatory – ensure we never send `undefined` which would
      // trigger the "No tool output found…" 400 from the API.
      call_id: callId,
      output: "no function found",
    };

    // We intentionally *do not* remove this `callId` from the `pendingAborts`
    // set right away. The output produced below is only queued up for the
    // *next* request to the OpenAI API – it has not been delivered yet. If
    // the user presses ESC‑ESC (i.e. invokes `cancel()`) in the small window
    // between queuing the result and the actual network call, we need to be
    // able to surface a synthetic `function_call_output` marked as
    // "aborted". Keeping the ID in the set until the run concludes
    // successfully lets the next `run()` differentiate between an aborted
    // tool call (needs the synthetic output) and a completed one (cleared
    // below in the `flush()` helper).
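    // (That synthetic item is just another `function_call_output` for this
    // `callId` whose payload marks the call as aborted, so the API never sees
    // a tool call without a matching output.)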

    // Any items pushed here are returned alongside the tool output, e.g. to
    // tell the model to stop if needed.
    const additionalItems: Array<ResponseInputItem> = [];

    // TODO: allow arbitrary function calls (beyond shell/container.exec)
    if (name === "container.exec" || name === "shell") {
      const {
        outputText,
        metadata,
        additionalItems: additionalItemsFromExec,
      } = await handleExecCommand(
        args,
        this.config,
        this.approvalPolicy,
        this.additionalWritableRoots,
        this.getCommandConfirmation,
        this.execAbortController?.signal,
      );
      outputItem.output = JSON.stringify({ output: outputText, metadata });

      if (additionalItemsFromExec) {
        additionalItems.push(...additionalItemsFromExec);
      }
    }

    return [outputItem, ...additionalItems];
  }
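
For a `shell` / `container.exec` call that completes, the resolved array looks roughly like the sketch below (hand-written illustration, not captured output; the real `output` text and `metadata` fields are whatever `handleExecCommand` returned):

  [
    {
      type: "function_call_output",
      call_id: "call_abc",
      output: JSON.stringify({
        output: "<captured command output>",
        metadata: { /* exec details reported by handleExecCommand */ },
      }),
    },
    // ...plus any additionalItems that handleExecCommand asked to forward.
  ]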