in src/WebJobs.Extensions.OpenAI/Assistants/AssistantService.cs [243:285]
async Task ProcessConversationWithLLM(
    AssistantPostAttribute attribute,
    InternalChatState chatState,
    List<TableTransactionAction> batch,
    CancellationToken cancellationToken)
{
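    // Tool definitions for the registered assistant skills; these are passed
    // to the model on every request so it can ask for function calls.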
    IList<ChatTool>? functions = this.skillInvoker.GetFunctionsDefinitions();

    // We loop if the model returns function calls. Otherwise, we break after
    // receiving a response.
    while (true)
    {
        // Get the LLM response
        ClientResult<ChatCompletion> response = await this.GetLLMResponse(attribute, chatState, functions, cancellationToken);

        // Record the assistant's reply if it contains text or tool calls
        string replyMessage = this.FormatReplyMessage(response);
        if (!string.IsNullOrWhiteSpace(replyMessage) || response.Value.ToolCalls.Any())
        {
            this.LogAndAddAssistantReply(attribute.Id, replyMessage, response, chatState, batch);
        }

        // Update token count
        chatState.Metadata.TotalTokens = response.Value.Usage.TotalTokenCount;

        // Handle function calls
        List<ChatToolCall> functionCalls = response.Value.ToolCalls.OfType<ChatToolCall>().ToList();
        if (functionCalls.Count == 0)
        {
            // No function calls, so we're done
            break;
        }

        if (batch.Count > FunctionCallBatchLimit)
        {
            // The accumulated batch of table writes has grown past the limit,
            // which suggests the model is stuck requesting function calls.
            this.LogBatchLimitExceeded(attribute.Id, functionCalls.Count);
            break;
        }

        // Process function calls
        await this.ProcessFunctionCalls(attribute.Id, functionCalls, chatState, batch, cancellationToken);
    }
}
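
For context, here is a minimal, self-contained sketch of the same request/tool-call/respond loop written directly against the public OpenAI .NET SDK (OpenAI.Chat), roughly the round trip that GetLLMResponse and ProcessFunctionCalls wrap. The GetWeather tool, its canned result, and the batch limit value are illustrative assumptions, not the extension's actual skill plumbing.

// Minimal sketch of the chat tool-call loop; assumed names are marked below.
using OpenAI.Chat;

ChatClient client = new(model: "gpt-4o", apiKey: Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

List<ChatMessage> messages = [new UserChatMessage("What's the weather in Seattle?")];

ChatCompletionOptions options = new();
options.Tools.Add(ChatTool.CreateFunctionTool(
    functionName: "GetWeather", // hypothetical skill for illustration
    functionDescription: "Gets the current weather for a location."));

const int FunctionCallBatchLimit = 50; // assumed value; mirrors the guard above
int toolResultsAdded = 0;

while (true)
{
    ChatCompletion completion = await client.CompleteChatAsync(messages, options);
    messages.Add(new AssistantChatMessage(completion));

    // A plain text response ends the loop, like the Count == 0 branch above.
    if (completion.FinishReason != ChatFinishReason.ToolCalls)
    {
        Console.WriteLine(completion.Content[0].Text);
        break;
    }

    // Safety valve against a runaway function-calling loop.
    if (toolResultsAdded > FunctionCallBatchLimit)
    {
        break;
    }

    foreach (ChatToolCall toolCall in completion.ToolCalls)
    {
        // Dispatch to the named function and feed its result back to the model.
        string result = toolCall.FunctionName == "GetWeather" ? "57°F and cloudy" : "unknown function";
        messages.Add(new ToolChatMessage(toolCall.Id, result));
        toolResultsAdded++;
    }
}

Feeding each ToolChatMessage back and looping is what lets the model chain multiple function calls within a single logical turn; the batch limit bounds that chaining so a misbehaving model cannot loop indefinitely.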