in pkg/rules/langchain/llm_setup.go [49:82]
func openaiGenerateContentOnExit(call api.CallContext, resp *llms.ContentResponse, err error) {
	// The matching enter hook stores the trace context and the request
	// snapshot in the call data; bail out if either is missing rather
	// than panicking on a failed type assertion.
	data, ok := call.GetData().(map[string]interface{})
	if !ok {
		return
	}
	request := langChainLLMRequest{}
	response := langChainLLMResponse{}
	ctx, ok := data["ctx"].(context.Context)
	if !ok {
		return
	}
	// Recover the request first so its attributes are recorded even when
	// the underlying call failed.
	if req, reqOk := data["request"].(langChainLLMRequest); reqOk {
		request = req
	}
	// On error there is no usable response; end the span with the error.
	if err != nil {
		langChainLLMInstrument.End(ctx, request, response, err)
		return
	}
	if len(resp.Choices) > 0 {
		// Record the stop reason of every returned choice.
		finishReasons := make([]string, 0, len(resp.Choices))
		for _, choice := range resp.Choices {
			finishReasons = append(finishReasons, choice.StopReason)
		}
		response.responseFinishReasons = finishReasons
		// langchaingo's OpenAI backend reports token usage through
		// GenerationInfo: "CompletionTokens" carries the output-token
		// count and "PromptTokens" the input-token count.
		info := resp.Choices[0].GenerationInfo
		if completionTokensAny, ok1 := info["CompletionTokens"]; ok1 {
			if completionTokens, ok2 := completionTokensAny.(int); ok2 {
				response.usageOutputTokens = int64(completionTokens)
			}
		}
		if promptTokensAny, ok1 := info["PromptTokens"]; ok1 {
			if promptTokens, ok2 := promptTokensAny.(int); ok2 {
				request.usageInputTokens = int64(promptTokens)
			}
		}
	}
	langChainLLMInstrument.End(ctx, request, response, nil)
}
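
This exit hook only works if a matching enter hook has stashed "ctx" and "request" under those keys before the instrumented call runs. Below is a minimal sketch of such an enter hook, not the repository's actual code: the hook name and parameter list are illustrative, and it assumes CallContext offers a SetData counterpart to the GetData call used above and that langChainLLMInstrument.Start(ctx, request) starts the span and returns the context to propagate.

// Hypothetical enter hook paired with openaiGenerateContentOnExit.
// Assumes imports of "context", "github.com/tmc/langchaingo/llms",
// and "github.com/tmc/langchaingo/llms/openai".
func openaiGenerateContentOnEnter(call api.CallContext, llm *openai.LLM,
	ctx context.Context, messages []llms.MessageContent, opts ...llms.CallOption) {
	// Build the request snapshot; a real hook would fill in the model
	// name and other attributes from llm and messages.
	request := langChainLLMRequest{}
	// Start the span and keep the returned context so the exit hook can
	// end it (assumption: Start has this signature and return value).
	newCtx := langChainLLMInstrument.Start(ctx, request)
	call.SetData(map[string]interface{}{
		"ctx":     newCtx,
		"request": request,
	})
}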