in ai-patterns/spring-ai-function-calling/src/main/java/services/gemini/FunctionCallingApplication.java [100:194]
/**
 * Demonstrates multi-turn and parallel function calling against Vertex AI Gemini —
 * natively, via streaming, and through the Vertex AI OpenAI-compatible API — timing
 * each variant and printing the model output.
 *
 * <p>Note: the streaming branches previously printed only the first streamed chunk
 * ({@code findFirst()}), which truncates the answer because a streamed response is
 * split across many {@link ChatResponse} chunks; all chunks are now concatenated.
 *
 * @param vertexAiGemini native Vertex AI Gemini chat model
 * @param openAI         OpenAI chat model (injected for parity with other demos;
 *                       unused here — the OpenAI-compatible Vertex endpoint is
 *                       constructed locally instead)
 * @return runner that executes the demo calls at application startup
 */
ApplicationRunner applicationRunner(
        VertexAiGeminiChatModel vertexAiGemini,
        OpenAiChatModel openAI) {
    return args -> {
        // Shared Gemini options: low temperature for more deterministic tool calls.
        // Built once instead of being rebuilt for every request.
        VertexAiGeminiChatOptions geminiOptions = VertexAiGeminiChatOptions.builder()
                .temperature(0.2)
                .build();

        //--- Multi-turn function calling ---
        String prompt = """
                Take the result of 1.5 to the power of 4.5 and take it to the power of 2
                """;

        long start = System.currentTimeMillis();
        System.out.println("VERTEX_AI_GEMINI multi-turn fn calling: " + vertexAiGemini.call(
                new Prompt(prompt, geminiOptions))
                .getResult().getOutput().getText().trim());
        System.out.println("VertexAI Gemini multi-turn call took " + (System.currentTimeMillis() - start) + " ms");

        start = System.currentTimeMillis();
        Flux<ChatResponse> geminiStream = vertexAiGemini.stream(new Prompt(prompt, geminiOptions));
        // Concatenate every streamed chunk; printing only the first chunk would truncate
        // the answer. Chunks without text (e.g. pure tool-call deltas) are skipped.
        StringBuilder multiTurnText = new StringBuilder();
        geminiStream.collectList().block().forEach(resp -> {
            String text = resp.getResult().getOutput().getText();
            if (text != null) {
                multiTurnText.append(text);
            }
        });
        System.out.println("\nVERTEX_AI_GEMINI (Streaming) multi-turn fn calling: " + multiTurnText.toString().trim());
        System.out.println("VertexAI Gemini multi-turn streaming call took " + (System.currentTimeMillis() - start) + " ms");

        //--- Parallel function calling ---
        String parallelizedPrompt = """
                What is the status of my payment transactions 002, 001 and 003?
                Please indicate the status for each transaction and return the results in JSON format
                """;

        // Call Gemini through the Vertex AI OpenAI-compatible endpoint;
        // authentication uses a short-lived OAuth2 token.
        String token = getOauth2Token(baseURL + completionsPath);
        String model = "google/gemini-2.0-flash-001";
        OpenAiApi openAiApi = new OpenAiApi(baseURL, new SimpleApiKey(token), new LinkedMultiValueMap<>(), completionsPath,
                "/v1/embeddings",
                RestClient.builder(),
                WebClient.builder(),
                RetryUtils.DEFAULT_RESPONSE_ERROR_HANDLER);
        OpenAiChatModel openAIGemini = OpenAiChatModel.builder().openAiApi(openAiApi).build();
        OpenAiChatOptions openAiChatOptions = OpenAiChatOptions.builder()
                .temperature(0.2)
                .model(model)
                .build();

        start = System.currentTimeMillis();
        System.out.println("\nOPEN_AI API (with parallel fn calling) in Vertex AI: " + openAIGemini.call(
                new Prompt(parallelizedPrompt, openAiChatOptions))
                .getResult().getOutput().getText());
        System.out.println("OpenAI API (with parallel function calling) in VertexAI call took " + (System.currentTimeMillis() - start) + " ms");

        start = System.currentTimeMillis();
        System.out.println("\nVERTEX_AI_GEMINI parallel fn calling: " + vertexAiGemini.call(
                new Prompt(parallelizedPrompt, geminiOptions))
                .getResult().getOutput().getText().trim());
        System.out.println("VertexAI Gemini (with parallel function calling) call took " + (System.currentTimeMillis() - start) + " ms");

        start = System.currentTimeMillis();
        geminiStream = vertexAiGemini.stream(new Prompt(parallelizedPrompt, geminiOptions));
        // Same chunk-concatenation as the multi-turn streaming case above.
        StringBuilder parallelText = new StringBuilder();
        geminiStream.collectList().block().forEach(resp -> {
            String text = resp.getResult().getOutput().getText();
            if (text != null) {
                parallelText.append(text);
            }
        });
        System.out.println("\nVERTEX_AI_GEMINI (Streaming) parallel fn calling: " + parallelText.toString().trim());
        System.out.println("VertexAI Gemini parallel streaming call took " + (System.currentTimeMillis() - start) + " ms");
    };
}