in sessions/next24/books-genai-vertex-langchain4j/src/main/java/services/ai/VertexAIClient.java [52:82]
/**
 * Sends a multimodal (image + text) prompt to the Vertex AI Gemini vision model
 * and returns the model's answer with Markdown code-fence annotations stripped.
 *
 * @param prompt     natural-language question to ask about the image
 * @param bucketName Cloud Storage bucket where the image was uploaded
 * @param fileName   object name of the image within the bucket
 * @return the model response with {@code ```json}/{@code ```} fences removed and
 *         single quotes replaced by double quotes; the caller extracts values
 * @throws IOException if the model invocation fails with an I/O error
 */
public String promptOnImage(String prompt,
                            String bucketName,
                            String fileName) throws IOException {
    long start = System.currentTimeMillis();

    // Build the gs:// URI of the image previously uploaded to Cloud Storage.
    String imageURL = String.format("gs://%s/%s", bucketName, fileName);

    // Combine the image reference and the text prompt into a single user message.
    UserMessage userMessage = UserMessage.from(
        ImageContent.from(imageURL),
        TextContent.from(prompt)
    );

    // NOTE(review): a new model client is built on every call; consider caching
    // it in a field if this method is invoked frequently.
    ChatLanguageModel visionModel = VertexAiGeminiChatModel.builder()
        .project(project)
        .location(location)
        .modelName(VertexModels.GEMINI_FLASH_VISION_VERSION)
        .build();

    Response<AiMessage> multiModalResponse = visionModel.generate(userMessage);
    String response = multiModalResponse.content().text();
    logger.info("Multi-modal response: " + response);

    // The model wraps JSON output in Markdown code fences; strip them and
    // normalize single quotes so the caller can parse the payload as JSON.
    response = response.replaceAll("```json", "").replaceAll("```", "").replace("'", "\"");

    // Fix: the log previously claimed "gemini-pro-vision", but the model actually
    // invoked is VertexModels.GEMINI_FLASH_VISION_VERSION — log the real one.
    logger.info("Elapsed time (" + VertexModels.GEMINI_FLASH_VISION_VERSION
        + ", with LangChain4J): " + (System.currentTimeMillis() - start) + "ms");

    // Response is returned as a String; values are extracted by the caller.
    return response;
}