generative-ai/snippets/function-calling/functionCallingStreamChat.js

// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// [START generativeaionvertexai_gemini_function_calling_chat]
const {
  VertexAI,
  FunctionDeclarationSchemaType,
} = require('@google-cloud/vertexai');

const functionDeclarations = [
  {
    function_declarations: [
      {
        name: 'get_current_weather',
        description: 'get weather in a given location',
        parameters: {
          type: FunctionDeclarationSchemaType.OBJECT,
          properties: {
            location: {type: FunctionDeclarationSchemaType.STRING},
            unit: {
              type: FunctionDeclarationSchemaType.STRING,
              enum: ['celsius', 'fahrenheit'],
            },
          },
          required: ['location'],
        },
      },
    ],
  },
];

const functionResponseParts = [
  {
    functionResponse: {
      name: 'get_current_weather',
      response: {name: 'get_current_weather', content: {weather: 'super nice'}},
    },
  },
];

/**
 * TODO(developer): Update these variables before running the sample.
 */
async function functionCallingStreamChat(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-2.0-flash-001'
) {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  // Instantiate the model
  const generativeModel = vertexAI.getGenerativeModel({
    model: model,
  });

  // Create a chat session and pass your function declarations
  const chat = generativeModel.startChat({
    tools: functionDeclarations,
  });

  const chatInput1 = 'What is the weather in Boston?';

  // This should include a functionCall response from the model
  const result1 = await chat.sendMessageStream(chatInput1);
  for await (const item of result1.stream) {
    console.log(item.candidates[0]);
  }
  await result1.response;

  // Send a follow up message with a FunctionResponse
  const result2 = await chat.sendMessageStream(functionResponseParts);
  for await (const item of result2.stream) {
    console.log(item.candidates[0]);
  }

  // This should include a text response from the model using the response content
  // provided above
  const response2 = await result2.response;
  console.log(response2.candidates[0].content.parts[0].text);
}
// [END generativeaionvertexai_gemini_function_calling_chat]

functionCallingStreamChat(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});
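
// Example invocation (a sketch, assuming Node.js, the @google-cloud/vertexai
// package, and Application Default Credentials for a project with the
// Vertex AI API enabled; "my-project-id" is a placeholder, not a real project):
//
//   node functionCallingStreamChat.js my-project-id us-central1 gemini-2.0-flash-001
//
// The command-line arguments are forwarded to
// functionCallingStreamChat(projectId, location, model) via process.argv.slice(2);
// any omitted argument falls back to the defaults declared in the function signature.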