in dependency-overview/lib/gcp/geminihelper.js [22:79]
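/**
 * Sends the collected code base to a Gemini model on Vertex AI and returns
 * the model's overview summary as a string. Configuration (project, location,
 * model, generation parameters) is read from the surrounding config helpers.
 * Assumes `VertexAI` is imported from `@google-cloud/vertexai` earlier in
 * this file, above the excerpted range.
 *
 * @param {string} codeBase Source text (e.g. a concatenated code base) to summarize.
 * @returns {Promise<string>} The model-generated overview.
 */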
async function callPredict(codeBase) {
  /* Load config */
  const projectId = configEnv.getProject();
  const locationId = configFile.getLocation();
  const modelId = configFile.getModel();
  const temperature = parseFloat(configFile.getTemperature());
  // The output-token limit must be an integer, so parseInt rather than parseFloat.
  const maxOutTokens = parseInt(configFile.getMaxtokens(), 10);
  /* Initialize Vertex AI (authenticates via Application Default Credentials) */
  const vertexAI = new VertexAI({ project: projectId, location: locationId });
  /* System instruction and chat history */
  const systemInstruction = contextFile.getMainContext() + codeBase;
  const completeHistory = [];
  /* Initialize model */
  const generativeModel = vertexAI.preview.getGenerativeModel({
    model: modelId,
    generationConfig: {
      maxOutputTokens: maxOutTokens,
      temperature: temperature,
      topP: 0.95,
    },
    // The Node SDK expects camelCase `safetySettings`; a snake_case
    // `safety_settings` key is silently ignored.
    safetySettings: [
      { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_ONLY_HIGH' },
      { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH' },
      { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'BLOCK_MEDIUM_AND_ABOVE' },
      { category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_MEDIUM_AND_ABOVE' },
    ],
    systemInstruction: {
      parts: [{ text: systemInstruction }],
    },
  });
  const chat = generativeModel.startChat({ history: completeHistory });
  /* Overview message */
  const firstMessage = contextFile.getOverviewContext() + codeBase;
  const streamResult = await chat.sendMessageStream(firstMessage);
  // Await the aggregated response rather than consuming the stream chunk by chunk.
  const currentResponse = (await streamResult.response).candidates[0].content.parts[0].text;
  // History entries use the Content shape ({ role, parts }); in the Vertex AI
  // API the model's turns carry the role "model", not "assistant".
  completeHistory.push({ role: 'user', parts: [{ text: firstMessage }] });
  completeHistory.push({ role: 'model', parts: [{ text: currentResponse }] });
  /* Return summary */
  return currentResponse;
}
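
/*
 * Illustrative usage sketch. It assumes this module exports callPredict and
 * that the caller assembles the code base into a single string; the export
 * and the file read shown here are hypothetical, not part of this excerpt:
 *
 *   const { callPredict } = require('./geminihelper');
 *   const fs = require('fs');
 *
 *   const codeBase = fs.readFileSync('src/index.js', 'utf8');
 *   callPredict(codeBase).then((summary) => console.log(summary));
 */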