async function callPredict(mycontent)

in evaluator/lib/gcp/geminihelper.js [22:81]

Builds a Vertex AI request that prepends the shared context prompt to mycontent, streams a response from the configured Gemini model, and returns the generated text (or an empty string if nothing comes back).

async function callPredict(mycontent) {
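    /* Assumes VertexAI is imported from @google-cloud/vertexai and that the
       configEnv, configFile and contextFile helpers are required earlier in
       the file (outside the [22:81] excerpt shown here). */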

    /* Config Parameters */
    const project = configEnv.getProject();
    const location = configFile.getLocation();
    const model = configFile.getModel();
    const thistemperature = parseFloat(configFile.getTemperature());
    const thismaxtokens = parseInt(configFile.getMaxtokens(), 10);   // token limit must be an integer

    const vertex_ai = new VertexAI({
        project: project,
        location: location
    });

    /* Instantiate the generative model with its generation and safety settings */
    const generativeModel = vertex_ai.preview.getGenerativeModel({
        model: model,
        generation_config: {
            "max_output_tokens": thismaxtokens,
            "temperature": thistemperature,
            "top_p": 1
        },
        safety_settings: [
            {
                "category": "HARM_CATEGORY_HATE_SPEECH",
                "threshold": "BLOCK_ONLY_HIGH"
            },
            {
                "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
                "threshold": "BLOCK_ONLY_HIGH"
            },
            {
                "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
            },
            {
                "category": "HARM_CATEGORY_HARASSMENT",
                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
            }
        ],
    });

    /* Get the shared context that is prepended to the content to form the prompt */
    const mycontext = contextFile.getContext();

    /* Building our request */
    const req = {
        contents: [{role: 'user', parts: [{text: mycontext + "\n\n" + mycontent}]}],
    };
    
    /* generateContentStream returns a streaming result; awaiting .response
       yields the aggregated response once the stream completes. */
    const streamingResp = await generativeModel.generateContentStream(req);
    const response = await streamingResp.response;

    /* Return the generated text, or an empty string if the model produced no
       text (for example, when the candidate is blocked by the safety settings). */
    const text = response.candidates?.[0]?.content?.parts?.[0]?.text;
    return text ? text : "";
}
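
A minimal sketch of how a caller might use this helper, assuming the module exports callPredict and is required from the evaluator root (the export shape and require path are assumptions, not shown in the excerpt):

const { callPredict } = require('./lib/gcp/geminihelper');   // export shape and path are assumptions

async function main() {
    try {
        /* Pass only the content to evaluate; the context prompt is prepended inside callPredict */
        const answer = await callPredict("Text to evaluate goes here");
        console.log(answer || "No text returned (possibly blocked by the safety settings)");
    } catch (err) {
        console.error("Prediction failed:", err);
    }
}

main();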