async function connect_response()

in lambda/fulfillment/lib/middleware/5_assemble.js [37:85]


async function connect_response(req, res) {
    // If QnABot is in multi-language mode, translate the NextPrompt into the target language
    if (_.get(req._settings, 'ENABLE_MULTI_LANGUAGE_SUPPORT')){
        const locale = _.get(req, 'session.qnabotcontext.userLocale');
        let nextPromptVarName = _.get(req,"_settings.CONNECT_NEXT_PROMPT_VARNAME",'nextPrompt');
        let prompt = _.get(res.session,nextPromptVarName,"");
        if (prompt) {
            prompt = await translate.get_translation(prompt,'auto',locale,req);
        }
        _.set(res.session,nextPromptVarName,prompt);
    }
    // If in elicit response, set next prompt to empty
    if ( _.get(res,"session.qnabotcontext.elicitResponse.responsebot")) {
        let nextPromptVarName = _.get(req,"_settings.CONNECT_NEXT_PROMPT_VARNAME",'nextPrompt');
        _.set(res.session,nextPromptVarName,"");
    }

    // Split multi-sentence responses to enable barge-in for long fulfillment messages when using Connect voice,
    // except when QnABot is in ElicitResponse mode. In that case we keep the bot session with the GetCustomerInput block open, so
    // the Connect contact flow loop is not invoked (and CONNECT_NEXT_PROMPT would not be played).
    if (req._clientType == "LEX.AmazonConnect.Voice") {
        if (! _.get(res,"session.qnabotcontext.elicitResponse.responsebot")) {
            // QnABot is not doing elicitResponse
            if (_.get(req,"_settings.CONNECT_ENABLE_VOICE_RESPONSE_INTERRUPT")) {
                let nextPromptVarName = _.get(req,"_settings.CONNECT_NEXT_PROMPT_VARNAME",'nextPrompt');
                qnabot.log("CONNECT_ENABLE_VOICE_RESPONSE_INTERRUPT is true. splitting response.");
                // Split multi-sentence responses: the first sentence stays in the response, and the remaining sentences are prepended to the next prompt session attribute.
                let message = res.message;
                let prompt = _.get(res.session,nextPromptVarName,"").replace(/<speak>|<\/speak>/g, "");
                if (res.type == "PlainText") {
                    // process plain text
                    let a = split_message(message); // split on first period
                    res.message = a[0];
                    _.set(res.session,nextPromptVarName,a[1] + " " + prompt);
                } else if (res.type == "SSML") {
                    // process SSML
                    // strip <speak> tags
                    message = message.replace(/<speak>|<\/speak>/g, "");
                    let a = split_message(message);
                    res.message = "<speak>" + a[0] + "</speak>";
                    _.set(res.session,nextPromptVarName, "<speak>" + a[1] + " " + prompt + "</speak>");
                }
                qnabot.log("Response message:", res.message);
                qnabot.log("Reponse session var:", nextPromptVarName, ":", _.get(res.session,nextPromptVarName)) ;
            }
        } 
    }
    return res;
}
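
The split_message helper is defined earlier in 5_assemble.js and does not appear in this excerpt. The code above assumes it returns a two-element array: the first sentence of the message and the remainder. A minimal sketch of that assumed contract (the real implementation may differ) could look like this:

function split_message(message) {
    // Split at the first period; the first sentence becomes element 0 and the remainder element 1.
    const idx = message.indexOf('.');
    if (idx >= 0 && idx < message.length - 1) {
        return [message.substring(0, idx + 1), message.substring(idx + 1).trim()];
    }
    // No period (or the period is the last character): keep the whole message, with an empty remainder.
    return [message, ''];
}

With CONNECT_ENABLE_VOICE_RESPONSE_INTERRUPT enabled, a PlainText response such as "Your appointment is confirmed. Is there anything else I can help with?" would leave only the first sentence in res.message, while the remainder is prepended to the nextPrompt session attribute so the Connect contact flow can play it as an interruptible prompt on its next loop.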