in static/js/chat.js [37:82]
function connectAvatar() {
    fetch("/get-speech-region")
        .then(response => response.json())
        .then(regionData => {
            speechRegion = regionData.speech_region;

            // Get the speech token
            fetch("/get-speech-token")
                .then(response => response.json())
                .then(tokenData => {
                    token = tokenData.token; // store globally for recognition
                    speechTokenTimestamp = new Date();

                    // Create speech synthesis configuration
                    const speechSynthesisConfig = SpeechSDK.SpeechConfig.fromAuthorizationToken(token, speechRegion);
                    speechSynthesisConfig.speechSynthesisVoiceName = "en-US-AvaMultilingualNeural";

                    // Set default avatar configuration (hardcoded instead of read from env)
                    const talkingAvatarCharacter = "Lisa";
                    const talkingAvatarStyle = "casual-sitting";
                    const avatarConfig = new SpeechSDK.AvatarConfig(talkingAvatarCharacter, talkingAvatarStyle);
                    avatarConfig.customized = false;

                    avatarSynthesizer = new SpeechSDK.AvatarSynthesizer(speechSynthesisConfig, avatarConfig);
                    avatarSynthesizer.avatarEventReceived = function (s, e) {
                        console.log("Avatar event: " + e.description);
                    };

                    // Get ICE token from backend for WebRTC
                    fetch("/get-ice-server-token")
                        .then(response => response.json())
                        .then(iceData => {
                            const iceServerUrl = iceData.Urls[0];
                            const iceServerUsername = iceData.Username;
                            const iceServerCredential = iceData.Password;
                            setupWebRTC(iceServerUrl, iceServerUsername, iceServerCredential);
                        })
                        .catch(err => console.error("Error fetching ICE token:", err));
                })
                .catch(err => console.error("Error fetching speech token:", err));
        })
        .catch(err => console.error("Error fetching speech region:", err));

    if (!messageInitiated) {
        initMessages();
        messageInitiated = true;
    }

    // Disable the start session button once clicked
    document.getElementById('startSession').disabled = true;
}
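
The setupWebRTC helper called above is defined elsewhere in chat.js and is not shown here. As a rough sketch only, assuming the standard browser RTCPeerConnection API and the Speech SDK's startAvatarAsync call, such a helper might wire the relay credentials from /get-ice-server-token into a peer connection like this (the "remoteVideo" element ID and the track handling are illustrative assumptions, not the actual implementation):

// Hypothetical sketch, not the actual setupWebRTC from chat.js.
function setupWebRTC(iceServerUrl, iceServerUsername, iceServerCredential) {
    // Build a peer connection that relays media through the ICE server
    // credentials returned by the backend.
    peerConnection = new RTCPeerConnection({
        iceServers: [{
            urls: [iceServerUrl],
            username: iceServerUsername,
            credential: iceServerCredential
        }]
    });

    // Attach the avatar's incoming video track to a page element
    // ("remoteVideo" is an assumed element ID).
    peerConnection.ontrack = function (event) {
        if (event.track.kind === 'video') {
            document.getElementById('remoteVideo').srcObject = event.streams[0];
        }
    };

    // Offer to receive one audio and one video track from the avatar service.
    peerConnection.addTransceiver('video', { direction: 'sendrecv' });
    peerConnection.addTransceiver('audio', { direction: 'sendrecv' });

    // Start the avatar session over this peer connection.
    avatarSynthesizer.startAvatarAsync(peerConnection)
        .then(result => console.log("Avatar started, result ID: " + result.resultId))
        .catch(err => console.error("Avatar failed to start:", err));
}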