in src/common.speech/DialogServiceAdapter.ts [140:268]
protected async processTypeSpecificMessages(connectionMessage: SpeechConnectionMessage): Promise<boolean> {
const resultProps: PropertyCollection = new PropertyCollection();
if (connectionMessage.messageType === MessageType.Text) {
resultProps.setProperty(PropertyId.SpeechServiceResponse_JsonResult, connectionMessage.textBody);
}
let result: SpeechRecognitionResult;
let processed: boolean;
switch (connectionMessage.path.toLowerCase()) {
case "speech.phrase":
const speechPhrase: SimpleSpeechPhrase = SimpleSpeechPhrase.fromJSON(connectionMessage.textBody);
this.privRequestSession.onPhraseRecognized(this.privRequestSession.currentTurnAudioOffset + speechPhrase.Offset + speechPhrase.Duration);
if (speechPhrase.RecognitionStatus !== RecognitionStatus.TooManyRequests && speechPhrase.RecognitionStatus !== RecognitionStatus.Error) {
const args: SpeechRecognitionEventArgs = this.fireEventForResult(speechPhrase, resultProps);
this.privLastResult = args.result;
if (!!this.privDialogServiceConnector.recognized) {
try {
this.privDialogServiceConnector.recognized(this.privDialogServiceConnector, args);
/* tslint:disable:no-empty */
} catch (error) {
// Not going to let errors in the event handler
// trip things up.
}
}
}
processed = true;
break;
case "speech.hypothesis":
const hypothesis: SpeechHypothesis = SpeechHypothesis.fromJSON(connectionMessage.textBody);
const offset: number = hypothesis.Offset + this.privRequestSession.currentTurnAudioOffset;
result = new SpeechRecognitionResult(
this.privRequestSession.requestId,
ResultReason.RecognizingSpeech,
hypothesis.Text,
hypothesis.Duration,
offset,
hypothesis.Language,
hypothesis.LanguageDetectionConfidence,
undefined,
undefined,
connectionMessage.textBody,
resultProps);
this.privRequestSession.onHypothesis(offset);
const ev = new SpeechRecognitionEventArgs(result, hypothesis.Duration, this.privRequestSession.sessionId);
if (!!this.privDialogServiceConnector.recognizing) {
try {
this.privDialogServiceConnector.recognizing(this.privDialogServiceConnector, ev);
/* tslint:disable:no-empty */
} catch (error) {
// Not going to let errors in the event handler
// trip things up.
}
}
processed = true;
break;
case "speech.keyword":
const keyword: SpeechKeyword = SpeechKeyword.fromJSON(connectionMessage.textBody);
result = new SpeechRecognitionResult(
this.privRequestSession.requestId,
keyword.Status === "Accepted" ? ResultReason.RecognizedKeyword : ResultReason.NoMatch,
keyword.Text,
keyword.Duration,
keyword.Offset,
undefined,
undefined,
undefined,
undefined,
connectionMessage.textBody,
resultProps);
if (keyword.Status !== "Accepted") {
this.privLastResult = result;
}
const event = new SpeechRecognitionEventArgs(result, result.duration, result.resultId);
if (!!this.privDialogServiceConnector.recognized) {
try {
this.privDialogServiceConnector.recognized(this.privDialogServiceConnector, event);
/* tslint:disable:no-empty */
} catch (error) {
// Not going to let errors in the event handler
// trip things up.
}
}
processed = true;
break;
case "audio":
{
const audioRequestId = connectionMessage.requestId.toUpperCase();
const turn = this.privTurnStateManager.GetTurn(audioRequestId);
try {
// Empty binary message signals end of stream.
if (!connectionMessage.binaryBody) {
turn.endAudioStream();
} else {
turn.audioStream.write(connectionMessage.binaryBody);
}
} catch (error) {
// Not going to let errors in the event handler
// trip things up.
}
}
processed = true;
break;
case "response":
{
this.handleResponseMessage(connectionMessage);
}
processed = true;
break;
default:
break;
}
return processed;
}