async function detectIntentAudio()

in dialogflow-cx/detect-intent-streaming.js [52:123]


  async function detectIntentAudio() {
    const sessionId = Math.random().toString(36).substring(7);
    const sessionPath = client.projectLocationAgentSessionPath(
      projectId,
      location,
      agentId,
      sessionId
    );
    console.info(sessionPath);

    // Create a stream for the streaming request.
    const detectStream = client
      .streamingDetectIntent()
      .on('error', console.error)
      .on('data', data => {
        if (data.recognitionResult) {
          console.log(
            `Intermediate Transcript: ${data.recognitionResult.transcript}`
          );
        } else {
          console.log('Detected Intent:');
          const result = data.detectIntentResponse.queryResult;

          console.log(`User Query: ${result.transcript}`);
          for (const message of result.responseMessages) {
            if (message.text) {
              console.log(`Agent Response: ${message.text.text}`);
            }
          }
          if (result.match.intent) {
            console.log(`Matched Intent: ${result.match.intent.displayName}`);
          }
          console.log(`Current Page: ${result.currentPage.displayName}`);
        }
      });

    // Write the initial stream request to config for audio input.
    const initialStreamRequest = {
      session: sessionPath,
      queryInput: {
        audio: {
          config: {
            audioEncoding: encoding,
            sampleRateHertz: sampleRateHertz,
            synthesize_speech_config: {
              voice: {
                // Set's the name and gender of the ssml voice
                name: 'en-GB-Standard-A',
                ssml_gender: 'SSML_VOICE_GENDER_FEMALE',
              },
            },
            singleUtterance: true,
          },
        },
        languageCode: languageCode,
      },
    };
    detectStream.write(initialStreamRequest);

    // Stream the audio from audio file to Dialogflow.
    await pump(
      fs.createReadStream(audioFileName),
      // Format the audio stream into the request format.
      new Transform({
        objectMode: true,
        transform: (obj, _, next) => {
          next(null, {queryInput: {audio: {audio: obj}}});
        },
      }),
      detectStream
    );
  }