in genesyscloud/genesyscloud-audiohook/dialogflow_api.py [0:0]
def streaming_analyze_content(
self,
audio_stream: Stream,
participant: dialogflow.Participant,
audio_config: dialogflow.InputAudioConfig):
"""Call dialogflow backend StreamingAnalyzeContent endpoint,
and send the audio binary stream from Audiohook.
"""
try:
logging.debug("call streaming analyze content for %s", participant)
responses = self.participants_client.streaming_analyze_content(
requests=self.generator_streaming_analyze_content_request(
audio_config, participant, audio_stream))
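# On any of the errors below, mark the stream closed and stop processing;
# a single StreamingAnalyzeContent stream is limited to about 120 seconds of audio.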
except OutOfRange as e:
audio_stream.closed = True
logging.warning(
"The single audio stream last more than 120 second %s ", e)
return
except FailedPrecondition as e:
audio_stream.closed = True
logging.warning(
"Failed the precondition check for StreamingAnalyzeContent %s ", e)
return
except ResourceExhausted as e:
audio_stream.closed = True
logging.warning(
"Exceed quota for calling streaming analyze content %s ", e)
return
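# Consume streaming responses until Dialogflow closes the stream; each
# response carries an interim or final recognition result, and
# speech_end_offset reports how much audio has been recognized so far.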
for response in responses:
audio_stream.speech_end_offset = response.recognition_result.speech_end_offset.seconds * 1000
logging.debug(response)
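# A final result marks the end of an utterance; record its end offset in milliseconds.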
if response.recognition_result.is_final:
audio_stream.is_final = True
logging.debug(
"Final transcript for %s: %s, and is final offset",
participant.role.name,
response.recognition_result.transcript,
)
offset = response.recognition_result.speech_end_offset
audio_stream.is_final_offset = int(
offset.seconds * 1000 + offset.microseconds / 1000
)
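# Log every non-empty recognition result (interim and final) at debug level.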
if response.recognition_result:
logging.debug(
"Role %s: Interim response recognition result transcript: %s, time %s",
participant.role.name,
response.recognition_result.transcript,
response.recognition_result.speech_end_offset)
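# A minimal sketch of the request pattern streaming_analyze_content relies on,
# not this module's actual generator_streaming_analyze_content_request (defined
# elsewhere in the file): the first request carries only the participant and
# audio config, each following request carries one chunk of raw audio.
# `audio_stream.generator()` is an assumed chunk iterator on the Stream object.
def _example_streaming_analyze_content_requests(audio_config, participant, audio_stream):
    # First request: configuration only, no audio payload.
    yield dialogflow.StreamingAnalyzeContentRequest(
        participant=participant.name,
        audio_config=audio_config,
    )
    # Subsequent requests: one chunk of binary audio each.
    for chunk in audio_stream.generator():
        yield dialogflow.StreamingAnalyzeContentRequest(input_audio=chunk)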