in dialogflow/streaming_transcription.py [0:0]
def generator(self):
    """Stream audio from the microphone buffer to the API and keep a local copy.

    Yields:
        bytes: chunks of raw audio. On (re)start, first re-yields any
        buffered audio recorded after the last processed position (capped
        at MAX_LOOKBACK seconds), then streams live chunks from self._buff
        until the stream is closed, a final result arrives, or a None
        sentinel signals end-of-audio.
    """
    try:
        # Handle restart.
        print("restart generator")
        # Reset is_final so the loop below can continue streaming after a
        # previous final recognition result stopped it.
        self.is_final = False
        total_processed_time = self.last_start_time + self.is_final_offset
        # Convert processed time (assumed milliseconds — TODO confirm against
        # callers) into a byte offset:
        #   ms * (samples/sec) * (16 bits / 8 bits-per-byte) / (1000 ms/sec).
        # The whole expression must be inside int(): leaving "/ 1000" outside
        # (as before) produced a float, i.e. a fractional byte count.
        processed_bytes_length = int(
            total_processed_time * SAMPLE_RATE * 16 / 8 / 1000
        )
        self.last_start_time = total_processed_time
        # Re-send bytes stored in self.audio_input_chunks that come after
        # processed_bytes_length.
        if processed_bytes_length != 0:
            audio_bytes = b"".join(self.audio_input_chunks)
            # Look back only over unprocessed audio, capped at MAX_LOOKBACK
            # seconds worth of bytes.
            need_to_process_length = min(
                len(audio_bytes) - processed_bytes_length,
                int(MAX_LOOKBACK * SAMPLE_RATE * 16 / 8),
            )
            # Guard against 0 (audio_bytes[-0:] would re-yield the ENTIRE
            # buffer, including already-processed audio) and negative values.
            if need_to_process_length > 0:
                need_to_process_bytes = audio_bytes[-need_to_process_length:]
                yield need_to_process_bytes
        while not self.closed and not self.is_final:
            data = []
            # Use a blocking get() to ensure there's at least one chunk of
            # data, and stop iteration if the chunk is None, indicating the
            # end of the audio stream.
            chunk = self._buff.get()
            if chunk is None:
                return
            data.append(chunk)
            # Drain any remaining chunks from _buff without blocking.
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break
            # Keep a local copy so restarts can replay unprocessed audio.
            self.audio_input_chunks.extend(data)
            if data:
                yield b"".join(data)
    finally:
        print("Stop generator")