in pyrit/prompt_converter/azure_speech_audio_to_text_converter.py [0:0]
def recognize_audio(self, audio_bytes: bytes) -> str:
"""
Recognize audio file and return transcribed text.
Args:
audio_bytes (bytes): Audio bytes input.
Returns:
str: Transcribed text
"""
    try:
        import azure.cognitiveservices.speech as speechsdk  # noqa: F811
    except ModuleNotFoundError as e:
        logger.error(
            "Could not import azure.cognitiveservices.speech. "
            "You may need to install it via 'pip install pyrit[speech]'"
        )
        raise e
    speech_config = speechsdk.SpeechConfig(
        subscription=self._azure_speech_key,
        region=self._azure_speech_region,
    )
    speech_config.speech_recognition_language = self._recognition_language
    # Create a PushAudioInputStream so the raw audio bytes can be fed to the recognizer
    push_stream = speechsdk.audio.PushAudioInputStream()
    audio_config = speechsdk.audio.AudioConfig(stream=push_stream)
    # Instantiate a speech recognizer object
    speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
    # Create an empty list to store recognized text
    transcribed_text: list[str] = []
    # Flag is set to False to indicate that recognition is not yet finished
    self.done = False
    # Connect callbacks to the events fired by the speech recognizer
    speech_recognizer.recognized.connect(lambda evt: self.transcript_cb(evt, transcript=transcribed_text))
    speech_recognizer.recognizing.connect(lambda evt: logger.info("RECOGNIZING: {}".format(evt)))
    speech_recognizer.recognized.connect(lambda evt: logger.info("RECOGNIZED: {}".format(evt)))
    speech_recognizer.session_started.connect(lambda evt: logger.info("SESSION STARTED: {}".format(evt)))
    speech_recognizer.session_stopped.connect(lambda evt: logger.info("SESSION STOPPED: {}".format(evt)))
    # Stop continuous recognition when the canceled or session_stopped event is fired
    speech_recognizer.canceled.connect(lambda evt: self.stop_cb(evt, recognizer=speech_recognizer))
    speech_recognizer.session_stopped.connect(lambda evt: self.stop_cb(evt, recognizer=speech_recognizer))
    # Start continuous recognition
    speech_recognizer.start_continuous_recognition_async()
    # Push the entire audio payload into the stream, then close it to signal end of input
    push_stream.write(audio_bytes)
    push_stream.close()
    # Poll until the stop callback sets self.done, i.e. recognition has finished
    while not self.done:
        time.sleep(0.5)
    return "".join(transcribed_text)