src/Backend/src/api/adapters/google/speech_to_text_v2.py [85:123]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        )

    def _create_recognizer(
        self,
        sync_client: speech.SpeechClient,
        region: str,
        model_type: str,
        language: str,
        features: cloud_speech.RecognitionFeatures,
    ) -> str:
        recognizer_data = speech.Recognizer(
            display_name="LIA Recognizer",
            model=model_type,
            language_codes=[language],
            default_recognition_config=cloud_speech.RecognitionConfig(
                features=features,
            ),
        )

        create_request = speech.CreateRecognizerRequest(
            parent=f"projects/{self.project_id}/locations/{region}",
            recognizer=recognizer_data,
            recognizer_id=f"a{uuid.uuid4()}",
        )
        operation = sync_client.create_recognizer(request=create_request)
        recognizer: speech.Recognizer = operation.result()  # type: ignore[no-untyped-call]
        return recognizer.name

    # pylint: disable=too-many-arguments,too-many-locals,too-complex,too-many-nested-blocks
    async def process(
        self,
        phrases_id: str,
        path: str,
        desired: str,
        words: list[str],
        duration: int,
        depth: int = 0,
        sample_rate: int | None = None,
        channels: int | None = None,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
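The fragment begins mid-class, so its imports are not shown. Below is a minimal sketch of how the created recognizer is typically consumed, assuming `speech` aliases `google.cloud.speech_v2` and `cloud_speech` is its generated types module; the `recognize_with` helper and its parameters are illustrative, not part of the adapter:

from google.cloud import speech_v2 as speech
from google.cloud.speech_v2.types import cloud_speech


def recognize_with(
    recognizer_name: str, audio_bytes: bytes, region: str
) -> cloud_speech.RecognizeResponse:
    # Regional recognizers are reached through the matching regional endpoint.
    client = speech.SpeechClient(
        client_options={"api_endpoint": f"{region}-speech.googleapis.com"}
    )
    request = cloud_speech.RecognizeRequest(
        recognizer=recognizer_name,
        config=cloud_speech.RecognitionConfig(
            # Let the service detect the audio container/encoding; fields left
            # unset fall back to the recognizer's default_recognition_config.
            auto_decoding_config=cloud_speech.AutoDetectDecodingConfig(),
        ),
        content=audio_bytes,
    )
    return client.recognize(request=request)

This sketch covers the short-audio path only; the adapter's own `process` method, truncated above, takes additional parameters (`duration`, `sample_rate`, `channels`) that the example does not use.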



src/Backend/src/api/adapters/google/speech_to_text_v2.py [322:359]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        )

    def _create_recognizer(
        self,
        sync_client: speech.SpeechClient,
        region: str,
        model_type: str,
        language: str,
        features: cloud_speech.RecognitionFeatures,
    ) -> str:
        recognizer_data = speech.Recognizer(
            display_name="LIA Recognizer",
            model=model_type,
            language_codes=[language],
            default_recognition_config=cloud_speech.RecognitionConfig(
                features=features,
            ),
        )

        create_request = speech.CreateRecognizerRequest(
            parent=f"projects/{self.project_id}/locations/{region}",
            recognizer=recognizer_data,
            recognizer_id=f"a{uuid.uuid4()}",
        )
        operation = sync_client.create_recognizer(request=create_request)
        recognizer: speech.Recognizer = operation.result()  # type: ignore[no-untyped-call]
        return recognizer.name

    async def process(
        self,
        phrases_id: str,
        path: str,
        desired: str,
        words: list[str],
        duration: int,
        depth: int = 0,
        sample_rate: int | None = None,
        channels: int | None = None,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
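Both ranges carry an identical copy of `_create_recognizer` (the second merely lacks the pylint directive in front of `process`). One way to collapse the duplication, sketched under the assumption that both call sites are classes exposing `self.project_id`, is a small shared mixin; `RecognizerFactoryMixin` is a hypothetical name, not something present in the repository:

import uuid

from google.cloud import speech_v2 as speech
from google.cloud.speech_v2.types import cloud_speech


class RecognizerFactoryMixin:
    """Shared recognizer creation for the Speech-to-Text v2 adapters."""

    project_id: str  # supplied by the concrete adapter class

    def _create_recognizer(
        self,
        sync_client: speech.SpeechClient,
        region: str,
        model_type: str,
        language: str,
        features: cloud_speech.RecognitionFeatures,
    ) -> str:
        recognizer_data = speech.Recognizer(
            display_name="LIA Recognizer",
            model=model_type,
            language_codes=[language],
            default_recognition_config=cloud_speech.RecognitionConfig(features=features),
        )
        create_request = speech.CreateRecognizerRequest(
            parent=f"projects/{self.project_id}/locations/{region}",
            recognizer=recognizer_data,
            # The "a" prefix keeps the generated ID from starting with a digit.
            recognizer_id=f"a{uuid.uuid4()}",
        )
        # create_recognizer returns a long-running operation; block until it
        # resolves and hand back the fully qualified resource name.
        operation = sync_client.create_recognizer(request=create_request)
        recognizer: speech.Recognizer = operation.result()
        return recognizer.name

Each adapter would then inherit the mixin and drop its private copy, so a change to the recognizer defaults only has to land once.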



