def process()

in STT/paraformer_handler.py [0:0]


    def process(self, spoken_prompt):
        logger.debug("inferring paraformer...")

        # Record the pipeline start time in a module-level global,
        # used for latency measurement by later stages.
        global pipeline_start
        pipeline_start = perf_counter()

        # Run Paraformer ASR on the audio chunk and strip all spaces
        # from the returned transcript.
        pred_text = (
            self.model.generate(spoken_prompt)[0]["text"].strip().replace(" ", "")
        )
        # Release cached memory on the MPS (Apple Silicon) backend.
        torch.mps.empty_cache()

        logger.debug("finished paraformer inference")
        console.print(f"[yellow]USER: {pred_text}")

        # Yield the transcript so the next pipeline stage can consume
        # this handler as a generator.
        yield pred_text
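
Since process() is a generator, a downstream stage iterates over its yields rather than calling it once for a return value. The sketch below shows that consumption pattern under stated assumptions: consume_stt_output and stt_generator are hypothetical names for illustration, not part of the handler's API.

    from time import perf_counter

    def consume_stt_output(stt_generator):
        """Minimal sketch: drain an STT handler's generator and time the stage.

        Each yielded item is one transcribed user utterance; a real pipeline
        would forward it to the next handler (e.g., an LLM stage) instead of
        printing it.
        """
        start = perf_counter()
        for pred_text in stt_generator:
            print(f"USER: {pred_text}")
        print(f"STT stage took {perf_counter() - start:.2f}s")

Usage would look like consume_stt_output(handler.process(spoken_prompt)), where handler is an instantiated Paraformer STT handler and spoken_prompt is the audio input expected by self.model.generate().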