in docker_images/speechbrain/app/pipelines/audio_to_audio.py [0:0]
def __init__(self, model_id: str):
    # Infer which SpeechBrain pretrained interface the Hub model uses,
    # then load the matching class and record the pipeline task type.
    model_type = get_type(model_id)
    if model_type == ModelType.SEPFORMERSEPARATION:
        # SepFormer checkpoints separate a mixture into individual sources.
        self.model = SepformerSeparation.from_hparams(source=model_id)
        self.type = "audio-source-separation"
    elif model_type == ModelType.SPECTRALMASKENHANCEMENT:
        # Spectral-mask models enhance speech in the time-frequency domain.
        self.model = SpectralMaskEnhancement.from_hparams(source=model_id)
        self.type = "speech-enhancement"
    elif model_type == ModelType.WAVEFORMENHANCEMENT:
        # Waveform models enhance speech directly in the time domain.
        self.type = "speech-enhancement"
        self.model = WaveformEnhancement.from_hparams(source=model_id)
    else:
        raise ValueError(f"{model_type.value} is invalid for audio-to-audio")
    # Expose the model's sampling rate so callers can resample inputs/outputs.
    self.sampling_rate = self.model.hparams.sample_rate
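
A minimal usage sketch, assuming the enclosing class is named AudioToAudioPipeline (as the file path suggests) and using an illustrative SpeechBrain Hub id; actual inference goes through the pipeline's __call__, defined elsewhere in the file:

    # Illustrative only: any Hub id whose metadata resolves to one of the
    # three supported SpeechBrain interfaces would work here.
    pipeline = AudioToAudioPipeline("speechbrain/sepformer-wsj02mix")
    print(pipeline.type)           # "audio-source-separation"
    print(pipeline.sampling_rate)  # read from the loaded model's hyperparameters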