# src/diarizers/data/synthetic_pipeline.py
# Module-level imports this method relies on; `convert_audio` is assumed to be the
# resampling helper from the `denoiser` package (denoiser.dsp.convert_audio).
import torch
from denoiser.dsp import convert_audio


def denoise_audio_segment(self, audio_file, rank=None):
    """Denoise a generated meeting audio segment with the pipeline's denoiser.

    Args:
        audio_file (np.ndarray): generated meeting audio array.
        rank (int, optional): process rank, used to select the GPU the denoiser runs on.

    Returns:
        audio_file (np.ndarray): denoised generated meeting audio array.
    """
    # Map the (optional) process rank onto one of the available GPUs.
    device = f"cuda:{(rank or 0) % torch.cuda.device_count()}"
    self.denoiser = self.denoiser.to(device)

    # Resample and reshape the waveform to the denoiser's expected sample rate and channel count.
    audio_file_converted = convert_audio(
        torch.tensor(audio_file).unsqueeze(0).to(device),
        self.sample_rate,
        self.denoiser.sample_rate,
        self.denoiser.chin,
    )

    # Run the denoiser without tracking gradients, then move the result back to a NumPy array.
    # `audio_file_converted` is already a tensor on `device`, so we only cast its dtype here.
    with torch.no_grad():
        audio_file = (
            self.denoiser(audio_file_converted.to(torch.float32))[0].squeeze(0).cpu().numpy()
        )
    return audio_file
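
# Hypothetical usage sketch (names below are illustrative assumptions, not taken from
# this file): the method is expected to be called on a pipeline object that owns
# `denoiser` and `sample_rate`, e.g.
#
#     import numpy as np
#
#     pipeline = SyntheticPipeline(...)                            # object exposing denoise_audio_segment
#     noisy = np.zeros(pipeline.sample_rate, dtype=np.float32)     # 1 s of silence as a stand-in segment
#     clean = pipeline.denoise_audio_segment(noisy, rank=0)        # denoised np.ndarray of the same audio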