in training/data.py [0:0]
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
    """Collate raw audio examples into a padded batch of feature-extractor inputs.

    Args:
        features: one dict per example; ``feature[self.audio_column_name]["array"]``
            holds the raw waveform samples.

    Returns:
        The feature extractor's batch dict (``return_tensors="pt"``), plus
        ``"len_audio"``: an ``(N, 1)`` int tensor with each clip's effective
        (post-truncation) sample count.
    """
    # Inputs and labels are padded separately elsewhere because they have
    # different lengths and need different padding methods; here we only
    # handle the audio side.
    audios = [feature[self.audio_column_name]["array"] for feature in features]
    len_audio = [len(audio) for audio in audios]
    if self.max_length is not None:
        # Truncate over-long clips. Clamp the recorded lengths too, so that
        # "len_audio" matches the tensors actually placed in the batch
        # (previously it kept the pre-truncation length, which could exceed
        # the padded audio and corrupt downstream length bookkeeping).
        audios = [audio[: self.max_length] for audio in audios]
        len_audio = [min(length, self.max_length) for length in len_audio]
    # Resampling already happened upstream (in 'load_multiple_datasets'), so we
    # simply forward the feature extractor's own configured sampling rate.
    sampling_rate = self.feature_extractor.sampling_rate
    batch = self.feature_extractor(
        audios, sampling_rate=sampling_rate, return_tensors="pt", padding=self.padding, max_length=self.max_length
    )
    # Shape (N, 1) so it broadcasts/concats cleanly against per-example columns.
    batch["len_audio"] = torch.tensor(len_audio).unsqueeze(1)
    return batch