pytouch/datasets/slip_sequence.py [94:107]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        clip = self.loader(sequence_path, frame_indices)

        if self.spatial_transform is not None:
            self.spatial_transform.randomize_parameters()
            clip = [self.spatial_transform(img) for img in clip]
        clip = torch.stack(clip, 0).permute(1, 0, 2, 3)

        target = self.annotations[index]
        if self.target_transform is not None:
            target = self.target_transform
        return clip, target

    def __len__(self):
        """Return the number of video sequences in the dataset."""
        num_sequences = len(self.videos)
        return num_sequences
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



pytouch/datasets/slip_sequence.py [139:151]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        clip = self.loader(sequence_path, frame_indices)
        if self.spatial_transform is not None:
            self.spatial_transform.randomize_parameters()
            clip = [self.spatial_transform(img) for img in clip]
        clip = torch.stack(clip, 0).permute(1, 0, 2, 3)

        target = self.annotations[index]
        if self.target_transform is not None:
            target = self.target_transform
        return clip, target

    def __len__(self):
        """Return the number of video sequences in the dataset."""
        num_sequences = len(self.videos)
        return num_sequences
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



