in pytouch/datasets/slip_sequence.py [0:0]
def __getitem__(self, index):
    """Return one (clip, target) sample for the sequence at *index*.

    Builds a list of frame indices for the sequence, loads the frames,
    applies the optional spatial transform per-frame, and stacks them
    into a tensor of shape (C, T, H, W).

    Args:
        index: Position into ``self.videos`` / ``self.annotations``.

    Returns:
        Tuple ``(clip, target)`` where ``clip`` is a (C, T, H, W) tensor
        and ``target`` is the (optionally transformed) annotation.
    """
    sequence_path = os.path.join(self.root, self.videos[index])
    # NOTE(review): assumes POSIX-style separators in self.videos entries
    # (e.g. ".../objN/seqM") — verify on non-POSIX platforms.
    sequence_list = sequence_path.split("/")
    seq_num = int(sequence_list[-1].replace("seq", ""))
    obj_num = int(sequence_list[-2].replace("obj", ""))
    if seq_num < 10:
        # Slip sequences: start near the labeled slip onset, with a small
        # random temporal jitter (0-4 frames) for augmentation.
        shift = np.random.choice(5, 1)[0]
        seq_begin = self.slip_label[obj_num][seq_num] + shift
        frame_indices = list(range(seq_begin, seq_begin + self.frame_duration))
    else:
        # Non-slip sequences: pick a random window start in [0, 100).
        shift = np.random.choice(100, 1)[0]
        frame_indices = list(range(shift, shift + self.frame_duration))
    # Load the selected frames as a list of images.
    clip = self.loader(sequence_path, frame_indices)
    if self.spatial_transform is not None:
        # Randomize once so the same spatial params apply to every frame.
        self.spatial_transform.randomize_parameters()
        clip = [self.spatial_transform(img) for img in clip]
    # (T, C, H, W) -> (C, T, H, W) for 3D-conv style consumers.
    clip = torch.stack(clip, 0).permute(1, 0, 2, 3)
    target = self.annotations[index]
    if self.target_transform is not None:
        # Bug fix: the transform must be APPLIED to the target, not
        # assigned to it (previously returned the callable itself).
        target = self.target_transform(target)
    return clip, target