def get_minibatch_info()

in lib/datasets/epic.py [0:0]


    def get_minibatch_info(self, indices):
        """
        Given iteration indices, return the necessary information for
        constructing a minibatch. This will later be used in
        epic_data_input.py to actually load the data and construct blobs.

        Args:
            indices (list or np.ndarray): iteration indices; converted to a
                list and padded (by repeating the first element) up to the
                per-GPU batch size. The caller's list is not modified.

        Returns:
            tuple: (image_paths, labels, split_list,
                spatial_shift_positions, lfb), each a list with one entry
                per example in the minibatch (lfb is empty when the long-term
                feature bank is disabled).
        """

        half_len = self._seq_len // 2

        image_paths = []
        labels = []
        lfb = []

        if not isinstance(indices, list):
            indices = indices.tolist()
        else:
            # Copy so the padding below does not mutate the caller's list.
            indices = list(indices)

        # Pad to a full per-GPU batch by repeating the first index.
        while len(indices) < self._batch_size // cfg.NUM_GPUS:
            indices.append(indices[0])

        for idx in indices:

            # Training draws a random annotation (idx is ignored); evaluation
            # uses the given index deterministically.
            if self._is_train:
                ann_idx = np.random.choice(range(len(self._annotations)))
            else:
                ann_idx = idx

            (person, video_name,
             start_frame, stop_frame, verb, noun) = self._annotations[ann_idx]

            num_frames = len(self._image_paths[video_name])

            # Frame indices for the clip, plus the clip's center frame index
            # (used to sample from the long-term feature bank).
            seq, center_idx = get_sequence(
                start_frame, stop_frame, half_len, self._sample_rate,
                num_frames, self._is_train)

            image_paths.append(
                [self._image_paths[video_name][frame] for frame in seq])

            # The task is either verb or noun classification, per config.
            labels.append(verb if cfg.EPIC.CLASS_TYPE == 'verb' else noun)
            if self._lfb_enabled:
                lfb.append(self.sample_lfb(video_name, center_idx))

        split_list = [self._split_num] * len(indices)

        # Spatial crop position: random crop during training (None), center
        # crop by default at evaluation, or an explicitly requested shift.
        if self._is_train:
            spatial_shift_positions = [None] * len(indices)
        elif self._shift is None:
            spatial_shift_positions = [CENTER_CROP_INDEX] * len(indices)
        else:
            spatial_shift_positions = [self._shift] * len(indices)

        return (image_paths, labels, split_list, spatial_shift_positions, lfb)