def get_minibatch_info()

in lib/datasets/charades.py


    def get_minibatch_info(self, indices):
        """
        Given iteration indices, return the necessary information for
        constructing a minibatch. This will later be used in
        charades_data_input.py to actually load the data and construct the
        blobs.
        """
        half_len = self._seq_len // 2

        image_paths = []
        labels = []
        spatial_shift_positions = []
        lfb = []

        if not isinstance(indices, list):
            indices = indices.tolist()

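        # Pad short iterations by repeating the first index so every GPU
        # still receives a full per-GPU batch.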
        while len(indices) < self._batch_size // cfg.NUM_GPUS:
            indices.append(indices[0])

        for idx in indices:

            # center_idx is the middle frame in a clip.
            if self._lfb_infer_only:
                video_idx, center_idx = self._lfb_frames[idx]
                num_frames = len(self._image_paths[video_idx])
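                # When only inferring LFB features, evaluate just the
                # center crop of each sampled frame.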
                spatial_shift_positions.append(CENTER_CROP_INDEX)
            else:
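                # idx encodes both video and (at test time) clip:
                # idx = multi_clip_idx * self._num_videos + video_idx.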
                video_idx = idx % self._num_videos
                num_frames = len(self._image_paths[video_idx])
                if self._split == 'train':
                    center_idx = sample_train_idx(num_frames, self._seq_len)
                    spatial_shift_positions.append(None)
                else:
                    # For, e.g., 30-clip testing, multi_clip_idx enumerates
                    # (0-left, 0-center, 0-right, ..., 9-left, 9-center,
                    # 9-right).
                    multi_clip_idx = idx // self._num_videos

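                    # E.g., with 10 segments x 3 crops (30-clip testing),
                    # multi_clip_idx = 7 selects crop 7 % 3 = 1 (center)
                    # of segment 7 // 3 = 2.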
                    spatial_shift_positions.append(multi_clip_idx % 3)
                    segment_id = multi_clip_idx // 3

                    center_idx = sample_center_of_segments(
                        segment_id, num_frames, self._num_test_segments, half_len)

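            # Frame indices of the clip: a window around center_idx,
            # sampled at stride self._sample_rate.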
            seq = dataset_helper.get_sequence(
                center_idx, half_len, self._sample_rate, num_frames)

            image_paths.append([self._image_paths[video_idx][frame]
                                for frame in seq])
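            # A clip inherits every label that occurs anywhere within its
            # temporal span (Charades is a multi-label dataset).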
            labels.append(aggregate_labels(
                [self._image_labels[video_idx][frame]
                 for frame in range(seq[0], seq[-1] + 1)]))
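            # If a long-term feature bank is attached, also sample its
            # features for this video around center_idx.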
            if self._lfb_enabled:
                lfb.append(sample_lfb(video_idx, center_idx, self._lfb))

        split_list = [self._split_num] * len(indices)

        return (image_paths, labels, split_list, spatial_shift_positions, lfb)
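
For context, the helpers called above are defined elsewhere in the repository. Below is a minimal sketch of what dataset_helper.get_sequence and aggregate_labels plausibly do, inferred only from their call sites in this method; the stride arithmetic, the clamping behavior, and the return types are assumptions, not the repository's actual code.

    def get_sequence(center_idx, half_len, sample_rate, num_frames):
        # Assumption: a window of roughly seq_len frames centered on
        # center_idx, taking every sample_rate-th frame.
        seq = range(center_idx - half_len * sample_rate,
                    center_idx + half_len * sample_rate,
                    sample_rate)
        # Assumption: out-of-range indices are clamped to the first/last
        # frame instead of raising.
        return [min(max(frame, 0), num_frames - 1) for frame in seq]

    def aggregate_labels(label_list):
        # Assumption: a clip's label set is the union of the per-frame
        # (multi-)label lists it spans.
        return sorted({label for labels in label_list for label in labels})

Under this reading, labels[i] is the deduplicated set of actions occurring anywhere between the clip's first and last frame, which matches the range(seq[0], seq[-1] + 1) span used above.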