def get_video_clips()

in siammot/data/video_dataset.py


    def get_video_clips(self, sampling_interval_ms=250):
        """
        Split each long video into short clips spanning roughly self.clip_len
        milliseconds; clips are generated in a temporal sliding-window fashion.
        """
        video_clips = []
        for (sample_id, sample) in tqdm(self.data.items()):
            frame_idxs_with_anno = sample.get_non_empty_frames(self.filter_fn)
            if len(frame_idxs_with_anno) == 0:
                continue
            # The video clip may not be temporally continuous
            start_frame = min(frame_idxs_with_anno)
            end_frame = max(frame_idxs_with_anno)
            # make sure that the video clip spans at least self.frames_in_clip frames
            clip_len_in_frames = max(self.frames_in_clip, int(self.clip_len / 1000. * sample.fps))
            sampling_interval = int(sampling_interval_ms / 1000. * sample.fps)
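            # e.g. with sample.fps = 30, a clip_len of 1000 ms spans 30 frames
            # and a sampling_interval_ms of 250 slides the window by 7 frames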
            for idx in range(start_frame, end_frame, sampling_interval):
                clip_frame_ids = []
                # only include frames with annotation within the video clip
                for frame_idx in range(idx, idx + clip_len_in_frames):
                    if frame_idx in frame_idxs_with_anno:
                        clip_frame_ids.append(frame_idx)
                # Only include video clips that have at least self.frames_in_clip annotated frames
                if len(clip_frame_ids) >= self.frames_in_clip:
                    video_clips.append((sample_id, clip_frame_ids))

        return video_clips
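
For context, the returned (sample_id, clip_frame_ids) pairs are typically indexed
later by the dataset's __getitem__, which draws self.frames_in_clip frames from one
clip to form a training sample. A minimal sketch of that consumption step, using
hypothetical names (clips, frames_in_clip, sample_training_frames) that are not part
of the original file:

    import random

    # Stand-in for the output of get_video_clips(): each entry pairs a video id
    # with the annotated frame indices that fall inside one sliding window.
    clips = [
        ("video_a", [0, 1, 2, 5, 6]),
        ("video_b", [10, 12, 13]),
    ]

    frames_in_clip = 2  # assumed to mirror self.frames_in_clip

    def sample_training_frames(clip_index):
        """Pick frames_in_clip annotated frames from one clip, in temporal order."""
        sample_id, frame_ids = clips[clip_index]
        chosen = sorted(random.sample(frame_ids, frames_in_clip))
        return sample_id, chosen

    print(sample_training_frames(0))  # e.g. ('video_a', [1, 5])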