def transform_clip()

in pytorchvideo/data/epic_kitchen_forecasting.py

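Inner transform applied to each sampled clip dict by the EPIC-Kitchens action-forecasting dataset in this module. It trims the clip's action list to the next num_forecast_actions actions that start after the clip's stop_time, reshapes the (C, T, H, W) video tensor into num_input_clips stacked sub-clips of frames_per_clip frames each, replaces None fields with empty tensors, and finally applies the user-supplied transform, if any. The variables num_forecast_actions, num_input_clips, frames_per_clip, and transform are captured from the enclosing scope. A small worked example follows the code below.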

        def transform_clip(clip: Dict[str, Any]) -> Dict[str, Any]:
            # Actions must already be sorted by start time for the trimming below.
            assert all(
                clip["actions"][i].start_time <= clip["actions"][i + 1].start_time
                for i in range(len(clip["actions"]) - 1)
            ), "Actions must be sorted"
            # Keep only the next num_forecast_actions actions that start after the clip ends.
            next_k_actions: List[ActionData] = [
                a for a in clip["actions"] if (a.start_time > clip["stop_time"])
            ][:num_forecast_actions]
            clip["actions"] = next_k_actions

            assert clip["video"].size()[1] == num_input_clips * frames_per_clip
            clip_video_tensor = torch.stack(
                [
                    clip["video"][
                        :, (i * frames_per_clip) : ((i + 1) * frames_per_clip), :, :
                    ]
                    for i in range(num_input_clips)
                ]
            )
            clip["video"] = clip_video_tensor

            # Replace any None fields with empty tensors.
            for key in clip:
                if clip[key] is None:
                    clip[key] = torch.tensor([])

            # Apply the user-provided transform, if any.
            if transform:
                clip = transform(clip)

            return clip
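
To make the action trimming and the tensor reshape concrete, here is a minimal, self-contained sketch of the two core steps applied to a fake clip dict. The parameter values, the ActionData stand-in, and the input shapes below are made up for illustration; in the dataset they come from the enclosing transform generator's arguments.

    # Sketch assumptions: ActionData stand-in, hardcoded parameter values, and a
    # tiny random video tensor; the real closure gets these from its enclosing scope.
    from typing import NamedTuple

    import torch


    class ActionData(NamedTuple):  # stand-in for this module's ActionData
        start_time: float
        stop_time: float


    num_forecast_actions, num_input_clips, frames_per_clip = 3, 2, 4
    clip = {
        "video": torch.randn(3, 8, 16, 16),  # (C, T, H, W) with T = 2 * 4
        "stop_time": 10.0,
        "actions": [ActionData(float(t), float(t) + 1.0) for t in range(5, 16)],
    }

    # Step 1: keep only the next num_forecast_actions actions after the clip ends.
    clip["actions"] = [
        a for a in clip["actions"] if a.start_time > clip["stop_time"]
    ][:num_forecast_actions]

    # Step 2: chunk the time axis into num_input_clips slices of frames_per_clip
    # frames and stack them: (C, T, H, W) -> (num_input_clips, C, frames_per_clip, H, W).
    clip["video"] = torch.stack(
        [
            clip["video"][:, i * frames_per_clip : (i + 1) * frames_per_clip, :, :]
            for i in range(num_input_clips)
        ]
    )

    assert len(clip["actions"]) == 3
    assert clip["video"].shape == (2, 3, 4, 16, 16)

Note that torch.stack inserts the new clip dimension in front, so downstream models receive one tensor per input clip along dim 0.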