# dense_labels_to_segments()
#
# in datasets/base_video_dataset.py [0:0]


def dense_labels_to_segments(
        dense_labels,
        segment_start_time,
        segment_end_time,
        # -1 => get as many as possible
        pred_steps=-1,
        fixed_duration=None,
        dummy_label=-1):
    """Convert dense (start, end, label) annotations into a segment list.

    Actions entirely outside [segment_start_time, segment_end_time] are
    dropped; any gap between the current position and the next action is
    filled with ``dummy_label`` so the model is not penalized for time
    spans where the true action is unknown. Each covered span is chopped
    into sub-segments by ``break_segments_by_duration``.

    Args:
        dense_labels: iterable of (start, end, label) tuples; assumed
            sorted by start time (earlier entries are consumed first --
            TODO confirm against callers).
        segment_start_time: left edge of the window to cover.
        segment_end_time: right edge of the window; action end times are
            clipped to this value.
        pred_steps: if > 0, truncate or pad the output to exactly this
            many segments; -1 keeps as many as possible.
        fixed_duration: forwarded to ``break_segments_by_duration``; when
            None, each span must be consumed in full (checked below).
        dummy_label: label (or list of labels) used both for unknown time
            inside the window and for padding entries.

    Returns:
        A list of segments as produced by ``break_segments_by_duration``,
        padded (when pred_steps > 0) with ``[-1] + dummy_label`` entries.
    """
    segments = []
    for start, end, label in dense_labels:
        if end < segment_start_time:
            # Then this action is past, not relevant here
            # should only happen for the pos-1 action being added
            continue
        if start > segment_end_time:
            # This action starts after the segment, so leave this
            continue
        # should not look at anything beyond the segment end time
        end = min(end, segment_end_time)
        if start > segment_start_time:
            # Add an empty slot of action, for the time where we don't know
            # what happened. Setting the action itself to be -1, so the
            # model can predict whatever and it won't be penalized
            new_segments, duration_used = break_segments_by_duration(
                start - segment_start_time, dummy_label, fixed_duration)
            segments += new_segments
            segment_start_time += duration_used
        new_segments, duration_used = break_segments_by_duration(
            end - segment_start_time, label, fixed_duration)
        segments += new_segments
        segment_start_time += duration_used
        if fixed_duration is None:
            # Without a fixed duration the whole span must be consumed.
            assert segment_start_time == end
        if pred_steps > 0 and len(segments) >= pred_steps:
            break
    if pred_steps > 0:
        segments = segments[:pred_steps]
        # Pad it with dummy intervals for batching, if lower
        pad_label = dummy_label if isinstance(dummy_label, list) else [dummy_label]
        # Build a fresh list per padded slot. The previous
        # `[[-1] + pad_label] * n` replicated references to ONE list, so
        # mutating any padded segment downstream mutated all of them.
        segments += [[-1] + pad_label
                     for _ in range(pred_steps - len(segments))]
    return segments