# create_optimized_MP4_clips()
#
# From: source/controlplaneapi/infrastructure/lambda/EventClipGenerator/mre-event-clip-generator.py [0:0]


def create_optimized_MP4_clips(segment, event, chunks):
    """Create an optimized MP4 clip (plus a thumbnail) for a segment via AWS Elemental MediaConvert.

    Loads the MP4 job-settings template bundled with this Lambda, points its
    inputs at the segment's source chunk(s) in S3, clips them to the segment's
    optimized (Opto) start/end timings, and submits the MediaConvert job.

    :param segment: Segment dict carrying 'OptoStart'/'OptoEnd' timings (each
        either a plain value or a per-audio-track dict). Mutated in place:
        'OptimizedS3KeyPrefix' and 'OptimizedThumbnailS3KeyPrefix' are set.
    :param event: Event dict; may carry 'TrackNumber' selecting the audio track.
    :param chunks: List of chunk dicts, each with 'S3Bucket' and 'S3Key'.

    :return: Tuple ``(keyprefix, hls_input_setting)`` — the S3 key prefix of the
        MP4 output and the HLS input settings built for this segment.

    :raises Exception: Re-raises anything that fails while building/submitting
        the job (after logging it).
    """
    hls_input_setting = []

    try:
        # Load the MediaConvert job template shipped alongside this Lambda.
        job_settings_filename = os.path.join(os.path.dirname(__file__), 'job_settings_mp4.json')
        with open(job_settings_filename) as json_data:
            jobSettings = json.load(json_data)

        # An AudioTrack sent in the event selects which track MediaConvert
        # extracts and is embedded in the output video key prefix. Default 1.
        audioTrack = 1 if 'TrackNumber' not in event else event['TrackNumber']

        hls_input_setting = build_hls_input(segment, event, chunks, audioTrack)

        _set_audio_selectors(jobSettings, audioTrack)

        runid = str(uuid.uuid4())

        # MP4 output is keyed by audio track and the segment's Opto timings
        # ('.' replaced by ':' so they read as timecodes in the key).
        keyprefix = (
            f"optimized_assets/{runid}/MP4/{str(audioTrack)}/"
            f"{str(get_OptoStart(segment, event)).replace('.', ':')}-"
            f"{str(get_OptoEnd(segment, event)).replace('.', ':')}"
        )
        job_output_destination = f"s3://{OUTPUT_BUCKET}/{keyprefix}"
        jobSettings["OutputGroups"][0]["OutputGroupSettings"]["FileGroupSettings"]["Destination"] = job_output_destination

        # Only one thumbnail is needed regardless of how many audio tracks
        # exist, so the thumbnail key does not include the track number.
        thumbnail_keyprefix = (
            f"thumbnail/{runid}/"
            f"{str(get_OptoStart(segment, event)).replace('.', ':')}-"
            f"{str(get_OptoEnd(segment, event)).replace('.', ':')}"
        )
        thumbnail_job_output_destination = f"s3://{OUTPUT_BUCKET}/{thumbnail_keyprefix}"
        jobSettings["OutputGroups"][1]["OutputGroupSettings"]["FileGroupSettings"]["Destination"] = thumbnail_job_output_destination

        # A segment's timings can span multiple source chunks. Clipping rules:
        #   1 chunk            -> clip with both StartTimecode and EndTimecode
        #   >1 chunks, first   -> StartTimecode = OptoStart, no EndTimecode
        #   >1 chunks, middle  -> no clipping (whole chunk is included)
        #   >1 chunks, last    -> no StartTimecode, EndTimecode = OptoEnd
        print(f"we got {len(chunks)} number of chunks")
        if len(chunks) == 1:
            _configure_single_chunk_input(jobSettings, segment, event, chunks, audioTrack)
        elif len(chunks) > 1:
            _configure_multi_chunk_inputs(jobSettings, segment, event, chunks)
        # NOTE(review): when chunks is empty no job is submitted, yet the
        # Optimized* keys below are still recorded and point at outputs that
        # will never exist — confirm callers never pass an empty chunk list.

        if chunks:
            # Submit the assembled job to AWS Elemental MediaConvert.
            jobMetadata = {'JobId': str(uuid.uuid4())}
            create_job(jobMetadata, jobSettings)

        # Record where the MediaConvert outputs will land on the segment.
        segment['OptimizedS3KeyPrefix'] = f"{job_output_destination}.mp4"
        segment['OptimizedThumbnailS3KeyPrefix'] = f"{thumbnail_job_output_destination}.0000000.jpg"

    except Exception as e:
        print(f'Exception: {e}')
        raise

    return keyprefix, hls_input_setting


def _set_audio_selectors(jobSettings, audioTrack):
    """Point Input 0's AudioSelectors at the requested track.

    A positive track number pins extraction to that track; otherwise the
    template falls back to the container's default audio selection.
    """
    if int(audioTrack) > 0:
        jobSettings['Inputs'][0]['AudioSelectors'] = {
            "Audio Selector 1": {
                "Tracks": [int(audioTrack)],
                "DefaultSelection": "NOT_DEFAULT",
                "SelectorType": "TRACK"
            }
        }
    else:
        jobSettings['Inputs'][0]['AudioSelectors'] = {
            "Audio Selector 1": {
                "DefaultSelection": "DEFAULT"
            }
        }
        jobSettings['Inputs'][0]['AudioSelectorGroups'] = {
            "Audio Selector Group 1": {
                "AudioSelectorNames": [
                    "Audio Selector 1"
                ]
            }
        }


def _configure_single_chunk_input(jobSettings, segment, event, chunks, audioTrack):
    """Point Input 0 at the sole chunk and clip it to the segment's Opto timings."""
    jobSettings['Inputs'][0]['FileInput'] = f"s3://{chunks[0]['S3Bucket']}/{chunks[0]['S3Key']}"

    print("Only one Chunk found .. Clip Timings is")
    if "OptoEnd" in segment and "OptoStart" in segment:
        # OptoStart/OptoEnd may be a plain value or a per-audio-track dict.
        opto_end = segment['OptoEnd']
        if isinstance(opto_end, dict):
            print(f"Segment OptoEnd is {opto_end[audioTrack]}")
        else:
            print(f"Segment OptoEnd is {opto_end}")

        opto_start = segment['OptoStart']
        if isinstance(opto_start, dict):
            print(f"Segment OptoStart is {opto_start[audioTrack]}")
        else:
            print(f"Segment OptoStart is {opto_start}")

    print("Here are the modified Clip Timings when Total chunks = 1")
    # get_clip_timings returns (end, start) — note the order.
    clip_timings = get_clip_timings(segment, event)
    print(clip_timings)
    endtime, starttime = clip_timings

    jobSettings['Inputs'][0]['InputClippings'][0]['EndTimecode'] = str(endtime)
    jobSettings['Inputs'][0]['InputClippings'][0]['StartTimecode'] = str(starttime)

    # With a single chunk the EndTimecode is not needed when it precedes the
    # StartTimecode — drop it so MediaConvert clips through to the chunk end.
    if datetime.strptime(endtime, "%H:%M:%S:%f") < datetime.strptime(starttime, "%H:%M:%S:%f"):
        jobSettings['Inputs'][0]['InputClippings'][0].pop('EndTimecode', None)

    print("Single Chunk processed .. JobSettings is ...")
    print(json.dumps(jobSettings))


def _configure_multi_chunk_inputs(jobSettings, segment, event, chunks):
    """Create one MediaConvert Input per chunk with the appropriate clipping.

    First chunk: clipped at OptoStart only. Last chunk: clipped at OptoEnd
    only. Chunks in between are included whole (no InputClippings).
    """
    last_index = len(chunks) - 1
    for chunk_index, chunk in enumerate(chunks):
        input_segment_location = f"s3://{chunk['S3Bucket']}/{chunk['S3Key']}"
        print(f"Chunk index is {chunk_index}")

        if chunk_index == 0:    # First Chunk
            jobSettings['Inputs'][0]['FileInput'] = input_segment_location
            jobSettings['Inputs'][0]['InputClippings'][0]['StartTimecode'] = get_start_clip_timings(segment, event)
            jobSettings['Inputs'][0]['InputClippings'][0].pop('EndTimecode', None)

            print("First chunk processing ... Job Setting is")
            print(json.dumps(jobSettings))
        else:
            # Clone Input 0 (as configured for the first chunk) and adjust it.
            jobSettings['Inputs'].append(copy.deepcopy(jobSettings['Inputs'][0]))
            jobSettings['Inputs'][chunk_index]['FileInput'] = input_segment_location

            if chunk_index == last_index:  # Last Chunk
                jobSettings['Inputs'][chunk_index]['InputClippings'][0].pop('StartTimecode', None)
                jobSettings['Inputs'][chunk_index]['InputClippings'][0]['EndTimecode'] = get_end_clip_timings(segment, event)

                print("Last chunk processing ... Job Setting is")
                print(json.dumps(jobSettings))
            else:   # In-between chunks need no clipping at all
                jobSettings['Inputs'][chunk_index]['InputClippings'] = []
                print("Sandwitch chunk processing ... Job Setting is")
                print(json.dumps(jobSettings))