HRESULT FFmpegInteropMSS::InitFFmpegContext(bool forceAudioDecode, bool forceVideoDecode)

in FFmpegInterop/Source/FFmpegInteropMSS.cpp [284:514]
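
Probes the opened AVFormatContext for stream information, sets up the audio and video decoders (routing attached cover art to a thumbnail stream rather than a video stream), and wires the resulting stream descriptors into a MediaStreamSource with Starting and SampleRequested handlers.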


HRESULT FFmpegInteropMSS::InitFFmpegContext(bool forceAudioDecode, bool forceVideoDecode)
{
	HRESULT hr = S_OK;

	if (SUCCEEDED(hr))
	{
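		// avformat_find_stream_info() reads a short stretch of packets so FFmpeg can
		// fill in codec parameters, durations and dispositions for every stream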
		if (avformat_find_stream_info(avFormatCtx, NULL) < 0)
		{
			hr = E_FAIL; // Could not find stream information
		}
	}

	if (SUCCEEDED(hr))
	{
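		// FFmpegReader pulls packets from avFormatCtx and routes them to the
		// per-stream sample providers registered below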
		m_pReader = ref new FFmpegReader(avFormatCtx);
		if (m_pReader == nullptr)
		{
			hr = E_OUTOFMEMORY;
		}
	}

	if (SUCCEEDED(hr))
	{
		// Find the audio stream and its decoder
		AVCodec* avAudioCodec = nullptr;
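		// av_find_best_stream() returns the stream index (or a negative AVERROR)
		// and points avAudioCodec at a suitable decoder when one is found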
		audioStreamIndex = av_find_best_stream(avFormatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, &avAudioCodec, 0);
		if (audioStreamIndex != AVERROR_STREAM_NOT_FOUND && avAudioCodec)
		{
			// allocate a new decoding context
			avAudioCodecCtx = avcodec_alloc_context3(avAudioCodec);
			if (!avAudioCodecCtx)
			{
				hr = E_OUTOFMEMORY;
				DebugMessage(L"Could not allocate a decoding context\n");
				avformat_close_input(&avFormatCtx);
			}

			if (SUCCEEDED(hr))
			{
				// initialize the stream parameters with demuxer information
				if (avcodec_parameters_to_context(avAudioCodecCtx, avFormatCtx->streams[audioStreamIndex]->codecpar) < 0)
				{
					hr = E_FAIL;
					avformat_close_input(&avFormatCtx);
					avcodec_free_context(&avAudioCodecCtx);
				}

				if (SUCCEEDED(hr))
				{
					if (avcodec_open2(avAudioCodecCtx, avAudioCodec, NULL) < 0)
					{
						avcodec_free_context(&avAudioCodecCtx); // free the context (also nulls the pointer) instead of leaking it
						hr = E_FAIL; // Cannot open the audio codec
					}
					else
					{
						// Detect audio format and create audio stream descriptor accordingly
						hr = CreateAudioStreamDescriptor(forceAudioDecode);
						if (SUCCEEDED(hr))
						{
							hr = audioSampleProvider->AllocateResources();
							if (SUCCEEDED(hr))
							{
								m_pReader->SetAudioStream(audioStreamIndex, audioSampleProvider);
							}
						}

						if (SUCCEEDED(hr))
						{
							// Convert audio codec name for property
							hr = ConvertCodecName(avAudioCodec->name, &audioCodecName);
						}
					}
				}
			}
		}
	}

	if (SUCCEEDED(hr))
	{
		// Find the video stream and its decoder
		AVCodec* avVideoCodec = nullptr;
		videoStreamIndex = av_find_best_stream(avFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &avVideoCodec, 0);
		if (videoStreamIndex != AVERROR_STREAM_NOT_FOUND && avVideoCodec)
		{
			// FFmpeg identifies album/cover art from a music file as a video stream
			// Avoid creating an unnecessary video stream for this album/cover art
			// (disposition is a bit field, so test the flag instead of comparing for equality)
			if (avFormatCtx->streams[videoStreamIndex]->disposition & AV_DISPOSITION_ATTACHED_PIC)
			{
				thumbnailStreamIndex = videoStreamIndex;
				videoStreamIndex = AVERROR_STREAM_NOT_FOUND;
				avVideoCodec = nullptr;
			}
			else
			{
				thumbnailStreamIndex = AVERROR_STREAM_NOT_FOUND;
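				// Some containers (e.g. MP4 from phone cameras) record the display
				// orientation as a "rotate" metadata tag on the video stream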
				AVDictionaryEntry *rotate_tag = av_dict_get(avFormatCtx->streams[videoStreamIndex]->metadata, "rotate", NULL, 0);
				if (rotate_tag != NULL)
				{
					rotateVideo = true;
					rotationAngle = atoi(rotate_tag->value);
				}
				else
				{
					rotateVideo = false;
				}
				// allocate a new decoding context
				avVideoCodecCtx = avcodec_alloc_context3(avVideoCodec);
				if (!avVideoCodecCtx)
				{
					DebugMessage(L"Could not allocate a decoding context\n");
					avformat_close_input(&avFormatCtx);
					hr = E_OUTOFMEMORY;
				}

				if (SUCCEEDED(hr))
				{
					// initialize the stream parameters with demuxer information
					if (avcodec_parameters_to_context(avVideoCodecCtx, avFormatCtx->streams[videoStreamIndex]->codecpar) < 0)
					{
						avformat_close_input(&avFormatCtx);
						avcodec_free_context(&avVideoCodecCtx);
						hr = E_FAIL;
					}
				}

				if (SUCCEEDED(hr))
				{
					// Enable multi-threaded decoding across frames and slices;
					// hardware_concurrency() can return 0 when the core count is
					// unknown, so only override FFmpeg's default when it is positive
					unsigned threads = std::thread::hardware_concurrency();
					if (threads > 0)
					{
						avVideoCodecCtx->thread_count = threads;
						avVideoCodecCtx->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
					}

					if (avcodec_open2(avVideoCodecCtx, avVideoCodec, NULL) < 0)
					{
						avcodec_free_context(&avVideoCodecCtx); // free the context (also nulls the pointer) instead of leaking it
						hr = E_FAIL; // Cannot open the video codec
					}
					else
					{
						// Detect video format and create video stream descriptor accordingly
						hr = CreateVideoStreamDescriptor(forceVideoDecode);
						if (SUCCEEDED(hr))
						{
							hr = videoSampleProvider->AllocateResources();
							if (SUCCEEDED(hr))
							{
								m_pReader->SetVideoStream(videoStreamIndex, videoSampleProvider);
							}
						}

						if (SUCCEEDED(hr))
						{
							// Convert video codec name for property
							hr = ConvertCodecName(avVideoCodec->name, &videoCodecName);
						}
					}
				}
			}
		}
	}

	if (SUCCEEDED(hr))
	{
		// Convert media duration from AV_TIME_BASE units (microseconds) to the
		// TimeSpan unit of 100-nanosecond ticks
		mediaDuration = { LONGLONG(avFormatCtx->duration * 10000000 / double(AV_TIME_BASE)) };

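		// Create the MediaStreamSource from whichever stream descriptors exist,
		// registering the video descriptor ahead of the audio descriptor when both are present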
		if (audioStreamDescriptor)
		{
			if (videoStreamDescriptor)
			{
				if (mss)
				{
					mss->AddStreamDescriptor(videoStreamDescriptor);
					mss->AddStreamDescriptor(audioStreamDescriptor);
				}
				else
				{
					mss = ref new MediaStreamSource(videoStreamDescriptor, audioStreamDescriptor);
				}
			}
			else
			{
				if (mss)
				{
					mss->AddStreamDescriptor(audioStreamDescriptor);
				}
				else
				{
					mss = ref new MediaStreamSource(audioStreamDescriptor);
				}
			}
		}
		else if (videoStreamDescriptor)
		{
			if (mss)
			{
				mss->AddStreamDescriptor(videoStreamDescriptor);
			}
			else
			{
				mss = ref new MediaStreamSource(videoStreamDescriptor);
			}
		}
		if (mss)
		{
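			// A positive duration means the source is seekable; otherwise treat it as realtime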
			if (mediaDuration.Duration > 0)
			{
				mss->Duration = mediaDuration;
				mss->CanSeek = true;
			}
			else
			{
				// Set buffer time to 0 for realtime streaming to reduce latency
				mss->BufferTime = { 0 };
			}

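			// Hook the Starting and SampleRequested events; the returned tokens are kept
			// so the handlers can be detached later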
			startingRequestedToken = mss->Starting += ref new TypedEventHandler<MediaStreamSource ^, MediaStreamSourceStartingEventArgs ^>(this, &FFmpegInteropMSS::OnStarting);
			sampleRequestedToken = mss->SampleRequested += ref new TypedEventHandler<MediaStreamSource ^, MediaStreamSourceSampleRequestedEventArgs ^>(this, &FFmpegInteropMSS::OnSampleRequested);
		}
		else
		{
			hr = E_OUTOFMEMORY;
		}
	}

	return hr;
}
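
For orientation, a minimal sketch of how a caller might drive this routine. The helper name OpenAndInit, its uri parameter, and the direct call into InitFFmpegContext are hypothetical; in the actual library the AVFormatContext is opened and assigned by the FFmpegInteropMSS creation path before this method runs.

extern "C"
{
#include <libavformat/avformat.h>
}

// Hypothetical driver (not part of FFmpegInteropMSS.cpp)
HRESULT OpenAndInit(FFmpegInteropMSS^ interopMSS, const char* uri)
{
	AVFormatContext* ctx = avformat_alloc_context();
	if (!ctx)
	{
		return E_OUTOFMEMORY;
	}

	// On failure, avformat_open_input() frees the supplied context and nulls the pointer
	if (avformat_open_input(&ctx, uri, NULL, NULL) < 0)
	{
		return E_FAIL;
	}

	// ... assign ctx to the instance's avFormatCtx ...

	// Probe streams, open decoders, and build the MediaStreamSource
	return interopMSS->InitFFmpegContext(false /*forceAudioDecode*/, false /*forceVideoDecode*/);
}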