﻿// ffmpeg_pusher.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
//

#include "ffmpeg_pusher.h"
#include "desktopstreaming.h"
#pragma comment(lib, "d3d9.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "swresample.lib")

// COM class/interface IDs used for WASAPI loopback capture (MMDevice API).
const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
const IID IID_IAudioClient = __uuidof(IAudioClient);
const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient);
// One second expressed in 100-nanosecond REFERENCE_TIME units.
const int REFTIMES_PER_SEC = 10000000;

//int main()
//{
//	avdevice_register_all();	//备注:不加这一句,av_find_input_format("gdigrab")调用将会失败
//	std::cout << avdevice_configuration();
//#ifdef _WIN32
//	CoInitialize(NULL);
//#endif // _WIN32
//
//	ffPusherState fState;
//	fState.rtsp_url = "rtsp://127.0.0.1/live/push_desk";
//	do
//	{
//		auto monitors = GetMonitors();
//		for (auto itr = monitors.begin(); monitors.end() != itr; ++itr)
//		{
//			std::cout << itr->high_part << "," << itr->low_part << ":"
//				<< itr->left << "->" << itr->right << ","
//				<< itr->top << "->" << itr->bottom << std::endl;
//		}
//		if (monitors.empty())
//			break;
//
//		fState.width = monitors[0].right - monitors[0].left;
//		fState.height = monitors[0].bottom - monitors[0].top;
//		fState.desktop_x = monitors[0].left;
//		fState.desktop_y = monitors[0].top;
//		fState.is_monitor = true;
//
//		bool bS = ffmpegOpenVideoInput(&fState);
//		std::cout << "Open video input " << (bS ? "Success" : "Failed") << ",With: " << fState.width << "*" << fState.height << std::endl;
//	} while (0);
//	
//	fState.pts_start_time = CurrentTimeMSec();
//	if (fState.is_video_input_inited)
//	{
//		bool bS = StartVideoCapture(&fState);
//		std::cout << "StartVideoCapture " << (bS ? "Success" : "Failed") << std::endl;
//
//		if (bS)
//		{
//			bS = ffmpegOpenVideoOutput(&fState);
//		}
//		std::cout << "ffmpegOpenVideoOutput " << (bS ? "Success" : "Failed") << std::endl;
//		if (bS)
//		{
//			OpenVideoConverter(&fState);
//
//			bS = StartVideoEncode(&fState);
//		}
//		std::cout << "StartVideoEncode " << (bS ? "Success" : "Failed") << std::endl;
//	}
//
//	bool bS = MicrosoftOpenAudioInput(&fState);
//	std::cout << "MicrosoftOpenAudioInput " << (bS ? "Success" : "Failed") << std::endl;
//	if (bS)
//	{
//		bS = StartAudioCapture(&fState);
//		std::cout << "StartAudioCapture " << (bS ? "Success" : "Failed") << std::endl;
//		if (bS)
//		{
//			bS = OpenAudioEncode(&fState);
//		}
//		std::cout << "OpenAudioEncode " << (bS ? "Success" : "Failed") << std::endl;
//		
//		if (bS)
//		{
//			bS = StartAudioEncode(&fState);
//		}
//		std::cout << "StartAudioEncode " << (bS ? "Success" : "Failed") << std::endl;
//	}
//
//	bS = InitRtspPusher(&fState);
//	std::cout << "InitRtspPusher " << (bS ? "Success" : "Failed") << std::endl;
//	if (bS)
//	{
//		bS = OpenRtspStreams(&fState);
//		std::cout << "OpenRtspStreams " << (bS ? "Success" : "Failed") << std::endl;
//		if (bS)
//		{
//			bS = StartRtspPusher(&fState);
//		}
//		std::cout << "StartRtspPusher " << (bS ? "Success" : "Failed") << std::endl;
//	}
//
//	system("pause");
//	fState.is_pusher_exit = true;
//
//	Sleep(1000);
//	StopRtspPusher(&fState);
//	StopAudioEncode(&fState);
//	StopAudioCapture(&fState);
//	MicrosoftCloseAudioInput(&fState);
//	StopVideoEncode(&fState);
//	StopVideoCapture(&fState);
//	ffmpegCloseVideoInput(&fState);
//	ffmpegCloseVideoOutput(&fState);
//	CloseVideoConverter(&fState);
//
//	CloseRtspPusher(&fState);
//#ifdef _WIN32
//	CoUninitialize();
//#endif // _WIN32
//
//	system("pause");
//	return 0;
//}

// Millisecond tick used for pacing the encode loop; only differences of
// this value are meaningful (the epoch is arbitrary).
// Fix: the previous clock()/(CLOCKS_PER_SEC/1000) divides by zero when
// CLOCKS_PER_SEC < 1000, and clock() measures CPU time (not wall time)
// on POSIX. steady_clock is monotonic wall time on every platform.
uint64_t NowMs()
{
	using namespace std::chrono;
	return (uint64_t)duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
}

// Encodes one chunk of interleaved PCM (`samples` samples at `pcm`) into a
// single AAC packet.  Flow: wrap the PCM in an AVFrame, resample it to the
// planar-float (FLTP) layout the FFmpeg AAC encoder requires, then run one
// send/receive cycle on the encoder.  Returns nullptr on any failure, or
// when the encoder buffered the input without producing a packet yet.
// NOTE(review): the `pts` argument is effectively unused — the frame PTS is
// derived from the running sample counter `audio_encode_frame_pts`.
std::shared_ptr<AVPacket> AudioEncode(ffPusherState* fState, const uint8_t* pcm, int samples, int64_t pts)
{
	std::shared_ptr<AVFrame> inf(av_frame_alloc(), [](AVFrame* f) {
		av_frame_free(&f);
	});
	inf->sample_rate = fState->audio_encode_ctx->sample_rate;
	inf->format = AV_SAMPLE_FMT_FLT;// assumes capture delivers interleaved 32-bit float — TODO confirm this matches audio_sample_fmt
	inf->channels = fState->audio_encode_ctx->channels;
	inf->channel_layout = fState->audio_encode_ctx->channel_layout;
	inf->nb_samples = samples;
	inf->pts = /*pts*/fState->audio_encode_frame_pts;// manual RTSP starts from 0, so the PTS is computed by hand (re-assigned in encoder time_base just below)
	inf->pts = av_rescale_q(fState->audio_encode_frame_pts, { 1, fState->audio_encode_ctx->sample_rate }, fState->audio_encode_ctx->time_base);
	fState->audio_encode_frame_pts += inf->nb_samples;

	if (av_frame_get_buffer(inf.get(), 0) < 0)
	{
		std::cout << "AudioEncode.av_frame_get_buffer Failed." << std::endl;
		return nullptr;
	}

	// Interleaved input: all channels live in data[0].
	int bytes_per_sample = av_get_bytes_per_sample((AVSampleFormat)fState->audio_sample_fmt);
	if (0 == bytes_per_sample)
	{
		std::cout << "AudioEncode.av_get_bytes_per_sample Failed." << std::endl;
		return nullptr;
	}
	memcpy(inf->data[0], pcm, bytes_per_sample* inf->channels*samples);

	// Converts the interleaved frame to planar float through the swr context
	// created in OpenAudioEncode(); returns the converted sample count, -1 on error.
	auto fnResample = [](ffPusherState* fState, std::shared_ptr<AVFrame> in_frame, std::shared_ptr<AVFrame>& out_frame)->int {
		out_frame.reset(av_frame_alloc(), [](AVFrame* f) {
			av_frame_free(&f);
		});
		out_frame->sample_rate = fState->audio_samplerate;
		out_frame->format = AV_SAMPLE_FMT_FLTP;
		out_frame->channels = fState->audio_channels;
		// Rates are configured identically on both sides, so this normally equals nb_samples.
		out_frame->nb_samples = (int)av_rescale_rnd(in_frame->nb_samples, out_frame->sample_rate, in_frame->sample_rate, AV_ROUND_UP);
		out_frame->pts = out_frame->pkt_dts = in_frame->pts;
		if (0 != av_frame_get_buffer(out_frame.get(), 0))
		{
			std::cout << "AudioEncode.fnResample.av_frame_get_buffer Failed." << std::endl;
			return -1;
		}

		int len = swr_convert(fState->audio_resample_ctx, (uint8_t**)&out_frame->data, out_frame->nb_samples, (const uint8_t**)in_frame->data, in_frame->nb_samples);
		if (len < 0)
		{
			std::cout << "AudioEncode.fnResample.swr_convert Failed." << std::endl;
			return -1;
		}
		return len;
	};

	std::shared_ptr<AVFrame> fltp_frame = nullptr;
	if (fnResample(fState, inf, fltp_frame) <= 0)
	{
		std::cout << "AudioEncode.fnResample Failed." << std::endl;
		return nullptr;
	}

	int ret = avcodec_send_frame(fState->audio_encode_ctx, fltp_frame.get());
	if (0 != ret)
	{
		std::cout << "AudioEncode.avcodec_send_frame Failed." << std::endl;
		return nullptr;
	}

	std::shared_ptr<AVPacket> av_packet(av_packet_alloc(), [](AVPacket* p) {
		av_packet_free(&p);
	});
	av_init_packet(av_packet.get());
	ret = avcodec_receive_packet(fState->audio_encode_ctx, av_packet.get());
	// EAGAIN/EOF: the encoder accepted the frame but has no packet ready yet.
	if (AVERROR(EAGAIN) == ret || AVERROR_EOF == ret)
	{
		std::cout << "AudioEncode.avcodec_receive_packet Failed.1" << std::endl;
		return nullptr;
	}
	else if (ret < 0)
	{
		std::cout << "AudioEncode.avcodec_receive_packet Failed.2" << std::endl;
		return nullptr;
	}

	return av_packet;
}

// Worker loop: drains the WASAPI capture ring buffer in encoder-frame-sized
// chunks, AAC-encodes each chunk, hands the packets to the UI widget and
// (optionally) appends them, ADTS-framed, to the debug dump file.
// Runs until fState->is_pusher_exit is set.
void AudioEncodeThreadFn(ffPusherState* fState)
{
	std::cout << "Audio Encode Thread started." << std::endl;
	fState->is_audio_encode_running = true;

	// Number of whole samples (per channel) currently buffered.
	auto fnGetAudioSamples = [](ffPusherState* f)->int {
		return f->audio_buffer->size() * 8 / f->audio_caping_bits_per_sample / f->audio_caping_channels;
	};

	// Pops exactly `samples` samples, or nothing when not enough are buffered.
	auto fnReadAudioSamples = [&](ffPusherState* f, uint8_t* data, uint32_t samples)->int {
		if (fnGetAudioSamples(f) < (int)samples)
			return 0;

		f->audio_buffer->read((char*)data, samples*f->audio_caping_bits_per_sample / 8 * f->audio_caping_channels);
		return samples;
	};

	// Scratch buffer for one encoder frame of PCM.
	// Fix: the original shared_ptr used the default `delete` deleter on an
	// array allocation (undefined behavior); supply an array deleter.
	std::shared_ptr<uint8_t> pcm_buffer(new uint8_t[48000 * 8], [](uint8_t* d) { delete[] d; });
	uint32_t frame_samples = fState->audio_encode_ctx->frame_size;
	while (!fState->is_pusher_exit)
	{
		if ((int)frame_samples <= fnGetAudioSamples(fState))
		{
			int nRet = fnReadAudioSamples(fState, pcm_buffer.get(), frame_samples);
			if (frame_samples != (uint32_t)nRet)
				continue;

			int64_t pts = -1;	// ignored by AudioEncode; kept for the call signature
			auto audio_pkt = AudioEncode(fState, pcm_buffer.get(), frame_samples, pts);
			if (audio_pkt)
			{
				// Hand a private copy of the packet bytes to the UI layer.
				std::shared_ptr<uint8_t> audio_buf(new uint8_t[audio_pkt->size], [](uint8_t* d) { delete[]d; });
				memcpy(audio_buf.get(), audio_pkt->data, audio_pkt->size);
				if (fState->main_widget_)
				{
					fState->main_widget_->PushAudio(audio_buf, audio_pkt->size, 0);
				}
				// Optional debug dump: raw AAC prefixed with a 7-byte ADTS header.
				if (fState->audio_dump_file)
				{
					uint8_t adts_header[7] = { 0 };
					GetAdtsHeader(fState, adts_header, audio_pkt->size);
					fwrite(adts_header, 1, 7, fState->audio_dump_file);
					fwrite(audio_pkt->data, 1, audio_pkt->size, fState->audio_dump_file);
					fflush(fState->audio_dump_file);
				}
			}
		}
		else
		{
			// Not enough PCM buffered yet; yield briefly instead of spinning.
			std::this_thread::sleep_for(std::chrono::milliseconds(1));
		}
	}

	fState->is_audio_encode_running = false;
	std::cout << "Audio Encode Thread exited." << std::endl;
}
// Spawns the audio encode worker thread.  Refuses to start before
// OpenAudioEncode() has initialized the encoder or while a worker is
// already running; returns true only when the thread was launched.
bool StartAudioEncode(ffPusherState* fState)
{
	if (!fState->is_audio_output_inited)
		return false;
	if (fState->is_audio_encode_running)
		return false;

	fState->audio_encode_thread = std::thread(AudioEncodeThreadFn, fState);
	return true;
}
// Signals the global shutdown flag and waits for the encode worker to exit.
void StopAudioEncode(ffPusherState* fState)
{
	fState->is_pusher_exit = true;
	if (!fState->audio_encode_thread.joinable())
		return;
	fState->audio_encode_thread.join();
}

// Creates the AAC encoder plus a swresample context converting the
// interleaved capture format (audio_sample_fmt) into the planar float
// (FLTP) layout required by FFmpeg's native AAC encoder.
// Returns false — after tearing everything down again — on any failure.
bool OpenAudioEncode(ffPusherState* fState)
{
	bool bRet = false;
	do
	{
		if (fState->is_audio_output_inited)
			break;

		auto codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
		if (!codec)
		{
			break;
		}

		fState->audio_encode_ctx = avcodec_alloc_context3(codec);
		if (!fState->audio_encode_ctx)
		{
			break;
		}
		fState->audio_encode_ctx->sample_rate = fState->audio_samplerate;
		fState->audio_encode_ctx->sample_fmt = AV_SAMPLE_FMT_FLTP;	// the only input format the native AAC encoder accepts
		fState->audio_encode_ctx->channels = fState->audio_channels;
		fState->audio_encode_ctx->channel_layout = av_get_default_channel_layout(fState->audio_channels);
		fState->audio_encode_ctx->bit_rate = fState->audio_bitrate;

		if (0 != avcodec_open2(fState->audio_encode_ctx, codec, NULL))
		{
			break;
		}

		// Resampler: same rate and layout on both sides — only the sample
		// format changes (interleaved -> planar float).
		int64_t inchannelLayout = av_get_default_channel_layout(fState->audio_channels);
		int64_t outchannelLayout = av_get_default_channel_layout(fState->audio_channels);
		fState->audio_resample_ctx = swr_alloc();
		if (!fState->audio_resample_ctx)
		{
			// Fix: options were previously set on a potentially null context.
			break;
		}
		av_opt_set_int(fState->audio_resample_ctx, "in_channel_layout", inchannelLayout, 0);
		av_opt_set_int(fState->audio_resample_ctx, "in_sample_rate", fState->audio_samplerate, 0);
		av_opt_set_sample_fmt(fState->audio_resample_ctx, "in_sample_fmt", (AVSampleFormat)fState->audio_sample_fmt, 0);
		av_opt_set_int(fState->audio_resample_ctx, "out_channel_layout", outchannelLayout, 0);
		av_opt_set_int(fState->audio_resample_ctx, "out_sample_rate", fState->audio_samplerate, 0);
		av_opt_set_sample_fmt(fState->audio_resample_ctx, "out_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);

		int ret = swr_init(fState->audio_resample_ctx);
		if (ret < 0)
		{
			break;
		}

		fState->audio_resample_in_bits_per_sample = av_get_bytes_per_sample((AVSampleFormat)fState->audio_sample_fmt);
		fState->audio_resample_out_bits_per_sample = av_get_bytes_per_sample(AV_SAMPLE_FMT_FLTP);
		bRet = true;
		fState->is_audio_output_inited = true;
		// Debug dump target; absence is tolerated (checked before each write).
		fState->audio_dump_file = fopen("../bin_x86/audio_dump.aac", "wb");
	} while (0);
	if (!bRet)
	{
		CloseAudioEncode(fState);
	}
	return bRet;
}
// Tears down the resampler, the AAC encoder context and the debug dump
// file.  Safe to call repeatedly or on a partially-initialized state.
void CloseAudioEncode(ffPusherState* fState)
{
	fState->is_audio_output_inited = false;

	if (nullptr != fState->audio_resample_ctx)
	{
		if (swr_is_initialized(fState->audio_resample_ctx))
			swr_close(fState->audio_resample_ctx);
		swr_free(&fState->audio_resample_ctx);
	}

	if (nullptr != fState->audio_encode_ctx)
	{
		avcodec_close(fState->audio_encode_ctx);
		avcodec_free_context(&fState->audio_encode_ctx);
	}

	if (nullptr != fState->audio_dump_file)
	{
		fclose(fState->audio_dump_file);
		fState->audio_dump_file = nullptr;
	}
}

// Drains every pending WASAPI capture packet into the shared PCM ring
// buffer.  Returns 0 on success (including "nothing captured yet"),
// -1 on any COM failure so the capture thread can bail out.
int AudioCapture(ffPusherState* fState)
{
	HRESULT hr = S_OK;
	uint32_t packetLength = 0;
	uint32_t numFramesAvailable = 0;
	BYTE* pData = nullptr;
	DWORD flags = 0;

	hr = fState->audio_capture_client->GetNextPacketSize(&packetLength);
	if (FAILED(hr))
	{
		return -1;
	}
	if (0 == packetLength)
	{
		// Nothing captured yet; yield briefly instead of spinning.
		std::this_thread::sleep_for(std::chrono::milliseconds(1));
		return 0;
	}

	while (0 < packetLength)
	{
		hr = fState->audio_capture_client->GetBuffer(&pData, &numFramesAvailable, &flags, NULL, NULL);
		if (FAILED(hr))
		{
			return -1;
		}

		// Publish the live capture format for the encode thread.
		fState->audio_caping_channels = fState->audio_mix_format->nChannels;
		fState->audio_caping_samplerate = fState->audio_mix_format->nSamplesPerSec;
		fState->audio_caping_bits_per_sample = fState->audio_mix_format->wBitsPerSample;

		const int bytes = fState->audio_mix_format->nBlockAlign * numFramesAvailable;
		if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
		{
			// Fix: per IAudioCaptureClient::GetBuffer the buffer contents must
			// be treated as silence when this flag is set — the old code wrote
			// whatever undefined bytes happened to be in the buffer.
			std::vector<char> silence(bytes, 0);
			fState->audio_buffer->write(silence.data(), bytes);
		}
		else
		{
			fState->audio_buffer->write((char*)pData, bytes);
		}

		hr = fState->audio_capture_client->ReleaseBuffer(numFramesAvailable);
		if (FAILED(hr))
		{
			return -1;
		}

		hr = fState->audio_capture_client->GetNextPacketSize(&packetLength);
		if (FAILED(hr))
		{
			return -1;
		}
	}

	return 0;
}

// Pumps WASAPI capture packets until shutdown is requested or the capture
// client reports a hard failure.
void AudioCaptureThreadfn(ffPusherState* fState)
{
	std::cout << "Audio capture thread started." << std::endl;
	fState->is_audio_capture_running = true;
	for (; !fState->is_pusher_exit;)
	{
		const int rc = AudioCapture(fState);
		if (0 <= rc)
			continue;
		std::cout << "Audio capture Get Error." << std::endl;
		break;
	}
	fState->is_audio_capture_running = false;
	std::cout << "Audio capture thread exited." << std::endl;
}
// Starts the shared-mode loopback stream, then spins up the capture
// thread.  Returns false if already running, not yet opened, or the
// audio client refuses to start.
bool StartAudioCapture(ffPusherState* fState)
{
	if (fState->is_audio_capture_running || !fState->is_audio_input_inited)
		return false;

	if (FAILED(fState->audio_client->Start()))
	{
		std::cout << "[WASAPICapture] Failed to start audio client" << std::endl;
		return false;
	}

	fState->audio_capture_thread = std::thread(AudioCaptureThreadfn, fState);
	return true;
}
// Requests global shutdown and waits for the capture thread to finish.
void StopAudioCapture(ffPusherState* fState)
{
	fState->is_pusher_exit = true;
	if (!fState->audio_capture_thread.joinable())
		return;
	fState->audio_capture_thread.join();
}

// Opens the default render endpoint in WASAPI shared loopback mode so the
// system's playback mix can be captured.  On success fills audio_client /
// audio_capture_client / audio_mix_format and allocates the PCM ring
// buffer.  COM must already be initialized on the calling thread.
bool MicrosoftOpenAudioInput(ffPusherState* fState)
{
	bool bRet = false;
	do
	{
		if (fState->is_audio_input_inited)
			break;

		HRESULT hr = S_OK;
		hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)fState->audio_enumerator.GetAddressOf());
		if (FAILED(hr))
		{
			break;
		}

		// eRender + loopback below = capture whatever is being played back.
		hr = fState->audio_enumerator->GetDefaultAudioEndpoint(eRender, eMultimedia, fState->audio_device.GetAddressOf());
		if (FAILED(hr))
		{
			break;
		}

		hr = fState->audio_device->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)fState->audio_client.GetAddressOf());
		if (FAILED(hr))
		{
			break;
		}

		// audio_mix_format is CoTaskMemAlloc'd by WASAPI and owned by fState.
		hr = fState->audio_client->GetMixFormat(&fState->audio_mix_format);
		if (FAILED(hr))
		{
			break;
		}

		// Rewrites the (usually float) mix format in place to 16-bit PCM and
		// recomputes the derived block-align / byte-rate fields.
		auto fnAdjustFormatTo16Bits = [](WAVEFORMATEX* pwfx)->int {
			if (WAVE_FORMAT_IEEE_FLOAT == pwfx->wFormatTag)
			{
				pwfx->wFormatTag = WAVE_FORMAT_PCM;
			}
			else if (WAVE_FORMAT_EXTENSIBLE == pwfx->wFormatTag)
			{
				PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
				if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat))
				{
					pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
					pEx->Samples.wValidBitsPerSample = 16;
				}
			}
			else
			{
				return -1;	// unrecognized tag (e.g. plain PCM): left untouched
			}

			pwfx->wBitsPerSample = 16;
			pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
			pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
			return 0;
		};
		// NOTE(review): the return value is ignored — on -1 the stream is
		// initialized with the unmodified mix format; confirm that is intended.
		fnAdjustFormatTo16Bits(fState->audio_mix_format);
		fState->audio_actual_duration = REFTIMES_PER_SEC;	// request a 1-second buffer
		hr = fState->audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, fState->audio_actual_duration, 0, fState->audio_mix_format, NULL);
		if (FAILED(hr))
		{
			break;
		}

		hr = fState->audio_client->GetBufferSize(&fState->audio_buf_frame_count);
		if (FAILED(hr))
		{
			break;
		}

		hr = fState->audio_client->GetService(IID_IAudioCaptureClient, (void**)fState->audio_capture_client.GetAddressOf());
		if (FAILED(hr))
		{
			break;
		}

		// Actual duration of the allocated buffer in REFERENCE_TIME units.
		fState->audio_actual_duration = REFERENCE_TIME(REFTIMES_PER_SEC*fState->audio_buf_frame_count / fState->audio_mix_format->nSamplesPerSec);
		fState->is_audio_input_inited = true;
		fState->audio_buffer.reset(new AudioBuffer(20480));
		fState->audio_buffer->clear();
		bRet = true;
	} while (0);
	return bRet;
}
// Stops the WASAPI loopback stream and releases the mix-format buffer.
// The ComPtr members (enumerator/device/client) release themselves.
void MicrosoftCloseAudioInput(ffPusherState* fState)
{
	if (fState->is_audio_input_inited)
	{
		fState->is_audio_input_inited = false;
		if (fState->audio_client)
		{
			fState->audio_client->Stop();
		}

		// Fix: GetMixFormat() allocates the WAVEFORMATEX with CoTaskMemAlloc;
		// it was never freed, leaking on every open/close cycle.
		if (fState->audio_mix_format)
		{
			CoTaskMemFree(fState->audio_mix_format);
			fState->audio_mix_format = nullptr;
		}
	}
}

// Builds an AVFrame around a copy of the most recently decoded desktop
// image (cached by DecodeVideoFrame).  Returns nullptr when no frame has
// been decoded yet or frame-buffer allocation fails.
std::shared_ptr<AVFrame> getRawVideoFrame(ffPusherState* fState)
{
	std::shared_ptr<AVFrame> inf(av_frame_alloc(), [](AVFrame* f) {
		av_frame_free(&f);
	});
	{
		std::vector<uint8_t> image;
		uint32_t width = 0;
		uint32_t height = 0;
		{
			// Copy the cached image out under the lock so decoding can continue.
			std::lock_guard<std::mutex> lock(fState->video_decode_mutex);
			if (!fState->video_decode_image || (fState->video_decode_image_size == 0))
				return nullptr;
			image.reserve(fState->video_decode_image_size);
			image.assign(fState->video_decode_image.get(), fState->video_decode_image.get() + fState->video_decode_image_size);
			width = fState->video_decode_width;
			height = fState->video_decode_height;
		}

		// NOTE(review): the frame dimensions come from the configured capture
		// size (fState->width/height) while the pixels were captured at
		// video_decode_width/height (copied into `width`/`height` above but
		// unused) — confirm these always match.
		inf->width = fState->width;
		inf->height = fState->height;
		inf->format = fState->video_dec_codec_ctx->pix_fmt;
		if (0 != av_frame_get_buffer(inf.get(), 32))
		{
			return nullptr;
		}
		// Assumes a packed pixel format: all bytes live in plane 0.
		memcpy(inf->data[0], &image[0], image.size());
	}
	return inf;
}

// Converts a captured frame to the encoder's pixel format via sws_scale.
// When no converter exists (capture and encoder formats already match)
// the input frame is passed straight through.  Returns nullptr on failure.
std::shared_ptr<AVFrame> getYUVFrame(ffPusherState* fState, std::shared_ptr<AVFrame>& inf)
{
	if (nullptr == fState->video_convert_ctx)
		return inf;

	auto frame_deleter = [](AVFrame* f) { av_frame_free(&f); };
	std::shared_ptr<AVFrame> converted(av_frame_alloc(), frame_deleter);
	converted->width = fState->width;
	converted->height = fState->height;
	converted->format = fState->video_enc_codec_ctx->pix_fmt;
	converted->pts = inf->pts;
	converted->pkt_dts = inf->pkt_dts;
	// 32-byte alignment keeps sws_scale happy on SIMD code paths.
	if (av_frame_get_buffer(converted.get(), 32) != 0)
	{
		return nullptr;
	}

	const int scaled_rows = sws_scale(fState->video_convert_ctx,
		inf->data, inf->linesize, 0, inf->height,
		converted->data, converted->linesize);
	return (scaled_rows < 0) ? nullptr : converted;
}

// Feeds one frame into the H.264 encoder and pulls out one packet, if the
// encoder has one ready.  Returns nullptr on failure or while the encoder
// is still buffering input.
std::shared_ptr<AVPacket> EncodeYUVFrame(ffPusherState* fState, std::shared_ptr<AVFrame>& yuv, int64_t pts)
{
	AVCodecContext* enc = fState->video_enc_codec_ctx;
	if (nullptr == enc)
		return nullptr;

	yuv->pts = pts;
	yuv->pict_type = AV_PICTURE_TYPE_NONE;	// let the encoder decide the frame type
	if (avcodec_send_frame(enc, yuv.get()) < 0)
	{
		return nullptr;
	}

	auto pkt_deleter = [](AVPacket* p) { av_packet_free(&p); };
	std::shared_ptr<AVPacket> pkt(av_packet_alloc(), pkt_deleter);
	av_init_packet(pkt.get());
	// EAGAIN / EOF mean "no packet yet"; any other negative value is an error.
	const int rc = avcodec_receive_packet(enc, pkt.get());
	return (rc < 0) ? nullptr : pkt;
}

// Fixed-rate encode loop: roughly every 1000/fps_ ms, grab the latest
// decoded desktop frame, convert it to the encoder pixel format, H.264
// encode it, prepend SPS/PPS extradata on key frames, and push the
// resulting Annex-B buffer to the UI widget (and optional dump file).
void VideoEncodeThreadFn(ffPusherState* fState)
{
	std::cout << "Video Encode thread started." << std::endl;
	fState->is_video_encode_running = true;
	uint32_t msec = 1000 / fState->fps_;
	uint64_t encoding_ts = NowMs();
	while (!fState->is_pusher_exit)
	{
		{
			// Sleep for whatever remains of the frame interval so one loop
			// iteration takes ~msec of wall time regardless of encode cost.
			uint32_t delay = msec;
			uint32_t elapsed = NowMs() - encoding_ts;
			if (delay < elapsed)
				delay = 0;
			else
				delay -= elapsed;

			std::this_thread::sleep_for(std::chrono::milliseconds(delay));
			encoding_ts = NowMs();
		}

		auto inf = getRawVideoFrame(fState);
		if (!inf)
		{
			continue;	// no decoded frame cached yet
		}

		//Encode
		auto yuv = getYUVFrame(fState, inf);
		if (!yuv)
		{
			continue;
		}

		int64_t pts = GetVideoPts(fState);
		auto pkt = EncodeYUVFrame(fState, yuv, pts);
		if (!pkt)
		{
			continue;	// encoder buffered the frame; no packet this round
		}

		//std::vector<uint8_t> out_frame;
		// Assembles [extradata?][packet].  Assumes one encoded packet never
		// exceeds width*height*4 (the raw BGRA size) — TODO confirm.
		std::shared_ptr<uint8_t> out_buffer(new uint8_t[fState->video_decode_width*fState->video_decode_height * 4],
			[](uint8_t* d) { delete[] d; });
		int frame_size = 0;
		if (IsKeyFrame(pkt->data, pkt->size))
		{
			// Re-send SPS/PPS (global headers) in-band ahead of each key frame.
			uint8_t* extra_data = fState->video_enc_codec_ctx->extradata;
			int extra_data_size = fState->video_enc_codec_ctx->extradata_size;
			memcpy(out_buffer.get(), extra_data, extra_data_size);
			frame_size += extra_data_size;
		}
		memcpy(out_buffer.get() + frame_size, pkt->data, pkt->size);
		frame_size += pkt->size;

		if (fState->main_widget_)
		{
			fState->main_widget_->PushVideo(out_buffer, frame_size, 0);
		}

		//PostPkt2Rtsp(fState, pkt, RTSP_PKT_VIDEO);
		//std::cout << "H264 Pkt." << pts << "--" << pkt->pts << ":" << pkt->size << std::endl;
		if (fState->video_dump_file)
		{
			fwrite(pkt->data, pkt->size, 1, fState->video_dump_file);
			fflush(fState->video_dump_file);
		}
	}
	fState->is_video_encode_running = false;
	std::cout << "Video Encode thread exited." << std::endl;
}

// Returns true when the Annex-B buffer starts with a NAL unit that marks a
// key-frame boundary (SPS, SEI, or IDR slice).
// Fix/generalization: the original compared the raw NAL header byte against
// 0x67/0x65/0x06/0x27, which silently missed IDR/SPS units carrying a
// different nal_ref_idc (e.g. 0x25, 0x45).  The NAL unit type lives in the
// low 5 bits of the header byte, so mask before comparing.
// Assumes a 4-byte start code (00 00 00 01) at the buffer start.
bool IsKeyFrame(const uint8_t* data, uint32_t size)
{
	// Need at least the 4-byte start code plus one NAL header byte.
	if (size <= 4)
		return false;

	const uint8_t nal_type = data[4] & 0x1F;
	// 5: IDR slice, 6: SEI, 7: SPS — the encoder emits SPS/SEI ahead of IDR.
	return 5 == nal_type || 6 == nal_type || 7 == nal_type;
}

// Launches the video encode loop once the encoder is open; refuses to
// double-start.  Returns true only when the thread was launched.
bool StartVideoEncode(ffPusherState* fState)
{
	if (!fState->is_video_output_inited)
		return false;
	if (fState->is_video_encode_running)
		return false;

	fState->video_encode_thread = std::thread(VideoEncodeThreadFn, fState);
	return true;
}

// Flags shutdown and waits for the video encode thread to drain.
void StopVideoEncode(ffPusherState* fState)
{
	fState->is_pusher_exit = true;
	fState->is_video_encode_running = false;
	if (!fState->video_encode_thread.joinable())
		return;
	fState->video_encode_thread.join();
}

// Pulls one packet from the gdigrab input and decodes it when it belongs
// to the selected video stream.  No-op until the input is opened.
void GetVideoFrame(ffPusherState* fState)
{
	if (!fState->is_video_input_inited)
		return;

	auto frame_deleter = [](AVFrame* f) { av_frame_free(&f); };
	auto packet_deleter = [](AVPacket* p) { av_packet_free(&p); };
	std::shared_ptr<AVFrame> frame(av_frame_alloc(), frame_deleter);
	std::shared_ptr<AVPacket> packet(av_packet_alloc(), packet_deleter);

	av_init_packet(packet.get());
	if (av_read_frame(fState->video_ifmt_ctx, packet.get()) < 0)
		return;

	if (fState->video_index == packet->stream_index)
	{
		DecodeVideoFrame(fState, frame.get(), packet.get());
	}
	av_packet_unref(packet.get());
}

// Runs one send/receive decode cycle and, on success, caches the decoded
// image (assumed 4 bytes per pixel, e.g. BGRA from gdigrab — TODO confirm)
// under video_decode_mutex for the encode thread to pick up.
void DecodeVideoFrame(ffPusherState* fState, AVFrame* frame, AVPacket* pkt)
{
	int ret = avcodec_send_packet(fState->video_dec_codec_ctx, pkt);
	if (ret < 0)
	{
		return;
	}

	ret = avcodec_receive_frame(fState->video_dec_codec_ctx, frame);
	if (AVERROR(EAGAIN) == ret || AVERROR_EOF == ret)
	{
		return;	// decoder needs more input / is flushed — not an error
	}

	if (ret < 0)
	{
		return;
	}

	{
		std::lock_guard<std::mutex> lock(fState->video_decode_mutex);
		// NOTE(review): pkt_size is the size of the *compressed* packet the
		// frame came from, not the decoded image size; if linesize[0] exceeds
		// width*4, the last-row copy below can overrun this buffer — verify.
		fState->video_decode_image_size = frame->pkt_size;
		fState->video_decode_width = frame->width;
		fState->video_decode_height = frame->height;
		fState->video_decode_image.reset(new uint8_t[fState->video_decode_image_size]);
		// Row-by-row copy into a tightly packed (width*4-stride) buffer.
		for (uint32_t i = 0; i < fState->video_decode_height; ++i)
		{
			memcpy(fState->video_decode_image.get() + i * fState->video_decode_width * 4,
				frame->data[0] + i * frame->linesize[0], frame->linesize[0]);
		}
	}

	//std::cout << "Video Decode time:" << NowMs() << "," << frame->width << "*" << frame->height <<":"<< frame->format << std::endl;
	av_frame_unref(frame);
}

// Copies a snapshot of the most recently decoded frame (dimensions, size
// and pixel bytes) into the output parameters.  Leaves them untouched when
// capture is not running or no frame has been decoded yet.
void GetRawVideoFrame(ffPusherState* fState, int& width, int& height, int& data_size, std::shared_ptr<uint8_t>& data)
{
	if (!fState->is_video_capture_running)
		return;

	std::lock_guard<std::mutex> lock(fState->video_decode_mutex);
	const bool have_frame = fState->video_decode_image &&
		0 < fState->video_decode_width && 0 < fState->video_decode_height;
	if (!have_frame)
		return;

	data_size = fState->video_decode_image_size;
	width = fState->video_decode_width;
	height = fState->video_decode_height;
	data.reset(new uint8_t[data_size]);
	memcpy(data.get(), fState->video_decode_image.get(), data_size);
}

// Grab loop: fetches one desktop frame per iteration at roughly fps_.
void VideoCaptureThreadFn(ffPusherState* fState)
{
	std::cout << "video capture thread is started." << std::endl;
	fState->is_video_capture_running = true;
	// Sleep a bit less than a full frame interval (when there is headroom)
	// so the grab itself does not push us below the target frame rate.
	int sleep_ms = 1000 / fState->fps_;
	if (sleep_ms > 30)
		sleep_ms -= 20;
	while (!fState->is_pusher_exit)
	{
		GetVideoFrame(fState);
		std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms));
	}
	fState->is_video_capture_running = false;
	std::cout << "video capture thread is exited." << std::endl;
}
// Starts the desktop-grab thread.  Requires an opened video input and no
// capture thread already running; returns true only when launched.
bool StartVideoCapture(ffPusherState* fState)
{
	if (!fState->is_video_input_inited || fState->is_video_capture_running)
		return false;

	fState->video_capture_thread = std::thread(VideoCaptureThreadFn, fState);
	return true;
}

// Flags shutdown, joins the grab thread, then drops the cached frame.
void StopVideoCapture(ffPusherState* fState)
{
	fState->is_pusher_exit = true;
	if (fState->video_capture_thread.joinable())
	{
		fState->video_capture_thread.join();
	}

	fState->video_decode_width = 0;
	fState->video_decode_height = 0;
	fState->video_decode_image_size = 0;
	fState->video_decode_image.reset();
}

// Opens the Windows "gdigrab" desktop grabber as an FFmpeg input and sets
// up a decoder for its video stream.  Geometry (size/offset) and frame
// rate come from fState.  On any failure everything opened so far is torn
// down again and false is returned.
// Requires avdevice_register_all() to have been called beforehand.
bool ffmpegOpenVideoInput(ffPusherState* fState)
{
	bool bRet = false;
	AVDictionary *options = nullptr;
	do
	{
		if (!fState->is_monitor)
			break;
		if (fState->is_video_input_inited)
			break;

		// gdigrab options: grab width x height at (desktop_x, desktop_y)
		// with the mouse cursor drawn in.
		char video_size[20] = { 0 };
		snprintf(video_size, 20, "%dx%d", fState->width, fState->height);
		av_dict_set_int(&options, "framerate", fState->fps_, AV_DICT_MATCH_CASE);
		av_dict_set_int(&options, "draw_mouse", 1, AV_DICT_MATCH_CASE);
		av_dict_set_int(&options, "offset_x", fState->desktop_x, AV_DICT_MATCH_CASE);
		av_dict_set_int(&options, "offset_y", fState->desktop_y, AV_DICT_MATCH_CASE);
		av_dict_set(&options, "video_size", video_size, 1);

		fState->video_input_fmt = av_find_input_format("gdigrab");
		if (!fState->video_input_fmt) {
			std::cerr << "av_find_input_format.video failed." << std::endl;
			break;
		}

		fState->video_ifmt_ctx = avformat_alloc_context();
		if (0 != avformat_open_input(&fState->video_ifmt_ctx, "desktop", fState->video_input_fmt, &options))
		{
			std::cerr << "avformat_open_input.video failed." << std::endl;
			break;
		}

		if (avformat_find_stream_info(fState->video_ifmt_ctx, nullptr) < 0)
		{
			std::cerr << "avformat_find_stream_info.video failed." << std::endl;
			break;
		}

		// Locate the video stream (keeps the last one, matching the original).
		fState->video_index = -1;
		for (unsigned int i = 0; i < fState->video_ifmt_ctx->nb_streams; ++i)
		{
			if (AVMEDIA_TYPE_VIDEO == fState->video_ifmt_ctx->streams[i]->codecpar->codec_type)
			{
				fState->video_index = (int)i;
			}
		}
		// Fix: without this guard, streams[-1] was dereferenced below when
		// the input exposed no video stream.
		if (fState->video_index < 0)
		{
			std::cerr << "no video stream found." << std::endl;
			break;
		}

		auto codec = avcodec_find_decoder(fState->video_ifmt_ctx->streams[fState->video_index]->codecpar->codec_id);
		if (!codec)
		{
			std::cerr << "avcodec_find_decoder.video failed." << std::endl;
			break;
		}

		fState->video_dec_codec_ctx = avcodec_alloc_context3(codec);
		if (!fState->video_dec_codec_ctx)
		{
			std::cerr << "avcodec_alloc_context3.video failed." << std::endl;
			break;
		}

		avcodec_parameters_to_context(fState->video_dec_codec_ctx, fState->video_ifmt_ctx->streams[fState->video_index]->codecpar);
		if (0 != avcodec_open2(fState->video_dec_codec_ctx, codec, nullptr))
		{
			std::cerr << "avcodec_open2.video failed." << std::endl;
			break;
		}

		//std::cout << "ffmpegOpenVideoInput decode pix format: " << fState->video_dec_codec_ctx->pix_fmt << std::endl;
		fState->is_video_input_inited = true;
		bRet = true;
	} while (0);
	// Fix: entries the demuxer did not consume were leaked before.
	av_dict_free(&options);
	if (!bRet)
	{
		ffmpegCloseVideoInput(fState);
	}
	return bRet;
}

// Releases the decoder and the gdigrab demuxer; safe to call repeatedly
// and on a partially-initialized state.
void ffmpegCloseVideoInput(ffPusherState* fState)
{
	fState->is_video_input_inited = false;
	fState->video_index = -1;

	if (nullptr != fState->video_dec_codec_ctx)
	{
		avcodec_close(fState->video_dec_codec_ctx);
		avcodec_free_context(&fState->video_dec_codec_ctx);
	}

	if (nullptr != fState->video_ifmt_ctx)
	{
		avformat_close_input(&fState->video_ifmt_ctx);
		fState->video_input_fmt = nullptr;
	}
}

// Creates and opens the H.264 encoder used for the desktop stream, parses
// SPS/PPS out of the global-header extradata, and opens the H.264 debug
// dump file.  Returns false on failure (releasing everything allocated).
bool ffmpegOpenVideoOutput(ffPusherState* fState)
{
	bool bRet = false;
	AVCodecContext* c = nullptr;
	AVDictionary* param = nullptr;
	do
	{
		if (fState->is_video_output_inited)
			break;
		auto codec = avcodec_find_encoder(AV_CODEC_ID_H264);
		if (!codec)
		{
			break;
		}

		c = avcodec_alloc_context3(codec);
		if (!c)
		{
			break;
		}
		// Prefer the decoder's actual output size; fall back to the configured one.
		c->width = fState->video_decode_width > 0 ? fState->video_decode_width : fState->width;
		c->height = fState->video_decode_height > 0 ? fState->video_decode_height : fState->height;
		c->framerate = { fState->fps_, 1 };
		c->gop_size = 1/*fState->fps_*/;	// every frame is a key frame
		c->time_base = { 1, fState->fps_ };
		c->max_b_frames = fState->b_frames;
		c->pix_fmt = (AVPixelFormat)fState->video_pixel_fmt;
		// Constant-rate control: min/max/buffer all pinned to the target bitrate.
		c->bit_rate = fState->video_bit_rate;
		c->rc_min_rate = fState->video_bit_rate;
		c->rc_max_rate = fState->video_bit_rate;
		c->rc_buffer_size = fState->video_bit_rate;
		c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;	// SPS/PPS in extradata, not in-band
		c->codec_type = AVMEDIA_TYPE_VIDEO;

		if (AV_CODEC_ID_H264 == c->codec_id)
		{
			av_dict_set(&param, "preset", "medium", 0);
			av_dict_set(&param, "tune", "zerolatency", 0);
			av_dict_set(&param, "profile", "high", 0);
		}
		else if (AV_CODEC_ID_H265 == c->codec_id)
		{
			av_dict_set(&param, "preset", "ultrafast", 0);
			av_dict_set(&param, "tune", "zero-latency", 0);
		}

		if (avcodec_open2(c, codec, &param) < 0)
		{
			break;
		}

		if (c->extradata)
		{
			// Expected extradata layout (Annex-B): 00 00 00 01 <SPS> 00 00 00 01 <PPS>
			uint8_t* sps = c->extradata + 4;
			uint8_t* pps = nullptr;
			uint8_t* data = c->extradata + 4;
			for (int i = 0; i < c->extradata_size - 4; ++i)
			{
				if (0 == data[i] && 0 == data[i + 1] && 0 == data[i + 2] && 1 == data[i + 3])
				{
					pps = &data[i + 4];
					break;
				}
			}
			// Fix: the original computed a negative sps_len when no second
			// start code was found (pps == nullptr).
			if (pps)
			{
				int sps_len = int(pps - sps) - 4;
				int pps_len = c->extradata_size - 4 * 2 - sps_len;
				if (0 < sps_len && 0 < pps_len)
				{
					fState->video_sps.append(sps, sps + sps_len);
					fState->video_pps.append(pps, pps + pps_len);
				}
			}
			//std::cout << "Video SPS: " << fState->video_sps << std::endl;
			//std::cout << "Video PPS: " << fState->video_pps << std::endl;
			fState->video_dump_file = fopen("../bin_x86/video_dump.h264", "wb");
			if (fState->video_dump_file)
			{
				fwrite(c->extradata, 1, c->extradata_size, fState->video_dump_file);
				fflush(fState->video_dump_file);
			}
		}

		fState->video_frame_duration = 1000 / fState->fps_;
		fState->is_video_output_inited = true;
		fState->video_enc_codec_ctx = c;
		bRet = true;
	} while (0);
	av_dict_free(&param);	// fix: the option dictionary was leaked
	if (!bRet && c)
	{
		avcodec_free_context(&c);	// fix: the context leaked on every failure path
	}
	return bRet;
}

// Frees the H.264 encoder context and closes the debug dump file.
void ffmpegCloseVideoOutput(ffPusherState* fState)
{
	fState->is_video_output_inited = false;

	if (nullptr != fState->video_enc_codec_ctx)
	{
		avcodec_free_context(&fState->video_enc_codec_ctx);
	}

	if (nullptr != fState->video_dump_file)
	{
		fclose(fState->video_dump_file);
		fState->video_dump_file = nullptr;
	}
}

// Creates a sws scaling context only when the decoder and encoder pixel
// formats differ; otherwise frames can be passed through untouched.
void OpenVideoConverter(ffPusherState* fState)
{
	if (fState->video_convert_ctx)
		return;

	const AVPixelFormat src_fmt = fState->video_dec_codec_ctx->pix_fmt;
	const AVPixelFormat dst_fmt = fState->video_enc_codec_ctx->pix_fmt;
	if (src_fmt == dst_fmt)
		return;

	fState->video_convert_ctx = sws_getContext(
		fState->width, fState->height, src_fmt,
		fState->width, fState->height, dst_fmt,
		SWS_BICUBIC, 0, 0, 0);
}

// Disposes of the sws scaler, if one was created.
void CloseVideoConverter(ffPusherState* fState)
{
	if (nullptr == fState->video_convert_ctx)
		return;
	sws_freeContext(fState->video_convert_ctx);
	fState->video_convert_ctx = nullptr;
}

std::vector<Monitor> GetMonitors()
{
	std::vector<Monitor> monitors;

	HRESULT hr = S_OK;
	IDirect3D9Ex* d3d9Ex = nullptr;
	hr = Direct3DCreate9Ex(D3D_SDK_VERSION, &d3d9Ex);
	if (FAILED(hr))
	{
		return monitors;
	}

	int adapter_count = d3d9Ex->GetAdapterCount();
	for (int i = 0; i < adapter_count; ++i)
	{
		Monitor m;
		LUID luid = { 0,0 };
		hr = d3d9Ex->GetAdapterLUID(i, &luid);
		if (FAILED(hr))
			continue;

		m.low_part = (uint64_t)luid.LowPart;
		m.high_part = (uint64_t)luid.HighPart;
		HMONITOR hm = d3d9Ex->GetAdapterMonitor(i);
		if (hm)
		{
			MONITORINFO mi;
			mi.cbSize = sizeof(MONITORINFO);
			BOOL ret = GetMonitorInfoA(hm, &mi);
			if (ret)
			{
				m.left = mi.rcMonitor.left;
				m.right = mi.rcMonitor.right;
				m.top = mi.rcMonitor.top;
				m.bottom = mi.rcMonitor.bottom;
				monitors.push_back(m);
			}
		}
	}

	d3d9Ex->Release();
	return monitors;
}

// Wall-clock milliseconds since the Unix epoch; used as the PTS time base.
// Fix: the previous SYSTEMTIME/mktime round-trip existed only inside
// #ifdef _WIN32, so a non-Windows build fell off the end of the function
// (undefined behavior).  system_clock::now() yields the same epoch-based
// milliseconds on every platform with far less machinery.
int64_t CurrentTimeMSec()
{
	using namespace std::chrono;
	return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
}

// Computes the next video pts (milliseconds since pts_start_time, wrapped to
// 32 bits). Under PTS_RECTIFY, timestamps that land within half a frame of
// the ideal grid (previous pts + frame duration) are snapped onto that grid
// to produce a perfectly even cadence; larger deviations resync to the clock.
uint32_t GetVideoPts(ffPusherState* fState)
{
	int64_t pts = CurrentTimeMSec() - fState->pts_start_time;
	if (PTS_RECTIFY == fState->video_pts_strategy)
	{
		// Compute |pts - expected| in 64 bits; the original used abs(), which
		// may bind to abs(int) and truncate the difference.
		int64_t expected = (int64_t)(fState->video_pre_pts + fState->video_frame_duration);
		int64_t diff = pts - expected;
		if (diff < 0)
			diff = -diff;
		if (diff < fState->video_frame_duration / 2)
		{
			fState->video_pre_pts += fState->video_frame_duration;
			return (uint32_t)(((int64_t)fState->video_pre_pts) % 0xFFFFFFFF);
		}
	}
	// No rectification (or drift too large): take the wall-clock pts as-is.
	fState->video_pre_pts = (double)pts;
	return (uint32_t)(pts % 0xFFFFFFFF);	// NOTE: modulo 2^32-1 kept as-is to match GetAudioPts
}

// Computes the next audio pts (milliseconds since pts_start_time, wrapped to
// 32 bits). Mirrors GetVideoPts: under PTS_RECTIFY, timestamps within half a
// frame of the ideal grid are snapped onto it; larger deviations resync.
uint32_t GetAudioPts(ffPusherState* fState)
{
	int64_t pts = CurrentTimeMSec() - fState->pts_start_time;
	if (PTS_RECTIFY == fState->audio_pts_strategy)
	{
		// 64-bit |pts - expected|; the original abs() could bind to abs(int)
		// and truncate the difference.
		int64_t expected = (int64_t)(fState->audio_pre_pts + fState->audio_frame_duration);
		int64_t diff = pts - expected;
		if (diff < 0)
			diff = -diff;
		if (diff < fState->audio_frame_duration / 2)
		{
			fState->audio_pre_pts += fState->audio_frame_duration;
			return (uint32_t)(((int64_t)fState->audio_pre_pts) % 0xFFFFFFFF);
		}
	}
	// No rectification (or drift too large): take the wall-clock pts as-is.
	fState->audio_pre_pts = (double)pts;
	return (uint32_t)(pts % 0xFFFFFFFF);	// NOTE: modulo 2^32-1 kept as-is to match GetVideoPts
}

// Fills adts_header (7 bytes) with an MPEG-4 ADTS header (no CRC) for an AAC
// payload of aac_length bytes, using the encoder's sample rate, channel count
// and profile.
void GetAdtsHeader(ffPusherState* fState, uint8_t* adts_header, int aac_length)
{
	// ISO/IEC 14496-3 sampling-frequency-index table; unknown rates fall
	// back to index 4 (44100 Hz), matching the original switch default.
	static const int kSampleRates[] = { 96000, 88200, 64000, 48000, 44100, 32000,
		24000, 22050, 16000, 12000, 11025, 8000, 7350 };
	uint8_t freqIdx = 4;
	for (uint8_t i = 0; i < sizeof(kSampleRates) / sizeof(kSampleRates[0]); ++i)
	{
		if (kSampleRates[i] == fState->audio_encode_ctx->sample_rate)
		{
			freqIdx = i;
			break;
		}
	}

	uint8_t channel_cfg = fState->audio_encode_ctx->channels;
	uint32_t frame_length = aac_length + 7;	// payload plus the 7-byte header itself
	// Syncword 0xFFF, MPEG-4, layer 0, protection_absent = 1 (no CRC).
	// NOTE(review): the FFmpeg profile value is written straight into the
	// 2-bit ADTS profile field — presumably relying on FF_PROFILE_AAC_*
	// matching the ADTS codes; verify if profiles other than LC are used.
	adts_header[0] = 0xFF;
	adts_header[1] = 0xF1;
	adts_header[2] = ((fState->audio_encode_ctx->profile) << 6) + (freqIdx << 2) + (channel_cfg >> 2);
	adts_header[3] = ((channel_cfg & 3) << 6) + (frame_length >> 11);
	adts_header[4] = ((frame_length & 0x7FF) >> 3);
	adts_header[5] = ((frame_length & 7) << 5) + 0x1F;	// low bits + buffer fullness high bits (0x7FF)
	adts_header[6] = 0xFC;	// buffer fullness low bits, 1 raw data block
}

// Allocates the RTSP output format context for fState->rtsp_url and selects
// UDP transport. Requires at least one encoder (audio or video) to already
// exist. Returns false if preconditions fail or allocation fails.
bool InitRtspPusher(ffPusherState* fState)
{
	bool bRet = false;
	do
	{
		if (fState->rtsp_url.empty())
			break;
		if (fState->rtsp_fmt_ctx)
			break;	// already initialized
		if (!fState->audio_encode_ctx && !fState->video_enc_codec_ctx)
			break;	// nothing to push

		int nRet = avformat_network_init();
		if (nRet < 0)
		{
			break;
		}

		nRet = avformat_alloc_output_context2(&fState->rtsp_fmt_ctx, nullptr, "rtsp", fState->rtsp_url.c_str());
		if (nRet < 0)
		{
			break;
		}

		// "rtsp_transport" is a private option of the RTSP muxer (priv_data),
		// not of AVFormatContext itself; without AV_OPT_SEARCH_CHILDREN the
		// option is never found and the call silently does nothing.
		av_opt_set(fState->rtsp_fmt_ctx, "rtsp_transport", "udp", AV_OPT_SEARCH_CHILDREN);
		bRet = true;
	} while (0);
	return bRet;
}
void CloseRtspPusher(ffPusherState* fState)
{
	// Free the RTSP output context, but only when the push thread is not
	// running (it still uses the context while active).
	if (!fState->rtsp_fmt_ctx || fState->is_rtsp_push_running)
		return;

	avformat_free_context(fState->rtsp_fmt_ctx);
	fState->rtsp_fmt_ctx = nullptr;
}

// Writes the stream header to the RTSP server, which performs the actual
// connection/ANNOUNCE handshake. Returns false (with a logged reason) when
// the context is missing or the server rejects the session.
bool ConnectRtspServer(ffPusherState* fState)
{
	bool bRet = false;
	do
	{
		if (!fState->rtsp_fmt_ctx)
			break;

		int nRet = avformat_write_header(fState->rtsp_fmt_ctx, nullptr);
		if (nRet < 0)
		{
			// Log the FFmpeg error text, consistent with RtspSendPacket.
			char buf[1024] = { 0 };
			av_strerror(nRet, buf, sizeof(buf) - 1);
			std::cout << "avformat_write_header failed." << buf << std::endl;
			break;
		}
		bRet = true;
	} while (0);
	return bRet;
}

// Creates the RTSP output streams (one per available encoder), copies the
// encoder parameters into them and connects to the server. On any failure
// the pusher context is released via CloseRtspPusher().
bool OpenRtspStreams(ffPusherState* fState)
{
	bool bRet = false;
	do
	{
		if (!fState->rtsp_fmt_ctx)
			break;
		if (fState->rtsp_video_stream || fState->rtsp_audio_stream)
			break;	// streams already opened

		if (fState->video_enc_codec_ctx && (AVMEDIA_TYPE_VIDEO == fState->video_enc_codec_ctx->codec_type))
		{
			auto vs = avformat_new_stream(fState->rtsp_fmt_ctx, 0);
			if (!vs)
			{
				break;
			}

			if (avcodec_parameters_from_context(vs->codecpar, fState->video_enc_codec_ctx) < 0)
				break;
			// Must be cleared AFTER the copy: avcodec_parameters_from_context
			// overwrites codecpar (including codec_tag) from the encoder ctx.
			vs->codecpar->codec_tag = 0;
			fState->rtsp_video_stream = vs;
			bRet = true;
		}

		if (fState->audio_encode_ctx && (AVMEDIA_TYPE_AUDIO == fState->audio_encode_ctx->codec_type))
		{
			auto as = avformat_new_stream(fState->rtsp_fmt_ctx, 0);
			if (!as)
			{
				break;
			}
			if (avcodec_parameters_from_context(as->codecpar, fState->audio_encode_ctx) < 0)
				break;
			as->codecpar->codec_tag = 0;	// clear after the copy, see above
			fState->rtsp_audio_stream = as;
			bRet = true;
		}
		if (bRet)
		{
			// At least one stream exists — perform the server handshake.
			bRet = ConnectRtspServer(fState);
		}
	} while (0);
	if (!bRet)
	{
		CloseRtspPusher(fState);
	}
	return bRet;
}
// Finalizes the RTSP session: writes the trailer and closes the I/O context.
// The AVStream objects are owned by rtsp_fmt_ctx, so only the cached pointers
// are cleared here; the context itself is freed by CloseRtspPusher().
void CloseRtspStreams(ffPusherState* fState)
{
	if (fState->rtsp_fmt_ctx)
	{
		av_write_trailer(fState->rtsp_fmt_ctx);
		avio_closep(&fState->rtsp_fmt_ctx->pb);
	}
	// Clear the cached stream pointers: they dangle once the context is
	// freed, and OpenRtspStreams() refuses to run again while they are set.
	fState->rtsp_video_stream = nullptr;
	fState->rtsp_audio_stream = nullptr;
}

// Rescales the packet's millisecond timestamps to the target stream's time
// base and writes it to the RTSP muxer. Returns false for empty packets,
// unknown packet types, a missing target stream, or a write failure.
bool RtspSendPacket(ffPusherState* fState, int pktType, std::shared_ptr<AVPacket>& pkt)
{
	bool bRet = false;
	do
	{
		if (!pkt->data || (pkt->size <= 0))
		{
			break;	// nothing to send
		}
		if ((RTSP_PKT_VIDEO != pktType) && (RTSP_PKT_AUDIO != pktType))
			break;

		// Pick the target stream; guard against a stream that was never
		// opened (the original dereferenced it unconditionally).
		AVStream* stream = (RTSP_PKT_AUDIO == pktType)
			? fState->rtsp_audio_stream
			: fState->rtsp_video_stream;
		if (!stream)
			break;

		pkt->stream_index = stream->index;
		// Timestamps were produced in milliseconds (GetVideoPts/GetAudioPts);
		// rescale them to the muxer's stream clock.
		AVRational srcTimeBase = { 1,1000 };
		AVRational dstTimeBase = stream->time_base;
		pkt->pts = av_rescale_q(pkt->pts, srcTimeBase, dstTimeBase);
		pkt->dts = av_rescale_q(pkt->dts, srcTimeBase, dstTimeBase);
		pkt->duration = av_rescale_q(pkt->duration, srcTimeBase, dstTimeBase);
		int nRet = av_write_frame(fState->rtsp_fmt_ctx, pkt.get());
		if (nRet < 0)
		{
			char buf[1024] = { 0 };
			av_strerror(nRet, buf, sizeof(buf) - 1);
			std::cout << "av_write_frame failed." << buf << std::endl;
			break;
		}

		bRet = true;
	} while (0);
	return bRet;
}

// Routes a queued packet to the RTSP sender, silently dropping it when the
// matching stream has not been opened or the type is unknown.
void RtspHandlePacket(ffPusherState* fState, int pktType, std::shared_ptr<AVPacket>& pkt)
{
	if (RTSP_PKT_AUDIO == pktType)
	{
		if (fState->rtsp_audio_stream)
			RtspSendPacket(fState, pktType, pkt);
	}
	else if (RTSP_PKT_VIDEO == pktType)
	{
		if (fState->rtsp_video_stream)
			RtspSendPacket(fState, pktType, pkt);
	}
	// Any other type is ignored, matching the original switch default.
}

// Consumer loop for the packet queue: pops audio/video packets and forwards
// them to the RTSP muxer until is_pusher_exit is set or an RTSP_QUIT message
// (or a null packet) is received; then closes the RTSP streams.
void RtspPushThread(ffPusherState* fState)
{
	std::cout << "rtsp push thread started." << std::endl;
	fState->is_rtsp_push_running = true;
	while (!fState->is_pusher_exit)
	{
		std::shared_ptr<RtspPktMsg> msg;
		{
			// RAII lock instead of the original manual lock()/unlock() pairs.
			std::lock_guard<std::mutex> qlock(fState->av_pkts_mutex);
			if (!fState->av_pkts_queue.empty())
			{
				msg = fState->av_pkts_queue.front();
				fState->av_pkts_queue.pop();
			}
		}
		if (msg)
		{
			if (RTSP_QUIT == msg->pkt_type || !msg->pkt)
			{
				break;
			}
			RtspHandlePacket(fState, msg->pkt_type, msg->pkt);
		}
		else
		{
			// wait_for (not wait): a notify fired between the unlocked queue
			// check above and this wait would otherwise be lost and could
			// stall the thread indefinitely. The timeout bounds that window.
			std::unique_lock<std::mutex> lock(fState->av_pkt_available_mutex);
			fState->av_pkt_available_condtion.wait_for(lock, std::chrono::milliseconds(10));
		}
	}

	fState->is_rtsp_push_running = false;
	std::cout << "rtsp push thread exited.1" << std::endl;
	CloseRtspStreams(fState);
	std::cout << "rtsp push thread exited.2.close rtsp streams" << std::endl;
}

// Spawns the RTSP push thread. Requires an initialized RTSP context and no
// push thread already running; returns whether the thread was started.
bool StartRtspPusher(ffPusherState* fState)
{
	if (fState->is_rtsp_push_running)
		return false;	// already running
	if (!fState->rtsp_fmt_ctx)
		return false;	// InitRtspPusher() not done

	fState->rtsp_push_thread = std::thread(RtspPushThread, fState);
	return true;
}
// Stops the RTSP push thread: raises the exit flag, wakes the thread with an
// RTSP_QUIT message, and joins it.
void StopRtspPusher(ffPusherState* fState)
{
	fState->is_pusher_exit = true;

	std::shared_ptr<AVPacket> emptyPkt;	// null packet also triggers loop exit
	PostPkt2Rtsp(fState, emptyPkt, RTSP_QUIT);

	if (fState->rtsp_push_thread.joinable())
		fState->rtsp_push_thread.join();
}

// Wraps the packet in an RtspPktMsg, enqueues it for the push thread and
// wakes the thread via the availability condition variable.
void PostPkt2Rtsp(ffPusherState* fState, std::shared_ptr<AVPacket>& pkt, RTSP_PKT_TYPE pktType)
{
	auto msg = std::make_shared<RtspPktMsg>();
	msg->pkt = pkt;
	msg->pkt_type = pktType;

	{
		// Queue access under its own lock, released before notifying.
		std::lock_guard<std::mutex> qlock(fState->av_pkts_mutex);
		fState->av_pkts_queue.push(msg);
	}
	{
		// Wake the consumer.
		std::unique_lock<std::mutex> wlock(fState->av_pkt_available_mutex);
		fState->av_pkt_available_condtion.notify_one();
	}
}



// 运行程序: Ctrl + F5 或调试 >“开始执行(不调试)”菜单
// 调试程序: F5 或调试 >“开始调试”菜单

// 入门使用技巧: 
//   1. 使用解决方案资源管理器窗口添加/管理文件
//   2. 使用团队资源管理器窗口连接到源代码管理
//   3. 使用输出窗口查看生成输出和其他消息
//   4. 使用错误列表窗口查看错误
//   5. 转到“项目”>“添加新项”以创建新的代码文件，或转到“项目”>“添加现有项”以将现有代码文件添加到项目
//   6. 将来，若要再次打开此项目，请转到“文件”>“打开”>“项目”并选择 .sln 文件
