#include "dina_decoder.h"
#include <sys/time.h>
#include <sys/timeb.h>
#include <unistd.h>
//-------------------------------------
static int getVideoFrameData(VideoDecoder *decoder);
//-------------------------------------

//Initialize the video codec (decoder->videoCodecId must already be set by the
//caller), the AAC audio codec, the shared packet/frames and the bitstream parser.
//videoCallbackPtr/audioCallbackPtr are function pointers passed as int
//(emscripten/32-bit pointer convention); outImgFrameType: 0 = yuv, 1 = rgb.
//Returns false on any failure; partially-created FFmpeg objects are left in
//*decoder for the caller to release (closeDecoder tolerates NULL members).
static bool initDecoder(VideoDecoder *decoder, int videoCallbackPtr, int audioCallbackPtr, int outImgFrameType)
{
	decoder->videoCallback = (VideoCallback)videoCallbackPtr;
	decoder->audioCallback = (AudioCallback)audioCallbackPtr;
	decoder->outImgFrameType = outImgFrameType;

	// ---- video codec ----
	decoder->videoCodec = avcodec_find_decoder(decoder->videoCodecId);
	if (!decoder->videoCodec)
	{
		fprintf(stderr, "Codec not found video AV_CODEC.\n");
		return false;
	}
	decoder->videoCodecContext = avcodec_alloc_context3(decoder->videoCodec);
	if (!decoder->videoCodecContext)
	{
		fprintf(stderr, "Could not allocate codec context.\n");
		return false;
	}
	if (avcodec_open2(decoder->videoCodecContext, decoder->videoCodec, NULL) < 0)
	{
		fprintf(stderr, "Could not open  video codec.\n");
		return false;
	}

	// ---- audio codec (fixed to AAC) ----
	decoder->audioCodecId = AV_CODEC_ID_AAC;
	decoder->audioCodec = avcodec_find_decoder(decoder->audioCodecId);
	if (!decoder->audioCodec)
	{
		fprintf(stderr, "Codec not found audio AV_CODEC.\n");
		return false;
	}
	decoder->audioCodecContext = avcodec_alloc_context3(decoder->audioCodec);
	// Fix: this allocation was previously passed unchecked to avcodec_open2.
	if (!decoder->audioCodecContext)
	{
		fprintf(stderr, "Could not allocate audio codec context.\n");
		return false;
	}
	if (avcodec_open2(decoder->audioCodecContext, decoder->audioCodec, NULL) < 0)
	{
		fprintf(stderr, "Could not open  audio codec.\n");
		return false;
	}

	// ---- shared packet / frame / parser ----
	decoder->packet = av_packet_alloc();
	if (NULL == decoder->packet)
		return false;
	decoder->videoFrame.yuvFrame = av_frame_alloc();
	if (NULL == decoder->videoFrame.yuvFrame)
		return false;
	decoder->audioFrame.pcmFrame = av_frame_alloc();
	if (NULL == decoder->audioFrame.pcmFrame)
		return false;
	// NOTE(review): the parser is created for the *video* codec id but is also
	// fed audio packets in addOneFrameData — confirm this is intended.
	decoder->parser = av_parser_init(decoder->videoCodec->id);
	if (NULL == decoder->parser)
		return false;

	return true;
}

//////////////////////////////////Export methods////////////////////////////////////////
//Create an H.264 decoder; returns an opaque handle (0 on failure).
int initH264Decoder(int videoCallbackPtr, int audioCallbackPtr, int outImgFrameType)
{
	debug("initH264Decoder");
	VideoDecoder *decoder = (VideoDecoder *)av_mallocz(sizeof(VideoDecoder));

	decoder->videoCodecId = AV_CODEC_ID_H264;

	bool b = initDecoder(decoder, videoCallbackPtr, audioCallbackPtr, outImgFrameType);
	if (!b)
	{
		//err return null
		return 0;
	}
	return (int)decoder;
}

//Create an H.265/HEVC decoder; returns an opaque handle (0 on failure).
//outImgFrameType: output data type, 0 = yuv, 1 = rgb
int initH265Decoder(int videoCallbackPtr, int audioCallbackPtr, int outImgFrameType)
{
	debug("initH265Decoder");
	VideoDecoder *decoder = (VideoDecoder *)av_mallocz(sizeof(VideoDecoder));
	decoder->videoCodecId = AV_CODEC_ID_H265;

	bool b = initDecoder(decoder, videoCallbackPtr, audioCallbackPtr, outImgFrameType);

	if (!b)
	{
		//err return null
		return 0;
	}
	return (int)decoder;
}

//Release a decoder created by initH264Decoder / initH265Decoder.
//Release a decoder created by initH264Decoder / initH265Decoder.
//Safe to call with a partially-initialized decoder: every member is either
//NULL (struct was av_mallocz'd) or a valid object, and all the release
//functions below are NULL-tolerant.
void closeDecoder(int decoderImpl)
{
	debug("closeDecoder");
	VideoDecoder *decoder = (VideoDecoder *)decoderImpl;
	// Robustness fix: tolerate a 0/NULL handle instead of crashing.
	if (NULL == decoder)
	{
		return;
	}

	// Plain-malloc'd staging buffers; free(NULL) is a no-op.
	free(decoder->videoFrame.yuvBuf);
	free(decoder->videoFrame.rgbBuffer);

	// FFmpeg release helpers all accept NULL (or a pointer to NULL).
	av_packet_free(&decoder->packet);
	av_frame_free(&decoder->videoFrame.yuvFrame);
	av_frame_free(&decoder->audioFrame.pcmFrame);
	avcodec_free_context(&decoder->videoCodecContext);
	avcodec_free_context(&decoder->audioCodecContext);

	if (NULL != decoder->parser)
	{
		av_parser_close(decoder->parser);
	}

	// PCM buffer is allocated with av_mallocz, so release with av_free.
	av_free(decoder->audioFrame.pcmBuffer);

	av_free(decoder);
}

//Size in bytes of one packed decoded picture for the current pixel format:
//8-bit 4:2:0 -> w*h*1.5, 10-bit LE 4:2:0 -> w*h*3 (2 bytes/sample);
//any other format is unsupported and reports 0.
static int getYuvFrameBufSize(VideoDecoder *decoder)
{
	const int w = decoder->videoFrame.yuvFrame->width;
	const int h = decoder->videoFrame.yuvFrame->height;

	switch (decoder->videoCodecContext->pix_fmt)
	{
	case AV_PIX_FMT_YUV420P:
	case AV_PIX_FMT_YUVJ420P:
		return h * w * 1.5;
	case AV_PIX_FMT_YUV420P10LE:
		return h * w * 3;
	default:
		return 0;
	}
}
//----------------------------------------------------------------------------------------------
//Table-driven conversion: YUV 4:2:0 to packed RGB (fixes "yuv240" typo)

// Precomputed chroma contribution tables, indexed by the raw 8-bit chroma
// sample: fv1 ~ 1.40*(v-128), fv2 ~ 0.71*(v-128), fu1 ~ 0.34*(u-128),
// fu2 ~ 1.77*(u-128). All four are 0 at index 128 (neutral chroma).
static int Table_fv1[256] = {-180, -179, -177, -176, -174, -173, -172, -170, -169, -167, -166, -165, -163, -162, -160, -159, -158, -156, -155, -153, -152, -151, -149, -148, -146, -145, -144, -142, -141, -139, -138, -137, -135, -134, -132, -131, -130, -128, -127, -125, -124, -123, -121, -120, -118, -117, -115, -114, -113, -111, -110, -108, -107, -106, -104, -103, -101, -100, -99, -97, -96, -94, -93, -92, -90, -89, -87, -86, -85, -83, -82, -80, -79, -78, -76, -75, -73, -72, -71, -69, -68, -66, -65, -64, -62, -61, -59, -58, -57, -55, -54, -52, -51, -50, -48, -47, -45, -44, -43, -41, -40, -38, -37, -36, -34, -33, -31, -30, -29, -27, -26, -24, -23, -22, -20, -19, -17, -16, -15, -13, -12, -10, -9, -8, -6, -5, -3, -2, 0, 1, 2, 4, 5, 7, 8, 9, 11, 12, 14, 15, 16, 18, 19, 21, 22, 23, 25, 26, 28, 29, 30, 32, 33, 35, 36, 37, 39, 40, 42, 43, 44, 46, 47, 49, 50, 51, 53, 54, 56, 57, 58, 60, 61, 63, 64, 65, 67, 68, 70, 71, 72, 74, 75, 77, 78, 79, 81, 82, 84, 85, 86, 88, 89, 91, 92, 93, 95, 96, 98, 99, 100, 102, 103, 105, 106, 107, 109, 110, 112, 113, 114, 116, 117, 119, 120, 122, 123, 124, 126, 127, 129, 130, 131, 133, 134, 136, 137, 138, 140, 141, 143, 144, 145, 147, 148, 150, 151, 152, 154, 155, 157, 158, 159, 161, 162, 164, 165, 166, 168, 169, 171, 172, 173, 175, 176, 178};
static int Table_fv2[256] = {-92, -91, -91, -90, -89, -88, -88, -87, -86, -86, -85, -84, -83, -83, -82, -81, -81, -80, -79, -78, -78, -77, -76, -76, -75, -74, -73, -73, -72, -71, -71, -70, -69, -68, -68, -67, -66, -66, -65, -64, -63, -63, -62, -61, -61, -60, -59, -58, -58, -57, -56, -56, -55, -54, -53, -53, -52, -51, -51, -50, -49, -48, -48, -47, -46, -46, -45, -44, -43, -43, -42, -41, -41, -40, -39, -38, -38, -37, -36, -36, -35, -34, -33, -33, -32, -31, -31, -30, -29, -28, -28, -27, -26, -26, -25, -24, -23, -23, -22, -21, -21, -20, -19, -18, -18, -17, -16, -16, -15, -14, -13, -13, -12, -11, -11, -10, -9, -8, -8, -7, -6, -6, -5, -4, -3, -3, -2, -1, 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 12, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24, 25, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 32, 33, 34, 35, 35, 36, 37, 37, 38, 39, 40, 40, 41, 42, 42, 43, 44, 45, 45, 46, 47, 47, 48, 49, 50, 50, 51, 52, 52, 53, 54, 55, 55, 56, 57, 57, 58, 59, 60, 60, 61, 62, 62, 63, 64, 65, 65, 66, 67, 67, 68, 69, 70, 70, 71, 72, 72, 73, 74, 75, 75, 76, 77, 77, 78, 79, 80, 80, 81, 82, 82, 83, 84, 85, 85, 86, 87, 87, 88, 89, 90, 90};
static int Table_fu1[256] = {-44, -44, -44, -43, -43, -43, -42, -42, -42, -41, -41, -41, -40, -40, -40, -39, -39, -39, -38, -38, -38, -37, -37, -37, -36, -36, -36, -35, -35, -35, -34, -34, -33, -33, -33, -32, -32, -32, -31, -31, -31, -30, -30, -30, -29, -29, -29, -28, -28, -28, -27, -27, -27, -26, -26, -26, -25, -25, -25, -24, -24, -24, -23, -23, -22, -22, -22, -21, -21, -21, -20, -20, -20, -19, -19, -19, -18, -18, -18, -17, -17, -17, -16, -16, -16, -15, -15, -15, -14, -14, -14, -13, -13, -13, -12, -12, -11, -11, -11, -10, -10, -10, -9, -9, -9, -8, -8, -8, -7, -7, -7, -6, -6, -6, -5, -5, -5, -4, -4, -4, -3, -3, -3, -2, -2, -2, -1, -1, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 15, 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 20, 20, 20, 21, 21, 22, 22, 22, 23, 23, 23, 24, 24, 24, 25, 25, 25, 26, 26, 26, 27, 27, 27, 28, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 31, 32, 32, 33, 33, 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 41, 41, 41, 42, 42, 42, 43, 43};
static int Table_fu2[256] = {-227, -226, -224, -222, -220, -219, -217, -215, -213, -212, -210, -208, -206, -204, -203, -201, -199, -197, -196, -194, -192, -190, -188, -187, -185, -183, -181, -180, -178, -176, -174, -173, -171, -169, -167, -165, -164, -162, -160, -158, -157, -155, -153, -151, -149, -148, -146, -144, -142, -141, -139, -137, -135, -134, -132, -130, -128, -126, -125, -123, -121, -119, -118, -116, -114, -112, -110, -109, -107, -105, -103, -102, -100, -98, -96, -94, -93, -91, -89, -87, -86, -84, -82, -80, -79, -77, -75, -73, -71, -70, -68, -66, -64, -63, -61, -59, -57, -55, -54, -52, -50, -48, -47, -45, -43, -41, -40, -38, -36, -34, -32, -31, -29, -27, -25, -24, -22, -20, -18, -16, -15, -13, -11, -9, -8, -6, -4, -2, 0, 1, 3, 5, 7, 8, 10, 12, 14, 15, 17, 19, 21, 23, 24, 26, 28, 30, 31, 33, 35, 37, 39, 40, 42, 44, 46, 47, 49, 51, 53, 54, 56, 58, 60, 62, 63, 65, 67, 69, 70, 72, 74, 76, 78, 79, 81, 83, 85, 86, 88, 90, 92, 93, 95, 97, 99, 101, 102, 104, 106, 108, 109, 111, 113, 115, 117, 118, 120, 122, 124, 125, 127, 129, 131, 133, 134, 136, 138, 140, 141, 143, 145, 147, 148, 150, 152, 154, 156, 157, 159, 161, 163, 164, 166, 168, 170, 172, 173, 175, 177, 179, 180, 182, 184, 186, 187, 189, 191, 193, 195, 196, 198, 200, 202, 203, 205, 207, 209, 211, 212, 214, 216, 218, 219, 221, 223, 225};

// Table-driven conversion of one 4:2:0 image to packed 3-byte pixels (no
// floating point, no swscale — works in a plain wasm build).
// Plane layout read from src: ImageWidth*ImageHeight bytes of luma, then
// len/4 bytes of the first chroma plane, then len/4 bytes of the second.
// NOTE(review): despite the "SP" (semi-planar) name this reads *planar*
// chroma, and the first chroma plane is fed to the V tables; the packer in
// getVideoFrameData stores planes in Y,U,V order, so U drives the "V" tables
// here — confirm the intended channel order against the rendering side.
static void convertYUV420SPToBGR(unsigned char *src, unsigned char *Dst, int ImageWidth, int ImageHeight)
{
	if (ImageWidth < 1 || ImageHeight < 1 || src == NULL || Dst == NULL)
		return;

	const long len = ImageWidth * ImageHeight;
	unsigned char *planeY = src;
	unsigned char *planeV = planeY + len;
	unsigned char *planeU = planeV + (len >> 2);

	for (int row = 0; row < ImageHeight; row++)
	{
		// One chroma sample covers a 2x2 luma block.
		const int chromaRowBase = (row / 2) * (ImageWidth / 2);
		for (int col = 0; col < ImageWidth; col++)
		{
			const int y = planeY[row * ImageWidth + col];
			const int c = chromaRowBase + (col / 2);
			const int v = planeV[c];
			const int u = planeU[c];

			int b = y + Table_fu2[u];
			int g = y - (Table_fu1[u] + Table_fv2[v]);
			int r = y + Table_fv1[v];

			// Clamp each channel into [0, 255] and store packed.
			unsigned char *out = Dst + (row * ImageWidth + col) * 3;
			out[0] = (b < 0) ? 0 : ((b > 255) ? 255 : (unsigned char)b);
			out[1] = (g < 0) ? 0 : ((g > 255) ? 255 : (unsigned char)g);
			out[2] = (r < 0) ? 0 : ((r > 255) ? 255 : (unsigned char)r);
		}
	}
}
//----------------------------------------------------------------------------------------------
//Round numToRound up to the nearest multiple of `multiple`.
//Fix/generalization: the original `(n + m - 1) & -m` is only correct when
//`multiple` is a power of two; the division form works for any positive
//multiple and is identical for the existing call site (multiple == 4).
//A non-positive multiple returns numToRound unchanged (avoids divide-by-zero).
static int roundUp(int numToRound, int multiple)
{
	if (multiple <= 0)
		return numToRound;
	return ((numToRound + multiple - 1) / multiple) * multiple;
}
//----------------------------------------------------------------------------------------------

//----------------------------------------------------------------------------------------------

//Fetch one decoded frame's yuv (and optionally rgb) data.
//On success fills decoder's videoFrame (yuvBuf / rgbBuffer, pts, dts).
//Returns 0 on success.
//Pull one decoded picture from the video codec, pack it (stride removed) into
//decoder->videoFrame.yuvBuf, and when outImgFrameType == OUT_IMG_RGB also
//convert it into decoder->videoFrame.rgbBuffer.
//Returns 0 on success; AVERROR(EAGAIN)/AVERROR_EOF when no frame is available;
//another negative AVERROR code on failure.
static int getVideoFrameData(VideoDecoder *decoder)
{
	const int errBufSize = 1024;
	char errBuf[errBufSize];

	// avcodec_receive_frame states:
	//   0      - one frame was output
	//   EAGAIN - more packets must be sent before a frame can be produced
	//   EOF    - the codec was flushed; all frames have already been returned
	int ret = avcodec_receive_frame(decoder->videoCodecContext, decoder->videoFrame.yuvFrame);
	switch (ret)
	{
	case 0:
		break;
	case AVERROR(EAGAIN):
		return ret;
	case AVERROR_EOF:
		return ret;
	default:
		av_strerror(ret, errBuf, errBufSize);
		debug("avcodec_receive_frame,ret=%d,err=%s", ret, errBuf);
		return ret;
	}

	// Cache stream dimensions from the first decoded frame.
	if (0 == decoder->frameWidth)
		decoder->frameWidth = decoder->videoFrame.yuvFrame->width;
	if (0 == decoder->frameHeight)
		decoder->frameHeight = decoder->videoFrame.yuvFrame->height;

	// Lazily allocate the output buffers on the first decoded frame.
	// (Consistency fix: was `nullptr`; NULL matches the rest of this C file.)
	if (NULL == decoder->videoFrame.yuvBuf)
	{
		// w*h*3 is the worst case: 8-bit 4:2:0 needs w*h*1.5, 10-bit needs w*h*3.
		// NOTE(review): this full size is also reported to the yuv callback even
		// for 8-bit frames (see popAllVideoFrame) — confirm consumers expect it.
		decoder->videoFrame.yuvBufferSize = decoder->videoFrame.yuvFrame->height * decoder->videoFrame.yuvFrame->width * 3;

		int rgbBuffSize = avpicture_get_size(AV_PIX_FMT_BGR24, decoder->videoFrame.yuvFrame->width, decoder->videoFrame.yuvFrame->height);
		decoder->videoFrame.rgbBufferSize = rgbBuffSize;
		debug("getVideoFrameData, BufferSize=%d,width=%d,height=%d", decoder->videoFrame.yuvBufferSize, decoder->frameWidth, decoder->frameHeight);
		decoder->videoFrame.yuvBuf = malloc(decoder->videoFrame.yuvBufferSize);
		decoder->videoFrame.rgbBuffer = malloc(decoder->videoFrame.rgbBufferSize);
		// Fix: these allocations were previously used unchecked by the memcpy
		// loops below. On failure, release both and leave a consistent state.
		if (NULL == decoder->videoFrame.yuvBuf || NULL == decoder->videoFrame.rgbBuffer)
		{
			free(decoder->videoFrame.yuvBuf);
			free(decoder->videoFrame.rgbBuffer);
			decoder->videoFrame.yuvBuf = NULL;
			decoder->videoFrame.rgbBuffer = NULL;
			debug("getVideoFrameData, buffer allocation failed");
			return AVERROR(ENOMEM);
		}
	}

	if (decoder->videoCodecContext->pix_fmt == AV_PIX_FMT_YUV420P || decoder->videoCodecContext->pix_fmt == AV_PIX_FMT_YUVJ420P)
	{
		// 8-bit planar 4:2:0: copy Y then U then V row by row, dropping the
		// linesize (stride) padding so yuvBuf holds tightly-packed planes.
		int a = 0;
		for (int i = 0; i < decoder->videoFrame.yuvFrame->height; i++)
		{
			memcpy(decoder->videoFrame.yuvBuf + a, decoder->videoFrame.yuvFrame->data[0] + i * decoder->videoFrame.yuvFrame->linesize[0], decoder->videoFrame.yuvFrame->width);
			a += decoder->videoFrame.yuvFrame->width;
		}
		for (int i = 0; i < decoder->videoFrame.yuvFrame->height / 2; i++)
		{
			memcpy(decoder->videoFrame.yuvBuf + a, decoder->videoFrame.yuvFrame->data[1] + i * decoder->videoFrame.yuvFrame->linesize[1], decoder->videoFrame.yuvFrame->width / 2);
			a += decoder->videoFrame.yuvFrame->width / 2;
		}
		for (int i = 0; i < decoder->videoFrame.yuvFrame->height / 2; i++)
		{
			memcpy(decoder->videoFrame.yuvBuf + a, decoder->videoFrame.yuvFrame->data[2] + i * decoder->videoFrame.yuvFrame->linesize[2], decoder->videoFrame.yuvFrame->width / 2);
			a += decoder->videoFrame.yuvFrame->width / 2;
		}
	}
	else if (decoder->videoCodecContext->pix_fmt == AV_PIX_FMT_YUV420P10LE)
	{
		// 10-bit little-endian 4:2:0: two bytes per sample.
		debug("getVideoFrameData,AV_PIX_FMT_YUV420P10LE");
		int a = 0;
		for (int i = 0; i < decoder->videoFrame.yuvFrame->height; i++)
		{
			memcpy(decoder->videoFrame.yuvBuf + a,
				   decoder->videoFrame.yuvFrame->data[0] + i * decoder->videoFrame.yuvFrame->linesize[0],
				   decoder->videoFrame.yuvFrame->width * 2);
			a += decoder->videoFrame.yuvFrame->width * 2;
		}
		for (int i = 0; i < decoder->videoFrame.yuvFrame->height / 2; i++)
		{
			memcpy(decoder->videoFrame.yuvBuf + a,
				   decoder->videoFrame.yuvFrame->data[1] + i * decoder->videoFrame.yuvFrame->linesize[1],
				   decoder->videoFrame.yuvFrame->width);
			a += decoder->videoFrame.yuvFrame->width;
		}
		for (int i = 0; i < decoder->videoFrame.yuvFrame->height / 2; i++)
		{
			memcpy(decoder->videoFrame.yuvBuf + a,
				   decoder->videoFrame.yuvFrame->data[2] + i * decoder->videoFrame.yuvFrame->linesize[2],
				   decoder->videoFrame.yuvFrame->width);
			a += decoder->videoFrame.yuvFrame->width;
		}
	}
	// NOTE(review): any other pixel format leaves yuvBuf untouched yet still
	// reports success — confirm whether that can occur with these codecs.

	// Convert the packed 4:2:0 image to 24-bit pixels when RGB output is requested.
	if (decoder->outImgFrameType == OUT_IMG_RGB)
	{
		// sws_scale is deliberately NOT used here: under wasm it failed with
		// "No accelerated colorspace conversion found from yuv420p to rgba"
		// and produced wrong colors; supporting it would require an emcc-built
		// libx264 plus an ffmpeg rebuild with --enable-libx264. The table-driven
		// converter is used instead.
		convertYUV420SPToBGR(decoder->videoFrame.yuvBuf, decoder->videoFrame.rgbBuffer, decoder->frameWidth, decoder->frameHeight);
	}

	// Propagate timing info for the callback.
	decoder->videoFrame.pts = decoder->videoFrame.yuvFrame->pts;
	decoder->videoFrame.dts = decoder->videoFrame.yuvFrame->pkt_dts;
	return 0;
}

//Deliver the current audio frame to the front end via callback.
//Pull one decoded audio frame from the AAC codec, interleave its samples into
//decoder->audioFrame.pcmBuffer, and deliver the PCM data via audioCallback.
//Errors from avcodec_receive_frame (including EAGAIN/EOF) are logged and the
//function simply returns without invoking the callback.
static void popAudioFrame(VideoDecoder *decoder)
{
	// Initial PCM staging-buffer size; grown on demand below.
	const int kInitialPcmBufferSize = 128 * 1024;

	const int errBufSize = 1024;
	char errBuf[errBufSize];

	int sampleSize = 0;
	int audioDataSize = 0;
	int targetSize = 0;
	int offset = 0;
	int i = 0;
	int ch = 0;

	debug("popAudioFrame start");
	// 0 means one frame was produced; any other value (EAGAIN, EOF, real error)
	// is logged and treated the same: no output this call.
	int ret = avcodec_receive_frame(decoder->audioCodecContext, decoder->audioFrame.pcmFrame);
	if (ret != 0)
	{
		av_strerror(ret, errBuf, errBufSize);
		debug("audio avcodec_receive_frame,ret=%d,err=%s", ret, errBuf);
		return;
	}

	// Propagate timing info for the callback.
	decoder->audioFrame.dts = decoder->audioFrame.pcmFrame->pkt_dts;
	decoder->audioFrame.pts = decoder->audioFrame.pcmFrame->pts;

	sampleSize = av_get_bytes_per_sample(decoder->audioCodecContext->sample_fmt);
	if (sampleSize < 0)
	{
		debug("Failed to calculate data size.");
		return;
	}

	// Lazily create the PCM staging buffer on the first decoded frame.
	if (decoder->audioFrame.pcmBuffer == NULL)
	{
		decoder->audioFrame.pcmBuffer = (unsigned char *)av_mallocz(kInitialPcmBufferSize);
		decoder->audioFrame.pcmBufferSize = kInitialPcmBufferSize;
		debug("Initial PCM buffer size %d.", decoder->audioFrame.pcmBufferSize);
	}

	// Total interleaved payload for this frame.
	// NOTE(review): AVCodecContext.channels is deprecated in newer FFmpeg
	// (replaced by ch_layout.nb_channels) — fine for the version built here.
	audioDataSize = decoder->audioFrame.pcmFrame->nb_samples * decoder->audioCodecContext->channels * sampleSize;
	debug("popAudioFrame audioDataSize=%d", audioDataSize);

	// Grow the buffer if this frame does not fit; previous contents are
	// discarded (each frame fully overwrites the buffer below).
	if (decoder->audioFrame.pcmBufferSize < audioDataSize)
	{
		targetSize = roundUp(audioDataSize, 4);
		debug("Current PCM buffer size %d not sufficient for data size %d, round up to target %d.",
			  decoder->audioFrame.pcmBufferSize,
			  audioDataSize,
			  targetSize);
		decoder->audioFrame.pcmBufferSize = targetSize;
		av_free(decoder->audioFrame.pcmBuffer);
		decoder->audioFrame.pcmBuffer = (unsigned char *)av_mallocz(decoder->audioFrame.pcmBufferSize);
	}

	// Interleave the per-channel planes into packed sample order.
	// NOTE(review): this assumes a *planar* sample format (one data[] plane per
	// channel), which holds for the FFmpeg AAC decoder's FLTP output; for a
	// packed format data[1..] would be NULL and this copy would be wrong —
	// confirm sample_fmt is always planar here.
	for (i = 0; i < decoder->audioFrame.pcmFrame->nb_samples; i++)
	{
		for (ch = 0; ch < decoder->audioCodecContext->channels; ch++)
		{
			memcpy(decoder->audioFrame.pcmBuffer + offset, decoder->audioFrame.pcmFrame->data[ch] + sampleSize * i, sampleSize);
			offset += sampleSize;
		}
	}

	if (decoder->audioCallback != NULL)
	{
		decoder->audioCallback(decoder->audioFrame.pcmBuffer, audioDataSize, decoder->audioFrame.pts, decoder->audioFrame.dts);
	}

	return;
}

//Drain all video frames currently available from the decoder,
//returning each to the front end via callback.
//Drain every decoded video frame currently buffered in the codec and hand each
//one to the registered video callback (RGB or YUV payload per outImgFrameType).
static void popAllVideoFrame(VideoDecoder *decoder)
{
	for (;;)
	{
		// Stop as soon as the codec has no more complete frames (EAGAIN/EOF/err).
		if (getVideoFrameData(decoder) != 0)
		{
			return;
		}

		unsigned char *frameData;
		int frameSize;
		if (decoder->outImgFrameType == OUT_IMG_RGB)
		{
			frameData = decoder->videoFrame.rgbBuffer;
			frameSize = decoder->videoFrame.rgbBufferSize;
		}
		else
		{
			frameData = decoder->videoFrame.yuvBuf;
			frameSize = decoder->videoFrame.yuvBufferSize;
		}

		// Deliver the decoded frame to the front end.
		if (decoder->videoCallback)
		{
			decoder->videoCallback(frameData, frameSize, decoder->frameWidth, decoder->frameHeight, decoder->videoFrame.pts, decoder->videoFrame.dts);
		}
		else
		{
			debug("popAllVideoFrame ,call callback,BUT decoder->videoCallback IS NULL");
		}
	}
}
//--------------------------------------------------------------------------------------------
//Send the current packet to the codec.
//If the codec's input queue is full, output is drained first.
//Feed decoder->packet to the matching codec (video or audio) and drain any
//output it produces. If the codec's input queue is full (EAGAIN), output is
//drained first and the same packet is retried.
//avcodec_send_packet states:
//   0      - packet accepted by the codec
//   EAGAIN - packet not accepted; one or more frames must be received first
//   EOF    - flush already signalled (packet == NULL); nothing more to send
static void sendPacketToCodec(VideoDecoder *decoder, enum FrameType frameType)
{
	const int errBufSize = 512;
	char errBuf[errBufSize];
	AVCodecContext *avctx = (frameType == FT_VIDEO ? decoder->videoCodecContext : decoder->audioCodecContext);

	for (;;)
	{
		int sendRet = avcodec_send_packet(avctx, decoder->packet);

		if (sendRet == 0)
		{
			// Accepted: pop whatever the codec can produce now.
			if (frameType == FT_VIDEO)
				popAllVideoFrame(decoder);
			else
				popAudioFrame(decoder);
			return;
		}

		if (sendRet == AVERROR_EOF)
		{
			// Codec already flushed; every frame has been consumed.
			return;
		}

		if (sendRet == AVERROR(EAGAIN))
		{
			// Input queue full: drain output, then retry this same packet.
			// (The loop replaces the tail recursion of the original version.)
			if (frameType == FT_VIDEO)
				popAllVideoFrame(decoder);
			else
				popAudioFrame(decoder);
			continue;
		}

		// Any other error: log and drop the packet.
		av_strerror(sendRet, errBuf, errBufSize);
		fprintf(stderr, "avcodec_send_packet error,%s.\n", errBuf);
		return;
	}
}

//Submit one frame of audio/video data for decoding.
//pData: one frame of data; size: length of pData
//pts, dts: timestamps
//frameType: audio or video frame
//Run one frame's worth of raw bytes through the bitstream parser and submit
//every complete packet it produces to the matching codec.
//pData/size: one frame of elementary-stream data; pts/dts: timestamps for the
//parser; frameType selects the video or audio codec context.
//NOTE(review): decoder->parser was created from the *video* codec id in
//initDecoder but is used for audio packets here as well — confirm intended.
static void addOneFrameData(VideoDecoder *decoder, unsigned char *pData, int size, int64_t pts, int64_t dts, enum FrameType frameType)
{
	const int errBufSize = 1024;
	char errBuf[errBufSize];

	unsigned char *cursor = pData;
	int remaining = size;

	AVCodecContext *avctx = (frameType == FT_VIDEO ? decoder->videoCodecContext : decoder->audioCodecContext);

	while (remaining > 0)
	{
		// The parser consumes some input bytes and may emit one complete packet.
		int consumed = av_parser_parse2(decoder->parser, avctx, &decoder->packet->data, &decoder->packet->size,
										(uint8_t *)cursor, remaining, pts, dts, 0);
		if (consumed < 0)
		{
			av_strerror(consumed, errBuf, errBufSize);
			fprintf(stderr, "Error while parsing.%s\n", errBuf);
			return;
		}
		cursor += consumed;
		remaining -= consumed;

		// A non-empty packet is ready for the codec.
		if (decoder->packet->size)
		{
			sendPacketToCodec(decoder, frameType);
		}
	}
}

//----------------------------------

//Submit raw elementary-stream video data (raw streams carry no audio frames).
//decoderImpl: decoder instance handle
//pData: one frame of data
void addRawVideoFrameData(int decoderImpl, unsigned char *pData, int size, int pts, int dts)
{
	VideoDecoder *decoder = (VideoDecoder *)decoderImpl;

	int64_t pts2, dts2;
	if (pts == 0 || dts == 0)
	{
		pts2 = AV_NOPTS_VALUE;
		dts2 = AV_NOPTS_VALUE;
	}
	else
	{
		pts2 = pts;
		dts2 = dts;
	}

	addOneFrameData(decoder, pData, size, pts2, dts2, FT_VIDEO);
}

//--------------------------------------------
void addRtpFrameData(int decoderImpl, unsigned char *pData, int size)
{
	VideoDecoder *decoder = (VideoDecoder *)decoderImpl;

	//添加rtp 帧
	readDinaRtpHead(pData, size, &decoder->currFrameRtpHead);
	pData += sizeof(DinaRtpHead);
	size -= sizeof(DinaRtpHead);
	int64_t pts2, dts2;
	pts2 = decoder->currFrameRtpHead.timestamp;
	dts2 = decoder->currFrameRtpHead.sequenceNumber;

	enum FrameType frameType;

	if (decoder->currFrameRtpHead.payloadType == PAYLOAD_TYPE_AUDIO_AAC)
	{
		//aac
		frameType = FT_AUDIO;
	}
	else
	{
		//视频
		frameType = FT_VIDEO;
	}
	addOneFrameData(decoder, pData, size, pts2, dts2, frameType);
	return;
}
