
#include "player.h"

using namespace player;

// NULL-check helper: logs the failing expression's text and makes the enclosing
// function return -1. NOTE(review): the macro expands with a trailing ';' after
// while(0); call sites in this file rely on that and omit their own semicolon
// (e.g. `ASSERT(pFormatCtx)`), so removing it here would break every caller.
#define ASSERT(c)   do{if(c == NULL){printf("%s is NULL\n",#c);return -1;}}while(0);

// Constructs a player for the given media file.
// Copies the file name (may be NULL), initializes FFmpeg networking and
// creates the mutex used to serialize codec teardown between threads.
// @param file  path of the media file to play; NULL is tolerated.
Player::Player(const char *file):
    screenH(0)
    ,screenW(0)
    ,filename(NULL)
    ,isEventClose(true)
    ,pFormatCtx(NULL)
    ,pPacket(NULL)
    ,pCodec(NULL)
    ,isClose(true)
    ,isUnpackClose(true)
    ,isAudioClose(true)
    ,isVideoClose(true)
    ,audioStreamIndex(-1)
    ,videoStreamIndex(-1)
    ,eventThread(NULL)
    ,unpackThread(NULL)
    ,audioPlayerThread(NULL)
    ,videoPlayerThread(NULL)
{
    if(file)
    {
        size_t len = strlen(file) + 1;      // include the terminating '\0'
        filename = (char *)malloc(len);
        if(filename)
            memcpy(filename, file, len);    // plain copy; sprintf's format parsing is unnecessary here
    }
    // filename is already NULL from the member-initializer list when file is NULL.
    avformat_network_init();
    pMutex = SDL_CreateMutex();
}
// Releases the copied file name and the teardown mutex, and undoes the
// avformat_network_init() performed in the constructor.
Player::~Player()
{
    if(filename)
        free(filename);
    if(pMutex != NULL)
        SDL_DestroyMutex(pMutex);
    avformat_network_deinit();  // pairs with avformat_network_init() in the ctor
}
// Prints the player version string.
// Uses "%s" so a '%' inside VERSION can never be interpreted as a printf
// format directive (passing a non-literal as the format string is UB).
int Player::versionShow(void)
{
    printf("%s", VERSION);
    return 0;
}
//初始化分3步
int Player::init(void)
{
    //1.为格式化上下文分配空间
    pFormatCtx = avformat_alloc_context();
    ASSERT(pFormatCtx)
    printf("filename:%s\n",filename);
    //初始化SDL
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER | SDL_INIT_EVENTS)) {  
		printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
		return -1;
	}else
    {
        //2.读取文件到格式化上下文(上下文可以看作是缓存)
        if(avformat_open_input(&pFormatCtx,filename,NULL,NULL)!=0){
            printf("Couldn't open input stream.\n");
            return -1;
        }
        //3.从格式化上下文中读取流
        if(avformat_find_stream_info(pFormatCtx,NULL)<0){
            printf("Couldn't find stream information.\n");
            return -1;
        }
        //打印pFormatCtx信息
        av_dump_format(pFormatCtx, 0, filename, false);
        //创建事件线程。主要负责键盘输入，或者窗口事件，比如点击窗口红叉
        eventThread = SDL_CreateThread(eventLoop,"event",(void *)this);
        ASSERT(eventThread)  
        isEventClose = false;      
        //创建解包线程。主要负责从pFormatCtx里读取packet，然后发送给解码器，获取可以播放或者显示的帧
        unpackThread = SDL_CreateThread(unpackLoop,"unpack",(void *)this);
        ASSERT(unpackThread)
    }
    return 0;
}
int Player::exit(void)
{
    isClose = true;
    //确保其他线程都结束后，事件线程最后结束
    while(!(isUnpackClose && isAudioClose && isVideoClose));
    printf("SDL_Quit\n");
    SDL_Quit();
    if(pFormatCtx)
    {
        avformat_close_input(&pFormatCtx);
        pFormatCtx = NULL;
    }
    isEventClose = true;
    return 0;
}
//这里虽然写了处理键盘事件，但是没有完成
// Keyboard handler stub: SDLK_SPACE is recognized but pause/resume has not
// been implemented, so every key is currently a no-op.
// @param key  SDL key symbol from the event loop.
// @return always 0.
int Player::keyEvent(int key)
{
    if (key == SDLK_SPACE)
    {
        // TODO: toggle pause/resume here.
    }
    return 0;
}
//这里事件线程主要就处理了关闭窗口这个事件
//关闭窗口就结束播放，但是要确保资源都已经释放，所以事件线程最后退出
int Player::eventLoop(void *pthis)
{
    ASSERT(pthis)
    SDL_Event event;
    Player *tmpthis = (Player *) pthis;
    tmpthis->isEventClose = false;
    tmpthis->isClose = false;
    printf("Start eventLoop -- %x\n",tmpthis);
    while((!tmpthis->isEventClose) && (pthis !=NULL))
    {
        SDL_WaitEvent(&event);
        switch (event.type)
        {
        case SDL_KEYDOWN://键盘按下
            tmpthis->keyEvent(event.key.keysym.sym);
            break;
        case SDL_QUIT://关闭窗口
            tmpthis->exit();
            break;
        default:
            break;
        }
    }
    printf("leave eventLoop\n");
    return 0;
}
//单个流对应的编解码器初始化(这里只有解码器)，分为4步
int Player::codecContextInit(int streamIndex)
{
    printf("Init %d\n",streamIndex);
    //1.为解码器上下文分配空间
    pCodec[streamIndex].pCodeCtx = avcodec_alloc_context3(NULL);
    ASSERT(pCodec[streamIndex].pCodeCtx)
    //2.由解码器参数转换为解码器上下文
    avcodec_parameters_to_context(pCodec[streamIndex].pCodeCtx,pFormatCtx->streams[streamIndex]->codecpar);
    pCodec[streamIndex].pCodeCtx->pkt_timebase = pFormatCtx->streams[streamIndex]->time_base;
    //3.寻找当前上下文对应的解码器，如果找不到，可能是你的FFMPEG没有安装对于的编解码器动态库
    pCodec[streamIndex].pAvCodec = avcodec_find_decoder(pCodec[streamIndex].pCodeCtx->codec_id);
    ASSERT(pCodec[streamIndex].pAvCodec)
    //4.打开对应的解码器，到这一步，已经可以从流里面解码了
    if(avcodec_open2(pCodec[streamIndex].pCodeCtx, pCodec[streamIndex].pAvCodec,NULL)<0){
        printf("Could not open codec.\n");
        return -1;
    }
    //根据是音频流还是视频流决定如何初始化
    switch (pFormatCtx->streams[streamIndex]->codecpar->codec_type)
    {
    case AVMEDIA_TYPE_VIDEO:
    printf("AVMEDIA_TYPE_VIDEO\n");
        videoStreamIndex = streamIndex;
        //雷神demo里的视频格式是YUV，而MP3是RGB24，这里没有考虑其他情况，只针对这两个测试文件编写的代码
        //所以其他文件播放可能有问题
        if(pCodec[streamIndex].pCodeCtx->pix_fmt == AV_PIX_FMT_YUV420P)
        {
            //从解码器接收到的视频帧
            pCodec[streamIndex].pFrame = av_frame_alloc();
            ASSERT(pCodec[streamIndex].pFrame)
            //转换后的视频帧
            pCodec[streamIndex].videoOutFmt.pOutFrame = av_frame_alloc();
            ASSERT(pCodec[streamIndex].videoOutFmt.pOutFrame)
            //上面的alloc并没有分配实际内存，pOutFrame的内存其实是下面申请的内存填充而来
            pCodec[streamIndex].pBuffer=(unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P,  pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height,1));
            ASSERT(pCodec[streamIndex].pBuffer)
            //把上面申请的内存填充到pOutFrame中去，pFrame直接接收的是解码器输出的帧，所以内存不用自己分配
            av_image_fill_arrays(pCodec[streamIndex].videoOutFmt.pOutFrame->data, pCodec[streamIndex].videoOutFmt.pOutFrame->linesize,pCodec[streamIndex].pBuffer, \
            AV_PIX_FMT_YUV420P,pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height,1);
            //视频转换上下文初始化，因为SDL并不是什么格式都能播放，所以首先从视频流里面解码出来的帧的格式是FFMPEG中的某个格式
            //然后需要找到FFMPEG中这个格式对应到SDL中是什么格式，如果没有对应格式或者该格式在SDL中播放有问题，最简单的办法就是
            //无论解码出来什么格式都转化为SDL支持比较好的格式
            pCodec[streamIndex].pSwsConvertCtx = sws_getContext(pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height, pCodec[streamIndex].pCodeCtx->pix_fmt, \
            pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height,AV_PIX_FMT_YUV420P , SWS_BICUBIC, NULL, NULL, NULL);
            ASSERT(pCodec[streamIndex].pSwsConvertCtx)
        }
        else
        {
            //同上
            pCodec[streamIndex].pFrame = av_frame_alloc();
            ASSERT(pCodec[streamIndex].pFrame)
            pCodec[streamIndex].videoOutFmt.pOutFrame = av_frame_alloc();
            ASSERT(pCodec[streamIndex].videoOutFmt.pOutFrame)
            pCodec[streamIndex].pBuffer=(unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_RGB24,  pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height,1));
            ASSERT(pCodec[streamIndex].pBuffer)
            av_image_fill_arrays(pCodec[streamIndex].videoOutFmt.pOutFrame->data, pCodec[streamIndex].videoOutFmt.pOutFrame->linesize,pCodec[streamIndex].pBuffer, \
                AV_PIX_FMT_RGB24,pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height,1);
            pCodec[streamIndex].pSwsConvertCtx = sws_getContext(pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height, pCodec[streamIndex].pCodeCtx->pix_fmt, \
            pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height,AV_PIX_FMT_RGB24 , SWS_BICUBIC, NULL, NULL, NULL);
            ASSERT(pCodec[streamIndex].pSwsConvertCtx)
        }
        //把视频流中的视频画面宽、高保存，在视频线程中初始化窗口会用到
        if(screenW<pCodec[streamIndex].pCodeCtx->width)
            screenW = pCodec[streamIndex].pCodeCtx->width;
        if(screenH<pCodec[streamIndex].pCodeCtx->height)
            screenH = pCodec[streamIndex].pCodeCtx->height;
        break;
    case AVMEDIA_TYPE_AUDIO:
    printf("AVMEDIA_TYPE_AUDIO\n");
        audioStreamIndex = streamIndex;
        pCodec[streamIndex].pAudioOutFmt = (AudioOutFmt *)malloc(sizeof(AudioOutFmt));
        ASSERT(pCodec[streamIndex].pAudioOutFmt)
        pCodec[streamIndex].pAudioOutFmt->outChLayout = AV_CH_LAYOUT_STEREO;
        pCodec[streamIndex].pAudioOutFmt->outNbSamples = pCodec[streamIndex].pCodeCtx->frame_size;
        pCodec[streamIndex].pAudioOutFmt->outNbSampleFmt = AV_SAMPLE_FMT_S16;
        pCodec[streamIndex].pAudioOutFmt->outSamplesRate = pCodec[streamIndex].pCodeCtx->sample_rate;
        pCodec[streamIndex].pAudioOutFmt->outChannels = av_get_channel_layout_nb_channels(pCodec[streamIndex].pAudioOutFmt->outChLayout);
        pCodec[streamIndex].pAudioOutFmt->outFrameSize = av_samples_get_buffer_size(NULL,pCodec[streamIndex].pAudioOutFmt->outChannels ,pCodec[streamIndex].pAudioOutFmt->outNbSamples,pCodec[streamIndex].pAudioOutFmt->outNbSampleFmt, 1);
        pCodec[streamIndex].audioBufCtrl.audioLen = 0;
        pCodec[streamIndex].audioBufCtrl.pAudioChunk = NULL;
        pCodec[streamIndex].audioBufCtrl.pAudioPos = NULL;
        pCodec[streamIndex].pBuffer = (unsigned char *)av_malloc(MAX_AUDIO_FRAME_SIZE*2);
        ASSERT(pCodec[streamIndex].pBuffer)
        pCodec[streamIndex].pFrame = av_frame_alloc();
        ASSERT(pCodec[streamIndex].pFrame)
        break;
    default:
        break;
    }
    return 0;
}
//退出时需要把所有资源释放
int Player::codecContextExit(int streamIndex)
{
    if(pCodec != NULL)
    {
        if(pCodec[streamIndex].pCodeCtx != NULL)
        {
            //FFMPEG中的函数是否为线程安全并不清楚
            SDL_LockMutex(pMutex);
            avcodec_close(pCodec[streamIndex].pCodeCtx);
            pCodec[streamIndex].pCodeCtx = NULL;
            SDL_UnlockMutex(pMutex);
        }
    }
    return 0;
}
int Player::unpackLoop(void *pthis)
{
    ASSERT(pthis)
    Player *tmpthis = (Player *) pthis;
    tmpthis->isUnpackClose = false;
    //每个流对应一个解码器
    tmpthis->pCodec = (Codec *)malloc(sizeof(Codec)*(tmpthis->pFormatCtx->nb_streams));
    ASSERT(tmpthis->pCodec)
    //循环初始化所有流，本来软件设计思路是可以应对多个视频流或者音频流，但是实际并不可以，因为音视频流索引各保存了一个
    //如果出现多个音频视频流，记录的流索引也只是最后那个
 	for(int i=0; i < tmpthis->pFormatCtx->nb_streams; i++)
    {
        if(tmpthis->codecContextInit(i)<0)
        {
            printf("Stream%d init failed\n",i);
            return -1;
        }
    }
    //创建音视频线程
    tmpthis->audioPlayerThread = SDL_CreateThread(audioLoop,"audio",(void *)pthis);
    ASSERT(tmpthis->audioPlayerThread)
    tmpthis->isAudioClose = false;
    tmpthis->videoPlayerThread = SDL_CreateThread(videoLoop,"video",(void *)pthis);
    ASSERT(tmpthis->videoPlayerThread)
    tmpthis->isVideoClose = false;
    tmpthis->pPacket = (AVPacket *)av_malloc(sizeof(AVPacket));
    ASSERT(tmpthis->pPacket)
    tmpthis->gotAudioPicture = -1;
    tmpthis->gotVideoPicture = -1;
   while(!tmpthis->isClose)
    {   
        //在音视频线程中，转换完一帧数据后会把此状态给设置为-1
        if((tmpthis->gotAudioPicture<0) && (tmpthis->gotVideoPicture<0))
        {
            //1.从格式化上下文获取包数据
            if(av_read_frame(tmpthis->pFormatCtx, tmpthis->pPacket) == 0)
            {
                //判断获取到的包数据是属于音频流还是视频流的
                switch (tmpthis->pFormatCtx->streams[tmpthis->pPacket->stream_index]->codecpar->codec_type)
                {
                case AVMEDIA_TYPE_AUDIO:
                    tmpthis->audioStreamIndex = tmpthis->pPacket->stream_index;
                    //如果是音频流，就发送给之前初始化好的解码器
                    avcodec_send_packet(tmpthis->pCodec[tmpthis->audioStreamIndex].pCodeCtx, tmpthis->pPacket);
                    //发送后从解码器中接收帧，并不一定是一个包对应一帧数据，可能是发送多个包后才能接收到一帧
                    tmpthis->gotAudioPicture = avcodec_receive_frame(tmpthis->pCodec[tmpthis->audioStreamIndex].pCodeCtx,
                    tmpthis->pCodec[tmpthis->audioStreamIndex].pFrame); 
                    break;
                case AVMEDIA_TYPE_VIDEO:
                    //视频流同上
                    tmpthis->videoStreamIndex = tmpthis->pPacket->stream_index;
                    avcodec_send_packet(tmpthis->pCodec[tmpthis->videoStreamIndex].pCodeCtx, tmpthis->pPacket);
                    tmpthis->gotVideoPicture = avcodec_receive_frame(tmpthis->pCodec[tmpthis->videoStreamIndex].pCodeCtx,
                    tmpthis->pCodec[tmpthis->videoStreamIndex].pFrame); 
                    break;
                default:
                    break;
                }
                //必须要释放
                av_packet_unref(tmpthis->pPacket);
            }
        }
    }
    printf("leave unpackLoop\n");
    while(!(tmpthis->isVideoClose && tmpthis->isAudioClose));
    if(tmpthis->pCodec)
    {
        free(tmpthis->pCodec);
        tmpthis->pCodec = NULL;        
    }
   tmpthis->codecContextExit(tmpthis->audioStreamIndex);
   tmpthis->codecContextExit(tmpthis->videoStreamIndex);
   tmpthis->isUnpackClose = true;
   printf("unpackLoop free over\n");
    return 0;
}
void Player::audioCallBack(void *pthis,unsigned char *stream,int len)
{
    if(pthis == NULL)
        return;
    Player *tmpthis = (Player *)pthis;
    //每次回调时必须先清空
    SDL_memset(stream, 0, len);
    //如果当前剩余播放为0则退出，这个变量在音频线程中被赋值
	if(tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.audioLen == 0)
		return; 
    //如果len大于audioLen，则播放audioLen
	len=(len>tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.audioLen?tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.audioLen:len);
	//复制len长度的音频数据到stream
    memcpy(stream, tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.pAudioPos, len);
    //当前播放指针后移len
	tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.pAudioPos += len; 
    //剩余播放长度-len
	tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.audioLen -= len; 
}
//SDL thread entry: opens the SDL audio device with the output format chosen
//in codecContextInit, sets up the swr resampler, and hands each converted
//frame to audioCallBack through audioBufCtrl. Frees all audio resources on
//shutdown and raises isAudioClose last so unpackLoop can proceed.
int Player::audioLoop(void *pthis)
{
    ASSERT(pthis)
    Player *tmpthis = (Player *)pthis;
    tmpthis->isAudioClose = false;
    SDL_AudioSpec audioSpec;
    int i = tmpthis->audioStreamIndex;
    //Describe the device with the output format parameters saved earlier.
    audioSpec.freq = tmpthis->pCodec[i].pAudioOutFmt->outSamplesRate;
    audioSpec.format = AUDIO_S16SYS;
    audioSpec.channels = tmpthis->pCodec[i].pAudioOutFmt->outChannels;
    audioSpec.silence = 0;
    audioSpec.samples = tmpthis->pCodec[i].pAudioOutFmt->outNbSamples;
    audioSpec.callback = audioCallBack;
    audioSpec.userdata = pthis;
    //Open the audio device.
    //NOTE(review): returning -1 here leaves isAudioClose == false, which
    //would make Player::exit() and unpackLoop spin forever — verify.
	if (SDL_OpenAudio(&audioSpec, NULL)<0){ 
		printf("can't open audio.\n"); 
		return -1; 
	}
    //Allocate the resample context.
    tmpthis->pCodec[i].pSwrConvertCtx = swr_alloc();
    ASSERT(tmpthis->pCodec[i].pSwrConvertCtx)
    //Configure output (SDL) and input (decoder) parameters on the context.
	tmpthis->pCodec[i].pSwrConvertCtx=swr_alloc_set_opts(tmpthis->pCodec[i].pSwrConvertCtx, \
    tmpthis->pCodec[i].pAudioOutFmt->outChLayout,                                                                                                \
    tmpthis->pCodec[i].pAudioOutFmt->outNbSampleFmt,                                                                                      \
    tmpthis->pCodec[i].pAudioOutFmt->outSamplesRate,                                                                                         \
    av_get_default_channel_layout(tmpthis->pCodec[i].pCodeCtx->channels),                                           \
    tmpthis->pCodec[i].pCodeCtx->sample_fmt ,                                                                                                         \
    tmpthis->pCodec[i].pCodeCtx->sample_rate,0, NULL);
    //Initialize the resampler.
	swr_init(tmpthis->pCodec[i].pSwrConvertCtx);   
    //Unpause the device: playback starts, SDL begins invoking audioCallBack.
    SDL_PauseAudio(0);
    while(!tmpthis->isClose)
    {
        //0 means the unpack thread decoded a complete audio frame.
        if(tmpthis->gotAudioPicture == 0)
        {
            //Resample the decoded frame into pBuffer.
            //NOTE(review): out_count is MAX_AUDIO_FRAME_SIZE *samples* while
            //pBuffer holds MAX_AUDIO_FRAME_SIZE*2 *bytes*; at S16 stereo a
            //large frame could exceed the buffer — confirm the sizes match.
            swr_convert(tmpthis->pCodec[tmpthis->audioStreamIndex].pSwrConvertCtx,   \
            &tmpthis->pCodec[tmpthis->audioStreamIndex].pBuffer,                                                 \
            MAX_AUDIO_FRAME_SIZE,                                                                                            \
            (const uint8_t **)tmpthis->pCodec[tmpthis->audioStreamIndex].pFrame->data , \
            tmpthis->pCodec[tmpthis->audioStreamIndex].pFrame->nb_samples); 

            tmpthis->gotAudioPicture = -1;        
            //Wait until the callback has drained the previous chunk.
            while(tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.audioLen>0)
                SDL_Delay(1); 
            //Publish the new chunk: set the data pointer and byte count, then
            //reset the read cursor (audioLen last — the callback keys off it).
            tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.pAudioChunk = tmpthis->pCodec[tmpthis->audioStreamIndex].pBuffer; 
            tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.audioLen =tmpthis->pCodec[tmpthis->audioStreamIndex].pAudioOutFmt->outFrameSize;
            tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.pAudioPos = tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.pAudioChunk;              
        } 
    }
    printf("leave audioLoop\n");
    //Shutdown: release frame, output format struct, resampler, device, buffer.
    av_frame_free(&tmpthis->pCodec[tmpthis->audioStreamIndex].pFrame);
    if(tmpthis->pCodec[tmpthis->audioStreamIndex].pAudioOutFmt!=NULL)
    {
        free(tmpthis->pCodec[tmpthis->audioStreamIndex].pAudioOutFmt);
        tmpthis->pCodec[tmpthis->audioStreamIndex].pAudioOutFmt = NULL;
    }
    swr_free(&tmpthis->pCodec[tmpthis->audioStreamIndex].pSwrConvertCtx);
    tmpthis->pCodec[tmpthis->audioStreamIndex].pSwrConvertCtx = NULL;
    SDL_CloseAudio();
    av_free(tmpthis->pCodec[tmpthis->audioStreamIndex].pBuffer);
    tmpthis->pCodec[tmpthis->audioStreamIndex].pBuffer = NULL;
    //Raised last: unpackLoop waits on this flag before freeing pCodec.
    tmpthis->isAudioClose = true;
    printf("audioLoop free over\n");
    return 0;
}

int Player::videoLoop(void *pthis)
{
    ASSERT(pthis)
    Player *tmpthis = (Player *)pthis;
    tmpthis->isVideoClose = false;
    SDL_Window *screen;
    //创建窗口
    screen = SDL_CreateWindow("Player",SDL_WINDOWPOS_UNDEFINED,SDL_WINDOWPOS_UNDEFINED,\
    tmpthis->screenW,tmpthis->screenH,SDL_WINDOW_SHOWN);
    if(!screen)
    {
        printf("Can't Create Window -- %s\n",SDL_GetError());
        return -1;
    }
    //创建渲染器
    SDL_Renderer* sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
    //清空渲染器
    SDL_RenderClear(sdlRenderer);
    //创建纹理，根据不同的格式创建不同的纹理，这个就是把FFMPEG中的格式转换为SDL中对应的格式
    SDL_Texture* sdlTexture = NULL;
    if(tmpthis->pCodec[tmpthis->videoStreamIndex].pCodeCtx->pix_fmt == AV_PIX_FMT_YUV420P)
    sdlTexture = SDL_CreateTexture(sdlRenderer,SDL_PIXELFORMAT_IYUV,\
    SDL_TEXTUREACCESS_STREAMING,tmpthis->screenW,tmpthis->screenH);//SDL_PIXELFORMAT_RGB24 /SDL_PIXELFORMAT_IYUV
    else
    sdlTexture = SDL_CreateTexture(sdlRenderer,SDL_PIXELFORMAT_RGB24,\
    SDL_TEXTUREACCESS_STREAMING,tmpthis->screenW,tmpthis->screenH);//SDL_PIXELFORMAT_RGB24 /SDL_PIXELFORMAT_IYUV
	//描述一个矩形
    SDL_Rect sdlRect;
    //矩形高
    sdlRect.h = tmpthis->screenH;
    //矩形宽
    sdlRect.w = tmpthis->screenW;
    while(!tmpthis->isClose)
    {
        if(tmpthis->gotVideoPicture == 0)
        {
            //转换视频格式
            sws_scale(tmpthis->pCodec[tmpthis->videoStreamIndex].pSwsConvertCtx, 
            (const unsigned char* const*)tmpthis->pCodec[tmpthis->videoStreamIndex].pFrame->data, 
            tmpthis->pCodec[tmpthis->videoStreamIndex].pFrame->linesize, 
            0, 
            tmpthis->pCodec[tmpthis->videoStreamIndex].pCodeCtx->height, 
            tmpthis->pCodec[tmpthis->videoStreamIndex].videoOutFmt.pOutFrame->data, 
            tmpthis->pCodec[tmpthis->videoStreamIndex].videoOutFmt.pOutFrame->linesize);
            tmpthis->gotVideoPicture = -1; 
            //设置纹理像素数据
            SDL_UpdateTexture( sdlTexture, &sdlRect,                                                                                            \
            tmpthis->pCodec[tmpthis->videoStreamIndex].videoOutFmt.pOutFrame->data[0],         \
            tmpthis->pCodec[tmpthis->videoStreamIndex].videoOutFmt.pOutFrame->linesize[0] ); \
            SDL_RenderClear( sdlRenderer );  
            //纹理复制到渲染器
            SDL_RenderCopy( sdlRenderer, sdlTexture, NULL, &sdlRect);  
            //显示
            SDL_RenderPresent( sdlRenderer );          
        }
    }
    printf("leave videoLoop\n");
    av_free(tmpthis->pCodec[tmpthis->videoStreamIndex].pBuffer);
    tmpthis->pCodec[tmpthis->videoStreamIndex].pBuffer = NULL;
    av_frame_free(&tmpthis->pCodec[tmpthis->videoStreamIndex].pFrame);
    tmpthis->pCodec[tmpthis->videoStreamIndex].pFrame = NULL;
    av_frame_free(&tmpthis->pCodec[tmpthis->videoStreamIndex].videoOutFmt.pOutFrame);
    tmpthis->pCodec[tmpthis->videoStreamIndex].videoOutFmt.pOutFrame = NULL;
    sws_freeContext(tmpthis->pCodec[tmpthis->videoStreamIndex].pSwsConvertCtx);
    tmpthis->pCodec[tmpthis->videoStreamIndex].pSwsConvertCtx = NULL;
    SDL_DestroyWindow(screen);
    tmpthis->isVideoClose = true;
    printf("videoLoop free over\n");
    return 0;
}