#include "head.h"

// Shared playback-clock state used for audio/video synchronization.
// vedioMSec / audioMSec hold the most recent frame pts in milliseconds and are
// written by the decode threads and by FFmpegDecode::SetProgress.
// NOTE(review): these are plain (non-atomic) variables read and written from
// multiple threads — confirm that torn/stale reads are acceptable here.
double FFmpegDecode::vedioMSec = 0;
double FFmpegDecode::audioMSec = 0;
int    FFmpegDecode::audioPtsDelayoffset = 0;
int    FFmpegDecode::vedioPtsDelayoffset = 0;
bool   FFmpegDecode::threadExit          = false;  // signals reader/decoder threads to exit
//class AudioDecode
//        :public VedioDecode
//{
//public:
//protected:
//    void convertAndRender(AVFrame *temp) override;
//    void checkQueueEmpty(const CircularQueue &queue) const override;
//private:
//    SwrContextWrap                  _swrContext;
//};

// Publish this frame's pts as the shared audio clock and stall the audio
// thread when audio runs ahead of the video clock.
bool AudioDecode::Synchronization(AVFrame *temp)
{
    // Snapshot the video clock first, then publish the new audio clock
    // (both in milliseconds).
    const auto vedioPts = FFmpegDecode::vedioMSec;
    FFmpegDecode::audioMSec = GetPts(temp);
    const auto audioPts = FFmpegDecode::audioMSec;

    // A positive difference means audio is ahead of video; sleep for the gap
    // (beyond the tolerance window) so the video thread can catch up.
    const auto diffTime = audioPts - vedioPts;
    if(diffTime > FFmpegDecode::SynchronizationPrecision + FFmpegDecode::audioPtsDelayoffset)
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(static_cast<int64_t>(diffTime)));
    }
    return true;
}
// Release this decoder's resources: base-class state first, then the
// audio resampler context.
void AudioDecode::Destroy()
{
    // Original note (translated): some base-class members also need cleanup;
    // consider replacing this inheritance with a plain struct later, since the
    // ownership split is too easy to blur.
    VedioDecode::Destroy();
    _swrContext.Destroy();
}
// Resample one decoded audio frame to interleaved S16 PCM (same rate and
// channel count as the source) and hand the bytes to the shared audio player,
// after synchronizing against the video clock.
void AudioDecode::convertAndRender(AVFrame *temp)
{
    if(temp == nullptr || _pCodecCtx == nullptr)
    {
        return;
    }
    Synchronization(temp);

    // Lazily create and initialise the resampler on the first frame.
    if(_swrContext.TempContext == nullptr)
    {
        _swrContext.TempContext = swr_alloc();
        swr_alloc_set_opts(_swrContext.TempContext,
                           av_get_default_channel_layout(_pCodecCtx->channels),
                           AV_SAMPLE_FMT_S16,
                           _pCodecCtx->sample_rate,
                           av_get_default_channel_layout(_pCodecCtx->channels),
                           _pCodecCtx->sample_fmt,
                           _pCodecCtx->sample_rate,
                           0, nullptr);
        // fix: previously swr_init's failure was ignored and a null/broken
        // context could be used below.
        if(_swrContext.TempContext == nullptr || swr_init(_swrContext.TempContext) < 0)
        {
            qDebug() << "swr context init err!";
            return;
        }
    }

    auto buffsize = av_samples_get_buffer_size(nullptr, _pCodecCtx->channels, temp->nb_samples
                                               , AV_SAMPLE_FMT_S16
                                               , 0);
    // fix: av_samples_get_buffer_size returns a negative AVERROR on failure;
    // previously that value was passed straight into realloc.
    if(buffsize < 0)
    {
        qDebug() << "av_samples_get_buffer_size err!";
        return;
    }

    static SwrBufferWrap swrBuffer;
    // Grow the reusable buffer when a frame needs more space than before.
    if(swrBuffer.data == nullptr || swrBuffer.length < buffsize)
    {
        // realloc is preferred over malloc here: when memory is not tight it
        // can grow the block in place.
        auto newData = reinterpret_cast<uint8_t*>(realloc(swrBuffer.data, buffsize));
        // fix: on realloc failure the old pointer was previously overwritten
        // with nullptr (leaking the block) while length was still updated.
        if(newData == nullptr)
        {
            qDebug() << "realloc err!";
            return;
        }
        swrBuffer.data = newData;
        swrBuffer.length = buffsize;
    }
    uint8_t *outFile[2] = {swrBuffer.data, nullptr};
    auto res = swr_convert(_swrContext.TempContext
                           , outFile
                           , temp->nb_samples, const_cast<const uint8_t **>(reinterpret_cast<uint8_t **>(temp->data)), temp->nb_samples);

    if(res < 0)
    {
        qDebug() << "swr_convert err !";
        return;
    }
    // fix: write only the bytes actually produced (res samples per channel),
    // not the full buffer capacity — swr_convert may emit fewer samples than
    // requested, and the tail would otherwise be stale data.
    auto outBytes = res * _pCodecCtx->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
    SingleInstance::GetInstanca().audioPlayer.WriteData(const_cast<const char *>(reinterpret_cast<char *>(swrBuffer.data)) , outBytes);
}







// Default-construct with no codec context; SetAVCodecContext() creates it later.
VedioDecode::VedioDecode()
{
    _pCodecCtx.reset(nullptr);
}


// Empty destructor: members release themselves; explicit cleanup is Destroy().
VedioDecode::~VedioDecode()
{

}

// Create a decoder context for `avCode`, copy the stream's codec parameters
// into it, and open the decoder. Logs and returns early when no decoder was
// found for the stream.
void VedioDecode::SetAVCodecContext(AVCodec *avCode, AVCodecParameters *decodeVedioContex)
{
    // fix: previously a null codec was only logged and then used anyway in
    // avcodec_alloc_context3/avcodec_open2; bail out instead.
    if(avCode == nullptr)
    {
        qDebug() << "vedio decoder is not found!";
        return;
    }
    // Allocate the decoder context for this codec.
    _pCodecCtx.reset(avcodec_alloc_context3(avCode));
    _codec = avCode;
    // Copy the stream's parameters into the AVCodecContext.
    if( avcodec_parameters_to_context(_pCodecCtx.get(),decodeVedioContex) < 0)
    {
        // fix: message previously duplicated "decoder open err!" from the
        // open2 branch, making the two failures indistinguishable in logs.
        qDebug() <<"avcodec_parameters_to_context err!";
    }
    // Open the decoder.
    if(avcodec_open2(_pCodecCtx.get(), avCode, nullptr) < 0)
    {
        qDebug() <<"decoder open err!";
    }
}

// Allocate the reusable YUV420P frame that sws_scale writes into, sized to
// the output screen dimensions.
void VedioDecode::SetYUVBuffer()
{
    _yuvFrame.reset(av_frame_alloc());
    _yuvFrame->format = AV_PIX_FMT_YUV420P;
    _yuvFrame->width  = Screen::Width;
    _yuvFrame->height = Screen::Height;
    // 32-byte plane alignment; linesize may therefore exceed the width.
    const auto res = av_frame_get_buffer(_yuvFrame.get(), 32);
    if(res < 0)
    {
        qDebug() << "av_frame_get_buffer err!VedioDecode::SetYUVBuffer";
        return;
    }
}

// Copy one pixel plane row by row. Each source row is `linesize` bytes (which
// may include alignment padding) but only the first `minWidth` payload bytes
// are kept, so the destination ends up tightly packed. `temp` is unused but
// kept for signature compatibility.
void H264Frame::MemcpyPanel(int minWidth, int height, int linesize, AVFrame *temp, uint8_t *src, uint8_t *dst)
{
    for(int row = 0; row < height; ++row)
    {
        memcpy(dst + row * minWidth, src + row * linesize, minWidth);
    }
}


// Pack the three (possibly padded) planes of `temp` into the tight Y/U/V
// buffers of this frame. Chroma planes are half the luma size in both
// dimensions (YUV420P).
void H264Frame::Memcpy(AVFrame *temp)
{
    const auto lumaWidth = std::min(Width, temp->linesize[0]);
    MemcpyPanel(lumaWidth, Height, temp->linesize[0], temp, temp->data[0], Y.dataBuffer);

    const auto chromaWidthU = std::min(Width/2, temp->linesize[1]);
    MemcpyPanel(chromaWidthU, Height/2, temp->linesize[1], temp, temp->data[1], U.dataBuffer);

    const auto chromaWidthV = std::min(Width/2, temp->linesize[2]);
    MemcpyPanel(chromaWidthV, Height/2, temp->linesize[2], temp, temp->data[2], V.dataBuffer);
}

// Publish this frame's pts as the shared video clock, update the UI progress
// ratio, and stall the video thread when video runs ahead of the audio clock.
bool VedioDecode::Synchronization(AVFrame *temp)
{
    const auto vedioPts = GetPts(temp);
    FFmpegDecode::vedioMSec = vedioPts;
    const auto audioPts = FFmpegDecode::audioMSec;

    // _duration is in seconds while vedioPts is in milliseconds, hence *1000.
    SingleInstance::GetInstanca().ProgressRotia = vedioPts/(_duration * 1000);

    // A positive difference means video is ahead of audio; sleep for the gap
    // (beyond the tolerance window) so audio can catch up.
    const auto diffTime = vedioPts - audioPts;
    if(diffTime > FFmpegDecode::SynchronizationPrecision + FFmpegDecode::vedioPtsDelayoffset)
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(static_cast<int64_t>(diffTime)));
    }
    return true;
}

// Convert a decoded frame to YUV420P at screen size, pack the planes into an
// H264Frame, and hand it to the registered OpenGL render callback.
void VedioDecode::convertAndRender(AVFrame *temp)
{
    if(_pCodecCtx.get() == nullptr)
    {
        qDebug() << "AVCodecContext not init";
        return;
    }
    // (fix: removed a discarded GetPts(temp) call — Synchronization computes
    // the pts itself, so the extra call had no effect.)
    Synchronization(temp);

    // Reuse/refresh a cached scaler for source-format -> YUV420P conversion.
    _swsContextWrap.TempContext = sws_getCachedContext(_swsContextWrap.TempContext,
                                                       _pCodecCtx->width, _pCodecCtx->height,
                                                       _pCodecCtx->pix_fmt, Screen::Width,
                                                       Screen::Height, AV_PIX_FMT_YUV420P, SWS_BICUBIC,
                                                       nullptr, nullptr, nullptr);
    // fix: previously a failed context allocation was passed straight into
    // sws_scale, which would crash on a null context.
    if(_swsContextWrap.TempContext == nullptr)
    {
        qDebug() << "sws_getCachedContext err!";
        return;
    }

    sws_scale(_swsContextWrap.TempContext, temp->data,
              temp->linesize, 0, _pCodecCtx->height,
              _yuvFrame.get()->data, _yuvFrame.get()->linesize);

    // Extract the planes into a self-contained structure for the GL renderer.
    auto yuvGLPtr = std::unique_ptr<H264Frame>(new H264Frame());
    auto &yuvGL = *yuvGLPtr;
    yuvGL.Width = Screen::Width;
    yuvGL.Height = Screen::Height;
    // Byte alignment: linesize may exceed the visible width, so the packed
    // length uses the smaller of the two per plane.
    yuvGL.Y.length =  std::min(static_cast<int>(yuvGL.Width), _yuvFrame->linesize[0])*yuvGL.Height;
    yuvGL.U.length =  (yuvGL.Height)/2 * std::min(static_cast<int>(yuvGL.Width/2), _yuvFrame->linesize[1]);
    yuvGL.V.length = (yuvGL.Height)/2 * std::min(static_cast<int>(yuvGL.Width/2), _yuvFrame->linesize[2]);

    yuvGL.Y.dataBuffer = reinterpret_cast<uint8_t*>(malloc(yuvGL.Y.length));
    yuvGL.U.dataBuffer = reinterpret_cast<uint8_t*>(malloc(yuvGL.U.length));
    yuvGL.V.dataBuffer = reinterpret_cast<uint8_t*>(malloc(yuvGL.V.length));
    // fix: mallocs were previously unchecked and a null buffer would be
    // memcpy'd into. NOTE(review): assumes H264Frame releases its buffers on
    // destruction so this early return cannot leak — TODO confirm.
    if(yuvGL.Y.dataBuffer == nullptr || yuvGL.U.dataBuffer == nullptr || yuvGL.V.dataBuffer == nullptr)
    {
        qDebug() << "malloc err!VedioDecode::convertAndRender";
        return;
    }

    // A custom copy is required: with padded linesizes the frame contains
    // meaningless filler bytes that must not be copied.
    yuvGL.Memcpy(_yuvFrame.get());
    yuvGL.pts = temp->pts;
    if(_Render != nullptr && _screen != 0)
    {
        _Render(std::move(yuvGLPtr), _screen);
    }else{
        qDebug() << "not found render function";
    }
}
//Convert from the stream time base into milliseconds
// Convert a frame's pts from its stream time base into milliseconds.
double VedioDecode::GetPts(AVFrame *temp)
{
    // pts * time_base (seconds per tick) * 1000 -> milliseconds
    return temp->pts * av_q2d(*_avr) * 1000;
}
//Check the playback status; block while it is stopped


// Block until the producer signals that packets are available in `queue`,
// or until playback has ended (End flag set while waiting).
void VedioDecode::checkQueueEmpty(const CircularQueue &queue) const
{
    // The while-loop re-checks emptiness to guard against spurious wakeups.
    // NOTE(review): isEmpty() is evaluated outside the mutex, so a notify can
    // fire between the check and wait() — confirm the producer's repeated
    // notify_all calls make a missed wakeup harmless here.
    while(queue.isEmpty())
    {
        std::unique_lock<std::mutex> lock(SingleInstance::GetInstanca().CondConsumeMutex);
        SingleInstance::GetInstanca().CondConsumePacket.wait(lock);
        // Exit promptly when playback has been stopped while waiting.
        if(SingleInstance::GetInstanca().End)
        {
            return;
        }
    }
}

// Decode loop: consume packets from `queue`, feed them to the decoder, and
// render each produced frame, until the global End flag is set.
void VedioDecode::startDecode(CircularQueue &queue)
{
    if(_pCodecCtx.get() == nullptr)
    {
        qDebug() << "decoder not init";
        return;
    }
    // AVFrame is normally managed with av_frame_alloc and the matching free.
    std::unique_ptr<AVFrame, AVFrameDeleter> tempFrame;
    tempFrame.reset(av_frame_alloc());

    while (!SingleInstance::GetInstanca().End) {

        SingleInstance::GetInstanca().CheckPlayStatus();
        checkQueueEmpty(queue);

        // When playback ends there are only two possibilities: either this
        // thread was blocked in checkQueueEmpty, or it exits the while loop.
        if(SingleInstance::GetInstanca().End)
        {
            qDebug() << "startDecode";
            return;
        }

        // Wake the producer early so the queue stays stocked and this thread
        // never has to wait for data.
        if(queue.GetCurSize() < queue.GetCacpity()/4)
        {
            SingleInstance::GetInstanca().CondProductPacket.notify_all();
        }
       AVPacketWrap tempPacket;
       queue.Consume(&tempPacket.tempPacket);
       // Submit the packet for decoding.
       if(avcodec_send_packet(_pCodecCtx.get(), &tempPacket.tempPacket) != 0)
       {
           qDebug() << "packet send err!";
           continue;
       }

       auto res = avcodec_receive_frame(_pCodecCtx.get(), tempFrame.get());
       // This used to test `res != 0`; some frames only become decodable
       // later (I/P/B frame reordering), so only render when a frame is
       // actually ready (res == 0).
       if(res == 0)
       {
           convertAndRender(tempFrame.get());
           // Unref here so the last iteration's frame memory is not left
           // referenced when the loop exits.
           av_frame_unref(tempFrame.get());
       }     
    }
    if(SingleInstance::GetInstanca().End)
    {
        qDebug() << "startDecode";
    }
}





// Construct a decoder bound to a media file path; no FFmpeg state is opened
// until init() is called.
FFmpegDecode::FFmpegDecode(const QString &filePath)
    :_filePath(filePath)
{

}


// Stop all worker threads, wake any that are blocked, join them, then release
// the format context and both decoders.
FFmpegDecode::~FFmpegDecode()
{
    SingleInstance::GetInstanca().End = true;
    SingleInstance::GetInstanca().StartFlag = true;
    threadExit = true;
    // Wake every blocked thread so none of them gets stuck and fails to exit.
    SingleInstance::GetInstanca().CondProductPacket.notify_all();
    SingleInstance::GetInstanca().CondConsumePacket.notify_all();
    SingleInstance::GetInstanca().CondPlayStatues.notify_all();

    threadJoin();
    // Release only after all threads are joined — they use these objects.
    _myAvFormatContext.Destroy();
    _vd.Destroy();
    _ad.Destroy();
}

// Replace the media file path that the next init() will open.
void FFmpegDecode::SetFilePath(const QString &filePah)
{
    _filePath = filePah;
}

// Join the decode threads, release the shared packet queues, then join the
// reader thread — in exactly that order (see note below).
void FFmpegDecode::threadJoin()
{
    if(_vedioThread.joinable())
    {
        _vedioThread.join();

    }
    if(_audioThread.joinable())
    {
        _audioThread.join();
    }
    // Must be released at exactly this point: if readFrame has finished
    // reading but the decode threads never consumed the remaining cached
    // frames (e.g. a new video is being loaded), those leftover cached
    // packets could never be drained and the readFrame thread would never
    // exit.
    _shareDataAudio.Destroy();
    _shareDataVedio.Destroy();

    if(_readThread.joinable())
    {
        _readThread.join();

    }


}


// Tear down playback state (threads, queues, contexts) so a new file can be
// loaded, then re-arm the control flags.
void FFmpegDecode::reset()
{
    SingleInstance::GetInstanca().End = true;
    SingleInstance::GetInstanca().StartFlag = true;
    threadExit = true;
    // Wake every blocked thread so none of them gets stuck and fails to exit.
    SingleInstance::GetInstanca().CondProductPacket.notify_all();
    SingleInstance::GetInstanca().CondConsumePacket.notify_all();
    SingleInstance::GetInstanca().CondPlayStatues.notify_all();

    threadJoin();

// Note for later (translated): a destructor destroys the whole object, but
// here only some internal memory needs releasing. Calling destructors
// directly leaves the objects unusable — reusing them afterwards crashes once
// their unprotected memory is overwritten. Hence the explicit Destroy()
// methods below instead of these destructor calls:
//    _shareDataAudio.~CircularQueue();
//    _shareDataVedio.~CircularQueue();
//    _myAvFormatContext.~MyAVFormatContext();
//    _vd.~VedioDecode();
//    _ad.~AudioDecode();

    _myAvFormatContext.Destroy();
    _vd.Destroy();
    _ad.Destroy();
    // Re-arm the flags for the next playback session.
    SingleInstance::GetInstanca().End = false;
    SingleInstance::GetInstanca().StartFlag = false;
    threadExit = false;
    SingleInstance::GetInstanca().ProgressRotia = 0;
}

// Open the media file, locate the audio and video streams, configure both
// decoders with their stream parameters, and launch the reader and decoder
// threads.
void FFmpegDecode::init()
{
    SingleInstance::GetInstanca().End = false;
    if(_filePath.isEmpty())
    {
        qDebug() << "file is not loaded";
        return;
    }
    // toLocal8Bit is required here; other conversions garble non-ASCII paths.
    auto res = avformat_open_input(&_myAvFormatContext._avFormatContext, _filePath.toLocal8Bit().constData(), nullptr, nullptr);
    // fix: FFmpeg reports failure with any negative AVERROR code, not just
    // -1, so the old `res == -1` test let real failures through.
    if(res < 0)
    {
        qDebug() << "avformat_open_input err!";
        return;

    }
    res = avformat_find_stream_info(_myAvFormatContext._avFormatContext, nullptr);
    if(res < 0)
    {
        qDebug() << "avformat_find_stream_info err!";
        return;

    }
    decltype (_myAvFormatContext._avFormatContext->streams[0]->duration) vedioSumTime = 0;
    decltype (_myAvFormatContext._avFormatContext->streams[0]->duration) audioSumTime = 0;

    av_dump_format(_myAvFormatContext._avFormatContext, -1, _filePath.toLocal8Bit().constData(), 0);
    for(unsigned int i = 0;i < _myAvFormatContext._avFormatContext->nb_streams; i++)
    {
        if(_myAvFormatContext._avFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            _vd.SetId(i);
            _vd.SetTimeBase(&_myAvFormatContext._avFormatContext->streams[i]->time_base);
            auto decodeVedioContex = _myAvFormatContext._avFormatContext->streams[i]->codecpar;
            auto decodeVedio = avcodec_find_decoder(decodeVedioContex->codec_id);
            _vd.SetAVCodecContext(decodeVedio, decodeVedioContex);
            vedioSumTime = _myAvFormatContext._avFormatContext->streams[i]->duration;
        }
        if(_myAvFormatContext._avFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            _ad.SetId(i);
            _ad.SetTimeBase(&_myAvFormatContext._avFormatContext->streams[i]->time_base);
            auto decodeVedioContex = _myAvFormatContext._avFormatContext->streams[i]->codecpar;
            auto decodeVedio = avcodec_find_decoder(decodeVedioContex->codec_id);
            _ad.SetAVCodecContext(decodeVedio, decodeVedioContex);
            audioSumTime = _myAvFormatContext._avFormatContext->streams[i]->duration;
        }
    }
    _vd.SetYUVBuffer();

    // Durations in seconds: stream duration (time-base ticks) * time base.
    // NOTE(review): if the file lacks an audio or a video stream, the
    // corresponding GetTimeBase() was never set — confirm inputs always
    // contain both streams, or these dereferences can crash.
    _realAudio = audioSumTime*av_q2d(*_ad.GetTimeBase());
    _ad.SetDuration(_realAudio);
    _realVedio = vedioSumTime*av_q2d(*_vd.GetTimeBase());
    _vd.SetDuration(_realVedio);

    // Launch the packet reader and the two decode threads.
    _readThread = std::thread(&FFmpegDecode::startReadFrame, this);

    _vedioThread = std::thread(&VedioDecode::startDecode, &_vd, std::ref(_shareDataVedio));

    _audioThread = std::thread(&AudioDecode::startDecode, &_ad, std::ref(_shareDataAudio));

}

// Seek playback to `rotia` (a 0..1 fraction of the total duration): update
// the shared clocks, flush codec/queue state, and av_seek_frame on the video
// stream.
void FFmpegDecode::SetProgress(double rotia)
{
    // Wait for the codec and frame-reading threads to pause.
    // NOTE(review): a fixed 10ms sleep is a heuristic wait, not a guarantee —
    // confirm the threads are reliably quiescent by this point.
    std::this_thread::sleep_for(std::chrono::milliseconds(10));//wait for the codec and frame-reading threads to stop
    auto resAudio = rotia * _realAudio;
    auto resVedio = rotia * _realVedio;

    /*auto setTimeSec = setTime/1000;*///convert to seconds
    // Publish the new clocks (milliseconds) so sync resumes from the target.
    FFmpegDecode::audioMSec = resAudio  * 1000;
    FFmpegDecode::vedioMSec = resVedio  * 1000;

    // Convert seconds back into stream time-base ticks for av_seek_frame.
    resVedio = resVedio/av_q2d(*_vd.GetTimeBase());
    resAudio = resAudio/av_q2d(*_ad.GetTimeBase());
    _shareDataAudio.Destroy();
    _shareDataVedio.Destroy();
    // When jumping to a new position, the old av_read_frame packet cache must
    // be flushed, otherwise sync breaks: the first packets read at the new
    // position would still be pre-seek data, so their pts would differ wildly
    // from the updated audioMSec and playback would stall outright.
    avcodec_flush_buffers(_vd.GetAVCodecContext());
    avcodec_flush_buffers(_ad.GetAVCodecContext());
    auto resVedioInt = static_cast<int64_t>(resVedio);
    // For progress-bar seeking, AVSEEK_FLAG_BACKWARD works best (no visible
    // stutter). Seek only one stream rather than both — it ultimately
    // positions within the file itself.
    if(av_seek_frame(_myAvFormatContext._avFormatContext, _vd.GetId(), resVedioInt, AVSEEK_FLAG_BACKWARD) < 0)
    {
        qDebug() << "seek err!";
    }

    // Restore whatever play/pause state was active before the seek.
    SingleInstance::GetInstanca().StartFlag = Control::LastStartFlag;

    // Tell the reader to drop the first post-seek packet (see startReadFrame).
    _afterSeekFirstPacket = true;

    /*std::this_thread::sleep_for(std::chrono::milliseconds(10));*///wait for codec threads to stop — later found unnecessary, they already stopped above
    SingleInstance::GetInstanca().CondProductPacket.notify_all();
    SingleInstance::GetInstanca().CondPlayStatues.notify_all();
}

// Reader thread: pull packets from the container with av_read_frame and push
// them into the video/audio queues until EOF or shutdown. At EOF, wait for
// both queues to drain, then signal the end of playback.
void FFmpegDecode::startReadFrame()
{
    AVPacketWrap element;
    av_init_packet(&element.tempPacket);

    while (!SingleInstance::GetInstanca().End) {
        if(av_read_frame(_myAvFormatContext._avFormatContext, &element.tempPacket) < 0)
        {
            // True end-of-file: fall through to the drain logic below.
            // (Idea for later: seek back to the start here, reset, set
            // StartFlag to paused, and let a timer update the pause icon.)
            break;
        }
        if(threadExit)
        {
            qDebug() << "startReadFrame";
            // fix: this used to call element.~AVPacketWrap() explicitly,
            // which destroys the object a second time at scope exit
            // (double-unref). Use Destroy(), as the other exit paths do.
            element.Destroy();
            return;
        }
        if(element.tempPacket.stream_index == _vd.GetId())
        {
            // Re-check fullness in a loop to guard against spurious wakeups.
            while (_shareDataVedio.isFull()) {
                std::unique_lock<std::mutex> lock(SingleInstance::GetInstanca().CondProductMutex);
                SingleInstance::GetInstanca().CondConsumePacket.notify_all();
                SingleInstance::GetInstanca().CondProductPacket.wait(lock);
                if(_afterSeekFirstPacket)
                {
                    break;
                }
                if(threadExit)
                {
                    qDebug() << "startReadFrame";
                    element.Destroy();
                    return;
                }
            }
            // After SetProgress, one packet read before the seek is still in
            // flight; drop it, or A/V sync breaks.
            if(_afterSeekFirstPacket)
            {
                _afterSeekFirstPacket = false;
                element.Destroy();
                continue;

            }
            _shareDataVedio.AddElement(&element.tempPacket);            
        }
        if(element.tempPacket.stream_index == _ad.GetId())
        {
            while (_shareDataAudio.isFull()) {
                std::unique_lock<std::mutex> lock(SingleInstance::GetInstanca().CondProductMutex);
                SingleInstance::GetInstanca().CondConsumePacket.notify_all();
                SingleInstance::GetInstanca().CondProductPacket.wait(lock);
                if(_afterSeekFirstPacket)
                {
                    break;//after SetProgress one pre-seek packet remains; it would break A/V sync
                }
                if(threadExit)
                {
                    qDebug() << "startReadFrame";
                    element.Destroy();

                    return;
                }
            }
            if(_afterSeekFirstPacket)
            {
                _afterSeekFirstPacket = false;
                element.Destroy();
                continue;
            }
            _shareDataAudio.AddElement(&element.tempPacket);
        }
        element.Destroy();
    }
    // Reached the real end of the file: let the decoders drain both queues.
    SingleInstance::GetInstanca().LoadFile = false;
    while (!(_shareDataVedio.isEmpty()&&_shareDataAudio.isEmpty())) {
        // Keep nudging the consumers so no thread stays blocked.
        SingleInstance::GetInstanca().CondConsumePacket.notify_all();
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
    SingleInstance::GetInstanca().End = true;
    SingleInstance::GetInstanca().CondConsumePacket.notify_all();
    qDebug() << "startReadFrame";
}

// Forward the OpenGL render callback and target screen id to the video decoder.
void FFmpegDecode::SetCallBackRender(UpdateOpenGLDate func, int64_t screen)
{
    _vd.SetCallBackRender(func,screen);
}


