//
// Created by zjb on 2020/10/11.
//

#include "MediaExporter.h"
#include<thread>
#include<mutex>
#include<chrono>
#include<atomic>
#include<queue>
#include <condition_variable>
using namespace std;
#if _MSC_VER
#define snprintf _snprintf
#endif
extern "C" {
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/avstring.h>
#include <libavutil/timestamp.h>
#include<libavutil/mathematics.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
}
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

namespace liyuvideo {
    /**
    *    FFmpeg-based muxer; supports streaming over the RTMP and SRT protocols
    *    (verified against an SRS media server).
    */
    // Per-stream encoding state: one instance for video, one for audio.
    typedef struct OutputStream {
        AVStream *st;          // muxer stream this encoder feeds
        AVCodecContext *enc;   // encoder context (owned; freed in close_stream)

        /* pts of the next frame that will be generated */
        int64_t next_pts;
        int samples_count;     // running total of audio samples encoded (drives audio pts)

        AVFrame *frame;        // frame in the encoder's native format
        AVFrame *tmp_frame;    // staging frame (S16 audio / non-YUV420P video) converted into `frame`

        float t, tincr, tincr2; // signal-generator state (legacy from the ffmpeg muxing example; unused by the real data path)

        struct SwsContext *sws_ctx; // video pixel-format converter (may be NULL)
        struct SwrContext *swr_ctx; // audio resampler
    } OutputStream;

    static int checkConfig(std::shared_ptr<ExportConfig> config) {
        if (config == nullptr) return -1;
        if (config->videoWidth == 0) return -2;
        if (config->videoHeight == 0) return -2;
        if (config->m_pVideoSource == nullptr) return -3;
        if (config->m_pAudioSourceList[0] == nullptr && config->m_pAudioSourceList[1] == nullptr
            && config->m_pAudioSourceList[2] == nullptr) {
            return -4;
        }
        if (config->m_videoFile.empty() == true) {
            return -5;
        }
        return 0;
    }

    class MediaExportImpl : public MediaExport {
    public:
        virtual int initModule(std::shared_ptr<ExportConfig> config) override {
            m_config = config;
            int ret = checkConfig(config);
            if (ret != 0) {
                return ret;
            }
            if (m_bInit == true) {
                return -99;
            }
            m_bInit = true;
            //init ffmpeg
            video_st = new OutputStream();
            audio_st = new OutputStream();
            avformat_alloc_output_context2(&oc, NULL, m_config->format.c_str(), m_config->m_videoFile.c_str());
            if (!oc) {
                printf("Could not deduce output format from file extension: using MPEG.\n");
                return -11;
            }
            fmt = oc->oformat;
            fmt->video_codec = AV_CODEC_ID_H264;
            //fmt->video_codec = AV_CODEC_ID_H265;
            fmt->audio_codec = AV_CODEC_ID_AAC;
            /* Add the audio and video streams using the default format codecs
             * and initialize the codecs. */
            if (fmt->video_codec != AV_CODEC_ID_NONE) {
                if (add_stream(video_st, oc, &video_codec, fmt->video_codec) == 0) {
                    have_video = 1;
                    encode_video = 1;
                } else {
                    return -21;
                }

            }
            if (fmt->audio_codec != AV_CODEC_ID_NONE) {
                if (add_stream(audio_st, oc, &audio_codec, fmt->audio_codec) == 0) {
                    have_audio = 1;
                    encode_audio = 1;
                } else {
                    return -22;
                }
            }

            /* Now that all the parameters are set, we can open the audio and
             * video codecs and allocate the necessary encode buffers. */
            if (have_video) {
                if (open_video(oc, video_codec, video_st, opt) != 0)
                    return -23;
            }

            if (have_audio) {
                if (open_audio(oc, audio_codec, audio_st, opt) != 0)
                    return -24;
            }

            if (have_video == 0 && have_audio == 0) {
                printf("not video and nod audio\n");
                return -12;
            }
            av_dump_format(oc, 0, m_config->m_videoFile.c_str(), 1);

            /* open the output file, if needed */
            if (!(fmt->flags & AVFMT_NOFILE)) {
                if (avio_open(&oc->pb, m_config->m_videoFile.c_str(), AVIO_FLAG_WRITE) != 0) {
                    printf("can not open file\n");
                    return -13;
                }
            }
            if (avformat_write_header(oc, &opt) < 0) {
                fprintf(stderr, "Error occurred when opening output file\n");
                return -14;
            }
            //start thread
            m_bCaptureing = true;
            m_captureThread = new std::thread([&]() {
                this->captureLoop();
            });
            m_bEncodeing = true;
            m_encodeThread = new std::thread([&]() {
                this->EncodeLoop();
            });
            return 0;
        }

        virtual void releaseModule() override {
            printf("release module\n");
            if (m_bInit == false) {
                return;
            }
            m_bInit = false;
            m_bCaptureing = false;
            if (m_captureThread && m_captureThread->joinable()) {
                m_captureThread->join();
                delete m_captureThread;
                m_captureThread = nullptr;
            }
            m_bEncodeing = false;
            if (m_encodeThread && m_encodeThread->joinable()) {
                m_encodeThread->join();
                delete m_encodeThread;
                m_encodeThread = nullptr;
            }
            if (video_st != nullptr) {
                delete video_st;
                video_st = nullptr;
            }

            if (audio_st != nullptr) {
                delete audio_st;
                audio_st = nullptr;
            }
        }

    private:
        //问上层要数据线程
        void captureLoop() {
            printf("start capture loop\n");
            m_bRecvEndCmd = false;
            while (m_bCaptureing) {
                std::shared_ptr<MyMediaBuf> pVideoFrame = nullptr;
                bool hasGetData = false;
                if (m_config && m_config->m_pVideoSource) {
                    int videoQueueSize = 0;
                    {
                        std::unique_lock<std::mutex> autoLock(m_videoFrameQueueMutex);
                        videoQueueSize = m_videoFrameQueue.size();
                    }
                    if (videoQueueSize <= 10) {
                        m_config->m_pVideoSource->feedVideoFrame(pVideoFrame);
                        if (pVideoFrame) {
                            std::unique_lock<std::mutex> autoLock(m_videoFrameQueueMutex);
                            //printf("video frame ts:%u\n", pVideoFrame->m_ts);
                            if (pVideoFrame->m_bEnd == true) {
                                printf("recv end command,current ts:%u\n", pVideoFrame->m_ts);
                                //m_bCaptureing = false;
                            }
                            m_videoFrameQueue.push(pVideoFrame);
                            hasGetData = true;
                        }
                    }
                }
                //get audio frame
                int maxAudioQueueLen = 0;
                {
                    std::unique_lock<std::mutex> autoLock(m_audioFrameQueueMutex);
                    for (int i = 0; i < 3; i++) {
                        if (m_audioFrameQueue[i].size() > maxAudioQueueLen) {
                            maxAudioQueueLen = m_audioFrameQueue[i].size();
                        }
                    }
                }

                if (maxAudioQueueLen <= 1000) {
                    std::unique_lock<std::mutex> autoLock(m_audioFrameQueueMutex);
                    for (int i = 0; i < 3; i++) {
                        AudioSource *pAudioSource = m_config->m_pAudioSourceList[i];
                        if (pAudioSource != nullptr) {
                            std::shared_ptr<MyMediaBuf> pAudioFrame = nullptr;
                            pAudioSource->feedAudioFrame(pAudioFrame);
                            if (pAudioFrame) {
                                //printf("get audio frame,ts:%u\n", pAudioFrame->m_ts);
                                m_audioFrameQueue[i].push(pAudioFrame);
                                hasGetData = true;
                            }
                        }
                    }
                }

                if (hasGetData == false) {
                    //避免空跑
                    std::this_thread::sleep_for(std::chrono::milliseconds(10));
                    m_cv_uptr.notify_one();
                }
            }
            printf("cpatuer loop quit\n");
        }

        //视频编码线程
        void EncodeLoop() {
            printf("start video encode loop\n");
            std::unique_lock<std::mutex> lock(m_mutex);
            m_cv_uptr.wait(lock);
            while (m_bEncodeing) {
                if (av_compare_ts(video_st->next_pts, video_st->enc->time_base,
                                  audio_st->next_pts, audio_st->enc->time_base) <= 0) {
                    //编码视频
                    write_video_frame(oc, video_st);
                } else {
                    //编码音频
                    if (write_audio_frame(oc, audio_st) == 0) {
                        //LOG(INFO) << "write audio frame video pts:" << video_st->next_pts*50 << ",audio pts:" << (audio_st->next_pts*10/441)<<",buffer:"<<audioQueue.size();
                    }
                }
            }
            printf("video encode loop quit\n");
            av_write_trailer(oc);
            if (have_video) {
                close_stream(oc, video_st);
            }
            if (have_audio) {
                close_stream(oc, audio_st);
            }
            if (!(fmt->flags & AVFMT_NOFILE)) {
                avio_closep(&oc->pb);
            }
            avformat_free_context(oc);
            if(m_config->m_pCompleteEvent){
                //通知导出文件完成了
                m_config->m_pCompleteEvent();
            }
        }

        int add_stream(OutputStream *ost, AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id) {
            AVCodecContext *c;
            int i;

            /* find the encoder */
            if (codec_id == AV_CODEC_ID_H265) {
                //hevc_qsv 使用nv12
                //*codec = avcodec_find_decoder_by_name("hevc_qsv");
                *codec = avcodec_find_encoder(codec_id);
            } else {
                *codec = avcodec_find_encoder(codec_id);
            }

            if (!(*codec)) {
                fprintf(stderr, "Could not find encoder for '%s'\n",
                        avcodec_get_name(codec_id));
                return -1;
            }

            ost->st = avformat_new_stream(oc, NULL);
            if (!ost->st) {
                fprintf(stderr, "Could not allocate stream\n");
                return -1;
            }
            ost->st->id = oc->nb_streams - 1;
            c = avcodec_alloc_context3(*codec);
            if (!c) {
                fprintf(stderr, "Could not alloc an encoding context\n");
                return -1;
            }
            ost->enc = c;

            switch ((*codec)->type) {
                case AVMEDIA_TYPE_AUDIO:
                    c->sample_fmt = (*codec)->sample_fmts ?
                                    (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
                    c->bit_rate = m_config->audioBitrate * 1000;
                    c->sample_rate = m_config->audioChannels;
                    if ((*codec)->supported_samplerates) {
                        c->sample_rate = (*codec)->supported_samplerates[0];
                        for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                            if ((*codec)->supported_samplerates[i] == m_config->audioSamplerate)
                                c->sample_rate = m_config->audioSamplerate;
                        }
                    }
                    if (m_config->audioChannels == 1) {
                        c->channel_layout = AV_CH_LAYOUT_MONO;
                    } else {
                        c->channel_layout = AV_CH_LAYOUT_STEREO;
                    }

                    c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
                    /*
                    if ((*codec)->channel_layouts) {
                        c->channel_layout = (*codec)->channel_layouts[0];
                        for (i = 0; (*codec)->channel_layouts[i]; i++) {
                            if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                                c->channel_layout = AV_CH_LAYOUT_STEREO;
                        }
                    }
                    c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
                    */
                    ost->st->time_base.num = 1;
                    ost->st->time_base.den = c->sample_rate;
                    break;

                case AVMEDIA_TYPE_VIDEO:
                    c->codec_id = codec_id;
                    c->bit_rate = m_config->videoBitrate * 1000;
                    /* Resolution must be a multiple of two. */
                    c->width = m_config->videoWidth;
                    c->height = m_config->videoHeight;
                    /* timebase: This is the fundamental unit of time (in seconds) in terms
                     * of which frame timestamps are represented. For fixed-fps content,
                     * timebase should be 1/framerate and timestamp increments should be
                     * identical to 1. */
                    ost->st->time_base.num = 1;
                    ost->st->time_base.den = m_config->fps;
                    c->time_base = ost->st->time_base;

                    c->gop_size = m_config->fps * 2; /* emit one intra frame every twelve frames at most */
                    c->pix_fmt = STREAM_PIX_FMT;
                    c->max_b_frames = 0;
                    if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
                        /* just for testing, we also add B-frames */

                    }
                    break;

                default:
                    break;
            }

            /* Some formats want stream headers to be separate. */
            if (oc->oformat->flags & AVFMT_GLOBALHEADER)
                c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
            return 0;
        }

        int open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg) {
            int ret;
            AVCodecContext *c = ost->enc;
            AVDictionary *opt = NULL;

            av_dict_copy(&opt, opt_arg, 0);
            if (codec->id == AV_CODEC_ID_H264) {
                av_dict_set(&opt, "preset", "ultrafast", NULL);
                av_dict_set(&opt, "profile", "baseline", NULL);
                //av_dict_set_int(&opt, "crf", 28, NULL);
                //av_dict_set_int(&opt, "crf_max", 36, NULL);
            } else if (codec->id == AV_CODEC_ID_H265) {
                av_dict_set(&opt, "preset", "ultrafast", NULL);
                //av_dict_set(&opt, "profile", "baseline", NULL);
                av_dict_set_int(&opt, "crf", 28, NULL);
            }
            /* open the codec */
            ret = avcodec_open2(c, codec, &opt);
            av_dict_free(&opt);
            if (ret < 0) {
                return -1;
            }

            /* allocate and init a re-usable frame */
            ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
            if (!ost->frame) {
                fprintf(stderr, "Could not allocate video frame\n");
                return -1;
            }

            /* If the output format is not YUV420P, then a temporary YUV420P
             * picture is needed too. It is then converted to the required
             * output format. */
            ost->tmp_frame = NULL;
            if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
                ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
                if (!ost->tmp_frame) {
                    fprintf(stderr, "Could not allocate temporary picture\n");
                    return -1;
                }
            }

            /* copy the stream parameters to the muxer */
            ret = avcodec_parameters_from_context(ost->st->codecpar, c);
            if (ret < 0) {
                fprintf(stderr, "Could not copy the stream parameters\n");
                return -1;
            }
            return 0;
        }

        void close_stream(AVFormatContext *oc, OutputStream *ost) {
            avcodec_free_context(&ost->enc);
            av_frame_free(&ost->frame);
            av_frame_free(&ost->tmp_frame);
            sws_freeContext(ost->sws_ctx);
            swr_free(&ost->swr_ctx);
        }

        int open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg) {
            AVCodecContext *c;
            int nb_samples;
            int ret;
            AVDictionary *opt = NULL;

            c = ost->enc;

            /* open it */
            av_dict_copy(&opt, opt_arg, 0);
            ret = avcodec_open2(c, codec, &opt);
            av_dict_free(&opt);
            if (ret < 0) {
                return -1;
            }

            /* init signal generator */
            ost->t = 0;
            ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
            /* increment frequency by 110 Hz per second */
            ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

            if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
                nb_samples = 10000;
            else
                nb_samples = c->frame_size;

            ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                           c->sample_rate, nb_samples);
            ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                               c->sample_rate, nb_samples);

            /* copy the stream parameters to the muxer */
            ret = avcodec_parameters_from_context(ost->st->codecpar, c);
            if (ret < 0) {
                fprintf(stderr, "Could not copy the stream parameters\n");
                return -1;
            }

            /* create resampler context */
            ost->swr_ctx = swr_alloc();
            if (!ost->swr_ctx) {
                fprintf(stderr, "Could not allocate resampler context\n");
                return -1;
            }

            /* set options */
            av_opt_set_int(ost->swr_ctx, "in_channel_count", c->channels, 0);
            av_opt_set_int(ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
            av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
            av_opt_set_int(ost->swr_ctx, "out_channel_count", c->channels, 0);
            av_opt_set_int(ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
            av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);

            /* initialize the resampling context */
            if ((ret = swr_init(ost->swr_ctx)) < 0) {
                fprintf(stderr, "Failed to initialize the resampling context\n");
                return -1;
            }
            return 0;
        }

        AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height) {
            AVFrame *picture;
            int ret;

            picture = av_frame_alloc();
            if (!picture)
                return NULL;

            picture->format = pix_fmt;
            picture->width = width;
            picture->height = height;

            /* allocate the buffers for the frame data */
            ret = av_frame_get_buffer(picture, 0);
            if (ret < 0) {
                fprintf(stderr, "Could not allocate frame data.\n");
                return nullptr;
            }
            return picture;
        }

        AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                   uint64_t channel_layout,
                                   int sample_rate, int nb_samples) {
            AVFrame *frame = av_frame_alloc();
            int ret;

            if (!frame) {
                fprintf(stderr, "Error allocating an audio frame\n");
                return nullptr;
            }

            frame->format = sample_fmt;
            frame->channel_layout = channel_layout;
            frame->sample_rate = sample_rate;
            frame->nb_samples = nb_samples;

            if (nb_samples) {
                ret = av_frame_get_buffer(frame, 0);
                if (ret < 0) {
                    fprintf(stderr, "Error allocating an audio buffer\n");
                    return nullptr;
                }
            }

            return frame;
        }

        int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
                        AVStream *st, AVFrame *frame) {
            int ret;

            // send the frame to the encoder
            ret = avcodec_send_frame(c, frame);
            if (ret < 0) {
                return -1;
            }

            while (ret >= 0) {
                AVPacket pkt = {0};

                ret = avcodec_receive_packet(c, &pkt);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    break;
                else if (ret < 0) {
                    return -1;
                }

                /* rescale output packet timestamp values from codec to stream timebase */
                av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
                pkt.stream_index = st->index;

                /* Write the compressed frame to the media file. */

                ret = av_interleaved_write_frame(fmt_ctx, &pkt);
                av_packet_unref(&pkt);
                if (ret < 0) {
                    return -1;
                }
            }

            return ret == AVERROR_EOF ? 1 : 0;
        }

        int write_video_frame(AVFormatContext *oc, OutputStream *ost) {
            AVFrame *frame = get_video_frame(ost);
            if (frame != nullptr) {
                //printf("encode video frame ,pts:%d\n",ost->next_pts);
                return write_frame(oc, ost->enc, ost->st, frame);
            }
            return -1;
        }

        int write_audio_frame(AVFormatContext *oc, OutputStream *ost) {
            AVCodecContext *c;
            AVFrame *frame;
            int ret;
            int dst_nb_samples;

            c = ost->enc;

            frame = get_audio_frame(ost);

            if (frame) {
                /* convert samples from native format to destination codec format, using the resampler */
                /* compute destination number of samples */
                dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                                c->sample_rate, c->sample_rate, AV_ROUND_UP);
                av_assert0(dst_nb_samples == frame->nb_samples);

                /* when we pass a frame to the encoder, it may keep a reference to it
                 * internally;
                 * make sure we do not overwrite it here
                 */
                ret = av_frame_make_writable(ost->frame);
                if (ret < 0) {
                    return -1;
                }
                /* convert to destination format */
                ret = swr_convert(ost->swr_ctx,
                                  ost->frame->data, dst_nb_samples,
                                  (const uint8_t **) frame->data, frame->nb_samples);
                if (ret < 0) {
                    fprintf(stderr, "Error while converting\n");
                    exit(1);
                }
                frame = ost->frame;
                AVRational r;
                r.num = 1;
                r.den = c->sample_rate;
                frame->pts = av_rescale_q(ost->samples_count, r, c->time_base);
                ost->samples_count += dst_nb_samples;
            } else {
                return -1;
            }

            return write_frame(oc, c, ost->st, frame);
        }

        bool getMixAudioFrame(int16_t *dst, uint32_t curTs) {
            std::unique_lock<std::mutex> autoLock(m_audioFrameQueueMutex);
            int16_t mixedPcm[1024 * 2] = {0};
            bool findAudio = false;
            //printf("cur pts:%u\n", curTs);
            for (int i = 0; i < 3; i++) {
                std::shared_ptr<MyMediaBuf> pAudioFrame = nullptr;
                if (m_audioFrameQueue[i].empty() == false) {
                    pAudioFrame = m_audioFrameQueue[i].front();
                    if (pAudioFrame->m_ts <= curTs) {
                        m_audioFrameQueue[i].pop();
                        if (findAudio == false) {
                            //第一次音频，可以直接复制
                            memcpy(mixedPcm, pAudioFrame->m_buf, 1024 * m_config->audioChannels * 2);
                        } else {
                            int16_t *ptr = (int16_t *) pAudioFrame->m_buf;
                            for (int j = 0; j < 1024 * m_config->audioChannels; j++) {
                                mixedPcm[j] += ptr[j];
                            }
                        }
                        findAudio = true;
                    }
                }
            }
            if (findAudio) {
                memcpy(dst, mixedPcm, 1024 * m_config->audioChannels * 2);
            }

            return findAudio;
        }

        AVFrame *get_audio_frame(OutputStream *ost) {
            AVFrame *frame = ost->tmp_frame;
            int j, i, v;
            int16_t *q = (int16_t *) frame->data[0];

            bool hasAudioFrame = false;
            {
                std::unique_lock<std::mutex> autoLock(m_audioFrameQueueMutex);
                for (int i = 0; i < 3; i++) {
                    if (m_audioFrameQueue[i].empty() == false) {
                        hasAudioFrame = true;
                        break;
                    }
                }
            }
            if(hasAudioFrame==false &&m_bRecvEndCmd==true){
                m_bCaptureing = false;
                m_bEncodeing = false;
            }
            if (hasAudioFrame == true) {
                uint32_t curEncodeTs = ost->next_pts * 10 / 441;

                //混音
                if (getMixAudioFrame(q, curEncodeTs) == false) {
                    memset(q, 0, m_config->audioChannels * 1024 * 2);
                }
            } else {
                memset(q, 0, m_config->audioChannels * 1024 * 2);
            }
            frame->pts = ost->next_pts;
            ost->next_pts += frame->nb_samples;
            return frame;
        }

        AVFrame *get_video_frame(OutputStream *ost) {
            AVCodecContext *c = ost->enc;
            if (av_frame_make_writable(ost->frame) < 0) {
                return nullptr;
            }
            if (fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height) == 0) {
                ost->frame->pts = ost->next_pts++;
                return ost->frame;
            }
            return nullptr;
        }

        int fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height) {
            std::shared_ptr<MyMediaBuf> pVideoFrame = nullptr;
            if (m_videoFrameQueue.empty() == false) {
                std::unique_lock<std::mutex> autoLock(m_videoFrameQueueMutex);
                pVideoFrame = m_videoFrameQueue.front();
                m_videoFrameQueue.pop();
            }
            if (pVideoFrame) {
                //printf("encode video frame ts:%u,end:%s\n",pVideoFrame->m_ts,pVideoFrame->m_bEnd?"tue":"false");
                if (pVideoFrame->m_bEnd == true) {
                    m_bRecvEndCmd = true;
                    return -1;
                }
				static AVFrame *src_frame = nullptr;
				static struct SwsContext *sws_ctx = nullptr;
				if (sws_ctx == nullptr) {

					sws_ctx = sws_getContext(width, height, AV_PIX_FMT_RGB24,
						width, height, AV_PIX_FMT_YUV420P,
						SWS_BILINEAR, NULL, NULL, NULL);
					src_frame = av_frame_alloc();
					src_frame->width = width;
					src_frame->height = height;
					src_frame->format = AV_PIX_FMT_RGB24;
					av_image_alloc(src_frame->data, src_frame->linesize, width, height, AV_PIX_FMT_RGB24, 16);
				}
				memcpy(src_frame->data[0], pVideoFrame->m_buf, width*height * 3);
				sws_scale(sws_ctx, src_frame->data, src_frame->linesize, 0, height, pict->data, pict->linesize);
                return 0;
            }
            return -1;
        }

    private:
        std::thread *m_captureThread = nullptr;
        std::thread *m_encodeThread = nullptr;

        std::atomic<bool> m_bInit = {false};
        std::atomic<bool> m_bCaptureing = {false};
        std::atomic<bool> m_bEncodeing = {false};
        std::shared_ptr<ExportConfig> m_config;
        //待编码视频帧队列
        std::mutex m_videoFrameQueueMutex;
        queue<shared_ptr<MyMediaBuf>> m_videoFrameQueue;
        //待编码音频帧队列
        std::mutex m_audioFrameQueueMutex;
        queue<shared_ptr<MyMediaBuf>> m_audioFrameQueue[3];

        //fmpeg 相关变量
        OutputStream *video_st = nullptr;
        OutputStream *audio_st = nullptr;
        AVOutputFormat *fmt = nullptr;
        AVFormatContext *oc = nullptr;
        AVCodec *audio_codec = nullptr;
        AVCodec *video_codec = nullptr;
        int have_video = 0;
        int have_audio = 0;
        int encode_video = 0;
        int encode_audio = 0;
        AVDictionary *opt = NULL;
        std::atomic<bool> m_bRecvEndCmd = {false};
        std::mutex m_mutex;
        std::condition_variable m_cv_uptr;
    };

    MediaExportImpl *g_inst = nullptr;

    /**
     * Process-wide accessor for the single exporter instance.
     * Uses std::call_once so concurrent first calls cannot race on the lazy
     * initialization of g_inst (the previous check-then-new was not
     * thread-safe). The instance is intentionally never deleted.
     */
    MediaExport *getMediaExport() {
        static std::once_flag s_initOnce;
        std::call_once(s_initOnce, []() {
            g_inst = new MediaExportImpl();
        });
        return g_inst;
    }
}