﻿#include "ffmpegthread.h"
#include "Ipcmsg.h"
extern "C" {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>
    #include <libavfilter/buffersink.h>
    #include <libavfilter/buffersrc.h>
    #include <libavutil/fifo.h>
    #include <libavutil/opt.h>
    #include <libavutil/time.h>
    #include <libavutil/imgutils.h>
    #include <libavutil/frame.h>
    #include <libavutil/channel_layout.h>
    #include <libavutil/detection_bbox.h>
}
// Capacity of the per-frame detection-bbox exchange buffer (see run()):
// at most this many boxes are read back from shared memory per video frame.
const unsigned int DEFAULTBOX_NUM=25;

// Tear down the filter graph owned by |ftx| and null out its filter pointers.
//
// avfilter_graph_free() frees every filter context still attached to the
// graph, including the buffersrc/buffersink endpoints, so freeing those two
// individually first (as the previous version did) was redundant; a single
// graph free is the documented teardown path.
static void cleaning_filter_graph(FilteringContext* ftx) {
    if (!ftx)
        return;
    // The graph owns all of its filter contexts; freeing it releases them.
    if (ftx->filter_graph)
        avfilter_graph_free(&ftx->filter_graph);
    // Endpoints are dangling after the graph free — clear them so later
    // rebuilds (init_filter_context) start from a clean state.
    ftx->buffersrc_ctx = nullptr;
    ftx->buffersink_ctx = nullptr;
}

// Exchange one decoded frame with the consumer process through the shared
// memory block at |lp|:
//   - writes the frame's plane data and layout (linesize/offset, pts info,
//     width/height) into the block right after the DATA_HEADER, and
//   - reads back up to |boxCount| detection boxes previously deposited in
//     lp->boxes by the consumer, copying them into |boxes| and then zeroing
//     lp->boxNum to mark them as consumed.
// On return |boxCount| holds the number of boxes actually copied.
// Returns false only when |lp| is null.
//
// NOTE(review): the A plane is copied with the full frame height and its
// size computed from linesize[A]*height — assumes a full-resolution alpha
// plane (or linesize[A]==0 for 3-plane formats); confirm against the pixel
// formats actually produced by the filter graph.
static bool copy_frame_to_array(const AVFrame* frame, LPDATA_HEADER lp, unsigned int boxes[][5],unsigned int& boxCount) {
    if (lp == 0)
        return false;
    // Per-plane byte sizes; U/V planes are half-height (4:2:0 layout assumed
    // — TODO confirm for other chroma subsamplings).
    int size_y_channel = frame->linesize[Y] * frame->height;
    int size_u_channel = frame->linesize[U] * frame->height / 2;
    int size_v_channel = frame->linesize[V] * frame->height / 2;
    int size_alpha_channel = frame->linesize[A] * frame->height;
    const AVPixFmtDescriptor* desc = av_pix_fmt_desc_get((AVPixelFormat)frame->format);
    lp->linesize[Y] = frame->linesize[Y];
    lp->linesize[U] = frame->linesize[U];
    lp->linesize[V] = frame->linesize[V];
    lp->linesize[A] = frame->linesize[A];
    // Planes are laid out back-to-back immediately after the header.
    lp->offset[Y] = sizeof(DATA_HEADER);
    lp->offset[U] = size_y_channel + lp->offset[Y];
    lp->offset[V] = size_u_channel + lp->offset[U];
    lp->offset[A] = size_v_channel + lp->offset[V];
    lp->pkt_dts = frame->pkt_dts;
    lp->height = frame->height;
    lp->width = frame->width;
    // bytewidth should be <= frame->linesize; the latter is the aligned
    // (padded) row stride, the former the number of meaningful bytes per row.
    int bytewidth0 = av_image_get_linesize((AVPixelFormat)frame->format, frame->width, Y);// bytes per row of the Y plane
    int bytewidth1 = av_image_get_linesize((AVPixelFormat)frame->format, frame->width, U);// bytes per row of the U plane
    int bytewidth2 = av_image_get_linesize((AVPixelFormat)frame->format, frame->width, V);// bytes per row of the V plane
    int bytewidth3 = av_image_get_linesize((AVPixelFormat)frame->format, frame->width, A);// bytes per row of the alpha plane

    av_image_copy_plane((uint8_t*)lp + lp->offset[Y], lp->linesize[Y], frame->data[Y], frame->linesize[Y], bytewidth0, frame->height);
    av_image_copy_plane((uint8_t*)lp + lp->offset[U], lp->linesize[U], frame->data[U], frame->linesize[U], bytewidth1, frame->height / 2);
    av_image_copy_plane((uint8_t*)lp + lp->offset[V], lp->linesize[V], frame->data[V], frame->linesize[V], bytewidth2, frame->height / 2);
    av_image_copy_plane((uint8_t*)lp + lp->offset[A], lp->linesize[A], frame->data[A], frame->linesize[A], bytewidth3, frame->height);

    // Drain boxes written by the peer: copy at most min(lp->boxNum, boxCount)
    // entries out of shared memory into the caller's array.
    for (int i = 0; i < lp->boxNum&&i<boxCount; i++) {
        memcpy(boxes[i], lp->boxes[i], sizeof(lp->boxes[i]));
    }
    boxCount = lp->boxNum>boxCount?boxCount:lp->boxNum;
    lp->boxNum =0; // mark boxes as consumed so they are not re-read next frame
    return true;
}


// Construct the worker: run process-wide FFmpeg network init once and seed
// the default RTSP input options (UDP transport, 500 ms max demux delay).
FFmpegThread::FFmpegThread()
{
    // BUG FIX: the guard flag was never set to true, so every constructed
    // instance re-ran avformat_network_init(). Set it after the first call.
    // NOTE(review): the check-then-set is not thread-safe if constructors
    // race on multiple threads — confirm construction is single-threaded.
    static bool ffmpeg_initd = false;
    if(!ffmpeg_initd){
        avformat_network_init();
        ffmpeg_initd = true;
    }
    av_dict_set(&input_config, "rtsp_transport", "udp", 0);
    av_dict_set(&input_config, "max_delay", "500000", 0);
}

// Release every FFmpeg/IPC resource owned by this instance.
// NOTE(review): assumes terminate() has been called first so the worker
// thread is no longer touching these members — confirm at call sites.
FFmpegThread::~FFmpegThread(){
    if(vdecoder_ctx)
        avcodec_free_context(&vdecoder_ctx);
    if(adecoder_ctx)
        avcodec_free_context(&adecoder_ctx);
    if(vencoder_ctx)
        avcodec_free_context(&vencoder_ctx);
    if(aencoder_ctx)
        avcodec_free_context(&aencoder_ctx);
    // LEAK FIX: the old code freed only the FilteringContext holder structs,
    // leaking the AVFilterGraph inside each — free the graphs first.
    if(vfilter_ctx){
        cleaning_filter_graph(vfilter_ctx);
        av_freep(&vfilter_ctx);
    }
    if(afilter_ctx){
        cleaning_filter_graph(afilter_ctx);
        av_freep(&afilter_ctx);
    }
    if(filtered_frame)
        av_frame_free(&filtered_frame);
    if(decode_frame)
        av_frame_free(&decode_frame);
    if(raw_packet)
        av_packet_free(&raw_packet);
    if(encoded_packet)
        av_packet_free(&encoded_packet);
    // LEAK FIX: the option dictionaries populated in the constructor and in
    // open_output_stream were never released.
    av_dict_free(&input_config);
    av_dict_free(&output_config);
    if(ipc_message)
        delete ipc_message;
    if(ipc_mutex)
        delete ipc_mutex;
}

// Record the shared-memory and mutex identifiers and switch IPC mode on.
// No resources are created here; call allocate_ipc_resource() afterwards.
void FFmpegThread::set_enable_ipc(SharedId mmid,const std::string& mxid){
    ipc_enable = true;
    mutx_id = mxid;
    mem_id = mmid;
}

// Create the cross-process resources announced via set_enable_ipc(): a
// shared-memory message buffer (IPC_MSG) and a named process mutex.
// Returns true when the buffer mapped successfully and the mutex could be
// locked once (probe lock, immediately released); false otherwise or when
// IPC mode was never enabled.
// NOTE(review): on failure the freshly new'ed ipc_message/ipc_mutex are kept
// (destructor releases them) — confirm callers treat a false return as fatal.
bool FFmpegThread::allocate_ipc_resource(){
    if(!ipc_enable)
        return false;
    ipc_message = new IPC_MSG;
    ipc_mutex = new CProcessMutex(mutx_id.c_str());
    av_log(NULL, AV_LOG_INFO, "create shared mutex with id: %s\n", mutx_id.c_str());
// mem_id is a string on Windows and an integer key elsewhere (SharedId).
#ifdef _WIN32
    ipc_message->Create(mem_id.c_str(),true);
    av_log(NULL, AV_LOG_INFO, "create shared memmory with id: %s\n", mem_id.c_str());
#else
    ipc_message->Create(mem_id,true);
    av_log(NULL, AV_LOG_INFO, "create shared memmory with id: %d\n", mem_id);
#endif
    // Verify the mapping exists and the mutex is actually acquirable.
    bool success = ipc_message->GetBuffer()!=0;
    success = success && ipc_mutex->Lock();
    if(success){
        av_log(NULL, AV_LOG_INFO, "create ipc_resource success\n");
        ipc_mutex->UnLock();
    }
    else
        av_log(NULL, AV_LOG_ERROR, "create ipc_resource failed\n");
    return success;
}

bool FFmpegThread::open_input_stream(const char* url){
    if(ic)
        return false;
    int ret = avformat_open_input(&ic, url, 0, &input_config);//参数3用于指定输入视频的封装格式，NULL表示让ffmpeg自动检测
    if(ret!=0){
        av_log(NULL, AV_LOG_ERROR,"codec open url %s.",url);
        return false;
    }
    if(avformat_find_stream_info(ic, 0)>=0){
        input_vid = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
        input_aid = av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
        input_streams_count = ic->nb_streams;
        if(input_vid>=0)
            vstream = ic->streams[input_vid];
        if(input_aid>=0)
            astream = ic->streams[input_aid];
        return true;
    }
    else
        return false;
}

// Allocate and open a decoder context for the stream at |stream_index|
// (must match input_vid or input_aid). Returns false if the stream is
// unknown, no decoder exists, parameters cannot be applied, or open fails.
bool FFmpegThread::init_decode_context(const unsigned int stream_index){
    AVStream* stream = nullptr;
    // Explicit casts avoid signed/unsigned comparison surprises when the
    // find_best_stream results are negative (stream absent).
    if(static_cast<int>(stream_index) == input_vid)
        stream = vstream;
    else if(static_cast<int>(stream_index) == input_aid)
        stream = astream;
    if(!stream)
        return false;
    const AVCodec* codec = avcodec_find_decoder(stream->codecpar->codec_id);
    if (!codec)
        return false;
    int ret;
    if(static_cast<int>(stream_index) == input_vid){
        vdecoder_ctx = avcodec_alloc_context3(codec);
        vdecodec = codec;
        //vc->framerate = av_guess_frame_rate(ic, vstream, NULL);// probing fps before decode; result is not always accurate
        // BUG FIX: check the parameter copy, and configure timing BEFORE
        // avcodec_open2() so the codec sees the intended time base.
        ret = avcodec_parameters_to_context(vdecoder_ctx, stream->codecpar);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "copying codec parameters failed, stream index=%d.", stream_index);
            return false;
        }
        // NOTE(review): avg_frame_rate may be {0,0} for some inputs, which
        // makes time_base 0/0 — confirm upstream sources always report it.
        vdecoder_ctx->time_base = av_inv_q(stream->avg_frame_rate);
        vdecoder_ctx->framerate = stream->avg_frame_rate;
        ret = avcodec_open2(vdecoder_ctx, vdecodec, NULL);
    }
    else{
        adecoder_ctx = avcodec_alloc_context3(codec);
        adecodec = codec;
        ret = avcodec_parameters_to_context(adecoder_ctx, stream->codecpar);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "copying codec parameters failed, stream index=%d.", stream_index);
            return false;
        }
        ret = avcodec_open2(adecoder_ctx, adecodec, NULL);
    }
    if (ret != 0) {
        av_log(NULL, AV_LOG_ERROR,"codec open for decoding failed, stream index=%d.",stream_index);
        return false;
    }
    return true;
}

// Store a new filtergraph description for the given media type and raise
// the matching rebuild flag; init_filter_context() picks it up later.
// An empty description is ignored.
void FFmpegThread::update_filter_description(AVMediaType media_type, const std::string& desc){
    if (desc.empty())
        return;
    switch (media_type) {
    case AVMEDIA_TYPE_VIDEO:
        vdesc = desc;
        v_filter_update_enable = true;
        break;
    case AVMEDIA_TYPE_AUDIO:
        adesc = desc;
        a_filter_update_enable = true;
        break;
    default:
        break;
    }
}

// (Re)build the filter graph for the stream at |stream_index|, using the
// description last stored by update_filter_description(). Allocates the
// FilteringContext holder on first use, tears down any previous graph, then
// creates buffer source/sink endpoints configured from the decoder context
// and parses the stored description between them.
// Returns true when the graph parsed and configured successfully.
// Must run on the decode thread after the decoder contexts are open.
bool FFmpegThread::init_filter_context(const unsigned int stream_index){
    FilteringContext* filter_ctx=nullptr;
    bool update_enable = false;
    // "null" is the identity video filter — used if no description is set.
    const char* desc="null";
    if(ic->streams[stream_index]->codecpar->codec_type==AVMEDIA_TYPE_VIDEO){
        if(vfilter_ctx==nullptr){
            vfilter_ctx=(FilteringContext*)av_malloc(sizeof(FilteringContext));
            memset(vfilter_ctx,0,sizeof(FilteringContext));
        }
        filter_ctx = vfilter_ctx;
        update_enable = v_filter_update_enable;
        v_filter_update_enable = false; // consume the pending-update flag
        // NOTE(review): points into the std::string member; stays valid only
        // while vdesc is not reassigned during this call.
        desc = &vdesc[0];
    }
    else if(ic->streams[stream_index]->codecpar->codec_type==AVMEDIA_TYPE_AUDIO){
        if(afilter_ctx==nullptr){
            afilter_ctx=(FilteringContext*)av_malloc(sizeof(FilteringContext));
            memset(afilter_ctx,0,sizeof(FilteringContext));
        }
        filter_ctx = afilter_ctx;
        update_enable = a_filter_update_enable;
        a_filter_update_enable = false;
        desc = &adesc[0];
    }
    // NOTE(review): with '&&' this only rejects streams that are neither
    // video nor audio (both sides false only then) — if the intent was
    // "skip when no update is pending", '||' was probably meant; confirm.
    if(filter_ctx==nullptr&&!update_enable)
        return false;
    // Drop any previous graph before building the new one.
    cleaning_filter_graph(filter_ctx);
    filter_ctx->buffersink_ctx = 0;
    filter_ctx->buffersrc_ctx = 0;
    filter_ctx->filter_graph = avfilter_graph_alloc();
    AVFilter* buffersrc = 0;
    AVFilter* buffersink = 0;
    char arg[512];
    if (filter_ctx == vfilter_ctx) {
        buffersrc = const_cast<AVFilter*>(avfilter_get_by_name("buffer"));
        buffersink = const_cast<AVFilter*>(avfilter_get_by_name("buffersink"));
        snprintf(arg, sizeof(arg),
        "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
        vdecoder_ctx->width, vdecoder_ctx->height, vdecoder_ctx->pix_fmt,
        vdecoder_ctx->time_base.num, vdecoder_ctx->time_base.den,
        vdecoder_ctx->sample_aspect_ratio.num,
        vdecoder_ctx->sample_aspect_ratio.den);// frames fed into the graph come from the decoder, so the source args must mirror the decoder parameters
    }
    else if (filter_ctx == afilter_ctx) {
        buffersrc = const_cast<AVFilter*>(avfilter_get_by_name("abuffer"));
        buffersink = const_cast<AVFilter*>(avfilter_get_by_name("abuffersink"));
        snprintf(arg, sizeof(arg),
            "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%016lx",
            adecoder_ctx->time_base.num, adecoder_ctx->time_base.den, adecoder_ctx->sample_rate,
            av_get_sample_fmt_name(adecoder_ctx->sample_fmt),
            adecoder_ctx->channel_layout);
    }

    // NOTE(review): return codes of the two create_filter calls are not
    // checked; a failure here surfaces later as a crash/parse error.
    int ret = avfilter_graph_create_filter(&filter_ctx->buffersrc_ctx, buffersrc, "in", arg, NULL, filter_ctx->filter_graph);
    ret = avfilter_graph_create_filter(&filter_ctx->buffersink_ctx, buffersink, "out", NULL, NULL, filter_ctx->filter_graph);

    if (filter_ctx == vfilter_ctx) {
        // Constrain the sink to the YUV 4:2:0 formats the IPC consumer expects.
        enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUVJ420P,AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
        AVPixelFormat fmt = AV_PIX_FMT_BGR24;// (unused) candidate for forcing an RGB output format AV_PIX_FMT_BGR24
        /*ret = av_opt_set_bin(ftx->buffersink_ctx, "pix_fmts",
            (uint8_t*)&fmt, sizeof(fmt),
            AV_OPT_SEARCH_CHILDREN);*/
        ret=av_opt_set_int_list(filter_ctx->buffersink_ctx, "pix_fmts", pix_fmts,
            AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    }
    if (filter_ctx == afilter_ctx) {
            // Configure the audio sink to pass the decoder's format through.
        ret = av_opt_set_bin(filter_ctx->buffersink_ctx, "sample_fmts",
            (uint8_t*)&adecoder_ctx->sample_fmt, sizeof(adecoder_ctx->sample_fmt),
            AV_OPT_SEARCH_CHILDREN);
        ret = av_opt_set_bin(filter_ctx->buffersink_ctx, "channel_layouts",
            (uint8_t*)&adecoder_ctx->channel_layout,
            sizeof(adecoder_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        ret = av_opt_set_bin(filter_ctx->buffersink_ctx, "sample_rates",
            (uint8_t*)&adecoder_ctx->sample_rate, sizeof(adecoder_ctx->sample_rate),
            AV_OPT_SEARCH_CHILDREN);
    }
    // Wire the parsed description between our "in" source and "out" sink.
    // Naming is from the description's point of view: |outputs| is what the
    // graph's input connects to, |inputs| what its output connects to.
    AVFilterInOut* outputs = avfilter_inout_alloc();
    AVFilterInOut* inputs = avfilter_inout_alloc();
    outputs->name = av_strdup("in");
    outputs->filter_ctx = filter_ctx->buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = filter_ctx->buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    ret=avfilter_graph_parse_ptr(filter_ctx->filter_graph, desc, &inputs, &outputs, NULL);
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    if (ret < 0) {
        av_log(NULL, AV_LOG_WARNING, "avfilter_graph_parse_ptr failed.\n");
        return false;
    }
    ret = avfilter_graph_config(filter_ctx->filter_graph, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_WARNING, "avfilter_graph_config failed.\n");
        return false;
    }
    return true;
}

//指定编码器的参数，一般与解码器参数保持一致
bool FFmpegThread::init_encode_context(AVMediaType type){
    AVCodecContext* enc_ctx;
    const AVCodec* encoder;
    if(type==AVMEDIA_TYPE_VIDEO){
        vencodec = avcodec_find_encoder(vdecoder_ctx->codec_id);
        vencoder_ctx = avcodec_alloc_context3(vencodec);
        vencoder_ctx->height = vdecoder_ctx->height;
        vencoder_ctx->width = vdecoder_ctx->width;
        vencoder_ctx->sample_aspect_ratio = vdecoder_ctx->sample_aspect_ratio;
        //vencoder_ctx->bit_rate = 3000;
        //vencoder_ctx->thread_count = 3;
        // x264 编码器参数调整
        av_opt_set(vencoder_ctx->priv_data, "bf", "0", 0);//不使用B帧
        av_opt_set(vencoder_ctx->priv_data, "tune", "zerolatency", 0);//0缓存编码，降低压缩率和压缩质量
        av_opt_set(vencoder_ctx->priv_data, "preset", "ultrafast", 0);//快速编码，降低图像质量
        if (vencodec->pix_fmts)
            vencoder_ctx->pix_fmt = vencodec->pix_fmts[0];
        else
            vencoder_ctx->pix_fmt = vdecoder_ctx->pix_fmt;//如果根据codec_id没有找到合适的编码格式，则指定为pix_fmt
        vencoder_ctx->time_base = av_inv_q(vdecoder_ctx->framerate);
        enc_ctx = vencoder_ctx;
        encoder = vencodec;
    }
    else if(type==AVMEDIA_TYPE_AUDIO){
        aencodec = avcodec_find_encoder(adecoder_ctx->codec_id);
        aencoder_ctx = avcodec_alloc_context3(aencodec);
        aencoder_ctx->sample_rate = adecoder_ctx->sample_rate;
        aencoder_ctx->channel_layout = adecoder_ctx->channel_layout;
        aencoder_ctx->channels = av_get_channel_layout_nb_channels(aencoder_ctx->channel_layout);
        /* take first format from list of supported formats */
        aencoder_ctx->sample_fmt = aencodec->sample_fmts[0];
        aencoder_ctx->time_base = { 1, aencoder_ctx->sample_rate };
        enc_ctx = aencoder_ctx;
        encoder = aencodec;
    }
    else
        return false;
    int ret = avcodec_open2(enc_ctx, encoder, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", type);
        return false;
    }
    return true;
}

// Create the output (muxer) context for |url| with container |fmt|, add one
// stream per open encoder, open the IO layer when the muxer needs a file,
// and write the container header. Returns true on success.
bool FFmpegThread::open_output_stream(const char* url,const char* fmt){
    int ret = avformat_alloc_output_context2(&oc, NULL, fmt, url);
    // BUG FIX: a failed allocation left oc null and crashed on the next line.
    if (ret < 0 || oc == nullptr) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate output context for '%s'\n", url);
        return false;
    }
    // Some containers require codec extradata in the global header.
    if (oc->oformat->flags & AVFMT_GLOBALHEADER){
        if(vencoder_ctx)
            vencoder_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        if(aencoder_ctx)
            aencoder_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    // Mirror each opened encoder as an output stream.
    if(vencoder_ctx&&avcodec_is_open(vencoder_ctx)!=0){
        video_out_stream = avformat_new_stream(oc, NULL);
        ret = avcodec_parameters_from_context(video_out_stream->codecpar, vencoder_ctx);
        output_vid = video_out_stream->index;
        video_out_stream->time_base = vencoder_ctx->time_base;
    }
    if(aencoder_ctx&&avcodec_is_open(aencoder_ctx)!=0){
        audio_out_stream = avformat_new_stream(oc, NULL);
        ret = avcodec_parameters_from_context(audio_out_stream->codecpar, aencoder_ctx);
        output_aid = audio_out_stream->index;
        audio_out_stream->time_base = aencoder_ctx->time_base;
    }
    // Open the byte IO context only for file-backed muxers.
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open2(&(oc->pb), url, AVIO_FLAG_WRITE, NULL, NULL);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", url);
            return false;
        }
    }
    if (fmt&&strcmp(fmt, "rtsp")==0) {
        av_dict_set(&output_config, "rtsp_transport", "udp", 0);
        av_dict_set(&output_config, "max_delay", "50000", 0);
    }
    ret = avformat_write_header(oc,&output_config);
    if (ret < 0) {
        char buf[1024] = { 0 };
        av_strerror(ret, buf, sizeof(buf) - 1);
        av_log(NULL, AV_LOG_ERROR, "Could not write header for output file %s(incorrect codec parameters ?): %s\n",url,buf);
        return false;
    }
    // BUG FIX: the success path previously returned false.
    return true;
}

// Flush both encoders, finalize the container, and release the muxer.
void FFmpegThread::close_output_stream(){
    // A NULL frame drains any packets still buffered inside the encoder.
    if(video_out_stream)
        encode_write_frame(output_vid,NULL);
    if(audio_out_stream)
        encode_write_frame(output_aid,NULL);
    if (oc != nullptr) {
        av_write_trailer(oc);
        // BUG FIX: the old test was inverted — the IO context must be closed
        // when the muxer DOES use a file (AVFMT_NOFILE not set), mirroring
        // the avio_open2 condition in open_output_stream.
        if(!(oc->oformat->flags & AVFMT_NOFILE))
            avio_closep(&oc->pb);
        avformat_free_context(oc);
        // Drop dangling pointers so a later open/close cycle is safe.
        oc = nullptr;
        video_out_stream = nullptr;
        audio_out_stream = nullptr;
    }
}

// Close the demuxer context, if any; avformat_close_input() also nulls |ic|.
void FFmpegThread::close_input_stream(){
    if (ic == nullptr)
        return;
    avformat_close_input(&ic);
}

// Encode |write_frame| on the encoder owned by output stream |stream_index|
// and mux every packet the encoder produces. Passing NULL as |write_frame|
// flushes the encoder. Returns 0 on success (including "encoder drained"),
// a negative AVERROR otherwise, and -1 for an unknown stream index.
int FFmpegThread::encode_write_frame(const unsigned int stream_index,AVFrame* write_frame){
    if(static_cast<int>(stream_index)!=output_vid&&static_cast<int>(stream_index)!=output_aid)
        return -1;
    // (The previous version also looked up the AVStream here but never used
    // it — timestamps are rescaled via oc->streams below.)
    AVCodecContext* context =
        (static_cast<int>(stream_index)==output_vid) ? vencoder_ctx : aencoder_ctx;
    int ret = avcodec_send_frame(context, write_frame);
    if (ret < 0)
        return ret;
    // Lazily allocate the reusable output packet.
    if(encoded_packet==nullptr){
        encoded_packet = av_packet_alloc();
    }
    if(!encoded_packet)
        return AVERROR(ENOMEM);
    while (ret >= 0) {
        ret = avcodec_receive_packet(context, encoded_packet);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0; // drained — this is the normal loop exit, not an error
        if (ret < 0)
            return ret; // BUG FIX: real errors previously fell through and muxed a stale packet
        /* prepare packet for muxing */
        encoded_packet->stream_index = stream_index;
        av_packet_rescale_ts(encoded_packet, context->time_base, oc->streams[stream_index]->time_base);
        av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
        /* mux encoded frame (takes ownership of the packet's payload) */
        ret = av_interleaved_write_frame(oc, encoded_packet);
    }
    return ret;
}

void FFmpegThread::attach_bboxes_side_data_for_frame(int objcount, unsigned int (*boxes)[5]){
    if(objcount>0&&decode_frame){
        AVDetectionBBoxHeader* header = av_detection_bbox_create_side_data(decode_frame,objcount);
        header->nb_bboxes = objcount;
        sprintf(header->source, "%s", "side_data_detection_bboxes");
        for(int i=0;i<objcount;i++){
            AVDetectionBBox* bbox=av_get_detection_bbox(header,i);
            bbox->x=boxes[i][0];
            bbox->y=boxes[i][1];
            bbox->w=boxes[i][2] - boxes[i][0];
            bbox->h=boxes[i][3] - boxes[i][1];
            sprintf(bbox->detect_label, "classID=%d", boxes[i][4]);
            bbox->detect_confidence = {1,1};
            bbox->classify_count = 0;
        }
    }
}

// Worker loop: demux -> decode -> (publish frame + fetch boxes over IPC for
// video) -> filter -> encode -> mux, until termin is set or read fails.
// Requires open_input_stream(), the decoder contexts, the filter contexts
// and (optionally) the output stream to be initialized beforehand.
void FFmpegThread::run(){
    raw_packet = av_packet_alloc();
    decode_frame = av_frame_alloc();
    filtered_frame = av_frame_alloc();
    int ret = 0;
    while(!termin){
        int re = av_read_frame(ic, raw_packet);
        if (re != 0)break; // EOF or read error terminates the loop
        int index = raw_packet->stream_index;
        AVCodecContext* cc = 0;
        if (ic->streams[index] == vstream) {
            cc = vdecoder_ctx;
            // Rescale video timestamps into the decoder time base (1/fps).
            av_packet_rescale_ts(raw_packet, vstream->time_base, av_inv_q(vdecoder_ctx->framerate));
        }
        if (ic->streams[index] == astream){
            cc = adecoder_ctx;
        }
        // Skip streams we are not decoding (subtitles, data, ...).
        if(!cc){
            av_packet_unref(raw_packet);
            continue;
        }
        ret = avcodec_send_packet(cc, raw_packet);
        if (ret != 0) {
            av_packet_unref(raw_packet);
            continue;
        }
        // One packet may yield zero or more frames.
        for (;;) {
            ret = avcodec_receive_frame(cc, decode_frame);
            if (ret != 0) {
                break;
            }
            // Rebuild filter graphs on the decode thread when a new
            // description was posted via update_filter_description().
            if(cc==vdecoder_ctx&&v_filter_update_enable)
                init_filter_context(vstream->index);
            if(cc==adecoder_ctx&&a_filter_update_enable)
                init_filter_context(astream->index);

            FilteringContext* filter =0;
            int write_stream_index = -1;
            if(cc==vdecoder_ctx){
                // Publish the decoded frame to the peer process and collect
                // any detection boxes it left in shared memory.
                if(ipc_message!=nullptr&&ipc_mutex!=nullptr&&ipc_mutex->Lock()){
                    //attach_bboxes_side_data_for_frame for rendering bboxes.
                    unsigned int bbox[DEFAULTBOX_NUM][5];
                    unsigned int box_number = DEFAULTBOX_NUM;
                    void* plane = ipc_message->GetBuffer();
                    copy_frame_to_array(decode_frame, (LPDATA_HEADER)plane,bbox,box_number);
                    ipc_mutex->UnLock();
                    if(box_number>0){
                        attach_bboxes_side_data_for_frame(box_number,bbox);
                    }
                }
                ret = av_buffersrc_add_frame_flags(vfilter_ctx->buffersrc_ctx, decode_frame,0);
                filter = vfilter_ctx;
                write_stream_index = output_vid;
            }
            else if(cc==adecoder_ctx) {
                ret = av_buffersrc_add_frame(afilter_ctx->buffersrc_ctx, decode_frame);
                filter = afilter_ctx;
                write_stream_index = output_aid;
            }
            // NOTE(review): if init_filter_context was never run for this
            // stream, vfilter_ctx/afilter_ctx (and thus |filter|) are null
            // and the calls above/below dereference them — confirm setup
            // order guarantees the graphs exist before run() starts.
            // Drain every frame the filter graph produces.
            while (1) {
                ret = av_buffersink_get_frame(filter->buffersink_ctx, filtered_frame);
                if(ret>=0&& oc!= nullptr){
                    // Video: only forward frames with increasing dts to keep
                    // the muxer monotonic; audio is forwarded as-is.
                    if(cc==vdecoder_ctx&&filtered_frame->pkt_dts>last_frame_pkt_dts){
                        last_frame_pkt_dts = filtered_frame->pkt_dts;
                        encode_write_frame(write_stream_index,filtered_frame);
                    }
                    else if(cc==adecoder_ctx){
                        encode_write_frame(write_stream_index,filtered_frame);
                    }
                }
                av_frame_unref(filtered_frame);
                if (ret < 0) {
                    // EAGAIN/EOF just means the graph is drained for now.
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        ret = 0;
                    break;
                }
            }
        }
        av_packet_unref(raw_packet);
        av_usleep(100); // brief yield to avoid spinning the demux loop
    }
}

// Request the worker loop to stop, then block until the thread has exited.
// Safe to call when the thread was never started.
// NOTE(review): |termin| is a plain bool written here and read in run() —
// presumably fine on the targeted platforms, but an atomic would be safer.
void FFmpegThread::terminate(){
    termin = true;
    if (!worker.joinable())
        return;
    worker.join();
}

void FFmpegThread::start(){
    std::thread* t= new(&worker)std::thread(&FFmpegThread::run,this);
}
