#include "rtspthread.h"

#include "public/public.h"
#include "qdebug.h"
#include "videodecoderthread.h"

#include <QAudioOutput>
#include <QDateTime>
// Construct the thread object. All streaming state is initialised lazily in
// run(); nothing needs to happen here beyond forwarding the Qt parent.
RtspThread::RtspThread(QObject *parent)
    : QThread(parent)
{
}


// Store the RTSP URL that run() will connect to when the thread starts.
// NOTE(review): m_strUrl is not synchronised — set this before start(),
// not while the capture loop is running.
void RtspThread::setUrl(QString strUrl)
{
    m_strUrl = strUrl;
}

// Toggle MP4 recording. The capture loop in run() opens a new output file on
// the rising edge of this flag and finalises it on the falling edge.
// NOTE(review): m_bRecording is written here from another thread without
// synchronisation — presumably acceptable for a bool flag, but confirm.
void RtspThread::setRecording(bool bRecord)
{
    m_bRecording = bRecord;
}

// Ask the capture loop in run() to exit at its next iteration.
// NOTE(review): this hides the non-virtual QThread::quit(); calling quit()
// through a QThread* pointer will NOT stop the loop. m_bRun is also not
// atomic — consider std::atomic<bool> in the header.
void RtspThread::quit()
{
    m_bRun = false;
}

// Thread entry point. Opens the RTSP input, hands video packets to a
// dedicated VideoDecoderThread, decodes and plays audio locally through
// QAudioOutput, and — while m_bRecording is set — remuxes the incoming H.26x
// packets into a timestamped MP4 file. Emits connecting()/connectSucc()/
// connectFail()/recordEnd() to report progress; all resources are released
// at the `cleanup` label regardless of how the loop exits.
void RtspThread::run()
{
    int  nVideoIndex = -1;
    int  nAudioIndex = -1;
    AVFormatContext * m_pInFmtCtx = avformat_alloc_context();
    // NOTE(review): this value is overwritten after avformat_open_input()
    // below, so setting it here has no lasting effect.
    m_pInFmtCtx->max_analyze_duration=50;
    AVPacket        * pPacket = NULL;
    AVCodecContext * pVideoCodecCtx = NULL;
    AVCodecContext * pAudioCodecCtx = NULL;
    // Video rendering is too time-consuming, so it is split off into its own
    // thread; audio needs decoding (and re-encoding for recording), so it is
    // handled inline in this thread.
    VideoDecoderThread  * pVideoThread = NULL;
    struct SwrContext *pSwrCtx = NULL;
    QAudioOutput * pAudioOutput = NULL;
    QIODevice * pAudioDevice = NULL;
    uint8_t * pAudioBuf= NULL;
    int nAudioBufLen = 0;
    AVFrame  * pAudioFrame = NULL;
    QString strRecordName = "";

    // Variables used for recording
    AVFormatContext *ofmt=NULL;
    AVCodecContext  *out_video_ctx=NULL,*out_audio_ctx=NULL;
    AVStream		*o_audio_stream=NULL,*o_video_stream=NULL;
    AVPacket         pktV;
    AVAudioFifo		*fifo=NULL;
    const AVOutputFormat  *fmt=NULL;
    int				size=0;
    int output_frame_size = 0;
    int64_t nStartRecordDts = 0;
    int nVideoRecordIndex = 0;
    int				sps_pps_size=0;
    QString strRecordPath = "";
    int nAudioPts = 0;
    bool bMute = false;
    m_bRecording= false;
    m_bRun = true;
    AVDictionary *optionsDict = NULL;
    if(m_strUrl.length() > 0)
    {
        emit connecting();
        // Low-latency RTSP options. NOTE(review): "stimeout" is the legacy
        // option name; newer FFmpeg builds use "timeout" — verify against the
        // FFmpeg version actually linked.
        av_dict_set(&optionsDict, "rtsp_transport", "udp", 0);
        av_dict_set(&optionsDict, "tune", "zerolatency", 0);
        av_dict_set(&optionsDict, "stimeout", "2000000", 0);
        av_dict_set(&optionsDict, "buffer_size", "10240000", 0);
        av_dict_set(&optionsDict, "max_delay", "5000000", 0);
        av_dict_set(&optionsDict, "fflags", "nobuffer", 0);
        if(avformat_open_input(&m_pInFmtCtx, m_strUrl.toStdString().c_str(), NULL, &optionsDict) != 0)
        {
            qDebug()<<"RTSP:打开URL视频失败！"+m_strUrl;
            emit connectFail();
            goto cleanup;
        }

        // Latency reduction: limit how much data stream probing may examine.
        // NOTE(review): 5000 * AV_TIME_BASE is 5000 *seconds*, which does not
        // actually shorten analysis — 5 * AV_TIME_BASE was probably intended.
        m_pInFmtCtx->probesize = 100 * 1024;
        m_pInFmtCtx->max_analyze_duration = (int64_t)5000 * AV_TIME_BASE;

        if(avformat_find_stream_info(m_pInFmtCtx, NULL) < 0)
        {
            qDebug()<<"RTSP:查找流失败！"+m_strUrl;
            emit connectFail();
            goto cleanup;
        }

        // Locate the first video and audio streams and open decoders for them.
        for(unsigned int i = 0; i < m_pInFmtCtx->nb_streams; i++)
        {
            if(m_pInFmtCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            {
                nVideoIndex = i;
                enum AVPixelFormat hw_pix_fmt = AV_PIX_FMT_NONE;
                pVideoCodecCtx = openDecoder(m_pInFmtCtx->streams[i]->codecpar,hw_pix_fmt);
                if(pVideoCodecCtx)
                {
                    if(pVideoThread == NULL)
                    {
                        // Decoding/rendering runs on its own thread; frames are
                        // forwarded to our updateFrame signal.
                        pVideoThread = new VideoDecoderThread(parent(),pVideoCodecCtx,av_q2d(m_pInFmtCtx->streams[i]->time_base),hw_pix_fmt);
                        connect(pVideoThread,&VideoDecoderThread::updateFrame,this,[this](QImage *img){
                            emit updateFrame(img);},Qt::DirectConnection);
                        pVideoThread->start();
                    }
                }
                else
                {
                    qDebug()<<"RTSP:打开解码器失败 "+m_strUrl;
                    emit connectFail();
                    goto cleanup;
                }

            }
            if(m_pInFmtCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
            {
                nAudioIndex = i;
                enum AVPixelFormat hw_pix_fmt = AV_PIX_FMT_NONE;
                pAudioCodecCtx = openDecoder(m_pInFmtCtx->streams[i]->codecpar,hw_pix_fmt);
            }
        }
        if(nVideoIndex <0)
        {
            qDebug()<<"RTSP:连接成功，但没有找到视频流"+m_strUrl;
            emit connectFail();
            goto cleanup;
        }
        if(pPacket == NULL)
            pPacket = av_packet_alloc();
        // NOTE(review): av_read_frame() replaces the packet's buffer anyway,
        // so this pre-allocation only serves as an early allocation check.
        if(av_new_packet(pPacket, pVideoCodecCtx->width * pVideoCodecCtx->height) != 0)
        {
            emit connectFail();
            goto cleanup;
        }

        int nFail = 0;
        emit connectSucc();
        // Main capture loop: read packets until quit() clears m_bRun or
        // av_read_frame fails more than 5 consecutive times.
        while (m_bRun)
        {
            if(av_read_frame(m_pInFmtCtx, pPacket) >= 0)
            {
                // Rising edge of m_bRecording: open the MP4 muxer and set up
                // the output video (and optionally audio) streams.
                if(m_bRecording && strRecordName.length() == 0 &&pVideoCodecCtx->width>0 && pVideoCodecCtx->height>0)
                {
                    //QDateTime t = QDateTime::currentDateTime();
                    strRecordName=getVideoPath() + QDateTime::currentDateTime().toString("yyyyMMddhhmmss")+".mp4";
                    avformat_alloc_output_context2(&ofmt,NULL,NULL,strRecordName.toLatin1().data());
                    fmt=ofmt->oformat;
                    o_video_stream=AddVideoStream(ofmt,&out_video_ctx,m_pInFmtCtx,nVideoIndex,false,false,pVideoCodecCtx->width , pVideoCodecCtx->height);
                    if(!o_video_stream)
                    {
                        emit connectFail();
                        goto cleanup;
                    }
                    // Copy SPS/PPS extradata so the MP4 can be decoded standalone.
                    // NOTE(review): relies on avcodec_parameters_copy() inside
                    // AddVideoStream having allocated extradata of this size.
                    sps_pps_size=m_pInFmtCtx->streams[nVideoIndex]->codecpar->extradata_size;

                    for(int i=0;i<sps_pps_size;i++)
                        o_video_stream->codecpar->extradata[i]=m_pInFmtCtx->streams[nVideoIndex]->codecpar->extradata[i];
                    o_video_stream->codecpar->extradata_size=sps_pps_size;

                    ofmt->avoid_negative_ts = 0;

                    if (nAudioIndex > 0)
                    {
                        o_audio_stream=AddAudioStream(ofmt,&out_audio_ctx,m_pInFmtCtx,nAudioIndex,false,true);
                        if(!o_audio_stream)
                        {
                            emit connectFail();
                            goto cleanup;
                        }

                        fifo = av_audio_fifo_alloc(out_audio_ctx->sample_fmt, out_audio_ctx->channels, 1);
                        if (!fifo)
                        {
                            emit connectFail();
                            goto cleanup;
                        }
                        output_frame_size = out_audio_ctx->frame_size;
                    }

                    if(!(fmt->flags&AVFMT_NOFILE))
                    {
                        if(avio_open(&ofmt->pb,strRecordName.toLatin1().data(),AVIO_FLAG_WRITE)<0)
                        {
                            emit connectFail();
                            goto cleanup;
                        }
                    }
                    if(avformat_write_header(ofmt,NULL)<0)
                    {
                        emit connectFail();
                        goto cleanup;
                    }
                    nStartRecordDts = pPacket->dts;
                    nVideoRecordIndex = 0;
                    nAudioPts = 0;
                }

                // Video packet: forward to the decoder thread and, while
                // recording, remux a copy into the MP4.
                if(pPacket->stream_index == nVideoIndex && pVideoThread != NULL)
                {
                    pVideoThread->addPacket(pPacket);
                    if(strRecordName.length() > 0)
                    {
                        size=pPacket->size;
                        av_new_packet(&pktV,size);
                        memset(pktV.data,0,size);
                        memcpy(pktV.data,pPacket->data,size);
                        pktV.flags=pPacket->flags;
                        // Streams without timestamps get synthetic 3600-tick
                        // spacing (90kHz / 25fps); otherwise rebase to the
                        // first recorded packet's dts.
                        if(pPacket->dts == 0 && pPacket->pts == 0)
                            pktV.pts = pktV.dts = nVideoRecordIndex*3600;
                        else
                            pktV.pts = pktV.dts = pPacket->dts - nStartRecordDts;
                        pktV.duration = 3600;
                        pktV.stream_index=nVideoIndex;
                        av_interleaved_write_frame(ofmt, &pktV);
                        nVideoRecordIndex++;
                        av_packet_unref(&pktV);
                    }
                }
                // Audio packet: decode, resample to S16 and play locally.
                if(pPacket->stream_index == nAudioIndex && pAudioCodecCtx)
                {
                    avcodec_send_packet(pAudioCodecCtx,pPacket);
                    if(pAudioFrame == NULL)
                        pAudioFrame = av_frame_alloc();
                    if (avcodec_receive_frame(pAudioCodecCtx,pAudioFrame) == 0)
                    {
                        //setPts(pAudioFrame->pts*av_q2d(m_pInFmtCtx->streams[nAudioIndex]->time_base));
                        //setPts(pAudioFrame->pts * m_pInFmtCtx->streams[nAudioIndex]->time_base.num * 1000/m_pInFmtCtx->streams[nAudioIndex]->time_base.den);
                        // if(strRecordName.length() > 0)
                        // {
                        //     if(AudioResampler(pAudioCodecCtx,out_audio_ctx,pAudioFrame,fifo)<0)
                        //     {
                        //         emit connectFail();
                        //         goto cleanup;
                        //     }

                        //     if(av_audio_fifo_size(fifo) >= output_frame_size)
                        //     {
                        //         if(LoadEncodeAndWrite(fifo,ofmt,o_audio_stream,out_audio_ctx,nAudioIndex,m_pInFmtCtx,nAudioPts)<0)
                        //         {
                        //             emit connectFail();
                        //             goto cleanup;
                        //         }
                        //     }
                        // }
                        if(!bMute){
                            // Lazily create the resampler (decoded fmt -> S16).
                            if(pSwrCtx == NULL)
                            {
                                pSwrCtx = swr_alloc();
                                swr_alloc_set_opts(pSwrCtx,
                                                   av_get_default_channel_layout(pAudioCodecCtx->channels),
                                                   AV_SAMPLE_FMT_S16,
                                                   pAudioCodecCtx->sample_rate,
                                                   pAudioCodecCtx->channels,
                                                   pAudioCodecCtx->sample_fmt,
                                                   pAudioCodecCtx->sample_rate,
                                                   0, nullptr);
                                swr_init(pSwrCtx);
                            }
                            // Lazily open the Qt audio sink matching the stream.
                            if(pAudioOutput == NULL)
                            {
                                QAudioFormat audioFormat;
                                audioFormat.setSampleRate(pAudioCodecCtx->sample_rate);
                                audioFormat.setChannelCount(pAudioCodecCtx->channels);
                                audioFormat.setSampleSize(8*av_get_bytes_per_sample(AV_SAMPLE_FMT_S16));
                                audioFormat.setByteOrder(QAudioFormat::LittleEndian);
                                audioFormat.setSampleType(QAudioFormat::UnSignedInt);// sample type for playback
                                audioFormat.setCodec("audio/pcm");
                                pAudioOutput = new QAudioOutput(audioFormat);
                                pAudioDevice = pAudioOutput->start();
                            }
                            // Buffer sized for the first frame's sample count;
                            // NOTE(review): assumes nb_samples never grows after
                            // the first frame — confirm for the codecs in use.
                            if(pAudioBuf == NULL)
                            {
                                nAudioBufLen = av_samples_get_buffer_size(NULL, pAudioCodecCtx->channels,
                                                                          pAudioFrame->nb_samples,
                                                                          AV_SAMPLE_FMT_S16,
                                                                          0);
                                pAudioBuf = (uint8_t*)av_malloc(nAudioBufLen * sizeof(uint8_t));
                            }
                            // NOTE(review): swr_convert's out_count parameter is
                            // in samples per channel, but nAudioBufLen is a byte
                            // count; it only works because bytes >= samples.
                            int len = swr_convert(pSwrCtx,
                                                  &pAudioBuf,  nAudioBufLen,
                                                  (const uint8_t **)pAudioFrame->data, pAudioFrame->nb_samples);

                            if(len > 0)
                            {
                                // NOTE(review): writes the full buffer length
                                // rather than len * channels * bytes_per_sample;
                                // correct only while nb_samples stays constant.
                                pAudioDevice->write((const char*)pAudioBuf, nAudioBufLen);
                            }
                        }
                    }
                }
                nFail = 0;
                av_packet_unref(pPacket);
                // Falling edge of m_bRecording: finalise and close the MP4.
                if(!m_bRecording && strRecordName.length() > 0)
                {
                    av_write_trailer(ofmt);
                    emit recordEnd(strRecordName);
                    if(ofmt){if(!(fmt->flags & AVFMT_NOFILE))avio_close(ofmt->pb);avformat_free_context(ofmt);ofmt = NULL;}
                    strRecordName = "";
                }
            }
            else
            {
                QThread::msleep(10);
                nFail ++;
            }
            if(nFail > 5)
            {
                emit connectFail();
                goto cleanup;
            }
        }
    }

// Single exit point: release everything in reverse order of acquisition.
// NOTE(review): the trailing backslashes below splice these statements onto
// one logical line (likely a leftover from a macro); each statement is
// self-contained, so behaviour is unaffected.
cleanup:
    if(strRecordName.length() > 0){  av_write_trailer(ofmt);  emit recordEnd(strRecordName); strRecordName = "";}    \
        if(out_audio_ctx){avcodec_close(out_audio_ctx);avcodec_free_context(&out_audio_ctx);out_audio_ctx = NULL;}\
        if(fifo){av_audio_fifo_free(fifo);fifo = NULL;}\
        if(out_video_ctx){avcodec_free_context(&out_video_ctx);out_video_ctx = NULL;}\
        if(ofmt){if(!(fmt->flags & AVFMT_NOFILE))avio_close(ofmt->pb);avformat_free_context(ofmt);ofmt = NULL;} \
        if(pAudioBuf){ av_free(pAudioBuf);pAudioBuf = NULL;}                                                            \
        if(pSwrCtx){ swr_free(&pSwrCtx);pSwrCtx = NULL;}                                                                \
        if(pAudioDevice){ pAudioDevice->close();pAudioDevice = NULL;}                                                  \
        if(pAudioOutput){ pAudioOutput->stop();pAudioOutput->deleteLater();pAudioOutput = NULL;}                                                 \
        if(pAudioFrame){  av_frame_free(&pAudioFrame); pAudioFrame = NULL;}                                                  \
        if(pPacket) { av_packet_free(&pPacket); pPacket = NULL;}                                                     \
        if(pVideoThread){ pVideoThread->quit();pVideoThread->deleteLater();pVideoThread = NULL;}                                                     \
        if(pVideoCodecCtx){avcodec_close(pVideoCodecCtx);avcodec_free_context(&pVideoCodecCtx);pVideoCodecCtx=NULL;}                                                     \
        if(pAudioCodecCtx){avcodec_close(pAudioCodecCtx);avcodec_free_context(&pAudioCodecCtx);pAudioCodecCtx=NULL;}              \
        av_dict_free(&optionsDict);  \
        if(m_pInFmtCtx){avformat_close_input(&m_pInFmtCtx); avformat_free_context(m_pInFmtCtx);m_pInFmtCtx = NULL;}

}


// Opens a decoder for the given stream parameters, preferring a hardware
// device context when one can be created (probing device types in enum order).
// On success returns an opened AVCodecContext (caller owns and must free it)
// and sets `fmt` to the hardware pixel format when a HW path was found.
// Returns NULL on failure.
// BUGFIX: the results of avcodec_parameters_to_context() and avcodec_open2()
// were previously ignored, so callers could receive a context that was never
// successfully configured or opened and treat it as success.
AVCodecContext *RtspThread::openDecoder(AVCodecParameters *codecpar,AVPixelFormat & fmt)
{
    const AVCodec * pCodec = avcodec_find_decoder(codecpar->codec_id);
    if(!pCodec)
        return NULL;

    AVCodecContext * pCodecCtx = avcodec_alloc_context3(pCodec);
    if(!pCodecCtx)
        return NULL;
    if(avcodec_parameters_to_context(pCodecCtx, codecpar) < 0)
    {
        avcodec_free_context(&pCodecCtx);
        return NULL;
    }

    // Try each hardware device type until one can be created; fall through to
    // pure software decoding when none is available.
    AVBufferRef *hw_device_ctx = NULL;
    int type = AV_HWDEVICE_TYPE_VDPAU;
    while (type <= AV_HWDEVICE_TYPE_RKMPP)
    {
        if(av_hwdevice_ctx_create(&hw_device_ctx, (AVHWDeviceType)type, NULL, NULL, 0) == 0)
        {
            pCodecCtx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
            av_buffer_unref(&hw_device_ctx);
            if(pCodecCtx->hw_device_ctx)
                break;
        }
        type ++;
    }

    if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    {
        avcodec_free_context(&pCodecCtx);
        return NULL;
    }

    // Query the decoder's hardware configs to report the pixel format the
    // chosen device type will produce. If no device was created, `type` is
    // past AV_HWDEVICE_TYPE_RKMPP and no config matches, leaving fmt untouched.
    for (int i = 0;; i++) {
        const AVCodecHWConfig *config = avcodec_get_hw_config(pCodec, i);
        if (!config) {
            break;
        }
        if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX && config->device_type == type) {
            fmt = config->pix_fmt;
            break;
        }
    }
    return pCodecCtx;
}


// Adds a video stream to output container `oc`, copying codec parameters from
// input stream `index` and overriding the picture size with nW x nH.
//   mark : when false, request global headers if the container needs them.
//   open : when true, also open the encoder (the recording path passes false,
//          since video packets are remuxed without re-encoding).
// Returns the new stream, or NULL on failure. On failure the allocated codec
// context is freed and *pOutCodecCtx reset to NULL.
// BUGFIX: *pOutCodecCtx was previously leaked on every error path.
AVStream *RtspThread::AddVideoStream(AVFormatContext *oc, AVCodecContext **pOutCodecCtx, AVFormatContext *ic, int index, bool mark, bool open,int nW,int nH)
{
    const AVCodec   *video_codec=NULL;
    AVStream  *VideoSt=NULL;

    video_codec=avcodec_find_encoder(ic->streams[index]->codecpar->codec_id);
    if(!video_codec)
        return NULL;
    VideoSt=avformat_new_stream(oc,video_codec);
    if (!VideoSt)
    {
        return NULL;
    }
    *pOutCodecCtx=avcodec_alloc_context3(video_codec);
    if(!(*pOutCodecCtx))
        return NULL;

    if(avcodec_parameters_copy(VideoSt->codecpar,ic->streams[index]->codecpar)<0)
    {
        avcodec_free_context(pOutCodecCtx);
        return NULL;
    }
    VideoSt->codecpar->width = nW;
    VideoSt->codecpar->height = nH;
    if(avcodec_parameters_to_context(*pOutCodecCtx,VideoSt->codecpar)<0)
    {
        avcodec_free_context(pOutCodecCtx);
        return NULL;
    }

    // NOTE(review): only time_base.den is set here; this is harmless while the
    // encoder stays unopened (open == false at the recording call site).
    if(ic->streams[index]->avg_frame_rate.den==0)
        (*pOutCodecCtx)->time_base.den=25;
    else
        (*pOutCodecCtx)->time_base.den = av_q2d(ic->streams[index]->avg_frame_rate);// frame rate, e.g. 30
    VideoSt->time_base.num=1;
    VideoSt->time_base.den=ic->streams[index]->time_base.den;

    if(!mark)
    {
        if(oc->oformat->flags&AVFMT_GLOBALHEADER)
            (*pOutCodecCtx)->flags|=AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    if(open)
    {
        if(avcodec_open2(*pOutCodecCtx,video_codec,NULL)<0)
        {
            avcodec_free_context(pOutCodecCtx);
            return NULL;
        }
    }

    return VideoSt;
}

// Adds a stereo AAC audio stream to output container `oc`, taking the sample
// rate from input stream `index`.
//   mark : when false, request global headers if the container needs them.
//   open : when true, open the AAC encoder (the recording path passes true).
// Returns the new stream, or NULL on failure. On failure the allocated codec
// context is freed and *pOutCodecCtx reset to NULL.
// BUGFIX: *pOutCodecCtx was previously leaked on the open/parameter-export
// error paths.
AVStream *RtspThread::AddAudioStream(AVFormatContext *oc, AVCodecContext **pOutCodecCtx, AVFormatContext *ic, int index, bool mark, bool open)
{
    AVStream *AudioSt=NULL;
    const AVCodec *audio_codec=NULL;
    audio_codec=avcodec_find_encoder(AV_CODEC_ID_AAC);
    if(!audio_codec)
        return NULL;

    AudioSt=avformat_new_stream(oc,audio_codec);
    if (!AudioSt)
        return NULL;

    *pOutCodecCtx=avcodec_alloc_context3(audio_codec);
    if(!(*pOutCodecCtx))
        return NULL;

    (*pOutCodecCtx)->channels=2;
    (*pOutCodecCtx)->channel_layout=av_get_default_channel_layout((*pOutCodecCtx)->channels);
    (*pOutCodecCtx)->sample_rate=ic->streams[index]->codecpar->sample_rate;
    (*pOutCodecCtx)->sample_fmt=audio_codec->sample_fmts[0];
    (*pOutCodecCtx)->bit_rate=64000;
    (*pOutCodecCtx)->codec_id = AV_CODEC_ID_AAC;
    (*pOutCodecCtx)->codec_type = AVMEDIA_TYPE_AUDIO;

    // Allow the native (experimental) AAC encoder to be used.
    (*pOutCodecCtx)->strict_std_compliance=FF_COMPLIANCE_EXPERIMENTAL;

    (*pOutCodecCtx)->time_base.num=1;
    (*pOutCodecCtx)->time_base.den=ic->streams[index]->codecpar->sample_rate;

    // NOTE(review): the muxer may rewrite the stream time base when the
    // header is written — verify downstream timestamp handling.
    AudioSt->time_base.num=1;
    AudioSt->time_base.den=ic->streams[index]->codecpar->sample_rate;

    if(!mark)
    {
        if(oc->oformat->flags&AVFMT_GLOBALHEADER)
            (*pOutCodecCtx)->flags|=AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    if(open)
    {
        if(avcodec_open2(*pOutCodecCtx,audio_codec,NULL)<0)
        {
            avcodec_free_context(pOutCodecCtx);
            return NULL;
        }
    }
    if(avcodec_parameters_from_context(AudioSt->codecpar,(*pOutCodecCtx))<0)
    {
        avcodec_free_context(pOutCodecCtx);
        return NULL;
    }

    return AudioSt;
}


// Allocates the per-channel pointer array plus the sample buffers needed to
// hold `frame_size` converted samples in the output encoder's format.
// Returns 0 on success, -1 on failure.
// BUGFIX: on av_samples_alloc() failure the pointer array was freed but
// *converted_input_samples was left dangling, so the caller's cleanup path
// (AudioResampler) would free it a second time. It is now reset to NULL.
int RtspThread::InitConvertedSamples(uint8_t ***converted_input_samples, AVCodecContext *output_codec_context, int frame_size)
{
    // One uint8_t* slot per channel, zero-initialised so a failed
    // av_samples_alloc() leaves freeable NULL pointers.
    if (!(*converted_input_samples =(uint8_t **)calloc(output_codec_context->channels,sizeof(**converted_input_samples))))
        return -1;

    if (av_samples_alloc(*converted_input_samples, NULL,output_codec_context->channels,frame_size,output_codec_context->sample_fmt, 0) < 0)
    {
        av_freep(&(*converted_input_samples)[0]);
        free(*converted_input_samples);
        *converted_input_samples = NULL;
        return -1;
    }
    return 0;
}

// Thin wrapper around swr_convert(): converts exactly `frame_size` samples
// from input_data into converted_data using the given resampler.
// Returns 0 on success, -1 when the resampler reports an error.
int RtspThread::ConvertSamples(const uint8_t **input_data, uint8_t **converted_data, const int frame_size, SwrContext *resample_context)
{
    const int rc = swr_convert(resample_context, converted_data, frame_size, input_data, frame_size);
    return (rc < 0) ? -1 : 0;
}

// Grows `fifo` so it can hold the incoming samples, then appends them.
// Returns 0 on success, -1 if either the realloc or the write falls short.
int RtspThread::AddSamplesToFifo(AVAudioFifo *fifo, uint8_t **converted_input_samples, const int frame_size)
{
    const int required = av_audio_fifo_size(fifo) + frame_size;
    if (av_audio_fifo_realloc(fifo, required) < 0)
        return -1;

    const int written = av_audio_fifo_write(fifo, (void **)converted_input_samples, frame_size);
    return (written < frame_size) ? -1 : 0;
}

// Resamples one decoded audio frame from the input codec's format to the
// output (AAC) codec's format and appends the converted samples to
// `fifo_buffer`. Returns 0 on success, -1 on any failure.
// BUGFIX: the SwrContext was leaked when swr_init() failed after a successful
// swr_alloc_set_opts(); it is now freed on that path. The local pointer is
// also defensively cleared when InitConvertedSamples() fails, so its partial
// allocations are never freed twice here.
int RtspThread::AudioResampler(AVCodecContext *input_codec_context, AVCodecContext *output_codec_context, AVFrame *input_frame, AVAudioFifo *fifo_buffer)
{
    SwrContext *resample_context=NULL;
    uint8_t **converted_input_samples = NULL;
    int ret=-1;
    // Configure a per-call resampler for this conversion.
    resample_context = swr_alloc_set_opts(NULL,
                                          av_get_default_channel_layout(output_codec_context->channels),	// output channel layout
                                          output_codec_context->sample_fmt,								// output sample format
                                          output_codec_context->sample_rate,								// output sample rate
                                          av_get_default_channel_layout(input_codec_context->channels),	// input channel layout
                                          input_codec_context->sample_fmt,									// input sample format
                                          input_codec_context->sample_rate,								// input sample rate
                                          0, NULL);
    if (!resample_context)
        return -1;
    if (swr_init(resample_context) < 0)
    {
        swr_free(&resample_context);
        return -1;
    }

    ret=InitConvertedSamples(&converted_input_samples, output_codec_context, input_frame->nb_samples);
    if(ret<0)
    {
        // On failure InitConvertedSamples has already released anything it
        // allocated; clear the pointer so cleanup below does not free it again.
        converted_input_samples = NULL;
        goto cleanup;
    }

    ret=ConvertSamples((const uint8_t**)input_frame->extended_data, converted_input_samples,input_frame->nb_samples, resample_context);
    if(ret<0)
        goto cleanup;

    ret=AddSamplesToFifo(fifo_buffer, converted_input_samples, input_frame->nb_samples);
    if(ret<0)
        goto cleanup;

    ret=0;
cleanup:
    if (converted_input_samples)
    {
        av_freep(&converted_input_samples[0]);
        free(converted_input_samples);
    }
    swr_free(&resample_context);
    return ret;
}

// Allocates an audio frame sized and formatted for the output encoder
// (frame_size samples, encoder's layout/format/rate) with writable buffers.
// Returns 0 on success; on failure returns -1 with *frame freed and NULL.
int RtspThread::InitOutputFrame(AVFrame **frame, AVCodecContext *output_codec_context, int frame_size)
{
    *frame = av_frame_alloc();
    if (*frame == NULL)
        return -1;

    AVFrame *const out = *frame;
    out->nb_samples     = frame_size;
    out->channel_layout = output_codec_context->channel_layout;
    out->format         = output_codec_context->sample_fmt;
    out->sample_rate    = output_codec_context->sample_rate;

    // Allocate the sample buffers described by the fields above.
    if (av_frame_get_buffer(out, 0) < 0)
    {
        av_frame_free(frame);
        return -1;
    }
    return 0;
}


// Sends one audio frame to the AAC encoder and, if a packet comes out,
// rescales its timestamps from the input stream's time base to the output
// stream's and muxes it into `oc`.
// Returns 0 on success (including the "encoder needs more input" and EOF
// cases) and -1 on a hard failure.
// NOTE(review): av_init_packet() is deprecated in recent FFmpeg releases.
// NOTE(review): stream_index is set to the *input* stream index `index`,
// which assumes output stream order mirrors the input — confirm.
// NOTE(review): output_packet.duration is rescaled but never set, so it
// remains 0 throughout.
int RtspThread::EncodeAudioFrameRtsp(AVFrame *frame,AVFormatContext *oc,AVStream *audio_st,AVCodecContext *out_codec_ctx,int index,AVFormatContext *ic)
{
    int ret=-1;
    AVPacket output_packet;
    av_init_packet(&output_packet);
    output_packet.data=NULL;
    output_packet.size=0;
    ret=avcodec_send_frame(out_codec_ctx, frame);
    if (ret == AVERROR_EOF)
    {
        // Encoder already flushed; nothing left to do.
        ret=0;
        goto end;
    }else if(ret<0)
    {
        av_packet_unref(&output_packet);
        return -1;
    }

    ret=avcodec_receive_packet(out_codec_ctx,&output_packet);
    if (ret==AVERROR(EAGAIN))
    {
        // Encoder needs more input before it can emit a packet — not an error.
        ret=0;
        goto end;
    }else if(ret==AVERROR_EOF)
    {
        ret=0;
        goto end;
    }else if(ret<0)
    {
        av_packet_unref(&output_packet);
        return -1;
    }

    // Carry the frame's pts through, converted to the output time base.
    output_packet.pts=frame->pts;
    output_packet.pts = av_rescale_q_rnd(output_packet.pts, ic->streams[index]->time_base, audio_st->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    output_packet.dts=output_packet.pts;
    output_packet.duration = av_rescale_q(output_packet.duration, ic->streams[index]->time_base, audio_st->time_base);
    output_packet.stream_index=index;
    if (av_interleaved_write_frame(oc, &output_packet)< 0)
    {
        av_packet_unref(&output_packet);
        return -1;
    }

end:
    av_packet_unref(&output_packet);
    if(ret<0)
        return ret;
    else
        return 0;
}

// Drains up to one encoder frame's worth of samples from `fifo`, stamps the
// frame with the running sample counter `pts` (advancing it), encodes it and
// writes the packet into the output container.
// Returns 0 on success, a negative value on failure.
int RtspThread::LoadEncodeAndWrite(AVAudioFifo *fifo,AVFormatContext *oc,AVStream *audio_st,AVCodecContext *out_codec_ctx,int index,AVFormatContext *ic,int &pts)
{
    const int frame_size = FFMIN(av_audio_fifo_size(fifo), out_codec_ctx->frame_size);

    AVFrame *output_frame = NULL;
    int ret = InitOutputFrame(&output_frame, out_codec_ctx, frame_size);
    if (ret < 0)
        return ret;

    if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size)
    {
        // Short read: the FIFO did not deliver a full frame.
        ret = -1;
    }
    else
    {
        output_frame->pts = pts;
        pts += output_frame->nb_samples; // advance by the samples consumed
        ret = EncodeAudioFrameRtsp(output_frame, oc, audio_st, out_codec_ctx, index, ic);
    }

    // Single exit: the frame is freed on every path.
    av_frame_free(&output_frame);
    return (ret < 0) ? ret : 0;
}
