#include "decoder.h"

#include <android/log.h>

#include <cinttypes>
#include <cstdio>


// Initialize every member to a safe "not yet opened" state.
// (The original assigned m_pout_buffer / m_pimg_convert_ctx twice.)
CMyDecoder::CMyDecoder()
{
    // Demux / decode state.
    m_pFormatCtx = NULL;
    m_streamIdx = -1;              // video stream index, -1 = not found
    m_streamIdx_a = -1;            // audio stream index, -1 = not found
    m_pCodecCtx = NULL;
    m_pCodecCtx_a = NULL;
    m_pavCodecParameters = NULL;
    m_pavCodecParameters_a = NULL;
    m_pCodec = NULL;
    m_pCodec_a = NULL;
    m_pFrame = NULL;
    m_packet = NULL;

    // Scaling / resampling state.
    m_pout_buffer = NULL;
    m_pimg_convert_ctx = NULL;
    m_audio_convert_ctx = NULL;

    // Frame geometry is unknown until OpenFile() succeeds.
    m_nFrameW = -1;
    m_nFrameH = -1;
}

// Release everything OpenFile()/InitSws()/SetOutSwrInfo() allocated.
// The original leaked m_packet, m_pCodecCtx_a, m_audio_convert_ctx and
// m_pout_buffer, and avcodec_close() alone does not free the context struct.
// All of the FFmpeg *_free helpers below are safe to call on NULL.
CMyDecoder::~CMyDecoder()
{
    av_packet_free(&m_packet);
    av_frame_free(&m_pFrame);
    // avcodec_free_context() closes the codec and frees the context.
    avcodec_free_context(&m_pCodecCtx);
    avcodec_free_context(&m_pCodecCtx_a);
    avformat_close_input(&m_pFormatCtx);
    sws_freeContext(m_pimg_convert_ctx);
    swr_free(&m_audio_convert_ctx);
    if (m_pout_buffer != NULL) {
        av_free(m_pout_buffer);
        m_pout_buffer = NULL;
    }
}

// Open the media URI, locate the first video (required) and audio (optional)
// streams, and open decoders for them.
// Returns 0 on success, a negative value on failure.
int  CMyDecoder::OpenFile(std::string mediaUri)
{
    av_register_all(); // Register all supported container formats and codecs.

    // Allocate the demuxer context.
    m_pFormatCtx = avformat_alloc_context();

    AVDictionary* options = NULL;
    av_dict_set(&options, "buffer_size", "102400", 0); // I/O buffer size; raise for 1080p streams.
    av_dict_set(&options, "rtsp_transport", "tcp", 0); // Carry RTSP over TCP (set "udp" to switch back).
    av_dict_set(&options, "stimeout", "2000000", 0);   // Disconnect timeout, in microseconds.
    av_dict_set(&options, "max_delay", "500000", 0);   // Maximum allowed demuxing delay.

    // Try to open the input stream / file.
    int ret = avformat_open_input(&m_pFormatCtx, mediaUri.c_str(), NULL, &options);
    // avformat_open_input does not consume the dictionary; it must be freed
    // or every OpenFile() call leaks the option entries.
    av_dict_free(&options);
    if(ret != 0){
        LOGE("Couldn't open input stream! ret=%d\n",ret);
        return -1;
    }
    // Read a few packets so the codec parameters get populated.
    if(avformat_find_stream_info(m_pFormatCtx, NULL) < 0){
        LOGE("couldn't find open stream information !\n");
        avformat_close_input(&m_pFormatCtx);
        return -1;
    }
    // Dump container information to the log.
    av_dump_format(m_pFormatCtx, -1, mediaUri.c_str(), 0);

    // Locate the video and audio streams (keeps the last of each kind,
    // matching the original behavior).
    m_streamIdx = -1;
    for(unsigned int i = 0; i < m_pFormatCtx->nb_streams; i++)
    {
        // Newer FFmpeg replaced AVStream::codec with AVStream::codecpar.
        enum AVMediaType mediaType = m_pFormatCtx->streams[i]->codecpar->codec_type;
        if(mediaType == AVMEDIA_TYPE_VIDEO) {
            m_streamIdx = i;
        }
        else if(mediaType == AVMEDIA_TYPE_AUDIO)
        {
            m_streamIdx_a = i;
        }
    }

    if(m_streamIdx == -1){
        LOGE("Couldn't find a video stream !\n");
        avformat_close_input(&m_pFormatCtx);
        return -1;
    }

    // Codec parameters of the selected video stream.
    m_pavCodecParameters = m_pFormatCtx->streams[m_streamIdx]->codecpar;

    if(m_streamIdx_a != -1)
    {
        m_pavCodecParameters_a = m_pFormatCtx->streams[m_streamIdx_a]->codecpar;

        if(m_pavCodecParameters_a != NULL)
            m_pCodec_a = avcodec_find_decoder(m_pavCodecParameters_a->codec_id);
    }

    // Prefer the Android MediaCodec hardware decoder for non-RTSP H.264 input.
    if(m_pavCodecParameters->codec_id == AV_CODEC_ID_H264 && 0 != mediaUri.compare(0,4,"rtsp"))
    {
        m_pCodec = avcodec_find_decoder_by_name("h264_mediacodec");
        LOGI("use decoder h264_mediacodec");
    } else
    {
        m_pCodec = avcodec_find_decoder(m_pavCodecParameters->codec_id);
        LOGI("use decoder h264_codec");
    }

    if(m_pCodec){
        LOGI("---- find video decoder: %d", m_pavCodecParameters->codec_id);
    } else
    {
        // Without a video decoder nothing below can succeed; bail out instead
        // of passing a NULL codec to avcodec_alloc_context3/avcodec_open2.
        LOGI("find decoder error");
        avformat_close_input(&m_pFormatCtx);
        return -1;
    }

    if(m_pCodec_a){
        // Log the AUDIO codec id (the original printed the video id here).
        LOGI("---- find audio decoder: %d", m_pavCodecParameters_a->codec_id);
        m_pCodecCtx_a = avcodec_alloc_context3(m_pCodec_a);
    } else
    {
        LOGI("find audio decoder error");
    }

    // Allocate the video decoder context and copy stream parameters into it.
    m_pCodecCtx = avcodec_alloc_context3(m_pCodec);
    if(avcodec_parameters_to_context(m_pCodecCtx, m_pavCodecParameters) < 0){
        LOGE("v copy the codec parameters to context fail!");
        return -1;
    }

    // Same for audio, when an audio stream/decoder exists.
    if(m_pCodecCtx_a != NULL && avcodec_parameters_to_context(m_pCodecCtx_a, m_pavCodecParameters_a) < 0){
        LOGE("a copy the codec parameters to context fail!");
        return -1;
    }

    // Open the video decoder.
    int errorCode = avcodec_open2(m_pCodecCtx, m_pCodec, NULL);
    if(errorCode < 0){
        LOGE("Unable to open codec! errorCode=%d\n",errorCode);
        return errorCode;
    }

    // Open the audio decoder, if any.
    if(m_pCodecCtx_a != NULL)
        errorCode = avcodec_open2(m_pCodecCtx_a, m_pCodec_a, NULL);

    if(errorCode < 0){
        LOGE("Unable to open codec! errorCode=%d\n",errorCode);
        return errorCode;
    }

    // Reusable frame that receives decoded pictures.
    m_pFrame = av_frame_alloc();
    if(m_pFrame == NULL){
        LOGE("Unable to allocate an AVFrame!\n");
        return -1;
    }

    // Reusable packet that receives demuxed data.
    m_packet = av_packet_alloc();
    if(m_packet == NULL){
        LOGE("Unable to allocate an AVPacket!\n");
        return -1;
    }

    LOGI("av_image_get_buffer_size w=%d h=%d", m_pCodecCtx->width, m_pCodecCtx->height);
    m_nFrameW = m_pCodecCtx->width;
    m_nFrameH = m_pCodecCtx->height;
    return 0;
}

// Report the cached video frame dimensions discovered by OpenFile().
// Returns 0 and fills nW/nH on success, -1 while the size is still unknown.
int CMyDecoder::GetFrameWH(int &nW,int &nH)
{
    const bool bSizeKnown = (m_nFrameW > 0) && (m_nFrameH > 0);
    if (!bSizeKnown)
        return -1;

    nW = m_nFrameW;
    nH = m_nFrameH;
    return 0;
}

// Prepare the YUV420P output buffer backing pFrameOut and the software
// scaler that converts decoded frames into it.
// Returns 0 on success, -1 on allocation/scaler failure (the original
// returned 0 unconditionally, leaving NULL pointers for GetFrameData()).
int CMyDecoder::InitSws(AVFrame *pFrameOut)
{
    enum AVPixelFormat pixel_fmt = AV_PIX_FMT_YUV420P;

    int nBufSize = av_image_get_buffer_size(pixel_fmt, m_nFrameW, m_nFrameH, 1);
    if (nBufSize <= 0) {
        return -1; // invalid dimensions (OpenFile() not run / failed)
    }

    // Backing storage for pFrameOut's data planes.
    m_pout_buffer = (uint8_t*) av_malloc((size_t) nBufSize);
    if (m_pout_buffer == NULL) {
        return -1;
    }
    av_image_fill_arrays(pFrameOut->data, pFrameOut->linesize, m_pout_buffer,
                         pixel_fmt, m_nFrameW, m_nFrameH, 1);

    // Converter from the decoder's native pixel format to YUV420P,
    // same size, bicubic interpolation.
    m_pimg_convert_ctx = sws_getContext(m_nFrameW, m_nFrameH, m_pCodecCtx->pix_fmt,
                                        m_nFrameW, m_nFrameH, pixel_fmt,
                                        SWS_BICUBIC, NULL,
                                        NULL, NULL);
    return (m_pimg_convert_ctx != NULL) ? 0 : -1;
}

// Read one packet and decode it.
// On a decoded VIDEO frame: scales it into pFrameOut, sets enMT/pts and
// returns the YUV420P byte count (w*h*3/2).
// On a decoded AUDIO frame: decodes into pFrameOut_a, sets enMT/pts and
// returns the decoder's consumed byte count.
// Returns 0 when no frame was produced, -1 at EOF / read error.
// Fixes vs. original: the packet is now unreferenced on EVERY exit path
// (the video success path leaked one ref per frame), and packets from
// streams that are neither the video nor the audio stream no longer fall
// into the audio branch (which dereferenced m_pCodecCtx_a without a check).
int CMyDecoder::GetFrameData(AVFrame *pFrameOut,AVFrame *pFrameOut_a,enMediaType &enMT,double &pts)
{
    int nRet = av_read_frame(m_pFormatCtx, m_packet);
    if( nRet < 0)
    {
        LOGI("av_read_frame  error......nRet=%d",nRet);
        return -1;
    }

    if (m_packet->stream_index == m_streamIdx) {
        // ---- video packet ----
        int getPacketCode = 0;
        int getFrameCode = 0;
#if 0 // decode variant 1: send/receive API (disabled)
        int getPacketCode = avcodec_send_packet(pCodecCtx, packet);
            if(getPacketCode == 0) {
                int getFrameCode = avcodec_receive_frame(pCodecCtx, pFrame);
                LOGI("%d", getFrameCode);
                // Did we get a video frame?
                if (getFrameCode == 0) {
#else
        if (getPacketCode == 0) {
            int nGotPicture = 0;
            int nLen = avcodec_decode_video2(m_pCodecCtx, m_pFrame, &nGotPicture, m_packet);
            if (nLen < 0) {
                av_packet_unref(m_packet); // was leaked on this path
                return 0;
            }

            if (nGotPicture) {
#endif
                // Convert the decoded picture into the caller's YUV420P frame.
                sws_scale(m_pimg_convert_ctx, (const uint8_t *const *) m_pFrame->data,
                          m_pFrame->linesize, 0, m_pFrame->height,
                          pFrameOut->data, pFrameOut->linesize);

                // Presentation time in seconds (stream time_base units -> s).
                pts = m_pFrame->pkt_pts * av_q2d(m_pFormatCtx->streams[m_streamIdx]->time_base);

                enMT = enMT_VIDEO;
                int nFrameBytes = m_pFrame->height * m_pFrame->width * 3 / 2;
                av_packet_unref(m_packet); // was leaked on this path
                return nFrameBytes;
            }
        } else if (getFrameCode == AVERROR(EAGAIN)) {
            LOGE("%s", "Frame is not available right now,please try another input");
        } else if (getFrameCode == AVERROR_EOF) {
            LOGE("%s", "the decoder has been fully flushed");
        } else if (getFrameCode == AVERROR(EINVAL)) {
            LOGE("%s", "codec not opened, or it is an encoder");
        } else {
            LOGI("%s", "legitimate decoding errors");
        }
    }
    else if (m_packet->stream_index == m_streamIdx_a && m_pCodecCtx_a != NULL)
    {
        // ---- audio packet ----
        int nGotPicture = 0;
        enMT = enMT_AUDIO;
#if USE_SWR_AUDIO
        int nLen = avcodec_decode_audio4(m_pCodecCtx_a, pFrameOut_a, &nGotPicture, m_packet);
#else
        // NOTE(review): 'decframe' is declared elsewhere; the original also
        // passed '&m_packet' (AVPacket**) here, which could never compile.
        int nLen = avcodec_decode_audio4(m_pCodecCtx_a, decframe, &nGotPicture, m_packet);
#endif
        if (nLen >= 0 && nGotPicture)
        {
            // Audio presentation time in seconds.
            // (The original also clobbered the video frame's pts from the
            // stale video out-frame here — removed as a copy/paste bug.)
            pts = pFrameOut_a->pkt_pts * av_q2d(m_pFormatCtx->streams[m_streamIdx_a]->time_base);
            av_packet_unref(m_packet);
            return nLen;
        }
    }
    // Unknown streams (subtitle/data) and no-frame outcomes land here.
    av_packet_unref(m_packet);
    return  0;
}

// Resample one decoded audio frame into packed S16 at the rate/channel
// count configured by SetOutSwrInfo(), writing into pData and reporting
// the byte count in nDataLen. nDataLen is 0 if the resampler is missing.
void CMyDecoder::GetAudioData(AVFrame *pFrame, unsigned char *pData, int &nDataLen)
{
    if(m_audio_convert_ctx==NULL)
    {
        nDataLen = 0; // don't leave the caller with a stale length
        return;
    }

    AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    int out_nb_samples = pFrame->nb_samples; // 1:1 sample count per channel

    // Byte size of the packed S16 output for this frame.
    nDataLen = av_samples_get_buffer_size(NULL, m_tagSwrInfo.tagAInfo.nChannels,
                                          out_nb_samples, out_sample_fmt, 1);

    // NOTE(review): swr_convert()'s third argument is the output capacity in
    // samples per channel, but MAX_AUDIO_FRAME_SIZE looks like a byte count —
    // harmless as an over-estimate, but confirm against the caller's buffer.
    swr_convert(m_audio_convert_ctx, &pData, MAX_AUDIO_FRAME_SIZE,
                (const uint8_t **) pFrame->data, pFrame->nb_samples);
}

int CMyDecoder::InitFilters(const char *filters_descr)
{
    if (m_pCodecCtx_a==NULL || m_tagFilterInfo.bEnableAudio==false)
    {
        return -1;
    }

    AVCodecContext *pCtx = m_pCodecCtx_a;

    char args[512];
    int ret = 0;
    AVFilter *abuffersrc  = avfilter_get_by_name("abuffer");
    AVFilter *abuffersink = avfilter_get_by_name("abuffersink");//avfilter_get_by_name("abuffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    //static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    //static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_STEREO, -1 };
    //static const int out_sample_rates[] = { pCtx->sample_rate, -1 };
    static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    static const int64_t out_channel_layouts[] = { m_tagFilterInfo.tagAInfo.nChannel_layout, -1 };
    //static const int out_sample_rates[] = { pCtx->sample_rate, -1 };
    static const int out_sample_rates[] = { m_tagFilterInfo.tagAInfo.nResample, -1 };

    const AVFilterLink *outlink;
    //AVRational time_base = m_fmt_ctx->streams[audio_stream_index]->time_base;

    m_filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !m_filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* buffer audio source: the decoded frames from the decoder will be inserted here. */
    if (!pCtx->channel_layout)
        pCtx->channel_layout = av_get_default_channel_layout(pCtx->channels);
    sprintf(args,
            "time_base=1/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%x",
            pCtx->sample_rate,pCtx->sample_rate,av_get_sample_fmt_name(pCtx->sample_fmt), pCtx->channel_layout);

    ret = avfilter_graph_create_filter(&m_buffersrc_ctx, abuffersrc, "in",
                                       args, NULL, m_filter_graph);
    if (ret < 0) {
        //av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
        goto end;
    }

    /* buffer audio sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&m_buffersink_ctx, abuffersink, "out",
                                       NULL, NULL, m_filter_graph);
    if (ret < 0) {
        //av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
        goto end;
    }

    ret = av_opt_set_int_list(m_buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
                              AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        //av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
        goto end;
    }

    ret = av_opt_set_int_list(m_buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
                              AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        //av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
        goto end;
    }

    ret = av_opt_set_int_list(m_buffersink_ctx, "sample_rates", out_sample_rates, -1,
                              AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        //av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
        goto end;
    }

    /*
     * Set the endpoints for the filter graph. The filter_graph will
     * be linked to the graph described by filters_descr.
     */

    /*
     * The buffer source output must be connected to the input pad of
     * the first filter described by filters_descr; since the first
     * filter input label is not specified, it is set to "in" by
     * default.
     */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = m_buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    /*
     * The buffer sink input must be connected to the output pad of
     * the last filter described by filters_descr; since the last
     * filter output label is not specified, it is set to "out" by
     * default.
     */
    inputs->name       = av_strdup("out");
    inputs->filter_ctx = m_buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if ((ret = avfilter_graph_parse_ptr(m_filter_graph, filters_descr,
                                        &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(m_filter_graph, NULL)) < 0)
        goto end;

    /* Print summary of the sink buffer
     * Note: args buffer is reused to store channel layout string */
    outlink = m_buffersink_ctx->inputs[0];
    av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);

    end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

// Fill tagInfo from the codec contexts opened by OpenFile().
// Returns 0 on success, -1 when neither a video nor an audio decoder exists.
int CMyDecoder::GetMediaInfo(SMediaInfo &tagInfo)
{
    if(m_pCodecCtx==NULL && m_pCodecCtx_a==NULL)
        return -1;

    tagInfo.bEnableVideo = false;
    tagInfo.bEnableAudio = false;

    if(m_pCodecCtx!=NULL)
    {
        tagInfo.bEnableVideo = true;
        tagInfo.tagVInfo.nW = m_pCodecCtx->width;
        tagInfo.tagVInfo.nH = m_pCodecCtx->height;
        // Guard the denominator: an unknown framerate is {0, 0|1} and the
        // original division by framerate.den could SIGFPE.
        if (m_pCodecCtx->framerate.den != 0)
            tagInfo.tagVInfo.nFPS = m_pCodecCtx->framerate.num / m_pCodecCtx->framerate.den;
        else
            tagInfo.tagVInfo.nFPS = 0;
        // Fall back to keyint_min when no frame rate was reported.
        tagInfo.tagVInfo.nFPS = tagInfo.tagVInfo.nFPS==0?m_pCodecCtx->keyint_min:tagInfo.tagVInfo.nFPS;

        tagInfo.tagVInfo.nBitrate = m_pCodecCtx->bit_rate;
        tagInfo.tagVInfo.cID = m_pCodecCtx->codec_id;
    }

    if(m_pCodecCtx_a!=NULL)
    {
        tagInfo.bEnableAudio = true;
        tagInfo.tagAInfo.nChannels = m_pCodecCtx_a->channels;
        tagInfo.tagAInfo.nResample = m_pCodecCtx_a->sample_rate;
        tagInfo.tagAInfo.nChannel_layout = m_pCodecCtx_a->channel_layout;
        tagInfo.tagAInfo.cID = m_pCodecCtx_a->codec_id;
        tagInfo.tagAInfo.nFrameSize = m_pCodecCtx_a->frame_size;
    }

    // Duration is in AV_TIME_BASE (microsecond) units; guard m_pFormatCtx in
    // case OpenFile() was never run.
    if(m_pFormatCtx != NULL && m_pFormatCtx->duration != AV_NOPTS_VALUE){
        int hours, mins, secs, us;
        int64_t duration = m_pFormatCtx->duration + 5000; // round to nearest 10ms
        tagInfo.nTimeLen = duration;
        secs = duration / AV_TIME_BASE;
        us = duration % AV_TIME_BASE;
        mins = secs / 60;
        secs %= 60;
        hours = mins/ 60;
        mins %= 60;
        LOGI("%02d:%02d:%02d.%02d\n", hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    return 0;
}

// Record the requested filter output parameters and (re)build the audio
// filter graph via InitFilters().
void CMyDecoder::SetFilterInfo(SMediaInfo &tagInfo)
{
    m_tagFilterInfo = tagInfo;

    // Guard m_pCodecCtx_a: the original dereferenced it unconditionally.
    if(tagInfo.bEnableAudio==true && m_pCodecCtx_a != NULL)
    {
        int sampleRate = m_pCodecCtx_a->sample_rate;
        //int sampleRate = tagInfo.tagAInfo.nResample;
        m_tagFilterInfo.tagAInfo.nChannel_layout =
                av_get_default_channel_layout(m_tagFilterInfo.tagAInfo.nChannels);

        // channel_layout is a 64-bit mask: print it in hex with PRIx64.
        // The original "0x%d" was undefined behavior for a uint64_t and
        // produced a decimal value after the "0x" prefix.
        char szDsc[1024];
        snprintf(szDsc, sizeof(szDsc),
                 "aresample=%d,aformat=sample_fmts=%s:channel_layouts=0x%" PRIx64,
                 sampleRate, av_get_sample_fmt_name(AV_SAMPLE_FMT_FLTP),
                 m_pCodecCtx_a->channel_layout);
        InitFilters(szDsc);
    }

#if NEED_VIDEOFILTER
    InitVideoFilters();
#endif
}

// Create (once) the audio resampler used by GetAudioData(): decoder-native
// layout/format/rate -> default layout for the requested channel count,
// packed S16, at the requested sample rate.
void CMyDecoder::SetOutSwrInfo(SMediaInfo &tagInfo)
{
    if(tagInfo.bEnableAudio==false)
        return;

    m_tagSwrInfo = tagInfo;

    // Output side of the resampler.
    uint64_t out_channel_layout = av_get_default_channel_layout(m_tagSwrInfo.tagAInfo.nChannels);
    AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    int out_sample_rate = m_tagSwrInfo.tagAInfo.nResample;

    // Guard m_pCodecCtx_a: its fields describe the input side.
    if (m_audio_convert_ctx==NULL && m_pCodecCtx_a != NULL)
    {
        // swr_alloc_set_opts() allocates the context when passed NULL, so the
        // original's separate swr_alloc() call was redundant.
        m_audio_convert_ctx = swr_alloc_set_opts(NULL,
                out_channel_layout, out_sample_fmt, out_sample_rate,
                m_pCodecCtx_a->channel_layout, m_pCodecCtx_a->sample_fmt,
                m_pCodecCtx_a->sample_rate, 0, NULL);

        // The original ignored swr_init()'s result; keep the member NULL on
        // failure so GetAudioData() bails out instead of using a broken context.
        if (m_audio_convert_ctx != NULL && swr_init(m_audio_convert_ctx) < 0)
            swr_free(&m_audio_convert_ctx);
    }
}