#include "decoder_video.h"

// Timestamp meant to describe the packet currently being decoded; read by
// getBuffer() and stamped onto each newly allocated frame via pic->opaque.
// NOTE(review): nothing visible in this file ever assigns it after this
// initialization, so getBuffer() always stores AV_NOPTS_VALUE -- it looks
// like process() was meant to set it from packet->pts before decoding.
// Also stored as uint64_t while AV_NOPTS_VALUE is a signed sentinel; the
// comparison relies on wraparound -- confirm intended.
static uint64_t global_video_pkt_pts = AV_NOPTS_VALUE;

// Installs the custom frame-buffer allocation hook (getBuffer) on the
// stream's codec context so every decoded frame can be tagged with a
// packet timestamp at allocation time.
DecoderVideo::DecoderVideo(AVStream *stream) : DecoderBase(stream) {
    mStream->codec->get_buffer2 = getBuffer;
//    mStream->codec->release_buffer = releaseBuffer;
}

// Nothing to release here: this class holds no owned resources of its own
// (the per-packet frame is managed inside process()).
DecoderVideo::~DecoderVideo() = default;

// One-time setup hook invoked before decoding starts. There is nothing to
// allocate for video decoding, so it unconditionally reports success.
bool DecoderVideo::prepare() {
    return true;
}

/**
 * Reconciles a frame's pts (seconds) with the running video clock.
 *
 * @param src_frame decoded frame (only repeat_pict is consulted)
 * @param pts       frame timestamp in seconds, or 0 when unknown
 * @return the timestamp to present the frame at, in seconds
 */
double DecoderVideo::synchronize(AVFrame *src_frame, double pts) {
    // A pts of 0 means "unknown": fall back to the running clock.
    // Otherwise re-anchor the clock to the frame's own timestamp.
    if (pts == 0) {
        pts = mVideoClock;
    } else {
        mVideoClock = pts;
    }

    // Advance the clock by one frame period, stretched by half a period
    // for every repeated field this frame carries (repeat_pict).
    double delay = av_q2d(mStream->codec->time_base);
    delay += src_frame->repeat_pict * (delay * 0.5);
    mVideoClock += delay;

    return pts;
}


/**
 * Decodes one compressed packet and, if a full frame comes out, delivers
 * it to onDecode() together with its presentation timestamp in seconds.
 *
 * @param packet compressed packet taken from the packet queue
 * @return false only on a hard decode error; true when the packet was
 *         consumed, whether or not it produced a frame (codecs with frame
 *         delay legitimately emit nothing for the first packets, and that
 *         must not be treated as fatal by the caller).
 */
bool DecoderVideo::process(AVPacket *packet) {
    int gotFrame = 0;
    double pts = 0;

    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        return false;
    }

    // Publish the packet pts so getBuffer() can stamp it on the frame at
    // buffer-allocation time: the decoder may buffer/reorder frames, so
    // packet->pts at output time can belong to a different frame.
    global_video_pkt_pts = packet->pts;

    int decodedLen = avcodec_decode_video2(mStream->codec, frame, &gotFrame, packet);
    LOGD("decoding video -------------- decodedLen:%d, size:%d", decodedLen, packet->size);
    if (decodedLen < 0) {
        av_frame_free(&frame);   // was leaked on this error path before
        return false;
    }

    // Best available timestamp, in stream time_base ticks: the pts stored
    // in the frame's opaque at allocation, then packet dts, then 0.
    if (packet->dts == AV_NOPTS_VALUE && frame->opaque
        && *(uint64_t *) frame->opaque != AV_NOPTS_VALUE) {
        pts = *(uint64_t *) frame->opaque;
    } else if (packet->dts != AV_NOPTS_VALUE) {
        pts = packet->dts;
    } else {
        pts = 0;
    }

    double avPacketPts = pts;
    pts = pts * av_q2d(mStream->time_base);   // stream ticks -> seconds

    LOGD("--------------- packet pts:%f, frame:%f", avPacketPts, pts);
    LOGD("------------------- num:%d, gen:%d",  mStream->time_base.num, mStream->time_base.den);

    if (gotFrame) {
        pts = synchronize(frame, pts);
        onDecode(frame, pts);
        // NOTE(review): the frame (and the av_malloc'd pts in its opaque)
        // is not freed here because onDecode()'s ownership contract is not
        // visible in this file -- confirm who frees it; the original code
        // leaked it unconditionally.
        return true;
    }

    // No frame produced (decoder delay): not an error. Free the frame the
    // original version leaked on this path.
    av_frame_free(&frame);
    return true;
}

bool DecoderVideo::decode(void *ptr) {

    AVPacket pPacket;

    LOGD("decoding video");

    while (mRunning) {
        if (mQueue->get(&pPacket, true) < 0) {
            mRunning = false;
            return false;
        }

        if (!process(&pPacket)) {
            LOGD("decoding video -------------- process size:%d", pPacket.size);
            mRunning = false;
            return false;
        }

        av_packet_unref(&pPacket);
    }
    LOGD("decoding video ended");
}

/**
 * Custom AVCodecContext.get_buffer2 callback, invoked whenever the decoder
 * allocates a frame buffer. Performs the default allocation, then tags the
 * frame (via pic->opaque) with the pts of the packet currently being
 * decoded (global_video_pkt_pts), so the timestamp survives decoder-side
 * frame reordering.
 *
 * @return the default allocator's result: 0 on success, negative on error
 */
int DecoderVideo::getBuffer(struct AVCodecContext *c, AVFrame *pic, int flags) {
    // Forward the caller's flags (e.g. AV_GET_BUFFER_FLAG_REF) instead of
    // hard-coding 0, which silently dropped the ref-counted-frame request.
    int ret = avcodec_default_get_buffer2(c, pic, flags);
    if (ret < 0) {
        return ret;   // don't tag a frame whose buffer allocation failed
    }
    uint64_t *pts = (uint64_t *) av_malloc(sizeof(uint64_t));
    if (pts) {        // av_malloc can fail; a null opaque is handled by readers
        *pts = global_video_pkt_pts;
    }
    // NOTE(review): this allocation is never freed anywhere visible in this
    // file (releaseBuffer is not registered in the constructor) -- confirm
    // ownership, otherwise it leaks one uint64_t per frame.
    pic->opaque = pts;
    return ret;
}
// Intended counterpart to getBuffer(): frees the pts stored in pic->opaque
// and defers to a default release routine.
// NOTE(review): this callback is never registered (the release_buffer
// assignment in the constructor is commented out), and
// `avcodec_default_release_buffer2` is not a function visible in this file
// or in the documented FFmpeg API (the get_buffer2 API has no release
// callback) -- confirm this is dead code before relying on it.
void DecoderVideo::releaseBuffer(struct AVCodecContext *c, AVFrame *pic) {
    if (pic)
        av_freep(&pic->opaque);
    avcodec_default_release_buffer2(c, pic);
}