#include "dbg.hpp"
#include "xvideo_util.hpp"
#include "video.h"
#include "audio.h"

extern "C" {
    #include "libavutil/imgutils.h"
}

// All members rely on their in-class initializers; nothing to do here.
XVInsertFrame::XVInsertFrame() {}

// Releases the encoder context, the scratch packet, the black-frame pixel
// buffer, the reusable frame, and every cached packet of the black GOP.
XVInsertFrame::~XVInsertFrame() {
    if(enctx_) {
        avcodec_free_context(&enctx_);
    }

    if(outpkt_) {
        av_packet_free(&outpkt_);
    }

    if(buf_) {
        // av_freep() expects the ADDRESS of the pointer so it can free the
        // buffer and null the member; the original passed buf_ itself, which
        // made FFmpeg free whatever address happened to be stored in the
        // buffer's first bytes.
        av_freep(&buf_);
    }

    if(frame_) {
        av_frame_free(&frame_);
    }

    for(auto i : gop_) {
        if(i) {
            av_packet_free(&i);
        }
    }
    gop_.clear();
}

// True once init() has successfully created the encoder context.
bool XVInsertFrame::opened() {
    return enctx_ != nullptr;
}

int XVInsertFrame::init(int width ,int height, int fps, int bit_rate, mediacodec_id_t codec) {
    if(opened()) {
        return -(__LINE__);
    }

    if(width  <= 0 || height <= 0) {
        return -(__LINE__);
    }
    AVDictionary* param = nullptr;

    int codecId = AV_CODEC_ID_H264;
    if(MCODEC_ID_VP8 == codec) {
        codecId = AV_CODEC_ID_VP8;
    }

    AVCodec* pCodec = avcodec_find_encoder((AVCodecID)codecId);
    if (!pCodec) {
        dbge("Not found video codec!");
        return -(__LINE__);
    }

    enctx_ = avcodec_alloc_context3(pCodec);
    if (!enctx_) {
        dbge("Not allocate video codec context!");
        return -(__LINE__);
    }

    enctx_->pix_fmt = AV_PIX_FMT_YUV420P;
    enctx_->width = width;
    enctx_->height = height;
    enctx_->bit_rate = bit_rate;
    enctx_->gop_size = 12;
    enctx_->flags |= AV_CODEC_FLAG_LOW_DELAY;
    enctx_->time_base = {1, fps};
    if(AV_CODEC_ID_H264 == codecId) {
        enctx_->max_b_frames = 0;
        av_dict_set(&param, "tune", "zerolatency", 0);      //zero delay
        av_dict_set(&param, "profile", "baseline", 0);
    }

    int ret = avcodec_open2(enctx_, pCodec, &param);
    if(ret < 0) {
        dbge("Failed to open encoder!");
        return -(__LINE__);
    }

    av_dict_free(&param);
    param = nullptr;

    width_ = width;
    height_ = height;

    if(!frame_) {
        frame_ = av_frame_alloc();
        frame_->width = 0;
        frame_->height = 0;
        frame_->format = AV_PIX_FMT_YUV420P;
    }

    if(!outpkt_) {
        outpkt_ = av_packet_alloc();
    }

    if(encodeBlackPacket() < 0) {
        return -1;
    }

    return 0;
}

// Hands the next cached black-GOP packet to `func`, stamped with `pts`.
// When `key_frame` is requested (or the GOP is exhausted) playback restarts
// from the key frame at index 0. Returns 0 on success, negative on error.
int XVInsertFrame::getFrame(int64_t pts, bool key_frame, const Output& func) {
    if(!opened()) {
        return -(__LINE__);
    }

    // Without a cached GOP, gop_[0] below would be out of bounds (the
    // original indexed unconditionally).
    if(gop_.empty()) {
        return -(__LINE__);
    }

    if(key_frame
    || gop_.size() <= cur_frame_index_) {
        cur_frame_index_ = 0;
    }

    AVPacket* pkt = gop_[cur_frame_index_];
    func(pkt->data, pkt->size, pts, pkt->flags & AV_PKT_FLAG_KEY);

    ++cur_frame_index_;

    return 0;
}

int XVInsertFrame::encodeBlackPacket() {
    gop_.clear();
    AVFrame* frame = buildBlackFrame();

    //key frame flag
    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->key_frame = 1;
    int cycles = 0;
    while(1) {
        int ret = avcodec_send_frame(enctx_, frame);
        if (ret) {
            if (AVERROR(EAGAIN) != ret) {
                // // dbgd(logger_, "Error sending original frame to encoder!");
                return -(__LINE__);
            }
            // avcodec_receive_packet(imgCodecCtx_, outPacaket_);
            // av_packet_unref(outPacaket_);
            // avcodec_send_frame(imgCodecCtx_, frame);
        }

        while (ret >= 0) {
            ret = avcodec_receive_packet(enctx_, outpkt_);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
                break;
            }else if (ret != 0) {
                // NERROR_FMT_SET(ret, "fail to avcodec_receive_packet video, err=[{}]", av_err2str(ret));
                // // dbgd(logger_, "Error receiving encoded frame from encoder!");
                return ret;
            }

            if(outpkt_->flags & AV_PKT_FLAG_KEY) {
                if(1 == cycles) {
                    return 0;
                }
                ++cycles;
            };

            AVPacket* tmppkt = av_packet_clone(outpkt_);
            gop_.emplace_back(tmppkt);

            av_packet_unref(outpkt_);
        }
        frame->pict_type = AV_PICTURE_TYPE_NONE;
        frame->key_frame = 0;
    }
    return 0;
}

// Returns frame_ filled with solid black at width_ x height_ (YUV420P),
// reallocating the pixel buffer only when the dimensions changed.
// Returns nullptr on allocation/setup failure.
AVFrame* XVInsertFrame::buildBlackFrame() {
    // Reuse the cached frame only when BOTH dimensions match. The original
    // tested with ||, so a stale buffer was returned whenever just one
    // dimension happened to match.
    if(width_ == frame_->width
    && height_ == frame_->height) {
        return frame_;
    }

    if (buf_) {
        // av_freep() also nulls buf_, so a later allocation failure cannot
        // leave a dangling pointer behind.
        av_freep(&buf_);
    }

    int bufSize = av_image_get_buffer_size((AVPixelFormat)AV_PIX_FMT_YUV420P, width_, height_, 1);
    if (bufSize < 0) {
        return nullptr;
    }

    buf_ = (uint8_t*)av_malloc(bufSize);
    if (!buf_) {
        dbge(" Could not init swsFrame buffer!.");
        return nullptr;
    }

    if (av_image_fill_arrays(frame_->data, frame_->linesize
        , buf_, (AVPixelFormat)AV_PIX_FMT_YUV420P
        , width_
        , height_, 1) < 0) {
        dbge(" Could not init swsFrame buffer!.");
        return nullptr;
    }

    // YUV420P black: luma plane 0x00, both quarter-size chroma planes at the
    // neutral value 0x80.
    memset(frame_->data[0], 0x00, width_ * height_ * 1 * sizeof(uint8_t));
    memset(frame_->data[1], 0x80, width_ * height_ / 4 * sizeof(uint8_t));
    memset(frame_->data[2], 0x80, width_ * height_ / 4 * sizeof(uint8_t));

    frame_->width = width_;
    frame_->height = height_;
    return frame_;
}



// All members rely on their in-class initializers; real setup happens in init().
XVBuffer::XVBuffer() {}

// Deletes every buffered MediaFrame, the per-pts vectors, the map itself,
// and releases the black-frame encoder.
XVBuffer::~XVBuffer() {
    // Null-check buffer_ BEFORE dereferencing it: the original called
    // buffer_->empty() first, which is UB when init() was never run.
    if(buffer_) {
        for(auto i : *buffer_) {
            if(i.second) {
                for(auto j : *(i.second)) {
                    delete j;
                }
                delete i.second;
            }
        }
        buffer_->clear();

        delete buffer_;
        buffer_ = nullptr;
    }

    if(insertFrame_){
        insertFrame_.reset();
        insertFrame_ = nullptr;
    }
}

int XVBuffer::init(int buffer_size, bool has_video, int width
                , int height, int bit_rate
                , int fps, mediacodec_id_t codec) {
    if(buffer_size <= 0
    || (has_video && fps <= 0)) {
        return -(__LINE__);
    }

    if(!outfunc_.audio
    || !outfunc_.video) {
        return -(__LINE__);
    }

    int ret = 0;
    capacity_ = buffer_size;
    buffer_ = new std::map<int64_t, std::vector<MediaFrame*>*>();

    if(has_video) {
        // video_sample_rate_ = video_sample_rate;
        step_pts = 1000.0 / fps;
        insertFrame_ = std::make_shared<XVInsertFrame>();
        ret = insertFrame_->init(width, height, fps, bit_rate, codec);
    }
    
    return ret;
}

void XVBuffer::setListener(const Listener& func) {
    outfunc_ = func;
}

int XVBuffer::add(uint8_t* data, size_t len
            , const int64_t& pts, MediaFrame::Type media_type
            , bool is_key_frame) {
    if(!opened()) {
        return -(__LINE__);
    }

    if(MediaFrame::Type::Audio != media_type
    && MediaFrame::Type::Video != media_type) {
        return -(__LINE__);
    }

    MediaFrame* frame = nullptr;
    std::vector<MediaFrame*>* frames = nullptr;

    //验证是否有重复
    auto search = buffer_->find(pts);
    if(search != buffer_->end()) {
        frames = search->second;
    } else {
        frames = new std::vector<MediaFrame*>();
    }

    //增加至缓冲区
    if(MediaFrame::Type::Audio ==  media_type) {
        frame = new AudioFrame(AudioCodec::Type::UNKNOWN, len);
    } else {
        frame = new VideoFrame(VideoCodec::Type::UNKNOWN, len);
        ((VideoFrame*)frame)->SetIntra(is_key_frame);
    }
    frames->emplace_back(frame);
    frame->SetMedia(data, len);

    (*buffer_)[pts] = frames;

    //检查缓冲区中数据是否溢出
    checkBuffer();
    return 0;
}

// Drains every buffered slot through checkout(); no-op when uninitialized.
int XVBuffer::flush() {
    if(!opened()) {
        return 0;
    }

    const int pending = static_cast<int>(buffer_->size());
    for(int n = 0; n < pending; ++n) {
        checkout();
    }
    return 0;
}

// True once init() has allocated the buffer map.
bool XVBuffer::opened() {
    return buffer_ != nullptr;
}

// Decides whether synthesized black video frames should be emitted for the
// slot at `pts`. Drives a small state machine over members:
//   last_video_pts_in_buf_ : pts of the most recent slot that held video
//   last_video_pts_        : pts stamped onto the last inserted frame
//   ready_insert_          : set once the 1000 ms grace period has elapsed
//   insert_frame_time_     : latched true when real video is seen, which
//                            disables insertion from then on (in this file
//                            nothing resets it)
void XVBuffer::checkInsertFrame(std::vector<MediaFrame*>* frames, const int64_t& pts) {
    // Need a frame interval (set from fps in init()) and an encoder.
    if(step_pts <= 0
    || !insertFrame_) {
        return ;
    }

    //Insert frames only when there is no video at the beginning of the audio
    if(insert_frame_time_) {
        return ;
    }

    // First call ever: just record the starting timestamps.
    if(last_video_pts_in_buf_ < 0) {
        last_video_pts_in_buf_ = pts;
        last_video_pts_ = pts;
        force_key_frame_ = true;
        return ;
    }

    // Real video present in this slot: remember it and latch insertion off.
    for(auto i : *frames) {
        if(i && MediaFrame::Type::Video == i->GetType()) {
            last_video_pts_in_buf_ = pts;
            force_key_frame_ = true;
            ready_insert_ = false;
            insert_frame_time_ = true;
            return ;
        }
    }

    // Wait 1000 ms without video before inserting, so that packet loss does
    // not immediately black out the picture and hurt the viewing experience.
    if(pts - last_video_pts_in_buf_ < 1000) {
        return ;
    }

    // Grace period just expired: start the insertion clock at this pts.
    if(!ready_insert_) {
        last_video_pts_ = pts;
        ready_insert_ = true;
        return ;
    }

    // Emit one black frame per fps interval until we catch up with pts.
    if(pts - last_video_pts_ >= step_pts) {
        insertFrame_->getFrame(last_video_pts_, force_key_frame_, outfunc_.video);
        last_video_pts_ += step_pts;
        return ;
    }
}

// Evicts the oldest slots until the buffer is back within capacity_.
void XVBuffer::checkBuffer() {
    while(static_cast<int>(buffer_->size()) > capacity_) {
        checkout();
    }
}

// Pops the oldest slot, first giving checkInsertFrame() a chance to emit
// synthesized black video, then pushing each frame to the matching callback
// and freeing it.
void XVBuffer::checkout() {
    // Guard: begin() on an empty map is undefined behaviour (the original
    // relied on every caller checking the size first).
    if(buffer_->empty()) {
        return;
    }

    int64_t pts = buffer_->begin()->first;
    std::vector<MediaFrame*>* frames = buffer_->begin()->second;
    checkInsertFrame(frames, pts);
    for(auto i : *frames) {
        if(!i) {
            continue;   // tolerate null entries, as checkInsertFrame() does
        }
        if(MediaFrame::Type::Audio == i->GetType()) {
            outfunc_.audio(i->GetData(), i->GetLength(), pts);
        } else {
            outfunc_.video(i->GetData(), i->GetLength(), pts, ((VideoFrame*)i)->IsIntra());
            // Real video went out, so the next inserted frame need not be key.
            force_key_frame_ = false;
        }
        delete i;
        i = nullptr;
    }

    delete frames;
    frames = nullptr;

    buffer_->erase(buffer_->begin());
}