// Created by Chen qin lang on 2018/6/3.
//
#include <jni.h>
#include <stdio.h>

#include "com_zagj_videocomparess_EasyVIdeoPlay.h"
#include <string>
#include <fstream>
#include <iostream>
#include <thread>
#include <memory>
#include <pthread.h>
#include <android/native_window_jni.h>
#include <unistd.h>


extern "C" {
#include "libavformat/avformat.h"
#include <android/log.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include "libswscale/swscale.h"
#include <libavdevice/avdevice.h>
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include <libavutil/imgutils.h>
}
using namespace std;
#define LOGE(format, ...) __android_log_print(ANDROID_LOG_ERROR, "(>_<)", format,  ##__VA_ARGS__)
#define LOGI(format, ...)  printf("(^_^) " format "\n", ##__VA_ARGS__)
AVFormatContext *inputContext[2];
AVFormatContext *outputCtx;
AVCodecContext *decodeContext[2];
AVCodecContext *outputCodec;
AVFilterGraph *avFilterGraph;
AVFilterInOut *inputs;
AVFilterInOut *outputs;
AVFilterContext *pFilterContext[2];
AVFilterContext *outputFilter;
AVFrame *pSrcFrame[2];
AVFrame *pDstFrame;
AVFrame *inputFrame[2];
SwsContext *swsContext;
int videoIndex = -1;
const char *filter_descr = "overlay=100:100";
bool play = true;

// Opens the media file `filename` into the global inputContext[indexFormat]
// and probes its stream info.
// NOTE: the `avFormatContext` parameter is ignored — this function always
// allocates the context itself (kept in the signature for compatibility with
// existing callers, which pre-allocate and pass one in).
// Returns 0 on success or a negative AVERROR code on failure.
int OpenInput(AVFormatContext *avFormatContext, char *filename, int indexFormat) {
    LOGE("OpenInput index: %d \n", indexFormat);
    inputContext[indexFormat] = avformat_alloc_context();
    // BUG FIX: the original wrote `if (re = avformat_open_input(...) != 0)`,
    // which — due to operator precedence — stored the *comparison result*
    // (0/1) in `re` instead of the real AVERROR code.
    int re = avformat_open_input(&inputContext[indexFormat], filename, NULL, NULL);
    if (re != 0) {
        LOGE("Couldn't avformat_open_input\n");
        return re;
    }
    LOGE("OpenInput333");
    re = avformat_find_stream_info(inputContext[indexFormat], NULL);
    av_dump_format(inputContext[indexFormat], 0, filename, 0);
    if (re < 0) {
        LOGE("Couldn't find stream information.%d\n", indexFormat);
        return re;
    }
    LOGE("OpenInput inputContext===%d ---- %d\n", indexFormat,
         inputContext[indexFormat]->nb_streams);
    return 0;
}

// Locates the video stream of inputContext[index], records its index in the
// global `videoIndex`, and opens a decoder in decodeContext[index] (using the
// stream's embedded codec context — deprecated FFmpeg API, kept to match the
// rest of this file).
// Returns avcodec_open2's result (>= 0 on success), or -1 on failure.
int InitDecodeCodec(int index) {
    // BUG FIX: the original always scanned inputContext[0] here, even when
    // initializing the decoder for a different input index.
    for (unsigned int i = 0; i < inputContext[index]->nb_streams; ++i) {
        if (inputContext[index]->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoIndex = i;  // keeps the last video stream, as before
        }
    }
    if (videoIndex < 0) {
        LOGE("find video stream failed");
        return -1;
    }
    AVCodec *decoder = avcodec_find_decoder(
            inputContext[index]->streams[videoIndex]->codec->codec_id);
    if (!decoder) {
        LOGE("can not found decodec \n");
        return -1;
    }
    decodeContext[index] = inputContext[index]->streams[videoIndex]->codec;
    // BUG FIX: the NULL check must come before the dereference — the original
    // logged coded_height first and checked for NULL afterwards.
    if (!decodeContext[index]) {
        LOGE("can not alloc codecContext failed \n");
        return -1;
    }
    LOGE("OpenInput index: %d \n", decodeContext[index]->coded_height);
    // BUG FIX: AV_CODEC_FLAG_TRUNCATED belongs on the *codec* context's flags;
    // the original OR'ed it into the format context's flags field, where the
    // bit means something entirely different (AVFMT_FLAG_* namespace).
    if (decoder->capabilities & AV_CODEC_CAP_TRUNCATED)
        decodeContext[index]->flags |= AV_CODEC_FLAG_TRUNCATED;
    return avcodec_open2(decodeContext[index], decoder, NULL);
}

// Creates an MPEG-TS muxer context in the global `outputCtx` for `filename`,
// adds one output stream (copied from the global encoder `outputCodec`) for
// every non-video input stream of inputContext[0], records the video stream
// index in the global `videoIndex`, and writes the container header.
// NOTE: `pContext` and `avFormatContext` are unused (kept for compatibility
// with existing callers).
// Returns >= 0 on success, a negative value on failure (context freed).
int OpenOutput(AVFormatContext *pContext, AVFormatContext *avFormatContext, char *filename) {
    int ret = avformat_alloc_output_context2(&outputCtx, NULL, "mpegts", filename);
    if (ret < 0) {
        LOGE("alloc output Context failed\n");
        return -1;
    }
    ret = avio_open2(&outputCtx->pb, filename, AVIO_FLAG_WRITE, NULL, NULL);
    if (ret < 0) {
        LOGE("avio open failed \n");
        goto error;  // BUG FIX: free the allocated context instead of leaking it
    }
    LOGE("avio open success %d \n", ret);

    // BUG FIX: the encoder check was duplicated twice with a tautological
    // `outputCodec == NULL || outputCodec == NULL`; one check suffices, and
    // it must route through the error path so the context is released.
    if (outputCodec == NULL) {
        LOGE("outputCodec==NULL  ||outputCodec==NULL\n");
        ret = -1;
        goto error;
    }

    for (unsigned int i = 0; i < inputContext[0]->nb_streams; ++i) {
        if (inputContext[0]->streams[i]->codec->codec_type == AVMediaType::AVMEDIA_TYPE_VIDEO) {
            videoIndex = i;
            continue;
        }
        AVStream *avStream = avformat_new_stream(outputCtx, outputCodec->codec);
        if (avStream == NULL) {
            ret = -1;
            goto error;
        }
        ret = avcodec_copy_context(avStream->codec, outputCodec);
        if (ret < 0) {
            LOGE(" avcodec copy context failed \n");
            goto error;
        }
    }
    // BUG FIX: the header must be written exactly once, after all output
    // streams exist — the original called avformat_write_header inside the
    // per-stream loop.
    ret = avformat_write_header(outputCtx, NULL);
    if (ret < 0) {
        LOGE("write header failed \n");
        goto error;
    }
    return ret;
    error:
    if (outputCtx) {
        // BUG FIX: outputCtx is a muxer context; avformat_close_input is for
        // demuxers. Close the AVIO handle and free the context explicitly.
        if (outputCtx->pb) {
            avio_closep(&outputCtx->pb);
        }
        avformat_free_context(outputCtx);
        outputCtx = NULL;
    }
    return ret;
}

// Allocates and opens the global H.264 encoder context `outputCodec` for a
// width x height output, copying the time base from decodeContext[index].
// NOTE: the `pCodecContext` parameter is unused — the result lives in the
// global (kept in the signature for compatibility with existing callers).
// Returns 1 on success, a negative AVERROR code (or -1) on failure.
int InitEncodec(AVCodecContext *pCodecContext, int width, int height, int index) {
    AVCodec *encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
    LOGE("find encoder");
    if (NULL == encoder) {
        LOGE("find decode failed");
        return -1;
    }
    outputCodec = avcodec_alloc_context3(encoder);
    // BUG FIX: the original dereferenced outputCodec without checking that the
    // allocation succeeded.
    if (outputCodec == NULL) {
        LOGE("avcodec_alloc_context3 failed");
        return -1;
    }

    outputCodec->gop_size = 30;
    // First entry of the encoder's supported-format list is its preferred one.
    outputCodec->pix_fmt = *encoder->pix_fmts;
    outputCodec->time_base.num = decodeContext[index]->time_base.num;
    outputCodec->time_base.den = decodeContext[index]->time_base.den;
    outputCodec->codec_id = encoder->id;
    outputCodec->height = height;
    outputCodec->width = width;

    // Low-latency configuration: no B-frames at all.
    outputCodec->has_b_frames = 0;
    outputCodec->max_b_frames = 0;

    // Speed-oriented x264 settings: no sub-pixel motion estimation, a single
    // reference frame, no scene-cut detection, no trellis quantization.
    outputCodec->me_subpel_quality = 0;
    outputCodec->refs = 1;
    outputCodec->scenechange_threshold = 0;
    outputCodec->trellis = 0;

    // Emit SPS/PPS in the container's extradata rather than in-band.
    outputCodec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    AVDictionary *options = nullptr;
    LOGE("open codec ");
    int ret = avcodec_open2(outputCodec, encoder, &options);
    if (ret < 0) {
        LOGE("open codec failed");
        return ret;
    }
    LOGE("open codec success %d", ret);
    return 1;
}

// Reads the next packet from inputContext[index].
// Returns a self-freeing packet on success, or nullptr on EOF/error (the
// shared_ptr deleter releases the packet in that case too).
shared_ptr<AVPacket> ReadPacketFromSource(int index) {
    // BUG FIX: the original av_malloc'ed the packet and called av_init_packet,
    // which leaves the data/size fields uninitialized garbage. av_packet_alloc
    // zero-initializes the packet and pairs correctly with av_packet_free
    // (the old deleter's trailing av_freep ran on an already-NULLed pointer).
    std::shared_ptr<AVPacket> packet(av_packet_alloc(),
                                     [](AVPacket *p) {
                                         av_packet_free(&p);
                                     });
    if (!packet) {
        LOGE("alloc packet failed");
        return nullptr;
    }
    int ret = av_read_frame(inputContext[index], packet.get());
    if (ret >= 0) {
        LOGE("read packet success");
        return packet;
    }
    LOGE("read packet failed");
    return nullptr;
}

// Decodes one packet into `frame` via decodeContext[index].
// NOTE: the `codecContext` parameter is ignored; the global array is used
// instead (kept in the signature for compatibility with existing callers).
// Returns true when a complete frame was produced, false otherwise.
int DecodeVideo(AVCodecContext *codecContext, AVFrame *frame, AVPacket *packet, int index) {
    int got_frame = 0;
    int ret = avcodec_decode_video2(decodeContext[index], frame, &got_frame, packet);
    // BUG FIX: avcodec_decode_video2 may return 0 bytes consumed and still set
    // got_frame (e.g. delayed frames); the original's `ret > 0` dropped those.
    if (ret >= 0 && got_frame) {
        LOGE("avcodec_decode_video2");
        return true;
    }
    LOGE("decodec videco failed");
    return false;
}

// Closes every stream codec of an *output* (muxer) context and releases the
// context itself. Safe to call with nullptr.
void CloseOutput(AVFormatContext *outputContext) {
    if (outputContext != nullptr) {
        for (unsigned int i = 0; i < outputContext->nb_streams; i++) {
            AVCodecContext *codecContext = outputContext->streams[i]->codec;
            avcodec_close(codecContext);
        }
        // BUG FIX: avformat_close_input is only valid for demuxer contexts
        // opened with avformat_open_input. A muxer context created by
        // avformat_alloc_output_context2 must have its AVIO handle closed and
        // then be freed with avformat_free_context.
        if (outputContext->pb) {
            avio_closep(&outputContext->pb);
        }
        avformat_free_context(outputContext);
    }
}

// Closes a demuxer context previously opened with avformat_open_input.
// A nullptr argument is a no-op.
void CloseInput(AVFormatContext *formatContext) {
    if (formatContext == nullptr) {
        return;
    }
    avformat_close_input(&formatContext);
}

// JNI entry point: opens the video at `videoPath`, decodes it frame by frame,
// converts each frame to RGBA via swscale, and blits it into the supplied
// android.view.Surface until EOF or until the global `play` flag is cleared
// by the _stop entry point. Also calls back into Java on `install`:
// setFileSize(durationSeconds, 0) and realizeSize(width, height).
// Returns 0 on completion, -1 if the native window cannot be obtained.
JNIEXPORT  jint JNICALL Java_com_zagj_videocomparess_EasyVideoPlay_play
        (JNIEnv *env, jobject install, jstring videoPath, jobject surface) {

    //avfilter
    char video_srt[500] = {0};
    // NOTE(review): the buffer from GetStringUTFChars is never released with
    // ReleaseStringUTFChars, so it leaks on every call — confirm and fix.
    sprintf(video_srt, "%s", env->GetStringUTFChars(videoPath, NULL));
    LOGE("open file %s \n", video_srt);

    // FFmpeg global registration (deprecated one-time init; harmless to repeat).
    av_register_all();
    avfilter_register_all();
    avformat_network_init();
    avdevice_register_all();
    // NOTE(review): this context is immediately replaced inside OpenInput,
    // which allocates its own — this allocation is leaked.
    inputContext[0] = avformat_alloc_context();

    OpenInput(inputContext[0], video_srt, 0);
    // OpenInput(inputContext[1], pic_srt, 1);
    InitDecodeCodec(0);
    //avformat_free_context(avFormatContext);

    // InitDecodeCodec(1);
    //InitEncodec(outputCodec, inputContext[0]->streams[0]->codec->width, inputContext[0]->streams[0]->codec->height,0);
    ANativeWindow *aNativeWindow = ANativeWindow_fromSurface(env, surface);
    if (aNativeWindow == 0) {
        LOGE("nativewindow取到失败");
        return -1;
    }
    // Get the video width/height from the opened decoder context.
    int videoWidth = decodeContext[0]->width;
    int videoHeight = decodeContext[0]->height;
    LOGE("videoHeight %d", videoHeight);
    // Match the window buffer to the video size; the window scales to the view.
    ANativeWindow_setBuffersGeometry(aNativeWindow, videoWidth, videoHeight,
                                     WINDOW_FORMAT_RGBA_8888);
    LOGE("ANativeWindow_setBuffersGeometry");

    ANativeWindow_Buffer aNativeWindow_buffer;
    AVFrame *avFrame = av_frame_alloc();      // decoded (native pix_fmt) frame
    AVFrame *avFrameRGBA = av_frame_alloc();  // RGBA frame backed by out_buffer
    // Duration in seconds: stream duration (in time_base units) x time_base,
    // truncated to an integer.
    int64_t duraation = inputContext[0]->streams[videoIndex]->duration*av_q2d(inputContext[0]->streams[videoIndex]->time_base);


    // Report duration and dimensions back to the Java object.
    jclass pJclass = env->GetObjectClass(install);
    jmethodID pID = env->GetMethodID(pJclass, "setFileSize", "(II)V");
    LOGE("duruation : %lld",duraation);
    env->CallVoidMethod(install,pID, static_cast<jint>(duraation),0);

    jmethodID pID2 = env->GetMethodID(pJclass, "realizeSize", "(II)V");
    LOGE("duruation : %lld",duraation);
    env->CallVoidMethod(install,pID2, inputContext[0]->streams[videoIndex]->codecpar->width,inputContext[0]->streams[videoIndex]->codecpar->height);

    // Size of one RGBA frame (1-byte alignment).
    int buffer_size = av_image_get_buffer_size(AV_PIX_FMT_RGBA, decodeContext[0]->width,
                                               decodeContext[0]->height, 1);
    LOGE("av_image_get_buffer_size");
    uint8_t *out_buffer = (uint8_t *) av_malloc(buffer_size * sizeof(uint8_t));
    // Point avFrameRGBA's data/linesize at out_buffer.
    av_image_fill_arrays(avFrameRGBA->data, avFrameRGBA->linesize, out_buffer, AV_PIX_FMT_RGBA,
                         decodeContext[0]->width, decodeContext[0]->height, 1);
    LOGE("av_image_fill_arrays");
    // Associate the buffer with the frame (legacy alternative kept for reference):
    //avpicture_fill((AVPicture *)avFrameRGBA,out_buffer,AV_PIX_FMT_RGBA,decodeContext[0]->width,decodeContext[0]->height);

    // Converter: decoder pix_fmt -> RGBA at the same resolution.
    swsContext = sws_getContext(decodeContext[0]->width, decodeContext[0]->height,
                                decodeContext[0]->pix_fmt,
                                decodeContext[0]->width, decodeContext[0]->height, AV_PIX_FMT_RGBA,
                                SWS_BICUBIC, NULL, NULL, NULL);
    LOGE("sws_getContext");
    // NOTE(review): frame_cnt is never initialized before being incremented.
    int  frame_cnt;
    // Main decode/render loop; `play` is cleared from another thread by _stop.
    while (play) {
        const shared_ptr<AVPacket> ptr = ReadPacketFromSource(0);
        if (ptr == NULL) {
            break;  // EOF or read error
        }
        if (DecodeVideo(decodeContext[0], avFrame, ptr.get(), 0)) {
            // NOTE(review): the stream-index filter runs *after* decoding, so
            // non-video packets are fed to the video decoder too — confirm.
            if (ptr.get()->stream_index == videoIndex) {

                int32_t lock = ANativeWindow_lock(aNativeWindow, &aNativeWindow_buffer, 0);
                if (lock < 0) {
                    LOGE("ANativeWindow_lock");
                    continue;
                }


                // Pixel format conversion into the RGBA frame.
                sws_scale(swsContext, (uint8_t const *const *) avFrame->data,
                          avFrame->linesize, 0, decodeContext[0]->height,
                          avFrameRGBA->data, avFrameRGBA->linesize);
                LOGE("sws_scale");

                // Get the window stride (pixels) and convert to bytes (RGBA = 4).
                uint8_t *dst = (uint8_t *) aNativeWindow_buffer.bits;
                int dstStride = aNativeWindow_buffer.stride * 4;
                LOGE("aNativeWindow_buffer :bits %d ,dstStride:%d ", dst, dstStride);
                uint8_t *src = (uint8_t *) (avFrameRGBA->data[0]);
                int srcStride = avFrameRGBA->linesize[0];
                LOGE("avFrameRGBA :src %d ,srcStride: %d ", src, srcStride);

                // Human-readable picture type for the log line below.
                char pictype_str[10]={0};
                switch(avFrame->pict_type){
                    case AV_PICTURE_TYPE_I:sprintf(pictype_str,"I");break;
                    case AV_PICTURE_TYPE_P:sprintf(pictype_str,"P");break;
                    case AV_PICTURE_TYPE_B:sprintf(pictype_str,"B");break;
                    default:sprintf(pictype_str,"Other");break;
                }
                // NOTE(review): this passes the AVFrame *pointer* to %5d;
                // presumably frame_cnt was intended — confirm and fix.
                LOGI("Frame Index: %5d. Type:%s",avFrame,pictype_str);
                frame_cnt++;
                // The window stride and the frame stride differ, so copy row
                // by row rather than as one block.
                int h;
                for (h = 0; h < videoHeight; h++) {

                   // LOGE("dst size :%d ---src size %d", dst + h * dstStride, src + h * srcStride);
                    memcpy(dst + h * dstStride, src + h * srcStride, srcStride);
                }

                ANativeWindow_unlockAndPost(aNativeWindow);
                LOGE("ANativeWindow_lock");

            }
        }
    }
    end:
    av_free(out_buffer);
    // NOTE(review): frames allocated with av_frame_alloc should be released
    // with av_frame_free, not av_free — confirm and fix.
    av_free(avFrameRGBA);

    // Free the YUV frame
    av_free(avFrame);
    CloseInput(inputContext[0]);
    // CloseOutput(outputCtx);
    std::cout << "Transcode file end!" << endl;
    LOGE("Transcode file end!");
    // NOTE(review): blocks the calling thread for ~10 hours — presumably
    // debug leftover; confirm before shipping.
    this_thread::sleep_for(chrono::hours(10));
    return 0;
}

// JNI entry point: requests the decode/render loop in _play to terminate by
// clearing the global `play` flag. The `videopath` argument is unused.
// NOTE(review): `play` is a plain bool written here and read from the
// playback thread without synchronization — presumably it should be
// std::atomic<bool>; confirm the threading model.
JNIEXPORT jint JNICALL Java_com_zagj_videocomparess_EasyVideoPlay_stop
        (JNIEnv *env, jobject obj, jstring videopath) {
    play = false;
    return 0;
}



