//
// Created by Chen qin lang on 2018/6/14.
//
#include <jni.h>
#include <stdio.h>

#include "com_zagj_videocomparess_utils_MediaMuxer.h"
#include <string>
#include <fstream>
#include <iostream>
#include <thread>
#include <memory>
#include <pthread.h>
#include <bits/shared_ptr.h>

extern "C" {
#include "libavformat/avformat.h"
#include <android/log.h>

#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include "libswscale/swscale.h"
#include <libavdevice/avdevice.h>
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"

}
using namespace std;
#define LOGE(format, ...) __android_log_print(ANDROID_LOG_ERROR, "(>_<)", format,  ##__VA_ARGS__)

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC
/**
 * Per-stream muxing state, modeled on FFmpeg's muxing.c example: bundles an
 * output AVStream with its encoder context and scratch frames.
 */
typedef struct OutputStream {
    AVStream *st;           // output stream owned by the muxer context
    AVCodecContext *enc;    // encoder context feeding `st`

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;      // running total of audio samples emitted

    AVFrame *frame;         // frame in the encoder's native format
    AVFrame *tmp_frame;     // staging frame when a format conversion is needed

    float t, tincr, tincr2; // synthetic-signal generator state (muxing.c legacy)

    struct SwsContext *sws_ctx;  // video scaler/pixel-format converter
    struct SwrContext *swr_ctx;  // audio resampler
} OutputStream;
// --- Global pipeline state shared by every function in this file. ---
// Index convention throughout: 0 = video source, 1 = audio source.
AVFormatContext *inputContext[2];     // demuxer contexts for the two inputs
AVFormatContext *outputCtx;           // muxer context for the merged output
AVOutputFormat *fmt;                  // output container format (outputCtx->oformat)
AVCodecContext *decodeContext[2];     // decoder contexts (borrowed from stream->codec)
AVCodecContext *outputCodec;          // H.264 encoder context (InitEncodec)
AVFilterGraph *avFilterGraph;         // overlay filter graph
AVFilterInOut *inputs;                // graph input pads (two buffer sources)
AVFilterInOut *outputs;               // graph output pad (buffersink)
AVFilterContext *pFilterContext[2];   // "buffer" source filters per input
AVFilterContext *outputFilter;        // "buffersink" filter
AVFrame *pSrcFrame[2];                // decoded source frames
AVFrame *pDstFrame;                   // filtered/composited frame to encode
AVFrame *inputFrame[2];               // per-input scratch frames
int videoIndex = -1;                  // video stream index inside its input
int audioindex_a = -1;                // audio stream index inside its input
int videoIndex_out = -1;              // video stream index in the output
int audioIndex_out = -1;              // audio stream index in the output
const char *filter_descr = "overlay=100:100";  // overlay position for the graph

/**
 * Opens the media file `filename` and probes its streams into the global
 * inputContext[indexFormat] slot.
 *
 * @param filename    path/URL of the input media file
 * @param indexFormat slot (0 or 1) in the global inputContext array
 * @return 0 on success, a negative AVERROR code on failure
 */
int OpenInput(char *filename, int indexFormat) {
    LOGE("OpenInput index: %d \n", indexFormat);
    inputContext[indexFormat] = avformat_alloc_context();

    // BUG FIX: the original `if (re = avformat_open_input(...) != 0)` parsed
    // as `re = (call != 0)` because `!=` binds tighter than `=`, so `re` was
    // always 0/1 and the real AVERROR code was lost. Assign first, then test.
    int re = avformat_open_input(&inputContext[indexFormat], filename, NULL, NULL);
    if (re != 0) {
        LOGE("Couldn't avformat_open_input\n");
        return re;
    }
    LOGE("OpenInput333");
    re = avformat_find_stream_info(inputContext[indexFormat], NULL);
    av_dump_format(inputContext[indexFormat], 0, filename, 0);
    if (re < 0) {
        LOGE("Couldn't find stream information.%d\n", indexFormat);
        return re;
    }
    LOGE("OpenInput inputContext===%d ---- %d\n", indexFormat,
         inputContext[indexFormat]->nb_streams);
    return 0;
}

/**
 * Finds the first video stream in inputContext[index], records it in the
 * global `videoIndex`, and opens a decoder for it. The stream's embedded
 * AVCodecContext is borrowed as decodeContext[index] (deprecated FFmpeg 3.x
 * pattern, kept to match the rest of this file).
 *
 * @return 0 if no video stream exists, -1 if no decoder was found,
 *         otherwise the avcodec_open2() result.
 */
int InitVideoDecodeCodec(int index) {
    for (unsigned i = 0; i < inputContext[index]->nb_streams; ++i) {
        if (inputContext[index]->streams[i]->codec->codec_type != AVMEDIA_TYPE_VIDEO) {
            continue;
        }
        videoIndex = (int) i;
        AVCodec *decoder = avcodec_find_decoder(
                inputContext[index]->streams[i]->codec->codec_id);
        if (!decoder) {
            LOGE("can not found decodec \n");
            return -1;
        }
        decodeContext[index] = inputContext[index]->streams[i]->codec;
        LOGE("InitDecodeCodec index: %d \n", decodeContext[index]->coded_height);
        if (!decodeContext[index]) {
            LOGE("can not alloc codecContext failed \n");
            return -1;
        }
        // BUG FIX: AV_CODEC_FLAG_TRUNCATED is an AVCodecContext flag; the
        // original OR-ed it into AVFormatContext.flags (AVFMT_FLAG_* space).
        if (decoder->capabilities & AV_CODEC_CAP_TRUNCATED)
            decodeContext[index]->flags |= AV_CODEC_FLAG_TRUNCATED;
        return avcodec_open2(decodeContext[index], decoder, NULL);
    }
    return 0;
}
/**
 * Finds the first audio stream in inputContext[index], records it in the
 * global `audioindex_a`, and opens a decoder for it. The stream's embedded
 * AVCodecContext is borrowed as decodeContext[index].
 *
 * @return 0 if no audio stream exists, -1 if no decoder was found,
 *         otherwise the avcodec_open2() result.
 */
int InitAudioDecodeCodec(int index) {
    for (unsigned i = 0; i < inputContext[index]->nb_streams; ++i) {
        if (inputContext[index]->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
            continue;
        }
        audioindex_a = (int) i;
        AVCodec *decoder = avcodec_find_decoder(
                inputContext[index]->streams[i]->codec->codec_id);
        if (!decoder) {
            LOGE("can not found decodec \n");
            return -1;
        }
        decodeContext[index] = inputContext[index]->streams[i]->codec;
        LOGE("InitDecodeCodec index: %d \n", decodeContext[index]->coded_height);
        if (!decodeContext[index]) {
            LOGE("can not alloc codecContext failed \n");
            return -1;
        }
        // BUG FIX: AV_CODEC_FLAG_TRUNCATED belongs on the codec context, not
        // on the AVFormatContext (whose flags are AVFMT_FLAG_*).
        if (decoder->capabilities & AV_CODEC_CAP_TRUNCATED)
            decodeContext[index]->flags |= AV_CODEC_FLAG_TRUNCATED;
        return avcodec_open2(decodeContext[index], decoder, NULL);
    }
    return 0;
}
/**
 * Opens a decoder for stream 0 of inputContext[index], regardless of its
 * media type, borrowing the stream's embedded AVCodecContext as
 * decodeContext[index].
 *
 * @return -1 if no decoder was found, otherwise the avcodec_open2() result.
 */
int InitDecodeCodec(int index) {
    AVCodec *decoder = avcodec_find_decoder(inputContext[index]->streams[0]->codec->codec_id);
    if (!decoder) {
        LOGE("can not found decodec \n");
        return -1;
    }
    decodeContext[index] = inputContext[index]->streams[0]->codec;
    LOGE("InitDecodeCodec index: %d \n", decodeContext[index]->coded_height);
    if (!decodeContext[index]) {
        LOGE("can not alloc codecContext failed \n");
        return -1;
    }
    // BUG FIX: AV_CODEC_FLAG_TRUNCATED is a codec-context flag; the original
    // set it on the AVFormatContext instead.
    if (decoder->capabilities & AV_CODEC_CAP_TRUNCATED)
        decodeContext[index]->flags |= AV_CODEC_FLAG_TRUNCATED;
    return avcodec_open2(decodeContext[index], decoder, NULL);
}

int OpenOutput(char *filename) {

    int ret = avformat_alloc_output_context2(&outputCtx, NULL, "flv", filename);
    if (ret < 0) {
        LOGE("alloc output Context failed\n");
        return -1;
    }

    ret = avio_open2(&outputCtx->pb, filename, AVIO_FLAG_WRITE, NULL, NULL);
    if (ret < 0) {
        LOGE("avio open failed \n");
        return -1;
    }
    LOGE("avio open success %d \n", ret);

    for (int i = 0; i < inputContext[0]->nb_streams; ++i) {
        LOGE("nb_streams\n");
        if (inputContext[0]->streams[i]->codec->codec_type == AVMediaType::AVMEDIA_TYPE_AUDIO) {
            LOGE("audio_streams\n");
            continue;
        }

        if (outputCodec == NULL) {
            LOGE("outputCodec==NULL \n");
            return -1;
        }
        AVStream *avStream = avformat_new_stream(outputCtx, decodeContext[0]->codec);
        audioIndex_out = avStream->index;
        LOGE("avformat_new_stream\n");
        ret = avcodec_copy_context(avStream->codec, decodeContext[0]);
        if (ret < 0) {
            LOGE(" avcodec copy context failed \n");
            goto error;
        }
    }
    for (int i = 0; i < inputContext[1]->nb_streams; ++i) {
        LOGE("nb_streams\n");
        if (inputContext[1]->streams[i]->codec->codec_type == AVMediaType::AVMEDIA_TYPE_VIDEO) {
            LOGE("nb_streams\n");
            continue;
        }
        audioindex_a = i;
        LOGE("avformat_new_stream before\n");
        if (outputCodec == NULL) {
            LOGE("outputCodec==NULL  ||outputCodec==NULL\n");
            return -1;
        }
        AVStream *avStream = avformat_new_stream(outputCtx,
                                                decodeContext[1]->codec);
        LOGE("avformat_new_stream\n");
        videoIndex_out = avStream->index;
        ret = avcodec_copy_context(avStream->codec, inputContext[1]->streams[0]->codec);
        if (ret < 0) {
            LOGE(" avcodec copy context failed \n");
            goto error;
        }

    }
    LOGE("avformat_write_header\n");
    ret = avformat_write_header(outputCtx, NULL);
    LOGE("avformat_write_header\n");
    if (ret < 0) {
        LOGE("write header failed \n");
        goto error;
    }
    return ret;
    error:
    if (outputCtx) {
        avformat_close_input(&outputCtx);
    }
    return ret;
}

int InitEncodec(int width, int height, int index) {
    AVCodec *encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
    LOGE("find encoder");
    if (NULL == encoder) {
        LOGE("find decode failed");
        return -1;
    }
    LOGE("find avcodec_alloc_context3b");
    outputCodec = avcodec_alloc_context3(encoder);
    LOGE("find avcodec_alloc_context3a");

    outputCodec->gop_size = 30;
    LOGE("find pix_fmtsa  ");
    outputCodec->pix_fmt = *encoder->pix_fmts;
    LOGE("find pix_fmtsb  ");

    outputCodec->has_b_frames = 0;
    LOGE("find timebasea  ");
    outputCodec->time_base.num = decodeContext[index]->time_base.num;
    outputCodec->time_base.den = decodeContext[index]->time_base.den;
    LOGE("find timebaseb ");

    outputCodec->max_b_frames = 0;
    LOGE("find max_b_frames ");
    outputCodec->codec_id = encoder->id;
    LOGE("find codec_id ");
    outputCodec->height = height;
    LOGE("find height ");

    outputCodec->width = width;
    LOGE("find width ");
    outputCodec->me_subpel_quality = 0;
    LOGE("find me_subpel_quality ");

    outputCodec->refs = 1;
    LOGE("find refs ");

    outputCodec->scenechange_threshold = 0;
    LOGE("find scenechange_threshold ");

    outputCodec->trellis = 0;
    LOGE("find trellis ");

    AVDictionary *options = nullptr;
    LOGE("find AV_CODEC_FLAG_GLOBAL_HEADER  ");
    outputCodec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    LOGE("open codec ");
    int ret = avcodec_open2(outputCodec, encoder, &options);
    LOGE("open codec a");
    if (ret < 0) {
        LOGE("open codec failed");
        return ret;
    }
    LOGE("open codec success %d", ret);
    return 1;
}

/**
 * Creates a "buffer" source filter named `overlayName` describing input
 * `index`'s video geometry/time base, stores it in the global
 * pFilterContext[index], and links it to pad `index` of the parsed overlay
 * graph (global `inputs` chain).
 *
 * NOTE(review): the inoutfilter / pFilterContext1 / filter_graph parameters
 * are ignored; the function operates on the globals — kept for interface
 * compatibility with existing callers.
 *
 * @return >=0 on success, -1 on failure.
 */
int InitInputFilter(AVFilterInOut *inoutfilter, AVFilterContext *pFilterContext1,
                    AVFilterGraph *filter_graph,
                    string overlayName, int index) {
    char args[512];
    auto codec = inputContext[index]->streams[0]->codec;
    // BUG FIX: "video_size" is WxH but the original passed height first.
    // Also guard ticks_per_frame (0 would divide by zero) and bound the write
    // with snprintf instead of sprintf.
    int ticks = codec->ticks_per_frame > 0 ? codec->ticks_per_frame : 1;
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             codec->width, codec->height, codec->pix_fmt, codec->time_base.num,
             codec->time_base.den / ticks,
             codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
    LOGE("codec %s", args);

    // Pick the overlay pad this source feeds: pad 0 (main) for input 0,
    // pad 1 (overlaid picture) for input 1.
    int pad_idx;
    AVFilterContext *padFilterContext;
    if (index == 0) {
        padFilterContext = inputs->filter_ctx;
        pad_idx = inputs->pad_idx;
    } else {
        padFilterContext = inputs->next->filter_ctx;
        pad_idx = inputs->next->pad_idx;
    }

    AVFilter *filter = avfilter_get_by_name("buffer");
    int ret = avfilter_graph_create_filter(&pFilterContext[index], filter, overlayName.c_str(),
                                           args, NULL, avFilterGraph);
    if (ret < 0) {
        LOGE("create filter failed\n");
        return -1;
    }
    ret = avfilter_link(pFilterContext[index], 0, padFilterContext, pad_idx);
    if (ret < 0) {
        LOGE("avfilter_link filter failed\n");
        return -1;
    }
    return ret;
}

/**
 * Creates a "buffersink" filter (stored in the global `outputFilter`) and
 * links the parsed graph's output pad (global `outputs`) to it.
 *
 * NOTE(review): the pOut / pContext / pGraph parameters are ignored; the
 * function works on the globals — signature kept for existing callers.
 *
 * @param filename instance name given to the sink filter
 * @return >=0 on success, the failing FFmpeg error code otherwise
 */
int InitOutputFIlter(AVFilterInOut *pOut, AVFilterContext *pContext, AVFilterGraph *pGraph,
                     const char *filename) {
    LOGE("InitOutputFIlter");
    AVFilterContext *graphOutCtx = outputs->filter_ctx;
    LOGE("padFilterCtx");

    AVFilter *sinkFilter = avfilter_get_by_name("buffersink");
    LOGE("avfilter_get_by_name");

    int status = avfilter_graph_create_filter(&outputFilter, sinkFilter, filename, NULL, NULL,
                                              avFilterGraph);
    if (status < 0) {
        LOGE("graph create failed \n");
        return status;
    }

    status = avfilter_link(graphOutCtx, outputs->pad_idx, outputFilter, 0);
    if (status < 0) {
        LOGE("link failed \n");
        return status;
    }
    return status;
}

// Releases the filter-graph in/out pad descriptors.
// NOTE(review): all three parameters are ignored — this always frees the
// GLOBAL `inputs` chain (second pad first, then the head) and the global
// `outputs` descriptor; confirm callers expect that.
void freeInput(AVFilterInOut *pOut, AVFilterInOut *pInOut, AVFilterInOut *pFilterInOut) {
    avfilter_inout_free(&inputs->next);
    avfilter_inout_free(&inputs);

    avfilter_inout_free(&outputs);
}

/**
 * Reads the next packet from inputContext[index].
 *
 * @return a self-freeing packet on success, nullptr on EOF/error.
 */
shared_ptr<AVPacket> ReadPacketFromSource(int index) {
    // BUG FIX: the original allocated with raw av_malloc (uninitialized
    // memory) and then both av_packet_free'd AND av_freep'd it in the
    // deleter. av_packet_alloc returns a zeroed packet and av_packet_free
    // alone unrefs and releases it. The deleter needs no captures.
    std::shared_ptr<AVPacket> packet(av_packet_alloc(),
                                     [](AVPacket *p) {
                                         av_packet_free(&p);
                                     });
    if (!packet) {
        LOGE("read packet failed");
        return nullptr;
    }

    int ret = av_read_frame(inputContext[index], packet.get());
    if (ret >= 0) {
        LOGE("read packet success");
        return packet;
    } else {
        LOGE("read packet failed");
        return nullptr;
    }
}

/**
 * Decodes one video packet into `frame` using the global decodeContext[index].
 *
 * NOTE(review): the codecContext parameter is ignored in favor of the global;
 * kept for interface compatibility with existing callers.
 *
 * @return true (1) if a complete frame was produced, false (0) otherwise.
 */
int DecodeVideo(AVCodecContext *codecContext, AVFrame *frame, AVPacket *packet, int index) {
    int got_frame = 0;
    int ret = avcodec_decode_video2(decodeContext[index], frame, &got_frame, packet);
    // BUG FIX: avcodec_decode_video2 returns the number of bytes consumed and
    // may legally return 0 while still delivering a frame; only a negative
    // value is an error. The original's `ret > 0` dropped such frames.
    if (ret >= 0 && got_frame) {
        LOGE("avcodec_decode_video2");
        return true;
    } else {
        LOGE("decodec videco failed");
        return false;
    }
}


/**
 * Closes every stream codec of a muxer context and releases the context.
 * Safe to call with nullptr.
 */
void CloseOutput(AVFormatContext *outputContext) {
    if (outputContext != nullptr) {
        for (unsigned i = 0; i < outputContext->nb_streams; i++) {
            AVCodecContext *codecContext = outputContext->streams[i]->codec;
            avcodec_close(codecContext);
        }
        // BUG FIX: avformat_close_input() is for demuxer (input) contexts.
        // An output context must close its AVIO handle and then be freed.
        avio_closep(&outputContext->pb);
        avformat_free_context(outputContext);
    }
}

/**
 * Encodes one video frame with the global `outputCodec` into `packet`.
 *
 * @param pContext unused (kept for interface compatibility)
 * @param pFrame   frame to encode
 * @param packet   receives the encoded packet when 0 is returned
 * @return 0 when a packet was produced, otherwise the encoder's return code.
 */
int EncodecVideo(AVFormatContext *pContext, AVFrame *pFrame, AVPacket *packet) {
    int got_packet = 0;
    // BUG FIX: encode the frame the caller passed in; the original ignored
    // pFrame and always encoded the global pDstFrame.
    int ret = avcodec_encode_video2(outputCodec, packet, pFrame, &got_packet);
    if (ret >= 0 && got_packet) {
        LOGE(" encode frame success\n ");
        return 0;
    }
    LOGE(" encode frame failed 237\n ");
    return ret;
}

/**
 * Closes a demuxer context opened with avformat_open_input.
 * A null argument is a no-op.
 */
void CloseInput(AVFormatContext *formatContext) {
    if (formatContext == nullptr) {
        return;
    }
    avformat_close_input(&formatContext);
}

/**
 * Rescales a packet's pts/dts/duration from time base `src_tb` to `dst_tb`
 * and invalidates its byte position (pos = -1). Timestamps equal to
 * AV_NOPTS_VALUE are left untouched; non-positive durations are kept as-is.
 *
 * @return always 1.
 */
int av_rscal_q(AVRational src_tb, AVRational dst_tb, AVPacket *pkt) {
    if (pkt->pts != AV_NOPTS_VALUE) {
        pkt->pts = av_rescale_q_rnd(pkt->pts, src_tb, dst_tb, AV_ROUND_NEAR_INF);
    }
    LOGE(" encode frame pts\n ");

    if (pkt->dts != AV_NOPTS_VALUE) {
        pkt->dts = av_rescale_q_rnd(pkt->dts, src_tb, dst_tb, AV_ROUND_NEAR_INF);
    }
    LOGE(" encode frame dts\n ");

    if (pkt->duration > 0) {
        pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
    }
    LOGE(" encode frame duration\n ");

    pkt->pos = -1;
    return 1;
}
/* Add an output stream. */
/**
 * Adds an output stream to muxer `oc` for `codec_id` (from FFmpeg's muxing.c
 * example): finds the encoder, creates the stream, allocates the encoder
 * context into ost->enc, and fills in default audio/video parameters.
 * Exits the process on unrecoverable allocation/lookup failure, as the
 * upstream example does.
 *
 * @param ost      output-stream bundle to populate (st, enc)
 * @param oc       muxer context the new stream is added to
 * @param codec    receives the encoder that was found
 * @param codec_id codec to encode with
 */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       const AVCodec **codec,
                       enum AVCodecID codec_id) {
    AVCodecContext *c;
    int i;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    LOGE("avcodec_find_encoder");

    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        LOGE("Could not find encoder for '%s'\n",
             avcodec_get_name(codec_id));
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    LOGE("avformat_new_stream");

    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams - 1;
    c = avcodec_alloc_context3(*codec);
    LOGE("avcodec_alloc_context3");

    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
        case AVMEDIA_TYPE_AUDIO:
            c->sample_fmt = *(*codec)->sample_fmts;
            c->bit_rate = 64000;
            /* Prefer 44.1 kHz if the encoder supports it. */
            c->sample_rate = 44100;
            if ((*codec)->supported_samplerates) {
                c->sample_rate = (*codec)->supported_samplerates[0];
                for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                    if ((*codec)->supported_samplerates[i] == 44100)
                        c->sample_rate = 44100;
                }
            }
            /* BUG FIX: the original computed c->channels from c->channel_layout
             * BEFORE any layout had been assigned (reading an indeterminate
             * field). Set the layout first — preferring stereo — then derive
             * the channel count, matching the upstream muxing.c example. */
            c->channel_layout = AV_CH_LAYOUT_STEREO;
            if ((*codec)->channel_layouts) {
                c->channel_layout = (*codec)->channel_layouts[0];
                for (i = 0; (*codec)->channel_layouts[i]; i++) {
                    if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                        c->channel_layout = AV_CH_LAYOUT_STEREO;
                }
            }
            c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
            ost->st->time_base = (AVRational) {1, c->sample_rate};
            break;

        case AVMEDIA_TYPE_VIDEO:
            c->codec_id = codec_id;
            c->bit_rate = 400000;
            /* Resolution must be a multiple of two. */
            c->width = 352;
            c->height = 288;
            /* timebase: This is the fundamental unit of time (in seconds) in terms
             * of which frame timestamps are represented. For fixed-fps content,
             * timebase should be 1/framerate and timestamp increments should be
             * identical to 1. */
            ost->st->time_base = (AVRational) {1, STREAM_FRAME_RATE};
            c->time_base = ost->st->time_base;

            c->gop_size = 12; /* emit one intra frame every twelve frames at most */
            c->pix_fmt = STREAM_PIX_FMT;
            if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
                /* just for testing, we also add B-frames */
                c->max_b_frames = 2;
            }
            if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
                /* Needed to avoid using macroblocks in which some coeffs overflow.
                 * This does not happen with normal video, it just happens here as
                 * the motion of the chroma plane does not match the luma plane. */
                c->mb_decision = 2;
            }
            break;

        default:
            break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
/**
 * JNI entry point: opens a video source and an audio source, prepares their
 * decoders and an output muxer at `outputPath`, and adds the output streams.
 *
 * TODO(review): the actual demux/decode/encode/mux loop is not implemented
 * yet — this function currently stops after OpenOutput().
 *
 * @return 0 on nominal completion, 1 if no output context could be created.
 */
JNIEXPORT jint JNICALL Java_com_zagj_videocomparess_utils_MediaMuxer_muxer
        (JNIEnv *env, jobject obj, jstring videopath, jstring audioPath, jstring outputPath) {
    OutputStream video_st = {0}, audio_st = {0};
    // BUG FIX: have_*/encode_* were assigned below but never declared
    // anywhere visible in this file; declare them locally.
    int have_video = 0, encode_video = 0, have_audio = 0, encode_audio = 0;
    char video_str[512] = {0};
    char audio_str[512] = {0};
    char output_str[512] = {0};
    // BUG FIX: GetStringUTFChars results must be released, and the copies
    // bounded with snprintf; the original leaked all three and used sprintf.
    const char *vpath = env->GetStringUTFChars(videopath, NULL);
    const char *apath = env->GetStringUTFChars(audioPath, NULL);
    const char *opath = env->GetStringUTFChars(outputPath, NULL);
    snprintf(video_str, sizeof(video_str), "%s", vpath ? vpath : "");
    snprintf(audio_str, sizeof(audio_str), "%s", apath ? apath : "");
    snprintf(output_str, sizeof(output_str), "%s", opath ? opath : "");
    if (vpath) env->ReleaseStringUTFChars(videopath, vpath);
    if (apath) env->ReleaseStringUTFChars(audioPath, apath);
    if (opath) env->ReleaseStringUTFChars(outputPath, opath);
    LOGE("video_str: %s --audio_str: %s --output_str: %s ", video_str, audio_str, output_str);
    av_register_all();
    avformat_network_init();
    avcodec_register_all();
    avdevice_register_all();

    OpenInput(video_str, 0);
    OpenInput(audio_str, 1);
    InitVideoDecodeCodec(0);
    InitAudioDecodeCodec(1);
    avformat_alloc_output_context2(&outputCtx, NULL, NULL, output_str);
    LOGE("avformat_alloc_output_context2");

    if (!outputCtx) {
        LOGE("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&outputCtx, NULL, "mpeg", output_str);
    }
    if (!outputCtx)
        return 1;

    fmt = outputCtx->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, outputCtx, &decodeContext[0]->codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
        LOGE("video_codec\n");
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, outputCtx, &decodeContext[1]->codec, fmt->audio_codec);
        // add_stream(&audio_st, oc, &audio_codec, AV_CODEC_ID_AAC);
        have_audio = 1;
        encode_audio = 1;
        LOGE("audio_codec\n");
    }

    OpenOutput(output_str);
    // BUG FIX: the original declared `AVPacket *packet;` and called
    // av_init_packet(packet) on the uninitialized pointer — undefined
    // behavior. Use a stack packet; the mux loop that would consume it is
    // still TODO.
    AVPacket packet;
    av_init_packet(&packet);

    return 0;
}