#include <jni.h>

#include <android/log.h>
#include <time.h>
#include "libyuv.h"


#define TAG "video_native"
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG,TAG,__VA_ARGS__);
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,TAG,__VA_ARGS__);
#define LOGW(...) __android_log_print(ANDROID_LOG_WARN,TAG,__VA_ARGS__);
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,TAG,__VA_ARGS__);
#define ALOG(level, ...) __android_log_print(level,TAG,__VA_ARGS__);

extern "C" {
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswresample/swresample.h"
#include "libavcodec/jni.h"
#include "libavcodec/mediacodec.h"
#include "libavcodec/bsf.h"
#include "libavutil/opt.h"
}

// Bridge FFmpeg's logging onto Android logcat: map the FFmpeg level to an
// Android priority, format the message, and emit it under our TAG.
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl) {
    int prio;
    if (level <= AV_LOG_ERROR)
        prio = ANDROID_LOG_ERROR;
    else if (level <= AV_LOG_WARNING)
        prio = ANDROID_LOG_WARN;
    else if (level <= AV_LOG_INFO)
        prio = ANDROID_LOG_INFO;
    else if (level <= AV_LOG_VERBOSE)
        prio = ANDROID_LOG_VERBOSE;
    else
        prio = ANDROID_LOG_DEBUG;

    // av_log_format_line tracks whether the previous line ended with '\n'
    // via this flag, so it must persist across calls.
    static int print_prefix = 1;
    char line[1024];

    // Copy the va_list: the caller may reuse vl after we return.
    va_list args;
    va_copy(args, vl);
    av_log_format_line(ptr, level, fmt, args, line, sizeof(line), &print_prefix);
    va_end(args);

    ALOG(prio, "%s", line);
}

extern "C"
JNIEXPORT void JNICALL
Java_com_zgkxzx_vediotest_nativeapi_NativeApi_hello(JNIEnv *env, jobject thiz) {
    // Smoke-test for the native bridge: print a greeting and the version of
    // the FFmpeg build we are linked against.
    LOGD("Hello World，ffmpeg for Android!")
    LOGD("av_version_info info %s", av_version_info())

    // From here on, route all FFmpeg log output through logcat.
    av_log_set_callback(log_callback_null);
}

/*
 * Push one frame (or NULL to flush) into the encoder and append every packet
 * it produces to the already-open output file.
 *
 * Fixes vs. original: a real receive error (anything other than EAGAIN/EOF)
 * used to fall through and fwrite an invalid packet; fwrite's size_t result
 * was printed with %d.
 */
static void encode(AVCodecContext *pCodecCtx, AVFrame *pFrame, AVPacket *pPacket, FILE *p_output_f) {
    int ret;

    ret = avcodec_send_frame(pCodecCtx, pFrame);
    LOGW("encode avcodec_send_frame ret %d", ret)
    while (ret >= 0) {
        ret = avcodec_receive_packet(pCodecCtx, pPacket);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            // No more packets available for now (or stream fully flushed).
            LOGW("encode ret failed ")
            return;
        }
        if (ret < 0) {
            // Genuine encoder error: pPacket holds no valid data, bail out.
            LOGW("encode avcodec_receive_packet error %d", ret)
            return;
        }

        size_t i = fwrite(pPacket->data, 1, pPacket->size, p_output_f);
        LOGW("encode fwrite %zu", i);
        av_packet_unref(pPacket);
    }
}

/*
 * Send one frame (or NULL to flush) to the encoder and write every packet it
 * produces to the muxer.
 *
 * Fixes vs. original: the packet was av_packet_alloc'd on every loop
 * iteration but only unref'd, never freed (memory leak), and the loop exited
 * after draining a single packet, dropping any further output.
 */
static void encodePak(AVCodecContext *pCodecCtx, AVFrame *pFrame, AVFormatContext *outFormentContext) {
    AVPacket *receivePacket = av_packet_alloc();
    if (!receivePacket) {
        LOGW("encodePak av_packet_alloc fail")
        return;
    }

    int sendResult;
    int receiveResult = 0;
    do {
        sendResult = avcodec_send_frame(pCodecCtx, pFrame);
        // Drain every packet the encoder has ready.
        while ((receiveResult = avcodec_receive_packet(pCodecCtx, receivePacket)) == 0) {
            int i = av_write_frame(outFormentContext, receivePacket);
            LOGW("encode av_write_frame %d", i)
            av_packet_unref(receivePacket);
        }
        // If the encoder was full (EAGAIN) the drain above made room:
        // retry sending the same frame.
    } while (sendResult == AVERROR(EAGAIN));

    LOGI("encode avcodec_receive_frame sendResult %d  receiveResult %d ,compare %d", sendResult, receiveResult,AVERROR(EAGAIN))
    av_packet_free(&receivePacket);
}

extern "C"
JNIEXPORT jint
JNICALL
Java_com_zgkxzx_vediotest_nativeapi_NativeApi_convertH264(JNIEnv *env, jobject thiz, jstring srcPath, jstring dstPath) {

    const char *src_path = env->GetStringUTFChars(srcPath, 0);
    const char *dst_path = env->GetStringUTFChars(dstPath, 0);
    LOGD("NativeApi_flip src_path is %s and dst_path is %s ", src_path, dst_path)

    AVFormatContext *avDecodeFormatContext = avformat_alloc_context();
    //open input stream
    if (avformat_open_input(&avDecodeFormatContext, src_path, NULL, NULL) != 0) {
        LOGW("avformat open input error");
        return -1;
    }

    //find stream info
    if (avformat_find_stream_info(avDecodeFormatContext, NULL) < 0) {
        LOGW("avformat_find_stream_info error");
        return -1;
    }

    int video_index = -1;
    //get video codecParams and codec and frame rate
    for (int i = 0; i < avDecodeFormatContext->nb_streams; i++) {
        AVStream *pStream = avDecodeFormatContext->streams[i];
        if (pStream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_index = i;
            break;
        }
    }

    if (video_index == -1) {
        LOGW("not find video stream.");
        return -1;
    }

    AVStream *pAvStream = avDecodeFormatContext->streams[video_index];

    const AVCodec *pDeCodec = avcodec_find_decoder(pAvStream->codecpar->codec_id);
    LOGI("avcodec_find_decoder codec_name %s", pDeCodec->name);

    AVCodecContext *pDeCodecContext = avcodec_alloc_context3(pDeCodec);

    avcodec_parameters_to_context(pDeCodecContext, pAvStream->codecpar);
    float frameRate = pAvStream->avg_frame_rate.num / pAvStream->avg_frame_rate.den;

    if (!pDeCodecContext) {
        LOGW("could not find video codecCtx.");
        return -1;
    }

    if (avcodec_open2(pDeCodecContext, pDeCodec, NULL)) {
        LOGW("cavcodec_open2 error.");
        return -1;
    }

    LOGI("avcodec_open2 success.");

    char info[1000] = {0};
    sprintf(info, "%s[Format    ]%s\n", info, avDecodeFormatContext->iformat->name);
    sprintf(info, "%s[Codec     ]%s\n", info, pDeCodecContext->codec->name);
    sprintf(info, "%s[Resolution]%dx%d\n", info, pDeCodecContext->width, pDeCodecContext->height);
    LOGD("Decode Info:\n %s", info);
    ////////////----------------------------------------------------
    //下面是编码部分

    int ret;
    int width = pDeCodecContext->width;
    int height = pDeCodecContext->height;
    int fps = 25;
    AVCodecID codecId = AV_CODEC_ID_H264;

    AVFormatContext *outFormatContext = avformat_alloc_context();
    ret = avformat_alloc_output_context2(&outFormatContext, NULL, NULL, dst_path);
    if (ret != 0) {
        LOGW("avformat_alloc_output_context2 fail,%d,%s", ret, av_err2str(ret));
        return -1;
    }

    //libx264
    //h264_mediacodec
    //这里转码，可以使用h264编码
    //const AVCodec *encoderCodec = avcodec_find_encoder_by_name("libx264");
    const AVCodec *encoderCodec = avcodec_find_encoder(codecId);//pDecodeParameters->codec_id
    LOGI("avcodec_find_encoder_by_name codec_name=%s", encoderCodec->name);
    if (!encoderCodec) {
        LOGW("avcodec_find_encoder_by_name error, codec_name=%s", encoderCodec->name);
        return -1;
    }

    LOGD("start avformat_new_stream");
    AVStream *outStream = avformat_new_stream(outFormatContext, encoderCodec);
    if (!outStream) {
        LOGW("avformat_new_stream fail");
        return -1;
    }

    AVCodecParameters *pDecodeParameters = pAvStream->codecpar;

    if (avcodec_parameters_copy(outStream->codecpar, pDecodeParameters) < 0) {
        LOGW("avcodec_parameters_copy fail");
        return -1;
    }

    outStream->codecpar->codec_id = codecId;
    //outStream->codecpar->codec_tag = 0;
    outStream->avg_frame_rate = {fps, 1};
    outStream->time_base = (AVRational) {1, fps};
    outStream->start_time = 0;
    outStream->codecpar->width = width;
    outStream->codecpar->height = height;
    outStream->side_data = pAvStream->side_data;
    outStream->nb_side_data = pAvStream->nb_side_data;
    av_dict_copy(&outStream->metadata, pAvStream->metadata, AV_DICT_MATCH_CASE);


    LOGD("start avcodec_find_encoder");

    AVCodecContext *encoderContext = avcodec_alloc_context3(encoderCodec);
    if (!encoderContext) {
        LOGW("avcodec_alloc_context3 error, pDeCodecCtx is NULL");
        return -1;
    }

    avcodec_parameters_to_context(encoderContext, pDecodeParameters);

    //set AVCodecContext parameters
    encoderContext->codec_type = AVMEDIA_TYPE_VIDEO;
    encoderContext->codec_id = codecId;
    encoderContext->bit_rate = pDecodeParameters->bit_rate;
    encoderContext->width = width;
    encoderContext->height = height;
    encoderContext->time_base = (AVRational) {1, fps};
    encoderContext->framerate = pDeCodecContext->framerate;
    encoderContext->gop_size = pDeCodecContext->gop_size;
    encoderContext->max_b_frames = pDeCodecContext->max_b_frames;
    encoderContext->pix_fmt = AV_PIX_FMT_YUV420P;

//    if (encoderCodec->id == AV_CODEC_ID_H264)
//        av_opt_set(encoderContext->priv_data, "preset", "slow", 0);

    LOGW("width and height is  %d %d", width, height);
    //open codec
    int openRet = avcodec_open2(encoderContext, encoderCodec, NULL);
    if (openRet < 0) {
        LOGW("avcodec_open2 error %d %s", openRet, av_err2str(openRet));
        return -1;
    }

    ret = avcodec_parameters_from_context(outStream->codecpar, encoderContext);
    if (ret < 0) {
        LOGW("avcodec_parameters_from_context error %d %s", ret, av_err2str(ret));
        return -1;
    }

    ret = avio_open(&outFormatContext->pb, dst_path, AVIO_FLAG_WRITE);
    if (ret < 0) {
        LOGW("avio_open  error %d %s", ret, av_err2str(ret));
        return -1;
    }
    LOGI("avio_open success.");

    ret = avformat_write_header(outFormatContext, NULL);
    if (ret < 0) {
        LOGW("avformat_write_header error %d %s", ret, av_err2str(ret))
        return -1;
    }

    LOGI("avformat_write_header success.");

    AVPacket *packet = av_packet_alloc();
    AVFrame *pFrame = av_frame_alloc();

    LOGI("sws_getContext before src width %d ,height %d ,pix_fmt %d",pDeCodecContext->width,pDeCodecContext->height,pDeCodecContext->pix_fmt);
    LOGI("sws_getContext before dst width %d ,height %d ,pix_fmt %d",width,height,AV_PIX_FMT_YUV420P);
    SwsContext *img_convert_ctx = sws_getContext(pDeCodecContext->width, pDeCodecContext->height,
                                                 pDeCodecContext->pix_fmt,
                                                                   width, height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);;

    while (av_read_frame(avDecodeFormatContext, packet) >= 0) {
        if (packet->stream_index == video_index) {
            AVPacket *pPacket = av_packet_alloc();
            ret = avcodec_send_packet(pDeCodecContext, packet);
            if (ret < 0) {
                LOGW("avcodec_send_packet error %d %s", ret, av_err2str(ret))
                return -1;
            }
            ret = avcodec_receive_frame(pDeCodecContext, pFrame);
            if (ret != 0) {
                continue;
            }

            AVFrame *YUVFrame = av_frame_alloc();
            unsigned int yuv_size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 1);
            auto *yuv_buffer = static_cast<uint8_t *>(av_malloc(yuv_size * sizeof(uint8_t)));
            av_image_fill_arrays(YUVFrame->data, YUVFrame->linesize, yuv_buffer, AV_PIX_FMT_YUV420P,
                                 width, height, 1);

            YUVFrame->width = width;
            YUVFrame->height = height;
            YUVFrame->format = AV_PIX_FMT_YUV420P;

            int i = av_frame_get_buffer(YUVFrame, 0);
            LOGW("av_frame_get_buffer %d", i)
            int rgba_stride = ((0x01001040 & 0xF0) >> 4) * width;

//            ret = sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pDeCodecContext->height, YUVFrame->data,
//                            YUVFrame->linesize);

            ret = libyuv::ARGBToI420(pFrame->data[0], pFrame->linesize[0], YUVFrame->data[0], YUVFrame->linesize[0],
                                     YUVFrame->data[1], YUVFrame->linesize[1], YUVFrame->data[2], YUVFrame->linesize[2],
                                     width, height);
            LOGW("libyuv::BGRAToI420 result %d %s", ret, av_err2str(ret))
            if (ret == 0) {
                encodePak(encoderContext, YUVFrame, outFormatContext);
            }

        }
    }

    while (true) {
        AVPacket *pPacket = av_packet_alloc();
        if (packet->stream_index == video_index) {
            ret = avcodec_send_packet(pDeCodecContext, packet);
            if (ret < 0) {
                break;
            }
            ret = avcodec_receive_frame(pDeCodecContext, pFrame);
            if (ret != 0) {
                continue;
            }


            AVFrame *YUVFrame = av_frame_alloc();
            unsigned int yuv_size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 1);
            auto *yuv_buffer = static_cast<uint8_t *>(av_malloc(yuv_size * sizeof(uint8_t)));
            av_image_fill_arrays(YUVFrame->data, YUVFrame->linesize, yuv_buffer, AV_PIX_FMT_YUV420P,
                                 width, height, 1);

            YUVFrame->width = width;
            YUVFrame->height = height;
            YUVFrame->format = AV_PIX_FMT_YUV420P;

            int i = av_frame_get_buffer(YUVFrame, 0);
            LOGW("av_frame_get_buffer %d", i)

            int rgba_stride = ((0x01001040 & 0xF0) >> 4) * width;
//            ret = sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pDeCodecContext->height, YUVFrame->data,
//                            YUVFrame->linesize);

            ret = libyuv::ARGBToI420(pFrame->data[0], pFrame->linesize[0], YUVFrame->data[0], YUVFrame->linesize[0],
                                     YUVFrame->data[1], YUVFrame->linesize[1], YUVFrame->data[2], YUVFrame->linesize[2],
                                     width, height);
            LOGW("libyuv::BGRAToI420 result %d %s", ret, av_err2str(ret))
            if (ret == 0) {
                encodePak(encoderContext, YUVFrame, outFormatContext);
            }
        }
    }

    av_write_trailer(outFormatContext);
    avio_close(outFormatContext
                       ->pb);
    return 0;
}

extern "C"
JNIEXPORT jint
// Stub: cutting [startTime, endTime] out of srcPath into dstPath is not
// implemented yet — always returns 0 (success) without touching any file.
JNICALL Java_com_zgkxzx_vediotest_nativeapi_NativeApi_cut(JNIEnv *env, jobject thiz, jstring srcPath, jstring dstPath,
                                                          jint startTime, jint endTime) {
    return 0;
}


//codec_name="libx264"
int encode_yuv_to_h264(const char *output_filePath) {
    AVCodecContext *pCodecCtx = NULL;
    const AVCodec *pCodec = NULL;
    AVPacket *pPacket = NULL;
    AVFrame *pFrame = NULL;
    char codec_name[] = "libx264";
    unsigned char endcode[] = {0x00, 0x00, 0x01, 0x7b};
    FILE *p_output_f = NULL;
    int i, x, y;
    int ret = 0;

    //pCodec = avcodec_find_encoder_by_name(codec_name);
    pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!pCodec) {
        LOGW("avcodec_find_encoder_by_name error, codec_name=%s", codec_name);
        ret = -1;
        goto end;
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx) {
        LOGW("avcodec_alloc_context3 error, pCodecCtx is NULL");
        ret = -1;
        goto end;
    }
    pPacket = av_packet_alloc();
    pFrame = av_frame_alloc();

    //set AVCodecContext parameters
    pCodecCtx->bit_rate = 400000;
    pCodecCtx->width = 352;
    pCodecCtx->height = 288;
    pCodecCtx->time_base = {1, 25};
    pCodecCtx->framerate = {25, 1};
    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    pCodecCtx->gop_size = 10;
    pCodecCtx->max_b_frames = 1;
    pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
    if (pCodec->id == AV_CODEC_ID_H264)
        av_opt_set(pCodecCtx->priv_data, "preset", "slow", 0);

    //open codec
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        ret = -1;
        LOGW("avcodec_open2 error");
        goto end;
    }

    pFrame->format = pCodecCtx->pix_fmt;
    pFrame->width = pCodecCtx->width;
    pFrame->height = pCodecCtx->height;
    //Allocate new buffer(s) for audio or video data.
    if (av_frame_get_buffer(pFrame, 32) < 0) {
        LOGW("av_frame_get_buffer error");
        ret = -1;
        goto end;
    }

    //open output_file
    //fp_yuv= fopen(dst_path,"wb+");
    p_output_f = fopen(output_filePath, "wb");
    //fopen(&p_output_f, output_filePath, "wb");
    if (!p_output_f) {
        ret = -1;
        goto end;
    }

    //encode 5 seconds of video
    for (i = 0; i < 25 * 5; i++) {
        fflush(stdout);

        //make sure the frame data is writeable
        if (av_frame_is_writable(pFrame) < 0) {
            ret = -1;
            goto end;
        }

        //Y
        for (y = 0; y < pCodecCtx->height; y++) {
            for (x = 0; x < pCodecCtx->width; x++) {
                pFrame->data[0][y * pFrame->linesize[0] + x] = x + y + i * 3;
            }
        }
        //Y and V
        for (y = 0; y < pCodecCtx->height / 2; y++) {
            for (x = 0; x < pCodecCtx->width / 2; x++) {
                pFrame->data[1][y * pFrame->linesize[1] + x] = 128 + y + i * 2;
                pFrame->data[2][y * pFrame->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        pFrame->pts = i;

        //encode this img
        encode(pCodecCtx, pFrame, pPacket, p_output_f);
    }

    //flush the encoder
    encode(pCodecCtx, NULL, pPacket, p_output_f);

    //add sequence end code to have a real MPEG file
    fwrite(endcode, 1, sizeof(endcode), p_output_f);

    fclose(p_output_f);

    end:
    if (pCodecCtx)
        avcodec_free_context(&pCodecCtx);
    if (pPacket)
        av_packet_free(&pPacket);
    if (pFrame)
        av_frame_free(&pFrame);
    printf("=============== encode_yuv_to_h264 done ===============\n");
    return ret;
}

extern "C"
JNIEXPORT jint
JNICALL
Java_com_zgkxzx_vediotest_nativeapi_NativeApi_clip(JNIEnv *env, jobject thiz, jstring srcPath, jstring dstPath,
                                                   jint dstWidth, jint dstHeight) {
    const char *src_path = env->GetStringUTFChars(srcPath, 0);
    const char *dst_path = env->GetStringUTFChars(dstPath, 0);
    LOGD("NativeApi_flip src_path is %s and dst_path is %s and dstWidth %d dstHeight %d", src_path,
         dst_path, dstWidth, dstHeight)
    AVFormatContext *avFormatContext = avformat_alloc_context();
    //open input stream
    if (avformat_open_input(&avFormatContext, src_path, NULL, NULL) != 0) {
        LOGW("avformat open input error");
        return -1;
    }

    //find stream info
    if (avformat_find_stream_info(avFormatContext, NULL) < 0) {
        LOGW("avformat_find_stream_info error");
        return -1;
    }

    int video_index = -1;
    //get video codecParams and codec and frame rate
    for (int i = 0; i < avFormatContext->nb_streams; i++) {
        AVStream *pStream = avFormatContext->streams[i];
        if (pStream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_index = i;
            break;
        }
    }

    if (video_index == -1) {
        LOGW("not find video stream.");
        return -1;
    }

    AVStream *pAvStream = avFormatContext->streams[video_index];
    AVMediaType type = pAvStream->codecpar->codec_type;
    //LOGI("Find video stream type %d",type);
    const AVCodec *pCodec = avcodec_find_decoder(
            pAvStream->codecpar->codec_id);

    AVCodecContext *pCodecContext = avcodec_alloc_context3(pCodec);

    avcodec_parameters_to_context(pCodecContext, pAvStream
            ->codecpar);
    float frameRate = pAvStream->avg_frame_rate.num / pAvStream->avg_frame_rate.den;

    if (!pCodecContext) {
        LOGW("could not find video codecCtx.");
        return -1;
    }

    //open codec
    if (avcodec_open2(pCodecContext, pCodec, NULL)) {
        LOGW("cavcodec_open2 error.");
        return -1;
    }

    SwsContext *swsContext = sws_getContext(pCodecContext->width,
                                            pCodecContext->height, pCodecContext->pix_fmt,
                                            pCodecContext->width, pCodecContext->height,
                                            AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

    AVPacket *avPacket = NULL;
    AVFrame *pFrame = NULL, *pYUVFrame = NULL;

    avPacket = av_packet_alloc();
    pFrame = av_frame_alloc();
    pYUVFrame = av_frame_alloc();

    int y_size;
    int ret;
    FILE *fp_yuv;
    int frame_cnt = 0;

    fp_yuv = fopen(dst_path, "wb+");
    if (fp_yuv == NULL) {
        LOGW("Cannot open output file.");
        return -1;
    }

    uint8_t *out_buffer = (unsigned char *) av_malloc(
            av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecContext->width,
                                     pCodecContext->height, 1));
//av_image_fill_arrays() 把AVFrame的data成员关联到某个地址空间
//此处是为后面av_read_frame()和sws_scale()处理后的帧信息输出提供存储位置
    av_image_fill_arrays(pYUVFrame
                                 ->data, pYUVFrame->linesize, out_buffer,
                         AV_PIX_FMT_YUV420P, pCodecContext->width, pCodecContext->height, 1);

    while (
            av_read_frame(avFormatContext, avPacket
            ) >= 0) {
        if (avPacket->stream_index == video_index) {
            int send_ret = avcodec_send_packet(pCodecContext, avPacket);
            \
            if (send_ret < 0) {
                LOGW("send_ret %d", send_ret);
                break;
            }
            int receive_ret = 0;

            receive_ret = avcodec_receive_frame(pCodecContext, pFrame);
            LOGD("receive_ret %d", receive_ret);
            if (receive_ret != 0) {
                continue;
            }

            sws_scale(swsContext,
                      (const unsigned char *const *) pFrame->data,
                      pFrame->linesize, 0, pCodecContext->height, pYUVFrame->data,
                      pYUVFrame->linesize);
            fwrite(pYUVFrame
                           ->data[0], 1, y_size, fp_yuv);      // Y
            fwrite(pYUVFrame
                           ->data[1], 1, y_size / 4, fp_yuv);  // U
            fwrite(pYUVFrame
                           ->data[2], 1, y_size / 4, fp_yuv);  // V

            char pic_type_str[10] = {0};
            switch (pFrame->pict_type) {
                case AV_PICTURE_TYPE_I:
                    strcpy(pic_type_str,
                           "I");
                    break;
                case AV_PICTURE_TYPE_P:
                    strcpy(pic_type_str,
                           "P");
                    break;
                case AV_PICTURE_TYPE_B:
                    strcpy(pic_type_str,
                           "B");
                    break;
                default:
                    strcpy(pic_type_str,
                           "Other");
                    break;

            }
            LOGD("Frame Index:%5d.Type:%s", frame_cnt, pic_type_str);
            frame_cnt++;
        }
    }

//flush decoder
    while (true) {
        ret = avcodec_send_packet(pCodecContext, avPacket);
        if (ret < 0) {
            break;
        }
        ret = avcodec_receive_frame(pCodecContext, pFrame);
        if (ret != 0) {
            continue;
        }
        sws_scale(swsContext, pFrame
                          ->data, pFrame->linesize, 0, pCodecContext->height,
                  pYUVFrame->data, pYUVFrame->linesize);
        y_size = pCodecContext->width * pCodecContext->height;
        fwrite(pYUVFrame
                       ->data[0], 1, y_size, fp_yuv);      // Y
        fwrite(pYUVFrame
                       ->data[1], 1, y_size / 4, fp_yuv);  // U
        fwrite(pYUVFrame
                       ->data[2], 1, y_size / 4, fp_yuv);  // V

//Output info
        char pictype_str[10] = {0};
        switch (pFrame->pict_type) {
            case AV_PICTURE_TYPE_I:
                strcpy(pictype_str,
                       "I");
                break;
            case AV_PICTURE_TYPE_P:
                strcpy(pictype_str,
                       "P");
                break;
            case AV_PICTURE_TYPE_B:
                strcpy(pictype_str,
                       "B");
                break;
            default:
                strcpy(pictype_str,
                       "Other");
        }
        LOGD("Frame Index: %5d.  Type:%s", frame_cnt, pictype_str);
        frame_cnt++;
    }

    LOGD("start encode AVFrame.");

//    SwsContext *swsContext = sws_getContext(pCodecContext->width,
//                                          pCodecContext->height, pCodecContext->pix_fmt,
//                                          pCodecContext->width, pCodecContext->height,
//                                          AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

//    av_dump_format(avFormatContext,video_index,src_path,0);
//TODO
//注意 目前做功能测试没有考虑释放情况，后续不上 目前demo验证api功能
    return 0;

}
/*
 * Demo: configure and open a zero-latency H.264 software encoder.  Nothing is
 * encoded yet; returns 0 when the encoder opens successfully, -1 on error.
 *
 * Fixes vs. original: AV_CODEC_ID_H264 (an enum) was passed to a "%s" format
 * specifier — undefined behavior, likely a crash; the codec context, packet
 * and JNI strings were never released.
 */
extern "C"
JNIEXPORT jint
JNICALL
Java_com_zgkxzx_vediotest_nativeapi_NativeApi_clip2(JNIEnv *env, jobject thiz, jstring srcPath, jstring dstPath,
                                                    jint dstWidth, jint dstHeight) {
    int result = -1;
    int ret;
    const AVCodec *kAv_Codec_ = NULL;
    AVCodecContext *codec_context_ = NULL;
    AVPacket *pPacket = NULL;

    const char *src_path = env->GetStringUTFChars(srcPath, 0);
    const char *dst_path = env->GetStringUTFChars(dstPath, 0);
    LOGD("NativeApi_flip src_path is %s and dst_path is %s and dstWidth %d dstHeight %d", src_path,
         dst_path, dstWidth, dstHeight)

    kAv_Codec_ = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!kAv_Codec_) {
        // Log the codec id as an integer — "%s" on an enum was UB.
        LOGW("Codec %d not found", AV_CODEC_ID_H264);
        goto end;
    }

    codec_context_ = avcodec_alloc_context3(kAv_Codec_);
    if (!codec_context_) {
        LOGW("Could not allocate video codec context");
        goto end;
    }

    pPacket = av_packet_alloc();

    /* put sample parameters */
    codec_context_->bit_rate = 1000000;
    /* resolution must be a multiple of two */
    codec_context_->width = 640;
    codec_context_->height = 480;
    /* frames per second */
    codec_context_->time_base = (AVRational) {1, 25};
    codec_context_->framerate = (AVRational) {25, 1};
    codec_context_->gop_size = 10;
    codec_context_->max_b_frames = 1;
    codec_context_->pix_fmt = AV_PIX_FMT_YUV420P;
    if (kAv_Codec_->id == AV_CODEC_ID_H264) {
        av_opt_set(codec_context_->priv_data, "preset", "superfast", 0);
        //设置0延迟，实时编码h264视频
        av_opt_set(codec_context_->priv_data, "tune", "zerolatency", 0);
    }

    /* open it */
    ret = avcodec_open2(codec_context_, kAv_Codec_, NULL);
    if (ret < 0) {
        LOGW("Could not open codec %d", ret);
        goto end;
    }

    LOGD("encode_yuv_to_h264 finish.");
    result = 0;

    end:
    av_packet_free(&pPacket);
    avcodec_free_context(&codec_context_);
    env->ReleaseStringUTFChars(srcPath, src_path);
    env->ReleaseStringUTFChars(dstPath, dst_path);
    return result;
}


extern "C"
JNIEXPORT jint
JNICALL
Java_com_zgkxzx_vediotest_nativeapi_NativeApi_clip3(JNIEnv *env, jobject thiz, jstring src_path, jstring dst_path,
                                                    jint dst_width, jint dst_height) {
//定义相关结构体变量
    AVFormatContext *pDecodeFormatCtx;
    int videoIndex;
    AVCodecContext *pDeCodecCtx;
    AVCodecParameters *pDeCodecpar;
    const AVCodec *pDeCodec;
    AVFrame *pFrame, *pFrameYUV;
    AVPacket *packet;

    int ret;
    struct SwsContext *img_convert_ctx;
    FILE *fp_yuv;
    int frame_cnt;
    clock_t time_start, time_finish;
    double time_duration = 0.0;

    char input_str[500] = {0};
    char output_str[500] = {0};
    char info[1000] = {0};
//接收传入的视频路径和待输出的yuv文件路径
    sprintf(input_str, "%s", env->GetStringUTFChars(src_path, NULL));
    sprintf(output_str, "%s", env->GetStringUTFChars(dst_path, NULL));
    LOGD("Info:\n%s -> %s", input_str, output_str);

    pDecodeFormatCtx = avformat_alloc_context(); //创建AVFormatContext结构体。

//avformat_open_input()为AVFormatContext分配内存，
//探测视频文件的封装格式并将视频源加入内部buffer中，最后读取视频头信息
    if (avformat_open_input(&pDecodeFormatCtx, input_str, NULL, NULL) != 0) {
        LOGE("Couldn't open input stream.\n");
        return -1;
    }

    if (avformat_find_stream_info(pDecodeFormatCtx, NULL) < 0) {
        LOGE("Couldn't find stream information.\n");
        return -1;
    }

    videoIndex = av_find_best_stream(pDecodeFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (videoIndex < 0) {
        LOGE("Couldn't find a video stream.\n");
        return -1;
    }

    AVStream *pStream = pDecodeFormatCtx->streams[videoIndex];
    pDeCodecpar = pStream->codecpar;

    pDeCodec = avcodec_find_decoder(pDeCodecpar->codec_id); //寻找合适的解码器
    if (pDeCodec == NULL) {
        LOGE("Couldn't find Codec, codec is NULL.\n");
        return -1;
    }

    pDeCodecCtx = avcodec_alloc_context3(pDeCodec); //为AVCodecContext分配内存
    if (pDeCodecCtx == NULL) {
        LOGE("Couldn't allocate decoder context.\n");
        return -1;
    }

    //avcodec_parameters_to_context()真正对AVCodecContext执行了内容拷贝
    if (avcodec_parameters_to_context(pDeCodecCtx, pDeCodecpar) < 0) {
        LOGE("Couldn't copy decoder context.\n");
        return -1;
    }

    LOGD("start avcodec_open2 pDeCodecCtx");
    //avcodec_open2打开解码器
    if (avcodec_open2(pDeCodecCtx, pDeCodec, NULL) < 0) {
        LOGE("Couldn't open Codec.\n");
        return -1;
    }

    //TODO 测试使用
    int width, height;
    width = pDeCodecCtx->width / 2;
    height = pDeCodecCtx->height / 2;

    pFrame = av_frame_alloc();
    packet = (AVPacket *) av_malloc(sizeof(AVPacket));

    //    img_convert_ctx = sws_getContext(pDeCodecCtx->width, pDeCodecCtx->height, pDeCodecCtx->pix_fmt,
    //                                     width, height, AV_PIX_FMT_YUV420P,
    //                                     SWS_BICUBIC, NULL, NULL, NULL);

    sprintf(info, "%s[Output    ]%s\n", info, output_str);
    sprintf(info, "%s[Format    ]%s\n", info, pDecodeFormatCtx->iformat->name);
    // ------------------------------------------------------------------
    // NOTE(review): tail of a larger JNI transcode routine — its signature
    // and the declarations of info/ret/width/height/pStream/pDeCodecpar/
    // pDeCodecCtx/pDecodeFormatCtx/packet/pFrame/pFrameYUV/img_convert_ctx/
    // fp_yuv/output_str/videoIndex live above this chunk.  It opens an
    // output muxer, re-encodes decoded+scaled frames, and logs timing stats.
    // NOTE(review): every early `return -1` below leaks whatever was opened
    // up to that point — goto-based cleanup would fix this.
    // ------------------------------------------------------------------

    // NOTE(review): passing `info` as both the destination and a %s source of
    // sprintf is undefined behavior (overlapping objects) — prefer
    // snprintf(info + strlen(info), ...) here and in the stats section below.
    sprintf(info, "%s[Codec     ]%s\n", info, pDeCodecCtx->codec->name);
    sprintf(info, "%s[Resolution]%dx%d\n", info, pDeCodecCtx->width, pDeCodecCtx->height);
    LOGD("Info2:\n %s", info);

    // NOTE(review): avformat_alloc_output_context2() allocates its own context
    // and overwrites this pointer, so the avformat_alloc_context() result on
    // the next line is leaked — that first call should simply be dropped.
    AVFormatContext *outFormatContext = avformat_alloc_context();
    ret = avformat_alloc_output_context2(&outFormatContext, NULL, NULL, output_str);
    if (ret != 0) {
        LOGW("avformat_alloc_output_context2 fail,%d,%s", ret, av_err2str(ret));
        return -1;
    }

    // Create the single video stream of the output file.
    LOGD("start avformat_new_stream");
    AVStream *outStream = avformat_new_stream(outFormatContext, NULL);
    if (!outStream) {
        LOGW("avformat_new_stream fail");
        return -1;
    }

    // Seed the stream from the decoder's parameters, then override below.
    if (avcodec_parameters_copy(outStream->codecpar, pDeCodecpar) < 0) {
        LOGW("avcodec_parameters_copy fail");
        return -1;
    }

    int cutTargetHeight = height;
    int fps = 25; // NOTE(review): hard-coded; the input's real frame rate is ignored

    outStream->codecpar->codec_tag = 0; // 0 lets the output muxer pick its own tag
    outStream->codecpar->codec_id = pDeCodecpar->codec_id;
    outStream->avg_frame_rate = {fps, 1};
    outStream->time_base = (AVRational) {1, fps};
    outStream->start_time = 0;
    outStream->codecpar->width = width;
    outStream->codecpar->height = cutTargetHeight;
    // NOTE(review): shallow-copying the side_data pointer array means both
    // format contexts believe they own the same buffers — double-free risk
    // on close.  Deep-copy via the av_stream side-data helpers instead.
    outStream->side_data = pStream->side_data;
    outStream->nb_side_data = pStream->nb_side_data;
    av_dict_copy(&outStream->metadata, pStream->metadata, AV_DICT_MATCH_CASE);

    frame_cnt = 0;
    time_start = clock();
    AVPacket *pPacket = av_packet_alloc(); // NOTE(review): re-allocated in the loops below; this one leaks

    // NOTE(review): encoderCodec->name is dereferenced (in LOGI and even in
    // the error LOGW) before the NULL check — if no encoder exists for this
    // codec_id the process crashes before the check can fire.  Log the name
    // only after the NULL check, and log the codec_id in the error branch.
    LOGD("start avcodec_find_encoder");
    const AVCodec *encoderCodec = avcodec_find_encoder(pDeCodecpar->codec_id);
    LOGI("avcodec_find_encoder_by_name codec_name=%s", encoderCodec->name);
    if (!encoderCodec) {
        LOGW("avcodec_find_encoder_by_name error, codec_name=%s", encoderCodec->name);
        return -1;
    }

    AVCodecContext *encoderContext = avcodec_alloc_context3(encoderCodec);
    if (!encoderContext) {
        LOGW("avcodec_alloc_context3 error, pDeCodecCtx is NULL");
        return -1;
    }

    // Start from the decoder's parameters...
    avcodec_parameters_to_context(encoderContext, pDeCodecpar);

//set AVCodecContext parameters (override the fields that differ for the re-encode)
    encoderContext->codec_type = AVMEDIA_TYPE_VIDEO;
    encoderContext->bit_rate = pDeCodecCtx->bit_rate;
    encoderContext->codec_id = pDeCodecCtx->codec_id;
    encoderContext->width = width;
    encoderContext->height = cutTargetHeight;
    encoderContext->time_base = (AVRational) {1, fps};
    encoderContext->framerate = pDeCodecCtx->framerate;
    encoderContext->gop_size = pDeCodecCtx->gop_size;
    encoderContext->max_b_frames = pDeCodecCtx->max_b_frames;
    encoderContext->pix_fmt = AV_PIX_FMT_YUV420P; // sws_scale below converts into this format

    // x264 private option: slower preset trades speed for compression.
    if (encoderCodec->id == AV_CODEC_ID_H264)
        av_opt_set(encoderContext->priv_data, "preset", "slow", 0);

    LOGW("width and height is  %d %d", width, cutTargetHeight);
//open codec
    int openRet = avcodec_open2(encoderContext, encoderCodec, NULL);
    if (openRet < 0) {
        LOGW("avcodec_open2 error %d %s", openRet, av_err2str(openRet));
        return -1;
    }

    // Publish the final encoder parameters (incl. extradata) on the stream.
    ret = avcodec_parameters_from_context(outStream->codecpar, encoderContext);
    if (ret < 0) {
        LOGW("avcodec_parameters_from_context error %d %s", ret, av_err2str(ret));
        return -1;
    }

    ret = avio_open(&outFormatContext->pb, output_str, AVIO_FLAG_WRITE);
    if (ret < 0) {
        LOGW("avio_open  error %d %s", ret, av_err2str(ret));
        return -1;
    }

    ret = avformat_write_header(outFormatContext, NULL);
    if (ret < 0) {
        LOGW("avformat_write_header error %d %s", ret, av_err2str(ret));
        return -1;
    }

// Repeatedly call av_read_frame() on the open AVFormatContext; each
// successful call returns one AVPacket holding the encoded data of one
// AVStream.
    while (av_read_frame(pDecodeFormatCtx, packet) >= 0) {
        if (packet->stream_index == videoIndex) {
            // NOTE(review): a fresh packet is allocated per video packet and
            // never freed — leak.  Reuse one packet with av_packet_unref().
            pPacket = av_packet_alloc();
            ret = avcodec_send_packet(pDeCodecCtx, packet);
            if (ret < 0) {
                LOGW("avcodec_send_packet Decode error.\n");
                return -1;
            }
            ret = avcodec_receive_frame(pDeCodecCtx, pFrame);
            if (ret != 0) {
// NOTE(review): ret == -11 is AVERROR(EAGAIN): the decoder just needs more
// input before it can emit a frame — expected for codecs with delay, not a
// mystery.  But (a) this `continue` skips the av_packet_unref() at the
// bottom of the loop, leaking the packet payload, and (b) a single receive
// per send drops frames whenever the decoder emits more than one; drain in
// a loop until EAGAIN instead.
                continue;
            }
// sws_scale() converts/scales the decoded frame into pFrameYUV (YUV420P).
            if (
                    sws_scale(img_convert_ctx, pFrame
                                      ->data, pFrame->linesize, 0, pDeCodecCtx->height,
                              pFrameYUV->data, pFrameYUV->linesize) > 0) {

                // NOTE(review): encode() writes its packets to the raw FILE*
                // — they never pass through av_interleaved_write_frame() into
                // outFormatContext, so the muxed output file gets a header
                // and trailer but no packets.
                encode(encoderContext, pFrameYUV, pPacket, fp_yuv
                );

//Output info: map the decoded picture type to a printable label.
                char pictype_str[10] = {0};
                switch (pFrame->pict_type) {
                    case AV_PICTURE_TYPE_I:
                        strcpy(pictype_str,
                               "I");
                        break;
                    case AV_PICTURE_TYPE_P:
                        strcpy(pictype_str,
                               "P");
                        break;
                    case AV_PICTURE_TYPE_B:
                        strcpy(pictype_str,
                               "B");
                        break;
                    default:
                        strcpy(pictype_str,
                               "Other");
                }
                LOGD("Frame Index: %5d.  Type:%s", frame_cnt, pictype_str);
                frame_cnt++;
            }
        }
        av_packet_unref(packet);
    }


//flush decoder
    // NOTE(review): `packet` was unref'd above, so sending it acts as the
    // flush (NULL-data) packet; the loop terminates when avcodec_send_packet
    // returns AVERROR_EOF (< 0).  The per-iteration av_packet_alloc() leaks,
    // and the ENCODER is never flushed (see the commented-out encode(...,
    // NULL, ...) below), so its delayed frames are lost.
    while (true) {
        pPacket = av_packet_alloc();
        ret = avcodec_send_packet(pDeCodecCtx, packet);
        if (ret < 0) {
            break;
        }
        ret = avcodec_receive_frame(pDeCodecCtx, pFrame);
        if (ret != 0) {
            continue;
        }
        sws_scale(img_convert_ctx, pFrame
                          ->data, pFrame->linesize, 0, pDeCodecCtx->height,
                  pFrameYUV->data, pFrameYUV->linesize);
//        y_size = pDeCodecCtx->width * pDeCodecCtx->height;
//        fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);      // Y
//        fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  // U
//        fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  // V

        encode(encoderContext, pFrameYUV, pPacket, fp_yuv
        );

//Output info: same picture-type logging as the main loop above.
        char pictype_str[10] = {0};
        switch (pFrame->pict_type) {
            case AV_PICTURE_TYPE_I:
                strcpy(pictype_str,
                       "I");
                break;
            case AV_PICTURE_TYPE_P:
                strcpy(pictype_str,
                       "P");
                break;
            case AV_PICTURE_TYPE_B:
                strcpy(pictype_str,
                       "B");
                break;
            default:
                strcpy(pictype_str,
                       "Other");
        }
        LOGD("Frame Index: %5d.  Type:%s", frame_cnt, pictype_str);
        frame_cnt++;
    }

//flush the encoderCodec
//encode(encoderContext, NULL, pPacket, fp_yuv);
//fwrite(endcode, 1, sizeof(endcode), fp_yuv);
//fclose(fp_yuv);
    av_write_trailer(outFormatContext);
    avio_close(outFormatContext
                       ->pb);

    // NOTE(review): clock() returns ticks, not milliseconds — divide by
    // CLOCKS_PER_SEC (and scale by 1000) before labelling the value "ms".
    time_finish = clock();
    time_duration = (double) (time_finish - time_start);

    sprintf(info,
            "%s[Time      ]%fms\n", info, time_duration);
    sprintf(info,
            "%s[Count     ]%d\n", info, frame_cnt);

    LOGD("Info:\n%s", info);


// Paired with sws_getContext().
    sws_freeContext(img_convert_ctx);

    fclose(fp_yuv);

    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    // NOTE(review): avcodec_close() is deprecated — prefer
    // avcodec_free_context().  encoderContext, outFormatContext
    // (avformat_free_context), pPacket and packet are never released here.
    avcodec_close(pDeCodecCtx);
// Paired with avformat_open_input().
    avformat_close_input(&pDecodeFormatCtx);

    return 0;
}