#include <jni.h>
#include <string>
#include <cstdio>
#include <cstring>

#include <android/log.h>
#include <android/native_window_jni.h>
#include <unistd.h>

#define MAX_AUDIO_FRME_SIZE 4800*4

// FFmpeg is a C library; force C linkage for its headers.
extern "C" {
#include "libavcodec/avcodec.h"
#include <libavutil/imgutils.h>
// container/demuxing
#include <libavformat/avformat.h>
// pixel-format conversion / scaling
#include <libswscale/swscale.h>
// audio resampling
#include <libswresample/swresample.h>
}
/*extern "C" JNIEXPORT jstring JNICALL
Java_com_example_ffmpegsharedstudy_MainActivity_stringFromJNI(
        JNIEnv *env,
        jobject *//* this *//*) {
    std::string hello = "Hello from C++";
    return env->NewStringUTF(av_version_info());
}*/
extern "C"
JNIEXPORT void JNICALL
Java_com_example_ffmpegsharedstudy_WPlayer_native_1start(JNIEnv *env, jobject thiz, jstring path_,
                                                         jobject surface) {
    /*
     * Open the media at `path_`, decode its first video stream, convert each
     * frame to RGBA via swscale and blit it onto the given Surface.
     * Runs synchronously on the calling thread; returns on EOF or error.
     */
    ANativeWindow *nativeWindow = ANativeWindow_fromSurface(env, surface);
    const char *path = env->GetStringUTFChars(path_, 0);

    // Needed for network inputs (http/rtmp/...); harmless for local files.
    avformat_network_init();
    AVFormatContext *formatContext = avformat_alloc_context();

    AVDictionary *opts = NULL;
    // Demuxer I/O timeout, in microseconds for most protocols.
    av_dict_set(&opts, "timeout", "3000000", 0);
    int ret = avformat_open_input(&formatContext, path, NULL, &opts);
    av_dict_free(&opts); // FIX: unconsumed option entries were leaked
    if (ret != 0) {
        // FIX: the original early return leaked the window, the format
        // context and the JNI string chars.
        avformat_free_context(formatContext);
        env->ReleaseStringUTFChars(path_, path);
        ANativeWindow_release(nativeWindow);
        return;
    }

    avformat_find_stream_info(formatContext, NULL);

    // Locate the first video stream.
    int video_stream_idx = -1;
    for (unsigned int i = 0; i < formatContext->nb_streams; ++i) {
        if (formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_idx = (int) i;
            break;
        }
    }
    if (video_stream_idx < 0) {
        // FIX: without this guard the original indexed streams[-1].
        avformat_close_input(&formatContext);
        env->ReleaseStringUTFChars(path_, path);
        ANativeWindow_release(nativeWindow);
        return;
    }

    // Build a decoder context from the stream parameters (e.g. h264).
    AVCodecParameters *codecParameters = formatContext->streams[video_stream_idx]->codecpar;
    AVCodec *avCodec = avcodec_find_decoder(codecParameters->codec_id);
    AVCodecContext *codecContext = avcodec_alloc_context3(avCodec);
    avcodec_parameters_to_context(codecContext, codecParameters);
    avcodec_open2(codecContext, avCodec, NULL);

    // Converter: decoder pixel format -> RGBA at the same resolution.
    SwsContext *sws_context = sws_getContext(codecContext->width, codecContext->height,
                                             codecContext->pix_fmt,
                                             codecContext->width, codecContext->height,
                                             AV_PIX_FMT_RGBA, SWS_BILINEAR, 0, 0,
                                             0);

    ANativeWindow_setBuffersGeometry(nativeWindow, codecContext->width,
                                     codecContext->height,
                                     WINDOW_FORMAT_RGBA_8888);
    ANativeWindow_Buffer outBuffer;

    // FIX: allocate the RGBA destination ONCE. The original called
    // av_image_alloc inside the loop and never freed it -> a leak of one
    // full frame buffer per decoded frame.
    uint8_t *dst_data[4];
    int dst_linesize[4];
    av_image_alloc(dst_data, dst_linesize, codecContext->width, codecContext->height,
                   AV_PIX_FMT_RGBA, 1);

    AVPacket *packet = av_packet_alloc();
    // FIX: one reusable frame; the per-iteration frame leaked on every
    // EAGAIN `continue` in the original.
    AVFrame *frame = av_frame_alloc();

    while (av_read_frame(formatContext, packet) >= 0) {
        // FIX: feed the decoder only packets of the selected video stream;
        // the original sent audio/subtitle packets too.
        if (packet->stream_index != video_stream_idx) {
            av_packet_unref(packet);
            continue;
        }
        avcodec_send_packet(codecContext, packet);
        av_packet_unref(packet); // FIX: packets were never unreferenced

        ret = avcodec_receive_frame(codecContext, frame);
        if (ret == AVERROR(EAGAIN)) {
            continue; // decoder needs more input before producing a frame
        } else if (ret < 0) {
            break; // EOF or unrecoverable decode error
        }

        sws_scale(sws_context, frame->data, frame->linesize, 0, frame->height,
                  dst_data, dst_linesize);

        if (ANativeWindow_lock(nativeWindow, &outBuffer, NULL) == 0) {
            uint8_t *dst = static_cast<uint8_t *>(outBuffer.bits);
            uint8_t *src = dst_data[0];
            int dstStride = outBuffer.stride * 4; // window row pitch in bytes (RGBA)
            int srcStride = dst_linesize[0];      // RGBA source row pitch in bytes
            // FIX: copy min(dst, src) bytes per row; the original copied
            // dstStride bytes and could read past the end of each source row
            // whenever the window stride exceeds the image linesize.
            int rowBytes = dstStride < srcStride ? dstStride : srcStride;
            for (int i = 0; i < outBuffer.height; ++i) {
                memcpy(dst + i * dstStride, src + i * srcStride, rowBytes);
            }
            ANativeWindow_unlockAndPost(nativeWindow);
        }
        usleep(1000 * 16); // crude ~60fps pacing, as in the original
    }

    // FIX: the original released only the JNI string; everything below leaked.
    av_frame_free(&frame);
    av_packet_free(&packet);
    av_freep(&dst_data[0]);
    sws_freeContext(sws_context);
    avcodec_free_context(&codecContext);
    avformat_close_input(&formatContext);
    ANativeWindow_release(nativeWindow);
    env->ReleaseStringUTFChars(path_, path);
}
extern "C"
JNIEXPORT void JNICALL
Java_com_example_ffmpegsharedstudy_WAudioPlayer_native_1sound_1start(JNIEnv *env, jobject thiz,
                                                                     jstring input_,
                                                                     jstring output_) {
    /*
     * Decode the first audio stream of `input_`, resample it to 44.1 kHz
     * stereo interleaved signed 16-bit PCM, and write the raw samples to the
     * file at `output_`. Synchronous; returns on EOF or error.
     */
    const char *input = env->GetStringUTFChars(input_, 0);
    const char *output = env->GetStringUTFChars(output_, 0);
    avformat_network_init();

    AVFormatContext *formatContext = avformat_alloc_context();

    if (avformat_open_input(&formatContext, input, NULL, NULL) != 0) {
        // FIX: every early return in the original leaked the JNI string
        // chars and the format context.
        avformat_free_context(formatContext);
        env->ReleaseStringUTFChars(input_, input);
        env->ReleaseStringUTFChars(output_, output);
        return;
    }
    if (avformat_find_stream_info(formatContext, NULL) < 0) {
        avformat_close_input(&formatContext);
        env->ReleaseStringUTFChars(input_, input);
        env->ReleaseStringUTFChars(output_, output);
        return;
    }

    // Locate the first audio stream.
    int audio_stream_idx = -1;
    for (unsigned int i = 0; i < formatContext->nb_streams; ++i) {
        if (formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_stream_idx = (int) i;
            break;
        }
    }
    if (audio_stream_idx < 0) {
        // FIX: no audio stream -> the original indexed streams[-1].
        avformat_close_input(&formatContext);
        env->ReleaseStringUTFChars(input_, input);
        env->ReleaseStringUTFChars(output_, output);
        return;
    }

    AVCodecParameters *codecParameters = formatContext->streams[audio_stream_idx]->codecpar;
    AVCodec *dec = avcodec_find_decoder(codecParameters->codec_id);
    AVCodecContext *codecContext = avcodec_alloc_context3(dec);
    avcodec_parameters_to_context(codecContext, codecParameters);
    avcodec_open2(codecContext, dec, NULL);

    // Resampler input: whatever the decoder produces.
    AVSampleFormat in_sample = codecContext->sample_fmt;
    int in_sample_rate = codecContext->sample_rate;
    uint64_t in_ch_layout = codecContext->channel_layout;

    // Fixed resampler output: 44.1 kHz stereo interleaved s16.
    AVSampleFormat out_sample = AV_SAMPLE_FMT_S16;
    int out_sample_rate = 44100;
    uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;

    SwrContext *swrContext = swr_alloc_set_opts(NULL, out_ch_layout, out_sample,
                                                out_sample_rate, in_ch_layout,
                                                in_sample, in_sample_rate, 0, NULL);
    swr_init(swrContext);

    int out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout);
    const int out_buffer_bytes = 2 * 44100;
    // FIX: swr_convert's output capacity is in samples PER CHANNEL, not
    // bytes. The original passed 2*44100, overstating the 88200-byte
    // buffer's capacity (22050 stereo s16 samples) by a factor of four —
    // a potential heap overflow on large frames.
    const int out_buffer_samples =
            out_buffer_bytes / (out_channel_nb * av_get_bytes_per_sample(out_sample));
    uint8_t *out_buffer = (uint8_t *) av_malloc(out_buffer_bytes);

    FILE *fp_pcm = fopen(output, "wb");
    AVPacket *packet = av_packet_alloc();
    // FIX: one reusable frame; the original allocated a frame per packet
    // and never freed any of them.
    AVFrame *frame = av_frame_alloc();

    if (fp_pcm != NULL) { // FIX: original used a possibly-NULL FILE*
        while (av_read_frame(formatContext, packet) >= 0) {
            // FIX: filter by stream BEFORE avcodec_send_packet; the original
            // fed video packets to the audio decoder and filtered afterwards.
            if (packet->stream_index != audio_stream_idx) {
                av_packet_unref(packet);
                continue;
            }
            avcodec_send_packet(codecContext, packet);
            av_packet_unref(packet); // FIX: packets were never unreferenced

            int ret = avcodec_receive_frame(codecContext, frame);
            if (ret == AVERROR(EAGAIN)) {
                continue; // need more input
            } else if (ret < 0) {
                break; // EOF or decode error
            }

            int converted = swr_convert(swrContext, &out_buffer, out_buffer_samples,
                                        (const uint8_t **) frame->data,
                                        frame->nb_samples);
            if (converted <= 0) {
                continue;
            }
            // FIX: size the write by the number of samples swr_convert
            // actually PRODUCED; frame->nb_samples is the input count and is
            // wrong whenever the sample rates differ.
            int out_buffer_size = av_samples_get_buffer_size(NULL, out_channel_nb,
                                                             converted, out_sample, 1);
            fwrite(out_buffer, 1, out_buffer_size, fp_pcm);
        }
        fclose(fp_pcm);
    }

    av_frame_free(&frame);
    av_packet_free(&packet);
    av_free(out_buffer);
    swr_free(&swrContext);
    avcodec_close(codecContext);
    avcodec_free_context(&codecContext); // FIX: context struct itself leaked
    avformat_close_input(&formatContext);

    env->ReleaseStringUTFChars(input_, input);
    env->ReleaseStringUTFChars(output_, output);
}