#include <jni.h>
#include <android/log.h>
#include <android/native_window_jni.h>
#include <android/native_window.h>
//resampling
#include <libswresample/swresample.h>
//decoding
#include "ffmpeg_include/libavcodec/avcodec.h"
//container (demuxing) format handling
#include "ffmpeg_include/libavformat/avformat.h"
//pixel processing
#include "ffmpeg_include/libswscale/swscale.h"

// Logcat helpers tagged "cherry_ndk_file"; FORMATTER is a printf-style format string.
#define LOGI(FORMATTER, ...) __android_log_print(ANDROID_LOG_INFO,"cherry_ndk_file",FORMATTER,__VA_ARGS__)
#define LOGE(FORMATTER, ...) __android_log_print(ANDROID_LOG_ERROR,"cherry_ndk_file",FORMATTER,__VA_ARGS__)

// Decoded-audio output buffer size in BYTES (newer formats may need up to 48000 * 4).
// Parenthesized so the macro expands safely inside larger expressions.
#define MAX_AUDIO_FRME_SIZE  (2 * 44100)

/*
 * Decode the audio stream of the media file at jinput_path and write it to
 * joutput_path as raw PCM: signed 16-bit, 44100 Hz, stereo.
 *
 * All acquired resources (JNI strings, format/codec contexts, resampler,
 * frame, packet, output buffer, output file) are released on every exit
 * path via the single cleanup label below.
 */
JNIEXPORT void JNICALL
Java_com_cherry_ndkdemo_FFmpegUtils_audioDecode(JNIEnv *env, jclass clazz, jstring jinput_path,
                                                jstring joutput_path) {

    const char *input_path = (*env)->GetStringUTFChars(env, jinput_path, NULL);
    const char *output_path = (*env)->GetStringUTFChars(env, joutput_path, NULL);

    // Everything that needs releasing is declared up front and NULL-initialized
    // so the cleanup label can free exactly what was acquired.
    AVFormatContext *pFormatContext = NULL;
    AVCodecContext *pCodecContext = NULL;
    AVPacket *packet = NULL;
    AVFrame *frame = NULL;
    SwrContext *swrContext = NULL;
    uint8_t *outBuffer = NULL;
    FILE *outFile = NULL;
    int codecOpened = 0;

    if (input_path == NULL || output_path == NULL) {
        LOGE("%s", "GetStringUTFChars failed");
        goto cleanup;
    }

    av_register_all();

    pFormatContext = avformat_alloc_context();
    // On failure avformat_open_input frees the context and sets it to NULL.
    if (avformat_open_input(&pFormatContext, input_path, NULL, NULL) != 0) {
        LOGE("%s", "文件无法打开");
        goto cleanup;
    }
    if (avformat_find_stream_info(pFormatContext, NULL) < 0) {
        LOGE("%s", "无法获取音频信息");
        goto cleanup;
    }

    // Locate the first audio stream in the container.
    int audioStream = -1;
    for (unsigned int i = 0; i < pFormatContext->nb_streams; ++i) {
        if (pFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            audioStream = (int) i;
            break;
        }
    }
    if (audioStream == -1) {
        LOGE("%s", "无法获取音频流");
        goto cleanup;
    }

    // Find and open the decoder for that stream.
    pCodecContext = pFormatContext->streams[audioStream]->codec;
    AVCodec *codec = avcodec_find_decoder(pCodecContext->codec_id);
    if (codec == NULL) {
        LOGE("%s", "找不到解码器");
        goto cleanup;
    }
    if (avcodec_open2(pCodecContext, codec, NULL) < 0) {
        LOGE("%s", "打开解码器失败");
        goto cleanup;
    }
    codecOpened = 1;

    // Compressed packet (must be initialized before first use) and decoded frame.
    packet = av_malloc(sizeof(AVPacket));
    frame = av_frame_alloc();
    swrContext = swr_alloc();
    if (packet == NULL || frame == NULL || swrContext == NULL) {
        LOGE("%s", "av_malloc/alloc failed");
        goto cleanup;
    }
    av_init_packet(packet);

    // ---- resampler setup: anything -> 16-bit PCM, 44100 Hz, stereo ----
    enum AVSampleFormat inSampleFormat = pCodecContext->sample_fmt;
    enum AVSampleFormat outSampleFormat = AV_SAMPLE_FMT_S16;
    int inSampleRate = pCodecContext->sample_rate;
    int outSampleRate = 44100;
    // Some streams report no channel layout; derive a default from the
    // channel count so swr_init does not fail.
    uint64_t inChannelLayout = pCodecContext->channel_layout;
    if (inChannelLayout == 0) {
        inChannelLayout = (uint64_t) av_get_default_channel_layout(pCodecContext->channels);
    }
    uint64_t outChannelLayout = AV_CH_LAYOUT_STEREO;

    swr_alloc_set_opts(swrContext, outChannelLayout, outSampleFormat, outSampleRate,
                       inChannelLayout, inSampleFormat, inSampleRate, 0, NULL);
    if (swr_init(swrContext) < 0) {
        LOGE("%s", "swr_init error");
        goto cleanup;
    }

    int outChannelNb = av_get_channel_layout_nb_channels(outChannelLayout);
    LOGI("输入输出的声道个数=%d", outChannelNb);
    // ---- resampler setup end ----

    // swr_convert's out_count is a per-channel SAMPLE count, not a byte count.
    // Bound it by what actually fits in outBuffer to avoid a heap overflow.
    int maxOutSamples = MAX_AUDIO_FRME_SIZE /
                        (outChannelNb * av_get_bytes_per_sample(outSampleFormat));

    outBuffer = (uint8_t *) av_malloc(MAX_AUDIO_FRME_SIZE);
    if (outBuffer == NULL) {
        LOGE("%s", "av_malloc failed");
        goto cleanup;
    }

    outFile = fopen(output_path, "wb");
    if (outFile == NULL) {
        LOGE("fopen failed: %s", output_path);
        goto cleanup;
    }

    int got_frame = 0;
    int framecount = 0;
    // Read one compressed AVPacket at a time until EOF.
    while (av_read_frame(pFormatContext, packet) >= 0) {
        if (packet->stream_index == audioStream) {
            // avcodec_decode_audio4: <0 on error, otherwise bytes consumed;
            // got_frame is nonzero when a full decoded frame is available.
            int ret = avcodec_decode_audio4(pCodecContext, frame, &got_frame, packet);
            if (ret < 0) {
                LOGI("%s", "解码完成");
                av_free_packet(packet);
                break;
            }
            if (got_frame > 0) {
                LOGI("正在解码=%d", framecount++);
                // Returns the number of samples actually produced per channel;
                // that (not frame->nb_samples) determines how many bytes to write,
                // since input and output sample rates can differ.
                int convertedNb = swr_convert(swrContext, &outBuffer, maxOutSamples,
                                              (const uint8_t **) frame->data,
                                              frame->nb_samples);
                if (convertedNb > 0) {
                    int outBufferSize = av_samples_get_buffer_size(NULL, outChannelNb,
                                                                   convertedNb,
                                                                   outSampleFormat, 1);
                    fwrite(outBuffer, 1, (size_t) outBufferSize, outFile);
                }
            }
        }
        av_free_packet(packet);
    }

cleanup:
    // Release in reverse order of acquisition; each guard matches an acquire above.
    if (outFile != NULL) fclose(outFile);
    if (outBuffer != NULL) av_free(outBuffer);
    if (swrContext != NULL) swr_free(&swrContext);
    if (frame != NULL) av_frame_free(&frame);
    if (packet != NULL) av_free(packet);
    if (codecOpened) avcodec_close(pCodecContext);
    if (pFormatContext != NULL) avformat_close_input(&pFormatContext);
    if (input_path != NULL) (*env)->ReleaseStringUTFChars(env, jinput_path, input_path);
    if (output_path != NULL) (*env)->ReleaseStringUTFChars(env, joutput_path, output_path);
}

