#include <jni.h>
#include <android/log.h>
#include <string.h>
#include <unistd.h>

#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>

// Logcat helpers tagged "cherry_ndk_file". Callers always pass at least one
// vararg (e.g. LOGE("%s", msg)), so plain __VA_ARGS__ is sufficient here.
#define LOGI(FORMATTER, ...) __android_log_print(ANDROID_LOG_INFO,"cherry_ndk_file",FORMATTER,__VA_ARGS__)
#define LOGE(FORMATTER, ...) __android_log_print(ANDROID_LOG_ERROR,"cherry_ndk_file",FORMATTER,__VA_ARGS__)

// Max decoded-audio buffer size in BYTES (newer codecs can reach 48000 * 4).
// Parenthesized so the expansion stays correct inside larger expressions
// (e.g. MAX_AUDIO_FRME_SIZE / x, x % MAX_AUDIO_FRME_SIZE).
#define MAX_AUDIO_FRME_SIZE  (2 * 44100)

JNIEXPORT void JNICALL
Java_com_cherry_ndkdemo_FFmpegUtils_audioPlayer(JNIEnv *env, jclass clazz, jstring jaudio_path) {
    // Decode an audio file with FFmpeg, resample to 44100 Hz stereo S16 PCM,
    // and stream the PCM to a Java AudioTrack obtained from
    // AudioUtils.createAudioTrack(int sampleRate, int channels).
    // Blocks until the file is fully played or an error occurs.

    const char *audioPath = (*env)->GetStringUTFChars(env, jaudio_path, NULL);
    if (audioPath == NULL) {
        return; // JNI already threw OutOfMemoryError
    }

    // Everything acquired below is released at the single cleanup label,
    // so every early error path frees exactly what was acquired so far.
    AVFormatContext *pFormatterContext = NULL;
    AVCodecContext *pCodecContext = NULL; // owned by the format context; close, don't free
    int codecOpened = 0;
    AVPacket *packet = NULL;   // compressed data
    AVFrame *frame = NULL;     // decoded data
    SwrContext *swrContext = NULL;
    uint8_t *outBuffer = NULL;

    av_register_all();
    pFormatterContext = avformat_alloc_context();
    if (avformat_open_input(&pFormatterContext, audioPath, NULL, NULL) != 0) {
        LOGE("%s", "无法打开音频文件");
        goto cleanup;
    }
    if (avformat_find_stream_info(pFormatterContext, NULL) < 0) {
        LOGE("%s", "无法获取音频信息");
        goto cleanup;
    }

    // Locate the first audio stream.
    int streamId = -1;
    for (unsigned int i = 0; i < pFormatterContext->nb_streams; ++i) {
        if (pFormatterContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            streamId = (int) i;
            break;
        }
    }
    if (streamId == -1) {
        LOGE("%s", "找不到音频流");
        goto cleanup;
    }

    pCodecContext = pFormatterContext->streams[streamId]->codec;
    AVCodec *codec = avcodec_find_decoder(pCodecContext->codec_id);
    if (codec == NULL) {
        LOGE("%s", "找不到解码器");
        goto cleanup;
    }
    if (avcodec_open2(pCodecContext, codec, NULL) < 0) {
        LOGE("%s", "无法打开解码器");
        goto cleanup;
    }
    codecOpened = 1;

    packet = av_malloc(sizeof(AVPacket));
    if (packet == NULL) {
        goto cleanup;
    }
    av_init_packet(packet); // av_malloc does not zero the packet fields
    frame = av_frame_alloc();
    if (frame == NULL) {
        goto cleanup;
    }

    // Resampler: whatever the decoder outputs -> 44100 Hz stereo 16-bit PCM.
    swrContext = swr_alloc();
    enum AVSampleFormat inSampleFmt = pCodecContext->sample_fmt;
    enum AVSampleFormat outSampleFmt = AV_SAMPLE_FMT_S16;
    int inSampleRate = pCodecContext->sample_rate;
    int outSampleRate = 44100;
    uint64_t inChannelLayout = pCodecContext->channel_layout;
    if (inChannelLayout == 0) {
        // Some decoders leave channel_layout unset; derive it from the channel count
        // so swr_init doesn't fail.
        inChannelLayout = (uint64_t) av_get_default_channel_layout(pCodecContext->channels);
    }
    uint64_t outChannelLayout = AV_CH_LAYOUT_STEREO;

    swr_alloc_set_opts(swrContext, outChannelLayout, outSampleFmt, outSampleRate,
                       inChannelLayout, inSampleFmt, inSampleRate, 0, NULL);
    if (swr_init(swrContext) < 0) {
        LOGE("%s", "SwrContext init failure");
        goto cleanup;
    }

    int outChannelNb = av_get_channel_layout_nb_channels(outChannelLayout);
    LOGI("输入输出声道个数=%d", outChannelNb);

    // Create the Java AudioTrack and resolve play()/write().
    jclass audioUtilsClz = (*env)->FindClass(env, "com/cherry/ndkdemo/AudioUtils");
    if (audioUtilsClz == NULL) {
        goto cleanup; // NoClassDefFoundError already pending
    }
    jmethodID jcreateAudioTrackMethodId = (*env)->GetStaticMethodID(env, audioUtilsClz,
                                                                    "createAudioTrack",
                                                                    "(II)Landroid/media/AudioTrack;");
    if (jcreateAudioTrackMethodId == NULL) {
        goto cleanup;
    }
    jobject audioTrack = (*env)->CallStaticObjectMethod(env, audioUtilsClz,
                                                        jcreateAudioTrackMethodId,
                                                        outSampleRate, outChannelNb);
    if (audioTrack == NULL || (*env)->ExceptionCheck(env)) {
        goto cleanup;
    }
    jclass audioClz = (*env)->GetObjectClass(env, audioTrack);
    jmethodID jplayMethodId = (*env)->GetMethodID(env, audioClz, "play", "()V");
    if (jplayMethodId == NULL) {
        goto cleanup;
    }
    // play() returns void: the JNI spec requires CallVoidMethod here
    // (CallObjectMethod on a void method is undefined behavior).
    (*env)->CallVoidMethod(env, audioTrack, jplayMethodId);

    jmethodID jwriteMethodId = (*env)->GetMethodID(env, audioClz, "write", "([BII)I");
    if (jwriteMethodId == NULL) {
        goto cleanup;
    }

    outBuffer = av_malloc(MAX_AUDIO_FRME_SIZE);
    if (outBuffer == NULL) {
        goto cleanup;
    }
    // swr_convert's out_count is in SAMPLES PER CHANNEL, not bytes. Passing the
    // byte capacity (as the old code did) would let the resampler write past
    // the end of outBuffer.
    int maxOutSamples =
            MAX_AUDIO_FRME_SIZE / (outChannelNb * av_get_bytes_per_sample(outSampleFmt));

    int gotFrame = 0;
    int frameCount = 0;
    // Read compressed packets one at a time and decode them.
    while (av_read_frame(pFormatterContext, packet) >= 0) {
        if (packet->stream_index == streamId) {
            int ret = avcodec_decode_audio4(pCodecContext, frame, &gotFrame, packet);
            if (ret < 0) {
                LOGI("%s", "解码完成");
                av_free_packet(packet);
                break;
            }
            if (gotFrame > 0) {
                LOGI("解码中=%d", frameCount++);
                int convertedSamples = swr_convert(swrContext, &outBuffer, maxOutSamples,
                                                   (const uint8_t **) frame->data,
                                                   frame->nb_samples);
                if (convertedSamples > 0) {
                    // Size the Java buffer from the sample count actually produced —
                    // it differs from frame->nb_samples whenever resampling changes
                    // the sample rate.
                    int outBufferSize = av_samples_get_buffer_size(NULL, outChannelNb,
                                                                   convertedSamples,
                                                                   outSampleFmt, 1);
                    jbyteArray audioSampleArray = (*env)->NewByteArray(env, outBufferSize);
                    if (audioSampleArray == NULL) {
                        av_free_packet(packet);
                        goto cleanup; // OOM pending
                    }
                    // Copy native PCM into the Java array; mode 0 commits and releases.
                    jbyte *sampleByte = (*env)->GetByteArrayElements(env, audioSampleArray, NULL);
                    memcpy(sampleByte, outBuffer, (size_t) outBufferSize);
                    (*env)->ReleaseByteArrayElements(env, audioSampleArray, sampleByte, 0);
                    // write() returns int: CallIntMethod, not CallObjectMethod.
                    (*env)->CallIntMethod(env, audioTrack, jwriteMethodId,
                                          audioSampleArray, 0, outBufferSize);
                    // Drop per-frame local refs, otherwise the 512-entry local
                    // reference table overflows.
                    (*env)->DeleteLocalRef(env, audioSampleArray);
                    // Crude pacing (~16 ms per frame) so write() isn't flooded.
                    usleep(1000 * 16);
                }
            }
        }
        av_free_packet(packet); // releases the payload; the struct is freed in cleanup
    }

cleanup:
    av_frame_free(&frame);     // NULL-safe
    if (packet != NULL) {
        av_free(packet);       // av_free_packet only frees the payload, not the struct
    }
    av_free(outBuffer);        // NULL-safe
    swr_free(&swrContext);     // NULL-safe
    if (codecOpened) {
        avcodec_close(pCodecContext); // context itself is owned by pFormatterContext
    }
    avformat_close_input(&pFormatterContext); // NULL-safe
    (*env)->ReleaseStringUTFChars(env, jaudio_path, audioPath);
}
