#include "include/AudioPlay.h"

/**
 * Builds an audio player bound to an already-opened format context.
 *
 * @param avFormatContext demuxer context owned by the caller (not freed here)
 * @param jniCallJava     bridge used to report errors back to the Java layer
 * @param audioStreamIndex index of the audio stream inside avFormatContext
 */
AudioPlay::AudioPlay(AVFormatContext *avFormatContext,
                     JniCallJava *jniCallJava, int audioStreamIndex)
        : pAvFormatContext(avFormatContext),
          pJniCallJava(jniCallJava),
          audioStreamIndex(audioStreamIndex),
          pAVPacketQueue(new AVPacketQueue()),
          pAudioPlayStatus(new AudioPlayStatus()) {
}

void *playAudio(void *context) {
    auto *audioPlay = (AudioPlay *) context;
    audioPlay->initOpenSLES();
    return nullptr;
}

/**
 * pthread entry point for the demux/read thread.
 *
 * Pulls packets from the format context and pushes audio packets onto
 * the shared queue until playback is flagged as exited or the stream
 * ends. Packets belonging to other streams are freed immediately.
 */
void *readAudio(void *context) {
    auto *audioPlay = (AudioPlay *) context;
    while (audioPlay->pAudioPlayStatus != nullptr && !audioPlay->pAudioPlayStatus->isExit) {
        // Allocate a fresh AVPacket; ownership moves to the queue on push.
        AVPacket *av_packet = av_packet_alloc();
        if (av_read_frame(audioPlay->pAvFormatContext, av_packet) >= 0) {
            if (av_packet->stream_index == audioPlay->audioStreamIndex) {
                audioPlay->pAVPacketQueue->push(av_packet);
            } else {
                // Not our stream — discard.
                av_packet_free(&av_packet);
            }
        } else {
            // BUG FIX: av_read_frame() keeps returning an error at EOF, so the
            // original loop busy-spun forever, burning CPU and churning
            // allocations. Free the packet and stop the reader thread instead.
            av_packet_free(&av_packet);
            break;
        }
    }
    return nullptr;
}

void AudioPlay::play() {
    // 一个线程读取
    pthread_t readThread;
    pthread_create(&readThread, nullptr, readAudio, this);
    pthread_detach(readThread);
    // 一个线程播放
    pthread_t playThread;
    pthread_create(&playThread, nullptr, playAudio, this);
    pthread_detach(playThread);
}

/**
 * OpenSL ES buffer-queue callback: invoked each time the player has
 * consumed a buffer. Decodes/resamples the next chunk of PCM and
 * re-enqueues it, which keeps the playback loop alive.
 */
void audioCallback(SLAndroidSimpleBufferQueueItf bqPlayerBufferQueue, void *context) {
    auto *audioPlay = (AudioPlay *) context;
    int dataSize = audioPlay->resampleAudio();
    // BUG FIX: resampleAudio() returns 0 when playback is exiting (and the
    // resample buffer may already be released). Enqueueing a zero-sized or
    // stale buffer is invalid — only enqueue real data. Not re-enqueueing
    // naturally stops the callback chain.
    if (dataSize > 0 && audioPlay->resampleOutBuffer != nullptr) {
        (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, audioPlay->resampleOutBuffer,
                                        static_cast<SLuint32>(dataSize));
    }
}

/**
 * Sets up the OpenSL ES playback chain (engine -> output mix -> player),
 * registers audioCallback on the player's buffer queue, starts playback,
 * and primes the queue with one manual callback invocation.
 *
 * NOTE(review): none of the SLresult return codes are checked — any failure
 * here (e.g. CreateAudioPlayer rejecting the format) goes unnoticed.
 * NOTE(review): the PCM format is hard-coded to 44.1 kHz stereo S16, but
 * analyzeStream() resamples at the *source* sample rate — for non-44.1 kHz
 * input the pitch/speed would be wrong. TODO confirm against real streams.
 */
void AudioPlay::initOpenSLES() {
    // 1. Create the engine object and obtain the engine interface.
    SLObjectItf engineObject = nullptr;
    SLEngineItf engineEngine;
    slCreateEngine(&engineObject, 0, nullptr, 0, nullptr, nullptr);
    // realize the engine
    (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
    // get the engine interface, which is needed in order to create other objects
    (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
    // 2. Create and configure the output mix (reverb is requested but optional).
    static SLObjectItf outputMixObject = nullptr;
    const SLInterfaceID ids[1] = {SL_IID_ENVIRONMENTALREVERB};
    const SLboolean req[1] = {SL_BOOLEAN_FALSE};
    (*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 1, ids, req);
    (*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);
    SLEnvironmentalReverbItf outputMixEnvironmentalReverb = nullptr;
    (*outputMixObject)->GetInterface(outputMixObject, SL_IID_ENVIRONMENTALREVERB,
                                     &outputMixEnvironmentalReverb);
    SLEnvironmentalReverbSettings reverbSettings = SL_I3DL2_ENVIRONMENT_PRESET_STONECORRIDOR;
    (*outputMixEnvironmentalReverb)->SetEnvironmentalReverbProperties(outputMixEnvironmentalReverb,
                                                                      &reverbSettings);
    // 3. Create the audio player: a 2-slot Android simple buffer queue feeding
    //    16-bit little-endian stereo PCM at 44.1 kHz into the output mix.
    SLObjectItf bqPlayerObject = nullptr;
    SLPlayItf bqPlayerPlay = nullptr;
    SLDataLocator_AndroidSimpleBufferQueue simpleBufferQueue = {
            SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    SLDataFormat_PCM formatPcm = {
            SL_DATAFORMAT_PCM,
            2,
            SL_SAMPLINGRATE_44_1,
            SL_PCMSAMPLEFORMAT_FIXED_16,
            SL_PCMSAMPLEFORMAT_FIXED_16,
            SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT,
            SL_BYTEORDER_LITTLEENDIAN};
    SLDataSource audioSrc = {&simpleBufferQueue, &formatPcm};
    SLDataLocator_OutputMix outputMix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject};
    SLDataSink audioSnk = {&outputMix, nullptr};
    SLInterfaceID interfaceIds[3] = {SL_IID_BUFFERQUEUE, SL_IID_VOLUME, SL_IID_PLAYBACKRATE};
    SLboolean interfaceRequired[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
    (*engineEngine)->CreateAudioPlayer(engineEngine, &bqPlayerObject, &audioSrc, &audioSnk, 3,
                                       interfaceIds, interfaceRequired);
    (*bqPlayerObject)->Realize(bqPlayerObject, SL_BOOLEAN_FALSE);
    (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_PLAY, &bqPlayerPlay);
    // 4. Fetch the buffer queue and register the refill callback.
    SLAndroidSimpleBufferQueueItf bqPlayerBufferQueue;
    (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_BUFFERQUEUE, &bqPlayerBufferQueue);
    // 'this' is passed through to the callback's context argument on every call.
    (*bqPlayerBufferQueue)->RegisterCallback(bqPlayerBufferQueue, audioCallback, this);
    // 5. Switch the player into the playing state.
    (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_PLAYING);
    // 6. Kick off the first buffer manually; subsequent refills come from
    //    the registered callback as buffers drain.
    audioCallback(bqPlayerBufferQueue, this);
}

/**
 * Pops packets from the queue and decodes until one audio frame is
 * produced, resamples it into resampleOutBuffer (S16 stereo), and
 * returns the resampled size in bytes.
 *
 * @return byte count written to resampleOutBuffer, or 0 when playback
 *         is exiting before a frame could be decoded.
 */
int AudioPlay::resampleAudio() {
    int resampleOutBufferSize = 0;
    AVPacket *av_packet = nullptr;
    // Holds one decoded (uncompressed) frame.
    AVFrame *av_frame = av_frame_alloc();
    int index = 0;
    while (pAudioPlayStatus != nullptr && !pAudioPlayStatus->isExit) {
        av_packet = pAVPacketQueue->pop();
        int av_codec_send_packet_res = avcodec_send_packet(pAvCodecContext, av_packet);
        if (av_codec_send_packet_res == 0) {
            int av_codec_receive_frame_res = avcodec_receive_frame(pAvCodecContext, av_frame);
            if (av_codec_receive_frame_res == 0) {
                // AVPacket -> AVFrame: got a frame, resample and return it.
                index++;
                LOGE("解码第 %d 帧", index);
                resampleOutBufferSize = swr_convert(pSwrContext, &resampleOutBuffer,
                                                    av_frame->nb_samples,
                                                    (const uint8_t **) (av_frame->data),
                                                    av_frame->nb_samples);
                // samples per channel -> bytes: 2 channels * 2 bytes (S16).
                resampleOutBufferSize = resampleOutBufferSize * 2 * 2;
                break;
            }

        }
        // BUG FIX: av_packet_unref() only releases the packet's payload, not
        // the AVPacket struct allocated by av_packet_alloc() in the reader
        // thread — the struct leaked on every iteration that produced no
        // frame. av_packet_free() unrefs AND frees the struct, and nulls the
        // pointer so the trailing free below stays safe.
        av_packet_free(&av_packet);
        av_frame_unref(av_frame);
    }

    // Frees the last packet taken out of the queue (no-op if already null).
    av_packet_free(&av_packet);
    av_frame_free(&av_frame);
    return resampleOutBufferSize;
}

/**
 * Prepares decoding for the selected audio stream: finds and opens the
 * decoder, then sets up libswresample to convert decoded frames to
 * S16 stereo at the source sample rate, and allocates the output buffer.
 * On any failure it reports the error through callOnError() and returns.
 *
 * @param threadMode whether the error callback runs on the main or a child thread
 */
void AudioPlay::analyzeStream(ThreadMode threadMode) {
    // Look up a decoder matching the stream's codec id.
    AVCodecParameters *avCodecParameters = pAvFormatContext->streams[audioStreamIndex]->codecpar;
    AVCodec *avCodec = avcodec_find_decoder(avCodecParameters->codec_id);
    if (avCodec == nullptr) {
        LOGE("av codec find decoder error.");
        callOnError(threadMode, AV_CODEC_FIND_DECODER_ERROR_CODE,
                    const_cast<char *>("av codec find decoder error"));
        return;
    }

    // Allocate a codec context, copy the stream parameters in, and open it.
    pAvCodecContext = avcodec_alloc_context3(avCodec);
    if (pAvCodecContext == nullptr) {
        LOGE("av codec alloc context error.");
        callOnError(threadMode, AV_CODEC_ALLOC_CONTEXT_ERROR_CODE,
                    const_cast<char *>("av codec alloc context error"));
        return;
    }
    int av_codec_params_to_ctx_res = avcodec_parameters_to_context(pAvCodecContext,
                                                                   avCodecParameters);
    if (av_codec_params_to_ctx_res < 0) {
        LOGE("av codec parameters to context error: %s", av_err2str(av_codec_params_to_ctx_res));
        callOnError(threadMode, av_codec_params_to_ctx_res,
                    av_err2str(av_codec_params_to_ctx_res));
        return;
    }
    int av_codec_open_res = avcodec_open2(pAvCodecContext, avCodec, nullptr);
    if (av_codec_open_res != 0) {
        LOGE("av codec open error: %s", av_err2str(av_codec_open_res));
        callOnError(threadMode, av_codec_open_res, av_err2str(av_codec_open_res));
        return;
    }

    // =========== resampler setup start ===========
    // Input sample format (whatever the decoder produces).
    enum AVSampleFormat in_sample_fmt = pAvCodecContext->sample_fmt;
    // Output sample format: signed 16-bit interleaved.
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    // Input sample rate.
    int in_sample_rate = pAvCodecContext->sample_rate;
    // Output sample rate (kept equal to the input rate — no rate conversion).
    int out_sample_rate = in_sample_rate;
    // Input channel layout.
    // NOTE(review): some streams report channel_layout == 0; FFmpeg examples
    // fall back to av_get_default_channel_layout(channels) — TODO confirm.
    uint64_t in_channel_layout = pAvCodecContext->channel_layout;
    // Output channel layout: stereo.
    uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;
    pSwrContext = swr_alloc_set_opts(nullptr,
                                     out_channel_layout, out_sample_fmt, out_sample_rate,
                                     in_channel_layout, in_sample_fmt, in_sample_rate,
                                     0, nullptr);
    if (pSwrContext == nullptr) {
        LOGE("av swr alloc set pts error.");
        callOnError(threadMode, AV_SWR_ALLOC_SET_OPTS_ERROR_CODE,
                    const_cast<char *>("av swr alloc set pts error"));
        return;
    }
    int av_swr_init_res = swr_init(pSwrContext);
    if (av_swr_init_res < 0) {
        LOGE("av swr init error.");
        callOnError(threadMode, AV_SWR_INIT_ERROR_CODE,
                    const_cast<char *>("av swr init error"));
        return;
    }
    // Output buffer: frame_size samples * 2 channels * 2 bytes (S16 stereo).
    // NOTE(review): frame_size can be 0 for some codecs, which would make
    // this a zero-byte allocation — TODO confirm for the supported formats.
    this->resampleOutBuffer = static_cast<uint8_t *>(malloc(
            static_cast<size_t>(pAvCodecContext->frame_size * 2 * 2)));
}

// Destructor: frees all native resources via release() (idempotent —
// release() null-checks and nulls every pointer it frees).
AudioPlay::~AudioPlay() {
    release();
}

/**
 * Reports a fatal error to the Java layer after tearing down native state.
 *
 * @param threadMode which thread the JNI callback should use
 * @param errorCode  numeric error code forwarded to Java
 * @param errorMsg   human-readable message forwarded to Java
 */
void AudioPlay::callOnError(ThreadMode threadMode, int errorCode, char *errorMsg) {
    // Free native resources first so the object is in a clean state
    // before Java reacts to the error.
    release();
    if (pJniCallJava == nullptr) {
        return;
    }
    pJniCallJava->callOnError(threadMode, errorCode, errorMsg);
}

/**
 * Frees every resource this object owns: packet queue, resample buffer,
 * status flag object, codec context, and resample context. Safe to call
 * more than once — every pointer is null-checked and nulled afterwards.
 */
void AudioPlay::release() {
    if (pAVPacketQueue != nullptr) {
        delete pAVPacketQueue;
        pAVPacketQueue = nullptr;
    }

    if (resampleOutBuffer != nullptr) {
        free(resampleOutBuffer);
        resampleOutBuffer = nullptr;
    }

    if (pAudioPlayStatus != nullptr) {
        delete pAudioPlayStatus;
        pAudioPlayStatus = nullptr;
    }

    if (pAvCodecContext != nullptr) {
        avcodec_close(pAvCodecContext);
        // avcodec_free_context() frees the context and nulls the pointer.
        avcodec_free_context(&pAvCodecContext);
    }

    if (pSwrContext != nullptr) {
        // BUG FIX: swr_free() already frees the context and nulls the
        // pointer. The original then called free(pSwrContext) — a latent
        // double-free pattern (only harmless because swr_free nulls first).
        swr_free(&pSwrContext);
    }
}


