#include <jni.h>
#include <string>
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>
#include "RecordBuffer.h"

#ifndef FFMUSIC_ANDROIDLOG_H
#define FFMUSIC_ANDROIDLOG_H
#endif //FFMUSIC_ANDROIDLOG_H

#include "android/log.h"

#define LOG_DEBUG true
// NOTE(review): the trailing ';' was removed from each expansion below — with a
// trailing semicolon, `if (x) LOGD("a"); else ...` expands to two statements
// and fails to compile. Call sites already supply their own ';'.
#define LOGD(FORMAT, ...) __android_log_print(ANDROID_LOG_DEBUG,"JniThread",FORMAT, ##__VA_ARGS__)
#define LOGE(FORMAT, ...) __android_log_print(ANDROID_LOG_ERROR,"JniThread",FORMAT, ##__VA_ARGS__)
#define LOGW(FORMAT, ...) __android_log_print(ANDROID_LOG_WARN,"JniThread",FORMAT, ##__VA_ARGS__)

/**
 * Prototype of the OpenSL ES engine factory, repeated here for reference —
 * the authoritative declaration lives in <SLES/OpenSLES.h>.
 */
SL_API SLresult SLAPIENTRY slCreateEngine(
        SLObjectItf *pEngine,           // out: receives the engine object
        SLuint32 numOptions,         // number of engine options
        const SLEngineOption *pEngineOptions,    // engine options (array of enum/value pairs)
        SLuint32 numInterfaces,      // number of requested interfaces
        const SLInterfaceID *pInterfaceIds,     // interfaces to request on the engine
        const SLboolean *pInterfaceRequired // whether each interface is mandatory; all three arrays share the same length
);

SLObjectItf slObjectEngine = NULL;// OpenSL ES engine object
SLEngineItf engineItf = NULL;// engine interface obtained from slObjectEngine
SLObjectItf recordObj = NULL;// audio recorder object
SLRecordItf recordItf = NULL; // record interface (start/stop state)
SLAndroidSimpleBufferQueueItf recorderBufferQueue = NULL;// buffer-queue interface of the recorder
RecordBuffer *recordBuffer; // double buffer: allocated in startRecord, deleted in bgRecorderCallback

FILE *pcmFile = NULL; // optional raw-PCM dump file opened in startRecord
int bufferLength = 2048; // bytes handed to the queue per Enqueue
bool finish = true; // true = stopped / stop requested
// NOTE(review): `finish` is written on the JNI thread (startRecord/stopRecord)
// and read on the recorder callback thread without atomics — confirm this is
// acceptable on the target platform.


static JavaVM *jvm; // cached VM pointer so the callback thread can attach itself
static jobject gInstance; // global ref to the Java CRecorder instance (for reflection callbacks)
static JNIEnv *gEnv; // env last obtained on the callback thread; JNIEnv is thread-private

SLuint32 g_num_channels=0; // recording config written by initConfig
SLuint32 g_samples_per_sec=0;
SLuint32 g_bits_per_sample=0;
SLuint32 g_channel_mask=0;
// NOTE(review): the g_* values above are never read by startRecord — verify intent.


/* Preset number to use for recording */
SLuint32 presetValue = SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION;
SLAndroidConfigurationItf configItf; // Android configuration interface of recordObj


/**
 * 使用反射方式回调数据到java层的【dataCallback】方法
 * @param env
 */
/**
 * Pushes one captured buffer up to Java by reflectively invoking the static
 * method CRecorder.dataCallback(short[]).
 *
 * Runs on the OpenSL ES recorder callback thread, which is not attached to
 * the VM by default — so the thread is attached on demand before any other
 * JNI call is made (the original called GetObjectClass before attaching and
 * returned early on JNI_EDETACHED, so data was never delivered from a
 * detached thread).
 *
 * @param buffer bufferLength/2 shorts of PCM data; not owned by this function.
 */
void reflectDatatoJava(jshort *buffer) {
    JNIEnv *env = NULL;
    jint state = jvm->GetEnv((void **) &env, JNI_VERSION_1_4);
    bool attachedHere = false;
    if (state == JNI_EDETACHED) {
        // First JNI use on this native thread: attach it to the VM.
        if (jvm->AttachCurrentThread(&env, NULL) != JNI_OK) {
            return;
        }
        attachedHere = true;
    } else if (state != JNI_OK) {
        return;
    }
    gEnv = env;

    jclass recordCls = env->GetObjectClass(gInstance);
    jmethodID methodDataCallback = env->GetStaticMethodID(recordCls, "dataCallback", "([S)V");
    if (methodDataCallback != NULL) {
        jshortArray retShortArray = env->NewShortArray(bufferLength / 2);
        env->SetShortArrayRegion(retShortArray, 0, bufferLength / 2, buffer);
        env->CallStaticVoidMethod(recordCls, methodDataCallback, retShortArray);
        // Drop the local ref explicitly so refs don't pile up on this
        // long-lived thread. (The original called ReleaseShortArrayElements
        // with a pointer that was never obtained from GetShortArrayElements,
        // which is undefined behavior per the JNI spec.)
        env->DeleteLocalRef(retShortArray);
    }
    env->DeleteLocalRef(recordCls);
    LOGW("reflectDatatoJava-->end");

    if (attachedHere) {
        // Balance the attach so the VM does not keep this thread registered.
        jvm->DetachCurrentThread();
    }
}

/**
 * 缓冲数据回调
 * @param bg
 * @param context
 */
/**
 * OpenSL ES buffer-queue callback: invoked when a capture buffer is full.
 *
 * Forwards the filled half of the double buffer to Java (and to the PCM dump
 * file when one is open), then either enqueues the other half or — when a
 * stop was requested via `finish` — stops recording and releases the
 * resources owned by the session.
 *
 * @param bg      the queue that fired (same as recorderBufferQueue)
 * @param context unused; registered as NULL
 */
void bgRecorderCallback(SLAndroidSimpleBufferQueueItf bg, void *context) {
    if (NULL != recordBuffer) {
        short *buffer = recordBuffer->getNowBuffer();
        if (pcmFile != NULL) {
            // Best-effort raw dump; the Java callback is the primary consumer.
            fwrite(buffer, 1, bufferLength, pcmFile);
        }
        reflectDatatoJava(buffer);
    }
    if (finish) {
        (*recordItf)->SetRecordState(recordItf, SL_RECORDSTATE_STOPPED);
        // Flush and close the dump file, then clear the pointer so a later
        // session cannot double-close it (the original left it dangling).
        if (pcmFile != NULL) {
            fclose(pcmFile);
            pcmFile = NULL;
        }
        // Release the double buffer; NULL guards the capture branch above.
        delete recordBuffer;
        recordBuffer = NULL;
        LOGE("录制完成");
    } else {
        // Hand the other half of the double buffer back to the queue.
        (*recorderBufferQueue)->Enqueue(recorderBufferQueue, recordBuffer->getRecordBuffer(),
                                        bufferLength);
    }
}


/**
 * JNIEnv和jobject是属于线程私有的，不能共享
 */
/**
 * Starts OpenSL ES audio capture and streams each filled buffer back to Java
 * via CRecorder.dataCallback (see bgRecorderCallback / reflectDatatoJava).
 *
 * JNIEnv and jobject are thread-private, so the VM pointer and a global ref
 * to `instance` are cached here for use on the recorder callback thread.
 *
 * @param path_ file path for an optional raw-PCM dump; recording proceeds
 *              even if the file cannot be opened.
 *
 * Every SL call that yields an object is now checked (the original checked
 * nothing and would crash dereferencing NULL on any failure), and resources
 * from a previous/aborted session are released before reuse.
 */
extern "C"
JNIEXPORT void JNICALL
Java_com_aispeech_lyra_nativec_opensles_CRecorder_startRecord(JNIEnv *env, jobject instance,
                                                              jstring path_) {
    const char *path = env->GetStringUTFChars(path_, 0);
    LOGE("path_ is %s", path);
    // "wb": binary write, truncate existing / create new — PCM is binary data.
    pcmFile = fopen(path, "wb");
    if (pcmFile == NULL) {
        LOGE("打开|创建文件异常 %s ", path);
    }
    LOGW("lyra环境准备");

    // Cache the VM and a global ref for the callback thread's reflection.
    env->GetJavaVM(&jvm);
    if (gInstance != NULL) {
        env->DeleteGlobalRef(gInstance); // don't leak the previous session's ref
    }
    gInstance = env->NewGlobalRef(instance);

    delete recordBuffer;               // no-op on first call (delete NULL is safe)
    recordBuffer = new RecordBuffer(bufferLength);

    // 1. Create and realize the engine, then fetch its engine interface.
    LOGW("lyra创建引擎");
    SLresult result = slCreateEngine(&slObjectEngine, 0, NULL, 0, NULL, NULL);
    if (result != SL_RESULT_SUCCESS || slObjectEngine == NULL) {
        LOGE("slCreateEngine failed: %u", (unsigned) result);
        if (pcmFile != NULL) { fclose(pcmFile); pcmFile = NULL; }
        delete recordBuffer;
        recordBuffer = NULL;
        env->ReleaseStringUTFChars(path_, path);
        return;
    }
    (*slObjectEngine)->Realize(slObjectEngine, SL_BOOLEAN_FALSE);
    (*slObjectEngine)->GetInterface(slObjectEngine, SL_IID_ENGINE, &engineItf);

    // 2. Sink locator: a 2-deep Android simple buffer queue (double buffering).
    SLDataLocator_AndroidSimpleBufferQueue loc_bq = {
            SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, // only valid locator type here
            2,                                       // number of buffers
    };

    // 3. Source: the default audio-input device (microphone).
    SLDataLocator_IODevice loc_dev = {
            SL_DATALOCATOR_IODEVICE,
            SL_IODEVICE_AUDIOINPUT,
            SL_DEFAULTDEVICEID_AUDIOINPUT,
            NULL // device instance
    };
    SLDataSource audioStr = {
            &loc_dev,
            NULL // capture sources carry no input format
    };

    // 16 kHz / 16-bit little-endian PCM, 6 channels ("beiqi" device layout).
    // NOTE(review): the g_* values written by initConfig are never applied to
    // this sink format — it is hard-coded; confirm that is intentional.
    SLDataFormat_PCM format_pcm_beiqi_2 = {
            SL_DATAFORMAT_PCM,
            (SLuint32) 6,
            SL_SAMPLINGRATE_16,
            SL_PCMSAMPLEFORMAT_FIXED_16,
            SL_PCMSAMPLEFORMAT_FIXED_16, // container size: usually matches bit depth

            SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT
            | SL_SPEAKER_FRONT_CENTER | SL_SPEAKER_LOW_FREQUENCY
            | SL_SPEAKER_BACK_LEFT | SL_SPEAKER_BACK_RIGHT
            | SL_ANDROID_SPEAKER_NON_POSITIONAL,

            SL_BYTEORDER_LITTLEENDIAN
    };

    SLDataSink audioSink = {
            &loc_bq,             // where the data goes
            &format_pcm_beiqi_2  // what format it arrives in
    };

    // 4. Interfaces the recorder object must expose.
    const SLInterfaceID iidArray[2] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
                                       SL_IID_ANDROIDCONFIGURATION};
    const SLboolean required[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};

    // 5. Create the recorder (RECORD_AUDIO permission must be granted in Java).
    result = (*engineItf)->CreateAudioRecorder(
            engineItf,
            &recordObj,
            &audioStr,
            &audioSink,
            2,
            iidArray,
            required
    );
    if (result != SL_RESULT_SUCCESS || recordObj == NULL) {
        LOGE("CreateAudioRecorder failed: %u", (unsigned) result);
        if (pcmFile != NULL) { fclose(pcmFile); pcmFile = NULL; }
        delete recordBuffer;
        recordBuffer = NULL;
        env->ReleaseStringUTFChars(path_, path);
        return;
    }

    // 6. Apply the Android recording preset BEFORE realizing the object —
    //    presets cannot be changed after Realize.
    (*recordObj)->GetInterface(recordObj, SL_IID_ANDROIDCONFIGURATION, (void *) &configItf);
    if (configItf != NULL) {
        (*configItf)->SetConfiguration(configItf, SL_ANDROID_KEY_RECORDING_PRESET,
                                       &presetValue, sizeof(SLuint32));
    }

    LOGW("lyra实例化这个录制对象 ");
    // 7. Realize the recorder and fetch the record interface.
    result = (*recordObj)->Realize(recordObj, SL_BOOLEAN_FALSE);
    if (result != SL_RESULT_SUCCESS) {
        LOGE("recorder Realize failed: %u", (unsigned) result);
        if (pcmFile != NULL) { fclose(pcmFile); pcmFile = NULL; }
        delete recordBuffer;
        recordBuffer = NULL;
        env->ReleaseStringUTFChars(path_, path);
        return;
    }
    (*recordObj)->GetInterface(recordObj, SL_IID_RECORD, &recordItf);

    // Marker / periodic head-position events during recording.
    (*recordItf)->SetMarkerPosition(recordItf, 2000);
    (*recordItf)->SetPositionUpdatePeriod(recordItf, 500);
    (*recordItf)->SetCallbackEventsMask(recordItf,
            SL_RECORDEVENT_HEADATMARKER | SL_RECORDEVENT_HEADATNEWPOS);

    // 8. Prime the queue with the first buffer and register the fill callback.
    (*recordObj)->GetInterface(recordObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &recorderBufferQueue);
    (*recorderBufferQueue)->Enqueue(recorderBufferQueue, recordBuffer->getRecordBuffer(),
                                    bufferLength);
    (*recorderBufferQueue)->RegisterCallback(recorderBufferQueue, bgRecorderCallback, NULL);

    // 9. Start recording.
    LOGW("lyra开始录音 ");
    (*recordItf)->SetRecordState(recordItf, SL_RECORDSTATE_RECORDING);
    finish = false;
    env->ReleaseStringUTFChars(path_, path);
}

/**
 * Requests that recording stop. Actual teardown happens asynchronously in
 * bgRecorderCallback the next time it observes finish == true.
 */
extern "C"
JNIEXPORT void JNICALL
Java_com_aispeech_lyra_nativec_opensles_CRecorder_stopRecord(JNIEnv *env, jobject instance) {
    if (recordItf == NULL) {
        return; // nothing is recording
    }
    finish = true;
}







/**
 * Stores the recording configuration requested by the Java layer into the
 * g_* globals (channel count, sample rate, bit depth, channel mask).
 *
 * NOTE(review): startRecord does not currently read these values — confirm
 * against the caller whether they are meant to drive the sink format.
 */
extern "C"
JNIEXPORT void JNICALL
Java_com_aispeech_lyra_nativec_opensles_CRecorder_initConfig(JNIEnv *env, jobject thiz,
                                                             jint num_channels,
                                                             jint samples_per_sec,
                                                             jint bits_per_sample,
                                                             jint channel_mask) {
    g_num_channels    = (SLuint32) num_channels;
    g_samples_per_sec = (SLuint32) samples_per_sec;
    g_bits_per_sample = (SLuint32) bits_per_sample;
    g_channel_mask    = (SLuint32) channel_mask;
    LOGD("录音配置初始化 ");
}

/**
 * @return JNI_TRUE while a recording session is active (finish flag cleared),
 *         JNI_FALSE otherwise.
 */
extern "C"
JNIEXPORT jboolean JNICALL
Java_com_aispeech_lyra_nativec_opensles_CRecorder_isInRecording(JNIEnv *env, jobject thiz) {
    return finish ? JNI_FALSE : JNI_TRUE;
}