package com.nova.bluetooth.xunFeiSDK.recorder;


import static com.nova.bluetooth.xunFeiSDK.engine.EngineConstants.isRecording;
import static com.nova.bluetooth.xunFeiSDK.engine.EngineConstants.saveAudio;

import android.annotation.SuppressLint;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;
import android.media.audiofx.AcousticEchoCanceler;
import android.media.audiofx.AutomaticGainControl;
import android.media.audiofx.NoiseSuppressor;
import android.util.Log;


import com.nova.bluetooth.base.CommonMessageWrap;
import com.nova.bluetooth.helpUtils.EventBusUtil;
import com.nova.bluetooth.helpUtils.LogUtil;
import com.nova.bluetooth.rtasr_wss.RTASRManager;
import com.nova.bluetooth.webIat.WebXunFeiTWS;
import com.nova.bluetooth.xunFeiSDK.engine.AiuiEngine;
import com.nova.bluetooth.xunFeiSDK.engine.EngineConstants;
import com.nova.bluetooth.xunFeiSDK.engine.WakeupEngine;
import com.nova.bluetooth.xunFeiSDK.utils.AudioAmplify;
import com.nova.bluetooth.xunFeiSDK.utils.AudioFilter;
import com.nova.bluetooth.xunFeiSDK.utils.FileUtil;

import java.util.Arrays;
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;


/**
 * Singleton microphone recorder.
 *
 * Captures 16 kHz / 16-bit PCM from the device microphone and routes each frame to
 * either the ASR pipeline (RTASR websocket, XunFei IAT websocket, or AIUI) when
 * {@link #AUDIO_TYPE_ASR} is true, or to the CAE wakeup engine when it is false.
 * Raw frames are also broadcast on the EventBus as {@code RECORDER_DATA}.
 *
 * Thread-safety: {@code isRecording} (from EngineConstants) is the loop flag shared
 * between the recording thread and stop/destroy callers; most other static state is
 * only touched from the recording thread or from the UI control flow.
 */
public class SystemRecorder implements AudioRecorder {
    private static final String TAG = SystemRecorder.class.getSimpleName();
    // volatile is required for the double-checked locking in getInstance() to be
    // correct under the Java memory model.
    private static volatile SystemRecorder recorder;
    // Recording buffer size in bytes, from AudioRecord.getMinBufferSize().
    private static int mBufferSize;
    // Processing mode:
    //   true  -> skip noise reduction / wakeup, send audio straight to ASR (16k/16bit, 1 channel)
    //   false -> noise reduction + wakeup first, then ASR (16k/16bit, 2 channels)
    public static boolean AUDIO_TYPE_ASR = false;
    // Platform recorder plus the optional audio effects attached to its session.
    private static AudioRecord audioRecord;
    private static NoiseSuppressor noiseSuppressor;
    private static AcousticEchoCanceler echoCanceler;
    private static AutomaticGainControl agc;

    public static String saveRecordPath = "/sdcard/raw.pcm";
    // Timestamp marking the start of each utterance; reported to the server as a
    // per-sentence indicator (lyric-style alignment).
    public static long saveRecordAudioTimeSnippet = 0;
    // When != -1, the recording loop re-initializes the utterance timestamp once.
    public static int resetSaveRecordAudioTime = -1;
    public static String saveTempPath = "";

    // Singleton: private constructor.
    private SystemRecorder() {
    }

    /**
     * Returns the singleton, lazily creating the {@link AudioRecord} if needed.
     *
     * recorderType == 0: VOICE_COMMUNICATION source (echo-cancelled), mono; a second
     * (zero-filled) channel is synthesized later for the wakeup engine.
     * recorderType == 3: raw MIC source, stereo (1 mic + 1 reference channel).
     *
     * Fix: the AudioRecord is now only created when none exists. The previous code
     * built a new AudioRecord on EVERY call without releasing the old one, leaking
     * native audio resources. It is still recreated after destroyRecord() nulls it.
     */
    @SuppressLint("MissingPermission")
    public static SystemRecorder getInstance() {
        // Double-checked locking; the synchronized block only runs on first use.
        if (recorder == null) {
            synchronized (SystemRecorder.class) {
                if (recorder == null) {
                    recorder = new SystemRecorder();
                }
            }
        }

        int SAMPLE_RATE = 16000;
        // Sized for the stereo case; also a safe (larger) size for the mono case.
        mBufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_16BIT);
        if (audioRecord == null) {
            if (EngineConstants.recorderType == 0) {
                audioRecord = new AudioRecord(
                        MediaRecorder.AudioSource.VOICE_COMMUNICATION,  // phone's echo-cancelled audio
                        SAMPLE_RATE,                                    // 16 kHz sample rate
                        AudioFormat.CHANNEL_IN_MONO,                    // 1 channel; the algorithm needs 1mic+1ref, zero-padded later
                        AudioFormat.ENCODING_PCM_16BIT,                 // 16-bit samples
                        mBufferSize
                );
            } else if (EngineConstants.recorderType == 3) {
                audioRecord = new AudioRecord(
                        MediaRecorder.AudioSource.MIC,                  // raw microphone audio
                        SAMPLE_RATE,                                    // 16 kHz sample rate
                        AudioFormat.CHANNEL_IN_STEREO,                  // 2 channels: 1 mic + 1 ref
                        AudioFormat.ENCODING_PCM_16BIT,                 // 16-bit samples
                        mBufferSize
                );
            }
            // NOTE(review): for any other recorderType audioRecord stays null;
            // startRecord() guards against that instead of crashing.
        }
        Log.i(TAG, "创建systemRecorder成功");
        return recorder;
    }


    /**
     * Stops capture: disables the attached audio effects and stops the AudioRecord.
     * Safe to call after destroyRecord() (null-guarded) or when already stopped
     * (the IllegalStateException is caught and logged).
     */
    @Override
    public void stopRecord() {
        if (noiseSuppressor != null) {
            noiseSuppressor.setEnabled(false);
        }

        if (echoCanceler != null) {
            echoCanceler.setEnabled(false);
        }
        if (agc != null) {
            agc.setEnabled(false);
        }


        // Signals the capture thread's while-loop to exit.
        isRecording = false;
        // Fix: guard against audioRecord being null (e.g. after destroyRecord()),
        // which previously caused an NPE here.
        if (audioRecord != null) {
            try {
                audioRecord.stop();
                Log.i(TAG, "停止录音");
            } catch (IllegalStateException e) {
                Log.i(TAG, e.toString());
            }
        }

    }

    /**
     * Fully releases the recorder: stops capture, then releases and nulls the
     * audio effects and the AudioRecord itself. getInstance() will rebuild the
     * AudioRecord on the next call.
     */
    @Override
    public void destroyRecord() {
        stopRecord();
        if (noiseSuppressor != null) {
            noiseSuppressor.setEnabled(false);
            noiseSuppressor.release();
            noiseSuppressor = null;
        }

        if (echoCanceler != null) {
            echoCanceler.setEnabled(false);
            echoCanceler.release();
            echoCanceler = null;
        }
        if (agc != null) {
            agc.setEnabled(false);
            agc.release();
            agc = null;
        }

        if (audioRecord != null) {
            audioRecord.release();
            audioRecord = null;
        }

        Log.i(TAG, "销毁recorder");
    }


    // Recording time markers used for lyric-style utterance alignment.
    long startSaveAudioTime = 0;
    long recordingTime = 0;

    public static final float IAT_MULTIPLE = 1.8f;    // gain for the speech-dictation (IAT) interface
    public static final float RTASR_MULTIPLE = 1.85f; // gain for the real-time transcription interface
                                                      // (use 1.5f for headset mic audio, 1.85f for phone mic audio)
    public static final float DEF_MULTIPLE = 1.0f;    // gain for the AIUI interface
    public static float mMultiple = RTASR_MULTIPLE;

    /**
     * Starts the capture thread.
     *
     * Each loop iteration reads one buffer, applies gain, optionally writes it to
     * disk, posts it on the EventBus, then routes it to ASR (RTASR / WebIAT / AIUI)
     * or to the wakeup engine depending on {@link #AUDIO_TYPE_ASR}. When the loop
     * exits (mic closed manually), a 2-second silent buffer is sent downstream to
     * force a VAD end event.
     *
     * @return 0 on success, -1 if no AudioRecord exists (fix: previously this NPE'd
     *         inside getAudioSessionId()/startRecording() when audioRecord was null).
     */
    @Override
    public int startRecord() {
        if (audioRecord == null) {
            Log.e(TAG, "startRecord: audioRecord is null");
            return -1;
        }

        // Discard data cached by the previous session.
        clearWaittingQueue();

        // The effects below are an addition on top of the XunFei demo code.
        // Noise suppressor.
        if (NoiseSuppressor.isAvailable()) {
            if (noiseSuppressor == null) {
                noiseSuppressor = NoiseSuppressor.create(audioRecord.getAudioSessionId());
            }
        }
        // Acoustic echo canceler: removes the far-end audio picked up by the mic
        // (telephony/VoIP echo). Intentionally disabled here.
//        if (AcousticEchoCanceler.isAvailable()) {
//            if(echoCanceler == null){
//                echoCanceler = AcousticEchoCanceler.create(audioRecord.getAudioSessionId());
//            }
//        }
        // Automatic gain control: levels out the input volume so speech arrives at a
        // consistent amplitude for recording and recognition.
        if (AutomaticGainControl.isAvailable()) {
            if (agc == null) {
                agc = AutomaticGainControl.create(audioRecord.getAudioSessionId());
            }
        }
        if (noiseSuppressor != null) {
            noiseSuppressor.setEnabled(true);
        }
        if (echoCanceler != null) {
            echoCanceler.setEnabled(true);
        }
        if (agc != null) {
            agc.setEnabled(true);
        }
        if (AUDIO_TYPE_ASR) {
            // ASR mode: put AIUI to work immediately. In wakeup mode AIUI is only
            // started after a wake word, otherwise the wake audio itself would be
            // sent to recognition.
            AiuiEngine.MSG_wakeup(EngineConstants.WAKEUPTYPE_VOICE);
        }

        // Already recording: don't spawn a second capture thread.
        if (isRecording) {
            return 0; // 0 = success
        }

        new Thread(() -> {
            // Begin capture.
            audioRecord.startRecording();
            isRecording = true;
            try {
                // Reused read buffer — downstream consumers that keep a reference
                // past this iteration must copy it (see waittingQueue below).
                byte[] audioData = new byte[mBufferSize];

                resetSaveRecordAudioTime = 1;  // force the (resetSaveRecordAudioTime != -1) branch to run once

                while (isRecording) {
                    if (resetSaveRecordAudioTime != -1) {
                        if (saveAudio) {
                            if (saveRecordAudioTimeSnippet == 0) {
                                saveRecordAudioTimeSnippet = System.currentTimeMillis();
                            } else {

                                // Too close to the previous utterance to distinguish:
                                // push the marker forward by one second.
                                if (recordingTime == 0) {
                                    recordingTime = recordingTime + 1000;
                                }

                                saveRecordAudioTimeSnippet += recordingTime;
                            }
//                            LogUtil.d("test======>timeTag:" + saveRecordAudioTimeSnippet + " recordingTime：" +recordingTime);
                            // Reset the duration counter for this utterance.
                            startSaveAudioTime = System.currentTimeMillis();
                            recordingTime = 0;
                        }
                        resetSaveRecordAudioTime = -1;
                    }


                    int readSize = 0;
                    try { // AudioRecord.read() has been seen to SIGABRT
                        if (audioRecord != null) {
                            readSize = audioRecord.read(audioData, 0, mBufferSize);
                        } else {
                            LogUtil.e("test==>audioRecord为空！！！");
                            return;
                        }
                    } catch (Exception e) {
                        LogUtil.e(e.toString());
                    }

                    if (AudioRecord.ERROR_INVALID_OPERATION != readSize && isRecording) {
                        // Apply the configured gain to the raw audio.
                        AudioAmplify.amplifyAll(audioData, mMultiple);

                        if (EngineConstants.rawAudioGain != 1.0f) {
                            AudioAmplify.amplifyAll(audioData, EngineConstants.rawAudioGain);
                        }
                        // Save the raw audio (16k/16bit mono) if enabled.
                        if (saveAudio) {
//                            FileUtil.writeFile(audioData, saveRecordPath,1,16000, 16 );
                            FileUtil.writeFile(audioData, saveRecordPath);
                            recordingTime = System.currentTimeMillis() - startSaveAudioTime;
                        }

                        try {
                            // NOTE(review): subscribers receive the reused buffer; if any
                            // handler runs asynchronously it must copy the data — TODO confirm.
                            EventBusUtil.postMessage(new CommonMessageWrap(EngineConstants.RECORDER_DATA, audioData));
                        } catch (Exception e) {
                            LogUtil.e("EventBusUtil.postMessage(RECORDER_DATA)==>" + e.toString());
                        }


                        // ASR path: keep only the first channel.
                        if (AUDIO_TYPE_ASR) {
                            byte[] asrData = audioData;
                            if (EngineConstants.recorderType == 3) {
                                // The ASR engine needs mono; recorderType==3 captures 2 mics,
                                // so drop the second channel.
                                asrData = AudioFilter.convert(audioData, 2, "0");
                            }


                            // Cache recent frames so audio isn't lost while the WebIATWS
                            // connection is still being established.
                            if (waittingQueue.size() > 11) {
                                waittingQueue.poll();  // drop the oldest frame
                            }
                            // Fix: when asrData aliases the reused read buffer (recorderType != 3),
                            // queue a copy. Previously the queue held up to 12 references to the
                            // SAME array, so flushing it sent 12 copies of only the newest frame.
                            waittingQueue.offer(asrData == audioData
                                    ? Arrays.copyOf(audioData, audioData.length)
                                    : asrData);


                            if (RTASRManager.isWebSocketRunning) {
                                RTASRManager.sendData(asrData);
                            } else if (WebXunFeiTWS.isWebSocketRunning) {
                                // Flush everything cached while the socket was connecting.
                                while (waittingQueue.size() > 0) {
                                    byte[] mBuffer = waittingQueue.poll();
                                    WebXunFeiTWS.sendData(mBuffer);
                                }

//                                if (WebIATWS.statusInSend == WebIATWS.StatusFirstFrame){  // first send: flush the cached frames
//                                    while (waittingQueue.size() > 0) {
//                                        byte[] mBuffer = waittingQueue.poll();
//                                        WebIATWS.sendData(mBuffer);
//                                    }
//                                }else {
//                                    WebIATWS.sendData(asrData);
//                                }


                            } else {
                                // Fall back to AIUI recognition.
                                sendAudioToAiui(asrData);
                            }

                        } else { // wakeup path: keep both channels for noise reduction + wakeup
                            if (onWakeUpListener != null) {
                                onWakeUpListener.onWakeUp();
                            }

                            byte[] wakeupData = audioData;
                            if (EngineConstants.recorderType == 0) {
                                // The wakeup engine needs 2 channels; recorderType==0 captures
                                // 1 mic, so append one empty channel.
                                wakeupData = AudioFilter.convert(audioData, 1, "0,-1");
                            }
                            // Save the pre-wakeup audio (16k/16bit stereo) if enabled.
                            if (saveAudio) {
                                FileUtil.writeFile(wakeupData, "/sdcard/beforeWakeup.pcm");
                            }
                            // Feed the wakeup engine.
                            WakeupEngine.writeAudioToCAE(wakeupData);
                        }
                    }
                }

                // Mic closed manually: send simulated silence to trigger VAD end.
                // 1 ms holds 16 samples, each sample is 2 bytes; 2000 ms chosen here.
                byte[] simulateVadEnd = new byte[2 * 16 * 2000];
                if (RTASRManager.isWebSocketRunning) {
                    RTASRManager.sendData(simulateVadEnd);
                } else if (WebXunFeiTWS.isWebSocketRunning) {
                    WebXunFeiTWS.sendData(simulateVadEnd);
                } else {
                    if (AUDIO_TYPE_ASR) {
                        // Route the silence to AIUI.
                        sendAudioToAiui(simulateVadEnd);
                    } else {
                        // Route the silence to the wakeup engine.
                        WakeupEngine.writeAudioToCAE(simulateVadEnd);
                    }
                }


            } catch (Exception e) {
                e.printStackTrace();
            }
        }).start();
        Log.i(TAG, "开启录音成功！");
        return 0;        // 0 = success
    }

    /**
     * Feeds audio to AIUI in chunks, since its write API expects bounded buffers.
     *
     * @param asrData mono 16k/16bit PCM to recognize
     */
    private void sendAudioToAiui(byte[] asrData) {
        final int MAX_CHUNK_SIZE = 1024; // tune as needed
        for (int offset = 0; offset < asrData.length; offset += MAX_CHUNK_SIZE) {
            int length = Math.min(MAX_CHUNK_SIZE, asrData.length - offset);
            byte[] chunk = Arrays.copyOfRange(asrData, offset, offset + length);
            AiuiEngine.MSG_writeAudio(chunk);
        }
    }

    // Buffers the frames captured before WebIATWS finishes connecting.
    Queue<byte[]> waittingQueue = new LinkedBlockingQueue<>();

    private void clearWaittingQueue() {
        waittingQueue.clear();
    }

    public void setOnWakeUpListener(OnWakeUpListener onWakeUpListener) {
        this.onWakeUpListener = onWakeUpListener;
    }

    OnWakeUpListener onWakeUpListener;

    /** Callback fired for every captured frame while in wakeup mode. */
    public interface OnWakeUpListener {
        void onWakeUp();
    }
}
