package com.unione.unione_voice.viewmodel

import android.text.TextUtils
import android.util.Log
import androidx.lifecycle.MutableLiveData
import androidx.lifecycle.viewModelScope
import com.alibaba.fastjson.JSON
import com.alibaba.fastjson.JSONArray
import com.alibaba.fastjson.JSONObject
import com.blankj.utilcode.util.ToastUtils
import com.iflytek.aiui.AIUIAgent
import com.iflytek.aiui.AIUIConstant
import com.iflytek.aiui.AIUIEvent
import com.iflytek.aiui.AIUIListener
import com.iflytek.aiui.AIUIMessage
import com.iflytek.sparkchain.core.LLM
import com.iflytek.sparkchain.core.LLMConfig
import com.iflytek.sparkchain.core.SparkChain
import com.iflytek.sparkchain.core.SparkChainConfig
import com.jeremyliao.liveeventbus.LiveEventBus
import com.kunminx.architecture.ui.callback.UnPeekLiveData
import com.unione.unione_base.BaseApplication
import com.unione.unione_base.view.BaseViewModel
import com.unione.unione_voice.bean.RoleContent
import com.unione.unione_voice.constant.AIState
import com.unione.unione_voice.constant.VoiceBusEvent
import com.unione.unione_voice.constant.VoiceCommonConstant
import com.unione.unione_voice.constant.VoiceSdkConstant
import com.unione.unione_voice.engine.AiuiEngine
import com.unione.unione_voice.engine.EngineConstants
import com.unione.unione_voice.engine.EngineConstants.isPlayingTTS
import com.unione.unione_voice.engine.EngineConstants.isRecording
import com.unione.unione_voice.engine.EngineConstants.mAIUIState
import com.unione.unione_voice.engine.EngineConstants.meaningful
import com.unione.unione_voice.engine.EngineConstants.saveTTS
import com.unione.unione_voice.engine.WakeupEngine
import com.unione.unione_voice.engine.WakeupListener
import com.unione.unione_voice.recorder.AudioRecorder
import com.unione.unione_voice.recorder.RecorderFactory
import com.unione.unione_voice.recorder.SystemRecorder
import com.unione.unione_voice.utils.CopyAssetsUtils
import com.unione.unione_voice.utils.FileUtil
import com.unione.unione_voice.utils.GuidUtil
import com.unione.unione_voice.utils.StreamingAsrUtil
import com.unione.unione_voice.utils.senselessWordUtil
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext
import java.nio.charset.StandardCharsets

/**
 * Base ViewModel wiring together the iFlytek AIUI voice pipeline
 * (wake-up -> ASR -> NLP -> TTS) with a Spark LLM fallback for questions
 * the AIUI cloud NLP cannot answer.
 */
open class CommonAIVoiceModel : BaseViewModel() {

    // AIUI agent (ASR + NLP + TTS); created in initAIUI().
    protected var mAIUIAgent: AIUIAgent? = null
    // Audio recorder feeding the wake-up/recognition pipeline; created in initAIUISdk().
    private var recorder: AudioRecorder? = null

    // The user's recognized question (UnPeekLiveData avoids re-delivery to new observers).
    val liveDataAIUIQuestion = UnPeekLiveData<String>()


    /**
     * Answer produced by AIUI's cloud NLP.
     */
    val liveDataAIUIAnswer = MutableLiveData<String>()

    /**
     * Answer produced by the Spark (星火) LLM.
     */
    val liveDataSparkAnswer = MutableLiveData<String>()

    /**
     * Final answer delivered to the user (from either NLP or Spark).
     */
    val liveDataAIAnswer = MutableLiveData<String>()

    /**
     * Spark conversation history (user/assistant turns), cleared on
     * wake-up, sleep and server disconnect.
     */
    val sparkHistoryList = mutableListOf<RoleContent>()

    // AI interaction state: set to 0 on wake-up, 1 on sleep (see the listeners below).
    var liveDataAIState = 1


    // Device serial number / GUID used for SDK authorization; must be stable per device.
    var guid = ""
    private lateinit var llm: LLM

    var onAIUIListener: OnAIUIListener? = null
    // Reusable CMD_TTS message; initialized by initTTS() before any startTTS() call.
    lateinit var startTTSMsg: AIUIMessage
    // TTS parameter string (speaker, speed, pitch, volume).
    lateinit var ttsParams: StringBuffer
    var isEnableDoCommonTalk: Boolean = true
    // Caller-defined TTS type, reported back via onTTSFinished().
    var ttsType = VoiceCommonConstant.tts_type_normal_talk

    companion object {
        const val TAG = "CommonAIVoiceModel"

    }


    /**
     * Wake-word listener: stops any ongoing TTS playback, resets the Spark
     * conversation history, notifies the optional [onAIUIListener] and
     * broadcasts the wake-up state on the event bus.
     */
    private var wakeupListener: WakeupListener? = WakeupListener { angle, _, _, _ ->
        // Stop TTS playback as soon as the wake word is heard.
        Log.i(TAG, "唤醒时停止播放tts")
        AiuiEngine.TTS_stop()
        isPlayingTTS = false
        sparkHistoryList.clear()
        onAIUIListener?.onWakeUp(angle)
        liveDataAIState = 0
        LiveEventBus.get<Int>(VoiceBusEvent.LiveEventAIState).post(AIState.AI_STATE_WAKE_UP)
    }

    /**
     * AIUI event callback. Dispatches server connection state, wake-up/sleep,
     * ASR/NLP/translation results, errors, VAD, engine state changes and TTS
     * playback progress. Left byte-identical here: the branches are stateful
     * and order-dependent (isPlayingTTS gating, history clearing).
     */
    private var aiuiListener: AIUIListener? = AIUIListener { event: AIUIEvent ->
        when (event.eventType) {
            AIUIConstant.EVENT_CONNECTED_TO_SERVER -> {
                val uid = event.data.getString("uid")
                Log.i(TAG, "已连接服务器,uid：$uid")
            }

            AIUIConstant.EVENT_SERVER_DISCONNECTED -> {
                Log.i(TAG, "与服务器断开连接")
                sparkHistoryList.clear()
            }

            AIUIConstant.EVENT_WAKEUP -> {
                Log.i(TAG, "进入识别状态")
                sparkHistoryList.clear()
            }

            AIUIConstant.EVENT_RESULT -> {
                // TTS synthesis results.
                if (event.info.contains("\"sub\":\"tts")) {
                    // Cloud TTS result.
                    // Optionally save the cloud/offline TTS audio for offline playback.
                    if (saveTTS) {
                        val audio = event.data.getByteArray("0") // TTS audio data, 16 kHz, 16-bit
                        if (audio != null) {
                            FileUtil.writeFile(audio, "/sdcard/tts.pcm")
                        }
                    }
                    // When the SDK's built-in player does the playback, the raw TTS
                    // data does not need to be parsed here.
                    return@AIUIListener
                    // To play with your own player instead:
//                    byte[] audio=event.data.getByteArray("0");// TTS audio data, 16 kHz, 16-bit
//                    silence longer than 500 ms is treated as invalid audio and filtered out
//                    byte[] filteredAudio=TtsFilterUtil.filter(audio,500);
                }
                // ASR (speech recognition) results.
                if (event.info.contains("\"sub\":\"iat")) {
                    // Dictation (iat) result.
                    val cntJson = JSON.parseObject(
                        String(event.data.getByteArray("0")!!, StandardCharsets.UTF_8)
                    ) ?: return@AIUIListener
                    //                    Log.d(TAG, "cntJson" + cntJson);
                    val text = cntJson.getJSONObject("text")
                    // Incremental recognition text.
                    val asrResult: String = StreamingAsrUtil.processIATResult(text)
                    // "ls" marks the last (final) recognition segment.
                    if (text.getBoolean("ls")) {
                        Log.d(TAG, "识别结果=$asrResult")
                        if (!isPlayingTTS) {
                            if (TextUtils.isEmpty(asrResult)) {
                                //  liveDataAIUIQuestion.value = " "
                                //   liveDataAIAnswer.value = "哎呀，这个问题太难啦，\n" + "我也不知道！"
                            } else {
                                // Ignore trigger phrases; anything else becomes the user's
                                // question and shows a "thinking..." placeholder answer.
                                if (asrResult != "开始" && asrResult != "开始讲解" &&
                                    asrResult != "你好"
                                ) {
                                    liveDataAIUIQuestion.value = asrResult
                                    liveDataAIAnswer.value = "思考中..."
                                    LiveEventBus.get<String>(VoiceBusEvent.LiveEventAIAnswer)
                                        .post(liveDataAIAnswer.value)
                                }
                                //    startSparkChatSync(liveDataAIUIQuestion.value ?: "")
                            }
//                            LiveEventBus.get<String>(VoiceBusEvent.LiveEventAIQuestion)
//                                .post(liveDataAIUIQuestion.value)
                        }
                        // sid identifies this interaction; iFlytek can use it to look up
                        // the cloud audio and result.
                        val sid = event.data.getString("sid")
                        Log.i(TAG, "sid=$sid")
                    }
                } else if (event.info.contains("\"sub\":\"nlp")) {
                    // Semantic (nlp) result.
                    // rc=0 success, rc=1 bad semantic input, rc=2 internal error,
                    // rc=3 nlg problem, rc=4 no skill matched.
                    val cntJson = JSON.parseObject(
                        String(
                            event.data.getByteArray("0")!!,
                            StandardCharsets.UTF_8
                        )
                    )
                    val nlpResult = cntJson.getJSONObject("intent")
                    // Nothing to do for an empty nlp result.
                    if (nlpResult.isEmpty()) {
                        return@AIUIListener
                    }
                    /* Determine the request type and branch accordingly.
                       Docs: https://www.yuque.com/iflyaiui/zzoolv/xp3trb
                       1. The 3 kinds of nlp requests take different paths:
                          <1> voice  <2> text request - touch  <3> text request - background (hidden from the user)
                       2. event.data.getString("stream_id") distinguishes voice from text requests:
                          voice request: stream_id.startsWith("audio")
                          text request:  stream_id.startsWith("text")
                       3. event.data.getString("tag") distinguishes the text request types (tag is app-defined):
                          text request - touch:      event.data.getString("tag").equals("text-touch")
                          text request - background: event.data.getString("tag").equals("text-tag")
                     */
//                    String tag = event.data.getString("tag");
//                    Log.i(TAG, "tag==" + tag);
//                    if (event.data.getString("stream_id").startsWith("text")) {
//                        // Text requests do not reset the state automatically; do it manually.
//                        if (AiuiUtils.wakeupType == WAKEUPTYPE_TEXT) {
//                            AiuiUtils.mAIUIState = AIUIConstant.STATE_READY;
//                        }
//                        if (event.data.getString("tag").equals("text-tag")) {
//                            Log.i(TAG, "文本请求-后台的nlp回调");
//                        } else if (event.data.getString("tag").equals("text-touch")) {
//                            Log.i(TAG, "文本请求-触屏的nlp回调");
//                        }
//                    } else if (event.data.getString("stream_id").startsWith("audio")) {
//                        Log.i(TAG, "语音请求的nlp回调");
//                    }

                    // Recognized text. Meaningless words are filtered on the nlp result:
                    // if only the asr result were checked, an nlp result arriving first
                    // could be misjudged as a meaningless word.
                    val asrResult = nlpResult.getString("text")
                    val rc = nlpResult.getInteger("rc")
                    meaningful = senselessWordUtil.isMeaningful_filter1word(asrResult)
                    // Skip meaningless utterances.
                    if (!meaningful) {
//                        senselessWordUtil.nonsenseSid = event.data.getString("sid");
//                        Log.i(TAG, "无意义请求的sid=" + senselessWordUtil.nonsenseSid);
                        return@AIUIListener
                    }
                    //   liveDataAIUIQuestion.value = asrResult
                    // Cloud nlp answer: publish it and speak it via TTS.
                    val answerObject = nlpResult.getJSONObject("answer")
                    if (rc == 0 && answerObject != null) {
                        val answer = answerObject.getString("text")
                        // TTS option 1: the AIUI platform synthesizes automatically when
                        //   cloud NLP is enabled (no code required).
                        // TTS option 2 (recommended): the device requests cloud synthesis
                        //   itself, e.g.:
                        /*val params = StringBuffer()
                        params.append("vcn=x4_lingxiaoying_em_v2") // speaker; full list: https://www.yuque.com/iflyaiui/zzoolv/iwxf76/edit#ZvJ9r
                        params.append(",speed=55") // speed, range [0,100]
                        params.append(",pitch=50") // pitch, range [0,100]
                        params.append(",volume=55") // volume, range [0,100]
                        AiuiEngine.TTS_start(answer, params)*/
                        Log.i(TAG, "isPlayingTTS:$isPlayingTTS")
                        if (!isPlayingTTS) {
                            Log.i(TAG, "isPlayingTTS:$isPlayingTTS")
                            liveDataAIUIAnswer.value = answer
                            liveDataAIAnswer.value = answer
                            startTTS(answer)
                            LiveEventBus.get<String>(VoiceBusEvent.LiveEventAIAnswer)
                                .post(answer)
                        }
                    } else if (rc != 0) {
                        // NLP could not answer: fall back to the Spark LLM, unless the
                        // question is empty/a trigger phrase or TTS is already playing.
                        if (liveDataAIUIQuestion.value != " " && !TextUtils.isEmpty(
                                liveDataAIUIQuestion.value
                            ) && liveDataAIUIQuestion.value != "开始" && liveDataAIUIQuestion.value != "开始讲解" &&
                            liveDataAIUIQuestion.value != "你好" && !isPlayingTTS
                        ) {
                            liveDataAIUIAnswer.value = ""
                            startSparkChatSync(liveDataAIUIQuestion.value ?: "")
                        }
                    }
                    Log.i(TAG, "nlp result :$nlpResult")
                } else if (event.info.contains("\"sub\":\"itrans")) {
                    // Translation (itrans) result.
                    val cntJson = JSON.parseObject(
                        String(
                            event.data.getByteArray("0")!!,
                            StandardCharsets.UTF_8
                        )
                    )
                    if (cntJson != null) {
                        val tmp = cntJson.getJSONObject("trans_result")
                        val src = tmp.getString("src")
                        val dst = tmp.getString("dst")
                    }
                } else if (event.info.contains("\"sub\":\"tpp")) {
                    // Post-processing (tpp) service result.
                    val cntJson = JSON.parseObject(
                        String(event.data.getByteArray("0")!!, StandardCharsets.UTF_8)
                    )
                    if (cntJson != null) {
                        Log.i(TAG, "tpp后处理结果")
                        Log.i(TAG, cntJson.toString())
                    }
                } else if (event.info.contains("\"sub\": \"esr_pgs\"")) {
                    // Offline recognition intermediate result (esr = offline).
                    val cntJson = JSON.parseObject(
                        String(
                            event.data.getByteArray("0")!!,
                            StandardCharsets.UTF_8
                        )
                    )
                } else if (event.info.contains("\"sub\": \"esr_fsa\"")) {
                    // Offline command-word final result.
                    val cntJson = JSON.parseObject(
                        String(
                            event.data.getByteArray("0")!!,
                            StandardCharsets.UTF_8
                        )
                    )
                    if (cntJson != null) {
                        if (TextUtils.isEmpty(cntJson.toString())) {
                            return@AIUIListener
                        }
                        // Parse the semantic result.
                        val resultStr = cntJson.getString("intent")
                    }
                } else if (event.info.contains("\"sub\": \"esr_iat\"")) {
                    // Offline recognition final result.
                    val cntJson = JSON.parseObject(
                        String(event.data.getByteArray("0")!!, StandardCharsets.UTF_8)
                    )
                    if (cntJson != null) {
                        if (TextUtils.isEmpty(cntJson.toString())) {
                            return@AIUIListener
                        }
                        // Parse the semantic result.
                        val resultStr = cntJson.getString("intent")
                    }
                }
            }

            AIUIConstant.EVENT_ERROR -> when (event.arg1) {
                11217 -> {
                    // Error 11217 means AIUI did not initialize properly, so destroy it.
                    AiuiEngine.destroy()
                    // 11217 can occur when one SN is bound to two devices (e.g. after an
                    // OTA upgrade); re-initialize exactly once -- never in a loop.
                    if (EngineConstants.firstInit) {
                        initAIUI()
                        EngineConstants.firstInit = false // prevents an initialization loop
                        Log.d(TAG, "错误码11217,重新初始化一次")
                    }
                }

                else -> {
                    //("error code: " + event.arg1)
                    //("error info: " + event.info)
                    // Troubleshooting reference: https://www.yuque.com/iflyaiui/zzoolv/igbuol
                }
            }

            AIUIConstant.EVENT_VAD -> if (AIUIConstant.VAD_BOS == event.arg1) {
                Log.i(TAG, "vad_bos,开始说话")
                // isPlayingTTS=true
            } else if (AIUIConstant.VAD_BOS_TIMEOUT == event.arg1) {
                Log.i(TAG, "vad timeout,不说话,前端点超时")
            } else if (AIUIConstant.VAD_EOS == event.arg1) {
                Log.i(TAG, "vad eos,结束说话")
            } else if (AIUIConstant.VAD_VOL == event.arg1) {
//                    Log.i(TAG, "vad vol,说话音量:" + event.arg2);
            }

            AIUIConstant.EVENT_SLEEP -> {
                Log.i(TAG, "设备休眠")
                sparkHistoryList.clear()
                if (onAIUIListener != null) {
                    onAIUIListener!!.onAISleep()
                }
                liveDataAIState = 1
                LiveEventBus.get<Int>(VoiceBusEvent.LiveEventAIState).post(AIState.AI_STATE_SLEEP)
            }

            AIUIConstant.EVENT_START_RECORD -> Log.i(TAG, "开始录音")

            AIUIConstant.EVENT_STOP_RECORD -> Log.i(TAG, "停止录音")

            AIUIConstant.EVENT_STATE -> {
                mAIUIState = event.arg1
                if (AIUIConstant.STATE_IDLE == mAIUIState) {
                    // Idle: AIUI not started.
                    Log.i(TAG, "aiui状态:STATE_IDLE")
                } else if (AIUIConstant.STATE_READY == mAIUIState) {
                    // Ready: waiting for wake-up; also releases the TTS gate.
                    Log.i(TAG, "aiui状态:STATE_READY")
                    isPlayingTTS = false
                } else if (AIUIConstant.STATE_WORKING == mAIUIState) {
                    // Working: interaction in progress.
                    Log.i(TAG, "aiui状态:STATE_WORKING")
                }
            }

            AIUIConstant.EVENT_TTS -> {
                // Mirror TTS playback progress into the isPlayingTTS gate.
                when (event.arg1) {
                    AIUIConstant.TTS_SPEAK_BEGIN -> {
                        isPlayingTTS = true
                        Log.i(TAG, "tts:开始播放 isPlayingTTS$isPlayingTTS")
                    }

                    AIUIConstant.TTS_SPEAK_PROGRESS -> {
                        isPlayingTTS = true
                        Log.i(TAG, "tts:播放进行中 isPlayingTTS$isPlayingTTS")
                    }

                    AIUIConstant.TTS_SPEAK_PAUSED -> {
                        isPlayingTTS = false
                        Log.i(TAG, "tts:暂停播放 isPlayingTTS$isPlayingTTS")
                    }

                    AIUIConstant.TTS_SPEAK_RESUMED -> {
                        isPlayingTTS = true
                        Log.i(TAG, "tts:恢复播放 isPlayingTTS$isPlayingTTS")
                    }

                    AIUIConstant.TTS_SPEAK_COMPLETED -> {
                        isPlayingTTS = false
                        onAIUIListener?.onTTSFinished(ttsType)
                        Log.i(TAG, "tts:播放完成 isPlayingTTS$isPlayingTTS")
                    }

                    else -> {}
                }
            }

            else -> {}
        }
    }


    /** Copies bundled asset files to device storage on the ViewModel scope. */
    fun init() {
        viewModelScope.launch {
            CopyAssetsUtils.portingFile(BaseApplication.getContext())
        }
    }

    /**
     * Initializes the full voice pipeline: device serial number (guid), the
     * AIUI agent (ASR + NLP + TTS), the wake-up/noise-reduction engine and the
     * audio recorder.
     *
     * The serial number must be unique AND stable per device (an unstable
     * value wastes authorization quota), and the same value must be used for
     * both WakeupEngine and AIUI. See https://www.yuque.com/iflyaiui/zzoolv/tgftb5
     */
    fun initAIUISdk() {
        // Reset recording state.
        EngineConstants.isRecording = false
        // Load the persisted GUID once (the original read the file twice);
        // create and persist a new one on first run.
        val storedGuid = GuidUtil.getFromFile(BaseApplication.getContext())
        guid = if (TextUtils.isEmpty(storedGuid)) {
            GuidUtil.createGUID(BaseApplication.getContext()).also {
                GuidUtil.setToFile(BaseApplication.getContext(), it)
            }
        } else {
            storedGuid
        }
        EngineConstants.serialNumber = guid
        // Audio path: denoise + wake-up first, then feed to recognition.
        SystemRecorder.AUDIO_TYPE_ASR = false
        // Initialize AIUI (ASR + NLP + TTS).
        initAIUI()
        // Initialize the wake-up engine (noise reduction + wake word).
        val initResult: Int = WakeupEngine.getInstance(wakeupListener)
        if (initResult == 0) {
            Log.i(TAG, "wakeupEngine初始化成功")
        } else {
            // Error code reference: https://www.yuque.com/iflyaiui/zzoolv/igbuol
            Log.e(TAG, "wakeupEngine初始化失败")
        }
        // Create the recorder and start capturing.
        if (recorder == null) {
            recorder = RecorderFactory.getRecorder()
        }
        if (recorder != null) {
            Log.i(TAG, "录音机初始化成功")
            startRecord()
        } else {
            Log.i(TAG, "录音机初始化失败")
        }
    }

    /**
     * Creates the AIUI agent (ASR + NLP + TTS) from cfg/aiui.cfg and, on
     * success, prepares the reusable TTS request message.
     */
    fun initAIUI() {
        mAIUIAgent = AiuiEngine.getInstance(aiuiListener, "cfg/aiui.cfg")
        if (mAIUIAgent == null) {
            Log.e(TAG, "AIUI初始化失败")
        } else {
            Log.i(TAG, "AIUI初始化成功")
            initTTS()
        }
    }

    /**
     * Builds the TTS parameter string and the reusable START message used by
     * [startTTS]. Speaker x2_xiaojuan is the free default; other voices are
     * paid add-ons (list: https://www.yuque.com/iflyaiui/zzoolv/iwxf76).
     */
    private fun initTTS() {
        ttsParams = StringBuffer()
        ttsParams.append("vcn=x2_xiaojuan") // speaker
        ttsParams.append(",speed=55")       // speed, range [0,100]
        ttsParams.append(",pitch=50")       // pitch, range [0,100]
        ttsParams.append(",volume=100")     // volume, range [0,100]
        startTTSMsg = AIUIMessage()
        startTTSMsg.msgType = AIUIConstant.CMD_TTS
        startTTSMsg.arg1 = AIUIConstant.START
        startTTSMsg.arg2 = 0
        startTTSMsg.params = ttsParams.toString()
        //startTTSMsg.data = ttsData
    }

    /**
     * Starts TTS playback of [ttsContent] through the AIUI engine, raising the
     * isPlayingTTS gate so new ASR results are suppressed while speaking.
     *
     * @param ttsContent text to synthesize (sent as UTF-8 bytes)
     * @param ttsType caller-defined type, reported back via onTTSFinished
     */
    fun startTTS(ttsContent: String, ttsType: Int = 0) {
        this.ttsType = ttsType
        isPlayingTTS = true
        startTTSMsg.data = ttsContent.toByteArray(Charsets.UTF_8)
        AiuiEngine.TTS_start(startTTSMsg)
    }

    /**
     * Initializes the Spark (星火) LLM SDK with the app credentials from
     * [VoiceSdkConstant].
     *
     * Config fields: appID / apiKey / apiSecret (required, the app triple),
     * logLevel (0 VERBOSE, 1 DEBUG, 2 INFO, 3 WARN, 4 ERROR, 5 FATAL, 100 OFF),
     * logPath and uid (optional). init() returns 0 on success; any other value
     * is an SDK error code.
     */
    fun initSparkSdk() {
        val sparkChainConfig: SparkChainConfig = SparkChainConfig.builder()
            .appID(VoiceSdkConstant.SPARK_APPID)
            .apiKey(VoiceSdkConstant.SPARK_APIKey)
            .apiSecret(VoiceSdkConstant.SPARK_APISecret) // the application's appid triple
            .logLevel(0)
        val ret: Int = SparkChain.getInst().init(BaseApplication.getContext(), sparkChainConfig)
        if (ret != 0) {
            Log.d(TAG, "Spark SDK初始化失败：其他错误:$ret")
        } else {
            Log.d(TAG, "Spark SDK初始化成功：$ret")
            initSparkConfig()
        }
    }

    /**
     * Builds the LLM configuration (domain "generalv2", this device's guid as
     * uid) and creates the [llm] instance used by [startSparkChatSync].
     *
     * Result fields returned by the model (for reference):
     *  - sid: id of this session (String)
     *  - status: data state, 0 start / 1 continue / 2 end (int)
     *  - seq: chunk sequence number, 0..9999999 (int)
     *  - content: text data (String)
     *  - role: Spark model role (String)
     *  - prompt_tokens: total tokens including history questions (int)
     *  - completion_tokens: tokens in the answer (int)
     *  - total_tokens: prompt + completion tokens; billed per interaction (int)
     */
    private fun initSparkConfig() {
        val llmConfig = LLMConfig.builder()
        llmConfig.domain("generalv2")
            .url("")
            .uid(guid)
        llm = LLM(llmConfig)
        // Async callback variant kept for reference; the code currently uses
        // the synchronous llm.run() path in startSparkChatSync instead.
        /* val llmCallbacks: LLMCallbacks = object : LLMCallbacks {
             override fun onLLMResult(llmResult: LLMResult, usrContext: Any) {
                 Log.d(TAG, "onLLMResult\n")
                 val content = llmResult.content
                 Log.e(TAG, "onLLMResult:$content")
                 val status = llmResult.status
                 myContext = usrContext
                 Log.d(TAG, "context:${JSON.toJSONString(myContext)}")
                 if (status == 2) {
                     val completionTokens = llmResult.completionTokens
                     val promptTokens = llmResult.promptTokens //
                     val totalTokens = llmResult.totalTokens
                     Log.e(
                         TAG,
                         "completionTokens:" + completionTokens + "promptTokens:" + promptTokens + "totalTokens:" + totalTokens
                     )
                     liveDataSparkAnswer.value = llmResult.content
                 }
             }

             override fun onLLMEvent(event: LLMEvent, usrContext: Any) {
                 Log.d(TAG, "onLLMEvent\n")
                 Log.w(TAG, "onLLMEvent:" + " " + event.eventID + " " + event.eventMsg)
             }

             override fun onLLMError(error: LLMError, usrContext: Any) {
                 Log.d(TAG, "onLLMError\n")
                 Log.e(
                     TAG, "errCode:" + error.errCode + "errDesc:" + error.errMsg
                 )
                 myContext = usrContext
                 Log.d(TAG, "context:${JSON.toJSONString(myContext)}")
             }
         }
         llm.registerLLMCallbacks(llmCallbacks)*/

    }

    /**
     * Queries the Spark LLM synchronously on an IO dispatcher and publishes
     * the answer (LiveData, event bus and TTS playback) on the main thread.
     *
     * Previous turns from [sparkHistoryList] are sent as a JSON array so the
     * model keeps conversational context; with no history the bare question is
     * sent as-is.
     *
     * Fixes over the original: the error path and the previously-empty catch
     * block now log the failure and release the isPlayingTTS gate (no TTS will
     * play on failure, so leaving the gate raised blocked subsequent ASR
     * handling until an unrelated STATE_READY event reset it).
     *
     * @param question the user's recognized utterance
     */
    fun startSparkChatSync(question: String) {
        viewModelScope.launch(Dispatchers.IO) {
            try {
                // Suppress ASR handling while the answer is fetched and spoken.
                isPlayingTTS = true
                val currentJsonObject = JSONObject()
                currentJsonObject["role"] = "user"
                currentJsonObject["content"] = "请用100字回答以下问题\n $question"
                // With history: send the whole conversation as a JSON array;
                // otherwise the bare question suffices.
                val content: String
                if (sparkHistoryList.isNotEmpty()) {
                    val array = JSONArray()
                    for (roleContent in sparkHistoryList) {
                        val turn = JSONObject()
                        turn["role"] = roleContent.role
                        turn["content"] = roleContent.content
                        array.add(turn)
                    }
                    array.add(currentJsonObject)
                    content = array.toString()
                } else {
                    content = question
                }
                // Blocking call -- must stay off the main thread.
                val outPut = llm.run(content)
                if (outPut.errCode == 0) {
                    withContext(Dispatchers.Main) {
                        val sparkResult = outPut.content
                        val role = outPut.role
                        Log.i(TAG, "获取星火大模型结果：" + JSONObject.toJSONString(outPut))
                        liveDataSparkAnswer.value = sparkResult
                        liveDataAIAnswer.value = sparkResult
                        startTTS(sparkResult)
                        LiveEventBus.get<String>(VoiceBusEvent.LiveEventAIAnswer)
                            .post(liveDataAIAnswer.value)
                        // Record both turns so follow-up questions keep context.
                        sparkHistoryList.add(
                            RoleContent(
                                currentJsonObject["role"] as String,
                                currentJsonObject["content"] as String
                            )
                        )
                        sparkHistoryList.add(
                            RoleContent(
                                role,
                                sparkResult
                            )
                        )
                    }
                } else {
                    // Request failed: release the TTS gate so the mic is usable again.
                    isPlayingTTS = false
                    Log.i(
                        TAG,
                        "星火大模型同步调用：" + "errCode" + outPut.getErrCode() + " errMsg:" + outPut.getErrMsg()
                    )
                }
            } catch (e: Exception) {
                // Previously swallowed silently; log and release the TTS gate.
                isPlayingTTS = false
                Log.e(TAG, "startSparkChatSync failed", e)
            }
        }
    }


    /**
     * Starts audio capture on the current recorder (no-op when none exists).
     * Return codes: 0 = OK, 111111 = AlsaRecorder is null; any other value
     * means the audio device could not be opened, in which case the user is
     * notified and the recorder is torn down.
     */
    fun startRecord() {
        val rec = recorder ?: return
        when (rec.startRecord()) {
            0 -> Log.i(TAG, "开启录音成功！")
            111111 -> Log.i(TAG, "异常,AlsaRecorder is null ...")
            else -> {
                ToastUtils.showLong("开启录音失败，请查看/dev/snd/下的设备节点是否有777权限！\nAndroid 8.0 以上需要暂时使用setenforce 0 命令关闭Selinux权限！")
                destroyRecord()
            }
        }
    }

    /** Stops audio capture if a recorder exists. */
    fun stopRecord() {
        recorder?.stopRecord()
    }


    /** Stops capture and drops the recorder reference. */
    fun destroyRecord() {
        stopRecord()
        recorder = null
        Log.d(TAG, "destroy is Done!")
    }

    /**
     * Releases all voice resources: stops recording, destroys the wake-up and
     * AIUI engines, un-initializes the Spark SDK and clears listener/recorder
     * references.
     */
    open fun onDestroy() {
        if (isRecording) {
            stopRecord()
        }
        // Tear down the wake-up engine.
        WakeupEngine.destroy()
        // Tear down AIUI.
        AiuiEngine.destroy()
        // Un-initialize the Spark LLM SDK.
        SparkChain.getInst().unInit()
        wakeupListener = null
        aiuiListener = null
        recorder?.destroyRecord()
        recorder = null
    }

}