package com.soulkun.smarthome.ui.main.home.dialog

import android.os.Bundle
import android.text.TextUtils
import android.view.Gravity
import android.view.View
import android.view.ViewGroup
import androidx.fragment.app.viewModels
import androidx.lifecycle.ViewModelProvider
import androidx.lifecycle.lifecycleScope
import androidx.navigation.navGraphViewModels
import androidx.recyclerview.widget.LinearLayoutManager
import com.google.gson.Gson
import com.google.gson.JsonSyntaxException
import com.iflytek.cloud.*
import com.iflytek.cloud.util.ResourceUtil
import com.iflytek.cloud.util.ResourceUtil.RESOURCE_TYPE
import com.soulkun.smarthome.R
import com.soulkun.smarthome.databinding.AppDialogMainHomeSpeechRecognitionBinding
import com.soulkun.smarthome.logic.model.SpeechRecognitionResultModel
import com.soulkun.smarthome.ui.main.home.adapter.AppMainHomeSpeechRecognitionCommandListAdapter
import com.soulkun.smarthome.ui.main.home.viewmodel.AppMainHomeSpeechRecognitionViewModel
import com.soulkun.smarthome.ui.main.home.viewmodel.AppMainHomeViewModel
import kotlinx.coroutines.*
import soulkun.library.helper.decoration.LinearSpaceItemDecorationHelper
import soulkun.library.mvvm.component.BaseDataBindingDialogFragment
import soulkun.library.utils.file.FileUtils
import soulkun.library.utils.logE
import soulkun.library.utils.toast
import soulkun.library.utils.visible
import java.nio.charset.Charset
import java.util.*

/**
 * Bottom-sheet dialog driving offline (local) speech recognition via the iFlytek SDK.
 *
 * Flow: create the recognizer → compile the BNF grammar → upload the three
 * lexicons (room / device / command) → start listening with a 20-second
 * countdown → parse the JSON result and let the user pick a matched command,
 * which is then posted to the backend.
 *
 * UI state is published through [AppMainHomeSpeechRecognitionViewModel.speechRecognitionStateLiveData]:
 * 0 = initializing, 1 = listening, 2 = result list shown, 3 = error.
 */
class AppMainHomeSpeechRecognitionDialog :
    BaseDataBindingDialogFragment<AppDialogMainHomeSpeechRecognitionBinding>() {

    // Shared with the home screen (selected family, room/device command data).
    private val homeViewModel by lazy {
        requireParentFragment().navGraphViewModels<AppMainHomeViewModel>(R.id.app_navigation_main_home).value
    }
    private val viewModel by viewModels<AppMainHomeSpeechRecognitionViewModel> { defaultViewModelProviderFactory }

    // Local (offline) engine with a BNF command grammar.
    private val engineType = SpeechConstant.TYPE_LOCAL
    private val grammarType = "bnf"
    private var speechRecognizer: SpeechRecognizer? = null
    private var grammarPath: String? = null
    private var localGrammar: String? = null

    /**
     * Engine init listener: any non-success code aborts into the error state.
     */
    private val initListener by lazy {
        InitListener { code ->
            if (code != ErrorCode.SUCCESS) {
                errorSpeechSpeechRecognition("语音识别初始化失败")
            }
        }
    }

    /**
     * Grammar-build listener: reports grammar compilation failures.
     */
    private val grammarListener by lazy {
        GrammarListener { _, error ->
            if (error != null) {
                errorSpeechSpeechRecognition("语法构建失败")
            }
        }
    }

    /**
     * Recognition listener: drives the waveform animation with the input
     * volume, runs a 20-second countdown while listening, and dispatches the
     * final JSON result (or an error message) to the UI state.
     */
    private val recognizerListener by lazy {
        object : RecognizerListener {
            override fun onVolumeChanged(volume: Int, data: ByteArray?) {
                requireDataBinding().wlvMainHomeSpeechRecognitionAnimationWave.setVolume(volume)
            }

            override fun onBeginOfSpeech() {
                requireDataBinding().tvMainHomeSpeechRecognitionTextListeningCountdown.text =
                    "【20秒】"
                // FIX: run the countdown in the view lifecycle scope instead of
                // GlobalScope so it is cancelled automatically when the view is
                // destroyed (the old GlobalScope job kept updating a dead
                // binding). launch() starts eagerly, so the former redundant
                // timerJob?.start() call was dropped.
                timerJob = viewLifecycleOwner.lifecycleScope.launch(Dispatchers.Main) {
                    for (countdown in 19 downTo 0) {
                        delay(1000)
                        requireDataBinding().tvMainHomeSpeechRecognitionTextListeningCountdown.text =
                            "【${countdown}秒】"
                    }
                }
                viewModel.speechRecognitionStateLiveData.value = 1 // listening
                requireDataBinding().wlvMainHomeSpeechRecognitionAnimationWave.startAnim()
            }

            override fun onEndOfSpeech() {
                cancelCountdown()
                requireDataBinding().wlvMainHomeSpeechRecognitionAnimationWave.stopAnim()
            }

            override fun onResult(result: RecognizerResult?, p1: Boolean) {
                if (result != null && !TextUtils.isEmpty(result.resultString)) {
                    successSpeechSpeechRecognition(result.resultString)
                } else {
                    errorSpeechSpeechRecognition("匹配结果为空，请重试")
                }
            }

            override fun onError(error: SpeechError?) {
                when (error?.errorCode) {
                    // 20005: no grammar match for the utterance.
                    20005 -> errorSpeechSpeechRecognition("数据匹配不成功，请重试")
                    // 10118: no speech detected / volume too low.
                    10118 -> errorSpeechSpeechRecognition("检测到音量太小或没有说话，自动停止识别")
                    // 23008: exceeded the maximum 20 s recording window.
                    23008 -> errorSpeechSpeechRecognition("最长识别20秒，请重试")
                    // NOTE(review): unknown codes are only logged, leaving the UI
                    // in the listening state — presumably intentional; confirm.
                    else -> "错误码：${error?.errorCode}".logE()
                }
            }

            override fun onEvent(p0: Int, p1: Int, p2: Int, p3: Bundle?) {
            }
        }
    }

    // Result list adapter; tapping a command posts it to the selected family's
    // devices behind a blocking loading dialog.
    private val adapter by lazy {
        AppMainHomeSpeechRecognitionCommandListAdapter(
            mContext,
            viewModel.speechRecognitionCommandList
        ) {
            val loadingDialog = AppMainHomeSpeechRecognitionLoadingDialog()
            loadingDialog.show(childFragmentManager, "LoadingDialog")
            viewModel.postDeviceCommand(homeViewModel.selectFamilyLiveData.value!!.id, it)
                .observe(viewLifecycleOwner) { result ->
                    result.getOrNull()?.let { response ->
                        response.statusMessage.toast()
                        loadingDialog.dismiss()
                    }
                }
        }
    }

    // Countdown coroutine shown while the recognizer is listening.
    private var timerJob: Job? = null

    /** Cancels and clears the countdown job, if any. */
    private fun cancelCountdown() {
        timerJob?.cancel()
        timerJob = null
    }

    override fun setLayout(): Int {
        return R.layout.app_dialog_main_home_speech_recognition
    }

    override fun doOnViewCreated() {
        // Pin the dialog to the bottom of the screen at full width.
        requireDialog().window?.run {
            setLayout(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.WRAP_CONTENT)
            setGravity(Gravity.BOTTOM)
        }
        isCancelable = true
        requireDataBinding().lifecycleOwner = viewLifecycleOwner
        requireDataBinding().viewModel = viewModel
        requireDataBinding().onClickListener = View.OnClickListener {
            when (it.id) {
                // "Restart" button after a failed/empty recognition attempt.
                R.id.tv_main_home_speech_recognition_text_init_success_error_restart -> {
                    startSpeechSpeechRecognition()
                }
            }
        }
        requireDataBinding().rvMainHomeSpeechRecognitionListSuccessResult.run {
            layoutManager = LinearLayoutManager(mContext)
            adapter = this@AppMainHomeSpeechRecognitionDialog.adapter
            // Guard against stacking decorations when the view is recreated.
            if (itemDecorationCount == 0) {
                addItemDecoration(LinearSpaceItemDecorationHelper(0, 0, 15, 15, 10))
            }
        }
        if (initSpeechRecognition()) {
            buildSpeechSpeechRecognition()
        } else {
            "初始化失败，请重试".toast()
            dismiss()
        }
    }

    /**
     * Creates the recognizer and loads the grammar assets.
     *
     * @return `true` when the grammar path, the recognizer instance and the
     *   local BNF grammar text were all obtained; `false` otherwise.
     */
    private fun initSpeechRecognition(): Boolean {
        viewModel.speechRecognitionStateLiveData.value = 0 // initializing
        // FIX: a null external-files dir now yields `false` instead of an NPE,
        // honoring this function's Boolean contract.
        grammarPath = mContext.getExternalFilesDir("msc")?.absolutePath?.plus("/test")
        speechRecognizer = SpeechRecognizer.createRecognizer(mContext, initListener)
        localGrammar =
            FileUtils.readAssertFile(mContext, "command.bnf", Charset.defaultCharset())
        return grammarPath != null && speechRecognizer != null && localGrammar != null
    }

    /** Kicks off lexicon preparation and grammar compilation in parallel. */
    private fun buildSpeechSpeechRecognition() {
        viewModel.getLexiconSet(homeViewModel.roomDeviceCommandList)
        buildSpeechGrammar()
    }

    /**
     * Configures the recognizer, compiles the BNF grammar, then waits for the
     * lexicon data to be ready before uploading it via [updateSpeechLexicon].
     */
    private fun buildSpeechGrammar() {
        speechRecognizer!!.run {
            // Clear any previous session parameters.
            setParameter(SpeechConstant.PARAMS, null)
            // Text encoding of the grammar source.
            setParameter(SpeechConstant.TEXT_ENCODING, "utf-8")
            // Engine type: local (offline).
            setParameter(SpeechConstant.ENGINE_TYPE, engineType)
            // Output directory for the compiled grammar.
            setParameter(ResourceUtil.GRM_BUILD_PATH, grammarPath)
            // Bundled offline recognition resource.
            setParameter(ResourceUtil.ASR_RES_PATH, getResourcePath())
            val response = buildGrammar(grammarType, localGrammar, grammarListener)
            if (response != ErrorCode.SUCCESS) {
                errorSpeechSpeechRecognition("语法构建失败")
                return
            }
            // Grammar name used for this session.
            setParameter(SpeechConstant.GRAMMAR_LIST, "command")
            // Result payload format.
            setParameter(SpeechConstant.RESULT_TYPE, "json")
            // Max leading silence before recognition gives up (ms).
            setParameter(SpeechConstant.VAD_BOS, "5000")
            // Max trailing silence before the utterance is considered done (ms).
            setParameter(SpeechConstant.VAD_EOS, "1500")
            // Grammar id used by the local engine.
            setParameter(SpeechConstant.LOCAL_GRAMMAR, "command")
            // Confidence threshold for accepting a match.
            setParameter(SpeechConstant.MIXED_THRESHOLD, "30")
            // Persist the captured audio (wav) for debugging; app-scoped
            // external storage, so no WRITE_EXTERNAL_STORAGE permission needed.
            setParameter(SpeechConstant.AUDIO_FORMAT, "wav")
            setParameter(
                SpeechConstant.ASR_AUDIO_PATH,
                requireContext().getExternalFilesDir("msc")!!.absolutePath + "/asr.wav"
            )
            // Upload lexicons once the ViewModel has finished collecting them.
            viewModel.dataPrepareStateLiveData.observe(viewLifecycleOwner) {
                if (it == 1) {
                    updateSpeechLexicon()
                }
            }
        }
    }

    /**
     * Uploads the three lexicons (room, device, command) sequentially — the
     * SDK accepts one update at a time, hence the nested callbacks — and
     * starts listening once all three succeed.
     */
    private fun updateSpeechLexicon() {
        speechRecognizer!!.updateLexicon(
            "room",
            viewModel.roomNameSet.joinToString(separator = "\n")
        ) { _, roomError ->
            if (roomError != null) {
                errorSpeechSpeechRecognition("词典更新失败")
                return@updateLexicon
            }
            speechRecognizer!!.updateLexicon(
                "device",
                viewModel.deviceNameSet.joinToString(separator = "\n")
            ) { _, deviceError ->
                if (deviceError != null) {
                    errorSpeechSpeechRecognition("词典更新失败")
                    return@updateLexicon
                }
                speechRecognizer!!.updateLexicon(
                    "command",
                    viewModel.commandNameSet.joinToString(separator = "\n")
                ) { _, commandError ->
                    if (commandError == null) {
                        startSpeechSpeechRecognition()
                    } else {
                        errorSpeechSpeechRecognition("词典更新失败")
                    }
                }
            }
        }
    }

    /** Shows the waveform and starts a listening session. */
    private fun startSpeechSpeechRecognition() {
        requireDataBinding().wlvMainHomeSpeechRecognitionAnimationWave.visible(true)
        speechRecognizer!!.startListening(recognizerListener)
    }

    /**
     * Parses the recognizer's JSON payload and, on success, switches the UI to
     * the result-list state. Malformed JSON falls back to the error state.
     */
    private fun successSpeechSpeechRecognition(result: String) {
        requireDataBinding().wlvMainHomeSpeechRecognitionAnimationWave.stopAnim()
        cancelCountdown()
        val resultModel: SpeechRecognitionResultModel = try {
            Gson().fromJson(result, SpeechRecognitionResultModel::class.java)
        } catch (e: JsonSyntaxException) {
            e.printStackTrace()
            errorSpeechSpeechRecognition("数据解析错误，请重试")
            return
        }
        viewModel.filterSpeechRecognitionCommandList(resultModel) {
            requireDataBinding().wlvMainHomeSpeechRecognitionAnimationWave.visible(false)
            viewModel.speechRecognitionStateLiveData.value = 2 // result list
        }
    }

    /** Stops all listening UI and shows [error] in the error state. */
    private fun errorSpeechSpeechRecognition(error: String) {
        requireDataBinding().wlvMainHomeSpeechRecognitionAnimationWave.stopAnim()
        cancelCountdown()
        requireDataBinding().wlvMainHomeSpeechRecognitionAnimationWave.visible(false)
        viewModel.speechRecognitionStateLiveData.value = 3 // error
        requireDataBinding().tvMainHomeSpeechRecognitionTextErrorContent.text = error
    }

    /** Path of the bundled offline recognition resource (asr/common.jet). */
    private fun getResourcePath(): String =
        ResourceUtil.generateResourcePath(
            requireContext(),
            RESOURCE_TYPE.assets,
            "asr/common.jet"
        )

    override fun doOnResume() {
        requireDataBinding().wlvMainHomeSpeechRecognitionAnimationWave.onResume()
    }

    override fun onPause() {
        super.onPause()
        requireDataBinding().wlvMainHomeSpeechRecognitionAnimationWave.onPause()
    }

    override fun doOnDismiss() {

    }

    override fun doOnDestroyView() {
        // FIX: cancel the session and destroy the engine (the old code only
        // stopped listening), releasing the iFlytek recognizer's resources.
        speechRecognizer?.run {
            cancel()
            destroy()
        }
        speechRecognizer = null
        requireDataBinding().wlvMainHomeSpeechRecognitionAnimationWave.release()
        cancelCountdown()
        viewModel.resetViewModel()
    }

}