package com.bytedance.speech.speechdemo.volcengine.asr

import android.content.Context
import android.os.Handler
import android.util.Log
import com.bytedance.speech.speechdemo.MainActivity
import com.bytedance.speech.speechdemo.R
import com.bytedance.speech.speechdemo.SettingsActivity
import com.bytedance.speech.speechdemo.settings.Settings
import com.bytedance.speech.speechdemo.utils.SensitiveDefines
import com.bytedance.speech.speechdemo.utils.SpeechDemoDefines
import com.bytedance.speech.speechdemo.utils.SpeechStreamRecorder
import com.bytedance.speech.speechengine.SpeechEngine
import com.bytedance.speech.speechengine.SpeechEngineDefines
import com.bytedance.speech.speechengine.SpeechEngineGenerator
import org.json.JSONException
import org.json.JSONObject

/**
 * Thin wrapper around the Volcengine/ByteDance speech SDK that drives one-shot
 * ASR (speech-to-text) sessions and forwards recognized text to an
 * [OnAsrTextListen] callback.
 *
 * Lifecycle: call [start] once with a [Context]; the engine is created lazily,
 * configured from [SettingsActivity] values, initialized, and this instance is
 * registered as the SDK message listener. SDK callbacks arrive via
 * [onSpeechMessage].
 */
class AsrEngine : SpeechEngine.SpeechListener {

    /** Callback interface delivering recognized ASR text to the host. */
    interface OnAsrTextListen {
        fun onText(text: String)
    }

    val TAG: String = AsrEngine::class.java.name

    private var mSettings: Settings? = null
    private var mSpeechEngine: SpeechEngine? = null
    private var mStreamRecorder: SpeechStreamRecorder? = null
    // Debug output directory; empty string leaves SDK debug-file output disabled.
    private val mDebugPath = ""
    private var context: Context? = null
    private var mEngineStarted = false
    private var handler = Handler()
    // Millisecond timestamp of "user finished talking"; <= 0 means no
    // final-result latency measurement is pending.
    private var mFinishTalkingTimestamp: Long = -1

    var asrTextListen: OnAsrTextListen? = null

    fun setOnAsrTextListen(listen: OnAsrTextListen) {
        asrTextListen = listen
    }

    /**
     * Creates the speech engine on first call, applies all configuration,
     * initializes it and registers this object as the SDK listener.
     *
     * On init failure, [speechEngineInitFailed] is invoked and the method
     * returns without registering a listener.
     */
    fun start(context: Context) {
        this.context = context
        if (mSpeechEngine == null) {
            // Informational, not an error: use Log.i instead of Log.e.
            Log.i(TAG, "创建引擎.")
            mSpeechEngine = SpeechEngineGenerator.getInstance().apply {
                createEngine()
                setContext(context.applicationContext)
            }
        }
        mSettings = SettingsActivity.getSettings(SpeechDemoDefines.ASR_VIEW)

        if (mStreamRecorder == null) {
            mStreamRecorder = SpeechStreamRecorder()
        }

        val engine = checkNotNull(mSpeechEngine) { "speech engine creation failed" }
        Log.i(TAG, "SDK 版本号: " + engine.version)

        Log.i(TAG, "配置初始化参数.")
        configInitParams()

        Log.i(TAG, "引擎初始化.")
        val ret = engine.initEngine()
        if (ret != SpeechEngineDefines.ERR_NO_ERROR) {
            val errMessage = "初始化失败，返回值: $ret"
            Log.e(TAG, errMessage)
            speechEngineInitFailed(errMessage)
            return
        }
        Log.i(TAG, "设置消息监听")
        engine.setListener(this)
        speechEnginInitucceeded()
    }

    /** Applies every engine option that must be set before `initEngine()`. */
    private fun configInitParams() {
        val engine = checkNotNull(mSpeechEngine) { "speech engine not created" }
        val settings = checkNotNull(mSettings) { "settings not loaded" }

        // [Required] engine name
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_ENGINE_NAME_STRING,
            SpeechEngineDefines.ASR_ENGINE
        )

        // [Optional] debug path & log level
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_DEBUG_PATH_STRING, mDebugPath)
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_LOG_LEVEL_STRING,
            SpeechEngineDefines.LOG_LEVEL_DEBUG
        )

        // [Optional] user ID (helps the server side locate per-user issues)
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_UID_STRING, SensitiveDefines.UID)

        // [Required] audio source
        val recorderType = settings.getOptionsValue(R.string.config_recorder_type, context)
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_RECORDER_TYPE_STRING, recorderType)
        if (settings.getBoolean(R.string.config_asr_rec_save)) {
            // [Optional] recording save path; if set, the SDK writes .wav files there.
            engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_ASR_REC_PATH_STRING, mDebugPath)
        }

        // [Optional] sample rate, default 16000
        engine.setOptionInt(
            SpeechEngineDefines.PARAMS_KEY_SAMPLE_RATE_INT,
            settings.getInt(R.string.config_sample_rate)
        )
        // [Optional] channel count (1 or 2, default 1); the uplink channel count
        // should normally match it, so both are set from the same setting.
        val channelNum = settings.getInt(R.string.config_channel)
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_CHANNEL_NUM_INT, channelNum)
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_UP_CHANNEL_NUM_INT, channelNum)

        // When the audio source is RECORDER_TYPE_STREAM and the app-supplied
        // audio is not 16 kHz mono, the SDK-internal resampler must be enabled
        // and told the actual input format.
        if (recorderType == SpeechEngineDefines.RECORDER_TYPE_STREAM) {
            val recorder = checkNotNull(mStreamRecorder)
            if (recorder.GetStreamSampleRate() != 16000 || recorder.GetStreamChannel() != 1) {
                engine.setOptionBoolean(
                    SpeechEngineDefines.PARAMS_KEY_ENABLE_RESAMPLER_BOOL,
                    true
                )
                engine.setOptionInt(
                    SpeechEngineDefines.PARAMS_KEY_CUSTOM_SAMPLE_RATE_INT,
                    recorder.GetStreamSampleRate()
                )
                engine.setOptionInt(
                    SpeechEngineDefines.PARAMS_KEY_CUSTOM_CHANNEL_INT,
                    recorder.GetStreamChannel()
                )
            }
        }

        // [Required] ASR service domain (falls back to the built-in default)
        val address = settings.getString(R.string.config_address)
            .ifEmpty { SensitiveDefines.DEFAULT_ADDRESS }
        Log.i(TAG, "Current address: $address")
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_ASR_ADDRESS_STRING, address)

        // [Required] ASR service URI
        val uri = settings.getString(R.string.config_uri)
            .ifEmpty { SensitiveDefines.ASR_DEFAULT_URI }
        Log.i(TAG, "Current uri: $uri")
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_ASR_URI_STRING, uri)

        // [Required] auth: appid
        val appid = settings.getString(R.string.config_app_id)
            .ifEmpty { SensitiveDefines.APPID }
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_APP_ID_STRING, appid)
        // [Required] auth: token
        val token = settings.getString(R.string.config_token)
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_APP_TOKEN_STRING, token)
        Log.i(TAG, "appid: $appid")
        // SECURITY: never log the raw credential; only report whether one is set.
        Log.i(TAG, "token configured: ${token.isNotEmpty()}")

        // [Required] ASR service cluster
        val cluster = settings.getString(R.string.config_cluster)
            .ifEmpty { SensitiveDefines.ASR_DEFAULT_CLUSTER }
        Log.i(TAG, "Current cluster: $cluster")
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_ASR_CLUSTER_STRING, cluster)

        // [Optional] connect/receive timeouts; defaults are usually sufficient.
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_ASR_CONN_TIMEOUT_INT, 3000)
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_ASR_RECV_TIMEOUT_INT, 5000)

        // [Optional] reconnect attempts after disconnect; default 0 (disabled).
        engine.setOptionInt(
            SpeechEngineDefines.PARAMS_KEY_ASR_MAX_RETRY_TIMES_INT,
            settings.getInt(R.string.config_asr_max_retry_times)
        )
    }

    // Note: name kept as-is (typo included) for binary compatibility with
    // existing callers; it means "speech engine init succeeded".
    fun speechEnginInitucceeded() {
        Log.i(TAG, "引擎初始化成功!")
        mStreamRecorder?.SetSpeechEngine(SpeechDemoDefines.ASR_VIEW, mSpeechEngine)
    }

    /**
     * Dispatches SDK messages. `data` is a UTF-8 JSON payload for most message
     * types; a null payload is tolerated (previously crashed via `data!!`).
     */
    override fun onSpeechMessage(type: Int, data: ByteArray?, len: Int) {
        val stdData = data?.let { String(it) } ?: ""
        when (type) {
            SpeechEngineDefines.MESSAGE_TYPE_ENGINE_START -> {
                // Engine started successfully.
                Log.i(TAG, "Callback: 引擎启动成功: data: $stdData")
                speechStart()
            }

            SpeechEngineDefines.MESSAGE_TYPE_ENGINE_STOP -> {
                // Engine stopped.
                Log.i(TAG, "Callback: 引擎关闭: data: $stdData")
                speechStop()
            }

            SpeechEngineDefines.MESSAGE_TYPE_ENGINE_ERROR -> {
                // Error details delivered as JSON.
                Log.e(TAG, "Callback: 错误信息: $stdData")
                speechError(stdData)
            }

            SpeechEngineDefines.MESSAGE_TYPE_CONNECTION_CONNECTED ->
                Log.i(TAG, "Callback: 建连成功: data: $stdData")

            SpeechEngineDefines.MESSAGE_TYPE_PARTIAL_RESULT -> {
                // Partial (intermediate) recognition result.
                Log.d(TAG, "Callback: ASR 当前请求的部分结果")
                speechAsrResult(stdData, false)
            }

            SpeechEngineDefines.MESSAGE_TYPE_FINAL_RESULT -> {
                // Final recognition result for the current request.
                Log.i(TAG, "Callback: ASR 当前请求最终结果")
                speechAsrResult(stdData, true)
            }

            SpeechEngineDefines.MESSAGE_TYPE_VOLUME_LEVEL ->
                // Recording volume level; ignored beyond debug logging.
                Log.d(TAG, "Callback: 录音音量")

            else -> {}
        }
    }

    fun speechEngineInitFailed(tipText: String) {
        Log.e(TAG, "引擎初始化失败: $tipText")
    }

    fun speechStart() {
        mEngineStarted = true
    }

    fun speechStop() {
        mEngineStarted = false
        // Stop the recorder on the handler's thread; SDK callbacks arrive on an
        // SDK-owned thread.
        handler.post {
            mStreamRecorder?.Stop()
        }
    }

    /**
     * Parses an ASR result payload, notifies [asrTextListen], and formats a
     * display string via [setResultText]. For final results, also reports the
     * latency between end-of-speech and this callback.
     */
    fun speechAsrResult(data: String?, isFinal: Boolean) {
        var delay: Long = 0
        if (isFinal && mFinishTalkingTimestamp > 0) {
            delay = System.currentTimeMillis() - mFinishTalkingTimestamp
            // Reset to the same "not pending" sentinel used at declaration
            // (was inconsistently reset to 0 before).
            mFinishTalkingTimestamp = -1
        }
        val responseDelay = delay
        try {
            // Extract the recognized text from the callback JSON.
            val reader = JSONObject(data)
            if (!reader.has("result")) {
                return
            }
            var text = reader.getJSONArray("result").getJSONObject(0).getString("text")

            // Listener is notified even for empty text, matching prior behavior.
            asrTextListen?.onText(text)
            if (text.isEmpty()) {
                return
            }
            text = "result: $text"
            if (isFinal) {
                text += "\nreqid: ${reader.getString("reqid")}"
                text += "\nresponse_delay: $responseDelay"
            }
            setResultText(text)
        } catch (e: JSONException) {
            e.printStackTrace()
        }
    }

    /** Display hook; intentionally a no-op in this non-UI engine class. */
    fun setResultText(text: String?) {

    }

    /** Validates an error payload (err_code/err_msg) and forwards it as text. */
    fun speechError(data: String?) {
        try {
            val reader = JSONObject(data)
            if (!reader.has("err_code") || !reader.has("err_msg")) {
                return
            }
            setResultText(data)
        } catch (e: JSONException) {
            e.printStackTrace()
        }
    }


}