package com.team.childapp.ui.ai

import android.Manifest
import android.annotation.SuppressLint
import android.content.Context
import android.content.Intent
import android.content.pm.PackageManager
import android.graphics.Color
import android.graphics.drawable.ColorDrawable
import android.media.AudioManager
import android.media.AudioManager.OnAudioFocusChangeListener
import android.os.Build
import android.os.Environment
import android.os.Handler
import android.os.Looper
import android.os.Message
import android.text.TextUtils
import android.util.Log
import android.view.KeyEvent
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import android.widget.PopupWindow
import android.widget.TextView
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import androidx.recyclerview.widget.LinearLayoutManager
import com.blankj.utilcode.util.ToastUtils
import com.bumptech.glide.Glide
import com.bytedance.speech.speechengine.SpeechEngine
import com.bytedance.speech.speechengine.SpeechEngine.SpeechListener
import com.bytedance.speech.speechengine.SpeechEngineDefines
import com.bytedance.speech.speechengine.SpeechEngineGenerator
import com.sjy.pickphotos.pickphotos.PhotoPicker
import com.sjy.pickphotos.pickphotos.listeners.OnResultListener
import com.team.childapp.R
import com.team.childapp.base.App
import com.team.childapp.base.activity.BaseActivity
import com.team.childapp.base.listener.ICallBackResultListener
import com.team.childapp.bean.ai.AIHelperHistoryBean
import com.team.childapp.bean.ai.GenerateBean
import com.team.childapp.bean.ai.MessageTextPrint
import com.team.childapp.config.BaseConfig
import com.team.childapp.databinding.ActivityAiGenerateTextByImageBinding
import com.team.childapp.http.AIMessageLogical
import com.team.childapp.ui.MainActivity
import com.team.childapp.ui.ai.adapter.AiGenerateTextByImageAdapter
import com.team.childapp.ui.service.ForegroundService
import com.team.childapp.util.AIHelpUtil
import com.team.childapp.util.DimensionUtil
import com.team.childapp.util.LogUtils
import com.team.childapp.util.ToastUtil
import com.team.childapp.util.speechUtils.SensitiveDefines
import com.team.childapp.util.speechUtils.SpeechDemoDefines
import com.team.childapp.util.speechUtils.SpeechStreamPlayer
import com.team.childapp.util.speechUtils.SpeechStreamRecorder
import com.team.childapp.view.BubblePopupWindow
import com.team.childapp.view.PreviewDialog
import org.greenrobot.eventbus.EventBus
import org.greenrobot.eventbus.Subscribe
import org.greenrobot.eventbus.ThreadMode
import org.json.JSONException
import org.json.JSONObject
import java.io.File
import java.util.Collections
import java.util.Random

class AiGenerateTextByImageActivity : BaseActivity<ActivityAiGenerateTextByImageBinding>(R.layout.activity_ai_generate_text_by_image),
    SpeechListener {
    /** No upfront data loading; history is fetched from [initView] via [getData]. */
    override fun initData() {
    }
    // Request code used when asking for runtime permissions.
    private val CODE_PERMISSION_REQUEST = 999
    // Permissions the ASR engine needs before it can record audio.
    private val ASR_PERMISSIONS = listOf(
                Manifest.permission.RECORD_AUDIO
            )


    // Local path of the image the user picked; cleared after it is sent.
    private var selectImagePath: String? = null
    private var aiMessageAdapter: AiGenerateTextByImageAdapter? = null
    // Chat history shown in the RecyclerView (oldest first).
    private var dataList: MutableList<AIHelperHistoryBean>? = null

    // ASR engine instance (created lazily in initEngine()).
    private var mSpeechEngine: SpeechEngine? = null

    private var mStreamRecorder: SpeechStreamRecorder? = null
    // Directory the speech SDK writes debug/log files into ("" until resolved).
    private var mDebugPath = ""
    // Timestamp of the last DIRECTIVE_FINISH_TALKING, used to measure ASR latency.
    private var mFinishTalkingTimestamp: Long = -1

    // True while a push-to-talk recording session is active.
    private var recordIsRunning = false
    // Debounce handler/runnable for the long-press record gesture.
    private var recordHandler: Handler? = null
    private var recordRunnable: Runnable? = null
    private var mEngineStarted = false
    private var mConnectionCreated = false
    private var mPlayerPaused = false
    // History pagination state (page 1 = newest messages).
    private var pageNum = 1
    private var pageSize= 10
    // Maps an ASR request id to the AI row it streams into, so chunked
    // responses update one list entry instead of appending duplicates.
    private val map: MutableMap<String, AIHelperHistoryBean?> =
        HashMap<String, AIHelperHistoryBean?>()
    private var mPopupWindow: PopupWindow? = null
    private var bubblePopupWindow: BubblePopupWindow? = null
    private var bubbleView: View? = null
    private var tv_copy: TextView? = null
    private var tv_ref: TextView? = null
    private var tv_z: TextView? = null
    private var tv_c: TextView? = null
    // Whether TTS playback of AI replies is enabled (toggled by the sound icon).
    private var openSound = true
    // User sex flag from shared prefs; selects the default TTS voice below.
    private  var sexFlag=0;
    private  var TTS_DEFAULT_ONLINE_VOICE_TYPE="BV061_streaming"

    @SuppressLint("ClickableViewAccessibility")
    override fun initView() {
        // Pick the default online TTS voice by the stored user sex (0 -> BV061, else BV051).
        sexFlag= App.application().getShareData().getInt(BaseConfig.USER_SEX)
        if(0==sexFlag){
            TTS_DEFAULT_ONLINE_VOICE_TYPE="BV061_streaming"
        }else{
            TTS_DEFAULT_ONLINE_VOICE_TYPE="BV051_streaming"

        }
        SpeechEngineGenerator.PrepareEnvironment(getApplicationContext(), getApplication())
        // NOTE(review): registered here but never unregistered in onDestroy — verify for leaks.
        EventBus.getDefault().register(this)
        if (mDebugPath.isEmpty()) {
            mDebugPath = getDebugPath()
        }
        Log.i("SpeechDemo", "当前调试路径：$mDebugPath")
        /* cl_edit.setVisibility(View.GONE);
        im_switch.setVisibility(View.GONE);
        im_add.setVisibility(View.GONE);
        im_add_image.setVisibility(View.VISIBLE);*/
        dataList = ArrayList<AIHelperHistoryBean>()
        initClickPop()
        val linearLayoutManager = LinearLayoutManager(this)
        linearLayoutManager.stackFromEnd = true // lay items out from the bottom (chat style)
        mBinding.recyclerview!!.layoutManager = linearLayoutManager
        aiMessageAdapter = AiGenerateTextByImageAdapter(this)
        // Tapping a history row opens a full-screen preview of its image.
        aiMessageAdapter?.setOnItemClick(object : AiGenerateTextByImageAdapter.onItemClick {
            override fun onClickView(bean: AIHelperHistoryBean, view: View?) {
                /* mCurTtsText = bean.getContent().trim();
                startEngineBtnClicked();*/
//                if (bubblePopupWindow.isShowing()) {
//                    return;
//                }
//                bubblePopupWindow.show(view, Gravity.TOP, true, 0); // show above the view
                PreviewDialog.showDialog(supportFragmentManager,bean.imageUrl,false)
            }
        })
        mBinding.recyclerview.adapter = aiMessageAdapter
        aiMessageAdapter?.setData(dataList)
        if (mStreamRecorder == null) {
            mStreamRecorder = SpeechStreamRecorder()
        }
        if (mStreamPlayer == null) {
            mStreamPlayer = SpeechStreamPlayer()
        }
        // Current behavior: the button picks an image (the push-to-talk
        // touch handler below is disabled).
        mBinding.tvPlay2.setOnClickListener {
            pickImage()
        }
      /*  mBinding.tvPlay2!!.setOnTouchListener { v: View?, event: MotionEvent ->
            if (event.action == MotionEvent.ACTION_DOWN) {
                Glide.with(this)
                    .asGif()
                    .load(R.mipmap.icon_speach)
                    .into(mBinding.imagePlay)
                mBinding.imagePlay!!.visibility = View.VISIBLE
                mBinding.tvPlay2.visibility = View.GONE
                recordBtnTouchDown()
                return@setOnTouchListener true
            } else if (event.action == MotionEvent.ACTION_UP) {
                mBinding.imagePlay!!.visibility = View.GONE
                mBinding.tvPlay2.visibility = View.VISIBLE
                recordBtnTouchUp()
                return@setOnTouchListener true
            } else if (event.action == MotionEvent.ACTION_CANCEL) {
                mBinding.imagePlay!!.visibility = View.GONE
                mBinding.tvPlay2.visibility = View.VISIBLE
                recordBtnTouchUp()
                return@setOnTouchListener true
            }
            false
        }*/
        mAudioManager =
            getApplicationContext().getSystemService(Context.AUDIO_SERVICE) as AudioManager
        // Defer engine creation so it does not block the first layout pass.
        mBinding.tvPlay2.postDelayed({
            initEngine()
            initEngineTTS()
        }, 1000)

        val serviceIntent: Intent = Intent(this, ForegroundService::class.java)
        serviceIntent.putExtra("inputExtra", "Foreground Service Example in Android")
        ContextCompat.startForegroundService(this, serviceIntent)
        getData(true)
        // Pull-to-refresh loads the next (older) history page.
        mBinding.swp!!.setOnRefreshListener { getData(false) }
        // Sound toggle: when muted, also stop any in-flight TTS playback.
        mBinding.imSound.setOnClickListener {
            openSound = !openSound
            mBinding.imSound.setImageResource(if (openSound) R.mipmap.ai_open_sound else R.mipmap.ai_close_voice)
            if (!openSound) {
                if (mStreamPlayer != null) {
                    mStreamPlayer!!.Stop()
                }
            }
        }
        mBinding.imBack.setOnClickListener {
            finish()
        }
        mBinding.imBackRight.setOnClickListener { finish() }
    }

    /**
     * Builds the message bubble popup and wires its four action buttons
     * (copy / refresh / like / dislike — currently toast-only placeholders).
     */
    fun initClickPop() {
        val content = layoutInflater.inflate(R.layout.ai_pop, null)
        tv_copy = content.findViewById(R.id.tv_copy)
        tv_ref = content.findViewById(R.id.tv_ref)
        tv_z = content.findViewById(R.id.tv_z)
        tv_c = content.findViewById(R.id.tv_c)

        val popup = BubblePopupWindow(this)
        popup.setBubbleView(content)
        popup.setWidth(DimensionUtil.dpToPx(172))
        popup.setHeight(DimensionUtil.dpToPx(45))
        bubbleView = content
        bubblePopupWindow = popup

        tv_copy?.setOnClickListener { ToastUtils.showLong("复制") }
        tv_ref?.setOnClickListener { ToastUtils.showLong("刷新") }
        tv_z?.setOnClickListener { ToastUtils.showLong("赞ta") }
        tv_c?.setOnClickListener { ToastUtils.showLong("踩ta") }
    }


    /** Returns the system status bar height in pixels, or 0 when the resource is missing. */
    private fun getStatusBarHeight(): Int {
        val resId = resources.getIdentifier("status_bar_height", "dimen", "android")
        return if (resId > 0) resources.getDimensionPixelSize(resId) else 0
    }

    /** Creates the (full-width, transparent, animated) popup window used by this screen. */
    fun initPop() {
        mPopupWindow = PopupWindow(this).apply {
            contentView = LayoutInflater.from(this@AiGenerateTextByImageActivity)
                .inflate(R.layout.ai_pop, null)
            width = ViewGroup.LayoutParams.MATCH_PARENT
            height = ViewGroup.LayoutParams.WRAP_CONTENT
            setBackgroundDrawable(ColorDrawable(Color.TRANSPARENT))
            isFocusable = true
            animationStyle = R.style.mypopwindow_anim_style
        }
    }

    /**
     * Loads one page of AI chat history (mode type "4").
     *
     * @param isRfresh true: reset to page 1 and clear the current list;
     *                 false: request the next (older) page and prepend it.
     */
    fun getData(isRfresh: Boolean) {
        if (isRfresh) {
            pageNum = 1
            dataList!!.clear()
        } else {
            pageNum++
        }
        AIMessageLogical.getInstance().requestAIHistory(
            this,
            pageNum.toString(),
            pageSize.toString(),
            "4",
            object : ICallBackResultListener {
                override fun onCallBack(result: Any?) {
                    if (result != null) {
                        val helperHistoryBean: AIHelperHistoryBean = result as AIHelperHistoryBean
                        if (helperHistoryBean.rows != null && helperHistoryBean.rows.size > 0) {
                            // Rows arrive newest-first; reverse so older pages
                            // prepend in chronological order.
                            Collections.reverse(helperHistoryBean.rows)
                            dataList!!.addAll(0, helperHistoryBean.rows)
                            aiMessageAdapter?.notifyDataSetChanged()
                            if (pageNum == 1) {
                                scrollToDown()
                            }
                        }
                    } else {
                        // Load-more failed: roll the page counter back so the
                        // next attempt retries the same page.
                        if (!isRfresh) {
                            pageNum--
                        }
                    }
                    mBinding.swp!!.isRefreshing = false
                }
            })
    }

    // NOTE(review): none of these three members is written in this chunk —
    // presumably leftovers from a sibling AI chat screen; confirm before removing.
    val bean: AIHelperHistoryBean? = null
    var isVoice = true
    private val leftTopWindow: BubblePopupWindow? = null

    /**
     * Main-thread handler that applies chat updates to the list:
     *  - what == 1003: a user message (text + picked image) — append it and
     *    fire the image-based generation request;
     *  - what == 1005: a (possibly streamed) AI reply chunk — create or update
     *    the AI row keyed by the request id, and speak it when complete.
     *
     * Fix: bind explicitly to the main looper. The no-arg Handler()
     * constructor is deprecated and silently binds to the constructing
     * thread's looper.
     */
    val handler: Handler = object : Handler(Looper.getMainLooper()) {
        override fun handleMessage(msg: Message) {
            // msg is non-null by signature; no null check needed.
            if (msg.what == 1003) {
                mBinding.imSelect!!.visibility = View.GONE
                val userBean: AIHelperHistoryBean = msg.obj as AIHelperHistoryBean
                dataList!!.add(userBean)
                aiMessageAdapter?.notifyDataSetChanged()
                mBinding.recyclerview!!.smoothScrollBy(0, Int.MAX_VALUE)
                requestImage(userBean.getContent(), userBean.getImageUrl())
                // Image consumed — require a fresh pick for the next message.
                selectImagePath = null
            } else if (msg.what == 1005) {
                val generateBean: GenerateBean = msg.obj as GenerateBean
                val str: String = generateBean.getContent()
                val request = msg.arg1
                LogUtils.showLog("request：", request.toString())
                LogUtils.showLog("map size：", map.size.toString())

                val key = request.toString()
                if (map[key] == null) {
                    // First chunk of this request: append a new AI row.
                    val aiBean: AIHelperHistoryBean = AIHelperHistoryBean()
                    aiBean.setCreateBy("ai")
                    aiBean.setModeType(2)
                    aiBean.setContent(str)
                    aiBean.setNeedPrinter(true)
                    dataList!!.add(aiBean)
                    map[key] = aiBean
                } else {
                    // Subsequent chunk: the content is cumulative, so replace
                    // the last row's text with the latest snapshot.
                    val aiBean: AIHelperHistoryBean = dataList!![dataList!!.size - 1]
                    aiBean.setNeedPrinter(true)
                    aiBean.setContent(str)
                }
                // Speak the finished reply when sound is enabled.
                if (openSound && generateBean.isEnd()) {
                    mCurTtsText = dataList!![dataList!!.size - 1].getContent()
                    startEngineBtnClicked()
                }
                aiMessageAdapter?.notifyItemChanged(aiMessageAdapter!!.itemCount - 1)
                scrollToDown()
            }
        }
    }

    /** Smooth-scrolls the chat list to its last item on the next frame. */
    private fun scrollToDown() {
        mBinding.recyclerview!!.post {
            val layoutManager = mBinding.recyclerview.layoutManager!!
            layoutManager.smoothScrollToPosition(
                mBinding.recyclerview,
                null,
                layoutManager.itemCount - 1
            )
        }
    }

    /**
     * EventBus hook fired when the typewriter animation finishes:
     * clears the "needs printing" flag on every row and redraws the list.
     */
    @Subscribe(threadMode = ThreadMode.MAIN)
    fun onMessageEvent(event: MessageTextPrint?) {
        val rows = dataList!!
        if (rows.isEmpty()) {
            return
        }
        rows.forEach { it.setNeedPrinter(false) }
        aiMessageAdapter?.notifyDataSetChanged()
        LogUtils.showLog("zhu", "打印结束")
        scrollToDown()
    }

    /**
     * Packages the final ASR transcript together with the picked image into
     * a user message and posts it to [handler] (what == 1003).
     * Requires an image to have been selected first.
     */
    fun setMyResult(text: String?) {
        if (TextUtils.isEmpty(selectImagePath)) {
            ToastUtil.showShort("请选择图片")
            return
        }
        val userBean = AIHelperHistoryBean().apply {
            setImageUrl(selectImagePath)
            setContent(text)
            setCreateBy("user")
        }
        val message = Message.obtain().apply {
            obj = userBean
            what = 1003
        }
        handler.sendMessage(message)
    }


    /**
     * Tears down playback, speech engines and the foreground service.
     *
     * Fixes: unregister from EventBus (registered in initView but previously
     * never unregistered — leaked the Activity and caused duplicate event
     * delivery after recreation) and drop any pending Handler messages so the
     * anonymous Handler cannot keep the destroyed Activity alive.
     */
    protected override fun onDestroy() {
        Log.i(SpeechDemoDefines.TAG, "Asr onDestroy")
        EventBus.getDefault().unregister(this)
        handler.removeCallbacksAndMessages(null)
        mStreamPlayer?.Stop()
        uninitEngine()
        val serviceIntent: Intent = Intent(this, ForegroundService::class.java)
        stopService(serviceIntent)
        super.onDestroy()
    }

    /** Destroys both speech engines (ASR and TTS) if they exist and nulls the references. */
    private fun uninitEngine() {
        mSpeechEngine?.let { engine ->
            Log.i(SpeechDemoDefines.TAG, "引擎析构.")
            engine.destroyEngine()
            mSpeechEngine = null
            Log.i(SpeechDemoDefines.TAG, "引擎析构完成!")
        }
        mSpeechEngineTTS?.let { engine ->
            Log.i(SpeechDemoDefines.TAG, "引擎析构.")
            engine.destroyEngine()
            mSpeechEngineTTS = null
            Log.i(SpeechDemoDefines.TAG, "引擎析构完成!")
        }
    }

    /**
     * Creates (on first call) and initializes the ASR engine, then installs
     * this Activity as its listener. On failure, reports via
     * [speechEngineInitFailed] and leaves the engine un-listened.
     */
    private fun initEngine() {
        if (mSpeechEngine == null) {
            LogUtils.showLog("SpeechDemo", "创建引擎.")
            mSpeechEngine = SpeechEngineGenerator.getInstance()
            mSpeechEngine?.createEngine()
            mSpeechEngine?.setContext(getApplicationContext())
        }
        LogUtils.showLog("SpeechDemo", "SDK 版本号: " + mSpeechEngine!!.version)
        LogUtils.showLog("SpeechDemo", "配置初始化参数.")
        configInitParams()

        LogUtils.showLog("SpeechDemo", "引擎初始化.")
        val ret = mSpeechEngine!!.initEngine()
        if (ret != SpeechEngineDefines.ERR_NO_ERROR) {
            // Fix: the failure message used to be logged unconditionally,
            // even on success; it now appears only when init actually failed.
            val errMessage = "初始化失败，返回值: $ret"
            LogUtils.showLog("SpeechDemo", errMessage)
            speechEngineInitFailed(errMessage)
            return
        }
        mSpeechEngine!!.setListener(this)
        speechEnginInitucceeded()
    }

    /** Called when ASR engine init fails; currently logs only (the UI lambda is a no-op). */
    fun speechEngineInitFailed(tipText: String) {
        LogUtils.showLog("SpeechDemo", "引擎初始化失败: $tipText")
        this.runOnUiThread {}
    }

    /**
     * Called when ASR engine init succeeds; hands the engine to the stream
     * recorder. (Name is misspelled — "speechEngineInitSucceeded" — but is
     * part of the public interface, so it is kept.)
     */
    fun speechEnginInitucceeded() {
        LogUtils.showLog("SpeechDemo", "引擎初始化成功!")
        mStreamRecorder?.SetSpeechEngine(SpeechDemoDefines.ASR_VIEW, mSpeechEngine)
        this.runOnUiThread {}
    }

    /**
     * One-time ASR engine configuration: engine name, debug/log options,
     * audio source, optional resampling, service address, credentials and
     * network timeouts. Must run before initEngine().
     */
    private fun configInitParams() {
        val engine = mSpeechEngine!!
        // [Required] Engine name.
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_ENGINE_NAME_STRING,
            SpeechEngineDefines.ASR_ENGINE
        )
        // [Optional] Debug path & log level.
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_DEBUG_PATH_STRING, mDebugPath)
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_LOG_LEVEL_STRING,
            SpeechEngineDefines.LOG_LEVEL_TRACE
        )
        // [Optional] User ID, used to locate user issues server-side.
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_UID_STRING, SensitiveDefines.UID)
        // [Required] Audio source: the SDK's built-in recorder.
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_RECORDER_TYPE_STRING,
            SpeechEngineDefines.RECORDER_TYPE_RECORDER
        )

        // When stream input is not 16 kHz mono, enable the SDK resampler.
        // Fix: the original compared boxed Int values with referential `!==`,
        // which is true for almost any value regardless of equality, and then
        // NPE'd through `!!` when the recorder was null. Use structural `!=`
        // on a null-checked local instead.
        val recorder = mStreamRecorder
        if (recorder != null &&
            (recorder.GetStreamSampleRate() != 16000 || recorder.GetStreamChannel() != 1)
        ) {
            engine.setOptionBoolean(SpeechEngineDefines.PARAMS_KEY_ENABLE_RESAMPLER_BOOL, true)
            // Feed the resampler the actual sample rate / channel count of the
            // audio the app supplies.
            engine.setOptionInt(
                SpeechEngineDefines.PARAMS_KEY_CUSTOM_SAMPLE_RATE_INT,
                recorder.GetStreamSampleRate()
            )
            engine.setOptionInt(
                SpeechEngineDefines.PARAMS_KEY_CUSTOM_CHANNEL_INT,
                recorder.GetStreamChannel()
            )
        }

        val address: String = SensitiveDefines.DEFAULT_ADDRESS
        Log.i("SpeechDemo", "Current address: $address")
        // [Required] ASR service domain.
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_ASR_ADDRESS_STRING, address)

        val uri: String = SensitiveDefines.ASR_DEFAULT_URI
        Log.i("SpeechDemo", "Current uri: $uri")
        // [Required] ASR service URI.
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_ASR_URI_STRING, uri)

        val appid: String = SensitiveDefines.APPID
        Log.i("SpeechDemo", "Current appid: $appid")
        // [Required] Auth: app id.
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_APP_ID_STRING, appid)

        val token: String = SensitiveDefines.TOKEN
        Log.i("SpeechDemo", "Current token: $token")
        // [Required] Auth: token.
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_APP_TOKEN_STRING, token)

        val cluster: String = SensitiveDefines.ASR_DEFAULT_CLUSTER
        Log.i("SpeechDemo", "Current cluster: $cluster")
        // [Required] Cluster serving the ASR requests.
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_ASR_CLUSTER_STRING, cluster)

        // [Optional] Connect / receive timeouts; defaults are usually fine.
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_ASR_CONN_TIMEOUT_INT, 3000)
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_ASR_RECV_TIMEOUT_INT, 5000)

        // [Optional] Retry once after a dropped connection (default is 0).
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_ASR_MAX_RETRY_TIMES_INT, 1)
    }

    /**
     * SpeechEngine callback dispatcher: decodes the payload and routes each
     * message type to the matching handler (start/stop/error/partial/final).
     *
     * @param type  one of SpeechEngineDefines.MESSAGE_TYPE_*
     * @param bytes UTF-8 payload from the SDK; may be null per the signature
     * @param i1    SDK-supplied extra (unused here)
     */
    override fun onSpeechMessage(type: Int, bytes: ByteArray?, i1: Int) {
        // Robustness fix: the SDK declares the payload nullable, but the
        // original dereferenced it with `!!`; treat a null payload as empty.
        val stdData = if (bytes != null) String(bytes) else ""
        when (type) {
            SpeechEngineDefines.MESSAGE_TYPE_ENGINE_START -> {
                // Engine started successfully.
                Log.i("SpeechDemo", "Callback: 引擎启动成功: data: $stdData")
                speechStart()
            }

            SpeechEngineDefines.MESSAGE_TYPE_ENGINE_STOP -> {
                // Engine shut down.
                Log.i("SpeechDemo", "Callback: 引擎关闭: data: $stdData")
                speechStop()
            }

            SpeechEngineDefines.MESSAGE_TYPE_ENGINE_ERROR -> {
                // Error payload is a JSON string with err_code / err_msg.
                Log.e("SpeechDemo", "Callback: 错误信息: $stdData")
                speechError(stdData)
            }

            SpeechEngineDefines.MESSAGE_TYPE_CONNECTION_CONNECTED -> Log.i(
                "SpeechDemo",
                "Callback: 建连成功: data: $stdData"
            )

            SpeechEngineDefines.MESSAGE_TYPE_PARTIAL_RESULT -> {
                // Partial ASR result for the current request.
                Log.d("SpeechDemo", "Callback: ASR 当前请求的部分结果")
                speechAsrResult(stdData, false)
            }

            SpeechEngineDefines.MESSAGE_TYPE_FINAL_RESULT -> {
                // Final ASR result for the current request.
                Log.i("SpeechDemo", "Callback: ASR 当前请求最终结果")
                speechAsrResult(stdData, true)
            }

            SpeechEngineDefines.MESSAGE_TYPE_VOLUME_LEVEL ->
                // Recording volume callback (no waveform UI here).
                Log.d("SpeechDemo", "Callback: 录音音量")

            else -> {}
        }
    }

    /** Engine-start callback (MESSAGE_TYPE_ENGINE_START); demo UI updates are disabled. */
    fun speechStart() {
//        mEngineStarted = true;
//        this.runOnUiThread(() -> {
//            mEngineStatusTv.setText(R.string.hint_start_cb);
//            setButton(mStartEngineBtn, false);
//            setButton(mStopEngineBtn, true);
//        });
    }

    /** Engine-stop callback (MESSAGE_TYPE_ENGINE_STOP); demo UI updates are disabled. */
    fun speechStop() {
//        mEngineStarted = false;
//        this.runOnUiThread(() -> {
//            mStreamRecorder.Stop();
//            mEngineStatusTv.setText(R.string.hint_stop_cb);
//            setButton(mStartEngineBtn, true);
//            setButton(mStopEngineBtn, false);
//        });
    }

    /**
     * Engine-error callback: parses err_code / err_msg out of the JSON
     * payload on the UI thread. Display of the error is currently disabled.
     */
    fun speechError(data: String?) {
        runOnUiThread {
            try {
                val payload = JSONObject(data)
                // Only a payload carrying both fields is considered a real error.
                if (payload.has("err_code") && payload.has("err_msg")) {
                    // setResultText(data) used to render the error here.
                }
            } catch (e: JSONException) {
                e.printStackTrace()
            }
        }
    }

    /**
     * Handles a (partial or final) ASR result payload.
     *
     * Measures end-of-speech-to-final-result latency, extracts the recognized
     * text from the JSON payload and, for final results, forwards it to
     * [setMyResult].
     *
     * @param data    JSON payload: {"result":[{"text":...}], "reqid":...}
     * @param isFinal true for the request's final result
     */
    fun speechAsrResult(data: String?, isFinal: Boolean) {
        // Latency between the finish-talking directive and the final result.
        var delay: Long = 0
        Log.i(SpeechDemoDefines.TAG, "isFinal$isFinal")
        Log.i(SpeechDemoDefines.TAG, "mFinishTalkingTimestamp$mFinishTalkingTimestamp")
        if (isFinal && mFinishTalkingTimestamp > 0) {
            delay = System.currentTimeMillis() - mFinishTalkingTimestamp
            mFinishTalkingTimestamp = 0
        }
        val response_delay = delay
        this.runOnUiThread {
            try {
                // Extract the recognized text from the callback JSON.
                val reader = JSONObject(data)
                if (!reader.has("result")) {
                    return@runOnUiThread
                }
                val text =
                    reader.getJSONArray("result").getJSONObject(0).getString("text")
                if (text.isEmpty()) {
                    return@runOnUiThread
                }
                //                text = "result: " + text;
                if (isFinal) {
//                    text += "\nreqid: " + reader.getString("reqid");
//                    text += "\nresponse_delay: " + response_delay;
                    // Final transcript becomes the outgoing user message.
                    setMyResult(text)
                }
                Log.i(SpeechDemoDefines.TAG, "result$text")
            } catch (e: JSONException) {
                e.printStackTrace()
            }
        }
    }

    /**
     * Resolves the app-external directory used for speech-SDK debug output
     * (audio dumps, logs). Returns "" when external storage is unavailable or
     * the directory cannot be created; caches the result in [mDebugPath].
     */
    fun getDebugPath(): String {
        if (Environment.MEDIA_MOUNTED != Environment.getExternalStorageState()) {
            Log.e("SpeechDemo", "External storage can't write.")
            return ""
        }
        Log.d("SpeechDemo", "External storage can be read and write.")

        val debugDir: File = getExternalFilesDir(null) ?: return ""
        if (!debugDir.exists()) {
            if (!debugDir.mkdirs()) {
                Log.e("SpeechDemo", "Failed to create debug path.")
                return ""
            }
            Log.d("SpeechDemo", "Create debug path successfully.")
        }
        mDebugPath = debugDir.absolutePath
        return mDebugPath
    }

    /**
     * Push-to-talk button released.
     *
     * If recording is active, sends DIRECTIVE_FINISH_TALKING (ending audio
     * input) and stamps the time for latency measurement; if the 500 ms
     * debounce delay has not elapsed yet, cancels the pending start instead.
     */
    private fun recordBtnTouchUp() {
        if (recordIsRunning) {
            recordIsRunning = false
            Log.i("SpeechDemo", "AsrTouch: Finish")
            mFinishTalkingTimestamp = System.currentTimeMillis()
            // Directive: end the user's audio input.
            Log.i("SpeechDemo", "Directive: DIRECTIVE_FINISH_TALKING")
            mSpeechEngine!!.sendDirective(SpeechEngineDefines.DIRECTIVE_FINISH_TALKING, "")
            mStreamRecorder?.Stop()
        } else if (recordRunnable != null) {
            // Released before the debounce fired — abort the pending start.
            Log.i("SpeechDemo", "AsrTouch: Cancel")
            recordHandler!!.removeCallbacks(recordRunnable!!)
            recordRunnable = null
        }
    }

    /**
     * Push-to-talk button pressed.
     *
     * After a 500 ms debounce, configures per-request ASR parameters, turns
     * off cloud auto-stop (the gesture is press-to-record / release-to-stop),
     * synchronously stops any previous request and starts the engine. Asks for
     * the microphone permission if the environment check fails.
     */
    private fun recordBtnTouchDown() {
        recordIsRunning = false
        // Fix: Handler() without a Looper is deprecated and binds implicitly
        // to the calling thread; bind explicitly to the main looper.
        recordHandler = Handler(Looper.getMainLooper())
        recordRunnable = Runnable {
            recordIsRunning = true
            Log.i(SpeechDemoDefines.TAG, "配置启动参数.")
            configStartAsrParams()
            // Long-press mode: recording starts on press and ends on release,
            // so cloud-side automatic end-pointing must be disabled.
            mSpeechEngine!!.setOptionBoolean(
                SpeechEngineDefines.PARAMS_KEY_ASR_AUTO_STOP_BOOL,
                false
            )

            // Stop any previous request synchronously before starting anew.
            Log.i(SpeechDemoDefines.TAG, "关闭引擎（同步）")
            Log.i(SpeechDemoDefines.TAG, "Directive: DIRECTIVE_SYNC_STOP_ENGINE")
            var ret =
                mSpeechEngine!!.sendDirective(SpeechEngineDefines.DIRECTIVE_SYNC_STOP_ENGINE, "")
            if (ret != SpeechEngineDefines.ERR_NO_ERROR) {
                Log.e(SpeechDemoDefines.TAG, "send directive syncstop failed, $ret")
            } else {
                Log.i(SpeechDemoDefines.TAG, "启动引擎")
                Log.i(SpeechDemoDefines.TAG, "Directive: DIRECTIVE_START_ENGINE")
                ret = mSpeechEngine!!.sendDirective(SpeechEngineDefines.DIRECTIVE_START_ENGINE, "")
                Log.i(SpeechDemoDefines.TAG, "启动引擎 ret = $ret")
                if (ret == SpeechEngineDefines.ERR_REC_CHECK_ENVIRONMENT_FAILED) {
                    // Microphone not usable — request the RECORD_AUDIO permission.
                    requestPermission(ASR_PERMISSIONS)
                } else if (ret != SpeechEngineDefines.ERR_NO_ERROR) {
                    Log.e(SpeechDemoDefines.TAG, "send directive start failed, $ret")
                }
            }
        }
        recordHandler!!.postDelayed(recordRunnable!!, 500)
    }

    /**
     * Per-request ASR parameters, applied before each engine start.
     * Behavior unchanged; the large blocks of dead commented-out demo
     * configuration were removed for readability.
     */
    private fun configStartAsrParams() {
        val engine = mSpeechEngine!!
        // [Optional] Enable disfluency removal (DDC).
        engine.setOptionBoolean(SpeechEngineDefines.PARAMS_KEY_ASR_ENABLE_DDC_BOOL, true)
        // [Optional] Include punctuation in results.
        engine.setOptionBoolean(
            SpeechEngineDefines.PARAMS_KEY_ASR_SHOW_NLU_PUNC_BOOL,
            true
        )
        // [Optional] Hide the sentence-final punctuation mark.
        engine.setOptionBoolean(
            SpeechEngineDefines.PARAMS_KEY_ASR_DISABLE_END_PUNC_BOOL,
            true
        )

        // [Optional] Return the full result each time rather than increments.
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_ASR_RESULT_TYPE_STRING,
            SpeechEngineDefines.ASR_RESULT_TYPE_SINGLE
        )

        // [Optional] Max utterance duration in ms (single-utterance mode only).
        engine.setOptionInt(
            SpeechEngineDefines.PARAMS_KEY_VAD_MAX_SPEECH_DURATION_INT,
            60000
        )

        // [Optional] Volume callbacks off — this screen has no waveform UI.
        engine.setOptionBoolean(
            SpeechEngineDefines.PARAMS_KEY_ENABLE_GET_VOLUME_BOOL,
            false
        )

        // [Optional] Correction table: occurrences of each key in the result
        // are replaced with its value.
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_ASR_CORRECT_WORDS_STRING,
            "{\"古爱玲\":\"谷爱凌\"}"
        )
    }

    /**
     * Checks the given runtime permissions and requests any that are missing.
     *
     * @param permissions permission names to check (e.g. the microphone for ASR).
     * @return true if every permission is already granted (or the OS predates
     *         runtime permissions); false when a permission dialog was triggered.
     */
    fun requestPermission(permissions: List<String>): Boolean {
        // Runtime permissions only exist since Android M.
        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.M) {
            return true
        }
        val missing = permissions.filter { permission ->
            ContextCompat.checkSelfPermission(this, permission) !=
                PackageManager.PERMISSION_GRANTED
        }
        if (missing.isEmpty()) {
            return true
        }
        // Ask the system for everything still missing; the result arrives via
        // onRequestPermissionsResult with CODE_PERMISSION_REQUEST.
        ActivityCompat.requestPermissions(
            this,
            missing.toTypedArray(),
            CODE_PERMISSION_REQUEST
        )
        return false
    }


    ///////////////// Text-to-speech (TTS) section below /////////////////////////////////////////////////////////////////////////////////////////////////
    // Demo stream player that renders PCM chunks via Android AudioTrack; null when not in use.
    private var mStreamPlayer: SpeechStreamPlayer? = null
    // Dedicated speech-engine instance for synthesis (separate from the ASR engine above).
    private var mSpeechEngineTTS: SpeechEngine? = null
    // When true, the SDK player is recreated on every Start Engine (see configStartTtsParams()).
    private val mDisablePlayerReuse = false

    // System audio manager plus the focus-change listener used to request/abandon audio focus.
    private var mAudioManager: AudioManager? = null
    private var mAFChangeListener: OnAudioFocusChangeListener? = null

    // True while this activity holds audio focus and is therefore allowed to play audio.
    private var mPlaybackNowAuthorized = false
    // private String  mCurTtsText = "愿中国青年都摆脱冷气，只是向上走，不必听自暴自弃者流的话。能做事的做事，能发声的发声。有一分热，发一分光。就令萤火一般，也可以在黑暗里发一点光，不必等候炬火。此后如竟没有炬火：我便是唯一的光。";
    // Text queued for synthesis; configStartTtsParams() substitutes a demo passage when empty.
    private var mCurTtsText = ""
    // Engine state flags: initialization completed / an error callback has fired.
    private var mEngineInited = false
    private var mEngineErrorOccurred = false

    /**
     * Entry point for TTS engine setup. Pure-online mode needs no offline
     * resource download, so this simply delegates to [initEngineInternal].
     */
    private fun initEngineTTS() {
        initEngineInternal()
    }

    /**
     * Creates (once) and initializes the TTS engine, then installs the message
     * listener. On failure, reports via [speechEngineInitFailed]; on success,
     * reports the elapsed time via [speechEnginInitSucceeded].
     *
     * Fix: the original built and logged the "初始化失败" error message
     * unconditionally, so every successful init also logged an error. The
     * message is now only produced inside the failure branch. The dead
     * post-creation `ret` check (ret was never assigned there) is removed.
     */
    private fun initEngineInternal() {
        if (mSpeechEngineTTS == null) {
            Log.i(SpeechDemoDefines.TAG, "创建引擎.")
            mSpeechEngineTTS = SpeechEngineGenerator.getInstance()
            mSpeechEngineTTS?.createEngine()
            mSpeechEngineTTS?.setContext(applicationContext)
        }
        Log.d(SpeechDemoDefines.TAG, "SDK 版本号: " + mSpeechEngineTTS!!.version)

        Log.i(SpeechDemoDefines.TAG, "配置初始化参数.")
        configInitParamsTTS()

        val startInitTimestamp = System.currentTimeMillis()
        Log.i(SpeechDemoDefines.TAG, "引擎初始化.")
        val ret = mSpeechEngineTTS!!.initEngine()
        if (ret != SpeechEngineDefines.ERR_NO_ERROR) {
            // Only log and report the error when initialization actually failed.
            val errMessage = "初始化失败，返回值: $ret"
            Log.e(SpeechDemoDefines.TAG, errMessage)
            speechEngineInitFailed(errMessage)
            return
        }
        Log.i(SpeechDemoDefines.TAG, "设置消息监听")
        mSpeechEngineTTS!!.setListener(speechListener)

        val cost = System.currentTimeMillis() - startInitTimestamp
        Log.d(SpeechDemoDefines.TAG, String.format("初始化耗时 %d 毫秒", cost))
        speechEnginInitSucceeded(cost)
    }

    /**
     * One-time TTS engine options that must be set before initEngine():
     * engine type, work mode, logging, identity, dump/playback settings and
     * the online-synthesis endpoint plus credentials.
     */
    private fun configInitParamsTTS() {
        // Single !! so a missing engine fails at one obvious point.
        val engine = mSpeechEngineTTS!!

        // [Required] engine name: TTS.
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_ENGINE_NAME_STRING,
            SpeechEngineDefines.TTS_ENGINE
        )
        // [Required] work mode: pure online synthesis (offline/alternate modes
        // would need extra offline-resource configuration).
        engine.setOptionInt(
            SpeechEngineDefines.PARAMS_KEY_TTS_WORK_MODE_INT,
            SpeechEngineDefines.TTS_WORK_MODE_ONLINE
        )

        // [Optional] debug dump path and log verbosity.
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_DEBUG_PATH_STRING, mDebugPath)
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_LOG_LEVEL_STRING,
            SpeechEngineDefines.LOG_LEVEL_TRACE
        )

        // [Optional] user/device identifiers, used to trace issues server-side.
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_UID_STRING, SensitiveDefines.UID)
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_DEVICE_ID_STRING, SensitiveDefines.DID)

        // [Optional] audio dump disabled; the dump directory below only matters when it is on
        // (files are written as tts_{reqid}.wav and the directory must pre-exist).
        engine.setOptionBoolean(SpeechEngineDefines.PARAMS_KEY_TTS_ENABLE_DUMP_BOOL, false)
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_TTS_AUDIO_PATH_STRING, mDebugPath)

        // [Optional] keep reusing the internal player object across Start Engine calls.
        engine.setOptionBoolean(SpeechEngineDefines.PARAMS_KEY_PLAYER_DISABLE_REUSE_BOOL, false)
        // [Optional] player audio stream; with a reused player this must be set before init.
        engine.setOptionInt(
            SpeechEngineDefines.PARAMS_KEY_AUDIO_STREAM_TYPE_INT,
            SpeechEngineDefines.AUDIO_STREAM_TYPE_MEDIA
        )

        // [Optional] output sample rate (SDK default 24000) and fade-out on interruption (ms;
        // 0 would mean no fade-out).
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_TTS_SAMPLE_RATE_INT, 24000)
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_AUDIO_FADEOUT_DURATION_INT, 20)

        // ------------------------ online-synthesis configuration -----------------------

        // [Required] credentials and endpoint for the online synthesis service.
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_APP_ID_STRING, SensitiveDefines.APPID)
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_APP_TOKEN_STRING,
            SensitiveDefines.TOKEN
        )
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_TTS_ADDRESS_STRING,
            SensitiveDefines.DEFAULT_ADDRESS
        )
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_TTS_URI_STRING,
            SensitiveDefines.TTS_DEFAULT_URI
        )
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_TTS_CLUSTER_STRING,
            SensitiveDefines.TTS_DEFAULT_CLUSTER
        )
        // [Optional] opus-ogg compression rate for audio downloaded from the service.
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_TTS_COMPRESSION_RATE_INT, 10)
    }

    /**
     * Marks the engine as initialized (on the UI thread).
     *
     * @param initCost initialization time in ms; currently only logged by the caller.
     */
    private fun speechEnginInitSucceeded(initCost: Long) {
        Log.i(SpeechDemoDefines.TAG, "引擎初始化成功!")
        runOnUiThread { mEngineInited = true }
    }

    /**
     * Starts a TTS request: acquires audio focus, force-stops any previous
     * request (SYNC_STOP), applies per-request params and starts the engine.
     * No-op when the engine is already running.
     */
    private fun startEngineBtnClicked() {
        Log.d(SpeechDemoDefines.TAG, "Start engine, current status: $mEngineStarted")
        if (mEngineStarted) {
            return
        }
        // Audio focus must be held before playing anything.
        AcquireAudioFocus()
        if (!mPlaybackNowAuthorized) {
            Log.w(SpeechDemoDefines.TAG, "Acquire audio focus failed, can't play audio")
            return
        }
        mEngineErrorOccurred = false
        // SYNC_STOP guarantees the previous request is fully finished first.
        Log.i(SpeechDemoDefines.TAG, "关闭引擎（同步）")
        Log.i(SpeechDemoDefines.TAG, "Directive: DIRECTIVE_SYNC_STOP_ENGINE")
        val stopRet =
            mSpeechEngineTTS!!.sendDirective(SpeechEngineDefines.DIRECTIVE_SYNC_STOP_ENGINE, "")
        if (stopRet != SpeechEngineDefines.ERR_NO_ERROR) {
            Log.e(SpeechDemoDefines.TAG, "send directive syncstop failed, $stopRet")
            return
        }
        configStartTtsParams()
        Log.i(SpeechDemoDefines.TAG, "启动引擎")
        Log.i(SpeechDemoDefines.TAG, "Directive: DIRECTIVE_START_ENGINE")
        val startRet =
            mSpeechEngineTTS!!.sendDirective(SpeechEngineDefines.DIRECTIVE_START_ENGINE, "")
        if (startRet != SpeechEngineDefines.ERR_NO_ERROR) {
            sendStartEngineDirectiveFailed("发送启动引擎指令失败, $startRet")
        }
    }

    /**
     * Requests audio focus for music playback and records the outcome in
     * [mPlaybackNowAuthorized]. Any result other than GRANTED/FAILED leaves
     * the flag untouched, matching the original behavior.
     * (Name kept in UpperCamelCase for compatibility with existing callers.)
     */
    private fun AcquireAudioFocus() {
        val result = mAudioManager!!.requestAudioFocus(
            mAFChangeListener, AudioManager.STREAM_MUSIC,
            AudioManager.AUDIOFOCUS_GAIN
        )
        when (result) {
            AudioManager.AUDIOFOCUS_REQUEST_GRANTED -> mPlaybackNowAuthorized = true
            AudioManager.AUDIOFOCUS_REQUEST_FAILED -> mPlaybackNowAuthorized = false
        }
    }

    /**
     * Logs a start-directive failure and surfaces it to the UI, resetting the
     * engine-started flag on the UI thread.
     *
     * @param tipText human-readable failure description (already localized).
     */
    private fun sendStartEngineDirectiveFailed(tipText: String) {
        Log.e(SpeechDemoDefines.TAG, tipText)
        runOnUiThread {
            setMyResult(tipText)
            mEngineStarted = false
        }
    }

    /**
     * Per-request TTS options applied just before DIRECTIVE_START_ENGINE:
     * scenario, the text to speak, prosody ratios, playback/callback mode and
     * the online voice selection.
     */
    private fun configStartTtsParams() {
        val engine = mSpeechEngineTTS!!

        // [Required] synthesis scenario.
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_TTS_SCENARIO_STRING,
            SpeechEngineDefines.TTS_SCENARIO_TYPE_NORMAL
        )

        // Fall back to the demo passage when no text has been queued yet.
        if (TextUtils.isEmpty(mCurTtsText)) {
            mCurTtsText =
                "愿中国青年都摆脱冷气，只是向上走，不必听自暴自弃者流的话。能做事的做事，能发声的发声。有一分热，发一分光。就令萤火一般，也可以在黑暗里发一点光，不必等候炬火。此后如竟没有炬火：我便是唯一的光。"
        }
        // [Required] the text to synthesize (the service limits its length).
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_TTS_TEXT_STRING, mCurTtsText)
        // [Optional] plain text input (as opposed to SSML).
        engine.setOptionString(SpeechEngineDefines.PARAMS_KEY_TTS_TEXT_TYPE_STRING, "Plain")

        // [Optional] prosody controls — speed / volume / pitch — all at the neutral ratio.
        engine.setOptionDouble(SpeechEngineDefines.PARAMS_KEY_TTS_SPEED_RATIO_DOUBLE, 1.0)
        engine.setOptionDouble(SpeechEngineDefines.PARAMS_KEY_TTS_VOLUME_RATIO_DOUBLE, 1.0)
        engine.setOptionDouble(SpeechEngineDefines.PARAMS_KEY_TTS_PITCH_RATIO_DOUBLE, 1.0)
        // [Optional] no extra silence appended at the end of each sentence.
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_TTS_SILENCE_DURATION_INT, 0)

        if (mDisablePlayerReuse) {
            // Stream type set here only takes effect when player reuse is disabled;
            // otherwise it must be configured before SDK init (see configInitParamsTTS()).
            engine.setOptionInt(
                SpeechEngineDefines.PARAMS_KEY_AUDIO_STREAM_TYPE_INT,
                SpeechEngineDefines.AUDIO_STREAM_TYPE_MEDIA
            )
        }
        // [Optional] skip the SDK's built-in player; audio is consumed via data callbacks.
        engine.setOptionBoolean(SpeechEngineDefines.PARAMS_KEY_TTS_ENABLE_PLAYER_BOOL, false)
        // [Optional] stream audio back through MESSAGE_TYPE_TTS_AUDIO_DATA callbacks;
        // MESSAGE_TYPE_TTS_AUDIO_DATA_END marks the end of a request's audio.
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_TTS_DATA_CALLBACK_MODE_INT, 2)

        // ------------------------ online-synthesis configuration -----------------------
        Log.d(SpeechDemoDefines.TAG, "Current online voice: BV700_V2_streaming")
        // [Required] online speaker and voice type.
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_TTS_VOICE_ONLINE_STRING,
            TTS_DEFAULT_ONLINE_VOICE_TYPE
        )
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_TTS_VOICE_TYPE_ONLINE_STRING,
            TTS_DEFAULT_ONLINE_VOICE_TYPE
        )
        // [Optional] 1 enables detailed playback-progress callbacks / resume support.
        engine.setOptionInt(SpeechEngineDefines.PARAMS_KEY_TTS_WITH_FRONTEND_INT, 1)
        // [Optional] voice-clone voice plus the backend cluster that hosts it.
        engine.setOptionBoolean(SpeechEngineDefines.PARAMS_KEY_TTS_USE_VOICECLONE_BOOL, true)
        engine.setOptionString(
            SpeechEngineDefines.PARAMS_KEY_TTS_BACKEND_CLUSTER_STRING,
            SensitiveDefines.TTS_DEFAULT_CLUSTER
        )
    }

    /**
     * Listener for all TTS-engine messages; decodes the payload once and
     * dispatches to the matching speech* handler.
     *
     * Fixes: `stdData` is now a `val` (the original declared a `var` and
     * immediately overwrote it), and the audio-data log lines report the true
     * byte count via `data.size` — the original logged the decoded string's
     * character length, which miscounts binary audio payloads.
     */
    private val speechListener =
        SpeechListener { type, data, i1 ->
            // Most message payloads are UTF-8 text/JSON; decode once up front.
            val stdData = String(data)
            when (type) {
                SpeechEngineDefines.MESSAGE_TYPE_ENGINE_START -> {
                    // Engine started successfully.
                    Log.i(SpeechDemoDefines.TAG, "Callback: 引擎启动成功: data: $stdData")
                    speechStartTTS(stdData)
                }

                SpeechEngineDefines.MESSAGE_TYPE_ENGINE_STOP -> {
                    // Engine stopped.
                    Log.i(SpeechDemoDefines.TAG, "Callback: 引擎关闭: data: $stdData")
                    speechStopTTS(stdData)
                }

                SpeechEngineDefines.MESSAGE_TYPE_ENGINE_ERROR -> {
                    // Engine reported an error.
                    Log.e(SpeechDemoDefines.TAG, "Callback: 错误信息: $stdData")
                    speechError(stdData)
                }

                SpeechEngineDefines.MESSAGE_TYPE_TTS_SYNTHESIS_BEGIN -> {
                    // Synthesis started.
                    Log.e(SpeechDemoDefines.TAG, "Callback: 合成开始: $stdData")
                    speechStartSynthesis(stdData)
                }

                SpeechEngineDefines.MESSAGE_TYPE_TTS_SYNTHESIS_END -> {
                    // Synthesis finished.
                    Log.e(SpeechDemoDefines.TAG, "Callback: 合成结束: $stdData")
                    speechFinishSynthesis(stdData)
                }

                SpeechEngineDefines.MESSAGE_TYPE_TTS_START_PLAYING -> {
                    // Playback started.
                    Log.e(SpeechDemoDefines.TAG, "Callback: 播放开始: $stdData")
                    speechStartPlaying(stdData)
                }

                SpeechEngineDefines.MESSAGE_TYPE_TTS_PLAYBACK_PROGRESS -> {
                    // Playback progress update (payload parsed in the handler).
                    Log.e(SpeechDemoDefines.TAG, "Callback: 播放进度")
                    speechPlayingProgress(stdData)
                }

                SpeechEngineDefines.MESSAGE_TYPE_TTS_FINISH_PLAYING -> {
                    // Playback finished.
                    Log.e(SpeechDemoDefines.TAG, "Callback: 播放结束: $stdData")
                    speechFinishPlaying(stdData)
                }

                SpeechEngineDefines.MESSAGE_TYPE_TTS_AUDIO_DATA -> {
                    // Streamed audio chunk; data.size is the actual byte count.
                    Log.e(
                        SpeechDemoDefines.TAG,
                        String.format("Callback: 音频数据，长度 %d 字节", data.size)
                    )
                    speechTtsAudioData(data, false)
                }

                SpeechEngineDefines.MESSAGE_TYPE_TTS_AUDIO_DATA_END -> {
                    // End of the audio stream for this request.
                    Log.e(
                        SpeechDemoDefines.TAG,
                        String.format("Callback: 音频数据，长度 %d 字节", data.size)
                    )
                    speechTtsAudioData(ByteArray(0), true)
                }

                else -> {}
            }
        }

    /**
     * Engine-start callback: marks the engine running and starts the demo
     * stream player when one is present.
     */
    private fun speechStartTTS(data: String) {
        mEngineStarted = true
        // The stream player is only non-null on the demo-player playback path.
        mStreamPlayer?.Start()
    }

    /**
     * Engine-stop callback: drains and stops the stream player, resets the
     * connection/pause flags on the UI thread, then releases audio focus.
     */
    private fun speechStopTTS(data: String) {
        mEngineStarted = false
        mStreamPlayer?.let { player ->
            // Feed a final empty buffer so the player drains, then wait for it to stop.
            player.Feed(ByteArray(0), true)
            player.WaitPlayerStop()
        }
        runOnUiThread {
            mConnectionCreated = false
            mPlayerPaused = false
        }

        // Playback is over: hand audio focus back to the system.
        mAudioManager!!.abandonAudioFocus(mAFChangeListener)
        mPlaybackNowAuthorized = false
    }

    // Callback stub for MESSAGE_TYPE_TTS_SYNTHESIS_BEGIN; intentionally empty.
    private fun speechStartSynthesis(data: String) {
    }

    // Callback stub for MESSAGE_TYPE_TTS_SYNTHESIS_END; intentionally empty.
    private fun speechFinishSynthesis(data: String) {
    }

    // Callback stub for MESSAGE_TYPE_TTS_START_PLAYING; intentionally empty.
    private fun speechStartPlaying(data: String) {
    }

    /**
     * Parses a playback-progress payload (JSON with "reqid" and "progress")
     * and logs it; silently warns when the expected fields are absent.
     */
    private fun speechPlayingProgress(data: String) {
        try {
            val json = JSONObject(data)
            if (!json.has("reqid") || !json.has("progress")) {
                Log.w(SpeechDemoDefines.TAG, "Can't find necessary field in progress callback. ")
                return
            }
            val reqid = json.getString("reqid")
            val percentage = json.getDouble("progress")
            Log.d(
                SpeechDemoDefines.TAG,
                "当前播放的文本对应的 reqid: $reqid, 播放进度：$percentage"
            )
        } catch (e: JSONException) {
            // Malformed payload: log and carry on (progress is informational only).
            e.printStackTrace()
        }
    }

    // Callback stub for MESSAGE_TYPE_TTS_FINISH_PLAYING; intentionally empty.
    private fun speechFinishPlaying(data: String) {
    }

    /**
     * Feeds one chunk of synthesized audio into the demo stream player.
     *
     * @param data    raw audio bytes (may be empty for the terminating chunk).
     * @param isFinal true when this is the last chunk of the current request.
     */
    private fun speechTtsAudioData(data: ByteArray, isFinal: Boolean) {
        mStreamPlayer?.Feed(data, isFinal)
    }


    /**
     * Asynchronously asks the AI backend to generate text for the image at
     * [filePath]; the result is posted to [handler] as a what=1005 message
     * tagged with a random request id in arg1.
     */
    private fun requestImage(prompt: String, filePath: String) {
        val requestId = getRandomInt()
        AIHelpUtil.getInstance().requestImage2Text(prompt, File(filePath), object :
            AIHelpUtil.Text2TextCallBack {
            override fun onDataBack(generateBean: GenerateBean?) {
                // Ignore empty responses.
                val bean = generateBean ?: return
                handler.sendMessage(Message.obtain().apply {
                    arg1 = requestId
                    what = 1005
                    obj = bean
                })
            }
        })
    }

    /**
     * Returns a pseudo-random request id in [0, 10000).
     * (The original comment incorrectly claimed the range was 0–100.)
     */
    private fun getRandomInt(): Int = Random().nextInt(10000)

    private val mSelectList: MutableList<String> = ArrayList()


    /**
     * Opens the photo picker (single image, compressed, no crop). On success
     * the chosen path is previewed in imSelect and passed to setMyResult().
     *
     * Fixes: removed the always-true `mSelectList != null` check on a non-null
     * val, and replaced the unsafe `imagePathList!!` with `orEmpty()` so a
     * null callback result is treated as "nothing picked" instead of crashing.
     */
    private fun pickImage() {
        PhotoPicker.Album(this)
            .setMultiChooseSize(1)
            .setIsCompress(true) // compress the picked image
            .setIsCrop(false) // no cropping
            .setOnResultListener(object : OnResultListener {
                override fun onSucess(imagePathList: ArrayList<String>?) {
                    mSelectList.clear()
                    mSelectList.addAll(imagePathList.orEmpty())
                    if (mSelectList.isNotEmpty()) {
                        selectImagePath = mSelectList[0]
                        Glide.with(this@AiGenerateTextByImageActivity).load(selectImagePath)
                            .into(mBinding.imSelect)
                        mBinding.imSelect!!.visibility = View.VISIBLE
                        setMyResult(selectImagePath)
                    }
                }

                override fun onCancel() {
                    // User dismissed the picker; keep the previous selection untouched.
                }
            }).start()
    }

    private val REQUEST_IMAGE = 2

}