package com.k2fsa.sherpa.onnx.kws

import android.Manifest
import android.content.pm.PackageManager
import android.media.AudioFormat

import android.media.AudioRecord
import android.media.MediaRecorder
import android.os.Bundle
import android.text.method.ScrollingMovementMethod
import android.util.Log
import android.util.TimeUtils
import android.widget.Button
import android.widget.EditText
import android.widget.TextView
import android.widget.Toast
import androidx.appcompat.app.AppCompatActivity
import androidx.core.app.ActivityCompat
import com.k2fsa.sherpa.onnx.KeywordSpotter
import com.k2fsa.sherpa.onnx.KeywordSpotterConfig
import com.k2fsa.sherpa.onnx.KeywordSpotterResult
import com.k2fsa.sherpa.onnx.OfflineRecognizer
import com.k2fsa.sherpa.onnx.OfflineRecognizerConfig
import com.k2fsa.sherpa.onnx.OfflineRecognizerResult
import com.k2fsa.sherpa.onnx.OnlineRecognizer
import com.k2fsa.sherpa.onnx.OnlineRecognizerConfig
import com.k2fsa.sherpa.onnx.OnlineRecognizerResult
import com.k2fsa.sherpa.onnx.OnlineStream
import com.k2fsa.sherpa.onnx.R
import com.k2fsa.sherpa.onnx.getFeatureConfig
import com.k2fsa.sherpa.onnx.getKeywordsFile
import com.k2fsa.sherpa.onnx.getKwsModelConfig
import com.k2fsa.sherpa.onnx.getModelConfig
import com.k2fsa.sherpa.onnx.getOfflineModelConfig
import com.konovalov.vad.silero.Vad
import com.konovalov.vad.silero.VadSilero
import com.konovalov.vad.silero.config.FrameSize
import com.konovalov.vad.silero.config.Mode
import com.konovalov.vad.silero.config.SampleRate
//import com.k2fsa.sherpa.onnx.getVadModelConfig
//import com.konovalov.vad.silero.Vad
//import com.konovalov.vad.silero.VadSilero
//import com.konovalov.vad.silero.config.FrameSize
//import com.konovalov.vad.silero.config.Mode
//import com.konovalov.vad.silero.config.SampleRate
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.cancel
import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext
import org.json.JSONArray
import kotlin.concurrent.thread

// Log tag shared by all log statements in this file.
private const val TAG = "sherpa-onnx-kws"
// Request code identifying our RECORD_AUDIO permission request in the callback.
private const val REQUEST_RECORD_AUDIO_PERMISSION = 200

/**
 * Demo activity for sherpa-onnx keyword spotting (KWS).
 *
 * Two modes, selected by [onlyKws]:
 *  - onlyKws = true:  microphone audio is streamed directly into a single
 *    persistent KWS stream ([processKws]).
 *  - onlyKws = false: audio is first gated by a Silero VAD; only frames that
 *    contain speech are sent through a fresh KWS pass, optionally followed by
 *    a second-pass online recognizer when [enableAsr] is set ([processVadKws]).
 */
class MainActivity : AppCompatActivity() {
    private val permissions: Array<String> = arrayOf(Manifest.permission.RECORD_AUDIO)

    private lateinit var kws: KeywordSpotter                  // first-pass keyword spotter
    private lateinit var onlineRecognizer: OnlineRecognizer   // second pass, only when enableAsr
    private lateinit var offlineRecognizer: OfflineRecognizer // NOTE(review): initOfflineRecognizer is never called here
    private lateinit var vad: VadSilero                       // Silero VAD gating KWS in VAD mode
    private lateinit var stream: OnlineStream                 // persistent KWS stream (onlyKws mode)
    private var audioRecord: AudioRecord? = null
    private lateinit var recordButton: Button
    private lateinit var textView: TextView
    private lateinit var inputText: TextView
    private var recordingThread: Thread? = null

    private val audioSource = MediaRecorder.AudioSource.MIC
    private val sampleRateInHz = 16000
    private val channelConfig = AudioFormat.CHANNEL_IN_MONO

    // Note: We don't use AudioFormat.ENCODING_PCM_FLOAT
    // since the AudioRecord.read(float[]) needs API level >= 23
    // but we are targeting API level >= 21
    private val audioFormat = AudioFormat.ENCODING_PCM_16BIT
    private var idx: Int = 0            // running index of displayed results
    private var lastText: String = ""   // accumulated keyword hits (shown in textView)
    private var nolastText: String = "" // accumulated second-pass ASR text (shown in inputText)
    private val enableAsr = false       // run online ASR on speech segments where KWS misses
    private var onlyKws = false         // true: plain KWS; false: VAD-gated KWS

    @Volatile
    private var isRecording: Boolean = false

    override fun onRequestPermissionsResult(
        requestCode: Int, permissions: Array<String>, grantResults: IntArray
    ) {
        super.onRequestPermissionsResult(requestCode, permissions, grantResults)
        // grantResults may be empty if the request was interrupted; treat that
        // as a denial instead of crashing with IndexOutOfBoundsException.
        val permissionToRecordAccepted =
            requestCode == REQUEST_RECORD_AUDIO_PERMISSION &&
                grantResults.isNotEmpty() &&
                grantResults[0] == PackageManager.PERMISSION_GRANTED

        if (!permissionToRecordAccepted) {
            Log.e(TAG, "Audio record is disallowed")
            finish()
            // Fix: previously execution fell through and also logged
            // "Audio record is permitted" after finishing on denial.
            return
        }

        Log.i(TAG, "Audio record is permitted")
    }

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)

        ActivityCompat.requestPermissions(this, permissions, REQUEST_RECORD_AUDIO_PERMISSION)
        if (onlyKws) {
            initModel()
        } else {
            Log.i(TAG, "Start to initialize vad model")
            initVadModel()
            Log.i(TAG, "Finished initializing vad model")

            Log.i(TAG, "Start to initialize kws model")
            initModel()
            Log.i(TAG, "Finished initializing kws model")
            if (enableAsr) {
                initOnlineRecognizer()
            }
        }

        recordButton = findViewById(R.id.record_button)
        recordButton.setOnClickListener { onclick() }

        textView = findViewById(R.id.my_text)
        textView.movementMethod = ScrollingMovementMethod()

        inputText = findViewById(R.id.input_text)
    }

    /** Toggles recording: starts the microphone + worker thread, or stops them. */
    private fun onclick() {
        if (!isRecording) {
            if (!initMicrophone()) {
                Log.e(TAG, "Failed to initialize microphone")
                return
            }
            Log.i(TAG, "state: ${audioRecord?.state}")

            audioRecord!!.startRecording()
            recordButton.setText(R.string.stop)
            isRecording = true

            // Clear both result panes and their backing buffers for a new session.
            textView.text = ""
            lastText = ""
            inputText.text = ""
            // Fix: this line previously re-assigned lastText, so the second-pass
            // buffer (nolastText) was never cleared between sessions.
            nolastText = ""
            idx = 0
            if (onlyKws) {
                // Start from a fresh persistent KWS stream each session.
                stream.release()
                stream = kws.createStream()
                recordingThread = thread(true) {
                    processKws()
                }
            } else {
                recordingThread = thread(true) {
                    processVadKws()
                }
            }

            Log.i(TAG, "Started recording")
        } else {
            isRecording = false

            audioRecord!!.stop()
            audioRecord!!.release()
            audioRecord = null
            recordButton.setText(R.string.start)
            if (onlyKws) {
                stream.release()
            }
            Log.i(TAG, "Stopped recording")
        }
    }

    /**
     * Restarts the recording session with a fresh microphone and KWS stream.
     * NOTE(review): not called from the visible code paths.
     */
    private fun reset() {
        isRecording = false
        audioRecord?.stop()
        audioRecord?.release()
        audioRecord = null
        // Fix: the microphone must be re-created before startRecording(); the
        // previous code dereferenced audioRecord!! right after nulling it,
        // which was a guaranteed NullPointerException.
        if (!initMicrophone()) {
            Log.e(TAG, "Failed to re-initialize microphone")
            return
        }
        audioRecord!!.startRecording()
        stream.release()
        stream = kws.createStream("")
    }

    /**
     * Worker loop for VAD-gated mode: reads 100 ms slices from the microphone,
     * and for slices the VAD classifies as speech launches a KWS pass (and,
     * when enableAsr is set and KWS misses, a second online-ASR pass) on an
     * IO coroutine. UI updates are posted back to the main thread.
     */
    private fun processVadKws() {
        Log.i(TAG, "vad kws processing samples")

        val interval = 0.1 // read the microphone in 100 ms slices
        val bufferSize = (interval * sampleRateInHz).toInt() // in samples
        val buffer = ShortArray(bufferSize)
        val coroutineScope = CoroutineScope(Dispatchers.IO)
        while (isRecording) {
            val ret = audioRecord?.read(buffer, 0, buffer.size)
            if (ret != null && ret > 0) {
                // Convert 16-bit PCM to floats in [-1, 1).
                var samples = FloatArray(ret) { buffer[it] / 32768.0f }

                // The VAD is built with FRAME_SIZE_1600; zero-pad short reads so
                // vad.isSpeech() always receives a full frame.
                if (samples.size < 1600) {
                    samples = samples.copyOf(1600)
                }

                if (vad.isSpeech(samples)) {
                    coroutineScope.launch {
                        val result = runKwsPass(samples)

                        val tokenArr = result.tokens
                        var tokens = ""
                        val times = result.timestamps
                        if (tokenArr.isNotEmpty()) {
                            tokens = JSONArray(tokenArr).toString() + JSONArray(times).toString()
                        }
                        val text = result.keyword

                        Log.d("cclin","Kws = -------"+text)

                        if (text.isNotBlank()) {
                            // Keyword detected: prepend it to the KWS pane.
                            withContext(Dispatchers.Main) {
                                lastText = "${idx}: ${text} ${tokens}\n${lastText}"
                                idx += 1
                                textView.text = lastText.lowercase()
                            }
                        } else {
                            if (enableAsr) {
                                // KWS missed; run the second-pass recognizer on
                                // the same samples and show it in the ASR pane.
                                val res = runKwsAsrPass(samples)
                                val asrTokenArr = res.tokens
                                var asrTokens = ""
                                val asrTimes = res.timestamps
                                if (asrTokenArr.isNotEmpty()) {
                                    asrTokens = JSONArray(asrTokenArr).toString() + JSONArray(asrTimes).toString()
                                }
                                val text1 = res.text
                                if (text1.isNotBlank()) {
                                    withContext(Dispatchers.Main) {
                                        nolastText = "${idx}: ${text1}  ${asrTokens}\n${nolastText}"
                                        idx += 1
                                        inputText.text = nolastText.lowercase()
                                    }
                                }
                            }

                            Log.i(TAG, "not wake up samples")
                        }
                    }
                }

                runOnUiThread {
                    textView.text = lastText.lowercase()
                }
            }
        }
        coroutineScope.cancel()
    }

    /**
     * Worker loop for onlyKws mode: streams microphone audio into the single
     * persistent KWS stream and decodes whenever the spotter has enough data.
     */
    private fun processKws() {
        Log.i(TAG, "processing samples 111")

        val interval = 0.100 // read the microphone in 100 ms slices
        val bufferSize = (interval * sampleRateInHz).toInt() // in samples
        val buffer = ShortArray(bufferSize)

        while (isRecording) {
            val ret = audioRecord?.read(buffer, 0, buffer.size)
            if (ret != null && ret > 0) {
                val samples = FloatArray(ret) { buffer[it] / 32768.0f }
                stream.acceptWaveform(samples, sampleRate = sampleRateInHz)
                while (kws.isReady(stream)) {
                    Log.i(TAG, "decode start ")
                    kws.decode(stream)
                    Log.i(TAG, "decode end ")

                    val result = kws.getResult(stream)
                    val tokenArr = result.tokens
                    var tokens = ""
                    val times = result.timestamps
                    if (tokenArr.isNotEmpty()) {
                        tokens = JSONArray(tokenArr).toString() + JSONArray(times).toString()
                    }
                    val text = result.keyword

                    var textToDisplay = lastText

                    if (tokens.isNotBlank()) {
                        lastText = "$idx: $tokens\n$lastText"
                    }
                    if (text.isNotBlank()) {
                        // A keyword fired: reset the stream state so detection
                        // can trigger again, and prepend the hit to the display.
                        kws.reset(stream)
                        textToDisplay = if (lastText.isBlank()) {
                            "$idx: $text"
                        } else {
                            "$idx: $text\n$lastText"
                        }
                        lastText = "$idx: $text\n$lastText"
                        idx += 1
                    }

                    runOnUiThread {
                        textView.text = textToDisplay
                    }
                }
            }
        }
    }

    /**
     * Creates the AudioRecord instance, requesting the RECORD_AUDIO permission
     * first if it has not been granted yet.
     *
     * @return true if the recorder was created, false if permission is missing.
     */
    private fun initMicrophone(): Boolean {
        if (ActivityCompat.checkSelfPermission(
                this, Manifest.permission.RECORD_AUDIO
            ) != PackageManager.PERMISSION_GRANTED
        ) {
            ActivityCompat.requestPermissions(this, permissions, REQUEST_RECORD_AUDIO_PERMISSION)
            return false
        }

        val numBytes = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat)
        Log.i(
            TAG, "buffer size in milliseconds: ${numBytes * 1000.0f / sampleRateInHz}"
        )

        audioRecord = AudioRecord(
            audioSource,
            sampleRateInHz,
            channelConfig,
            audioFormat,
            numBytes * 2 // a sample has two bytes as we are using 16-bit PCM
        )
        return true
    }

    /**
     * Builds the offline (non-streaming) second-pass recognizer.
     * NOTE(review): not called from the visible code paths.
     */
    private fun initOfflineRecognizer() {
        // Please change getModelConfig() to add new models
        // See https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html
        // for a list of available models
        val secondType = 4
        Log.i(TAG, "Select model type $secondType for the second pass")

        val config = OfflineRecognizerConfig(
            featConfig = getFeatureConfig(sampleRate = sampleRateInHz, featureDim = 80),
            modelConfig = getOfflineModelConfig(type = secondType)!!,
        )
        // The previous ruleFsts branch was dead code: the variable was always null.

        offlineRecognizer = OfflineRecognizer(
            assetManager = application.assets,
            config = config,
        )
    }

    /** Builds the streaming second-pass recognizer used when enableAsr is set. */
    private fun initOnlineRecognizer() {
        // Please change getModelConfig() to add new models
        // See https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html
        // for a list of available models
        val firstType = 0
        Log.i(TAG, "Select model type $firstType for the first pass")
        val config = OnlineRecognizerConfig(
            featConfig = getFeatureConfig(sampleRate = sampleRateInHz, featureDim = 80),
            modelConfig = getModelConfig(type = firstType)!!,
            hotwordsFile = getKeywordsFile(2),
            hotwordsScore = 5.0f,
            maxActivePaths = 2,
            enableEndpoint = false,
        )

        onlineRecognizer = OnlineRecognizer(
            assetManager = application.assets,
            config = config,
        )
    }

    /**
     * Builds the Silero VAD. The frame size (1600 samples = 100 ms at 16 kHz)
     * must match the slice size fed to vad.isSpeech() in processVadKws().
     */
    private fun initVadModel() {
        val type = 0
        Log.i(TAG, "Select VAD model type ${type}")
        val DEFAULT_SAMPLE_RATE = SampleRate.SAMPLE_RATE_16K
        val DEFAULT_FRAME_SIZE = FrameSize.FRAME_SIZE_1600
        val DEFAULT_MODE = Mode.NORMAL
        val DEFAULT_SILENCE_DURATION_MS = 500
        val DEFAULT_SPEECH_DURATION_MS = 100
        vad = Vad.builder().setContext(this@MainActivity).setSampleRate(DEFAULT_SAMPLE_RATE)
            .setFrameSize(DEFAULT_FRAME_SIZE).setMode(DEFAULT_MODE)
            .setSilenceDurationMs(DEFAULT_SILENCE_DURATION_MS)
            .setSpeechDurationMs(DEFAULT_SPEECH_DURATION_MS).build()
    }

    /** Builds the keyword spotter; in onlyKws mode also creates its stream. */
    private fun initModel() {
        // Please change getKwsModelConfig() to add new models
        // See https://k2-fsa.github.io/sherpa/onnx/kws/pretrained_models/index.html
        // for a list of available models
        val type = 0
        Log.i(TAG, "Select model type $type")
        val config = KeywordSpotterConfig(
            featConfig = getFeatureConfig(sampleRate = sampleRateInHz, featureDim = 80),
            modelConfig = getKwsModelConfig(type = type)!!,
            keywordsFile = getKeywordsFile(type = type),
        )

        kws = KeywordSpotter(
            assetManager = application.assets,
            config = config,
        )
        if (onlyKws) {
            stream = kws.createStream()
        }
    }

    /**
     * Runs one keyword-spotting pass over a snippet of audio on a throwaway
     * stream, returning early as soon as a keyword fires.
     */
    private fun runKwsPass(samples: FloatArray): KeywordSpotterResult {
        Log.i(TAG, "samples size: ${samples.size} time: ${((samples.size)/16000.0f).toFloat()}" )
        val kwsStream = kws.createStream()
        kwsStream.acceptWaveform(samples, sampleRate = sampleRateInHz)
        Log.i(TAG, "start")
        while (kws.isReady(kwsStream)) {
            Log.i(TAG, "decode start")
            kws.decode(kwsStream)
            Log.i(TAG, "decode end")
            val result = kws.getResult(kwsStream)
            if (result.keyword.isNotBlank()) {
                kws.reset(kwsStream)
                // Fix: release the native stream on the early-return path too;
                // previously it leaked one stream per detected keyword.
                kwsStream.release()
                return result
            }
        }
        val result = kws.getResult(kwsStream)
        kwsStream.release()
        return result
    }

    /**
     * Runs the offline recognizer over a snippet of audio.
     * NOTE(review): not called from the visible code paths.
     */
    private fun runKwsAsr(samples: FloatArray): OfflineRecognizerResult {
        val asrStream = offlineRecognizer.createStream()
        asrStream.acceptWaveform(samples, sampleRate = sampleRateInHz)
        offlineRecognizer.decode(asrStream)
        val text = offlineRecognizer.getResult(asrStream)
        asrStream.release()
        return text
    }

    /** Runs the streaming recognizer over a snippet of audio on a throwaway stream. */
    private fun runKwsAsrPass(samples: FloatArray): OnlineRecognizerResult {
        val asrStream = onlineRecognizer.createStream()
        asrStream.acceptWaveform(samples, sampleRate = sampleRateInHz)
        while (onlineRecognizer.isReady(asrStream)) {
            onlineRecognizer.decode(asrStream)
        }
        val text = onlineRecognizer.getResult(asrStream)
        asrStream.release()
        return text
    }
}