package com.amu.aidemo.android.wakeword

import ai.onnxruntime.OnnxTensor
import ai.onnxruntime.OrtEnvironment
import ai.onnxruntime.OrtSession
import android.content.Context
import android.util.Log
import com.amu.voiceassistant.android.ml.EmbeddingModel
import com.amu.voiceassistant.android.ml.NoahaNoaha
import org.tensorflow.lite.DataType
import org.tensorflow.lite.support.tensorbuffer.TensorBuffer
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.nio.FloatBuffer
import java.util.LinkedList

// Log tag shared by all WakeWordDetector messages.
private const val TAG = "WakeWordDetector"

/**
 * Streaming wake-word detector implementing an openWakeWord-style three-stage pipeline:
 *
 *  1. A melspectrogram ONNX model converts the raw 16 kHz audio window into mel frames.
 *  2. A TFLite embedding model converts a sliding 76x32 mel window into a 96-dim vector.
 *  3. A TFLite classifier scores a sliding window of the 16 most recent embeddings;
 *     scores above [DETECTION_THRESHOLD] are double-checked by a second ONNX verifier
 *     model before the detection fires.
 *
 * Feed audio via [processAudioChunk] in fixed 1280-sample (80 ms @ 16 kHz) chunks.
 * Not thread-safe: all calls must come from a single thread. Call [release] when done.
 *
 * @param context Android context used to open model assets and instantiate TFLite models.
 * @throws Exception rethrown from init if any of the four models fails to load.
 */
class WakeWordDetector(private val context: Context) {

    private lateinit var melspecOnnx: OrtSession
    private lateinit var embeddingModel: EmbeddingModel
    private lateinit var wakewordModel: NoahaNoaha
    private lateinit var verifierOnnx: OrtSession
    private lateinit var env: OrtEnvironment

    // Rolling raw-audio window: the last OVERLAP_SIZE samples of the previous chunk
    // followed by the CHUNK_SIZE samples of the current chunk.
    private val rawDataBuffer = FloatArray(RAW_BUFFER_SIZE)

    // Sliding window of MEL_FRAMES x MEL_BINS mel frames (NHWC layout, single channel).
    private val melspecBuffer =
        Array(1) { Array(MEL_FRAMES) { Array(MEL_BINS) { FloatArray(1) } } }

    // Sliding window of the EMBEDDING_FRAMES most recent EMBEDDING_DIM-dim embeddings.
    private val embeddingBuffer =
        Array(1) { Array(EMBEDDING_FRAMES) { FloatArray(EMBEDDING_DIM) } }

    // Recent classifier scores, averaged into [averagedConfidence].
    private val scoreQueue = LinkedList<Float>()
    private var averagedConfidence = 0f

    // Latest classifier output; index 0 is the wake-word score.
    private var confidence = FloatArray(1)

    // Linear gain applied to incoming samples before feature extraction.
    private val gain = 100

    // Size of the moving-average window over classifier scores.
    private val maxScores = 1

    // Frames remaining in the post-detection cooldown; no re-trigger while > 0.
    private var patience = 0
    private val maxPatience = 20

    // Chunks processed so far; used only to throttle verbose logging deterministically.
    private var frameCount = 0L

    /** Invoked (on the caller's thread) with the averaged confidence when the wake word fires. */
    var onWakeWordDetected: ((confidence: Float) -> Unit)? = null

    init {
        initializeModels()
    }

    /**
     * Loads the two ONNX sessions and the two TFLite models from app assets.
     * Any failure is logged and rethrown so construction fails loudly instead of
     * leaving the detector half-initialized.
     */
    private fun initializeModels() {
        try {
            env = OrtEnvironment.getEnvironment()
            val melspecModelBytes = context.assets.open("melspectrogram.onnx").readBytes()
            val verifierModelBytes = context.assets.open("noaha_noaha.onnx").readBytes()

            melspecOnnx = env.createSession(melspecModelBytes)
            embeddingModel = EmbeddingModel.newInstance(context)
            wakewordModel = NoahaNoaha.newInstance(context)
            verifierOnnx = env.createSession(verifierModelBytes)

            Log.d(TAG, "Models initialized successfully")
        } catch (ex: Exception) {
            Log.e(TAG, "Failed to load models: ${ex.message}")
            throw ex
        }
    }

    /**
     * Processes one chunk of audio and reports whether the wake word was detected.
     *
     * @param audioData exactly 1280 float samples (16 kHz, 80 ms); other sizes are
     *        logged and ignored.
     * @return true when both the classifier score and the verifier score exceed
     *         their thresholds for this chunk.
     */
    fun processAudioChunk(audioData: FloatArray): Boolean {
        if (audioData.size != CHUNK_SIZE) {
            Log.w(TAG, "Invalid audio chunk size: ${audioData.size}, expected 1280")
            return false
        }

        // Apply the fixed input gain.
        val amplifiedData = FloatArray(CHUNK_SIZE) { audioData[it] * gain }

        // Slide the raw window: keep the newest OVERLAP_SIZE old samples, append the chunk.
        System.arraycopy(rawDataBuffer, CHUNK_SIZE, rawDataBuffer, 0, OVERLAP_SIZE)
        System.arraycopy(amplifiedData, 0, rawDataBuffer, OVERLAP_SIZE, CHUNK_SIZE)

        // Run the three pipeline stages on the updated windows.
        bufferMelspec()
        bufferEmbeddings()
        getWakeWordPrediction(embeddingBuffer)

        // Throttled confidence trace: once every LOG_INTERVAL_FRAMES processed chunks.
        // (A wall-clock-millisecond modulo was used before, which fired at a random
        // ~10% of calls rather than every 10 frames as intended.)
        frameCount++
        if (frameCount % LOG_INTERVAL_FRAMES == 0L) {
            Log.v(TAG, "Current confidence: ${confidence[0]}, averaged: $averagedConfidence")
        }

        // Cooldown after a detection to avoid rapid re-triggering.
        if (patience > 0) {
            patience -= 1
            Log.v(TAG, "In patience cooldown: $patience frames remaining")
            return false
        } else if (confidence[0] > DETECTION_THRESHOLD) {
            Log.i(TAG, "Confidence threshold passed: ${confidence[0]}, running verifier...")
            val verifierScore = verifierOnnxPredict(embeddingBuffer)
            Log.i(TAG, "Verifier score: $verifierScore")

            if (verifierScore > VERIFIER_THRESHOLD) {
                patience = maxPatience
                Log.w(TAG, "========================================")
                Log.w(TAG, "🎤 WAKE WORD DETECTED!")
                Log.w(TAG, "Initial confidence: ${confidence[0]}")
                Log.w(TAG, "Averaged confidence: $averagedConfidence")
                Log.w(TAG, "Verifier score: $verifierScore")
                Log.w(TAG, "========================================")
                onWakeWordDetected?.invoke(averagedConfidence)
                return true
            } else {
                Log.d(TAG, "Verifier rejected: score $verifierScore < $VERIFIER_THRESHOLD")
            }
        }

        return false
    }

    /**
     * Runs the melspectrogram model on the raw-audio window and slides the newest
     * NEW_MEL_FRAMES frames into [melspecBuffer].
     */
    private fun bufferMelspec() {
        val melspecPredictions = melspecOnnxPredict(rawDataBuffer)

        // Shift the mel window left by NEW_MEL_FRAMES frames.
        for (i in 0 until MEL_FRAMES - NEW_MEL_FRAMES) {
            for (j in 0 until MEL_BINS) {
                melspecBuffer[0][i][j][0] = melspecBuffer[0][i + NEW_MEL_FRAMES][j][0]
            }
        }

        // Append the new frames, rescaled with the openWakeWord transform (x / 10 + 2).
        // NOTE(review): only the first NEW_MEL_FRAMES output frames are consumed —
        // assumes the model emits at least 8 frames per 1760-sample window; confirm
        // against the exported melspectrogram model.
        for (i in 0 until NEW_MEL_FRAMES) {
            for (j in 0 until MEL_BINS) {
                melspecBuffer[0][MEL_FRAMES - NEW_MEL_FRAMES + i][j][0] =
                    2 + melspecPredictions[0][0][i][j] / 10
            }
        }
    }

    /**
     * Runs the embedding model on the current mel window and slides the resulting
     * vector into [embeddingBuffer].
     */
    private fun bufferEmbeddings() {
        val embeddingPredictions = embeddingModelPredict(embeddingInput(melspecBuffer))
        val newEmbeddings = embeddingPredictions.floatArray

        // Shift the embedding window left by one frame.
        for (i in 0 until EMBEDDING_FRAMES - 1) {
            for (j in 0 until EMBEDDING_DIM) {
                embeddingBuffer[0][i][j] = embeddingBuffer[0][i + 1][j]
            }
        }

        // Append the newest embedding at the end of the window.
        for (j in 0 until EMBEDDING_DIM) {
            embeddingBuffer[0][EMBEDDING_FRAMES - 1][j] = newEmbeddings[j]
        }
    }

    /**
     * Runs the melspectrogram ONNX session on one raw-audio window.
     * Input tensor and result are closed even if inference throws.
     *
     * @param floatArray RAW_BUFFER_SIZE amplified samples.
     * @return model output indexed as [1][1][frames][MEL_BINS].
     */
    private fun melspecOnnxPredict(floatArray: FloatArray): Array<Array<Array<FloatArray>>> {
        val floatBuffer = FloatBuffer.wrap(floatArray)
        OnnxTensor.createTensor(env, floatBuffer, longArrayOf(1, RAW_BUFFER_SIZE.toLong()))
            .use { inputTensor ->
                melspecOnnx.run(mapOf("input" to inputTensor)).use { result ->
                    @Suppress("UNCHECKED_CAST")
                    return (result[0] as OnnxTensor).value as Array<Array<Array<FloatArray>>>
                }
            }
    }

    /**
     * Flattens the mel window into a direct, native-order ByteBuffer for the TFLite
     * embedding model (shape 1 x MEL_FRAMES x MEL_BINS x 1, float32).
     */
    private fun embeddingInput(data: Array<Array<Array<FloatArray>>>): ByteBuffer {
        val flattenedData = FloatArray(MEL_FRAMES * MEL_BINS)
        var index = 0
        for (i in 0 until MEL_FRAMES) {
            for (j in 0 until MEL_BINS) {
                flattenedData[index++] = data[0][i][j][0]
            }
        }
        val byteBuffer = ByteBuffer.allocateDirect(flattenedData.size * 4)
            .order(ByteOrder.nativeOrder())
        byteBuffer.asFloatBuffer().put(flattenedData)
        return byteBuffer
    }

    /** Runs the TFLite embedding model on a prepared input buffer. */
    private fun embeddingModelPredict(byteBuffer: ByteBuffer): TensorBuffer {
        val inputFeature0 =
            TensorBuffer.createFixedSize(intArrayOf(1, MEL_FRAMES, MEL_BINS, 1), DataType.FLOAT32)
        inputFeature0.loadBuffer(byteBuffer)
        val outputs = embeddingModel.process(inputFeature0)
        return outputs.outputFeature0AsTensorBuffer
    }

    /**
     * Scores the current embedding window with the wake-word classifier, replacing
     * [confidence] and updating the moving average.
     */
    private fun getWakeWordPrediction(array: Array<Array<FloatArray>>) {
        val wakewordPrediction = wakewordModelPredict(wakewordInput(array))
        confidence = wakewordPrediction.floatArray
        addScore(confidence[0])
    }

    /**
     * Flattens the embedding window into a direct, native-order ByteBuffer for the
     * TFLite classifier (shape 1 x EMBEDDING_FRAMES x EMBEDDING_DIM, float32).
     */
    private fun wakewordInput(data: Array<Array<FloatArray>>): ByteBuffer {
        val flattenedData = FloatArray(EMBEDDING_FRAMES * EMBEDDING_DIM)
        var index = 0
        for (i in 0 until EMBEDDING_FRAMES) {
            for (j in 0 until EMBEDDING_DIM) {
                flattenedData[index++] = data[0][i][j]
            }
        }
        val byteBuffer = ByteBuffer.allocateDirect(flattenedData.size * 4)
            .order(ByteOrder.nativeOrder())
        byteBuffer.asFloatBuffer().put(flattenedData)
        return byteBuffer
    }

    /** Runs the TFLite wake-word classifier on a prepared input buffer. */
    private fun wakewordModelPredict(byteBuffer: ByteBuffer): TensorBuffer {
        val inputFeature0 =
            TensorBuffer.createFixedSize(intArrayOf(1, EMBEDDING_FRAMES, EMBEDDING_DIM), DataType.FLOAT32)
        inputFeature0.loadBuffer(byteBuffer)
        val outputs = wakewordModel.process(inputFeature0)
        return outputs.outputFeature0AsTensorBuffer
    }

    /**
     * Runs the ONNX verifier on the current embedding window.
     * Input tensor and result are closed even if inference throws.
     *
     * @return the verifier score — for a two-class output, element 1 (presumably the
     *         positive class; TODO confirm against the exported model), otherwise the
     *         single output value.
     */
    private fun verifierOnnxPredict(data: Array<Array<FloatArray>>): Float {
        val floatArray = FloatArray(EMBEDDING_FRAMES * EMBEDDING_DIM)
        var index = 0
        for (i in 0 until EMBEDDING_FRAMES) {
            for (j in 0 until EMBEDDING_DIM) {
                floatArray[index++] = data[0][i][j]
            }
        }
        val floatBuffer = FloatBuffer.wrap(floatArray)
        OnnxTensor.createTensor(
            env,
            floatBuffer,
            longArrayOf(1, EMBEDDING_FRAMES.toLong(), EMBEDDING_DIM.toLong())
        ).use { inputTensor ->
            verifierOnnx.run(mapOf("onnx::Flatten_0" to inputTensor)).use { result ->
                @Suppress("UNCHECKED_CAST")
                val outputTensor = result[0].value as Array<FloatArray>
                return if (outputTensor[0].size > 1) outputTensor[0][1] else outputTensor[0][0]
            }
        }
    }

    /**
     * Pushes a new classifier score into the bounded queue and refreshes the
     * moving average (window size [maxScores]).
     */
    private fun addScore(newScore: Float) {
        if (scoreQueue.size == maxScores) {
            scoreQueue.pollFirst()
        }
        scoreQueue.add(newScore)
        averagedConfidence = scoreQueue.average().toFloat()
    }

    /** Releases all four models. The detector must not be used after this call. */
    fun release() {
        embeddingModel.close()
        wakewordModel.close()
        melspecOnnx.close()
        verifierOnnx.close()
    }

    companion object {
        // Audio framing: 1280 new samples per 80 ms chunk at 16 kHz, with a
        // 480-sample carry-over, giving a 1760-sample model input window.
        private const val CHUNK_SIZE = 1280
        private const val OVERLAP_SIZE = 480
        private const val RAW_BUFFER_SIZE = CHUNK_SIZE + OVERLAP_SIZE

        // Melspectrogram window geometry.
        private const val MEL_FRAMES = 76
        private const val MEL_BINS = 32
        private const val NEW_MEL_FRAMES = 8

        // Embedding window geometry.
        private const val EMBEDDING_FRAMES = 16
        private const val EMBEDDING_DIM = 96

        // Decision thresholds for the classifier and the verifier stage.
        private const val DETECTION_THRESHOLD = 0.35f
        private const val VERIFIER_THRESHOLD = 0.35f

        // Verbose confidence logging interval, in processed chunks.
        private const val LOG_INTERVAL_FRAMES = 10
    }
}
