package com.roger.homecenterscreen.voice

import android.annotation.SuppressLint
import android.content.Context
import android.media.AudioFormat
import android.media.AudioRecord
import android.media.MediaRecorder
import android.media.audiofx.NoiseSuppressor
import android.media.audiofx.AcousticEchoCanceler
import android.media.audiofx.AutomaticGainControl
import android.os.Handler
import android.os.Looper
import android.util.Log
import okhttp3.OkHttpClient
import okhttp3.Request
import okhttp3.Response
import okhttp3.WebSocket
import okhttp3.WebSocketListener
import okio.ByteString
import okio.ByteString.Companion.toByteString
import org.json.JSONArray
import org.json.JSONObject

/**
 * 语音识别管理器（FunASR WebSocket 替换版）
 */
class AsrManager(
    private val context: Context,
    private val voiceManager: VoiceManager
) {
    companion object {
        private const val TAG = "AsrManager"
    }

    // --- Audio capture ---
    private var recorder: AudioRecord? = null
    private var recognizerThread: Thread? = null

    // --- WebSocket connection to the FunASR server ---
    private var audioWebSocket: WebSocket? = null
    // Used to marshal recognition callbacks onto the UI thread.
    private val mainHandler = Handler(Looper.getMainLooper())

    // --- Recognition state ---
    @Volatile
    private var isRecognizing: Boolean = false

    // Simple VAD state machine: audio is only forwarded to ASR while in LISTENING.
    private enum class VadState { WAITING, LISTENING }
    @Volatile
    private var vadState: VadState = VadState.WAITING
    // Timestamp (ms) of the last buffer whose average amplitude exceeded the VAD threshold.
    @Volatile
    private var lastLoudTimeMs: Long = 0L

    // Ring buffer of the most recent audio (byte-level, 16-bit mono PCM,
    // ~10 s at the ASR sample rate) used for voiceprint extraction.
    private val recentAudioBytes: ByteArray = ByteArray(VoiceConfig.ASR_SAMPLE_RATE * 10 * 2)
    @Volatile
    private var recentWriteIndex: Int = 0
    @Volatile
    private var recentBytesWritten: Int = 0
    // Second ring buffer holding audio actually sent for recognition (same 10 s capacity).
    private val recognitionAudioBytes: ByteArray = ByteArray(VoiceConfig.ASR_SAMPLE_RATE * 10 * 2)
    @Volatile
    private var recognitionWriteIndex: Int = 0
    @Volatile
    private var recognitionBytesWritten: Int = 0
    // State of the one-pole high-pass filter (previous output / previous input sample).
    private var hpPrevY: Double = 0.0
    private var hpPrevX: Double = 0.0
    // Utterance-recording state machine (separate from the VAD state).
    private enum class RecState { IDLE, RECORDING }
    @Volatile
    private var recState: RecState = RecState.IDLE
    @Volatile
    private var recStartAt: Long = 0L
    // Timestamp of the last ASR message received while RECORDING.
    @Volatile
    private var recLastMsgAt: Long = 0L
    // End of the post-silence grace window; 0 means no grace window active.
    @Volatile
    private var recGraceUntil: Long = 0L
    private var recBytes = java.io.ByteArrayOutputStream()
    // Hard cap on a single utterance recording: 30 s of 16-bit mono PCM.
    private val recMaxBytes = VoiceConfig.ASR_SAMPLE_RATE * 30 * 2
    // Guards recBytes, which is written by the recognizer thread and reset elsewhere.
    private val recLock = Any()
    private var lastRecSavedFile: java.io.File? = null
    private val preMs = VoiceConfig.REC_PRE_MS
    private val postMs = VoiceConfig.REC_POST_MS
    private val silenceMs = VoiceConfig.REC_SILENCE_MS
    private val timeoutMs = VoiceConfig.REC_TIMEOUT_MS
    @Volatile
    private var wakeupDetected: Boolean = false
    @Volatile
    private var startVoiceRecTime: Long = 0L
    // Timestamp of the last message received over the ASR WebSocket.
    @Volatile
    private var lastWsMsgAt: Long = 0L
    private var voiceRecFile: java.io.File? = null
    private var voiceWriter: WavFileWriter? = null
    // Timestamps of recent audio-send events, used to back-align recordStartAligned.
    private val sendEventTimes: java.util.ArrayDeque<Long> = java.util.ArrayDeque()
    @Volatile
    private var firstAsrTextAt: Long = 0L
    @Volatile
    private var recordStartAligned: Long = 0L
    @Volatile
    private var wakeupAtMs: Long = 0L
    @Volatile
    private var lastVoiceprintCandidateFile: java.io.File? = null
    
    // --- ASR text splicing ---
    // NOTE(review): @Volatile only makes the *reference* visible across threads;
    // StringBuilder itself is not thread-safe. Confirm all append/clear calls
    // happen on a single thread (they appear to run on the OkHttp reader thread).
    @Volatile
    private var cumulativeAsrText: StringBuilder = StringBuilder()
    @Volatile
    private var triggerWordDetected: Boolean = false
    @Volatile
    private var triggerWordDetectTime: Long = 0L
    @Volatile
    private var startRecordingAtMs: Long = 0L
    @Volatile
    private var voiceStartAtMs: Long = 0L
    @Volatile
    private var lastAsrAtMs: Long = 0L
    @Volatile
    private var vpExtracted: Boolean = false
    // Voiceprint capture window: active flag, byte budget, accumulated PCM, anchor time.
    private var vpActive: Boolean = false
    private var vpTargetBytes: Int = ((VoiceConfig.ASR_SAMPLE_RATE * VoiceConfig.VOICEPRINT_KEEP_MS) / 1000) * 2
    private var vpMaxBytes: Int = ((VoiceConfig.ASR_SAMPLE_RATE * VoiceConfig.VOICEPRINT_KEEP_MS) / 1000) * 2
    private var vpBytes = java.io.ByteArrayOutputStream()
    private var vpStartAtMs: Long = 0L

    /** Reset the accumulated ASR text and the trigger-word detection state. */
    fun clearAsrTextSplicingCache() {
        triggerWordDetected = false
        triggerWordDetectTime = 0L
        cumulativeAsrText.setLength(0)
    }

    /**
     * 初始化（此实现无需第三方SDK初始化，直接返回成功）
     */
    /**
     * Initialization hook. The FunASR WebSocket implementation needs no
     * third-party SDK setup, so this only logs the target endpoint and
     * always reports success.
     */
    suspend fun initialize(): Boolean {
        Log.i(
            TAG,
            "初始化FunASR识别模块（无需SDK初始化）: ws=${VoiceConfig.FUNASR_WS_URL}, sampleRate=${VoiceConfig.ASR_SAMPLE_RATE}"
        )
        return true
    }

    /**
     * 开始语音识别
     */
    /**
     * Start voice recognition: reset all per-session state, open the
     * microphone, connect the FunASR WebSocket and spin up the reader thread.
     * A failure anywhere rolls back [isRecognizing] and reports the error
     * through [VoiceManager.onRecognitionError]. Re-entrant calls are ignored.
     */
    fun startRecognition() {
        if (isRecognizing) {
            Log.w(TAG, "语音识别已经在进行中，忽略重复启动")
            return
        }

        // Clears every piece of per-session state (ring buffers, filter state,
        // recording/VAD bookkeeping, text splicing, voiceprint window).
        fun resetSessionState() {
            recentWriteIndex = 0
            recentBytesWritten = 0
            recognitionWriteIndex = 0
            recognitionBytesWritten = 0
            hpPrevY = 0.0
            hpPrevX = 0.0
            recState = RecState.IDLE
            recStartAt = 0L
            recLastMsgAt = 0L
            recGraceUntil = 0L
            startVoiceRecTime = 0L
            lastWsMsgAt = 0L
            wakeupDetected = false
            wakeupAtMs = 0L
            firstAsrTextAt = 0L
            recordStartAligned = 0L
            cumulativeAsrText.clear()
            triggerWordDetected = false
            triggerWordDetectTime = 0L
            vpActive = false
            vpBytes.reset()
            vpStartAtMs = 0L
            startRecordingAtMs = 0L
            voiceStartAtMs = 0L
            lastAsrAtMs = 0L
            vpExtracted = false
            recBytes.reset()
        }

        try {
            Log.i(TAG, "开始语音识别（FunASR）")
            isRecognizing = true
            resetSessionState()
            initAudioRecord()
            Log.i(TAG, "录音器初始化完成，开始建立WebSocket连接")
            openWebSocket()
            Log.i(TAG, "WebSocket初始化流程已触发，启动识别线程")
            startRecognizerThread()
        } catch (e: Exception) {
            Log.e(TAG, "启动FunASR识别失败", e)
            isRecognizing = false
            voiceManager.onRecognitionError("启动失败: ${e.message}")
        }
    }

    /**
     * 停止语音识别
     */
    /**
     * Stop voice recognition: stop the recorder, terminate the reader thread,
     * send the end-of-stream message, then release the microphone and close
     * the WebSocket. Each cleanup stage is isolated so one failure does not
     * prevent the others. No-op when recognition is not running.
     */
    fun stopRecognition() {
        if (!isRecognizing) return
        Log.i(TAG, "停止语音识别（FunASR）")
        isRecognizing = false

        try {
            val rec = recorder
            if (rec != null && rec.recordingState == AudioRecord.RECORDSTATE_RECORDING) {
                rec.stop()
            }
            stopRecognizerThread()
            sendEndMessage()
        } catch (e: Exception) {
            Log.e(TAG, "停止识别线程失败", e)
        }

        try {
            recorder?.release()
        } catch (e: Exception) {
            Log.e(TAG, "释放录音资源失败", e)
        } finally {
            recorder = null
        }

        try {
            audioWebSocket?.close(1000, "stop")
        } catch (e: Exception) {
            Log.e(TAG, "关闭WebSocket失败", e)
        } finally {
            audioWebSocket = null
        }
    }

    /**
     * 释放资源
     */
    /** Release all resources held by this manager; delegates to [stopRecognition]. */
    fun release() = stopRecognition()

    // --- 内部实现 ---

    /**
     * Create and configure the [AudioRecord] used for capture.
     *
     * Picks VOICE_RECOGNITION or MIC as the source per config, sizes the
     * buffer to at least the platform minimum, and enables the platform's
     * built-in pre-processing effects (NS/AEC/AGC) when available.
     *
     * Fix: the success logs previously fired even when `create()` returned
     * null, falsely claiming an effect was enabled; they now log only when
     * the effect was actually created and enabled.
     *
     * @throws IllegalStateException if the microphone cannot be initialized.
     */
    @SuppressLint("MissingPermission")
    private fun initAudioRecord() {
        Log.i(TAG, "初始化录音器 buffer=${VoiceConfig.FUNASR_SEND_SIZE} sampleRate=${VoiceConfig.ASR_SAMPLE_RATE}")
        val sampleRate = VoiceConfig.ASR_SAMPLE_RATE
        val minBuffer = AudioRecord.getMinBufferSize(
            sampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT
        )
        val bufferSize = kotlin.math.max(VoiceConfig.FUNASR_SEND_SIZE, minBuffer)
        val source = if (VoiceConfig.AUDIO_SOURCE_VOICE_RECOGNITION) MediaRecorder.AudioSource.VOICE_RECOGNITION else MediaRecorder.AudioSource.MIC
        val audioRecord = AudioRecord(
            source,
            sampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSize
        )

        if (audioRecord.state == AudioRecord.STATE_UNINITIALIZED) {
            audioRecord.release()
            throw IllegalStateException("初始化麦克风失败")
        }
        recorder = audioRecord

        // Enable the system's built-in audio pre-processing, if available.
        // NOTE(review): the created effect instances are not retained, so they
        // are never released explicitly — presumably they die with the audio
        // session; confirm no accumulation across start/stop cycles.
        try {
            val sessionId = audioRecord.audioSessionId
            if (VoiceConfig.ENABLE_NOISE_SUPPRESSOR && NoiseSuppressor.isAvailable()) {
                NoiseSuppressor.create(sessionId)?.let {
                    it.enabled = true
                    Log.i(TAG, "已启用 NoiseSuppressor")
                }
            }
            if (VoiceConfig.ENABLE_ACOUSTIC_ECHO_CANCELER && AcousticEchoCanceler.isAvailable()) {
                AcousticEchoCanceler.create(sessionId)?.let {
                    it.enabled = true
                    Log.i(TAG, "已启用 AcousticEchoCanceler")
                }
            }
            if (VoiceConfig.ENABLE_AUTOMATIC_GAIN_CONTROL && AutomaticGainControl.isAvailable()) {
                AutomaticGainControl.create(sessionId)?.let {
                    it.enabled = true
                    Log.i(TAG, "已启用 AutomaticGainControl")
                }
            }
        } catch (e: Exception) {
            Log.w(TAG, "启用系统前处理失败", e)
        }
    }

    /**
     * Build the FunASR handshake JSON (mode, chunk sizing, hotwords, format).
     *
     * Fixes:
     * - chunk-size tokens and hotword weights previously used [String.toInt],
     *   so a single malformed config value threw [NumberFormatException] and
     *   aborted the WebSocket handshake; invalid entries are now skipped with
     *   a warning instead.
     * - hotword lines were split on a single space, silently dropping lines
     *   with tabs or multiple spaces; splitting now uses a whitespace regex.
     *
     * @param isSpeaking forwarded to the server as `is_speaking`.
     * @return the handshake message as a JSON string.
     */
    private fun buildInitMessage(isSpeaking: Boolean): String {
        val obj = JSONObject()
        obj.put("mode", VoiceConfig.FUNASR_MODE)

        // chunk_size is configured as a comma-separated int list, e.g. "5,10,5".
        val array = JSONArray()
        for (s in VoiceConfig.FUNASR_CHUNK_SIZE.split(',')) {
            val v = s.trim()
            if (v.isEmpty()) continue
            val n = v.toIntOrNull()
            if (n != null) array.put(n) else Log.w(TAG, "忽略无效的chunk_size项: $v")
        }
        obj.put("chunk_size", array)
        obj.put("chunk_interval", VoiceConfig.FUNASR_CHUNK_INTERVAL)
        obj.put("wav_name", "default")
        if (VoiceConfig.FUNASR_HOTWORDS.isNotEmpty()) {
            val hotwordsJSON = JSONObject()
            val whitespace = Regex("\\s+") // hoisted: avoid recompiling per line
            VoiceConfig.FUNASR_HOTWORDS.trim().lines().forEach { line ->
                if (line.isBlank()) return@forEach
                // Each line is "<word> <weight>"; tolerate tabs/extra spaces.
                val parts = line.trim().split(whitespace)
                val weight = if (parts.size == 2) parts[1].toIntOrNull() else null
                if (weight != null) {
                    hotwordsJSON.put(parts[0], weight)
                } else {
                    Log.w(TAG, "忽略无效热词行: $line")
                }
            }
            // FunASR expects the hotword map serialized as a string field.
            obj.put("hotwords", hotwordsJSON.toString())
        }
        obj.put("wav_format", "pcm")
        obj.put("is_speaking", isSpeaking)
        return obj.toString()
    }

    /**
     * Open (or reopen) the WebSocket to the FunASR server and install the
     * listener that drives recognition results, trigger-word detection,
     * voiceprint window capture, and automatic reconnection.
     *
     * Listener callbacks run on OkHttp's reader thread; UI-facing results are
     * posted to the main thread via [mainHandler].
     */
    private fun openWebSocket() {
        Log.i(TAG, "建立ASR WebSocket连接: ${VoiceConfig.FUNASR_WS_URL}")
        val builder = OkHttpClient.Builder()
            .retryOnConnectionFailure(true)
        if (VoiceConfig.FUNASR_TRUST_ALL_SSL) {
            try {
                // Trust-all TLS for self-signed FunASR deployments.
                val sslClient = SSLSocketClient()
                builder.sslSocketFactory(sslClient.sslSocketFactory, sslClient.trustManager)
                builder.hostnameVerifier { _, _ -> true }
            } catch (e: Exception) {
                Log.w(TAG, "配置信任所有SSL失败，将使用默认SSL", e)
            }
        }
        val client = builder.build()
        val url = VoiceConfig.FUNASR_WS_URL
        val request = Request.Builder().url(url).build()
        audioWebSocket = client.newWebSocket(request, object : WebSocketListener() {
            override fun onOpen(webSocket: WebSocket, response: Response) {
                Log.d(TAG, "FunASR WebSocket已连接: code=${response.code}")
                // Handshake: announce streaming parameters with is_speaking=true.
                val initMsg = buildInitMessage(true)
                Log.d(TAG, "发送初始化消息: $initMsg")
                webSocket.send(initMsg)
            }

                    override fun onMessage(webSocket: WebSocket, text: String) {
                        Log.d(TAG, "收到ASR消息: $text")
                        try {
                            val json = JSONObject(text)
                            val textResult = json.optString("text")
                            val isFinal = json.optBoolean("is_final", false)
                            if (textResult.isNotBlank()) {
                                // Deliver the partial/final text on the UI thread.
                                mainHandler.post {
                                    voiceManager.onRecognitionResult(textResult, isFinal)
                                }
                                lastWsMsgAt = System.currentTimeMillis()
                                val now = System.currentTimeMillis()
                                if (wakeupAtMs == 0L) wakeupAtMs = now - VoiceConfig.START_REC_BACKOFF_MS
                                if (firstAsrTextAt == 0L) {
                                    firstAsrTextAt = now
                                    // Back-align the recording start to the newest
                                    // audio-send event not after `now`.
                                    var startTs = 0L
                                    val it = sendEventTimes.descendingIterator()
                                    while (it.hasNext()) {
                                        val t = it.next()
                                        if (t <= now) { startTs = t; break }
                                    }
                                    var base = if (startTs > 0L) startTs else now
                                    var candidate = base - VoiceConfig.START_REC_BACKOFF_MS
                                    // NOTE(review): this clamps candidate DOWN to wakeupAtMs
                                    // (`>`), while the recognizer thread clamps UP (`<`) —
                                    // confirm the asymmetry is intentional.
                                    if (wakeupAtMs > 0L && candidate > wakeupAtMs) candidate = wakeupAtMs
                                    recordStartAligned = candidate
                                }
                                
                                // ASR text splicing: accumulate every partial segment.
                                cumulativeAsrText.append(textResult)
                                enforceTextUtf8Limit()
                                val cumulativeText = cumulativeAsrText.toString()
                                Log.d(TAG, "ASR文本拼接: 当前文本='$textResult', 累积文本='$cumulativeText', 是否已检测触发词=$triggerWordDetected")
                                
                                // Check whether the accumulated text now contains the full trigger word.
                                if (!triggerWordDetected && cumulativeText.contains(VoiceConfig.WAKEUP_WORD)) {
                                    triggerWordDetected = true
                                    triggerWordDetectTime = now
                                    if (wakeupAtMs == 0L) wakeupAtMs = now - VoiceConfig.START_REC_BACKOFF_MS
                                    
                                    Log.i(TAG, "🎯 检测到完整触发词 '${VoiceConfig.WAKEUP_WORD}'")
                                    Log.i(TAG, "📄 累积文本='$cumulativeText'")
                                    Log.i(TAG, "⏰ 检测时间=$triggerWordDetectTime, 唤醒时间=$wakeupAtMs")
                                    
                                    // Start the voiceprint capture window once the full
                                    // trigger word has been detected, seeded with the
                                    // recent pre-roll audio from the ring buffer.
                                    if (!vpActive) {
                                        vpBytes.reset()
                                        val pre2 = exportRecentPcm(VoiceConfig.START_REC_BACKOFF_MS)
                                        if (pre2.isNotEmpty()) vpBytes.write(pre2)
                                        vpStartAtMs = triggerWordDetectTime
                                        vpActive = true
                                        Log.i(TAG, "🔊 启动voiceprint窗口: 锚点时间=$triggerWordDetectTime, 预录数据=${pre2.size}字节, 目标大小=${vpTargetBytes}字节")
                                    }
                                } else if (!triggerWordDetected) {
                                    Log.d(TAG, "触发词未检测到，继续累积文本...")
                                }
                                
                                if (voiceStartAtMs == 0L) voiceStartAtMs = now - VoiceConfig.START_REC_BACKOFF_MS
                                lastAsrAtMs = now
                                
                                if (recState == RecState.RECORDING) {
                                    recLastMsgAt = now
                                }
                            }
                            if (isFinal) {
                                // ASR finished: finalize the voiceprint window — trim
                                // silence, cap to VOICEPRINT_KEEP_MS (keeping the tail),
                                // and persist the candidate as a WAV file.
                                if (vpActive) {
                                    val raw = vpBytes.toByteArray()
                                    val trimmed = trimSilence(raw, VoiceConfig.ASR_SAMPLE_RATE)
                                    val bytesPerMs = (VoiceConfig.ASR_SAMPLE_RATE * 2) / 1000
                                    val limitBytes = bytesPerMs * VoiceConfig.VOICEPRINT_KEEP_MS
                                    val limited = if (trimmed.size > limitBytes) trimmed.copyOfRange(trimmed.size - limitBytes, trimmed.size) else trimmed
                                    val dir3 = VoiceStorageHelper.getVoiceRecordDir(context)
                                    val vf2 = java.io.File(dir3, "voiceprint_${System.currentTimeMillis()}.wav")
                                    writeWavFile(vf2, limited, VoiceConfig.ASR_SAMPLE_RATE)
                                    lastVoiceprintCandidateFile = vf2
                                    vpActive = false
                                }
                                
                                if (recState == RecState.RECORDING) {
                                    finalizeRecording()
                                }
                                webSocket.close(1000, "final")
                            }
                        } catch (e: Exception) {
                            Log.e(TAG, "解析ASR消息失败", e)
                        }
                    }

            override fun onClosing(webSocket: WebSocket, code: Int, reason: String) {
                Log.d(TAG, "ASR WebSocket关闭: $code/$reason")
                webSocket.close(1000, null)
                // Continuous-recognition mode: keep recording and reconnect
                // automatically while recognition is still active.
                if (isRecognizing) {
                    try {
                        openWebSocket()
                    } catch (e: Exception) {
                        Log.e(TAG, "重建ASR连接失败", e)
                    }
                }
            }

            override fun onFailure(webSocket: WebSocket, t: Throwable, response: Response?) {
                Log.e(TAG, "ASR WebSocket异常: code=${response?.code} msg=${response?.message}", t)
                mainHandler.post {
                    voiceManager.onRecognitionError("网络异常: ${t.message}")
                }
                // Auto-reconnect after a failure (keeps continuous recognition alive).
                if (isRecognizing) {
                    try {
                        // If TLS failed and fallback is allowed, retry over plain ws://
                        // reusing this same listener.
                        if (VoiceConfig.FUNASR_ALLOW_WS_FALLBACK && url.startsWith("wss://")) {
                            val wsUrl = url.replaceFirst("wss://", "ws://")
                            Log.w(TAG, "尝试从WSS回退到WS: $wsUrl")
                            val fallbackClient = OkHttpClient.Builder().retryOnConnectionFailure(true).build()
                            val fallbackReq = Request.Builder().url(wsUrl).build()
                            audioWebSocket = fallbackClient.newWebSocket(fallbackReq, this)
                        } else {
                            openWebSocket()
                        }
                    } catch (e: Exception) {
                        Log.e(TAG, "异常后重连失败", e)
                    }
                }
            }
        })
    }

    private fun startRecognizerThread() {
        val thread = object : Thread("FunASR-Recognizer") {
            override fun run() {
                try {
                    val rec = recorder ?: return
                    Log.i(TAG, "调用 AudioRecord.startRecording()")
                    rec.startRecording()
                    if (rec.recordingState == AudioRecord.RECORDSTATE_STOPPED) {
                        mainHandler.post {
                            voiceManager.onRecognitionError("麦克风不可用")
                        }
                        return
                    }
                    val buffer = ByteArray(VoiceConfig.FUNASR_SEND_SIZE)
                    Log.i(TAG, "开始从麦克风读取并发送音频... bufferSize=${VoiceConfig.FUNASR_SEND_SIZE}")
                    vadState = VadState.WAITING
                    mainHandler.post { voiceManager.onVadStateChanged(false) }
                    lastLoudTimeMs = 0L
                    while (isRecognizing) {
                        val read = rec.read(buffer, 0, buffer.size)
                        if (read > 0) {
                            var k = 0
                            if (recentBytesWritten >= recentAudioBytes.size) {
                                recentWriteIndex = 0
                                recentBytesWritten = 0
                                startRecordingAtMs = System.currentTimeMillis()
                            }
                            while (k < read) {
                                recentAudioBytes[recentWriteIndex] = buffer[k]
                                recentWriteIndex = (recentWriteIndex + 1) % recentAudioBytes.size
                                k++
                            }
                            val newTotal = recentBytesWritten + read
                            recentBytesWritten = if (newTotal >= recentAudioBytes.size) recentAudioBytes.size else newTotal

                            // 高通滤波（单极一阶，去除低频噪声）
                            if (VoiceConfig.ENABLE_HIGHPASS_FILTER) {
                                val rc = 1.0 / (2 * Math.PI * VoiceConfig.HIGHPASS_CUTOFF_HZ)
                                val dt = 1.0 / VoiceConfig.ASR_SAMPLE_RATE
                                val alpha = rc / (rc + dt)
                                var j = 0
                                var prevY = hpPrevY
                                var prevX = hpPrevX
                                while (j + 1 < read) {
                                    val low = buffer[j].toInt() and 0xFF
                                    val high = buffer[j + 1].toInt() and 0xFF
                                    val sample = (high shl 8) or low
                                    val signed = sample.toShort().toInt()
                                    val x = signed.toDouble()
                                    val y = alpha * (prevY + x - prevX)
                                    val yInt = y.toInt().coerceIn(Short.MIN_VALUE.toInt(), Short.MAX_VALUE.toInt())
                                    buffer[j] = (yInt and 0xFF).toByte()
                                    buffer[j + 1] = ((yInt shr 8) and 0xFF).toByte()
                                    prevY = y
                                    prevX = x
                                    j += 2
                                }
                                hpPrevY = prevY
                                hpPrevX = prevX
                            }

                            var sumAbs = 0L
                            var samples = 0
                            var i = 0
                            while (i + 1 < read) {
                                val low = buffer[i].toInt() and 0xFF
                                val high = buffer[i + 1].toInt() and 0xFF
                                val sample = (high shl 8) or low
                                val signed = sample.toShort().toInt()
                                sumAbs += kotlin.math.abs(signed)
                                samples++
                                i += 2
                            }
                            val avgAbs = if (samples > 0) (sumAbs / samples).toInt() else 0

                            // 更新VAD状态机
                            val now = System.currentTimeMillis()
                            if (VoiceConfig.FUNASR_VAD_ENABLE) {
                                if (avgAbs >= VoiceConfig.FUNASR_VAD_AVG_ABS_THRESHOLD) {
                                    if (vadState != VadState.LISTENING) {
                                        Log.i(TAG, "VAD: 检测到超过阈值，切换到LISTENING状态 (avgAbs=$avgAbs)")
                                        mainHandler.post { voiceManager.onVadStateChanged(true) }
                                        vadState = VadState.LISTENING
                                        forceClearOnListeningStart(now)
                                    } else {
                                        vadState = VadState.LISTENING
                                    }
                                    if (startRecordingAtMs == 0L) startRecordingAtMs = now
                                    lastLoudTimeMs = now
                                } else {
                                    if (vadState == VadState.LISTENING && lastLoudTimeMs > 0L && (now - lastLoudTimeMs) >= VoiceConfig.FUNASR_VAD_SILENCE_TIMEOUT_MS) {
                                        vadState = VadState.WAITING
                                        Log.i(TAG, "VAD: 静音超时(${VoiceConfig.FUNASR_VAD_SILENCE_TIMEOUT_MS}ms)，切换到WAITING状态")
                                        mainHandler.post { voiceManager.onVadStateChanged(false) }
                                        if (recState == RecState.RECORDING && !wakeupDetected) {
                                            recState = RecState.IDLE
                                            synchronized(recLock) { recBytes.reset() }
                                            try { voiceWriter?.close() } catch (_: Exception) {}
                                            voiceWriter = null
                                            voiceRecFile = null
                                            startVoiceRecTime = 0L
                                        }
                            }
                        }
                        }

                            var kk = 0
                            while (kk < read) {
                                recognitionAudioBytes[recognitionWriteIndex] = buffer[kk]
                                recognitionWriteIndex = (recognitionWriteIndex + 1) % recognitionAudioBytes.size
                                kk++
                            }
                            val newRecTotal = recognitionBytesWritten + read
                            recognitionBytesWritten = if (newRecTotal >= recognitionAudioBytes.size) recognitionAudioBytes.size else newRecTotal
                            if (vpActive) {
                                val space = vpMaxBytes - vpBytes.size()
                                val take = if (space > 0) kotlin.math.min(space, read) else 0
                                if (take > 0) vpBytes.write(buffer, 0, take)
                                // 记录voiceprint数据捕获情况
                                if (take > 0 && vpBytes.size() % (16000 * 2) == 0) {  // 每秒记录一次
                                    Log.d(TAG, "voiceprint数据收集中... 已收集${vpBytes.size()}字节, 空间剩余${space}字节")
                                }
                            }
                            if (vadState == VadState.LISTENING && recState == RecState.IDLE) {
                                recState = RecState.RECORDING
                                recStartAt = System.currentTimeMillis()
                                recLastMsgAt = recStartAt
                                recGraceUntil = 0L
                                synchronized(recLock) { recBytes.reset() }
                                val dir = VoiceStorageHelper.getVoiceRecordDir(context)
                                val f = java.io.File(dir, "voice_rec_wav_${System.currentTimeMillis()}.wav")
                                voiceRecFile = f
                                val w = WavFileWriter(f, VoiceConfig.ASR_SAMPLE_RATE)
                                w.open()
                                val pre = exportRecentPcm(VoiceConfig.START_REC_BACKOFF_MS)
                                if (pre.isNotEmpty()) { synchronized(recLock) { recBytes.write(pre) }; w.appendBytes(pre) }
                                voiceWriter = w
                                var base = recStartAt
                                var candidate = base - VoiceConfig.START_REC_BACKOFF_MS
                                if (wakeupAtMs > 0L && candidate < wakeupAtMs) candidate = wakeupAtMs
                                recordStartAligned = candidate
                            }
                            if (!VoiceConfig.FUNASR_VAD_ENABLE || vadState == VadState.LISTENING) {
                                val data = buffer.toByteString(0, read)
                                audioWebSocket?.send(data)
                                sendEventTimes.addLast(now)
                                while (sendEventTimes.size > 300) { sendEventTimes.removeFirst() }
                            }
//                            else {
//                                Log.d(TAG, "VAD状态=$vadState，跳过发送，avgAbs=$avgAbs")
//                            }
                            if (recState == RecState.RECORDING) {
                                synchronized(recLock) { recBytes.write(buffer, 0, read) }
                                voiceWriter?.appendBytes(buffer, 0, read)
                                if (recBytes.size() >= recMaxBytes) {
                                    finalizeRecording()
                                }
                                val now2 = System.currentTimeMillis()
                                if (now2 - recLastMsgAt >= silenceMs) {
                                    if (recGraceUntil == 0L) recGraceUntil = recLastMsgAt + postMs
                                }
                                val idle = lastAsrAtMs > 0L && (now2 - lastAsrAtMs) > VoiceConfig.WS_IDLE_TIMEOUT_MS
                                val cont = lastAsrAtMs > 0L && voiceStartAtMs > 0L && (lastAsrAtMs - voiceStartAtMs) >= VoiceConfig.VOICEPRINT_KEEP_MS
                                if (!vpExtracted && (idle || cont)) {
                                    try {
                                        val bytes = exportVoiceprintWindow()
                                        if (bytes.isNotEmpty()) {
                                            val dir3 = VoiceStorageHelper.getVoiceRecordDir(context)
                                            val vf2 = java.io.File(dir3, "voiceprint_${System.currentTimeMillis()}.wav")
                                            writeWavFile(vf2, bytes, VoiceConfig.ASR_SAMPLE_RATE)
                                            lastVoiceprintCandidateFile = vf2
                                            vpExtracted = true
                                        }
                                    } catch (_: Exception) {}
                                }
                                if (idle) {
                                    try {
                                        val finalText = recognizeFileSync(voiceRecFile)
                                        if (!finalText.isNullOrBlank()) {
                                            mainHandler.post { voiceManager.onRecognitionResult(finalText, true) }
                                        }
                                        firstAsrTextAt = 0L
                                        recordStartAligned = 0L
                                    } catch (_: Exception) {}
                                    try { voiceWriter?.close() } catch (_: Exception) {}
                                    finalizeRecording()
                                }
                            }
                        }
                    }
                    Log.i(TAG, "停止读取音频，发送结束信号")
                    sendEndMessage()
                } catch (e: Exception) {
                    Log.e(TAG, "识别线程异常", e)
                    mainHandler.post {
                        voiceManager.onRecognitionError("识别异常: ${e.message}")
                    }
                }
            }
        }
        recognizerThread = thread
        thread.start()
    }

    /**
     * Interrupts the recognizer thread and waits up to 500ms for it to exit.
     * The thread reference is always cleared, even if interrupt/join throws.
     */
    private fun stopRecognizerThread() {
        try {
            recognizerThread?.let { t ->
                t.interrupt()
                t.join(500)
            }
        } catch (e: Exception) {
            Log.w(TAG, "停止识别线程发生异常", e)
        } finally {
            recognizerThread = null
        }
    }

    /**
     * Notifies the FunASR server that speech input has ended by sending a
     * {"is_speaking": false} JSON message over the audio WebSocket.
     * No-op if the socket is not connected; send failures are only logged.
     */
    private fun sendEndMessage() {
        try {
            val payload = JSONObject().put("is_speaking", false).toString()
            audioWebSocket?.send(payload)
        } catch (e: Exception) {
            Log.w(TAG, "发送结束消息失败", e)
        }
    }

    /**
     * Exports the most recent [ms] milliseconds (default 2s) of PCM16 mono
     * audio from the recent-audio ring buffer, e.g. for voiceprint use.
     * The result is clamped to what the buffer actually holds and kept
     * 16-bit-sample aligned; the window's average absolute amplitude is logged.
     */
    fun exportRecentPcm(ms: Int = 2000): ByteArray {
        val size = recentAudioBytes.size
        val wanted = (VoiceConfig.ASR_SAMPLE_RATE * ms / 1000) * 2
        val available = minOf(recentBytesWritten, size)
        var count = minOf(wanted, available)
        count -= count % 2  // keep whole 16-bit samples
        val out = ByteArray(count)
        if (count > 0) {
            // Read pointer: `count` bytes behind the write index, wrapped and
            // nudged to an even offset so samples stay aligned.
            var idx = (recentWriteIndex - count + size) % size
            if (idx % 2 != 0) idx = (idx + 1) % size
            for (o in 0 until count) {
                out[o] = recentAudioBytes[idx]
                idx = (idx + 1) % size
            }
        }
        // Average absolute amplitude of the exported window (diagnostics only).
        var sum = 0L
        var n = 0
        var p = 0
        while (p + 1 < out.size) {
            // Little-endian 16-bit decode; toShort() performs the sign extension.
            val v = (((out[p + 1].toInt() and 0xFF) shl 8) or (out[p].toInt() and 0xFF)).toShort().toInt()
            sum += kotlin.math.abs(v)
            n++
            p += 2
        }
        val avgAbs = if (n > 0) (sum / n).toInt() else 0
        Log.i(TAG, "exportRecentPcm bytes=$count avgAbs=$avgAbs")
        return out
    }

    /**
     * Exports the most recent [ms] milliseconds (default 2s) of PCM16 mono
     * audio from the recognition ring buffer, clamped to what the buffer
     * holds and kept 16-bit-sample aligned. The exported window's average
     * absolute amplitude is logged for diagnostics.
     */
    fun exportRecentRecognitionPcm(ms: Int = 2000): ByteArray {
        val bytesNeeded = (VoiceConfig.ASR_SAMPLE_RATE * ms / 1000) * 2
        val available = recognitionBytesWritten.coerceAtMost(recognitionAudioBytes.size)
        var count = bytesNeeded.coerceAtMost(available)
        if (count % 2 != 0) count -= 1  // whole 16-bit samples only
        val out = ByteArray(count)
        if (count > 0) {
            // Read pointer: `count` bytes behind the write index, wrapped, sample-aligned.
            var idx = (recognitionWriteIndex - count + recognitionAudioBytes.size) % recognitionAudioBytes.size
            if (idx % 2 != 0) idx = (idx + 1) % recognitionAudioBytes.size
            var o = 0
            while (o < count) {
                out[o] = recognitionAudioBytes[idx]
                o++
                idx = (idx + 1) % recognitionAudioBytes.size
            }
        }
        var sumAbs = 0L
        var samples = 0
        var i = 0
        while (i + 1 < out.size) {
            val low = out[i].toInt() and 0xFF
            // FIX: mask the high byte too (was missing `and 0xFF`, unlike the
            // identical decode in exportRecentPcm). The unmasked sign-extended
            // Int happened to yield the same |sample| here, but the masked
            // form is the intended little-endian decode and keeps both
            // export methods consistent.
            val high = out[i + 1].toInt() and 0xFF
            val sample = (high shl 8) or low
            val signed = if (sample and 0x8000 != 0) sample or -0x10000 else sample
            sumAbs += kotlin.math.abs(signed)
            samples++
            i += 2
        }
        val avgAbs = if (samples > 0) (sumAbs / samples).toInt() else 0
        Log.i(TAG, "exportRecentRecognitionPcm bytes=$count avgAbs=$avgAbs")
        return out
    }

    /**
     * Exports PCM16 bytes from the recent-audio ring buffer covering the
     * absolute wall-clock window [startAbsMs, endAbsMs], treating the byte at
     * the write index as "now". The window is clamped to the buffer contents.
     */
    fun exportRecentPcmBetweenAbs(startAbsMs: Long, endAbsMs: Long): ByteArray {
        val now = System.currentTimeMillis()
        val winStart = startAbsMs.coerceAtLeast(0L)
        val winEnd = endAbsMs.coerceAtLeast(winStart)
        val bufSize = recentAudioBytes.size
        val available = minOf(recentBytesWritten, bufSize)
        var count = (VoiceConfig.ASR_SAMPLE_RATE * (winEnd - winStart).toInt() / 1000) * 2
        count = count.coerceAtMost(available)
        if (count % 2 != 0) count -= 1
        // Bytes recorded after winEnd that must be skipped back over from "now".
        val backMs = (now - winEnd).coerceAtLeast(0L).toInt()
        var back = (VoiceConfig.ASR_SAMPLE_RATE * backMs / 1000) * 2
        back = back.coerceAtMost(bufSize - count)
        val out = ByteArray(count)
        if (count > 0) {
            var idx = (recentWriteIndex - back - count + bufSize) % bufSize
            if (idx % 2 != 0) idx = (idx + 1) % bufSize
            for (o in 0 until count) {
                out[o] = recentAudioBytes[idx]
                idx = (idx + 1) % bufSize
            }
        }
        return out
    }

    /**
     * Exports PCM covering [ms] milliseconds starting at the last wake-up
     * timestamp; returns an empty array if no wake-up has been recorded.
     */
    fun exportPcmSinceWakeup(ms: Int): ByteArray {
        val from = wakeupAtMs
        return if (from > 0L) exportRecentPcmBetweenAbs(from, from + ms) else ByteArray(0)
    }

    /**
     * Exports PCM16 bytes from the recognition ring buffer covering the
     * absolute wall-clock window [startAbsMs, endAbsMs], treating the byte at
     * the write index as "now". Mirrors [exportRecentPcmBetweenAbs] but reads
     * the recognition buffer.
     */
    private fun exportRecognitionPcmBetweenAbs(startAbsMs: Long, endAbsMs: Long): ByteArray {
        val now = System.currentTimeMillis()
        val winStart = startAbsMs.coerceAtLeast(0L)
        val winEnd = endAbsMs.coerceAtLeast(winStart)
        val bufSize = recognitionAudioBytes.size
        val available = minOf(recognitionBytesWritten, bufSize)
        var count = (VoiceConfig.ASR_SAMPLE_RATE * (winEnd - winStart).toInt() / 1000) * 2
        count = count.coerceAtMost(available)
        if (count % 2 != 0) count -= 1
        // Bytes recorded after winEnd that must be skipped back over from "now".
        val backMs = (now - winEnd).coerceAtLeast(0L).toInt()
        var back = (VoiceConfig.ASR_SAMPLE_RATE * backMs / 1000) * 2
        back = back.coerceAtMost(bufSize - count)
        val out = ByteArray(count)
        if (count > 0) {
            var idx = (recognitionWriteIndex - back - count + bufSize) % bufSize
            if (idx % 2 != 0) idx = (idx + 1) % bufSize
            for (o in 0 until count) {
                out[o] = recognitionAudioBytes[idx]
                idx = (idx + 1) % bufSize
            }
        }
        return out
    }

    /**
     * Finalizes the active recording session:
     *  1. If a voiceprint window is active, trims silence from its bytes and
     *     saves them as a voiceprint candidate WAV.
     *  2. Cuts the session buffer down to the aligned window starting at
     *     [recordStartAligned] (capped at VOICEPRINT_KEEP_MS and bounded by
     *     [recLastMsgAt]), resamples it to 44.1kHz and saves it as the last
     *     recorded file.
     *  3. Always resets all session state back to IDLE in the finally block.
     */
    private fun finalizeRecording() {
        // Complete the voiceprint window capture.
        if (vpActive) {
            try {
                val raw = vpBytes.toByteArray()
                val trimmed = trimSilence(raw, VoiceConfig.ASR_SAMPLE_RATE)
                val dir3 = VoiceStorageHelper.getVoiceRecordDir(context)
                val vf2 = java.io.File(dir3, "voiceprint_${System.currentTimeMillis()}.wav")
                writeWavFile(vf2, trimmed, VoiceConfig.ASR_SAMPLE_RATE)
                lastVoiceprintCandidateFile = vf2
                vpActive = false
                
                // Diagnostics: how much audio the silence trimming removed.
                val originalDurationMs = (raw.size / 2) * 1000 / VoiceConfig.ASR_SAMPLE_RATE
                val trimmedDurationMs = (trimmed.size / 2) * 1000 / VoiceConfig.ASR_SAMPLE_RATE
                val silenceRemovedPercent = if (originalDurationMs > 0) ((originalDurationMs - trimmedDurationMs) * 100 / originalDurationMs) else 0
                
                Log.i(TAG, "✅ finalizeRecording中完成voiceprint窗口")
                Log.i(TAG, "📁 文件路径: ${vf2.absolutePath}")
                Log.i(TAG, "📊 数据大小: 原始=${raw.size}字节(${originalDurationMs}ms), 裁剪后=${trimmed.size}字节(${trimmedDurationMs}ms)")
                Log.i(TAG, "🔇 静音移除: ${silenceRemovedPercent}%")
                Log.i(TAG, "📝 最终累积文本: '${cumulativeAsrText.toString()}'")
            } catch (e: Exception) {
                Log.e(TAG, "voiceprint窗口完成失败", e)
            }
        }
        
        try {
            // 16-bit mono: 2 bytes per sample.
            val bytesPerMs = (VoiceConfig.ASR_SAMPLE_RATE * 2) / 1000
            var src = synchronized(recLock) { recBytes.toByteArray() }
            if (recordStartAligned > 0L && recStartAt > 0L) {
                // Drop audio recorded before the aligned start, then keep at
                // most min(VOICEPRINT_KEEP_MS, time until the last message).
                val deltaStartMs = (recordStartAligned - recStartAt).coerceAtLeast(0L)
                val skip = (deltaStartMs * bytesPerMs).toInt()
                val maxKeepMs = (VoiceConfig.VOICEPRINT_KEEP_MS).toLong()
                val lastMsgBoundMs = (recLastMsgAt - recordStartAligned).coerceAtLeast(0L)
                val keepMs = kotlin.math.min(maxKeepMs, lastMsgBoundMs)
                val keepBytesLimit = (keepMs * bytesPerMs).toInt()
                if (skip < src.size) {
                    val available = src.size - skip
                    // keepBytesLimit == 0 means "no bound": keep everything after skip.
                    val take = if (keepBytesLimit > 0) kotlin.math.min(keepBytesLimit, available) else available
                    src = src.copyOfRange(skip, skip + take)
                } else {
                    src = ByteArray(0)
                }
            }
            // Persist the session audio at 44.1kHz for playback/upload.
            val dst = resamplePcm16Mono(src, VoiceConfig.ASR_SAMPLE_RATE, 44100)
            val dir = VoiceStorageHelper.getVoiceRecordDir(context)
            val f = java.io.File(dir, "rec_${System.currentTimeMillis()}.wav")
            writeWavFile(f, dst, 44100)
            lastRecSavedFile = f
            Log.i(TAG, "REC saved path=${f.absolutePath} bytes=${dst.size}")
            try { voiceWriter?.close() } catch (_: Exception) {}
        } catch (e: Exception) {
            Log.e(TAG, "REC finalize error", e)
        } finally {
            // Reset all per-session state regardless of success.
            recState = RecState.IDLE
            recStartAt = 0L
            recLastMsgAt = 0L
            recGraceUntil = 0L
            synchronized(recLock) { recBytes.reset() }
            voiceWriter = null
            voiceRecFile = null
            startVoiceRecTime = 0L
            wakeupDetected = false
            wakeupAtMs = 0L
            startRecordingAtMs = 0L
            voiceStartAtMs = 0L
            lastAsrAtMs = 0L
            vpExtracted = false
        }
    }

    /** Returns the most recently finalized (44.1kHz) recording WAV file, if any. */
    fun getLastRecordedFile(): java.io.File? = lastRecSavedFile

    /**
     * Linear-interpolation resampler for PCM16 mono audio.
     * Returns the input array unchanged when the rates already match.
     */
    private fun resamplePcm16Mono(src: ByteArray, srcRate: Int, dstRate: Int): ByteArray {
        if (srcRate == dstRate) return src
        val inSamples = src.size / 2
        val outSamples = ((inSamples.toLong() * dstRate) / srcRate).toInt()
        val out = ByteArray(outSamples * 2)
        for (n in 0 until outSamples) {
            // Fractional source position corresponding to output sample n.
            val pos = n.toDouble() * srcRate / dstRate
            val base = pos.toInt().coerceIn(0, inSamples - 1)
            val frac = pos - base
            val a = getSample(src, base)
            val b = getSample(src, if (base + 1 < inSamples) base + 1 else base)
            val mixed = (a + (b - a) * frac).toInt()
                .coerceIn(Short.MIN_VALUE.toInt(), Short.MAX_VALUE.toInt())
            setSample(out, n, mixed)
        }
        return out
    }

    /** Reads the little-endian signed 16-bit sample at sample [index] (not byte offset). */
    private fun getSample(bytes: ByteArray, index: Int): Int {
        val at = index * 2
        val lo = bytes[at].toInt() and 0xFF
        val hi = bytes[at + 1].toInt() and 0xFF
        return ((hi shl 8) or lo).toShort().toInt()
    }

    /** Writes [value], clamped to Short range, as a little-endian 16-bit sample at sample [index]. */
    private fun setSample(bytes: ByteArray, index: Int, value: Int) {
        val at = index * 2
        val clamped = value.coerceIn(Short.MIN_VALUE.toInt(), Short.MAX_VALUE.toInt())
        bytes[at] = clamped.toByte()          // low byte (toByte truncates to low 8 bits)
        bytes[at + 1] = (clamped shr 8).toByte()  // high byte
    }

    /**
     * Writes [pcm] (16-bit mono little-endian samples) to [file] as a standard
     * 44-byte-header RIFF/WAVE file at [sampleRate].
     *
     * Fix: the output stream is now closed via use{} so the file descriptor is
     * released even when a write throws (previously it leaked on exception).
     * The header is built in a single little-endian ByteBuffer instead of
     * twelve separate allocations.
     */
    private fun writeWavFile(file: java.io.File, pcm: ByteArray, sampleRate: Int) {
        val channels = 1
        val bitsPerSample = 16
        val byteRate = sampleRate * channels * bitsPerSample / 8
        val blockAlign = (channels * bitsPerSample / 8).toShort()
        val dataSize = pcm.size
        // Canonical 44-byte PCM WAV header, all multi-byte fields little-endian.
        val header = java.nio.ByteBuffer.allocate(44).order(java.nio.ByteOrder.LITTLE_ENDIAN)
        header.put("RIFF".toByteArray())
        header.putInt(36 + dataSize)            // RIFF chunk size = 36 + data bytes
        header.put("WAVE".toByteArray())
        header.put("fmt ".toByteArray())
        header.putInt(16)                       // fmt chunk size for PCM
        header.putShort(1)                      // audio format 1 = PCM
        header.putShort(channels.toShort())
        header.putInt(sampleRate)
        header.putInt(byteRate)
        header.putShort(blockAlign)
        header.putShort(bitsPerSample.toShort())
        header.put("data".toByteArray())
        header.putInt(dataSize)
        java.io.FileOutputStream(file).use { out ->
            out.write(header.array())
            out.write(pcm)
            out.flush()
        }
    }

    /**
     * Trims leading and trailing silence from PCM16 mono [src]. Scans up to
     * 300ms inward from each end in 20ms frames; a frame counts as voice when
     * its average absolute amplitude reaches half of
     * FUNASR_VAD_AVG_ABS_THRESHOLD (halved on purpose so onsets are kept).
     * Returns [src] unchanged when the detected bounds are degenerate.
     */
    private fun trimSilence(src: ByteArray, sampleRate: Int): ByteArray {
        val frameMs = 20
        val bytesPerMs = (sampleRate * 2) / 1000
        val frameBytes = frameMs * bytesPerMs
        val threshold = VoiceConfig.FUNASR_VAD_AVG_ABS_THRESHOLD / 2

        // Average absolute amplitude of the 20ms frame starting at byte `offset`.
        fun frameAvg(offset: Int): Int {
            var sum = 0L
            var j = offset
            while (j + 1 < offset + frameBytes) {
                val lo = src[j].toInt() and 0xFF
                val hi = src[j + 1].toInt() and 0xFF
                sum += kotlin.math.abs(((hi shl 8) or lo).toShort().toInt())
                j += 2
            }
            val n = frameBytes / 2
            return if (n > 0) (sum / n).toInt() else 0
        }

        // Scan forward from the head for the first voiced frame (max 300ms).
        var start = 0
        var i = 0
        while (i + frameBytes <= src.size && i < 300 * bytesPerMs) {
            if (frameAvg(i) >= threshold) {
                start = i
                break
            }
            i += frameBytes
        }

        // Scan backward from the tail for the last voiced frame (max 300ms).
        var end = src.size
        var k = src.size - frameBytes
        val tailLimit = src.size - (300 * bytesPerMs)
        while (k >= 0 && k >= tailLimit) {
            if (frameAvg(k) >= threshold) {
                end = k + frameBytes
                break
            }
            k -= frameBytes
        }

        return if (end <= start) src else src.copyOfRange(start, end)
    }

    /**
     * Synchronously recognizes a recorded WAV file by replaying its PCM data
     * over a fresh FunASR WebSocket in offline mode. Blocks up to 3 seconds
     * for a final result and returns the last non-blank text seen, or null.
     *
     * Fix: the WebSocket is cancelled and the per-call OkHttpClient's
     * executor/connection pool are released on every exit path — previously
     * both leaked whenever the 3s wait timed out, the connection failed, or
     * the server never sent is_final.
     */
    private fun recognizeFileSync(file: java.io.File?): String? {
        if (file == null || !file.exists()) return null
        var client: OkHttpClient? = null
        var ws: WebSocket? = null
        return try {
            client = OkHttpClient.Builder().retryOnConnectionFailure(true).build()
            val req = Request.Builder().url(VoiceConfig.FUNASR_WS_URL).build()
            var finalText: String? = null
            val latch = java.util.concurrent.CountDownLatch(1)
            ws = client.newWebSocket(req, object : WebSocketListener() {
                override fun onOpen(webSocket: WebSocket, response: Response) {
                    webSocket.send(buildInitMessage(false))
                    val bytes = file.readBytes()
                    // Skip the 44-byte RIFF/WAVE header; send raw PCM in chunks.
                    var offset = 44
                    while (offset < bytes.size) {
                        val len = kotlin.math.min(VoiceConfig.FUNASR_SEND_SIZE, bytes.size - offset)
                        webSocket.send(bytes.copyOfRange(offset, offset + len).toByteString())
                        offset += len
                    }
                    // Signal end of speech so the server emits a final result.
                    val obj = JSONObject()
                    obj.put("is_speaking", false)
                    webSocket.send(obj.toString())
                }
                override fun onMessage(webSocket: WebSocket, text: String) {
                    try {
                        val j = JSONObject(text)
                        val t = j.optString("text")
                        if (t.isNotBlank()) finalText = t
                        if (j.optBoolean("is_final", false)) {
                            webSocket.close(1000, "final")
                            latch.countDown()
                        }
                    } catch (_: Exception) {}
                }
                override fun onFailure(webSocket: WebSocket, t: Throwable, response: Response?) {
                    latch.countDown()
                }
            })
            latch.await(3, java.util.concurrent.TimeUnit.SECONDS)
            finalText
        } catch (_: Exception) {
            null
        } finally {
            // Always tear down the socket and the client's worker resources.
            try { ws?.cancel() } catch (_: Exception) {}
            try {
                client?.dispatcher?.executorService?.shutdown()
                client?.connectionPool?.evictAll()
            } catch (_: Exception) {}
        }
    }

    /**
     * Called when the wake word is detected. Starts a recording session
     * (seeded with recent pre-roll audio) if one is not already active, and
     * computes the aligned recording start time from the most recent audio
     * send event at or before the wake-up moment.
     */
    fun notifyWakeup() {
        wakeupDetected = true
        wakeupAtMs = System.currentTimeMillis()
        if (recState == RecState.IDLE) {
            // Seed the session with up to START_REC_BACKOFF_MS of audio that
            // preceded the wake word, so the word's onset is not lost.
            val pre = exportRecentPcm(VoiceConfig.START_REC_BACKOFF_MS)
            synchronized(recLock) { recBytes.reset(); if (pre.isNotEmpty()) recBytes.write(pre) }
            val dir = VoiceStorageHelper.getVoiceRecordDir(context)
            val f = java.io.File(dir, "voice_rec_wav_${System.currentTimeMillis()}.wav")
            voiceRecFile = f
            val w = WavFileWriter(f, VoiceConfig.ASR_SAMPLE_RATE)
            w.open()
            if (pre.isNotEmpty()) w.appendBytes(pre)
            voiceWriter = w
            recState = RecState.RECORDING
            recStartAt = wakeupAtMs
            recLastMsgAt = wakeupAtMs
            recGraceUntil = 0L
        }
        // Find the latest audio-send event that is not after the wake-up time.
        var lastSend = 0L
        val it = sendEventTimes.descendingIterator()
        while (it.hasNext()) {
            val t = it.next()
            if (t <= wakeupAtMs) { lastSend = t; break }
        }
        // Aligned start = backoff before that event, but never after wake-up itself.
        val base = if (lastSend > 0L) lastSend else wakeupAtMs
        var candidate = base - VoiceConfig.START_REC_BACKOFF_MS
        if (candidate > wakeupAtMs) candidate = wakeupAtMs
        recordStartAligned = candidate
        startRecordingAtMs = wakeupAtMs
        voiceStartAtMs = 0L
        lastAsrAtMs = 0L
        vpExtracted = false
        // The voiceprint window is no longer started here; it begins when ASR
        // first reports recognized text.
    }

    /** Returns the most recently saved voiceprint candidate WAV file, if any. */
    fun getLastVoiceprintCandidateFile(): java.io.File? = lastVoiceprintCandidateFile

    /**
     * Trims the accumulated ASR text from the front so its UTF-8 encoding is
     * at most 1000 bytes (presumably a server-side payload limit — TODO
     * confirm the origin of the 1000-byte cap).
     *
     * Fixes: advances by whole code points instead of single chars, so a
     * surrogate pair (emoji etc.) is never split into a lone surrogate that
     * the UTF-8 encoder would mangle; and tracks the remaining byte count
     * incrementally instead of re-encoding the whole string per iteration
     * (was O(n²)).
     */
    private fun enforceTextUtf8Limit() {
        try {
            val s = cumulativeAsrText.toString()
            var remaining = s.toByteArray(Charsets.UTF_8).size
            if (remaining <= 1000) return
            var start = 0
            while (remaining > 1000 && start < s.length) {
                // Drop one whole code point and subtract its encoded size.
                val chars = Character.toChars(s.codePointAt(start))
                remaining -= String(chars).toByteArray(Charsets.UTF_8).size
                start += chars.size
            }
            cumulativeAsrText = StringBuilder(s.substring(start))
        } catch (_: Exception) {}
    }

    /**
     * Hard-resets all per-utterance state when a new listening session
     * starts: accumulated ASR text, trigger-word flags, the voiceprint
     * capture window, the recent-audio ring buffer indices, and the
     * recording timing markers. [now] becomes the new session origin.
     */
    internal fun forceClearOnListeningStart(now: Long) {
        // Accumulated recognition text and trigger-word detection state.
        cumulativeAsrText.clear()
        triggerWordDetected = false
        triggerWordDetectTime = 0L
        // Voiceprint capture window.
        vpActive = false
        vpBytes.reset()
        vpStartAtMs = 0L
        vpExtracted = false
        // Recent-audio ring buffer positions.
        recentWriteIndex = 0
        recentBytesWritten = 0
        // Recording session markers.
        startRecordingAtMs = now
        synchronized(recLock) { recBytes.reset() }
        recordStartAligned = 0L
        firstAsrTextAt = 0L
        lastWsMsgAt = 0L
        voiceStartAtMs = 0L
        lastAsrAtMs = 0L
    }

    /**
     * Extracts the voiceprint audio window [voiceStartAtMs, lastAsrAtMs]
     * (capped at VOICEPRINT_KEEP_MS) from the recent-audio ring buffer.
     * Returns an empty array when any of the timing markers is unset.
     */
    private fun exportVoiceprintWindow(): ByteArray {
        val now = System.currentTimeMillis()
        if (startRecordingAtMs <= 0L || voiceStartAtMs <= 0L || lastAsrAtMs <= 0L) return ByteArray(0)
        // Window start: voice onset, but never before recording started.
        var vs = voiceStartAtMs
        if (vs < startRecordingAtMs) vs = startRecordingAtMs
        var keepMs = (lastAsrAtMs - vs).coerceAtLeast(0L)
        if (keepMs > VoiceConfig.VOICEPRINT_KEEP_MS) keepMs = VoiceConfig.VOICEPRINT_KEEP_MS.toLong()
        // Audio captured after the last ASR message ("lag") must be dropped
        // from the tail of the exported buffer.
        val lagMs = (now - lastAsrAtMs).coerceAtLeast(0L)
        val needMs = (keepMs + lagMs).toInt()
        val pre = exportRecentPcm(needMs)
        val bytesPerMs = (VoiceConfig.ASR_SAMPLE_RATE * 2) / 1000
        val dropTail = (lagMs * bytesPerMs).toInt()
        val keepBytes = (keepMs * bytesPerMs).toInt()
        if (pre.isEmpty()) return pre
        // Keep at most keepBytes ending right where the lag portion begins.
        val take = kotlin.math.max(0, pre.size - dropTail)
        val start = kotlin.math.max(0, take - keepBytes)
        val end = start + kotlin.math.min(keepBytes, take)
        if (end <= start) return ByteArray(0)
        return pre.copyOfRange(start, end)
    }

    internal object StateData {
        /**
         * Snapshot of the per-utterance mutable caches that must be reset
         * whenever a new listening session begins.
         */
        data class Caches(
            var cumulativeAsrText: StringBuilder,
            var triggerWordDetected: Boolean,
            var triggerWordDetectTime: Long,
            var vpActive: Boolean,
            var vpExtracted: Boolean,
            var recentWriteIndex: Int,
            var recentBytesWritten: Int,
            var startRecordingAtMs: Long,
            var recordStartAligned: Long,
            var firstAsrTextAt: Long,
            var lastWsMsgAt: Long,
            var voiceStartAtMs: Long,
            var lastAsrAtMs: Long
        )

        /**
         * Resets [c] in place for a fresh listening session — every field is
         * zeroed/cleared except [Caches.startRecordingAtMs], which is set to
         * [now] — and returns the same instance for chaining.
         */
        fun clearOnListeningStart(c: Caches, now: Long): Caches = c.apply {
            cumulativeAsrText.clear()
            triggerWordDetected = false
            triggerWordDetectTime = 0L
            vpActive = false
            vpExtracted = false
            recentWriteIndex = 0
            recentBytesWritten = 0
            startRecordingAtMs = now
            recordStartAligned = 0L
            firstAsrTextAt = 0L
            lastWsMsgAt = 0L
            voiceStartAtMs = 0L
            lastAsrAtMs = 0L
        }
    }
}
