package com.amu.aidemo.android

import android.content.Context
import android.util.Log
import com.amu.aidemo.android.audio.AudioPlayer
import com.amu.aidemo.android.audio.AudioRecorder
import com.amu.aidemo.android.audio.WakeWordSoundPlayer
import com.amu.aidemo.android.wakeword.WakeWordDetector
import com.amu.aidemo.android.xfyun.XfyunIatClient
import com.amu.aidemo.android.xfyun.XfyunTtsClient
import com.amu.aidemo.android.zhipu.ZhipuAiClient
import com.amu.aidemo.android.zhipu.ZhipuMessage
import com.amu.aidemo.ui.model.AssistantState
import com.amu.aidemo.ui.model.ChatMessage
import com.amu.aidemo.ui.model.MessageType
import com.amu.aidemo.ui.model.VoiceAssistantUiState
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.util.UUID
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.Job
import kotlinx.coroutines.cancel
import kotlinx.coroutines.delay
import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext

// Logcat tag shared by all logging in this file.
private const val TAG = "VoiceAssistantManager"

/**
 * Orchestrates the full on-device voice assistant pipeline:
 * wake-word detection -> Xfyun IAT speech recognition -> Zhipu AI streaming
 * chat -> Xfyun TTS playback. Every UI state change is published through
 * [onStateChanged] on the main thread.
 */
class VoiceAssistantManager(
    private val context: Context,
    // Xfyun speech recognition (IAT) credentials
    private val xfyunIatAppId: String = "your_iat_app_id",
    private val xfyunIatApiKey: String = "your_iat_api_key",
    private val xfyunIatApiSecret: String = "your_iat_api_secret",
    // Xfyun text-to-speech (TTS) credentials
    private val xfyunTtsAppId: String = "your_tts_app_id",
    private val xfyunTtsApiKey: String = "your_tts_api_key",
    private val xfyunTtsApiSecret: String = "your_tts_api_secret",
    // Zhipu AI API key
    private val zhipuApiKey: String = "your_zhipu_api_key",
    // Optional raw resource ID for the wake-up chime (e.g. R.raw.hi_yue)
    private val wakeWordSoundResId: Int? = null
) {
    
    // Audio/network collaborators; created in initialize() and recreated as
    // the pipeline moves between phases (recorder in particular is rebuilt
    // whenever the consumer of microphone data changes).
    private var audioRecorder: AudioRecorder? = null
    private var audioPlayer: AudioPlayer? = null
    private var wakeWordSoundPlayer: WakeWordSoundPlayer? = null
    private var wakeWordDetector: WakeWordDetector? = null
    private var xfyunClient: XfyunIatClient? = null
    private var xfyunTtsClient: XfyunTtsClient? = null
    private var zhipuClient: ZhipuAiClient? = null
    private var scope = CoroutineScope(Dispatchers.Main + Job())
    
    // True until the first audio frame of an IAT session has been sent
    // (the Xfyun protocol flags the first frame specially).
    private var isFirstAudioFrame = true
    private var recognizedText = StringBuilder()
    private var aiResponseBuilder = StringBuilder()
    
    // Phase start timestamps used to compute per-phase latencies for the chat UI.
    private var asrStartTime = 0L
    private var llmStartTime = 0L
    private var ttsStartTime = 0L
    
    // TTS audio buffer (playback starts while synthesis is still streaming in).
    // Guard all access with synchronized(ttsAudioBuffer).
    private val ttsAudioBuffer = mutableListOf<ByteArray>()
    private var audioWriteJob: Job? = null
    private var isTtsSynthesizing = false  // TTS synthesis in-progress flag
    
    // ASR timeout control
    private var asrTimeoutJob: Job? = null
    private val ASR_MAX_DURATION = 60_000L  // Maximum recording duration: 60 seconds
    
    // State callback observed by the UI layer.
    var onStateChanged: ((VoiceAssistantUiState) -> Unit)? = null
    
    // Single source of truth for UI state; every write notifies the observer.
    private var currentState = VoiceAssistantUiState()
        set(value) {
            field = value
            onStateChanged?.invoke(value)
        }
    
    /**
     * Initializes all pipeline components off the main thread (models load
     * lazily). Failures are logged and surfaced in the transcript rather
     * than thrown. Call once after construction.
     */
    fun initialize() {
        scope.launch(Dispatchers.IO) {
            try {
                // Log configuration info (for debugging)
                Log.d(TAG, "Initializing wake word detector...")
                wakeWordDetector = WakeWordDetector(context).apply {
                    onWakeWordDetected = { confidence ->
                        handleWakeWordDetected(confidence)
                    }
                }
                
                // Initialize Zhipu AI client (callbacks are wired per request, not here)
                zhipuClient = ZhipuAiClient(zhipuApiKey)
                Log.d(TAG, "Zhipu AI client initialized")
                
                // Initialize Audio Player
                audioPlayer = AudioPlayer().apply {
                    initialize()
                }
                Log.d(TAG, "Audio player initialized")
                
                // Initialize Wake Word Sound Player
                wakeWordSoundPlayer = WakeWordSoundPlayer(context)
                Log.d(TAG, "Wake word sound player initialized")
                
                // Recorder feeds microphone chunks into processAudioChunk (wake-word mode).
                audioRecorder = AudioRecorder(context).apply {
                    onAudioChunk = { audioData ->
                        processAudioChunk(audioData)
                    }
                    onError = { error ->
                        Log.e(TAG, "Audio error: $error")
                        updateState(
                            currentState.copy(
                                state = AssistantState.IDLE,
                                isAnimating = false,
                                currentTranscript = "错误: $error"
                            )
                        )
                    }
                }
                
                Log.d(TAG, "Initialization complete")
            } catch (e: Exception) {
                Log.e(TAG, "Failed to initialize", e)
                updateState(
                    currentState.copy(
                        currentTranscript = "初始化失败: ${e.message}"
                    )
                )
            }
        }
    }
    
    /**
     * Enters the wake-word listening phase. Only legal from IDLE; any other
     * state is logged and the call is ignored. Rebuilds the recorder
     * defensively if initialize() has not provided one.
     */
    fun startListeningForWakeWord() {
        if (currentState.state != AssistantState.IDLE) {
            Log.w(TAG, "Cannot start listening, current state: ${currentState.state}")
            return
        }
        
        updateState(
            currentState.copy(
                state = AssistantState.LISTENING,
                isAnimating = true,
                currentTranscript = "等待唤醒词..."
            )
        )
        
        // Defensive: recreate the recorder when it is missing.
        if (audioRecorder == null) {
            Log.w(TAG, "AudioRecorder is null, reinitializing...")
            audioRecorder = AudioRecorder(context).apply {
                onAudioChunk = ::processAudioChunk
                onError = { message -> Log.e(TAG, "Audio error: $message") }
            }
        }
        
        audioRecorder?.startRecording()
        Log.d(TAG, "Started listening for wake word")
    }
    
    /**
     * Stops everything (recording, IAT session, TTS playback), recreates the
     * recorder so wake-word monitoring can resume later, and resets the UI to
     * idle. All teardown runs on the IO dispatcher; the state reset hops back
     * to the main thread.
     */
    fun stopListening() {
        Log.d(TAG, "Stopping all activities...")
        
        scope.launch(Dispatchers.IO) {
            // Stop recording
            audioRecorder?.stopRecording()
            
            // Disconnect from the Xfyun IAT service
            xfyunClient?.disconnect()
            xfyunClient = null
            
            // Stop TTS
            audioPlayer?.stop()
            xfyunTtsClient?.disconnect()
            xfyunTtsClient = null
            
            // Recreate the AudioRecorder for wake-word detection
            try {
                audioRecorder = AudioRecorder(context).apply {
                    onAudioChunk = { audioData ->
                        processAudioChunk(audioData)
                    }
                    onError = { error ->
                        Log.e(TAG, "Audio error: $error")
                    }
                }
                Log.d(TAG, "AudioRecorder reinitialized for wake word detection")
            } catch (e: Exception) {
                Log.e(TAG, "Failed to reinitialize AudioRecorder", e)
            }
            
            // Reset state on the main thread
            withContext(Dispatchers.Main) {
                updateState(
                    currentState.copy(
                        state = AssistantState.IDLE,
                        isAnimating = false,
                        currentTranscript = ""
                    )
                )
            }
            
            Log.d(TAG, "Stopped listening and cleaned up resources")
        }
    }
    
    /**
     * Feeds raw microphone samples into the wake-word detector while the
     * assistant is waiting to be woken. Samples arriving in any other state
     * have no consumer here and are dropped.
     */
    private fun processAudioChunk(audioData: FloatArray) {
        if (currentState.state == AssistantState.LISTENING) {
            wakeWordDetector?.processAudioChunk(audioData)
        }
    }
    
    /**
     * Wake-word detector callback. Stops the wake-word recording session and
     * transitions into the chime + speech-recognition flow.
     *
     * @param confidence detector score for the triggering frame (logged as a
     *        percentage, so presumably in 0..1 — confirm with WakeWordDetector)
     */
    private fun handleWakeWordDetected(confidence: Float) {
        Log.w(TAG, "╔════════════════════════════════════════╗")
        Log.w(TAG, "║   WAKE WORD DETECTED!                  ║")
        Log.w(TAG, "╠════════════════════════════════════════╣")
        Log.w(TAG, "║ Confidence: ${String.format("%.4f", confidence).padEnd(24)} ║")
        Log.w(TAG, "║ Percentage: ${String.format("%.2f%%", confidence * 100).padEnd(23)} ║")
        Log.w(TAG, "║ Timestamp: ${System.currentTimeMillis().toString().padEnd(25)} ║")
        Log.w(TAG, "╚════════════════════════════════════════╝")
        
        // Stop wake-word detection; the recorder is repurposed for ASR next.
        audioRecorder?.stopRecording()
        Log.d(TAG, "Wake word detection stopped")
        
        // Play the chime, then begin speech recognition.
        scope.launch {
            playWakeWordSoundAndStartRecognition()
        }
    }
    
    /**
     * Announces a successful wake-up (optionally playing the configured
     * chime) and then hands off to speech recognition. Recognition starts
     * even when the chime fails or throws.
     */
    private suspend fun playWakeWordSoundAndStartRecognition() {
        try {
            // Show the wake-up acknowledgment while the chime plays.
            updateState(
                currentState.copy(
                    state = AssistantState.LISTENING,
                    isAnimating = true,
                    currentTranscript = "唤醒成功..."
                )
            )
            
            when (wakeWordSoundResId) {
                null -> Log.d(TAG, "No wake word sound configured, skipping")
                else -> {
                    Log.d(TAG, "Playing wake word sound from resource: $wakeWordSoundResId")
                    val played = wakeWordSoundPlayer?.playSoundFromResource(wakeWordSoundResId) == true
                    if (!played) {
                        Log.w(TAG, "Failed to play wake word sound, proceeding anyway")
                    }
                }
            }
            
            // Chime done (or skipped) — start listening for the utterance.
            startSpeechRecognition()
            
        } catch (e: Exception) {
            Log.e(TAG, "Error playing wake word sound", e)
            // Recognition must still start even when the chime crashes.
            startSpeechRecognition()
        }
    }
    
    /**
     * Creates a fresh Xfyun IAT client, wires its callbacks, and connects.
     * The microphone only starts once the WebSocket reports connected;
     * results and errors flow back through handleRecognitionResult /
     * handleRecognitionError.
     */
    private fun startSpeechRecognition() {
        scope.launch {
            try {
                Log.i(TAG, "Starting speech recognition...")
                recognizedText.clear()
                isFirstAudioFrame = true
                asrStartTime = System.currentTimeMillis() // record ASR start time
                
                // Initialize the Xfyun speech-recognition client
                xfyunClient = XfyunIatClient(xfyunIatAppId, xfyunIatApiKey, xfyunIatApiSecret).apply {
                    onResult = { text, isFinal ->
                        handleRecognitionResult(text, isFinal)
                    }
                    onError = { error ->
                        Log.e(TAG, "Recognition error: $error")
                        handleRecognitionError(error)
                    }
                    onConnected = {
                        Log.i(TAG, "Connected to Xfyun, starting audio recording...")
                        startAudioRecordingForRecognition()
                    }
                    onVadDetected = {
                        // VAD detected silence; only the transcript is updated here.
                        // NOTE(review): nothing stops the recorder in this callback —
                        // presumably the IAT client finalizes the session itself; confirm.
                        Log.i(TAG, "VAD detected silence, auto-stopping recording...")
                        scope.launch {
                            updateState(
                                currentState.copy(
                                    currentTranscript = "检测到静音，正在处理..."
                                )
                            )
                        }
                    }
                }
                
                // Connect to the Xfyun server (show connecting status)
                updateState(
                    currentState.copy(
                        state = AssistantState.CONNECTING,
                        isAnimating = true,
                        currentTranscript = "正在连接服务器，请稍候..."
                    )
                )
                
                xfyunClient?.connect()
            } catch (e: Exception) {
                Log.e(TAG, "Failed to start recognition", e)
                handleRecognitionError("启动识别失败: ${e.message}")
            }
        }
    }
    
    /**
     * Starts the microphone for recognition: each chunk is converted to
     * 16-bit little-endian PCM and streamed to the IAT client, with the
     * first frame flagged as the protocol requires. Also arms the
     * maximum-duration recording watchdog.
     */
    private fun startAudioRecordingForRecognition() {
        audioRecorder = AudioRecorder(context).apply {
            onAudioChunk = { floatData ->
                // Convert to a PCM byte array
                val byteData = floatArrayToPcm16(floatData)
                xfyunClient?.sendAudio(byteData, isFirstAudioFrame, false)
                isFirstAudioFrame = false
            }
            onError = { error ->
                Log.e(TAG, "Audio recording error: $error")
            }
        }
        
        audioRecorder?.startRecording()
        
        // Connected successfully — switch to the recording state
        updateState(
            currentState.copy(
                state = AssistantState.LISTENING,
                currentTranscript = "正在录音，说完后会自动停止（或点击按钮手动停止）"
            )
        )
        
        // Arm the timeout watchdog
        startAsrTimeout()
    }
    
    /**
     * Arms a watchdog that force-stops recognition after [ASR_MAX_DURATION]
     * of continuous recording. Re-arming cancels any previous watchdog.
     */
    private fun startAsrTimeout() {
        asrTimeoutJob?.cancel()
        
        asrTimeoutJob = scope.launch {
            delay(ASR_MAX_DURATION)
            Log.w(TAG, "⏰ ASR timeout reached (${ASR_MAX_DURATION}ms), auto-stopping...")
            
            updateState(
                currentState.copy(
                    currentTranscript = "录音时间过长，自动停止..."
                )
            )
            
            // Give the UI a moment to show the notice before tearing down.
            delay(500)
            stopSpeechRecognition()
        }
    }
    
    /** Disarms the ASR watchdog, if one is running. */
    private fun cancelAsrTimeout() {
        asrTimeoutJob?.let { watchdog ->
            watchdog.cancel()
            asrTimeoutJob = null
        }
    }
    
    /**
     * Ends the recording phase: disarms the watchdog, stops the microphone,
     * sends Xfyun the end-of-stream frame, and shows the THINKING state
     * while the final transcript is awaited.
     */
    private fun stopSpeechRecognition() {
        Log.d(TAG, "Stopping speech recognition...")
        
        cancelAsrTimeout()
        audioRecorder?.stopRecording()
        
        // An empty payload flagged as the last frame closes the utterance.
        xfyunClient?.sendAudio(ByteArray(0), false, true)
        
        val processing = currentState.copy(
            state = AssistantState.THINKING,
            isAnimating = true,
            currentTranscript = "正在处理..."
        )
        updateState(processing)
    }
    
    /**
     * Handles a recognition failure: disarms the watchdog, tears down the
     * recorder and IAT client, recreates the recorder for wake-word duty,
     * shows the error, and clears it after 3 seconds if still idle.
     */
    private fun handleRecognitionError(error: String) {
        // Cancel the timeout and clean up resources
        cancelAsrTimeout()
        audioRecorder?.stopRecording()
        xfyunClient?.disconnect()
        xfyunClient = null
        
        // Recreate the AudioRecorder for wake-word detection
        scope.launch(Dispatchers.IO) {
            try {
                audioRecorder = AudioRecorder(context).apply {
                    onAudioChunk = { audioData ->
                        processAudioChunk(audioData)
                    }
                    onError = { err ->
                        Log.e(TAG, "Audio error: $err")
                    }
                }
                Log.d(TAG, "AudioRecorder reinitialized after error")
            } catch (e: Exception) {
                Log.e(TAG, "Failed to reinitialize AudioRecorder", e)
            }
        }
        
        // Show the error, then reset after a delay
        updateState(
            currentState.copy(
                state = AssistantState.IDLE,
                isAnimating = false,
                currentTranscript = "识别错误: $error"
            )
        )
        
        // Clear the error message after 3 seconds
        scope.launch {
            delay(3000)
            if (currentState.state == AssistantState.IDLE) {
                updateState(
                    currentState.copy(
                        currentTranscript = ""
                    )
                )
            }
        }
    }
    
    /**
     * Handles streamed IAT results. Partial results update the live
     * transcript; the final result posts a user chat message (with ASR
     * latency attached) and kicks off the LLM call, or returns to idle when
     * nothing was recognized.
     *
     * NOTE(review): each callback's text is appended to [recognizedText] —
     * this assumes the IAT client delivers incremental fragments, not
     * cumulative transcripts; verify against XfyunIatClient.
     */
    private fun handleRecognitionResult(text: String, isFinal: Boolean) {
        recognizedText.append(text)
        val fullText = recognizedText.toString()
        
        Log.i(TAG, "Recognition: $fullText (final=$isFinal)")
        
        if (isFinal) {
            // Recognition finished — cancel the timeout and stop recording
            cancelAsrTimeout()
            audioRecorder?.stopRecording()
            xfyunClient?.disconnect()
            xfyunClient = null
            
            if (fullText.isNotEmpty()) {
                // Post the user's message
                val currentTime = System.currentTimeMillis()
                val asrDuration = currentTime - asrStartTime // compute ASR latency
                Log.d(TAG, "ASR completed in ${asrDuration}ms")
                
                val userMessage = ChatMessage(
                    id = UUID.randomUUID().toString(),
                    content = fullText,
                    type = MessageType.USER,
                    timestamp = currentTime,
                    asrDuration = asrDuration
                )
                
                updateState(
                    currentState.copy(
                        messages = currentState.messages + userMessage,
                        currentTranscript = ""
                    )
                )
                
                // Call the AI chat endpoint
                llmStartTime = System.currentTimeMillis() // record LLM start time
                simulateAiResponse(fullText)
            } else {
                // Nothing recognized — return to idle
                updateState(
                    currentState.copy(
                        state = AssistantState.IDLE,
                        isAnimating = false,
                        currentTranscript = "未识别到内容"
                    )
                )
                
                // Recreate the AudioRecorder for wake-word detection
                scope.launch(Dispatchers.IO) {
                    try {
                        audioRecorder = AudioRecorder(context).apply {
                            onAudioChunk = { audioData ->
                                processAudioChunk(audioData)
                            }
                            onError = { error ->
                                Log.e(TAG, "Audio error: $error")
                            }
                        }
                        Log.d(TAG, "AudioRecorder reinitialized for wake word detection after empty recognition")
                    } catch (e: Exception) {
                        Log.e(TAG, "Failed to reinitialize AudioRecorder", e)
                    }
                }
            }
        } else {
            // Show the in-progress transcript live
            updateState(
                currentState.copy(
                    currentTranscript = fullText
                )
            )
        }
    }
    
    /**
     * Calls Zhipu AI with the last few turns of conversation plus the new
     * user input, streaming the reply via handleAiResponseChunk /
     * handleAiResponseComplete. Callbacks are re-wired on every call so no
     * stale closures accumulate.
     *
     * NOTE(review): the THINKING state is published twice in a row here; the
     * second update clears the "正在思考..." transcript set by the first —
     * verify whether that is intentional.
     */
    private fun simulateAiResponse(userInput: String) {
        scope.launch {
            try {
                updateState(
                    currentState.copy(
                        state = AssistantState.THINKING,
                        isAnimating = true,
                        currentTranscript = "正在思考..."
                    )
                )
                
                // Build conversation history (limit to last 5 messages to prevent token overflow)
                val conversationHistory = currentState.messages.takeLast(5).map { msg ->
                    val role = when (msg.type) {
                        MessageType.USER -> "user"
                        MessageType.ASSISTANT -> "assistant"
                        else -> "user"
                    }
                    role to msg.content
                }
                
                // Build messages for Zhipu AI
                val messages = zhipuClient?.buildMessages(
                    systemPrompt = com.amu.aidemo.android.config.LlmConfig.SYSTEM_PROMPT,
                    conversationHistory = conversationHistory,
                    userMessage = userInput
                ) ?: emptyList()
                
                Log.d(TAG, "Calling Zhipu AI with ${messages.size} messages")
                
                // Reset response builder
                aiResponseBuilder.clear()
                
                // Update state to thinking (will change to speaking when chunks arrive)
                updateState(
                    currentState.copy(
                        state = AssistantState.THINKING,
                        isAnimating = true,
                        currentTranscript = ""
                    )
                )
                
                // Re-wire callbacks on every call to avoid accumulating stale ones
                zhipuClient?.apply {
                    onStreamChunk = { chunk ->
                        handleAiResponseChunk(chunk)
                    }
                    onStreamComplete = {
                        Log.d(TAG, "Stream complete callback triggered")
                        handleAiResponseComplete()
                    }
                    onError = { error ->
                        handleAiError(error)
                    }
                }
                
                // Invoke the streaming chat completion
                zhipuClient?.chatCompletionStream(
                    messages = messages,
                    model = com.amu.aidemo.android.config.LlmConfig.MODEL,
                    temperature = com.amu.aidemo.android.config.LlmConfig.TEMPERATURE,
                    maxTokens = com.amu.aidemo.android.config.LlmConfig.MAX_TOKENS
                ) ?: run {
                    Log.e(TAG, "Zhipu client is null")
                    handleAiError("AI 客户端未初始化")
                }
                
            } catch (e: Exception) {
                Log.e(TAG, "Failed to call Zhipu AI", e)
                updateState(
                    currentState.copy(
                        state = AssistantState.IDLE,
                        isAnimating = false,
                        currentTranscript = "AI 调用失败: ${e.message}"
                    )
                )
            }
        }
    }
    
    /**
     * Streams one LLM chunk into the pending response and mirrors the
     * accumulated text in the live transcript. The chat list itself is only
     * appended to once the stream completes (avoids duplicate messages).
     */
    private fun handleAiResponseChunk(chunk: String) {
        aiResponseBuilder.append(chunk)
        val partial = aiResponseBuilder.toString()
        
        updateState(
            currentState.copy(
                state = AssistantState.THINKING,
                isAnimating = true,
                currentTranscript = partial
            )
        )
    }
    
    /**
     * Stream-complete callback: posts the accumulated AI reply as a single
     * assistant message (with LLM latency), clears the live transcript, and
     * starts TTS playback. An empty reply just returns the UI to idle.
     */
    private fun handleAiResponseComplete() {
        val fullResponse = aiResponseBuilder.toString().trim()
        aiResponseBuilder.clear()  // clear the builder immediately
        
        if (fullResponse.isNotEmpty()) {
            val currentTime = System.currentTimeMillis()
            val llmDuration = currentTime - llmStartTime
            Log.d(TAG, "LLM completed in ${llmDuration}ms, response length: ${fullResponse.length}")
            
            // UUID avoids message ID collisions
            val assistantMessage = ChatMessage(
                id = UUID.randomUUID().toString(),
                content = fullResponse,
                type = MessageType.ASSISTANT,
                timestamp = currentTime,
                llmDuration = llmDuration
            )
            
            // Clear currentTranscript; the complete message lives only in the list
            updateState(
                currentState.copy(
                    messages = currentState.messages + assistantMessage,
                    currentTranscript = "",  // key: clear the live display
                    state = AssistantState.SPEAKING
                )
            )
            
            // Speak the AI reply via TTS
            ttsStartTime = System.currentTimeMillis()
            speakText(fullResponse, assistantMessage)
        } else {
            // Nothing to say — drop back to idle
            scope.launch {
                delay(500)
                updateState(
                    currentState.copy(
                        state = AssistantState.IDLE,
                        isAnimating = false,
                        currentTranscript = ""
                    )
                )
            }
        }
    }
    
    /**
     * Surfaces a Zhipu AI failure to the user as an assistant chat bubble,
     * resets the UI to idle, and tears down network/audio resources.
     */
    private fun handleAiError(error: String) {
        Log.e(TAG, "Zhipu AI error: $error")
        // Drop any partial stream content; it will not be shown.
        aiResponseBuilder.clear()
        
        val errorBubble = ChatMessage(
            id = UUID.randomUUID().toString(),
            content = "抱歉，AI 服务出现错误：$error",
            type = MessageType.ASSISTANT,
            timestamp = System.currentTimeMillis()
        )
        
        updateState(
            currentState.copy(
                messages = currentState.messages + errorBubble,
                state = AssistantState.IDLE,
                isAnimating = false,
                currentTranscript = ""
            )
        )
        
        cleanupAfterError()
    }
    
    /**
     * Speaks [text] via Xfyun TTS with streaming playback: synthesized audio
     * chunks are appended to [ttsAudioBuffer] by the TTS callback and drained
     * into [audioPlayer] by a background writer coroutine, so playback starts
     * before synthesis finishes.
     *
     * Fix: the previous writer loop and the buffering callback read
     * `ttsAudioBuffer.size` without holding the buffer lock, racing the
     * producer; all list access now happens inside synchronized(ttsAudioBuffer).
     *
     * @param message the chat message being spoken; its TTS latency is filled
     *                in once playback completes
     */
    private fun speakText(text: String, message: ChatMessage) {
        scope.launch {
            try {
                // Tear down any previous TTS session and playback first.
                xfyunTtsClient?.disconnect()
                xfyunTtsClient = null
                audioPlayer?.stop()
                
                // Cancel a still-running writer from the previous utterance.
                audioWriteJob?.cancel()
                audioWriteJob = null
                
                // Clear the chunk buffer.
                synchronized(ttsAudioBuffer) {
                    ttsAudioBuffer.clear()
                }
                
                // Re-initialize the AudioPlayer.
                audioPlayer?.initialize()
                
                updateState(
                    currentState.copy(
                        state = AssistantState.SPEAKING,
                        isAnimating = true,
                        currentTranscript = "正在播放语音..."
                    )
                )
                
                // Mark synthesis as in progress.
                // NOTE(review): isTtsSynthesizing is read from an IO coroutine and
                // written from WebSocket callbacks; consider marking it @Volatile.
                isTtsSynthesizing = true
                
                // Writer coroutine: drains buffered chunks into the player until
                // synthesis has finished AND the buffer is exhausted.
                audioWriteJob = scope.launch(Dispatchers.IO) {
                    var written = 0
                    
                    while (true) {
                        // Snapshot the next chunk under the lock (fixes the
                        // unsynchronized size/index reads of the old loop).
                        val chunk: ByteArray? = synchronized(ttsAudioBuffer) {
                            if (written < ttsAudioBuffer.size) ttsAudioBuffer[written] else null
                        }
                        
                        when {
                            chunk != null -> {
                                try {
                                    audioPlayer?.write(chunk)
                                    Log.d(TAG, "TTS audio written [${written + 1}]: ${chunk.size} bytes")
                                } catch (e: Exception) {
                                    Log.e(TAG, "Error writing audio chunk", e)
                                }
                                written++
                            }
                            isTtsSynthesizing -> delay(10)  // no new data yet; wait
                            else -> break                    // synthesis done, buffer drained
                        }
                    }
                    
                    Log.d(TAG, "All TTS audio written, total chunks: $written")
                }
                
                // Initialize the TTS client.
                xfyunTtsClient = XfyunTtsClient(xfyunTtsAppId, xfyunTtsApiKey, xfyunTtsApiSecret).apply {
                    onAudioData = { audioData ->
                        // Buffer the chunk; the writer coroutine picks it up.
                        if (audioData.isNotEmpty()) {
                            val total = synchronized(ttsAudioBuffer) {
                                ttsAudioBuffer.add(audioData)
                                ttsAudioBuffer.size
                            }
                            Log.d(TAG, "TTS audio data buffered: ${audioData.size} bytes, total chunks: $total")
                        }
                    }
                    onComplete = {
                        // Synthesis finished: let the writer drain, then finish up.
                        isTtsSynthesizing = false
                        scope.launch {
                            audioWriteJob?.join()
                            handleTtsComplete(message)
                        }
                    }
                    onError = { error ->
                        Log.e(TAG, "TTS error: $error")
                        isTtsSynthesizing = false
                        audioWriteJob?.cancel()
                        handleTtsComplete()
                    }
                    onConnected = {
                        Log.d(TAG, "TTS connected, synthesizing text...")
                        // Send the text once the socket is up.
                        synthesize(
                            text = text,
                            voiceName = com.amu.aidemo.android.config.TtsConfig.VOICE_NAME,
                            speed = com.amu.aidemo.android.config.TtsConfig.SPEED,
                            pitch = com.amu.aidemo.android.config.TtsConfig.PITCH,
                            volume = com.amu.aidemo.android.config.TtsConfig.VOLUME
                        )
                    }
                }
                
                // Connect to the TTS service.
                xfyunTtsClient?.connect()
                
            } catch (e: Exception) {
                Log.e(TAG, "Failed to speak text", e)
                audioWriteJob?.cancel()
                handleTtsComplete()
            }
        }
    }
    
    /**
     * Finalizes a TTS session: records TTS latency onto [message] (when
     * provided), waits for playback to drain, rebuilds the AudioPlayer and
     * AudioRecorder for the next interaction, and returns the UI to idle.
     *
     * @param message the spoken chat message to annotate with ttsDuration,
     *                or null when called from an error path
     */
    private fun handleTtsComplete(message: ChatMessage? = null) {
        scope.launch {
            val ttsDuration = if (ttsStartTime > 0) {
                System.currentTimeMillis() - ttsStartTime
            } else 0L
            
            if (ttsDuration > 0) {
                Log.d(TAG, "TTS completed in ${ttsDuration}ms")
                
                // Attach the TTS latency to the spoken message
                message?.let { msg ->
                    val updatedMessage = msg.copy(ttsDuration = ttsDuration)
                    val updatedMessages = currentState.messages.map {
                        if (it.id == msg.id) updatedMessage else it
                    }
                    updateState(currentState.copy(messages = updatedMessages))
                }
            }
            
            // Wait for audio playback to finish (on the IO dispatcher)
            withContext(Dispatchers.IO) {
                audioPlayer?.waitForCompletion()
                
                // Rebuild the AudioPlayer so it is fresh for the next utterance
                try {
                    audioPlayer?.release()
                    audioPlayer = AudioPlayer().apply {
                        initialize()
                    }
                    Log.d(TAG, "AudioPlayer reinitialized after TTS completion")
                } catch (e: Exception) {
                    Log.e(TAG, "Failed to reinitialize AudioPlayer", e)
                }
            }
            
            // Disconnect the TTS client
            xfyunTtsClient?.disconnect()
            xfyunTtsClient = null
            
            // Recreate the AudioRecorder for wake-word detection
            withContext(Dispatchers.IO) {
                try {
                    audioRecorder = AudioRecorder(context).apply {
                        onAudioChunk = { audioData ->
                            processAudioChunk(audioData)
                        }
                        onError = { error ->
                            Log.e(TAG, "Audio error: $error")
                        }
                    }
                    Log.d(TAG, "AudioRecorder reinitialized after TTS completion")
                } catch (e: Exception) {
                    Log.e(TAG, "Failed to reinitialize AudioRecorder", e)
                }
            }
            
            // Return to the idle state
            delay(500)
            updateState(
                currentState.copy(
                    state = AssistantState.IDLE,
                    isAnimating = false,
                    currentTranscript = ""
                )
            )
            
            Log.d(TAG, "TTS playback completed, ready for next interaction")
        }
    }
    
    /**
     * Converts float samples (nominally in [-1, 1]) to 16-bit little-endian
     * PCM. Out-of-range samples are clamped to the signed 16-bit range.
     */
    private fun floatArrayToPcm16(floatData: FloatArray): ByteArray {
        val out = ByteArray(floatData.size * 2)
        floatData.forEachIndexed { index, sample ->
            val pcm = (sample * 32767).toInt().coerceIn(-32768, 32767)
            // Little-endian: low byte first, then high byte.
            out[index * 2] = (pcm and 0xFF).toByte()
            out[index * 2 + 1] = ((pcm shr 8) and 0xFF).toByte()
        }
        return out
    }
    
    /**
     * Best-effort teardown of playback and both Xfyun connections after an
     * AI error, performed off the main thread. Failures are only logged.
     */
    private fun cleanupAfterError() {
        scope.launch(Dispatchers.IO) {
            try {
                audioPlayer?.stop()
                xfyunClient?.disconnect()
                xfyunClient = null
                xfyunTtsClient?.disconnect()
                xfyunTtsClient = null
            } catch (failure: Exception) {
                Log.e(TAG, "Error during cleanup", failure)
            }
        }
    }
    
    /**
     * Primary button handler (also used for testing). From IDLE it starts
     * wake-word listening; from LISTENING it either finalizes an in-flight
     * recognition (IAT client active) or cancels wake-word monitoring.
     * Any other state is handled by [stopAll] instead.
     */
    fun manualTrigger() {
        val state = currentState.state
        Log.d(TAG, "Manual trigger - Current state: $state")
        
        if (state == AssistantState.IDLE) {
            Log.d(TAG, "Starting listening for wake word...")
            startListeningForWakeWord()
            return
        }
        if (state != AssistantState.LISTENING) {
            Log.d(TAG, "Use stop all button for other states")
            return
        }
        
        // LISTENING covers two modes: an active IAT session means an utterance
        // is being recorded; otherwise we are idling on the wake word.
        if (xfyunClient != null) {
            Log.d(TAG, "Stopping speech recognition...")
            stopSpeechRecognition()
        } else {
            Log.d(TAG, "Stopping wake word detection...")
            audioRecorder?.stopRecording()
            updateState(
                currentState.copy(
                    state = AssistantState.IDLE,
                    isAnimating = false,
                    currentTranscript = ""
                )
            )
        }
    }
    
    /**
     * Red stop button: aborts whatever the assistant is doing and returns to
     * idle via [stopListening].
     */
    fun stopAll() {
        val state = currentState.state
        Log.d(TAG, "Stop all activities - Current state: $state")
        stopListening()
    }
    
    /**
     * Publishes a new UI state. Always hops to the main dispatcher so the
     * [onStateChanged] observer runs on the UI thread regardless of which
     * thread produced the state.
     */
    private fun updateState(newState: VoiceAssistantUiState) {
        scope.launch(Dispatchers.Main) { currentState = newState }
    }
    
    /**
     * Releases every audio/network resource and stops all background work.
     * The manager is not reusable after this call.
     *
     * Fixes over the previous version: the coroutine scope (and with it the
     * ASR watchdog and TTS writer jobs) was never cancelled, leaking work;
     * and field references were nulled inside a fire-and-forget IO coroutine,
     * which raced the teardown above and served no purpose — they are now
     * cleared synchronously.
     */
    fun release() {
        Log.d(TAG, "Releasing all resources...")
        
        // Stop pending watchdog / writer coroutines before tearing the
        // players down underneath them.
        asrTimeoutJob?.cancel()
        asrTimeoutJob = null
        audioWriteJob?.cancel()
        audioWriteJob = null
        
        audioRecorder?.stopRecording()
        audioRecorder?.release()
        audioPlayer?.stop()
        audioPlayer?.release()
        wakeWordSoundPlayer?.release()
        wakeWordDetector?.release()
        
        xfyunClient?.disconnect()
        xfyunTtsClient?.disconnect()
        
        wakeWordDetector = null
        audioRecorder = null
        audioPlayer = null
        wakeWordSoundPlayer = null
        xfyunClient = null
        xfyunTtsClient = null
        
        // Cancel the scope last so no further launches can resurrect state.
        scope.cancel()
        
        Log.d(TAG, "All resources released")
    }
}
