package com.roger.homecenterscreen.voice

import android.content.Context
import android.util.Log
import com.roger.homecenterscreen.config.BusinessConfig
import kotlinx.coroutines.*
import kotlinx.coroutines.flow.MutableStateFlow
import kotlinx.coroutines.flow.StateFlow
import kotlinx.coroutines.flow.asStateFlow

/**
 * Voice feature manager (process-wide singleton).
 *
 * Central coordinator for the device voice pipeline: wake-word detection
 * ([WakeupManager]), speech recognition ([AsrManager]), speech synthesis
 * ([CosyVoiceTtsManager]), UI sound effects, voiceprint verification and the
 * MQTT uplink/downlink used to talk to the business backend. Pipeline status
 * is exposed to observers through read-only [StateFlow]s.
 */
class VoiceManager private constructor(private val context: Context) {
    
    companion object {
        private const val TAG = "VoiceManager"
        
        // Singleton instance; @Volatile + double-checked locking in getInstance().
        @Volatile
        private var INSTANCE: VoiceManager? = null
        
        /**
         * Returns the process-wide instance, lazily creating it from the
         * application context (never an Activity context, to avoid leaks).
         */
        fun getInstance(context: Context): VoiceManager {
            return INSTANCE ?: synchronized(this) {
                INSTANCE ?: VoiceManager(context.applicationContext).also { INSTANCE = it }
            }
        }
    }
    
    // Main-thread scope; SupervisorJob so one failing child does not cancel siblings.
    private val coroutineScope = CoroutineScope(Dispatchers.Main + SupervisorJob())
    
    // Current voice pipeline state (idle / listening / speaking / error ...).
    private val _voiceState = MutableStateFlow(VoiceState.IDLE)
    val voiceState: StateFlow<VoiceState> = _voiceState.asStateFlow()
    
    // Latest ASR recognition text; cleared when a new utterance starts.
    private val _recognitionResult = MutableStateFlow("")
    val recognitionResult: StateFlow<String> = _recognitionResult.asStateFlow()
    
    // TTS synthesis state.
    private val _synthesisState = MutableStateFlow(SynthesisState.IDLE)
    val synthesisState: StateFlow<SynthesisState> = _synthesisState.asStateFlow()
    
    // Whether initialize() has completed successfully.
    private val _isVoiceInitialized = MutableStateFlow(false)
    val isVoiceInitialized: StateFlow<Boolean> = _isVoiceInitialized.asStateFlow()

    // Last WAV file written for voiceprint verification (null until one exists).
    private val _lastVoiceprintWav = MutableStateFlow<java.io.File?>(null)
    val lastVoiceprintWav: StateFlow<java.io.File?> = _lastVoiceprintWav.asStateFlow()
    
    // Sub-modules; lateinit because they are constructed inside initialize().
    private lateinit var wakeupManager: WakeupManager
    private lateinit var asrManager: AsrManager
    // CosyVoice replaces the former iFlytek (讯飞) TTS.
    private lateinit var cosyTtsManager: CosyVoiceTtsManager
    private lateinit var difyManager: DifyManager
    private lateinit var soundEffectManager: SoundEffectManager
    private var mqttManager: MqttServiceManager? = null
    private val voiceprintService = VoiceprintService()

    // Session/user tracking and debounce bookkeeping; @Volatile because these are
    // touched from the MQTT callback thread, IO dispatcher and main thread.
    @Volatile private var currentUserId: Any? = null
    @Volatile private var currentSessionId: String? = null
    @Volatile private var lastAsrText: String = ""
    @Volatile private var lastAsrUpdateAt: Long = 0L
    @Volatile private var lastInboundAt: Long = 0L
    private var clearUserJob: Job? = null
    
    private var isInitialized = false
    private var lastInitError: String? = null

    /** Human-readable reason for the most recent initialization failure, or null. */
    fun getLastInitError(): String? = lastInitError

    /**
     * Callback from the ASR module's local VAD: switches the bottom-bar state
     * between "waiting for wake word" and "listening to speech".
     * Deliberately does not override in-flight PROCESSING/SPEAKING states.
     * NOTE(review): the original comment also mentioned ERROR, but only
     * PROCESSING and SPEAKING are actually guarded below — confirm intent.
     */
    internal fun onVadStateChanged(isListening: Boolean) {
        coroutineScope.launch {
            Log.i(TAG, "VAD回调：isListening=$isListening, 当前voiceState=${_voiceState.value}")
            val current = _voiceState.value
            when (current) {
                VoiceState.PROCESSING, VoiceState.SPEAKING -> {
                    // Keep the current state while processing/speaking.
                    return@launch
                }
                else -> {
                    _voiceState.value = if (isListening) VoiceState.LISTENING_SPEECH else VoiceState.LISTENING_WAKEUP
                    if (isListening) {
                        // New utterance: drop stale recognition text and voiceprint file.
                        _recognitionResult.value = ""
                        _lastVoiceprintWav.value = null
                    }
                    Log.i(TAG, "VAD驱动voiceState变更为=${_voiceState.value}")
                }
            }
        }
    }
    
    /**
     * Initializes the whole voice stack: permission checks, storage preparation,
     * sub-module construction and initialization, MQTT connect/subscribe, then
     * starts continuous recognition.
     *
     * Idempotent — returns true immediately if already initialized.
     * @return false on failure; for permission/ASR failures [getLastInitError]
     *         carries the reason.
     */
    suspend fun initialize(): Boolean {
        if (isInitialized) {
            Log.i(TAG, "语音功能已初始化，跳过重复初始化")
            return true
        }
        return withContext(Dispatchers.IO) {
            try {
                Log.i(TAG, "🎤 开始初始化语音功能...")
                Log.i(TAG, "📋 语音配置信息:")
                Log.i(TAG, "  - APPID: ${VoiceConfig.APPID}")
                Log.i(TAG, "  - API Key: ${VoiceConfig.API_KEY.take(8)}...")
                Log.i(TAG, "  - 唤醒词: ${VoiceConfig.WAKEUP_WORD}")
                
                // Check only the minimal required permissions for continuous-ASR
                // mode: microphone recording + network.
                Log.i(TAG, "🔐 检查最小必需权限（录音/网络）...")
                if (!VoicePermissionHelper.checkRecordAudioPermission(context)) {
                    lastInitError = "缺少录音权限(RECORD_AUDIO)"
                    Log.e(TAG, "❌ 录音权限是语音功能的必要条件")
                    return@withContext false
                }
                if (!VoicePermissionHelper.checkNetworkPermission(context)) {
                    lastInitError = "缺少网络权限(INTERNET)"
                    Log.e(TAG, "❌ 网络权限缺失")
                    return@withContext false
                }
                Log.i(TAG, "✅ 最小必需权限已满足")
                
                // Prepare the cache/model storage directories.
                val voiceCacheDir = VoiceStorageHelper.getVoiceCacheDir(context)
                val voiceModelDir = VoiceStorageHelper.getVoiceModelDir(context)
                
                Log.i(TAG, "📁 语音缓存目录: ${voiceCacheDir.absolutePath}")
                Log.i(TAG, "📁 语音模型目录: ${voiceModelDir.absolutePath}")
                
                // Low disk space is a warning only, not a hard failure.
                if (!VoiceStorageHelper.checkStorageSpace(context)) {
                    Log.w(TAG, "⚠️ 存储空间不足，可能影响语音功能")
                }
                
                // Purge expired cache files.
                VoiceStorageHelper.cleanupVoiceCache(context)
                
                
                
                // Construct the sub-modules. WakeupManager construction is
                // best-effort: failure leaves the lateinit unset (placeholder).
                Log.i(TAG, "🔧 初始化各个语音模块...")
                try {
                    wakeupManager = WakeupManager(context, this@VoiceManager)
                } catch (e: Exception) {
                    Log.w(TAG, "⚠️ WakeupManager 初始化占位")
                }
                asrManager = AsrManager(context, this@VoiceManager)
                // CosyVoice TTS manager (replaces iFlytek TTS).
                cosyTtsManager = CosyVoiceTtsManager(context, this@VoiceManager)
                difyManager = DifyManager()
                soundEffectManager = SoundEffectManager(context)
                
                Log.i(TAG, "🔧 初始化 ASR 模块（FunASR）...")
                val asrInit = asrManager.initialize()
                Log.i(TAG, "🔧 初始化 ASR 结果: $asrInit")
                val wakeupInit = try { wakeupManager.initialize() } catch (e: Exception) { Log.w(TAG, "唤醒模块初始化异常", e); false }
                Log.i(TAG, "🔧 初始化 唤醒结果: $wakeupInit")
                Log.i(TAG, "🔧 初始化 TTS 模块（CosyVoice）...")
                val ttsInit = try { cosyTtsManager.initialize() } catch (e: Exception) { Log.w(TAG, "CosyVoice TTS 初始化异常", e); false }
                Log.i(TAG, "🔧 初始化 TTS 结果: $ttsInit")
                Log.i(TAG, "🔧 初始化 音效模块 ...")
                val soundInit = soundEffectManager.initialize()
                Log.i(TAG, "🔧 初始化 音效结果: $soundInit")

                Log.i(TAG, "模块初始化结果: 唤醒=$wakeupInit, 识别=$asrInit, 合成=$ttsInit (CosyVoice), 音效=$soundInit")
                
                // ASR is the only hard requirement; wake-word/TTS/sound failures
                // degrade functionality but do not abort initialization.
                if (!asrInit) {
                    lastInitError = "ASR 初始化失败（FunASR）"
                    Log.e(TAG, "❌ ASR初始化失败")
                    return@withContext false
                }
                
                if (!soundInit) {
                    Log.w(TAG, "⚠️ 音效模块初始化失败，但不影响主要功能")
                }
                
                isInitialized = true
                _isVoiceInitialized.value = true
                Log.i(TAG, "✅ 语音功能初始化完成（ASR已启用，AIKit已移除）")

                // Load business config and connect MQTT. The lambda handles every
                // inbound (downlink) message from the backend.
                try {
                    BusinessConfig.load(context)
                    mqttManager = MqttServiceManager(context) { inbound ->
                        // Timestamp of the last inbound message; read by the
                        // timeout guard launched below.
                        lastInboundAt = System.currentTimeMillis()

                        if (inbound.type == "0") {
                            // type "0": stream TTS playback chunks until is_end.
                            if (_synthesisState.value == SynthesisState.IDLE) {
                                _voiceState.value = VoiceState.SPEAKING
                                _synthesisState.value = SynthesisState.SYNTHESIZING
                                cosyTtsManager.startStreaming { completed ->
                                    _synthesisState.value = SynthesisState.COMPLETED
                                    _voiceState.value = VoiceState.IDLE
                                    coroutineScope.launch {
                                        delay(1000)
                                        startWakeupListening()
                                    }
                                }
                            }
                            if (inbound.content.isNotEmpty()) {
                                cosyTtsManager.appendStreaming(inbound.content)
                            }
                            if (inbound.is_end) {
                                cosyTtsManager.finishStreaming()
                            }
                        }

                        // Timeout guard: if no further inbound message arrives
                        // within 4s while still SYNTHESIZING, treat the stream as
                        // broken, reset the session and go back to wake listening.
                        clearUserJob?.cancel()
                        clearUserJob = coroutineScope.launch {
                            delay(4000)
                            val passed = System.currentTimeMillis() - lastInboundAt
                            if (passed >= 4000 && _synthesisState.value == SynthesisState.SYNTHESIZING) {
                                Log.w(TAG, "服务端响应超时/未正常结束，清理当前用户信息")
                                try { cosyTtsManager.finishStreaming() } catch (_: Exception) {}
                                _synthesisState.value = SynthesisState.ERROR
                                currentUserId = null
                                currentSessionId = null
                                startWakeupListening()
                            }
                        }
                    }
                    mqttManager?.connectAndSubscribe()
                } catch (e: Exception) {
                    Log.w(TAG, "MQTT 初始化/连接异常", e)
                }

                // Continuous-recognition mode: start capturing from the mic and
                // feeding ASR right after initialization.
                try {
                    Log.i(TAG, "🎤 准备启动连续语音识别：设置状态与清空结果")
                    _voiceState.value = VoiceState.LISTENING_SPEECH
                    _recognitionResult.value = ""
                    Log.i(TAG, "🎤 调用 asrManager.startRecognition() 开始")
                    asrManager.startRecognition()
                    Log.i(TAG, "🎤 调用 asrManager.startRecognition() 结束")
                } catch (e: Exception) {
                    Log.e(TAG, "自动启动连续识别失败", e)
                }
                return@withContext true
                
            } catch (e: Exception) {
                Log.e(TAG, "❌ 初始化失败", e)
                e.printStackTrace()
                return@withContext false
            }
        }
    }
    
    /**
     * Starts listening for the wake word. Sets the state to ERROR when the
     * manager has not been initialized yet.
     */
    fun startWakeupListening() {
        Log.i(TAG, "🔊 开始语音唤醒监听")
        Log.i(TAG, "👂 正在监听唤醒词: \"${VoiceConfig.WAKEUP_WORD}\"")
        
        if (!isInitialized) {
            Log.w(TAG, "⚠️ 语音功能未初始化")
            _voiceState.value = VoiceState.ERROR
            return
        }
        
        coroutineScope.launch {
            try {
                _voiceState.value = VoiceState.LISTENING_WAKEUP
                wakeupManager.startListening()
                Log.i(TAG, "✅ 语音唤醒监听已启动")
                Log.i(TAG, "💡 请说出唤醒词 \"${VoiceConfig.WAKEUP_WORD}\" 来测试")
            } catch (e: Exception) {
                Log.e(TAG, "启动语音唤醒失败", e)
                _voiceState.value = VoiceState.ERROR
            }
        }
    }
    
    /**
     * Stops wake-word listening and returns the pipeline to IDLE.
     */
    fun stopWakeupListening() {
        coroutineScope.launch {
            wakeupManager.stopListening()
            _voiceState.value = VoiceState.IDLE
        }
    }
    
    /**
     * Invoked by [WakeupManager] when the wake word is detected: resets the
     * current user/session and tells ASR a fresh interaction has begun.
     */
    internal fun onWakeupDetected() {
        Log.i(TAG, "检测到唤醒词：${VoiceConfig.WAKEUP_WORD}")
        coroutineScope.launch {
            _voiceState.value = VoiceState.ASR_START
            currentUserId = null
            currentSessionId = null
            asrManager.clearAsrTextSplicingCache()
            asrManager.notifyWakeup()
        }
    }
    
    /**
     * Starts a one-shot recognition; on failure, falls back to wake-word
     * listening after a short delay.
     */
    private fun startRecognition() {
        coroutineScope.launch {
            try {
                _voiceState.value = VoiceState.LISTENING_SPEECH
                _recognitionResult.value = ""
                asrManager.startRecognition()
            } catch (e: Exception) {
                Log.e(TAG, "启动语音识别失败", e)
                _voiceState.value = VoiceState.ERROR
                // Restart wake-word listening after the error.
                delay(1000)
                startWakeupListening()
            }
        }
    }
    
    /**
     * ASR result callback. Publishes the text, then — once the text has been
     * stable for >= 1s — verifies the speaker's voiceprint (first turn only,
     * and only if the text contains the wake word) and publishes the query to
     * the backend over MQTT.
     */
    internal fun onRecognitionResult(result: String, isLast: Boolean) {
        _recognitionResult.value = result
        val now = System.currentTimeMillis()
        val changed = (result != lastAsrText)
        if (changed) {
            lastAsrText = result
            lastAsrUpdateAt = now
        }

        // Debounce: act only when the ASR text has not changed for >= 1s.
        // NOTE(review): every callback launches a new debounce coroutine; if
        // several of them observe the same stable text, the query could be
        // published more than once — confirm AsrManager's callback cadence.
        coroutineScope.launch {
            delay(1000)
            val idleEnough = (System.currentTimeMillis() - lastAsrUpdateAt) >= 1000
            if (!idleEnough || lastAsrText.isBlank()) return@launch

            if (currentUserId == null) {
                // First turn of a session: require the wake word, then identify
                // the speaker via voiceprint before accepting the query.
                val wakeWord = VoiceConfig.WAKEUP_WORD
                if (!lastAsrText.contains(wakeWord)) return@launch
                val vp = withContext(Dispatchers.IO) {
                    // Prefer the ASR-provided candidate WAV; otherwise rebuild a
                    // WAV from the PCM captured since wake-up, falling back to
                    // the last recorded file.
                    val cand = asrManager.getLastVoiceprintCandidateFile()
                    val wavFile = if (cand != null && cand.exists()) cand else run {
                        val pcm = asrManager.exportPcmSinceWakeup(VoiceConfig.VOICEPRINT_KEEP_MS)
                        if (pcm.isNotEmpty()) writeWavTemp(pcm, VoiceConfig.ASR_SAMPLE_RATE) else asrManager.getLastRecordedFile()
                    }
                    asrManager.clearAsrTextSplicingCache()
                    if (wavFile != null) voiceprintService.verify(wavFile) else null
                }
                if (vp?.userId.isNullOrBlank()) {
                    synthesizeText("您暂未开通权限")
                    return@launch
                }
                currentUserId = vp?.userId
                currentSessionId = java.util.UUID.randomUUID().toString()
            }

            // Publish the recognized query to the backend over MQTT.
            try {
                BusinessConfig.load(context)
                val outbound = OutboundMessage(
                    query = lastAsrText,
                    user_id = currentUserId,
                    demp_id = BusinessConfig.dempId,
                    from_terminal = "terminal",
                    type = "0",
                    mode = "normal",
                    session_id = currentSessionId
                )
                withContext(kotlinx.coroutines.Dispatchers.IO) {
                    mqttManager?.publishOutbound(outbound)
                }
            } catch (e: Exception) {
                Log.e(TAG, "发布MQTT消息失败", e)
            }
        }

        // Legacy hook: kept for a potential final-result trigger.
        if (isLast && result.isNotEmpty()) {
            Log.i(TAG, "语音识别完成(legacy hook): $result")
        }
    }
    
    /**
     * Former Dify chat integration; superseded by the business-side MQTT flow.
     */
    private suspend fun processChatWithDify(userMessage: String) { /* superseded by business-side MQTT */ }
    
    /**
     * Synthesizes [text] to speech (non-streaming). On completion or error,
     * returns to wake-word listening after a short delay.
     */
    private fun synthesizeText(text: String) {
        if (text.isEmpty()) return
        
        coroutineScope.launch {
            try {
                _voiceState.value = VoiceState.SPEAKING
                _synthesisState.value = SynthesisState.SYNTHESIZING
                
                withContext(Dispatchers.IO) {
                    cosyTtsManager.synthesize(text) { isCompleted ->
                        if (isCompleted) {
                            _synthesisState.value = SynthesisState.COMPLETED
                            _voiceState.value = VoiceState.IDLE
                            asrManager.clearAsrTextSplicingCache()
                            coroutineScope.launch {
                                delay(1000)
                                startWakeupListening()
                            }
                        }
                    }
                }
            } catch (e: Exception) {
                Log.e(TAG, "语音合成失败", e)
                _synthesisState.value = SynthesisState.ERROR
                _voiceState.value = VoiceState.ERROR
                asrManager.clearAsrTextSplicingCache()
                
                // Restart wake-word listening after the error.
                coroutineScope.launch {
                    delay(2000)
                    startWakeupListening()
                }
            }
        }
    }
    
    /**
     * Streaming variant of [synthesizeText]; only flips states to SPEAKING /
     * SYNTHESIZING when the synthesizer is currently idle.
     */
    private fun synthesizeStreamText(text: String) {
        if (text.isEmpty()) return
        
        coroutineScope.launch {
            if (_synthesisState.value == SynthesisState.IDLE) {
                _voiceState.value = VoiceState.SPEAKING
                _synthesisState.value = SynthesisState.SYNTHESIZING
            }
            
                withContext(Dispatchers.IO) {
                    cosyTtsManager.synthesizeStream(text) { isCompleted ->
                        if (isCompleted) {
                            _synthesisState.value = SynthesisState.COMPLETED
                            _voiceState.value = VoiceState.IDLE
                            asrManager.clearAsrTextSplicingCache()
                            coroutineScope.launch {
                                delay(1000)
                                startWakeupListening()
                            }
                        }
                    }
                }
        }
    }

    /**
     * Writes mono PCM16 bytes into a temporary RIFF/WAV file (consumed by
     * voiceprint verification) and records it in [lastVoiceprintWav].
     * NOTE(review): the stream is not closed in a finally/use block, so an
     * exception mid-write leaks the file handle — consider `use { }`.
     */
    private fun writeWavTemp(pcm: ByteArray, sampleRate: Int): java.io.File {
        val dir = VoiceStorageHelper.getVoiceRecordDir(context)
        val f = java.io.File(dir, "voiceprint_${System.currentTimeMillis()}.wav")
        Log.i(TAG, "准备写入WAV: bytes=${pcm.size} file=${f.absolutePath}")
        val channels = 1
        val bitsPerSample = 16
        val byteRate = sampleRate * channels * bitsPerSample / 8
        val blockAlign = (channels * bitsPerSample / 8).toShort()
        val dataSize = pcm.size
        val totalDataLen = 36 + dataSize
        val out = java.io.FileOutputStream(f)
        // NOTE(review): this 44-byte buffer is never used — the header is
        // written field-by-field below; candidate for removal.
        val header = java.nio.ByteBuffer.allocate(44).order(java.nio.ByteOrder.LITTLE_ENDIAN)
        // RIFF header
        out.write("RIFF".toByteArray())
        out.write(java.nio.ByteBuffer.allocate(4).order(java.nio.ByteOrder.LITTLE_ENDIAN).putInt(totalDataLen).array())
        out.write("WAVE".toByteArray())
        // fmt chunk
        out.write("fmt ".toByteArray())
        out.write(java.nio.ByteBuffer.allocate(4).order(java.nio.ByteOrder.LITTLE_ENDIAN).putInt(16).array()) // Subchunk1Size
        out.write(java.nio.ByteBuffer.allocate(2).order(java.nio.ByteOrder.LITTLE_ENDIAN).putShort(1).array()) // PCM format
        out.write(java.nio.ByteBuffer.allocate(2).order(java.nio.ByteOrder.LITTLE_ENDIAN).putShort(channels.toShort()).array())
        out.write(java.nio.ByteBuffer.allocate(4).order(java.nio.ByteOrder.LITTLE_ENDIAN).putInt(sampleRate).array())
        out.write(java.nio.ByteBuffer.allocate(4).order(java.nio.ByteOrder.LITTLE_ENDIAN).putInt(byteRate).array())
        out.write(java.nio.ByteBuffer.allocate(2).order(java.nio.ByteOrder.LITTLE_ENDIAN).putShort(blockAlign).array())
        out.write(java.nio.ByteBuffer.allocate(2).order(java.nio.ByteOrder.LITTLE_ENDIAN).putShort(bitsPerSample.toShort()).array())
        // data chunk
        out.write("data".toByteArray())
        out.write(java.nio.ByteBuffer.allocate(4).order(java.nio.ByteOrder.LITTLE_ENDIAN).putInt(dataSize).array())
        out.write(pcm)
        out.flush()
        out.close()
        _lastVoiceprintWav.value = f
        return f
    }
    
    /**
     * ASR error callback: flags ERROR, plays the error sound effect (if the
     * module is ready), then resumes wake-word listening after 2s.
     */
    internal fun onRecognitionError(error: String) {
        Log.e(TAG, "语音识别错误: $error")
        coroutineScope.launch {
            _voiceState.value = VoiceState.ERROR
            
            // Play the error prompt sound.
            if (::soundEffectManager.isInitialized) {
                soundEffectManager.playErrorSound()
            }
            
            delay(2000)
            
            // Restart wake-word listening.
            startWakeupListening()
        }
    }
    
    /**
     * Releases all resources: cancels the coroutine scope, releases every
     * sub-module and disconnects MQTT. Safe to call when never initialized.
     */
    fun release() {
        coroutineScope.cancel()
        
        if (isInitialized) {
            wakeupManager.release()
            asrManager.release()
            cosyTtsManager.release()
            
            if (::soundEffectManager.isInitialized) {
                soundEffectManager.release()
            }
            
            try { mqttManager?.disconnect() } catch (_: Exception) {}
            mqttManager = null
            isInitialized = false
            _isVoiceInitialized.value = false
        }
    }
}

/**
 * States of the voice pipeline, published via `VoiceManager.voiceState`.
 */
enum class VoiceState {
    /** Pipeline is idle. */
    IDLE,
    /** Listening for the wake word. */
    LISTENING_WAKEUP,
    /** Wake word was just detected. */
    WAKEUP_DETECTED,
    /** Listening to user speech. */
    LISTENING_SPEECH,
    /** Audio is being recorded. */
    RECORDING,
    /** Recognition is starting. */
    ASR_START,
    /** Recognition in progress. */
    RECOGNIZING,
    /** Backend/processing in progress. */
    PROCESSING,
    /** TTS playback in progress. */
    SPEAKING,
    /** An error occurred. */
    ERROR
}

/**
 * States of TTS synthesis, published via `VoiceManager.synthesisState`.
 */
enum class SynthesisState {
    /** No synthesis in progress. */
    IDLE,
    /** Synthesis/playback running. */
    SYNTHESIZING,
    /** Synthesis finished successfully. */
    COMPLETED,
    /** Synthesis failed or timed out. */
    ERROR
}
