package com.smartinput.voice.websocket

import org.springframework.stereotype.Component
import org.springframework.web.socket.*
import org.springframework.web.socket.handler.TextWebSocketHandler
import org.slf4j.LoggerFactory
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
import com.fasterxml.jackson.module.kotlin.readValue
import com.smartinput.voice.application.service.AliyunWebSocketSpeechRecognitionService
import com.smartinput.websocket.SmartInputWebSocketHandler
import java.util.concurrent.ConcurrentHashMap
import java.util.*

@Component
class VoiceWebSocketHandler(
    private val aliyunWebSocketService: AliyunWebSocketSpeechRecognitionService,
    private val smartInputWebSocketHandler: SmartInputWebSocketHandler
) : TextWebSocketHandler() {
    
    private val logger = LoggerFactory.getLogger(VoiceWebSocketHandler::class.java)
    private val objectMapper = jacksonObjectMapper()
    
    // Mobile-side sessions (WebSocket A), keyed by WebSocket session id.
    private val mobileSessions = ConcurrentHashMap<String, MobileSessionInfo>()
    
    /**
     * Wire envelope exchanged with the mobile client. One type is used for all
     * message kinds ("start", "stop", "select_device", results, errors);
     * fields that do not apply to a given kind stay null.
     */
    data class VoiceMessage(
        val type: String,
        val data: Map<String, Any>? = null,
        val text: String? = null,
        val confidence: Double? = null,
        val success: Boolean? = null,
        val error: String? = null,
        val deviceId: String? = null
    )
    
    /**
     * Per-mobile-session state.
     *
     * NOTE(review): [audioBuffer] is only mutated from WebSocket handler
     * callbacks for this session; the async recognition callback never touches
     * it — confirm the container serializes frames per session before relying
     * on that.
     */
    data class MobileSessionInfo(
        val session: WebSocketSession,
        var deviceId: String = "",
        var isRecording: Boolean = false,
        val audioBuffer: MutableList<ByteArray> = mutableListOf(),
        var traceId: String = "" // trace id keying the Aliyun session lifecycle
    )
    
    /** Registers a new mobile connection and assigns it a unique traceId. */
    override fun afterConnectionEstablished(session: WebSocketSession) {
        logger.info("手机端WebSocket连接建立: ${session.id}")
        
        // One traceId per mobile session; it identifies the Aliyun session too.
        val traceId = generateTraceId()
        
        mobileSessions[session.id] = MobileSessionInfo(
            session = session,
            traceId = traceId
        )
        
        logger.info("手机端会话创建成功: sessionId=${session.id}, traceId=$traceId")
    }
    
    /** Dispatches JSON control messages ("start" / "stop" / "select_device"). */
    override fun handleTextMessage(session: WebSocketSession, message: TextMessage) {
        try {
            val voiceMessage = objectMapper.readValue<VoiceMessage>(message.payload)
            logger.debug("收到手机端文本消息: ${voiceMessage.type}")
            
            when (voiceMessage.type) {
                "start" -> handleStartRecognition(session, voiceMessage)
                "stop" -> handleStopRecognition(session, voiceMessage)
                "select_device" -> handleSelectDevice(session, voiceMessage)
                else -> logger.warn("未知消息类型: ${voiceMessage.type}")
            }
        } catch (e: Exception) {
            logger.error("处理手机端文本消息失败", e)
            sendError(session, "消息处理失败: ${e.message}")
        }
    }
    
    /**
     * Buffers incoming audio frames and flushes them to Aliyun in batches.
     * Frames are ignored unless the session is actively recording.
     */
    override fun handleBinaryMessage(session: WebSocketSession, message: BinaryMessage) {
        try {
            val sessionInfo = mobileSessions[session.id]
            if (sessionInfo?.isRecording == true && sessionInfo.traceId.isNotBlank()) {
                // BUGFIX: ByteBuffer.array() returns the whole backing array,
                // ignoring position/limit (and throws for read-only or
                // non-array-backed buffers). Copy exactly the frame's bytes.
                val payload = message.payload
                val audioData = ByteArray(payload.remaining())
                payload.get(audioData)
                
                // Audio-format diagnostics on the first chunk of a recording.
                if (sessionInfo.audioBuffer.isEmpty()) {
                    analyzeAudioFormat(audioData, session.id)
                }
                
                sessionInfo.audioBuffer.add(audioData)
                
                // Flush in batches to balance upstream packet size against
                // latency. 16 chunks ≈ 3.2 s of audio per the original
                // sizing — TODO confirm against the client's chunk size.
                if (sessionInfo.audioBuffer.size >= 16) {
                    sendAudioToAliyun(session, sessionInfo)
                }
            }
        } catch (e: Exception) {
            logger.error("处理手机端二进制消息失败", e)
            sendError(session, "音频数据处理失败: ${e.message}")
        }
    }
    
    /** Releases session state and the matching Aliyun session on close. */
    override fun afterConnectionClosed(session: WebSocketSession, status: CloseStatus) {
        logger.info("手机端WebSocket连接关闭: ${session.id}, 状态: $status")
        
        val sessionInfo = mobileSessions.remove(session.id)
        if (sessionInfo != null) {
            // The traceId keys the Aliyun-side resources.
            aliyunWebSocketService.cleanupSession(sessionInfo.traceId)
            logger.info("清理会话资源: sessionId=${session.id}, traceId=${sessionInfo.traceId}")
        }
    }
    
    /** Treats transport errors like a close: releases all session resources. */
    override fun handleTransportError(session: WebSocketSession, exception: Throwable) {
        logger.error("手机端WebSocket传输错误: ${session.id}", exception)
        // BUGFIX: previously only the map entry was removed, leaking the
        // Aliyun session. Mirror afterConnectionClosed's cleanup.
        val sessionInfo = mobileSessions.remove(session.id)
        if (sessionInfo != null && sessionInfo.traceId.isNotBlank()) {
            aliyunWebSocketService.cleanupSession(sessionInfo.traceId)
        }
    }
    
    /** Binds the mobile session to a target device chosen by the user. */
    private fun handleSelectDevice(session: WebSocketSession, message: VoiceMessage) {
        try {
            // Accept the device id either as a top-level field or inside data.
            val deviceId = message.deviceId ?: message.data?.get("deviceId") as? String
            if (deviceId.isNullOrBlank()) {
                sendError(session, "设备ID不能为空")
                return
            }
            
            val sessionInfo = mobileSessions[session.id]
            if (sessionInfo != null) {
                sessionInfo.deviceId = deviceId
                logger.info("手机端选择设备: session=${session.id}, deviceId=$deviceId")
                sendMessage(session, VoiceMessage(
                    type = "device_selected",
                    success = true,
                    deviceId = deviceId
                ))
            } else {
                // Previously this case was silently ignored; report it so the
                // client can recover (e.g. reconnect).
                sendError(session, "会话不存在")
            }
        } catch (e: Exception) {
            logger.error("选择设备失败", e)
            sendError(session, "选择设备失败: ${e.message}")
        }
    }
    
    /**
     * Starts a recognition run: requires a selected device, creates the Aliyun
     * session, and switches the mobile session into recording mode.
     */
    private fun handleStartRecognition(session: WebSocketSession, message: VoiceMessage) {
        try {
            val sessionInfo = mobileSessions[session.id]
            if (sessionInfo == null) {
                sendError(session, "会话不存在")
                return
            }
            
            if (sessionInfo.deviceId.isBlank()) {
                sendError(session, "请先选择目标设备")
                return
            }
            
            // One independent Aliyun WebSocket session per mobile session.
            val success = createAliyunSession(sessionInfo.deviceId, session.id)
            if (!success) {
                sendError(session, "无法创建阿里云语音识别会话")
                return
            }
            
            sessionInfo.isRecording = true
            sessionInfo.audioBuffer.clear()
            
            logger.info("开始语音识别: session=${session.id}, deviceId=${sessionInfo.deviceId}")
            sendMessage(session, VoiceMessage(
                type = "recognition_started",
                success = true,
                deviceId = sessionInfo.deviceId
            ))
            
        } catch (e: Exception) {
            logger.error("开始语音识别失败", e)
            sendError(session, "开始语音识别失败: ${e.message}")
        }
    }
    
    /** Stops recording, flushes any buffered audio, and ends the Aliyun session. */
    private fun handleStopRecognition(session: WebSocketSession, message: VoiceMessage) {
        try {
            val sessionInfo = mobileSessions[session.id]
            if (sessionInfo != null) {
                sessionInfo.isRecording = false
                
                // Flush whatever audio is still buffered as the final batch.
                if (sessionInfo.audioBuffer.isNotEmpty()) {
                    sendAudioToAliyun(session, sessionInfo, true)
                }
                
                val traceId = sessionInfo.traceId
                if (traceId.isNotBlank()) {
                    aliyunWebSocketService.stopCurrentSession(traceId)
                    logger.info("停止阿里云会话: session=${session.id}, traceId=$traceId")
                }
                
                logger.info("停止语音识别: session=${session.id}")
                sendMessage(session, VoiceMessage(
                    type = "recognition_stopped",
                    success = true
                ))
            }
        } catch (e: Exception) {
            logger.error("停止语音识别失败", e)
            sendError(session, "停止语音识别失败: ${e.message}")
        }
    }
    
    /**
     * Creates the Aliyun recognition session for [mobileSessionId] and wires
     * the async result callback that fans results out to the mobile client and
     * the plugin side.
     *
     * @return true when the Aliyun session was started successfully.
     */
    private fun createAliyunSession(deviceId: String, mobileSessionId: String): Boolean {
        return try {
            val sessionInfo = mobileSessions[mobileSessionId]
            if (sessionInfo == null) {
                logger.error("手机端会话不存在: $mobileSessionId")
                return false
            }
            
            val traceId = sessionInfo.traceId
            if (traceId.isBlank()) {
                logger.error("traceId为空: session=$mobileSessionId")
                return false
            }
            
            // Register the async recognition-result callback.
            aliyunWebSocketService.setRecognitionResultCallback(traceId) { result ->
                if (result.success && result.text.isNotBlank()) {
                    // Re-read the map instead of capturing sessionInfo: the
                    // session may have been replaced/removed since the closure
                    // was created.
                    val currentSessionInfo = mobileSessions[mobileSessionId]
                    if (currentSessionInfo != null) {
                        // Interim vs final results use distinct message types.
                        val messageType = if (result.isFinal) "recognition_result" else "recognition_interim"
                        
                        // Forward the result to the mobile client.
                        sendMessage(currentSessionInfo.session, VoiceMessage(
                            type = messageType,
                            success = true,
                            text = result.text,
                            confidence = result.confidence,
                            deviceId = currentSessionInfo.deviceId
                        ))
                        
                        // Forward the result to the plugin side as well.
                        sendToPlugin(currentSessionInfo.deviceId, result.text, result.confidence, result.isFinal)
                        
                        logger.info("异步识别结果已发送: traceId=$traceId, deviceId=${currentSessionInfo.deviceId}, text=${result.text}, isFinal=${result.isFinal}")
                    } else {
                        logger.warn("会话信息不存在，无法发送识别结果: traceId=$traceId")
                    }
                }
            }
            
            val success = aliyunWebSocketService.startSession(traceId)
            if (!success) {
                logger.error("启动阿里云会话失败: traceId=$traceId")
                return false
            }
            
            logger.info("创建阿里云会话成功: session=$mobileSessionId, traceId=$traceId, deviceId=$deviceId")
            true
            
        } catch (e: Exception) {
            logger.error("创建阿里云会话失败: session=$mobileSessionId", e)
            false
        }
    }
    
    /**
     * Concatenates the buffered audio chunks and ships them to Aliyun under
     * the session's traceId. Recognition results arrive via the async callback
     * registered in [createAliyunSession], not from this call.
     *
     * @param isFinal true when flushing the last batch of a recording.
     */
    private fun sendAudioToAliyun(session: WebSocketSession, sessionInfo: MobileSessionInfo, isFinal: Boolean = false) {
        try {
            val traceId = sessionInfo.traceId
            if (traceId.isBlank()) {
                logger.error("traceId为空: session=${session.id}")
                sendError(session, "会话标识无效")
                return
            }
            
            // Merge all buffered chunks into one contiguous array.
            val totalSize = sessionInfo.audioBuffer.sumOf { it.size }
            val audioData = ByteArray(totalSize)
            var offset = 0
            
            for (chunk in sessionInfo.audioBuffer) {
                System.arraycopy(chunk, 0, audioData, offset, chunk.size)
                offset += chunk.size
            }
            
            sessionInfo.audioBuffer.clear()
            
            val result = aliyunWebSocketService.sendAudioData(traceId, audioData)
            if (!result.success) {
                logger.error("音频数据发送失败: session=${session.id}, traceId=$traceId, error=${result.error}")
                sendError(session, "音频数据发送失败: ${result.error}")
            } else {
                logger.debug("音频数据异步发送成功: session=${session.id}, traceId=$traceId, size=${audioData.size} bytes")
            }
            
        } catch (e: Exception) {
            logger.error("发送音频到阿里云失败: session=${session.id}", e)
            sendError(session, "音频处理失败: ${e.message}")
        }
    }
    
    /**
     * Forwards a recognition result to the plugin side. Interim and final
     * results both go through sendVoiceRecognitionResult, distinguished by
     * [isFinal].
     */
    private fun sendToPlugin(deviceId: String, text: String, confidence: Double, isFinal: Boolean) {
        try {
            smartInputWebSocketHandler.sendVoiceRecognitionResult(deviceId, text, isFinal)
            
            if (isFinal) {
                logger.info("发送最终识别结果到插件端: deviceId=$deviceId, text=$text")
            } else {
                logger.info("发送临时识别结果到插件端: deviceId=$deviceId, text=$text")
            }
        } catch (e: Exception) {
            logger.error("发送识别结果到插件端失败: deviceId=$deviceId", e)
        }
    }
    
    /** Generates a dash-free UUID used as the session trace id. */
    private fun generateTraceId(): String {
        return UUID.randomUUID().toString().replace("-", "")
    }
    
    /** Serializes [message] as JSON and sends it to the mobile client. */
    private fun sendMessage(session: WebSocketSession, message: VoiceMessage) {
        try {
            val jsonMessage = objectMapper.writeValueAsString(message)
            // BUGFIX: this is called both from handler threads and from the
            // async recognition callback; standard WebSocketSession does not
            // support concurrent sends, so serialize them and skip closed
            // sessions.
            synchronized(session) {
                if (session.isOpen) {
                    session.sendMessage(TextMessage(jsonMessage))
                }
            }
        } catch (e: Exception) {
            logger.error("发送消息到手机端失败", e)
        }
    }
    
    /** Sends a standardized error envelope to the mobile client. */
    private fun sendError(session: WebSocketSession, error: String) {
        sendMessage(session, VoiceMessage(
            type = "error",
            success = false,
            error = error
        ))
    }
    
    /**
     * Logs diagnostics about the first audio chunk of a recording so format
     * mismatches (e.g. WebM instead of raw PCM) are visible in the logs.
     */
    private fun analyzeAudioFormat(audioData: ByteArray, sessionId: String) {
        logger.info("📊 音频格式分析 - 会话: $sessionId")
        logger.info("   数据大小: ${audioData.size} bytes")
        logger.info("   前16字节: ${audioData.take(16).joinToString(" ") { "%02x".format(it) }}")
        
        val formatInfo = detectAudioFormat(audioData)
        logger.warn("⚠️ 检测到的音频格式: $formatInfo")
        
        // Aliyun expects raw PCM; warn loudly when the header looks like a
        // container format instead.
        if (isLikelyPCM(audioData)) {
            logger.info("✅ 音频数据可能是PCM格式")
        } else {
            logger.error("❌ 音频数据不是PCM格式！")
            logger.error("   阿里云要求: 16KHz, 16bit, 单声道, PCM格式")
            logger.error("   当前格式: $formatInfo")
            logger.error("   建议: 手机端需要将音频转换为PCM格式再发送")
        }
    }
    
    /**
     * Best-effort container-format detection via well-known header magic
     * numbers (RIFF/WAVE, EBML, ftyp/mdat/moov, OggS).
     */
    private fun detectAudioFormat(audioData: ByteArray): String {
        if (audioData.size < 16) return "数据太小"
        
        val header = audioData.take(12).toByteArray()
        
        return when {
            // WAV (RIFF....WAVE)
            header.sliceArray(0..3).contentEquals("RIFF".toByteArray()) && 
            header.sliceArray(8..11).contentEquals("WAVE".toByteArray()) -> "WAV格式"
            
            // WebM (Matroska/EBML magic 1A 45 DF A3)
            header[0] == 0x1A.toByte() && header[1] == 0x45.toByte() && 
            header[2] == 0xDF.toByte() && header[3] == 0xA3.toByte() -> "WebM格式"
            
            // MP4 family (box type at offset 4)
            header.sliceArray(4..7).let { 
                it.contentEquals("ftyp".toByteArray()) || 
                it.contentEquals("mdat".toByteArray()) || 
                it.contentEquals("moov".toByteArray())
            } -> "MP4格式"
            
            // OGG
            header.sliceArray(0..3).contentEquals("OggS".toByteArray()) -> "OGG格式"
            
            // No known header: possibly raw PCM.
            isLikelyPCM(audioData) -> "可能是PCM格式"
            
            else -> "未知格式 (前4字节: ${header.take(4).joinToString(" ") { "%02x".format(it) }})"
        }
    }
    
    /**
     * Heuristic PCM check: 16-bit PCM is an even number of bytes and carries
     * no recognizable container header. This cannot prove the data is PCM —
     * it only rules out known container formats.
     */
    private fun isLikelyPCM(audioData: ByteArray): Boolean {
        // 16-bit samples ⇒ even byte count.
        if (audioData.size % 2 != 0) return false
        
        // BUGFIX: the header probes below index the first 4 bytes; tiny even
        // buffers (size 2) previously threw IndexOutOfBoundsException. Too
        // short to carry a known header ⇒ treat as headerless.
        if (audioData.size < 4) return true
        
        val header = audioData.take(12)
        val hasFileHeader = header.toByteArray().let { h ->
            h.sliceArray(0..3).contentEquals("RIFF".toByteArray()) ||
            h[0] == 0x1A.toByte() && h[1] == 0x45.toByte() ||
            h.sliceArray(0..3).contentEquals("OggS".toByteArray())
        }
        
        return !hasFileHeader
    }
} 