package com.smartinput.voice.application.service

import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
import com.fasterxml.jackson.module.kotlin.readValue
import jakarta.websocket.*
import jakarta.websocket.CloseReason
import org.slf4j.LoggerFactory
import java.net.URI
import java.nio.ByteBuffer
import java.util.concurrent.CountDownLatch
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicReference

@ClientEndpoint
class AliyunWebSocketClient(
    private val token: String,
    private val appKey: String,
    private val onMessage: (String) -> Unit, // NOTE(review): never invoked inside this class — confirm callers rely on it
    private val onError: (String) -> Unit,
    private val onClose: (Int, String) -> Unit,
    private val onRecognitionResult: ((String, Boolean) -> Unit)? = null, // async per-result callback (text, isFinal)
    private val onTranscriptionStarted: (() -> Unit)? = null // fired once the TranscriptionStarted event arrives
) {
    private val logger = LoggerFactory.getLogger(AliyunWebSocketClient::class.java)
    private val objectMapper = jacksonObjectMapper()

    private var session: Session? = null

    private val isConnected = AtomicBoolean(false)
    private val sessionStarted = AtomicBoolean(false)
    private val transcriptionStarted = AtomicBoolean(false)

    // Written on the WebSocket callback thread, read from caller threads;
    // every access is guarded by synchronized(recognitionResults).
    private val recognitionResults = mutableListOf<RecognitionResult>()

    // Released when TranscriptionCompleted has been handled.
    private val resultLatch = CountDownLatch(1)

    // Released when TranscriptionStarted arrives. Fixes waitForTranscriptionStarted(),
    // which previously awaited resultLatch and therefore waited for *completion*,
    // not for the start event.
    private val startedLatch = CountDownLatch(1)

    // Audio buffered until TranscriptionStarted is received;
    // guarded by synchronized(audioDataCache).
    private val audioDataCache = mutableListOf<ByteArray>()
    private var currentTaskId: String? = null
    private var currentMessageId: String? = null

    data class RecognitionResult(
        val text: String,
        val confidence: Double,
        val isFinal: Boolean
    )

    // Aliyun real-time speech recognition message envelope.
    data class AliyunMessage(
        val header: AliyunHeader,
        val payload: Any? = null // Any: the payload may arrive as a JSON object (Map) or a string
    )

    data class AliyunHeader(
        val message_id: String,
        val task_id: String,
        val namespace: String,
        val name: String,
        val appkey: String? = null, // responses from Aliyun may omit the appkey
        val status: Int = 20000000,
        val status_message: String? = null,
        val status_text: String? = null // some responses carry status_text instead of status_message
    )

    // Aliyun response envelope (payload serialized as a string).
    data class AliyunResponse(
        val header: AliyunHeader,
        val payload: String? = null
    )

    // Payload of SentenceEnd / TranscriptionResultChanged events.
    data class SentenceEndPayload(
        val index: Int? = null,
        val time: Long? = null,
        val result: String? = null,
        val confidence: Double? = null,
        val words: List<Any>? = null,
        val status: Int? = null,
        val gender: String? = null,
        val begin_time: Long? = null,
        val fixed_result: String? = null,
        val unfixed_result: String? = null,
        val stash_result: Map<String, Any>? = null,
        val audio_extra_info: String? = null,
        val sentence_id: String? = null,
        val gender_score: Double? = null
    )

    data class StartTranscriptionRequest(
        val header: AliyunHeader,
        val payload: StartTranscriptionPayload
    )

    data class StartTranscriptionPayload(
        val format: String = "wav",
        val sample_rate: Int = 16000,
        val enable_intermediate_result: Boolean = true,
        val enable_punctuation_prediction: Boolean = true,
        val enable_inverse_text_normalization: Boolean = false,
        val enable_voice_detection: Boolean = true, // voice activity detection helps isolate valid speech
        val disfluency: Boolean = false,
        val vad_mode: String = "2" // VAD mode: 0=off, 1=strict, 2=normal, 3=loose
    )

    data class StopTranscriptionRequest(
        val header: AliyunHeader
    )

    data class AudioDataRequest(
        val header: AliyunHeader,
        val payload: String // Base64-encoded audio data
    )

    /**
     * Opens the WebSocket connection to the Aliyun NLS gateway.
     * @return true when the connection was established, false on any failure.
     */
    fun connect(): Boolean {
        return try {
            // Public endpoint per the Aliyun real-time ASR docs:
            // wss://nls-gateway-cn-shanghai.aliyuncs.com/ws/v1?token=<your token>
            val uri = URI("wss://nls-gateway-cn-shanghai.aliyuncs.com/ws/v1?token=$token")
            logger.info("连接阿里云WebSocket: $uri")
            val container = jakarta.websocket.ContainerProvider.getWebSocketContainer()

            // Connection limits / idle timeout.
            container.defaultMaxSessionIdleTimeout = 30000
            container.defaultMaxTextMessageBufferSize = 8192
            container.defaultMaxBinaryMessageBufferSize = 8192

            session = container.connectToServer(this, uri)
            true
        } catch (e: Exception) {
            logger.error("连接阿里云WebSocket失败", e)
            false
        }
    }

    @OnOpen
    fun onOpen(session: Session) {
        this.session = session
        isConnected.set(true)

        // Log the negotiated connection parameters for diagnostics.
        logger.info("🔗 阿里云WebSocket连接已建立")
        logger.info("   连接ID: ${session.id}")
        logger.info("   连接URI: ${session.requestURI}")
        logger.info("   协议版本: ${session.protocolVersion}")
        logger.info("   最大空闲时间: ${session.maxIdleTimeout}ms")
        logger.info("   最大二进制消息大小: ${session.maxBinaryMessageBufferSize} bytes")
        logger.info("   最大文本消息大小: ${session.maxTextMessageBufferSize} bytes")

        // Report whether TLS is in effect.
        try {
            val isSecure = session.isSecure
            logger.info("   SSL连接: ${if (isSecure) "✅ 安全连接" else "⚠️ 非安全连接"}")
        } catch (e: Exception) {
            logger.warn("无法检查SSL状态: ${e.message}")
        }

        logger.info("✅ WebSocket连接建立完成，准备开始语音识别")
    }

    /**
     * Dispatches incoming Aliyun protocol events by header.name.
     * Runs on the WebSocket container's callback thread.
     */
    @OnMessage
    fun onMessage(message: String) {
        try {
            logger.info("收到阿里云消息: $message")

            val aliyunMessage = objectMapper.readValue<AliyunMessage>(message)

            when (aliyunMessage.header.name) {
                "TranscriptionStarted" -> {
                    transcriptionStarted.set(true)
                    sessionStarted.set(true)
                    // Unblock waitForTranscriptionStarted() callers.
                    startedLatch.countDown()
                    logger.info("阿里云语音识别已开始，现在可以发送音频数据")
                    // Flush any audio buffered before the start event.
                    flushCachedAudioData()
                    onTranscriptionStarted?.invoke()
                }
                "SentenceBegin" -> {
                    logger.info("句子开始识别")
                }
                "TranscriptionResultChanged" -> {
                    handleTranscriptionResult(aliyunMessage, false)
                }
                "SentenceEnd" -> {
                    handleSentenceEnd(aliyunMessage)
                }
                "TranscriptionCompleted" -> {
                    handleTranscriptionResult(aliyunMessage, true)
                    resultLatch.countDown()
                    logger.info("语音识别完成，准备关闭WebSocket连接")
                    // Close asynchronously with a short delay so the final result is
                    // fully processed before the session is torn down.
                    scheduleDisconnect(100)
                }
                "TranscriptionFailed" -> {
                    logger.error("阿里云语音识别失败: ${aliyunMessage.payload}")
                    onError("语音识别失败: ${aliyunMessage.payload}")
                    logger.info("语音识别失败，关闭WebSocket连接")
                    scheduleDisconnect(50)
                }
                "TaskFailed" -> {
                    logger.error("阿里云任务失败: ${aliyunMessage.payload}")
                    onError("任务失败: ${aliyunMessage.payload}")
                    logger.info("语音识别任务失败，关闭WebSocket连接")
                    scheduleDisconnect(50)
                }
                "EVENT_ASR_ERROR" -> {
                    logger.error("阿里云ASR错误事件: ${aliyunMessage.payload}")
                    handleAsrError(aliyunMessage)
                    scheduleDisconnect(50)
                }
                else -> {
                    logger.info("收到未处理的消息类型: ${aliyunMessage.header.name}, payload: ${aliyunMessage.payload}")
                }
            }
        } catch (e: Exception) {
            logger.error("处理阿里云消息失败", e)
            onError("消息处理失败: ${e.message}")
        }
    }

    @OnError
    fun onError(error: Throwable) {
        logger.error("阿里云WebSocket错误", error)
        onError("WebSocket错误: ${error.message}")
    }

    @OnClose
    fun onClose(session: Session, reason: CloseReason) {
        logger.info("阿里云WebSocket连接关闭: ${reason.closeCode}, ${reason.reasonPhrase}")
        isConnected.set(false)
        onClose(reason.closeCode.code, reason.reasonPhrase)
    }

    /**
     * Schedules disconnect() on a one-shot daemon timer.
     * The previous implementation created a new non-daemon java.util.Timer for every
     * event and never cancelled it, leaking one live thread per completion/failure.
     */
    private fun scheduleDisconnect(delayMillis: Long) {
        val timer = java.util.Timer("aliyun-ws-disconnect", true) // daemon: never blocks JVM shutdown
        timer.schedule(object : java.util.TimerTask() {
            override fun run() {
                try {
                    disconnect()
                } finally {
                    timer.cancel() // release the timer thread after the single task
                }
            }
        }, delayMillis)
    }

    /**
     * Converts a raw event payload (Map from Jackson, or a JSON string) into a
     * [SentenceEndPayload]; returns null for unrecognized payload types.
     */
    private fun toSentencePayload(payload: Any): SentenceEndPayload? = when (payload) {
        is Map<*, *> -> objectMapper.convertValue(payload, SentenceEndPayload::class.java)
        is String -> objectMapper.readValue(payload, SentenceEndPayload::class.java)
        else -> {
            logger.warn("未知的payload类型: ${payload::class.java}")
            null
        }
    }

    /**
     * Records an intermediate or final recognition result and notifies the
     * [onRecognitionResult] callback. Blank results are ignored.
     */
    private fun handleTranscriptionResult(message: AliyunMessage, isFinal: Boolean) {
        try {
            val payload = message.payload ?: return
            val sentencePayload = toSentencePayload(payload) ?: return

            val text = sentencePayload.result ?: ""
            val confidence = sentencePayload.confidence ?: 0.0

            if (text.isNotBlank()) {
                synchronized(recognitionResults) {
                    recognitionResults.add(RecognitionResult(text, confidence, isFinal))
                }
                logger.info("语音识别结果: $text (置信度: $confidence, 最终: $isFinal)")
                onRecognitionResult?.invoke(text, isFinal)
            }
        } catch (e: Exception) {
            logger.error("处理语音识别结果失败", e)
        }
    }

    /**
     * Records a SentenceEnd result (always final) and notifies the callback.
     */
    private fun handleSentenceEnd(message: AliyunMessage) {
        try {
            val payload = message.payload ?: return
            val sentencePayload = toSentencePayload(payload) ?: return

            val text = sentencePayload.result ?: ""
            val confidence = sentencePayload.confidence ?: 0.0

            if (text.isNotBlank()) {
                synchronized(recognitionResults) {
                    recognitionResults.add(RecognitionResult(text, confidence, true))
                }
                logger.info("句子结束识别结果: $text (置信度: $confidence)")
                onRecognitionResult?.invoke(text, true)
            }
        } catch (e: Exception) {
            logger.error("处理句子结束结果失败", e)
        }
    }

    /**
     * Sends the StartTranscription control message (Text Frame).
     * Must be sent before any audio; audio sent earlier is cached until
     * TranscriptionStarted arrives.
     */
    fun sendStartTranscription(taskId: String, messageId: String) {
        val request = StartTranscriptionRequest(
            header = AliyunHeader(
                message_id = messageId,
                task_id = taskId,
                namespace = "SpeechTranscriber",
                name = "StartTranscription",
                appkey = appKey
            ),
            payload = StartTranscriptionPayload(
                format = "wav",
                sample_rate = 16000,
                enable_intermediate_result = true,
                enable_punctuation_prediction = true,
                enable_inverse_text_normalization = false,
                enable_voice_detection = false, // NOTE(review): overrides the data-class default of true — confirm intended
                disfluency = false
            )
        )

        sendMessage(request)
        logger.info("发送StartTranscription请求: taskId=$taskId, messageId=$messageId")
    }

    /** Sends the StopTranscription control message (Text Frame). */
    fun sendStopTranscription(taskId: String, messageId: String) {
        val request = StopTranscriptionRequest(
            header = AliyunHeader(
                message_id = messageId,
                task_id = taskId,
                namespace = "SpeechTranscriber",
                name = "StopTranscription",
                appkey = appKey
            )
        )

        sendMessage(request)
        logger.info("发送StopTranscription请求: taskId=$taskId, messageId=$messageId")
    }

    /**
     * Queues or sends one chunk of audio. A WAV header (first 44 bytes) is stripped
     * when present; data arriving before TranscriptionStarted is cached and flushed
     * later. Expected format per the Aliyun docs: 16 kHz, 16-bit, mono PCM.
     */
    fun sendAudioData(taskId: String, messageId: String, audioData: ByteArray) {
        // Remember the first task/message id pair for flushing the cache later.
        if (currentTaskId == null || currentMessageId == null) {
            currentTaskId = taskId
            currentMessageId = messageId
        }

        logger.debug("原始音频数据大小: ${audioData.size} bytes")

        // Strip a leading WAV header so only raw PCM is sent.
        // assumes a fixed 44-byte canonical WAV header — TODO confirm upstream always
        // produces canonical headers (extended headers can be larger).
        val audioDataStart = 44
        val actualAudioData = if (audioData.size > audioDataStart) {
            val pcmData = audioData.sliceArray(audioDataStart until audioData.size)
            logger.trace("音频格式处理: 原始大小=${audioData.size}bytes, 去除WAV头后=${pcmData.size}bytes")
            pcmData
        } else {
            // Fewer than 44 bytes: presumably already headerless PCM.
            logger.debug("音频数据小于44字节，可能已是PCM格式: ${audioData.size}bytes")
            audioData
        }

        if (actualAudioData.isEmpty()) {
            logger.warn("❌ PCM音频数据为空，跳过发送: taskId=$taskId, messageId=$messageId")
            return
        }

        // Sanity checks against the expected 16 kHz / 16-bit / mono format.
        val expectedBytesPerSecond = 16000 * 2 // 16000 samples/s * 2 bytes per 16-bit sample
        val durationSeconds = actualAudioData.size.toDouble() / expectedBytesPerSecond

        if (actualAudioData.size % 2 != 0) {
            logger.warn("⚠️ 音频格式警告：PCM数据大小不是偶数，可能不是16bit格式: ${actualAudioData.size}bytes")
        }

        logger.debug("✅ 音频格式验证: 大小=${actualAudioData.size}bytes, 预计时长=${String.format("%.2f", durationSeconds)}秒")
        logger.trace("📊 音频参数: 16KHz采样率, 16bit位数, 单声道, PCM格式")

        // Protocol ordering: audio may only be sent after TranscriptionStarted
        // (connect → StartTranscription → TranscriptionStarted → binary audio frames).
        if (!transcriptionStarted.get()) {
            synchronized(audioDataCache) {
                audioDataCache.add(actualAudioData)
                logger.warn("⚠️ 协议时序错误：尚未收到TranscriptionStarted消息，缓存音频数据等待: taskId=$taskId, messageId=$messageId")
                logger.debug("缓存详情: 缓存数量=${audioDataCache.size}, 当前数据大小=${actualAudioData.size} bytes")
            }
            return
        }

        sendAudioDataInternal(actualAudioData, taskId, messageId)
    }

    /**
     * Splits PCM data into fixed-size packets and sends each as a WebSocket
     * Binary Frame. Packet size follows the Aliyun recommendation of 3200 bytes.
     */
    private fun sendAudioDataInternal(audioData: ByteArray, taskId: String, messageId: String) {
        try {
            val packetSize = 3200 // recommended packet size; 1600 is the fallback for poor networks
            var offset = 0
            var packetCount = 0

            while (offset < audioData.size) {
                val remainingSize = audioData.size - offset
                val currentPacketSize = minOf(packetSize, remainingSize)

                val audioPacket = audioData.sliceArray(offset until offset + currentPacketSize)

                if (!validateBinaryFrameFormat(audioPacket)) {
                    logger.error("❌ Binary Frame格式验证失败，跳过发送")
                    // BUGFIX: advance past the bad packet; the previous code hit
                    // `continue` without moving offset, spinning forever.
                    offset += currentPacketSize
                    continue
                }

                // Binary Frame (RFC 6455 opcode 0x2): application data only, no extra
                // protocol header; the container handles framing.
                val audioBuffer = ByteBuffer.wrap(audioPacket)
                audioBuffer.position(0)
                audioBuffer.limit(audioPacket.size)

                session?.basicRemote?.sendBinary(audioBuffer)

                offset += currentPacketSize
                packetCount++

                logger.debug("✅ RFC 6455 Binary Frame #$packetCount: opcode=0x2, payload=${audioPacket.size}bytes, FIN=1")
                logger.trace("📦 Payload前16字节: ${audioPacket.take(16).joinToString(" ") { "%02x".format(it) }}")

                if (audioPacket.size > 65535) {
                    logger.warn("⚠️ 大型payload: ${audioPacket.size}bytes (>64KB) - 将使用8字节长度字段")
                }
            }

            logger.info("音频数据发送完成: taskId=$taskId, messageId=$messageId, 总大小=${audioData.size} bytes, 包数量=$packetCount")
        } catch (e: Exception) {
            logger.error("发送二进制音频数据失败", e)
            onError("发送音频数据失败: ${e.message}")
        }
    }

    /** Sends all audio cached before TranscriptionStarted, then clears the cache. */
    private fun flushCachedAudioData() {
        synchronized(audioDataCache) {
            if (audioDataCache.isNotEmpty() && currentTaskId != null && currentMessageId != null) {
                logger.info("开始发送缓存的音频数据，缓存数量: ${audioDataCache.size}")
                audioDataCache.forEach { audioData ->
                    sendAudioDataInternal(audioData, currentTaskId!!, currentMessageId!!)
                }
                audioDataCache.clear()
                logger.info("缓存的音频数据发送完成")
            }
        }
    }

    /** Serializes a control message as JSON and sends it as a Text Frame. */
    private fun sendMessage(message: Any) {
        try {
            val jsonMessage = objectMapper.writeValueAsString(message)
            // Protocol: control messages use Text Frames, audio uses Binary Frames.
            session?.basicRemote?.sendText(jsonMessage)
            logger.debug("发送Text Frame控制消息到阿里云: $jsonMessage")
        } catch (e: Exception) {
            logger.error("发送Text Frame消息失败", e)
            onError("发送消息失败: ${e.message}")
        }
    }

    /** @return true while both the flag and the underlying session report open. */
    fun isConnected(): Boolean {
        return isConnected.get() && session?.isOpen == true
    }

    /**
     * Validates Binary Frame payload per RFC 6455 §5.6:
     * - opcode 0x2, FIN=1 (handled by the container)
     * - payload must be non-empty application data
     */
    private fun validateBinaryFrameFormat(audioData: ByteArray): Boolean {
        if (audioData.isEmpty()) {
            logger.warn("❌ RFC 6455违规：Binary Frame payload不能为空")
            return false
        }

        // Log which RFC 6455 length encoding the container will use.
        when {
            audioData.size <= 125 -> {
                logger.trace("✅ RFC 6455符合：小payload (${audioData.size}<=125) - 使用7bit长度")
            }
            audioData.size <= 65535 -> {
                logger.trace("✅ RFC 6455符合：中等payload (${audioData.size}<=65535) - 使用16bit长度")
            }
            else -> {
                logger.trace("✅ RFC 6455符合：大payload (${audioData.size}>65535) - 使用64bit长度")
            }
        }

        return true
    }

    /**
     * Logs a detailed analysis of an ASR error event based on the official Aliyun
     * error-code documentation and forwards a summary to [onError].
     */
    private fun handleAsrError(message: AliyunMessage) {
        try {
            val header = message.header
            val errorCode = header.status.toString()
            val errorMessage = header.status_message ?: "未知错误"

            logger.error("🚨 阿里云ASR错误详情:")
            logger.error("   错误码: $errorCode")
            logger.error("   错误信息: $errorMessage")
            logger.error("   TaskId: ${header.task_id}")
            logger.error("   MessageId: ${header.message_id}")

            // Known error codes from the Aliyun documentation.
            val errorAnalysis = when (errorCode) {
                "40000002" -> "MESSAGE_INVALID: message_id格式错误，请确保为32位十六进制字符"
                "41010104" -> "TOO_LONG_SPEECH: 音频时长超过60秒限制，建议使用录音文件识别"
                "41050103" -> "AUDIO_DURATION_TOO_LONG: 音频时长超过12小时，请切分音频文件"
                "240062" -> "DEFAULT_NLS_ERROR: 服务端内部错误，请检查网络连接和参数配置"
                else -> "未知错误码，请参考阿里云错误码文档"
            }

            logger.error("📋 错误分析: $errorAnalysis")

            // Remediation hints keyed on the error-code prefix.
            when {
                errorCode.startsWith("4000") -> {
                    logger.error("💡 建议: 检查message_id和task_id格式，确保为32位十六进制")
                }
                errorCode.startsWith("4101") -> {
                    logger.error("💡 建议: 检查音频格式和时长，确保符合16KHz/16bit/单声道要求")
                }
                errorCode.startsWith("4105") -> {
                    logger.error("💡 建议: 音频文件过大，使用ffmpeg切分音频文件")
                }
                else -> {
                    logger.error("💡 建议: 检查Token有效性、网络连接和服务配置")
                }
            }

            onError("ASR错误: [$errorCode] $errorMessage - $errorAnalysis")

        } catch (e: Exception) {
            logger.error("处理ASR错误信息失败", e)
            onError("ASR错误处理失败: ${e.message}")
        }
    }

    /** Closes the session, resets state flags and drops any cached audio. */
    fun disconnect() {
        try {
            session?.close()
            isConnected.set(false)
            transcriptionStarted.set(false)
            clearCache()
            logger.info("阿里云WebSocket连接已断开")
        } catch (e: Exception) {
            logger.error("断开阿里云WebSocket连接失败", e)
        }
    }

    private fun clearCache() {
        synchronized(audioDataCache) {
            audioDataCache.clear()
            currentTaskId = null
            currentMessageId = null
            logger.debug("已清理音频数据缓存")
        }
    }

    /**
     * Blocks until the TranscriptionStarted event arrives or the timeout elapses.
     * @return true when transcription has started, false on timeout/interrupt.
     */
    fun waitForTranscriptionStarted(timeoutSeconds: Int): Boolean {
        return try {
            // Fast path when the event already arrived; otherwise await the
            // dedicated started-latch (not resultLatch, which only opens on completion).
            transcriptionStarted.get() || startedLatch.await(timeoutSeconds.toLong(), TimeUnit.SECONDS)
        } catch (e: InterruptedException) {
            Thread.currentThread().interrupt() // preserve the caller's interrupt status
            logger.error("等待TranscriptionStarted超时", e)
            false
        }
    }

    /**
     * Blocks until TranscriptionCompleted is handled (or the timeout elapses) and
     * returns a snapshot of all results collected so far.
     */
    fun waitForRecognitionResult(timeoutSeconds: Int): List<RecognitionResult> {
        return try {
            resultLatch.await(timeoutSeconds.toLong(), TimeUnit.SECONDS)
            synchronized(recognitionResults) { recognitionResults.toList() }
        } catch (e: InterruptedException) {
            Thread.currentThread().interrupt() // preserve the caller's interrupt status
            logger.error("等待识别结果超时", e)
            emptyList()
        }
    }

    /** @return the most recently recorded result, or null if none yet. */
    fun getLatestResult(): RecognitionResult? {
        return synchronized(recognitionResults) { recognitionResults.lastOrNull() }
    }

    fun clearResults() {
        synchronized(recognitionResults) { recognitionResults.clear() }
    }

    fun isSessionStarted(): Boolean {
        return sessionStarted.get()
    }

    fun isTranscriptionStarted(): Boolean {
        return transcriptionStarted.get()
    }

    /** Audio may be sent only while connected AND after TranscriptionStarted. */
    fun canSendAudioData(): Boolean {
        return isConnected() && transcriptionStarted.get()
    }

    fun stopSession() {
        sessionStarted.set(false)
    }
}