package com.example.voicechat

import android.content.Context
import android.content.pm.PackageManager
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import okhttp3.*
import java.util.concurrent.atomic.AtomicBoolean

/**
 * Streams microphone audio to the iFlytek (Xunfei) IAT speech-to-text
 * WebSocket API and forwards transcriptions to a [VoiceRecognitionListener].
 *
 * Lifecycle: [startVoiceRecognition] opens an authenticated WebSocket, starts
 * [AudioRecorder], and sends audio frames (status 0 = first, 1 = continue);
 * [release] sends the terminating frame (status 2), stops recording, and
 * closes the socket. One recognition session may run at a time.
 *
 * NOTE(review): the API credentials below are committed to source. Anything
 * compiled into an APK is effectively public — move these to secure
 * configuration and rotate the keys.
 */
class VoiceRecognitionManager(private val context: Context) {

    /** Callback contract: exactly one of these is invoked per event. */
    interface VoiceRecognitionListener {
        /** Called with a (possibly partial) transcription of the spoken audio. */
        fun onResult(result: String)
        /** Called with a human-readable error description when recognition fails. */
        fun onError(error: String)
    }

    // Active socket for the current session; null when idle.
    private var webSocket: WebSocket? = null
    // Session guard, read from the recorder callback thread as well as callers.
    // Fixed: was `var` — the AtomicBoolean reference itself must never be swapped.
    private val isRecognizing = AtomicBoolean(false)
    private var currentListener: VoiceRecognitionListener? = null
    private val audioRecorder = AudioRecorder(context)

    companion object {
        private const val HOST_URL = "https://iat-api.xfyun.cn/v2/iat"
        // NOTE(review): hard-coded secrets — see class KDoc.
        private const val APP_ID = "38c280fa"
        private const val API_KEY = "bb08bdeb17fbe15758240eeb6f72e155"
        private const val API_SECRET = "ODQxYzkwNjRmNjEzYjhjMzg0ZGE2ZTNj"

        /**
         * True when all credentials are non-empty.
         * NOTE(review): vacuously true while the credentials are compile-time
         * constants; only meaningful once they come from configuration.
         */
        private fun checkConfig(): Boolean {
            return APP_ID.isNotEmpty() &&
                   API_KEY.isNotEmpty() &&
                   API_SECRET.isNotEmpty()
        }
    }

    /**
     * Starts a voice recognition session.
     *
     * Results are delivered via [VoiceRecognitionListener.onResult]; any
     * failure (missing config, concurrent session, missing RECORD_AUDIO
     * permission, connection/parsing errors) via
     * [VoiceRecognitionListener.onError].
     *
     * NOTE(review): this method blocks its caller for ~500 ms (see below);
     * avoid invoking it on the main thread — TODO confirm call sites.
     */
    fun startVoiceRecognition(listener: VoiceRecognitionListener) {
        LogUtils.e("VoiceRecognitionManager", "开始语音识别")
        if (!checkConfig()) {
            listener.onError("请先配置讯飞 API 密钥")
            return
        }

        if (isRecognizing.get()) {
            LogUtils.e("VoiceRecognitionManager", "语音识别正在进行中")
            listener.onError("语音识别正在进行中")
            return
        }

        if (!hasRecordPermission()) {
            LogUtils.e("VoiceRecognitionManager", "没有录音权限")
            listener.onError("没有录音权限")
            return
        }

        currentListener = listener
        isRecognizing.set(true)

        try {
            // Anonymous WebIATWS subclass so we can hook result/error/close
            // events while keeping whatever processing the superclass does.
            val webIatWs = object : WebIATWS() {
                override fun onMessage(webSocket: WebSocket, text: String) {
                    super.onMessage(webSocket, text)
                    // Each server message carries a JSON recognition payload.
                    handleRecognitionResult(text)
                }

                override fun onFailure(webSocket: WebSocket, t: Throwable, response: Response?) {
                    super.onFailure(webSocket, t, response)
                    LogUtils.e("VoiceRecognitionManager", "WebSocket失败: ${t.message}")
                    currentListener?.onError("连接失败: ${t.message}")
                    release()
                }

                override fun onClosed(webSocket: WebSocket, code: Int, reason: String) {
                    super.onClosed(webSocket, code, reason)
                    LogUtils.e("VoiceRecognitionManager", "WebSocket关闭: code=$code, reason=$reason")
                    isRecognizing.set(false)
                }
            }

            // Build the HMAC-signed authentication URL, then switch the scheme
            // to the WebSocket equivalent (http->ws, https->wss).
            val authUrl = WebIATWS.getAuthUrl(HOST_URL, API_KEY, API_SECRET)
            val client = OkHttpClient.Builder()
                .retryOnConnectionFailure(true)
                .build()

            val url = authUrl.replace("http://", "ws://").replace("https://", "wss://")
            LogUtils.e("VoiceRecognitionManager", "WebSocket URL: $url")
            val request = Request.Builder().url(url).build()

            val ws = client.newWebSocket(request, webIatWs)
            webSocket = ws

            // NOTE(review): fixed delay as a stand-in for "connection
            // established" — fragile on slow networks and blocks the caller.
            // Prefer starting the recorder from an onOpen callback; kept as-is
            // because WebIATWS's own onOpen behavior is not visible here.
            Thread.sleep(500)
            startRecording(ws)
        } catch (e: Exception) {
            isRecognizing.set(false)
            currentListener?.onError("初始化失败: ${e.message}")
        }
    }

    /**
     * Starts the audio recorder and streams each captured buffer to [ws],
     * tagging the first buffer as the session-opening frame (status 0) and
     * the rest as continuation frames (status 1).
     */
    private fun startRecording(ws: WebSocket) {
        if (!hasRecordPermission()) {
            currentListener?.onError("没有录音权限")
            return
        }

        LogUtils.e("VoiceRecognitionManager", "开始录音")
        var isFirstFrame = true
        audioRecorder.startRecording { audioData ->
            // Session may have been released while the recorder callback was in flight.
            if (!isRecognizing.get()) {
                LogUtils.e("VoiceRecognitionManager", "识别已停止，不再发送音频数据")
                return@startRecording
            }

            try {
                if (audioData.isEmpty()) {
                    LogUtils.e("VoiceRecognitionManager", "收到空的音频数据")
                    return@startRecording
                }

                val frame = if (isFirstFrame) {
                    isFirstFrame = false
                    LogUtils.e("VoiceRecognitionManager", "发送第一帧")
                    createFirstFrame(audioData)
                } else {
                    LogUtils.e("VoiceRecognitionManager", "发送后续帧")
                    createContinueFrame(audioData)
                }
                LogUtils.e("VoiceRecognitionManager", "发送音频数据: ${audioData.size} bytes")
                ws.send(frame)
            } catch (e: Exception) {
                LogUtils.e("VoiceRecognitionManager", "发送音频数据失败", e)
                currentListener?.onError("发送音频数据失败: ${e.message}")
                release()
            }
        }
    }

    /**
     * Builds the session-opening JSON frame (status 0) containing the app id,
     * recognition parameters (Mandarin Chinese, dynamic correction "wpgs"),
     * and the first base64-encoded audio chunk.
     */
    private fun createFirstFrame(audioData: ByteArray): String {
        return WebIATWS.json.toJson(mapOf(
            "common" to mapOf("app_id" to APP_ID),
            "business" to mapOf(
                "language" to "zh_cn",
                "domain" to "iat",
                "accent" to "mandarin",
                "dwa" to "wpgs"
            ),
            "data" to mapOf(
                "status" to 0,
                "format" to "audio/L16;rate=16000",
                "encoding" to "raw",
                "audio" to android.util.Base64.encodeToString(audioData, android.util.Base64.NO_WRAP)
            )
        ))
    }

    /** Builds a continuation JSON frame (status 1) carrying one audio chunk. */
    private fun createContinueFrame(audioData: ByteArray): String {
        return WebIATWS.json.toJson(mapOf(
            "data" to mapOf(
                "status" to 1,
                "format" to "audio/L16;rate=16000",
                "encoding" to "raw",
                "audio" to android.util.Base64.encodeToString(audioData, android.util.Base64.NO_WRAP)
            )
        ))
    }

    /**
     * Parses one server message: forwards any recognized words to the
     * listener, reports non-zero server codes as errors, and releases the
     * session when the server marks the final frame (status 2).
     */
    private fun handleRecognitionResult(text: String) {
        LogUtils.e("VoiceRecognitionManager", "收到识别结果: $text")
        try {
            val resp = WebIATWS.json.fromJson(text, WebIATWS.ResponseData::class.java)
            LogUtils.e("VoiceRecognitionManager", "解析响应: code=${resp.code}, message=${resp.message}")

            if (resp.code != 0) {
                LogUtils.e("VoiceRecognitionManager", "识别错误: ${resp.message}")
                currentListener?.onError("识别错误: ${resp.message}")
                release()
                return
            }

            resp.data?.let { data ->
                val words = data.result?.ws
                if (words != null && words.isNotEmpty()) {
                    val recognizedText = data.result?.getText()
                    if (recognizedText != null && recognizedText.text.isNotEmpty()) {
                        LogUtils.e("VoiceRecognitionManager", "识别结果: ${recognizedText.text}")
                        currentListener?.onResult(recognizedText.text)
                    }
                }

                // Fixed: release on the final frame even when it carries no
                // words. Previously this check was nested inside the non-empty
                // `ws` branch, so an empty final frame leaked the socket and
                // left the recorder running.
                if (data.status == 2) {
                    LogUtils.e("VoiceRecognitionManager", "收到最后一帧，准备释放资源")
                    release()
                }
            }
        } catch (e: Exception) {
            LogUtils.e("VoiceRecognitionManager", "解析结果失败", e)
            currentListener?.onError("解析结果失败: ${e.message}")
            release()
        }
    }

    /**
     * Ends the session: sends the terminating frame (status 2) so the server
     * can flush its final result, then stops recording and closes the socket.
     * Safe to call multiple times; idempotent once [webSocket] is null.
     *
     * NOTE(review): blocks the calling thread for ~1 s while the server
     * processes the final frame — must not run on the main thread. TODO
     * confirm which threads invoke this (it is also called from WebSocket
     * callbacks and the recorder callback).
     */
    fun release() {
        LogUtils.e("VoiceRecognitionManager", "开始释放资源")
        isRecognizing.set(false)

        try {
            webSocket?.let { ws ->
                // Terminating frame: status 2, no audio payload.
                val lastFrame = WebIATWS.json.toJson(mapOf(
                    "data" to mapOf(
                        "status" to 2,
                        "format" to "audio/L16;rate=16000",
                        "encoding" to "raw",
                        "audio" to ""
                    )
                ))
                LogUtils.e("VoiceRecognitionManager", "发送最后一帧数据")
                ws.send(lastFrame)

                // Grace period so the server can deliver its final result
                // before we close the connection.
                Thread.sleep(1000)
            }
        } catch (e: Exception) {
            LogUtils.e("VoiceRecognitionManager", "发送最后一帧失败", e)
        } finally {
            audioRecorder.stopRecording()
            webSocket?.close(1000, "正常结束")
            webSocket = null
            currentListener = null
            LogUtils.e("VoiceRecognitionManager", "资源释放完成")
        }
    }

    /** True when the RECORD_AUDIO runtime permission has been granted. */
    private fun hasRecordPermission(): Boolean {
        return ContextCompat.checkSelfPermission(
            context,
            android.Manifest.permission.RECORD_AUDIO
        ) == PackageManager.PERMISSION_GRANTED
    }
}