package com.amu.aidemo.android

import android.content.Context
import android.util.Log
import com.amu.aidemo.android.audio.AudioPlayer
import com.amu.aidemo.android.audio.AudioRecorder
import com.amu.aidemo.android.xfyun.XfyunIatClient
import com.amu.aidemo.android.xfyun.XfyunTtsClient
import com.amu.aidemo.android.zhipu.ZhipuAiClient
import com.amu.aidemo.android.zhipu.ZhipuMessage
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.Job
import kotlinx.coroutines.delay
import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext
import java.nio.ByteBuffer
import java.nio.ByteOrder

// Logcat tag used by all logging in this file.
private const val TAG = "ModuleTestManager"

/**
 * Module test manager.
 *
 * Exercises the three voice-pipeline stages in isolation so each can be
 * verified on its own:
 *  - ASR: Xfyun IAT streaming speech recognition ([startAsr] / [stopAsr])
 *  - LLM: Zhipu AI streaming chat completion ([testLlm])
 *  - TTS: Xfyun streaming speech synthesis with local playback ([testTts])
 *
 * Threading model: public entry points launch onto [scope] (main dispatcher);
 * audio writing and player initialization run on [Dispatchers.IO]. Fields
 * touched from both sides ([isTtsSynthesizing], [isFirstFrame]) are marked
 * @Volatile, and [ttsAudioBuffer] is only accessed under its own lock.
 */
class ModuleTestManager(
    private val context: Context,
    // Xfyun speech recognition (IAT) credentials
    private val xfyunIatAppId: String,
    private val xfyunIatApiKey: String,
    private val xfyunIatApiSecret: String,
    // Xfyun speech synthesis (TTS) credentials
    private val xfyunTtsAppId: String,
    private val xfyunTtsApiKey: String,
    private val xfyunTtsApiSecret: String,
    // Zhipu AI credentials
    private val zhipuApiKey: String
) {
    private var audioPlayer: AudioPlayer? = null
    private var audioRecorder: AudioRecorder? = null
    private var xfyunIatClient: XfyunIatClient? = null
    private var xfyunTtsClient: XfyunTtsClient? = null
    private var zhipuClient: ZhipuAiClient? = null

    // Keep a handle on the root Job so release() can cancel every coroutine
    // started by this manager; previously the scope's Job was never cancelled
    // and pending coroutines (e.g. an ASR timeout) outlived release().
    private val rootJob = Job()
    private val scope = CoroutineScope(Dispatchers.Main + rootJob)

    // Chunks of synthesized TTS audio: appended by the websocket callback,
    // drained by audioWriteJob. Every access must hold synchronized(ttsAudioBuffer).
    private val ttsAudioBuffer = mutableListOf<ByteArray>()
    private var audioWriteJob: Job? = null

    // Set from main-dispatcher callbacks, polled by the IO writer coroutine,
    // so it must be volatile for cross-thread visibility.
    @Volatile
    private var isTtsSynthesizing = false

    // Accumulated ASR transcription for the current recording session.
    private val recognizedText = StringBuilder()

    // Written from the recorder's audio-callback thread and reset from the
    // main thread in startAsr() -> volatile for cross-thread visibility.
    @Volatile
    private var isFirstFrame = true
    private var asrResultCallback: ((String) -> Unit)? = null
    private var asrTimeoutJob: Job? = null
    private val ASR_MAX_DURATION = 60_000L  // Maximum recording duration: 60 s

    init {
        // The player is reused across TTS tests and re-initialized per test.
        audioPlayer = AudioPlayer()

        // The Zhipu client carries no per-call state, so one instance suffices.
        zhipuClient = ZhipuAiClient(zhipuApiKey)
    }

    /**
     * Start ASR recording (with an optional realtime transcription callback).
     *
     * Tears down any previous IAT client/recorder, streams PCM16 audio to the
     * IAT websocket as it is captured, and arms a timeout that auto-stops the
     * recording after [ASR_MAX_DURATION].
     *
     * @param onRealtimeResult invoked on the main dispatcher with the full
     *        transcription so far, every time a partial/final result arrives.
     */
    fun startAsr(onRealtimeResult: ((String) -> Unit)? = null) {
        scope.launch {
            try {
                Log.d(TAG, "Starting ASR recording...")

                // Tear down any leftovers from a previous session.
                xfyunIatClient?.disconnect()
                audioRecorder?.release()

                recognizedText.clear()
                isFirstFrame = true
                asrResultCallback = onRealtimeResult

                // The recorder's chunk callback runs on the capture thread; it
                // reads the current xfyunIatClient field (assigned below,
                // before recording actually starts).
                audioRecorder = AudioRecorder(context).apply {
                    onAudioChunk = { audioData ->
                        // Convert FloatArray samples to PCM16 little-endian bytes.
                        val byteData = floatArrayToPcm16(audioData)
                        xfyunIatClient?.sendAudio(byteData, isFirst = isFirstFrame, isLast = false)
                        isFirstFrame = false
                    }
                }

                // Create the IAT websocket client and wire up its callbacks.
                xfyunIatClient = XfyunIatClient(xfyunIatAppId, xfyunIatApiKey, xfyunIatApiSecret).apply {
                    this.onResult = { text: String, isFinal: Boolean ->
                        // Append every result, partial or final.
                        recognizedText.append(text)
                        Log.d(TAG, "ASR ${if (isFinal) "final" else "partial"} result: $text, total: $recognizedText")

                        // Push the full transcription so far to the caller on
                        // the main dispatcher.
                        scope.launch {
                            asrResultCallback?.invoke(recognizedText.toString())
                        }
                    }
                    this.onError = { error: String ->
                        Log.e(TAG, "ASR error: $error")
                        audioRecorder?.stopRecording()
                    }
                }

                // Connect, give the websocket a moment, then start capturing.
                xfyunIatClient?.connect()
                delay(500) // wait for the connection to establish
                audioRecorder?.startRecording()

                // Arm the auto-stop timeout.
                startAsrTimeout {
                    Log.w(TAG, "⏰ ASR timeout, auto-stopping...")
                    stopAsr(
                        onResult = { result ->
                            asrResultCallback?.invoke("$result\n[录音超时自动停止]")
                        },
                        onError = { }
                    )
                }

            } catch (e: Exception) {
                Log.e(TAG, "Failed to start ASR", e)
            }
        }
    }

    /**
     * Arm (or re-arm) the ASR timeout; [onTimeout] fires after [ASR_MAX_DURATION].
     */
    private fun startAsrTimeout(onTimeout: () -> Unit) {
        asrTimeoutJob?.cancel()
        asrTimeoutJob = scope.launch {
            delay(ASR_MAX_DURATION)
            onTimeout()
        }
    }

    /**
     * Cancel any pending ASR timeout.
     */
    private fun cancelAsrTimeout() {
        asrTimeoutJob?.cancel()
        asrTimeoutJob = null
    }

    /**
     * Stop ASR recording and deliver the accumulated transcription.
     *
     * Sends the final (empty) audio frame so the server flushes remaining
     * results, waits briefly for them to arrive, then invokes [onResult] with
     * the full text (or a placeholder when nothing was recognized).
     *
     * @param onResult receives the final transcription on the main dispatcher.
     * @param onError receives a message if stopping fails.
     */
    fun stopAsr(onResult: (String) -> Unit, onError: (String) -> Unit) {
        scope.launch {
            try {
                Log.d(TAG, "Stopping ASR recording...")

                // Disarm the auto-stop timeout first.
                cancelAsrTimeout()

                // Stop capture and signal end-of-stream to the server.
                audioRecorder?.stopRecording()
                xfyunIatClient?.sendAudio(ByteArray(0), isFirst = false, isLast = true)

                // Give trailing results time to arrive.
                delay(1000)

                val result = recognizedText.toString()
                Log.d(TAG, "ASR test completed: $result")

                if (result.isEmpty()) {
                    onResult("未识别到语音")
                } else {
                    onResult(result)
                }

            } catch (e: Exception) {
                Log.e(TAG, "Failed to stop ASR", e)
                onError("停止录音失败: ${e.message}")
            }
        }
    }

    /**
     * Test the LLM (large language model) with a single question.
     *
     * Streams the completion, accumulating chunks, and delivers the full
     * response via [onResult] once the stream completes.
     *
     * @param question user message sent after the configured system prompt.
     * @param onResult receives the complete response text.
     * @param onError receives an error description on failure.
     */
    fun testLlm(question: String, onResult: (String) -> Unit, onError: (String) -> Unit) {
        scope.launch {
            try {
                Log.d(TAG, "Starting LLM test with question: $question")

                val responseBuilder = StringBuilder()

                // Accumulate streamed chunks until completion.
                zhipuClient?.onStreamChunk = { chunk ->
                    responseBuilder.append(chunk)
                }

                zhipuClient?.onStreamComplete = {
                    Log.d(TAG, "LLM completed")
                    scope.launch {
                        onResult(responseBuilder.toString())
                    }
                }

                zhipuClient?.onError = { error ->
                    Log.e(TAG, "LLM error: $error")
                    scope.launch {
                        onError(error)
                    }
                }

                // Kick off the streaming chat completion with configured params.
                zhipuClient?.chatCompletionStream(
                    messages = listOf(
                        ZhipuMessage(role = "system", content = com.amu.aidemo.android.config.LlmConfig.SYSTEM_PROMPT),
                        ZhipuMessage(role = "user", content = question)
                    ),
                    model = com.amu.aidemo.android.config.LlmConfig.MODEL,
                    temperature = com.amu.aidemo.android.config.LlmConfig.TEMPERATURE,
                    maxTokens = com.amu.aidemo.android.config.LlmConfig.MAX_TOKENS
                )

            } catch (e: Exception) {
                Log.e(TAG, "LLM test failed", e)
                onError("LLM 测试失败: ${e.message}")
            }
        }
    }

    /**
     * Test TTS (speech synthesis) by synthesizing [text] and playing it back.
     *
     * Audio chunks arriving from the websocket are buffered and drained by a
     * dedicated IO coroutine that writes them to the player; the drain loop
     * exits once synthesis has completed AND the buffer is empty.
     *
     * @param text text to synthesize.
     * @param onError receives an error description on failure.
     */
    fun testTts(text: String, onError: (String) -> Unit) {
        scope.launch {
            try {
                Log.d(TAG, "Starting TTS test with text: $text")

                // Tear down any previous TTS session.
                xfyunTtsClient?.disconnect()
                xfyunTtsClient = null
                audioPlayer?.stop()

                // Cancel a leftover writer coroutine, if any.
                audioWriteJob?.cancel()
                audioWriteJob = null

                // Drop any stale buffered audio.
                synchronized(ttsAudioBuffer) {
                    ttsAudioBuffer.clear()
                }

                // Re-initialize the player off the main thread.
                withContext(Dispatchers.IO) {
                    audioPlayer?.initialize()
                    Log.d(TAG, "AudioPlayer initialized for TTS")
                }

                // Mark synthesis as in progress before starting the writer.
                isTtsSynthesizing = true

                // Writer coroutine: drains ttsAudioBuffer into the player.
                audioWriteJob = scope.launch(Dispatchers.IO) {
                    var lastProcessedIndex = 0

                    while (true) {
                        // Snapshot the size under the lock; the websocket
                        // callback appends concurrently from another thread.
                        val currentSize = synchronized(ttsAudioBuffer) { ttsAudioBuffer.size }

                        while (lastProcessedIndex < currentSize) {
                            val audioData = synchronized(ttsAudioBuffer) {
                                ttsAudioBuffer[lastProcessedIndex]
                            }

                            try {
                                audioPlayer?.write(audioData)
                                Log.d(TAG, "TTS audio written [${lastProcessedIndex + 1}/$currentSize]: ${audioData.size} bytes")
                            } catch (e: Exception) {
                                Log.e(TAG, "Error writing audio chunk", e)
                            }

                            lastProcessedIndex++
                        }

                        if (isTtsSynthesizing) {
                            delay(10)  // caught up; wait for more data
                        } else {
                            // Synthesis done: drain anything that arrived after
                            // the snapshot, then exit.
                            val finalSize = synchronized(ttsAudioBuffer) { ttsAudioBuffer.size }
                            if (lastProcessedIndex >= finalSize) break
                        }
                    }

                    Log.d(TAG, "All TTS audio written, total chunks: $lastProcessedIndex")
                }

                // Create the TTS websocket client and wire up its callbacks.
                xfyunTtsClient = XfyunTtsClient(xfyunTtsAppId, xfyunTtsApiKey, xfyunTtsApiSecret).apply {
                    this.onAudioData = { audioData ->
                        if (audioData.isNotEmpty()) {
                            synchronized(ttsAudioBuffer) {
                                ttsAudioBuffer.add(audioData)
                            }
                            Log.d(TAG, "TTS audio data buffered: ${audioData.size} bytes")
                        }
                    }
                    this.onComplete = {
                        Log.d(TAG, "TTS synthesis completed")
                        isTtsSynthesizing = false  // let the writer drain and exit
                        scope.launch {
                            audioWriteJob?.join()
                            withContext(Dispatchers.IO) {
                                audioPlayer?.waitForCompletion()
                            }
                            Log.d(TAG, "TTS playback completed")
                        }
                    }
                    this.onError = { error: String ->
                        Log.e(TAG, "TTS error: $error")
                        isTtsSynthesizing = false  // synthesis is over
                        audioWriteJob?.cancel()
                        scope.launch {
                            onError(error)
                        }
                    }
                    this.onConnected = {
                        Log.d(TAG, "TTS connected, synthesizing...")
                        synthesize(
                            text = text,
                            voiceName = com.amu.aidemo.android.config.TtsConfig.VOICE_NAME,
                            speed = com.amu.aidemo.android.config.TtsConfig.SPEED,
                            pitch = com.amu.aidemo.android.config.TtsConfig.PITCH,
                            volume = com.amu.aidemo.android.config.TtsConfig.VOLUME
                        )
                    }
                }

                // Connect; synthesis starts from the onConnected callback.
                xfyunTtsClient?.connect()

            } catch (e: Exception) {
                Log.e(TAG, "TTS test failed", e)
                audioWriteJob?.cancel()
                onError("TTS 测试失败: ${e.message}")
            }
        }
    }

    /**
     * Stop all in-flight tests (recording, synthesis, playback).
     */
    fun stopAll() {
        // Disarm the timeout so it cannot fire stopAsr() after we stop.
        cancelAsrTimeout()
        // Let the TTS writer loop observe completion and exit cleanly.
        isTtsSynthesizing = false
        xfyunIatClient?.disconnect()
        xfyunTtsClient?.disconnect()
        audioRecorder?.stopRecording()
        audioPlayer?.stop()
        audioWriteJob?.cancel()
    }

    /**
     * Release all resources. The manager must not be used afterwards.
     */
    fun release() {
        stopAll()
        // Cancel every coroutine launched by this manager.
        rootJob.cancel()
        audioRecorder?.release()
        audioPlayer?.release()
        audioRecorder = null
        audioPlayer = null
        xfyunIatClient = null
        xfyunTtsClient = null
        zhipuClient = null
    }

    /**
     * Convert float samples to a PCM16 little-endian byte array.
     *
     * Each float in [-1.0, 1.0] is scaled to a short in [-32768, 32767]
     * (values outside the range are clamped).
     */
    private fun floatArrayToPcm16(floatData: FloatArray): ByteArray {
        val byteBuffer = ByteBuffer.allocate(floatData.size * 2)
        byteBuffer.order(ByteOrder.LITTLE_ENDIAN)

        for (sample in floatData) {
            val shortSample = (sample * 32767f).toInt().coerceIn(-32768, 32767).toShort()
            byteBuffer.putShort(shortSample)
        }

        return byteBuffer.array()
    }
}
