/**
 * 聊天状态管理 Store
 */

import { defineStore } from 'pinia'
import { ElMessage } from 'element-plus'
import * as characterApi from '@/api/character'
import * as conversationApi from '@/api/conversation'
import * as voiceApi from '@/api/voice'
import type { UploadUserVoiceRequest, CreateVoiceTaskRequest, CreateVoiceTaskResponse } from '@/api/voice'
import * as userApi from '@/api/user'
import { useAuthStore } from './auth'
import { blobToBase64 } from '@/utils/audio'
import { VoiceTaskWebSocket, type WebSocketMessage } from '@/utils/websocket'
import { checkWebSocketStatus, getWebSocketUrl } from '@/api/websocket'
import type { Character, Message, VoicePreferences } from '@/types/chat'

export const useChatStore = defineStore('chat', {
  state: () => ({
    // Available AI characters (loaded from the API, or demo fallback data)
    characters: [] as Character[],
    // Currently selected character id; null when none is chosen
    selectedCharacterId: null as number | null,
    // Messages of the active conversation, in display order
    messages: [] as Message[],
    // Backend conversation id; null until the first reply assigns one
    currentConversationId: null as number | null,
    // Per-user voice preferences; userId 0 is a placeholder until loaded
    voiceSettings: {
      userId: 0,
      preferredCharacterType: 'friend',
      voiceSpeed: 1.0,
      voicePitch: 1.0,
      autoEmotionDetection: true,
      privacyLevel: 1,
      notificationEnabled: true
    } as VoicePreferences,
    isLoading: false,    // a text-message round trip is in flight
    isRecording: false,  // microphone capture is active
    isProcessing: false, // a streaming voice task is being processed
    // ========== Streaming voice-task state ==========
    // Id of the in-flight voice task; consumed by cancelCurrentTask
    currentTaskId: null as string | null,
    // Progress percentage (0-100) of the current voice task
    taskProgress: 0,
    taskStatus: 'PENDING' as 'PENDING' | 'PROCESSING' | 'COMPLETED' | 'FAILED',
    // Pipeline step reported by the backend (e.g. 'ASR', 'STORAGE'); null when idle
    currentStep: null as string | null,
    // Human-readable status line shown in the progress panel
    taskMessage: '',
    // Pre-established WebSocket used for task progress messages
    wsConnection: null as VoiceTaskWebSocket | null,
    // setInterval handle for the polling fallback; null when not polling
    pollingInterval: null as number | null,
    // Signal for the chat view to auto-play a finished AI voice reply
    autoPlayAIVoice: null as { messageId: number, voiceUrl: string } | null
  }),

  getters: {
    // The Character object matching selectedCharacterId, or undefined when
    // nothing is selected or the id is unknown.
    currentCharacter: (state) => 
      state.characters.find(c => c.id === state.selectedCharacterId),
    
    unreadCounts: (state) => {
      // Per-character unread message counts.
      // NOTE(review): placeholder — always returns an empty Map regardless
      // of state; confirm whether this is still pending implementation.
      return new Map<number, number>()
    }
  },

  actions: {
    // Fetch the character list from the backend. On failure the store falls
    // back to hard-coded demo characters so the UI still renders, and a
    // warning toast tells the user the backend is unavailable.
    async fetchCharacters() {
      try {
        console.log('开始获取角色列表...')
        const characters = await characterApi.getCharacters()
        console.log('获取到的角色列表:', characters)
        this.characters = characters
      } catch (error) {
        console.error('获取角色列表失败:', error)
        console.log('使用模拟数据作为备选方案...')
        
        // Temporary mock data so the interface can still display characters.
        this.characters = [
          {
            id: 1,
            name: '小艾',
            description: '温柔体贴的心理咨询师，擅长倾听和安慰',
            personality: '温柔、耐心、善解人意',
            background: '专业心理咨询师，拥有丰富的心理治疗经验',
            avatarUrl: '',
            voiceConfig: 'female_gentle',
            characterType: 'psychologist',
            skills: '心理咨询,情绪疏导,压力缓解',
            systemPrompt: '你是一个温柔体贴的心理咨询师，善于倾听和安慰他人。',
            isActive: true,
            sortOrder: 1,
            createdAt: new Date().toISOString(),
            updatedAt: new Date().toISOString()
          },
          {
            id: 2,
            name: '小明',
            description: '活泼开朗的朋友，总是能带来快乐',
            personality: '活泼、开朗、幽默',
            background: '乐观向上的年轻人，喜欢分享快乐',
            avatarUrl: '',
            voiceConfig: 'male_cheerful',
            characterType: 'friend',
            skills: '陪伴聊天,情绪调节,幽默互动',
            systemPrompt: '你是一个活泼开朗的朋友，总是能带来快乐和正能量。',
            isActive: true,
            sortOrder: 2,
            createdAt: new Date().toISOString(),
            updatedAt: new Date().toISOString()
          },
          {
            id: 3,
            name: '智慧导师',
            description: '博学多才的人生导师，提供智慧建议',
            personality: '睿智、深沉、博学',
            background: '资深人生导师，拥有丰富的人生阅历',
            avatarUrl: '',
            voiceConfig: 'male_wise',
            characterType: 'mentor',
            skills: '人生指导,智慧分享,问题解决',
            systemPrompt: '你是一个博学多才的人生导师，能够提供智慧的人生建议。',
            isActive: true,
            sortOrder: 3,
            createdAt: new Date().toISOString(),
            updatedAt: new Date().toISOString()
          }
        ]
        
        ElMessage.warning('后端服务暂时不可用，已加载演示角色数据。请检查后端服务状态。')
      }
    },

    // Switch the active character: clear the current conversation state,
    // then load that character's chat history.
    async switchCharacter(characterId: number) {
      this.selectedCharacterId = characterId
      this.currentConversationId = null
      this.messages = []
      await this.loadChatHistory(characterId)
    },

     // Load the chat history for a character into the store.
     // AI voice messages that lost their audio URL get TTS regenerated in
     // the background; the message list updates when synthesis finishes.
     async loadChatHistory(characterId: number) {
       try {
         const authStore = useAuthStore()
         if (!authStore.userInfo?.id) {
           throw new Error('用户未登录')
         }
         
         // Explicit radix: user ids are decimal strings.
         const messages = await conversationApi.getChatHistory(characterId, parseInt(authStore.userInfo.id, 10))
         
         this.messages = messages.map(message => {
           // AI (senderType 2) voice messages without a URL need TTS again.
           // Fire-and-forget on purpose — mark the floating promise with
           // `void` so it is not mistaken for a missing await.
           if (message.contentType === 'voice' && !message.voiceUrl && message.senderType === 2) {
             void this.regenerateTTSForMessage(message)
           }
           return message
         })
         
         console.log('聊天记录加载完成:', this.messages.length, '条消息')
       } catch (error) {
         console.error('加载聊天记录失败:', error)
         ElMessage.error('加载聊天记录失败')
       }
     },

    // Send a text (or text-typed voice) message and append the AI reply.
    // The user message is shown optimistically with a temporary id which is
    // replaced once the server responds. Returns the AI reply message.
    async sendMessage(content: string, contentType: 'text' | 'voice' = 'text') {
      if (!this.selectedCharacterId) return
      
      try {
        this.isLoading = true
        const authStore = useAuthStore()
        
        if (!authStore.userInfo?.id) {
          throw new Error('用户未登录')
        }

        // Optimistically show the user's message with a temporary id.
        const userMessage: Message = {
          id: Date.now(),
          conversationId: this.currentConversationId || 0,
          senderType: 1,
          content,
          contentType,
          createdAt: new Date().toISOString()
        }
        this.messages.push(userMessage)

        const aiReply = await conversationApi.sendMessage({
          characterId: this.selectedCharacterId,
          content,
          contentType,
          conversationId: this.currentConversationId || undefined
        }, parseInt(authStore.userInfo.id, 10))

        // Replace the temporary id with a server-derived one.
        // NOTE(review): assumes the AI reply id is exactly one greater than
        // the stored user-message id — confirm against the backend contract.
        const userMsgIndex = this.messages.findIndex(m => m.id === userMessage.id)
        if (userMsgIndex !== -1) {
          this.messages[userMsgIndex].id = aiReply.id - 1
        }

        this.messages.push(aiReply)

        // Adopt the conversation id created by the backend, if new.
        if (!this.currentConversationId) {
          this.currentConversationId = aiReply.conversationId
        }

        // Auto-play the voice reply when enabled. Catch playback errors so a
        // failed autoplay (e.g. browser policy) does not reject unhandled.
        if (aiReply.voiceUrl && this.voiceSettings.notificationEnabled) {
          this.playVoice(aiReply.voiceUrl).catch(err => console.error('自动播放失败:', err))
        }

        return aiReply
      } catch (error) {
        console.error('发送消息失败:', error)
        ElMessage.error('发送消息失败')
        throw error
      } finally {
        this.isLoading = false
      }
    },

    // ========== 旧的同步语音处理功能（已注释） ==========
    // 说明：此功能已被流式语音处理替代，保留用于回退
    /*
    async sendVoiceMessage(audioBlob: Blob) {
      if (!this.selectedCharacterId) return

      try {
        this.isProcessing = true
        const authStore = useAuthStore()

        if (!authStore.userInfo?.id) {
          throw new Error('用户未登录')
        }

        console.log('开始完整录音流程，音频Blob:', audioBlob)
        console.log('音频Blob类型:', audioBlob.type)
        console.log('音频Blob大小:', audioBlob.size)
        
        // 显示处理状态
        ElMessage.info('正在处理语音，请稍候...')
        
        // 1. 转换为Base64
        const base64Audio = await blobToBase64(audioBlob)
        console.log('Base64音频数据长度:', base64Audio.length)
        
        // 2. 检测音频格式
        let audioFormat = 'wav'
        if (audioBlob.type.includes('webm')) {
          audioFormat = 'webm'
          console.warn('⚠️ 检测到WebM格式，阿里云ASR可能不支持，建议前端使用WAV或MP3格式')
        } else if (audioBlob.type.includes('mp4')) {
          audioFormat = 'mp4'
        } else if (audioBlob.type.includes('wav')) {
          audioFormat = 'wav'
        }
        
        console.log('检测到的音频格式:', audioFormat)
        console.log('音频Blob类型:', audioBlob.type)
        
        // 3. 调用完整录音流程API
        const response = await voiceApi.completeVoiceFlow({
          characterId: this.selectedCharacterId,
          userId: parseInt(authStore.userInfo.id),
          audioData: base64Audio,
          format: audioFormat,  // 使用检测到的格式
          language: 'zh',  // 修改为阿里云支持的语言代码
          enableEmotionDetection: this.voiceSettings.autoEmotionDetection,
          conversationId: this.currentConversationId || undefined
        })
        
        console.log('完整录音流程响应:', response)
        
        // 3. 清理识别文本，提取纯文本内容
        let cleanText = response.data.recognizedText || ''
        
        // 处理可能的格式化文本，如 [{text=你好，请问你能听得见吗？}]
        if (cleanText.includes('[{text=') && cleanText.includes('}]')) {
          const match = cleanText.match(/\[\{text=([^}]+)\}\]/)
          if (match && match[1]) {
            cleanText = match[1]
            console.log('提取的纯文本:', cleanText)
          }
        }
        
        // 如果文本为空，使用默认提示
        if (!cleanText || cleanText.trim() === '') {
          cleanText = '语音识别失败，请重试'
        }
        
        console.log('最终显示文本:', cleanText)
        
        // 4. 添加用户语音消息
        const userMessage: Message = {
          id: Date.now(),
          conversationId: response.data.aiReply.conversationId,
          senderType: 1,
          content: cleanText,  // 使用清理后的文本
          contentType: 'voice',
          voiceUrl: response.data.userVoiceUrl,
          emotionType: response.data.emotionType,
          emotionIntensity: response.data.emotionIntensity,
          duration: response.data.userVoiceDuration,
          createdAt: new Date().toISOString()
        }
        this.messages.push(userMessage)
        
        // 5. 添加AI回复消息
        const aiMessage: Message = {
          id: response.data.aiReply.id,
          conversationId: response.data.aiReply.conversationId,
          senderType: 2,
          content: response.data.aiReply.content,
          contentType: response.data.aiReply.contentType as 'text' | 'voice',
          voiceUrl: response.data.aiReply.voiceUrl,
          aiResponseTime: response.data.aiReply.aiResponseTime,
          createdAt: response.data.aiReply.createdAt
        }
        this.messages.push(aiMessage)

        // 更新会话ID
        if (!this.currentConversationId) {
          this.currentConversationId = response.data.aiReply.conversationId
        }

        // 6. 自动播放AI回复语音
        if (response.data.aiReply.voiceUrl && this.voiceSettings.notificationEnabled) {
          this.playVoice(response.data.aiReply.voiceUrl)
        }
        
        return aiMessage
      } catch (error) {
        console.error('发送语音消息失败:', error)
        ElMessage.error('发送语音消息失败')
        throw error
      } finally {
        this.isProcessing = false
      }
    },
    */

    // ========== Streaming voice processing ==========
    // Convert the recorded audio to Base64, create an asynchronous voice
    // task on the backend, and record the task id/state in the store.
    // Progress and results arrive via the pre-established WebSocket (or the
    // polling fallback), not from this call's return value.
    async sendVoiceMessage(audioBlob: Blob) {
      if (!this.selectedCharacterId) return

      try {
        this.isProcessing = true
        const authStore = useAuthStore()

        if (!authStore.userInfo?.id) {
          throw new Error('用户未登录')
        }

        console.log('开始流式语音处理，音频Blob:', audioBlob)
        console.log('音频Blob类型:', audioBlob.type)
        console.log('音频Blob大小:', audioBlob.size)
        
        // Show the progress panel immediately with an initial state.
        this.taskStatus = 'PENDING'
        this.taskProgress = 0
        this.currentStep = null
        this.taskMessage = '正在创建语音处理任务...'
        
        ElMessage.info('正在创建语音处理任务...')
        
        // 1. Convert the recording to Base64.
        console.log('🚀 步骤1: 转换音频为Base64...')
        const base64Audio = await blobToBase64(audioBlob)
        console.log('Base64音频数据长度:', base64Audio.length)
        
        // 2. Detect the audio container format from the blob MIME type.
        let audioFormat = 'wav'
        if (audioBlob.type.includes('webm')) {
          audioFormat = 'webm'
          console.warn('⚠️ 检测到WebM格式，阿里云ASR可能不支持，建议前端使用WAV或MP3格式')
        } else if (audioBlob.type.includes('mp4')) {
          audioFormat = 'mp4'
        } else if (audioBlob.type.includes('wav')) {
          audioFormat = 'wav'
        }
        
        console.log('检测到的音频格式:', audioFormat)
        
        // 3. Create the voice-processing task (audio data included).
        const taskRequest: CreateVoiceTaskRequest = {
          audioData: base64Audio,
          format: audioFormat,
          language: 'zh',
          characterId: this.selectedCharacterId,
          userId: parseInt(authStore.userInfo.id, 10),
          conversationId: this.currentConversationId || undefined,
          enableEmotionDetection: this.voiceSettings.autoEmotionDetection
        }
        
        console.log('🚀 步骤2: 创建语音处理任务...')
        const response = await voiceApi.createVoiceTask(taskRequest)
        console.log('语音处理任务创建成功:', response.data)
        
        // 4. Mirror the created task's state into the store.
        const taskData = response.data.data
        this.taskStatus = taskData.status
        this.taskProgress = taskData.progress
        this.currentStep = taskData.currentStep
        this.taskMessage = '任务已创建，开始处理...'
        
        console.log('任务ID:', taskData.taskId)
        console.log('任务状态:', this.taskStatus)
        
        if (!taskData.taskId) {
          throw new Error('任务ID为空，任务创建失败')
        }
        
        // Fix: remember the task id so cancelCurrentTask can actually cancel
        // this task (the field was previously never assigned).
        this.currentTaskId = taskData.taskId
        
        console.log('✅ 任务创建完成，等待处理结果...')
        
        return response.data
      } catch (error) {
        console.error('语音处理失败:', error)
        ElMessage.error('语音处理失败: ' + (error as Error).message)
        // Reset progress state so the UI is not stuck in "processing".
        this.taskStatus = 'FAILED'
        this.taskMessage = '处理失败'
        this.isProcessing = false
        throw error
      }
    },

    // 建立WebSocket连接（按用户管理） - 已废弃，使用预连接方式
    // 此方法保留用于向后兼容，但实际使用registerTaskHandler
    
    // Wait up to a few seconds for the WebSocket connection to come up.
    // Resolves either way; on timeout the polling fallback is started so the
    // caller is never blocked.
    waitForWebSocketConnection(taskId: string): Promise<void> {
      const maxWait = 3 // seconds to wait before falling back to polling
      return new Promise((resolve) => {
        let waitCount = 0
        const timer = setInterval(() => {
          waitCount += 1
          console.log(`⏳ 等待WebSocket连接... (${waitCount}/${maxWait}秒)`)

          if (this.wsConnection?.isConnected()) {
            clearInterval(timer)
            console.log('✅ WebSocket连接已建立，停止等待')
            resolve()
          } else if (waitCount >= maxWait) {
            clearInterval(timer)
            console.log('⏰ WebSocket连接等待超时，启动轮询备选方案')
            this.startPollingFallback(taskId)
            resolve() // resolve even on timeout so callers are not blocked
          }
        }, 1000)
      })
    },
    
    // Cancel the status-polling timer, if one is running.
    stopPolling() {
      if (!this.pollingInterval) return
      clearInterval(this.pollingInterval)
      this.pollingInterval = null
      console.log('🛑 停止轮询')
    },
    
    // Polling fallback: query the task status once per second when WebSocket
    // progress delivery is unavailable, for at most maxPolls iterations.
    // On completion the result is reshaped into the WebSocket message format
    // and fed through handleTaskCompleted.
    startPollingFallback(taskId: string) {
      console.log('🔄 启动轮询备选方案')
      
      // Stop any polling loop that is already running.
      this.stopPolling()
      
      let pollCount = 0
      const maxPolls = 15 // cap at 15 one-second polls (~15s total)
      
      this.pollingInterval = setInterval(async () => {
        pollCount++
        console.log(`🔄 轮询任务状态 (${pollCount}/${maxPolls}):`, taskId)
        
        try {
          const response = await voiceApi.getTaskStatus(taskId)
          const taskData = response.data.data
          
          console.log('🔄 轮询结果:', taskData)
          
          // Mirror the polled task status into store state.
          this.taskProgress = taskData.progress
          this.taskStatus = taskData.status
          this.currentStep = taskData.currentStep
          this.taskMessage = '正在处理中...'
          
          // Completion check: 1) terminal status COMPLETED/FAILED, or
          // 2) status still PENDING but the STORAGE step already produced a
          //    full result payload (recognized text + AI reply + voice URL).
          const isTaskCompleted = taskData.status === 'COMPLETED' || taskData.status === 'FAILED' ||
            (taskData.status === 'PENDING' && taskData.currentStep === 'STORAGE' && 
             taskData.recognizedText && taskData.aiReply && taskData.voiceUrl)
          
          if (isTaskCompleted) {
            this.stopPolling()
            console.log('🔄 轮询完成，任务状态:', taskData.status)
            
            // Build a payload matching the WebSocket message shape.
            const wsData = {
              status: 'COMPLETED' as const, // normalize status to COMPLETED
              progress: 100, // normalize progress to 100
              currentStep: taskData.currentStep || 'STORAGE',
              message: '处理完成！',
              recognizedText: taskData.recognizedText || undefined,
              aiReply: taskData.aiReply || undefined,
              voiceUrl: taskData.voiceUrl || undefined,
              userVoiceUrl: (taskData as any).userVoiceUrl || undefined, // user recording URL from the backend
              conversationId: (taskData as any).conversationId || undefined, // conversation id from the backend
              timestamp: new Date().toISOString()
            }
            this.handleTaskCompleted(wsData)
          }
          
        } catch (error) {
          console.error('🔄 轮询失败:', error)
        }
        
        // Give up after the maximum number of polls.
        if (pollCount >= maxPolls) {
          this.stopPolling()
          console.log('🔄 轮询超时，停止轮询')
        }
      }, 1000) // poll once per second
    },

    // Apply a WebSocket progress-update message to store state, and append
    // the recognized user message once the ASR step reports final text.
    handleProgressUpdate(data: WebSocketMessage['data']) {
      console.log('收到进度更新:', data)
      console.log('当前任务状态:', this.taskStatus)
      console.log('当前进度:', this.taskProgress)

      if (!data) {
        console.warn('⚠️ 进度更新数据为空')
        return
      }

      this.taskProgress = data.progress
      this.taskStatus = data.status
      this.currentStep = data.currentStep || null
      this.taskMessage = data.message || ''

      console.log('更新后任务状态:', this.taskStatus)
      console.log('更新后进度:', this.taskProgress)

      // Only add the user message during the ASR step, and only if an
      // identical one is not already present (content-based dedupe).
      const alreadyAdded = this.messages.some(
        m => m.senderType === 1 && m.content === data.recognizedText
      )
      if (data.recognizedText && data.currentStep === 'ASR' && !alreadyAdded) {
        const userMessage: Message = {
          id: Date.now(),
          conversationId: this.currentConversationId || 0,
          senderType: 1,
          content: data.recognizedText,
          contentType: 'voice',
          voiceUrl: data.userVoiceUrl || undefined, // backend-provided recording URL
          createdAt: new Date().toISOString()
        }
        this.messages.push(userMessage)
        console.log('✅ 添加用户消息:', userMessage)
      }

      // The AI message is appended on task completion, not here.
    },

    // Handle a task-completed message: finalize progress state, append the
    // user and AI messages (deduplicated by content so progress-time inserts
    // are not repeated), and queue the AI voice reply for autoplay.
    handleTaskCompleted(data: WebSocketMessage['data']) {
      console.log('任务完成:', data)
      
      if (!data) {
        console.warn('⚠️ 任务完成数据为空')
        return
      }
      
      this.taskStatus = 'COMPLETED'
      this.taskProgress = 100
      this.currentStep = null  // clear the current-step indicator
      this.taskMessage = '处理完成！'
      
      // Leave the processing state.
      this.isProcessing = false
      
      // Adopt the conversation id assigned by the backend, if any.
      if (data.conversationId) {
        this.currentConversationId = data.conversationId
      }
      
      // Append the recognized user message unless an identical one was
      // already added during progress updates (content-based dedupe).
      if (data.recognizedText && !this.messages.find(m => m.content === data.recognizedText && m.senderType === 1)) {
        const userMessage: Message = {
          id: Date.now(),
          conversationId: this.currentConversationId || 0,
          senderType: 1,
          content: data.recognizedText,
          contentType: 'voice',
          voiceUrl: data.userVoiceUrl || undefined,
          createdAt: new Date().toISOString()
        }
        this.messages.push(userMessage)
        console.log('✅ 添加用户消息:', userMessage)
        console.log('✅ 用户语音URL:', data.userVoiceUrl)
        // The user's own recording is shown in chat but never auto-played.
      }
      
      let aiMessageId: number | null = null
      if (data.aiReply && !this.messages.find(m => m.content === data.aiReply && m.senderType === 2)) {
        const aiMessage: Message = {
          id: Date.now() + 1,
          conversationId: this.currentConversationId || 0,
          senderType: 2,
          content: data.aiReply,
          contentType: 'voice',
          voiceUrl: data.voiceUrl,
          createdAt: new Date().toISOString()
        }
        this.messages.push(aiMessage)
        aiMessageId = aiMessage.id
        console.log('✅ 添加AI消息:', aiMessage)
      }
      
      // Replace the array reference to force a reactive update.
      this.messages = [...this.messages]
      
      // Queue autoplay of the AI reply; the chat view watches this field
      // and performs the actual playback.
      if (data.voiceUrl && this.voiceSettings.notificationEnabled && aiMessageId) {
        console.log('🎵 准备播放AI语音:', data.voiceUrl)
        this.autoPlayAIVoice = {
          messageId: aiMessageId,
          voiceUrl: data.voiceUrl
        }
      }
      
      ElMessage.success('语音处理完成！')
    },

    // Move the store into the FAILED state and surface the error to the user.
    handleTaskFailed(data: WebSocketMessage['data']) {
      console.error('任务失败:', data)

      this.taskStatus = 'FAILED'

      // Payload may be missing entirely; report a generic failure then.
      if (!data) {
        console.warn('⚠️ 任务失败数据为空')
        this.taskMessage = '处理失败'
        ElMessage.error('语音处理失败: 未知错误')
        return
      }

      this.currentStep = null // clear the current-step indicator
      this.taskMessage = data.errorMessage || '处理失败'

      // Leave the processing state.
      this.isProcessing = false

      ElMessage.error('语音处理失败: ' + (data.errorMessage || '未知错误'))
    },

    // 处理任务消息 - 已废弃，统一使用通用处理器
    // handleTaskMessage(taskId: string, type: string, data: any) { ... }

    // Log a WebSocket-level error and show it to the user.
    handleWebSocketError(error: string) {
      console.error('WebSocket错误:', error)
      ElMessage.error(`连接错误: ${error}`)
    },

    // Cancel the in-flight voice task (if any) and tear down the progress
    // channels (WebSocket + polling fallback).
    async cancelCurrentTask() {
      if (this.currentTaskId) {
        try {
          await voiceApi.cancelTask(this.currentTaskId)
          this.taskStatus = 'FAILED'
          this.taskMessage = '任务已取消'
          ElMessage.info('任务已取消')
        } catch (error) {
          console.error('取消任务失败:', error)
          ElMessage.error('取消任务失败')
        } finally {
          // Fix: clear the task id and processing flag so the UI does not
          // stay stuck in the "processing" state after cancellation.
          this.currentTaskId = null
          this.isProcessing = false
        }
      }
      
      // Drop the WebSocket connection.
      if (this.wsConnection) {
        this.wsConnection.disconnect()
        this.wsConnection = null
      }
      this.stopPolling()
    },
    
    // Ensure a page-level WebSocket connection exists for the logged-in user,
    // pre-connecting when the backend reports no active connection.
    async initializeWebSocket() {
      const userId = useAuthStore().userInfo?.id?.toString()

      if (!userId) {
        console.log('❌ 用户未登录，跳过WebSocket初始化')
        return
      }

      try {
        // Ask the backend whether a connection is already registered.
        const status = await checkWebSocketStatus(userId)
        console.log('🔍 WebSocket状态检查:', status)

        if (!status.connected) {
          console.log('❌ WebSocket未连接，预建立连接')
          await this.preConnectWebSocket(userId)
        } else {
          console.log('✅ WebSocket已连接，获取连接信息')
          const urlInfo = await getWebSocketUrl(userId)
          console.log('🔗 WebSocket URL:', urlInfo.wsUrl)
        }

        // Regardless of the reported status, make sure a live local
        // connection object exists.
        if (!this.wsConnection?.isConnected()) {
          console.log('🔧 确保WebSocket连接存在')
          await this.preConnectWebSocket(userId)
        }
      } catch (error) {
        console.error('❌ 检查WebSocket状态失败:', error)
      }
    },

    // Pre-establish the per-user WebSocket connection and wire every task
    // message type to the store's shared handlers. Incoming messages are
    // applied only while isProcessing is true, so late or stale messages for
    // finished tasks are ignored. Waits up to 5 seconds for the connection.
    async preConnectWebSocket(userId: string) {
      console.log('🔌 预建立WebSocket连接:', userId)
      
      this.wsConnection = new VoiceTaskWebSocket(userId, {
        onConnect: () => {
          console.log('✅ 预连接WebSocket已建立')
        },
        onDisconnect: () => {
          console.log('❌ 预连接WebSocket已断开')
        },
        onError: (error) => {
          console.error('❌ 预连接WebSocket错误:', error)
        },
        // All message types funnel into the shared store handlers.
        onProgressUpdate: (taskId, data) => {
          console.log('📨 收到进度更新:', taskId, data)
          // Apply only while a task is actively being processed.
          if (this.isProcessing) {
            console.log('✅ 处理进度更新:', taskId, data)
            this.handleProgressUpdate(data)
          } else {
            console.log('⏭️ 跳过非处理中消息:', taskId, '处理中:', this.isProcessing)
          }
        },
        onTaskCompleted: (taskId, data) => {
          console.log('📨 收到任务完成:', taskId, data)
          // Apply only while a task is actively being processed.
          if (this.isProcessing) {
            console.log('✅ 处理任务完成:', taskId, data)
            this.handleTaskCompleted(data)
          } else {
            console.log('⏭️ 跳过非处理中消息:', taskId, '处理中:', this.isProcessing)
          }
        },
        onTaskFailed: (taskId, data) => {
          console.log('📨 收到任务失败:', taskId, data)
          // Apply only while a task is actively being processed.
          if (this.isProcessing) {
            console.log('✅ 处理任务失败:', taskId, data)
            this.handleTaskFailed(data)
          } else {
            console.log('⏭️ 跳过非处理中消息:', taskId, '处理中:', this.isProcessing)
          }
        }
      })
      
      this.wsConnection.connect()
      
      // Wait (up to maxWait seconds) for the connection to come up.
      let waitCount = 0
      const maxWait = 5
      
      while (waitCount < maxWait && (!this.wsConnection || !this.wsConnection.isConnected())) {
        await new Promise(resolve => setTimeout(resolve, 1000))
        waitCount++
        console.log(`⏳ 等待预连接建立... (${waitCount}/${maxWait}秒)`)
      }
      
      if (this.wsConnection && this.wsConnection.isConnected()) {
        console.log('✅ 预连接WebSocket建立成功')
      } else {
        console.log('❌ 预连接WebSocket建立失败')
      }
    },

    // 注册任务处理器 - 已废弃，统一使用通用处理器
    // registerTaskHandler(taskId: string) { ... }

    // Tear down the WebSocket connection and stop any polling
    // (used on page switches and similar teardown scenarios).
    disconnectWebSocket() {
      const ws = this.wsConnection
      if (ws) {
        console.log('🔌 主动断开WebSocket连接')
        ws.disconnect()
        this.wsConnection = null
      }
      this.stopPolling()
    },

    // Play a voice clip from a URL at the configured playback speed.
    // The returned promise resolves once playback has STARTED (not finished)
    // and rejects when the file cannot be loaded or played.
    async playVoice(voiceUrl: string): Promise<void> {
      console.log('🎵 开始播放语音:', voiceUrl)

      // Only absolute http(s) URLs are playable.
      if (!voiceUrl?.startsWith('http')) {
        console.error('❌ 无效的语音URL:', voiceUrl)
        throw new Error('语音文件URL无效')
      }

      const player = new Audio(voiceUrl)
      player.playbackRate = this.voiceSettings.voiceSpeed

      return new Promise<void>((resolve, reject) => {
        // Debug listeners for load/progress diagnostics.
        player.addEventListener('timeupdate', () => {
          console.log('播放进度:', player.currentTime, '/', player.duration)
        })
        player.addEventListener('loadeddata', () => {
          console.log('✅ 语音文件加载完成，时长:', player.duration, '秒')
        })
        player.addEventListener('canplay', () => {
          console.log('✅ 语音可以播放')
        })
        player.addEventListener('error', (event) => {
          console.error('❌ 语音加载错误:', event)
          console.error('❌ 错误详情:', {
            error: player.error,
            networkState: player.networkState,
            readyState: player.readyState,
            src: player.src
          })
          reject(new Error('语音文件加载失败'))
        })

        player
          .play()
          .then(() => {
            console.log('✅ 语音播放开始')
            resolve()
          })
          .catch(error => {
            console.error('❌ 播放语音失败:', error)
            console.error('❌ 错误类型:', error.name)
            console.error('❌ 错误消息:', error.message)
            reject(error)
          })
      })
    },

    // Merge partial voice settings into store state and persist them.
    // The local update is optimistic: on API failure the new settings remain
    // applied locally and only an error toast is shown.
    async updateVoiceSettings(settings: Partial<VoicePreferences>) {
      this.voiceSettings = { ...this.voiceSettings, ...settings }

      try {
        const authStore = useAuthStore()
        if (!authStore.userInfo?.id) return

        // Explicit radix: user ids are decimal strings.
        await userApi.updateVoicePreferences(parseInt(authStore.userInfo.id, 10), this.voiceSettings)
      } catch (error) {
        console.error('更新语音设置失败:', error)
        ElMessage.error('更新语音设置失败')
      }
    },

    // Fetch the user's persisted voice preferences into the store.
    // Silently keeps the built-in defaults when the user is logged out
    // or the request fails.
    async initVoiceSettings() {
      try {
        const userId = useAuthStore().userInfo?.id
        if (!userId) return

        this.voiceSettings = await userApi.getVoicePreferences(parseInt(userId))
      } catch (error) {
        console.error('获取语音设置失败:', error)
        // fall back to the defaults already in state
      }
    },

    // Update the microphone-recording flag shown by the UI.
    setRecordingState(isRecording: boolean) {
      this.isRecording = isRecording
    },

     // Update the voice-processing flag shown by the UI.
     setProcessingState(isProcessing: boolean) {
       this.isProcessing = isProcessing
     },

     // Regenerate TTS audio for an AI voice message that lost its URL,
     // then patch the stored message in place. Failures are logged only
     // (best-effort background repair).
     async regenerateTTSForMessage(message: Message) {
       // Only AI messages (senderType 2) with text content can be re-synthesized.
       if (message.senderType !== 2 || !message.content) return
       
       try {
         const ttsResult = await voiceApi.textToSpeech({
           text: message.content,
           voiceType: 'female',
           speed: this.voiceSettings.voiceSpeed,
           pitch: this.voiceSettings.voicePitch,
           emotion: message.emotionType || 'neutral',
           format: 'wav'
         })
         
         // Patch the matching message with the fresh audio URL and duration.
         const target = this.messages.find(m => m.id === message.id)
         if (target) {
           target.voiceUrl = ttsResult.audioUrl
           target.duration = ttsResult.duration
         }
         
         console.log('TTS重新生成成功:', ttsResult.audioUrl)
       } catch (error) {
         console.error('TTS重新生成失败:', error)
       }
     }
   }
 })

