<script setup>
import { ref, onMounted, watch, nextTick, onBeforeUnmount } from 'vue'
import { useRoute } from 'vue-router'

const route = useRoute()
// Role id comes from the route path (/chat/:roleId); an optional conversation
// id comes from the query string and, when present, selects an existing chat.
const roleId = ref(route.params.roleId)
const routeConversationId = ref(route.query.conversationId ? Number(route.query.conversationId) : null)

// Debug logging
console.log('Chat.vue 初始化 - route.params:', route.params)
console.log('Chat.vue 初始化 - roleId:', roleId.value)
console.log('Chat.vue 初始化 - routeConversationId:', routeConversationId.value)
// User id used by backend REST APIs (read from local storage after login)
const apiUserId = ref(null)
// Temporary front-end user id (string) used for signaling in peer-to-peer calls
const signalingUserId = ref(localStorage.getItem('tmp_user_id') || '')
if (!signalingUserId.value) {
  // First visit: generate a random id and persist it so the client stays addressable
  signalingUserId.value = `u_${Math.random().toString(36).slice(2, 10)}`
  localStorage.setItem('tmp_user_id', signalingUserId.value)
}

// --- Conversation / chat UI state ---
const conversationId = ref(null)
const messages = ref([])
const chatBoxRef = ref(null)
const conversations = ref([])
const convPageNo = ref(1)
const convPageSize = 10
const convHasMore = ref(true)
const convLoading = ref(false)
const currentRole = ref(null)
const sidebarRef = ref(null)
const content = ref('')
const loading = ref(false)

// Recording (short voice messages, non-streaming)
let mediaRecorder = null
const isRecording = ref(false)
let recordChunks = []
const wsStatus = ref('disconnected')
let voiceWs = null
const ttsQueue = []
let playing = false
let currentAudio = null

// TTS queue playback state
let ttsPlaying = false
let ttsCurrentAudio = null
let ttsAudioStarted = false // guards against double playback
let wsReconnectTimer = null
const voiceReady = ref(false)
const asrPending = ref(false)
const ttsPending = ref(false)
const muted = ref(false)

// Voice-call state
const callState = ref('idle') // idle, calling, ringing, connected, ended
const currentCallId = ref(null)
const isInCall = ref(false)
const callDuration = ref(0)
const isVoiceActive = ref(false)
let callTimer = null

// WebRTC state
let pc = null // RTCPeerConnection
const localStream = ref(null)
const remoteAudio = ref(null)

// VAD (voice activity detection) state
let audioContext = null
let analyser = null
let microphone = null
let vadProcessor = null
const vadState = ref('idle') // idle, listening, speaking, processing
const speechThreshold = 0.025 // speech detection threshold (raised sensitivity)
const silenceDuration = 2000 // silence duration in ms - moderate wait time
const minSpeechDuration = 800 // minimum speech duration in ms - lowered minimum
let speechStartTime = null
let silenceStartTime = null
let audioBuffer = []
let isVadRecording = false
let consecutiveSpeechFrames = 0 // consecutive speech-frame counter
let consecutiveSilenceFrames = 0 // consecutive silence-frame counter
const minConsecutiveSpeechFrames = 1 // min consecutive speech frames (further relaxed)
const maxConsecutiveSilenceFrames = 5 // max consecutive silence frames (further relaxed)

// Full-duplex communication state
const audioQueue = ref([]) // audio playback queue
const currentAudioStream = ref(null) // currently playing audio stream
const isPlayingAudio = ref(false) // whether audio is currently playing
const audioInterruptEnabled = ref(true) // whether audio interruption is allowed
const duplexState = ref('idle') // idle, user_speaking, ai_responding, both_active
let audioContextDuplex = null // full-duplex audio context
let audioMixer = null // audio mixer
let userAudioGain = null // user audio gain control
let aiAudioGain = null // AI audio gain control

// Streaming-LLM display state
const streamingText = ref('')
const isStreamingLLM = ref(false)

// LLM / TTS stream-control flags
const isLLMStreaming = ref(false)
const isTTSStreaming = ref(false)
const shouldStopLLM = ref(false)

// Voice (timbre) selection
const voiceList = ref([])
const selectedVoice = ref('qiniu_zh_male_hlsnkk') // default voice
const showVoiceSelector = ref(false)

// User avatar / profile display
const userAvatar = ref('')
const userName = ref('')

// Per-message playback state management
const messagePlayStates = ref(new Map()) // playback state keyed per message

// Call-quality monitoring snapshot (updated by startQualityMonitoring)
const callQuality = ref({
  connectionState: 'new',
  iceConnectionState: 'new',
  iceGatheringState: 'new',
  audioLevel: 0,
  packetLoss: 0,
  latency: 0
})
let qualityMonitorInterval = null

// Build the Authorization header object for authenticated API requests.
// Returns an empty object when no auth token is stored locally.
const authHeaders = () => {
  const storedToken = localStorage.getItem('auth_token') || ''
  if (!storedToken) return {}
  return { 'Authorization': `Bearer ${storedToken}` }
}

// Open the /ws/voice WebSocket and install every streaming / signaling handler.
// The single onmessage handler dispatches on msg.type for: ASR results,
// streaming LLM text, streaming TTS audio blobs, call signaling
// (ringing/offer/answer/ICE), and transcript updates pushed by the server.
// Reconnects with a fixed 1 s delay whenever the socket closes.
const initVoiceWs = () => {
  // Derive ws:// from the page origin; in dev, remap the Vite port (5173)
  // onto the backend port (8080).
  const wsBase = location.origin.replace('http', 'ws')
  voiceWs = new WebSocket(`${wsBase.replace(':5173', ':8080')}/ws/voice`)
  voiceWs.onopen = () => {
    wsStatus.value = 'connected'
    voiceReady.value = true
    // Heartbeat
    try { voiceWs.send(JSON.stringify({ type: 'ping' })) } catch {}
  }
  voiceWs.onerror = () => { wsStatus.value = 'error' }
  voiceWs.onclose = () => {
    wsStatus.value = 'closed'
    voiceReady.value = false
    // Reconnect after a delay (NOTE(review): fixed 1 s, not actually backed off)
    if (wsReconnectTimer) clearTimeout(wsReconnectTimer)
    wsReconnectTimer = setTimeout(() => initVoiceWs(), 1000)
  }
  voiceWs.onmessage = async (ev) => {
    const msg = JSON.parse(ev.data)
    if (msg.type === 'asr_final') {
      // Show recognized speech as our own message (only when non-empty)
      if (msg.text && msg.text.trim()) {
        // Display the raw user message (without the brevity hint)
        messages.value = messages.value.concat([{ 
          id: 'tmp_'+Date.now(), 
          isUserMessage: 1, 
          content: msg.text, 
          audioUrl: null 
        }])
        
        // During a voice call the brevity hint is added only when the text
        // is forwarded to the LLM, never in the displayed transcript
        if (isInCall.value) {
          console.log('语音通话模式：将在发送给LLM时添加简化提示')
        }
      }
      asrPending.value = false
    } else if (msg.type === 'asr_start') {
      // Streaming ASR started
      console.log('ASR开始识别:', msg.content)
      asrPending.value = true
    } else if (msg.type === 'asr_error') {
      // Streaming ASR error
      console.error('ASR识别错误:', msg.content)
      asrPending.value = false
    } else if (msg.type === 'llm_text') {
      // Append AI text as a message (non-streaming fallback, only when non-empty)
      if (msg.text && msg.text.trim()) {
        messages.value = messages.value.concat([{ id: 'tmp_ai_'+Date.now(), isUserMessage: 0, content: msg.text, audioUrl: null }])
      }
      isStreamingLLM.value = false
      ttsPending.value = true
    } else if (msg.type === 'llm_start') {
      // Streaming LLM generation started: reset accumulator and flags
      console.log('LLM开始生成:', msg.content)
      streamingText.value = ''
      isStreamingLLM.value = true
      isLLMStreaming.value = true
      shouldStopLLM.value = false
      ttsPending.value = true
    } else if (msg.type === 'llm_chunk') {
      // Incremental LLM text chunk
      console.log('LLM文本块:', msg.content)
      
      // Drop the chunk if the user interrupted generation
      if (shouldStopLLM.value) {
        console.log('LLM流式生成已被用户打断，忽略此文本块')
        return
      }
      
      streamingText.value += msg.content
      // Live-update the last message's content (only when non-empty)
      if (streamingText.value && streamingText.value.trim()) {
        if (messages.value.length > 0 && !messages.value[messages.value.length - 1].isUserMessage) {
          messages.value[messages.value.length - 1].content = streamingText.value
        } else {
          // Create a new AI message to stream into
          messages.value = messages.value.concat([{ 
            id: 'streaming_ai_'+Date.now(), 
            isUserMessage: 0, 
            content: streamingText.value,
            audioUrl: null
          }])
        }
      }
    } else if (msg.type === 'llm_end') {
      // Streaming LLM generation finished
      console.log('LLM生成完成:', msg.content)
      
      // If interrupted, discard the result and skip TTS entirely
      if (shouldStopLLM.value) {
        console.log('LLM生成已被用户打断，不进行TTS处理')
        isStreamingLLM.value = false
        isLLMStreaming.value = false
        shouldStopLLM.value = false
        streamingText.value = ''
        return
      }
      
      isStreamingLLM.value = false
      isLLMStreaming.value = false
      ttsPending.value = true // TTS processing begins next
      // Make sure the last message carries the complete text (only when non-empty)
      if (streamingText.value && streamingText.value.trim()) {
        if (messages.value.length > 0 && !messages.value[messages.value.length - 1].isUserMessage) {
          messages.value[messages.value.length - 1].content = streamingText.value
        }
      }
      
      // Send the full text to the backend for TTS synthesis.
      // NOTE(review): this uses msg.content from 'llm_end' rather than the
      // accumulated streamingText — assumes the server puts the complete
      // text in that field; confirm against the backend protocol.
      if (voiceWs && voiceWs.readyState === WebSocket.OPEN) {
        const cleanedText = cleanTextForTTS(msg.content)
        voiceWs.send(JSON.stringify({
          type: 'tts_request',
          text: cleanedText,
          conversationId: conversationId.value,
          voiceType: selectedVoice.value || '' // empty string lets the backend pick the role's default voice
        }))
      }
    // Automatic TTS handling removed: plain text messages no longer auto-play audio
    } else if (msg.type === 'llm_error') {
      // Streaming LLM error: reset all streaming state
      console.error('LLM生成错误:', msg.content)
      isStreamingLLM.value = false
      ttsPending.value = false
      streamingText.value = ''
    } else if (msg.type === 'tts_blob') {
      if (msg.payload) {
        // Route the TTS audio chunk through the unified audio queue
        console.log('收到TTS音频块，大小:', msg.payload.length)
        if (!muted.value) {
          // Decode the base64 payload into a Blob URL and enqueue it
          try {
            const binaryString = atob(msg.payload)
            const bytes = new Uint8Array(binaryString.length)
            for (let i = 0; i < binaryString.length; i++) {
              bytes[i] = binaryString.charCodeAt(i)
            }
            const blob = new Blob([bytes], { type: 'audio/mp3' })
            const url = URL.createObjectURL(blob)
            
            // A messageId marks this blob as the response to a manual TTS request
            if (msg.messageId) {
              console.log('收到手动TTS响应，messageId:', msg.messageId)
              // Play this audio directly via the per-message queue, not the call queue
              const messageKey = msg.messageId
              const playState = messagePlayStates.value.get(messageKey)
              
              if (playState) {
                // First chunk: clear the waiting flag and start playback
                if (playState.waitingForTTS) {
                  playState.waitingForTTS = false
                  playState.playing = true
                  
                  // Stop any other audio that is playing
                  stopCurrentAudio()
                  
                  // Remove the loading indicator appended by playTts.
                  // FIXME: ' 🔊' is 3 UTF-16 code units (space + surrogate
                  // pair), so slice(0, -2) leaves a trailing space behind.
                  const messageIndex = messages.value.findIndex(m => m.id === msg.messageId)
                  if (messageIndex !== -1) {
                    const content = messages.value[messageIndex].content
                    if (content.endsWith(' 🔊')) {
                      messages.value[messageIndex].content = content.slice(0, -2)
                    }
                  }
                }
                
                // Wrap the chunk in an <audio> element and append to the queue
                const audio = new Audio(url)
                if (!playState.audioQueue) {
                  playState.audioQueue = []
                }
                playState.audioQueue.push(audio)
                
                // Kick off playback if nothing from this message is playing yet
                if (!playState.currentAudio) {
                  playNextManualTTS(messageKey)
                }
                
                return
              }
            }
            
            // Only auto-play (via the call queue) while a voice call is active
            if (isInCall.value) {
              // Append to the streaming-TTS playback queue
              ttsQueue.push(url)
              console.log(`TTS音频已加入播放队列，队列长度: ${ttsQueue.length}`)
              
              // Start the playback loop if it is not already running
              if (!ttsPlaying) {
                console.log('启动TTS播放进程')
                playNext()
              } else {
                console.log('TTS正在播放中，音频已加入队列')
              }
              
              duplexState.value = 'ai_responding'
            } else {
              console.log('非语音通话状态，TTS音频不自动播放')
            }
          } catch (error) {
            console.error('处理TTS音频块失败:', error)
          }
        }
        ttsPending.value = false
      }
    } else if (msg.type === 'tts_start') {
      // Streaming TTS synthesis started
      console.log('TTS开始合成:', msg.content)
      ttsPending.value = true
      isTTSStreaming.value = true
      duplexState.value = 'ai_responding'
    } else if (msg.type === 'tts_error') {
      // Streaming TTS error
      console.error('TTS合成错误:', msg.content)
      ttsPending.value = false
      if (duplexState.value === 'ai_responding') {
        duplexState.value = 'idle'
      }
    } else if (msg.type === 'call_ringing') {
      // Ringing notification - answer automatically
      callState.value = 'ringing'
      currentCallId.value = msg.callId
      console.log('收到振铃通知，自动接听:', msg.callId)
      
      // Auto-answer the incoming call
      await answerCall()
    } else if (msg.type === 'call_connected') {
      // Call established
      callState.value = 'connected'
      isInCall.value = true
      startCallTimer()
      console.log('通话已连接:', msg.callId)
      
      // Begin call recording
      console.log('准备开始通话录音...')
      setTimeout(() => {
        console.log('开始启动录音功能...')
        startCallRecording()
      }, 1000) // delay 1 s so the connection can stabilize first
      
      // If an Answer is attached, apply it (only in the matching signaling state)
      if (msg.answer && pc && pc.signalingState === 'have-local-offer') {
        try {
          await pc.setRemoteDescription(new RTCSessionDescription(msg.answer))
          console.log('设置远程Answer')
        } catch (error) {
          console.error('设置Answer失败:', error)
        }
      } else if (msg.answer) {
        console.log('跳过Answer设置，当前信令状态:', pc?.signalingState)
      }
    } else if (msg.type === 'call_rejected') {
      // Call was rejected by the callee
      console.log('通话被拒绝:', msg.callId)
      
      // Immediately stop AI playback and generation
      stopAIPlayback('通话被拒绝')
      
      callState.value = 'idle'
      isInCall.value = false
      currentCallId.value = null
      stopCallTimer()
      console.log('通话被拒绝处理完成')
    } else if (msg.type === 'call_ended') {
      // Call ended
      console.log('通话结束:', msg.reason)
      
      // Immediately stop AI playback and generation
      stopAIPlayback('通话结束')
      
      callState.value = 'idle'
      isInCall.value = false
      currentCallId.value = null
      stopCallTimer()
      console.log('通话结束处理完成')
    } else if (msg.type === 'call_error') {
      // Call-level error
      console.error('通话错误:', msg.message)
      
      // Immediately stop AI playback and generation
      stopAIPlayback('通话错误')
      
      callState.value = 'idle'
      isInCall.value = false
      currentCallId.value = null
      stopCallTimer()
      console.log('通话错误处理完成')
    } else if (msg.type === 'offer') {
      // Incoming SDP Offer: set remote description, create and send Answer
      try {
        await ensurePeer()
        if (pc.signalingState === 'stable') {
          await pc.setRemoteDescription(new RTCSessionDescription(msg.offer))
      const answer = await pc.createAnswer()
      await pc.setLocalDescription(answer)
          
          voiceWs?.send(JSON.stringify({
            type: 'answer',
            callId: msg.callId,
            answer: answer
          }))
          
          console.log('处理Offer并发送Answer')
        } else {
          console.log('跳过Offer处理，当前信令状态:', pc.signalingState)
        }
      } catch (error) {
        console.error('处理Offer失败:', error)
      }
    } else if (msg.type === 'answer') {
      // Incoming SDP Answer
      try {
      if (pc && pc.signalingState === 'have-local-offer') {
          await pc.setRemoteDescription(new RTCSessionDescription(msg.answer))
          console.log('设置远程Answer')
      } else {
          console.log('跳过Answer设置，当前信令状态:', pc?.signalingState)
        }
      } catch (error) {
        console.error('设置Answer失败:', error)
      }
    } else if (msg.type === 'ice_candidate') {
      // Incoming ICE candidate.
      // NOTE(review): pc is dereferenced without a null check here (unlike the
      // 'answer' branch); a candidate arriving before ensurePeer() would throw,
      // though the try/catch confines the failure to a logged error.
      try {
        if (msg.candidate) {
          await pc.addIceCandidate(new RTCIceCandidate(msg.candidate))
          console.log('添加ICE候选')
        }
      } catch (error) {
        console.error('添加ICE候选失败:', error)
      }
    } else if (msg.type === 'user_message') {
      // Server-pushed speech-to-text transcript of the user; show it at once
      // (only when non-empty)
      console.log('收到用户语音消息:', msg.content)
      if (msg.content && msg.content.trim()) {
        // Display the raw user message (without the brevity hint)
        const userMessage = {
          id: Date.now(),
          content: msg.content,
          isUserMessage: true,
          timestamp: new Date().toLocaleTimeString(),
          type: 'text',
          audioUrl: null
        }
        messages.value.push(userMessage)
        
        // During a voice call the brevity hint is only added when forwarding to the LLM
        if (isInCall.value) {
          console.log('语音通话模式：将在发送给LLM时添加简化提示')
        }
        console.log('用户消息已添加到聊天框:', userMessage)
      }
    } else if (msg.type === 'ai_message') {
      // Server-pushed AI message (e.g. opening line); show it immediately
      console.log('收到AI消息:', msg.content)
      if (msg.content && msg.content.trim()) {
        const aiMessage = {
          id: Date.now(),
          content: msg.content,
          isUserMessage: false,
          timestamp: new Date().toLocaleTimeString(),
          type: 'text',
          audioUrl: null
        }
        messages.value.push(aiMessage)
        console.log('AI消息已添加到聊天框:', aiMessage)
      }
    } else if (msg.type === 'audio_url_update') {
      // Audio-URL update: attach the generated audio to the matching message
      console.log('收到音频URL更新:', msg.messageId, msg.audioUrl)
      const messageIndex = messages.value.findIndex(m => m.id === msg.messageId)
      if (messageIndex !== -1) {
        messages.value[messageIndex].audioUrl = msg.audioUrl
        console.log('消息音频URL已更新:', messages.value[messageIndex])
      }
    }
    // After any message, keep the chat box scrolled to the bottom
    await nextTick()
    if (chatBoxRef.value) chatBoxRef.value.scrollTop = chatBoxRef.value.scrollHeight
  }
}

// Ensure the voice WebSocket is OPEN, (re)connecting when necessary.
// Polls every 100 ms and gives up silently after roughly 3 seconds.
const ensureVoiceWsOpen = async () => {
  const isOpen = () => Boolean(voiceWs) && voiceWs.readyState === 1
  if (isOpen()) return
  // readyState > 1 means CLOSING/CLOSED; also covers a never-created socket
  if (!voiceWs || voiceWs.readyState > 1) initVoiceWs()
  const deadline = Date.now() + 3000
  while (Date.now() < deadline) {
    if (isOpen()) return
    await new Promise(resolve => setTimeout(resolve, 100))
  }
}

// Lazily create the shared RTCPeerConnection for voice calls.
// Acquires the microphone, wires up remote-audio playback, ICE candidate
// relaying over the voice WebSocket, and connection-state monitoring.
// Returns the existing connection when one is already set up.
// Throws when getUserMedia fails (e.g. permission denied).
const ensurePeer = async () => {
  if (pc) return pc
  
  // ICE server configuration (STUN only; add TURN for NAT-hostile networks)
  const iceServers = [
    { urls: 'stun:stun.l.google.com:19302' },
    { urls: 'stun:stun1.l.google.com:19302' },
    { urls: 'stun:stun2.l.google.com:19302' },
    { urls: 'stun:stun3.l.google.com:19302' },
    { urls: 'stun:stun4.l.google.com:19302' },
    // If a TURN server is available, add:
    // { urls: 'turn:your-turn-server.com:3478', username: 'user', credential: 'pass' }
  ]
  
  pc = new RTCPeerConnection({ 
    iceServers: iceServers,
    iceCandidatePoolSize: 10,
    bundlePolicy: 'max-bundle',
    rtcpMuxPolicy: 'require'
  })
  
  // Acquire the local microphone stream
  try {
  localStream.value = await navigator.mediaDevices.getUserMedia({
    audio: {
      echoCancellation: true,
      noiseSuppression: true,
        autoGainControl: true,
        sampleRate: 48000,
        channelCount: 1
      }
    })
    
    // Attach every local audio track to the PeerConnection
    localStream.value.getTracks().forEach(track => {
      pc.addTrack(track, localStream.value)
    })
    
    console.log('本地音频流已获取并添加到PeerConnection')
  } catch (error) {
    console.error('获取用户媒体失败:', error)
    throw error
  }
  
  // Route the remote audio stream into the hidden <audio> element
  pc.ontrack = (event) => {
    console.log('收到远程音频流')
    if (!remoteAudio.value) return
    
    const [remoteStream] = event.streams
    remoteAudio.value.srcObject = remoteStream
    remoteAudio.value.muted = false
    remoteAudio.value.autoplay = true
    remoteAudio.value.playsInline = true
    remoteAudio.value.volume = 1.0
    
    // play() may be rejected by autoplay policy; log but don't fail
    const playPromise = remoteAudio.value.play()
    if (playPromise && playPromise.catch) {
      playPromise.catch(error => {
        console.warn('自动播放被阻止:', error)
      })
    }
  }
  
  // Note: recording starts automatically on connection-state changes below
  
  // Relay local ICE candidates to the peer via the signaling WebSocket
  pc.onicecandidate = (event) => {
    if (event.candidate) {
      console.log('发送ICE候选:', event.candidate)
      if (voiceWs && voiceWs.readyState === WebSocket.OPEN) {
        voiceWs.send(JSON.stringify({
          type: 'ice_candidate',
          candidate: event.candidate,
          callId: currentCallId.value
        }))
      }
    }
  }
  
  // Connection-state monitoring drives timers, quality stats and recording
  pc.onconnectionstatechange = () => {
    console.log('连接状态变化:', pc.connectionState)
    callQuality.value.connectionState = pc.connectionState
    
    if (pc.connectionState === 'connected') {
      console.log('WebRTC连接已建立，开始通话录音')
      callState.value = 'connected'
      startCallTimer()
      startQualityMonitoring()
      // Start recording the call
      setTimeout(() => {
        startCallRecording()
      }, 1000) // delay 1 s so the connection can stabilize first
    } else if (pc.connectionState === 'disconnected' || pc.connectionState === 'failed') {
      console.log('WebRTC连接已断开，停止通话录音')
      callState.value = 'idle'
      stopCallTimer()
      stopQualityMonitoring()
      // Stop recording the call
      stopCallRecording()
    }
  }
  
  // ICE connection-state monitoring (feeds the quality panel)
  pc.oniceconnectionstatechange = () => {
    console.log('ICE连接状态:', pc.iceConnectionState)
    callQuality.value.iceConnectionState = pc.iceConnectionState
  }
  
  // ICE gathering-state monitoring
  pc.onicegatheringstatechange = () => {
    console.log('ICE收集状态:', pc.iceGatheringState)
    callQuality.value.iceGatheringState = pc.iceGatheringState
  }
  
  // Signaling-state monitoring (debug only)
  pc.onsignalingstatechange = () => {
    console.log('信令状态变化:', pc.signalingState)
  }
  
  return pc
}

// Halt manual message playback: pause and rewind the active audio element,
// drop the global reference, and reset every per-message play state.
const stopCurrentAudio = () => {
  const activeAudio = currentAudio
  if (activeAudio) {
    activeAudio.pause()
    activeAudio.currentTime = 0
    currentAudio = null
  }
  playing = false
  
  // Wipe all per-message playback states
  messagePlayStates.value.clear()
}

// Pop the next URL from the streaming-TTS queue (ttsQueue) and play it.
// Re-entrant-safe: bails out when a chunk is already playing; chains itself
// from onended/onerror so the queue drains sequentially. Honors user
// interruption via shouldStopLLM by flushing the queue and revoking URLs.
const playNext = () => {
  if (ttsPlaying) {
    console.log('TTS正在播放中，跳过重复调用')
    return
  }
  
  // If the user interrupted, flush the queue instead of playing
  if (shouldStopLLM.value) {
    console.log('TTS播放被用户打断，停止播放')
    // Empty the queue and release every blob URL
    while (ttsQueue.length > 0) {
      const url = ttsQueue.shift()
      URL.revokeObjectURL(url)
    }
    duplexState.value = 'idle'
    return
  }
  
  const url = ttsQueue.shift()
  if (!url) {
    console.log('TTS播放队列为空，播放完成')
    duplexState.value = 'idle'
    return
  }
  
  console.log('开始播放TTS音频，队列剩余:', ttsQueue.length)
  
  // Stop whatever TTS audio was playing before
  if (ttsCurrentAudio) {
    ttsCurrentAudio.pause()
    ttsCurrentAudio.currentTime = 0
    ttsCurrentAudio = null
  }
  
  ttsPlaying = true
  ttsAudioStarted = false
  ttsCurrentAudio = new Audio(url)
  
  // Wait until the audio is fully buffered before starting playback
  ttsCurrentAudio.oncanplaythrough = () => {
    // Re-check interruption: the user may have interrupted while loading
    if (shouldStopLLM.value) {
      console.log('TTS音频加载完成但被用户打断，停止播放')
      ttsPlaying = false
      ttsAudioStarted = false
      ttsCurrentAudio = null
      URL.revokeObjectURL(url)
      return
    }
    
    // ttsAudioStarted guards against canplaythrough firing more than once
    if (ttsCurrentAudio && ttsPlaying && !ttsAudioStarted) {
      ttsAudioStarted = true
      console.log('开始播放TTS音频')
      ttsCurrentAudio.play().catch(e => {
        console.warn('TTS音频播放失败:', e)
        ttsPlaying = false
        ttsAudioStarted = false
        ttsCurrentAudio = null
        URL.revokeObjectURL(url)
        // Move on to the next chunk
        setTimeout(() => playNext(), 100)
      })
    }
  }
  
  ttsCurrentAudio.onended = () => { 
    if (!ttsPlaying) return // guard against double handling
    ttsPlaying = false
    ttsAudioStarted = false
    ttsCurrentAudio = null
    URL.revokeObjectURL(url)
    console.log('TTS音频播放完成，播放下一个')
    // Move on to the next chunk
    setTimeout(() => playNext(), 100)
  }
  
  ttsCurrentAudio.onerror = () => { 
    if (!ttsPlaying) return // guard against double handling
    ttsPlaying = false
    ttsAudioStarted = false
    ttsCurrentAudio = null
    URL.revokeObjectURL(url)
    console.error('TTS音频播放错误，播放下一个')
    // Move on to the next chunk
    setTimeout(() => playNext(), 100)
  }
}

// Start recording a short (non-streaming) voice message from the microphone.
// On stop, the recording is base64-encoded and shipped over the voice
// WebSocket as audio_start -> audio_chunk* -> audio_end frames.
// Alerts the user and aborts on permission / browser-support failures.
const startRecord = async () => {
  if (isRecording.value) return
  let stream
  try {
    stream = await navigator.mediaDevices.getUserMedia({ audio: true })
  } catch (e) {
    console.error('getUserMedia error', e)
    alert('无法访问麦克风，请检查浏览器权限（建议 Chrome/Edge）')
    return
  }
  const options = {}
  // Prefer WAV output for better ASR compatibility
  if (window.MediaRecorder) {
    if (MediaRecorder.isTypeSupported('audio/wav')) {
      options.mimeType = 'audio/wav'
    } else {
      console.warn('浏览器不支持audio/wav，使用默认格式')
    }
  }
  let mr
  try {
    mr = new MediaRecorder(stream, options)
  } catch (e) {
    console.error('MediaRecorder init error', e)
    alert('当前浏览器不支持录音，请更换浏览器再试')
    // Release the microphone we just acquired
    stream.getTracks().forEach(t => t.stop())
    return
  }
  recordChunks = []
  mr.ondataavailable = e => { if (e.data && e.data.size) recordChunks.push(e.data) }
  mr.onstop = async () => {
    const mime = options.mimeType || 'audio/wav'
    const blob = new Blob(recordChunks, { type: mime })
    // Chunked upload: audio_start -> audio_chunk* -> audio_end, so no single
    // WS frame grows too large (avoids close code 1009)
    const arrayBuf = await blob.arrayBuffer()
    const bytes = new Uint8Array(arrayBuf)
    let binary = ''
    for (let i = 0; i < bytes.byteLength; i++) binary += String.fromCharCode(bytes[i])
    const b64 = btoa(binary)
    await ensureVoiceWsOpen()
    if (!(voiceWs && voiceWs.readyState === 1)) { alert('语音通道未连接'); return }
    try {
      voiceWs.send(JSON.stringify({ type: 'audio_start', conversationId: conversationId.value, mime }))
      const CHUNK = 16384 // 16 KB base64 slices
      let seq = 0
      for (let p = 0; p < b64.length; p += CHUNK) {
        const part = b64.slice(p, p + CHUNK)
        voiceWs.send(JSON.stringify({ type: 'audio_chunk', seq, payload: part }))
        seq++
      }
      voiceWs.send(JSON.stringify({ type: 'audio_end', conversationId: conversationId.value, mime }))
      asrPending.value = true
    } catch (e) {
      console.error('send chunks error', e)
      alert('发送音频失败，请重试')
    }
  }
  mr.start()
  mediaRecorder = mr
  isRecording.value = true
}

// Stop an in-progress short-voice recording and release the microphone.
// mediaRecorder.onstop (set in startRecord) then uploads the captured audio.
const stopRecord = () => {
  if (!mediaRecorder || !isRecording.value) return
  mediaRecorder.stop()
  for (const track of mediaRecorder.stream.getTracks()) track.stop()
  isRecording.value = false
}

// TTS play/pause toggle for a single chat message.
// Behavior: toggles pause/resume when the message is already playing/paused;
// otherwise, with no audioUrl it requests TTS from the backend over the
// WebSocket (role default voice) and waits for tts_blob chunks; with an
// audioUrl it plays that URL directly.
const playTts = async (text, messageId, audioUrl = null) => {
  if (!text && !audioUrl) return
  
  console.log('playTts调用:', { text: text?.substring(0, 20) + '...', messageId, audioUrl })
  
  const messageKey = messageId || text.substring(0, 20) // keyed by message id, or first 20 chars of the text
  const playState = messagePlayStates.value.get(messageKey)
  
  // Toggle: if this message is playing, pause it
  if (playState && playState.playing && playState.currentAudio) {
    playState.currentAudio.pause()
    playState.paused = true
    playState.playing = false
    playing = false
    return
  }
  
  // Toggle: if this message is paused, resume it
  if (playState && playState.paused && playState.currentAudio) {
    playState.currentAudio.play()
    playState.paused = false
    playState.playing = true
    playing = true
    return
  }
  
  // No pre-generated audio: ask the backend to synthesize with the role's default voice
  if (!audioUrl) {
    console.log('音频URL为空，发送TTS请求使用角色默认音色...')
    
    // Stop any other playing audio first
    stopCurrentAudio()
    
    // Show a loading indicator by appending a speaker emoji to the message
    const messageIndex = messages.value.findIndex(m => m.id === messageId)
    if (messageIndex !== -1) {
      const originalContent = messages.value[messageIndex].content
      messages.value[messageIndex].content = originalContent + ' 🔊'
    }
    
    // Mark this message as waiting for the TTS response
    const newPlayState = {
      playing: false,
      paused: false,
      audio: null,
      currentAudio: null,
      audioQueue: [],
      waitingForTTS: true // flag: awaiting tts_blob chunks from the server
    }
    messagePlayStates.value.set(messageKey, newPlayState)
    
    // Send the TTS request; empty voiceType lets the backend pick the role default
    if (voiceWs && voiceWs.readyState === WebSocket.OPEN) {
      const cleanedText = cleanTextForTTS(text)
      voiceWs.send(JSON.stringify({
        type: 'tts_request',
        text: cleanedText,
        conversationId: conversationId.value,
        voiceType: '', // empty string = role default voice
        messageId: messageId // lets the tts_blob handler route chunks back to this message
      }))
    }
    return
  }
  
  // Stop any other playing audio
  stopCurrentAudio()
  
  // Reset all per-message play states
  messagePlayStates.value.clear()
  
  try {
    let url
    
    // NOTE(review): the early return above means audioUrl is always truthy
    // here, so the else branch (direct /api/audio/tts call) is dead code.
    if (audioUrl) {
      console.log('使用预生成的音频URL:', audioUrl)
      url = audioUrl
    } else {
      console.log('调用TTS API生成音频')
      const cleanedText = cleanTextForTTS(text)
  const res = await fetch('/api/audio/tts', {
    method: 'POST',
        headers: { 'Content-Type': 'application/json', ...authHeaders() },
        body: JSON.stringify({ text: cleanedText, language: 'zh' })
  })
  const buf = await res.arrayBuffer()
  const blob = new Blob([buf], { type: 'audio/wav' })
      url = URL.createObjectURL(blob)
    }
    
    // Record the playing state for this message
    const newPlayState = {
      playing: true,
      paused: false,
      audio: null
    }
    messagePlayStates.value.set(messageKey, newPlayState)
    
    playing = true
    currentAudio = new Audio(url)
    newPlayState.audio = currentAudio
    
    currentAudio.addEventListener('canplaythrough', () => {
      // Skip playback if the user interrupted in the meantime
      if (shouldStopLLM.value) {
        console.log('手动TTS音频加载完成但被用户打断，停止播放')
        playing = false
        currentAudio = null
        URL.revokeObjectURL(url)
        messagePlayStates.value.delete(messageKey)
        return
      }
      
      if (currentAudio) {
        currentAudio.play().catch(e => {
          console.warn('音频播放失败:', e)
          playing = false
          currentAudio = null
          URL.revokeObjectURL(url)
          messagePlayStates.value.delete(messageKey)
        })
      }
    })
    
    currentAudio.onended = () => {
      playing = false
      currentAudio = null
      // Only blob Object URLs need revoking; remote (Kodo) URLs do not
      if (!audioUrl) {
        URL.revokeObjectURL(url)
      }
      messagePlayStates.value.delete(messageKey)
    }
    currentAudio.onerror = () => {
      playing = false
      currentAudio = null
      // Only blob Object URLs need revoking; remote (Kodo) URLs do not
      if (!audioUrl) {
        URL.revokeObjectURL(url)
      }
      messagePlayStates.value.delete(messageKey)
    }
  } catch (error) {
    console.error('TTS请求失败:', error)
    messagePlayStates.value.delete(messageKey)
  }
}

// Play the next queued audio element for a manually requested TTS message.
// TTS audio arrives in chunks over the WebSocket; each chunk is wrapped in an
// <audio> element and appended to playState.audioQueue, and this function
// drains that queue one element at a time, re-invoking itself from the
// canplaythrough/onended/onerror handlers.
const playNextManualTTS = (messageKey) => {
  const playState = messagePlayStates.value.get(messageKey)
  // Fix: bail out when no play state exists for this message. The previous
  // code fell into the "queue empty" branch and dereferenced
  // playState.playing, throwing a TypeError whenever the state had been
  // cleared (e.g. by stopCurrentAudio) while chunks were still arriving.
  if (!playState) return
  if (!playState.audioQueue || playState.audioQueue.length === 0) {
    // Queue drained - playback for this message is finished
    playState.playing = false
    playState.currentAudio = null
    return
  }
  
  const audio = playState.audioQueue.shift()
  playState.currentAudio = audio
  currentAudio = audio
  playing = true
  
  audio.addEventListener('canplaythrough', () => {
    // Only start if this element is still the globally active audio
    if (audio === currentAudio) {
      audio.play().catch(e => {
        console.warn('手动TTS音频播放失败:', e)
        playNextManualTTS(messageKey)
      })
    }
  })
  
  audio.onended = () => {
    if (audio === currentAudio) {
      URL.revokeObjectURL(audio.src)
      playNextManualTTS(messageKey)
    }
  }
  
  audio.onerror = () => {
    if (audio === currentAudio) {
      console.warn('手动TTS音频播放错误')
      URL.revokeObjectURL(audio.src)
      playNextManualTTS(messageKey)
    }
  }
}

// Pick the play-button icon for a message based on its playback state:
// playing -> pause icon, paused -> play icon, otherwise the default speaker.
const getPlayButtonIcon = (messageId) => {
  const state = messagePlayStates.value.get(messageId || 'default')
  if (state?.playing) return '⏸️'
  if (state?.paused) return '▶️'
  return '🔊'
}

// Pick the play-button tooltip text for a message based on its playback state.
const getPlayButtonTitle = (messageId) => {
  const state = messagePlayStates.value.get(messageId || 'default')
  if (state?.playing) return '暂停播放'
  if (state?.paused) return '继续播放'
  return '朗读'
}

// Resolve which conversation this page should use, in priority order:
// 1) conversationId from the URL query, 2) a new conversation created for the
// roleId route param, 3) the already-selected conversationId, 4) the first
// conversation in the loaded list. Also sets currentRole where known.
const ensureConversation = async () => {
  console.log('ensureConversation 开始执行')
  console.log('roleId.value:', roleId.value)
  console.log('routeConversationId.value:', routeConversationId.value)
  console.log('conversationId.value:', conversationId.value)
  
  // Case 1: the URL names a conversation - use it directly
  if (routeConversationId.value) {
    console.log('使用指定的会话:', routeConversationId.value)
    conversationId.value = routeConversationId.value
    
    // Look the conversation up in the loaded list to recover its role info
    const targetConversation = conversations.value.find(c => c.id === routeConversationId.value)
    if (targetConversation && targetConversation.roleId) {
      currentRole.value = {
        id: targetConversation.roleId,
        name: targetConversation.roleName,
        avatarUrl: targetConversation.roleAvatarUrl,
        description: targetConversation.roleDescription || 'AI助手'
      }
    }
    return
  }
  
  // Case 2: the URL names a role - create a fresh conversation with it
  if (roleId.value) {
    console.log('从角色ID创建新会话:', roleId.value)
    console.log('当前conversationId:', conversationId.value)
    console.log('当前conversations数量:', conversations.value.length)
    
    // Load the role details first
    await fetchCurrentRole(roleId.value)
    
    // Create the new conversation
    const requestBody = { roleId: Number(roleId.value) }
    console.log('发送创建会话请求:', requestBody)
    
  const res = await fetch('/api/conversations', {
    method: 'POST',
      headers: { 'Content-Type': 'application/json', ...authHeaders() },
      body: JSON.stringify(requestBody)
  })
    
    console.log('创建会话响应状态:', res.status)
  const data = await res.json()
    console.log('创建会话响应数据:', data)
    
    // code === 0 means success; data.data carries the new conversation id
    if (data.code === 0) {
  conversationId.value = data.data
      console.log('创建新会话成功:', conversationId.value)
      
      // Bump the role's usage counter (best effort - failure is non-fatal)
      try {
        await fetch(`/api/roles/${roleId.value}/use`, {
          method: 'POST',
          headers: authHeaders()
        })
      } catch (e) {
        console.warn('增加角色使用量失败:', e)
      }
      
      // Refresh the conversation list
      await fetchConversations(true)
    } else {
      console.error('创建会话失败:', data.message)
    }
    return
  }
  
  // Case 3: already have a conversation (entered from the list) - keep it
  if (conversationId.value) {
    return
  }
  
  // Case 4: fall back to the first conversation in the list, if any
  if (conversations.value.length > 0) {
    const firstConversation = conversations.value[0]
    conversationId.value = firstConversation?.id || null
    
    // Set the current role from that conversation
    if (firstConversation && firstConversation.roleId) {
      currentRole.value = {
        id: firstConversation.roleId,
        name: firstConversation.roleName,
        avatarUrl: firstConversation.roleAvatarUrl,
        description: firstConversation.roleDescription || 'AI助手'
      }
    }
    return
  }
  
  // Nothing to select: no conversations and no role id
  console.log('没有会话和角色信息')
}

// ---- Call helpers ----
// Start (or restart) the per-second call-duration counter.
// BUG FIX: clear any existing interval first — previously calling this twice
// orphaned the old timer, leaking it and double-incrementing callDuration.
const startCallTimer = () => {
  if (callTimer) {
    clearInterval(callTimer)
    callTimer = null
  }
  callDuration.value = 0
  callTimer = setInterval(() => {
    callDuration.value++
  }, 1000)
}

// Stop the call-duration ticker, if one is running.
const stopCallTimer = () => {
  if (!callTimer) return
  clearInterval(callTimer)
  callTimer = null
}

// Format a duration in whole seconds as a zero-padded "MM:SS" string.
const formatCallDuration = (seconds) => {
  const minutes = String(Math.floor(seconds / 60)).padStart(2, '0')
  const remainder = String(seconds % 60).padStart(2, '0')
  return `${minutes}:${remainder}`
}

// Begin polling WebRTC stats every 2s to populate callQuality (audio level,
// packet-loss percentage, round-trip latency). No-op if a poller is already
// running; each tick is skipped unless the peer connection reports 'connected'.
const startQualityMonitoring = () => {
  if (qualityMonitorInterval) return
  
  qualityMonitorInterval = setInterval(async () => {
    if (!pc || pc.connectionState !== 'connected') return
    
    try {
      // Pull the current RTC stats report
      const stats = await pc.getStats()
      let audioLevel = 0
      let packetLoss = 0
      let latency = 0
      
      stats.forEach(report => {
        if (report.type === 'inbound-rtp' && report.mediaType === 'audio') {
          // Incoming audio level scaled to 0-100 (simplified)
          if (report.audioLevel !== undefined) {
            audioLevel = Math.round(report.audioLevel * 100)
          }
          
          // Loss ratio over all packets seen so far (cumulative, not windowed)
          if (report.packetsLost !== undefined && report.packetsReceived !== undefined) {
            const totalPackets = report.packetsLost + report.packetsReceived
            if (totalPackets > 0) {
              packetLoss = Math.round((report.packetsLost / totalPackets) * 100)
            }
          }
        }
        
        if (report.type === 'candidate-pair' && report.state === 'succeeded') {
          // RTT of the active candidate pair, converted from seconds to ms
          if (report.currentRoundTripTime !== undefined) {
            latency = Math.round(report.currentRoundTripTime * 1000)
          }
        }
      })
      
      // Publish the freshly-computed metrics
      callQuality.value.audioLevel = audioLevel
      callQuality.value.packetLoss = packetLoss
      callQuality.value.latency = latency
      
    } catch (error) {
      console.warn('获取通话统计信息失败:', error)
    }
  }, 2000) // poll every 2 seconds
}

// Tear down the quality poller and reset every metric to its default.
const stopQualityMonitoring = () => {
  if (qualityMonitorInterval) {
    clearInterval(qualityMonitorInterval)
    qualityMonitorInterval = null
  }
  
  const freshMetrics = {
    connectionState: 'new',
    iceConnectionState: 'new',
    iceGatheringState: 'new',
    audioLevel: 0,
    packetLoss: 0,
    latency: 0
  }
  callQuality.value = freshMetrics
}

// Map a 0-100 quality score onto a CSS class (higher is better).
const getQualityClass = (value) => {
  const tiers = [
    [80, 'quality-excellent'],
    [60, 'quality-good'],
    [40, 'quality-fair'],
  ]
  for (const [min, cls] of tiers) {
    if (value >= min) return cls
  }
  return 'quality-poor'
}

// Map a round-trip latency (ms) onto a CSS class (lower is better).
const getLatencyClass = (latency) => {
  const tiers = [
    [100, 'quality-excellent'],
    [200, 'quality-good'],
    [300, 'quality-fair'],
  ]
  for (const [max, cls] of tiers) {
    if (latency <= max) return cls
  }
  return 'quality-poor'
}

// Call-recording holders. NOTE(review): these MediaRecorder variables appear
// unused in this chunk — in-call capture goes through the VAD pipeline below
// (startCallRecording -> startVADRecording); confirm before removing.
let callMediaRecorder = null
let callRecordChunks = []

// Set up the microphone -> analyser -> ScriptProcessor chain used for
// voice-activity detection. Returns true on success, false on any failure.
// NOTE(review): the captured `stream` is never stored, so its tracks are not
// stopped by stopVADRecording — the mic indicator may stay live; confirm.
const initVAD = async () => {
  try {
    // Request a mono 16kHz mic stream with browser DSP enabled
    const stream = await navigator.mediaDevices.getUserMedia({ 
      audio: {
        sampleRate: 16000,
        channelCount: 1,
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: true
      } 
    })
    
    // Dedicated 16kHz context for VAD analysis
    audioContext = new (window.AudioContext || window.webkitAudioContext)({
      sampleRate: 16000
    })
    
    // Analyser sits in the chain; frames are read in the processor callback
    analyser = audioContext.createAnalyser()
    analyser.fftSize = 2048
    analyser.smoothingTimeConstant = 0.3
    
    // Feed the mic into the analyser
    microphone = audioContext.createMediaStreamSource(stream)
    microphone.connect(analyser)
    
    // Small 1024-sample buffer for low-latency detection.
    // NOTE(review): ScriptProcessorNode is deprecated in favor of AudioWorklet.
    vadProcessor = audioContext.createScriptProcessor(1024, 1, 1)
    vadProcessor.onaudioprocess = processAudioDataWithVAD
    
    analyser.connect(vadProcessor)
    vadProcessor.connect(audioContext.destination)
    
    console.log('WebRTC VAD初始化成功')
    return true
    
  } catch (error) {
    console.error('VAD初始化失败:', error)
    return false
  }
}

// ScriptProcessor callback: runs once per 1024-sample frame while VAD capture
// is active. Debounces speech/silence via consecutive-frame counters, barges
// in on AI playback when the user starts talking, buffers speech audio, and
// flushes the buffer to the backend once silence persists long enough.
// NOTE(review): frames arriving before minConsecutiveSpeechFrames is reached
// are not buffered, so the very start of each utterance is dropped — confirm
// this is intentional.
const processAudioDataWithVAD = (event) => {
  if (!isVadRecording) return
  
  const inputBuffer = event.inputBuffer
  const inputData = inputBuffer.getChannelData(0)
  
  // Extract RMS / ZCR / centroid / peak features for this frame
  const features = calculateAudioFeatures(inputData)
  
  // Multi-feature speech/non-speech decision
  const isSpeech = detectSpeech(features)
  
  // Debug log for a random ~20% of frames (not strictly every 5th)
  if (Math.random() < 0.2) {
    console.log(`VAD调试 - RMS: ${features.rms.toFixed(4)}, ZCR: ${features.zcr.toFixed(3)}, 语音: ${isSpeech}, 连续语音帧: ${consecutiveSpeechFrames}, 连续静音帧: ${consecutiveSilenceFrames}, 状态: ${vadState.value}`)
  }
  
  if (isSpeech) {
    // Speech frame: extend the speech run, reset the silence run
    consecutiveSpeechFrames++
    consecutiveSilenceFrames = 0
    
    // Require several consecutive speech frames before reacting (debounce)
    if (consecutiveSpeechFrames >= minConsecutiveSpeechFrames) {
      // Barge-in: if TTS audio is playing, interrupt it immediately
      if (ttsPlaying || playing) {
        console.log('检测到用户说话，中断AI音频播放', {
          ttsPlaying,
          playing,
          isLLMStreaming: isLLMStreaming.value,
          shouldStopLLM: shouldStopLLM.value
        })
        interruptAudio('user_speaking')
      }
      
      if (vadState.value === 'idle' || vadState.value === 'listening') {
        // Transition into the "user is speaking" state
        vadState.value = 'speaking'
        duplexState.value = 'user_speaking'
        isVoiceActive.value = true
        speechStartTime = Date.now()
        silenceStartTime = null
        audioBuffer = []
        console.log('检测到语音开始')
        
        // Also interrupt the duplex-queue player if AI audio is current
        if (isPlayingAudio.value && currentAudioStream.value?.type === 'ai') {
          interruptAudio('user_speaking')
        }
      }
      
      // Accumulate this frame's samples (copied — the buffer is reused)
      audioBuffer.push(new Float32Array(inputData))
    }
    
  } else {
    // Silence frame: extend the silence run, reset the speech run
    consecutiveSilenceFrames++
    consecutiveSpeechFrames = 0
    
    if (vadState.value === 'speaking') {
      // Require several consecutive silence frames before ending the utterance
      if (consecutiveSilenceFrames >= maxConsecutiveSilenceFrames) {
        if (!silenceStartTime) {
          silenceStartTime = Date.now()
        }
        
        // End the utterance only after silence lasts past silenceDuration
        if (Date.now() - silenceStartTime > silenceDuration) {
          const speechDuration = Date.now() - speechStartTime
          
          if (speechDuration >= minSpeechDuration) {
            // Long enough: ship the buffered audio to the backend
            sendAudioBuffer()
          } else {
            console.log('语音太短，忽略')
          }
          
          // Reset the full detection state machine
          vadState.value = 'idle'
          duplexState.value = 'idle'
          isVoiceActive.value = false
          speechStartTime = null
          silenceStartTime = null
          audioBuffer = []
          consecutiveSpeechFrames = 0
          consecutiveSilenceFrames = 0
        }
      }
    }
  }
}

// Compute per-frame audio features for the VAD decision: RMS energy,
// zero-crossing rate, a "spectral centroid", and a peak count.
// NOTE(review): no FFT is actually performed below — fftComplex just holds
// raw time-domain samples, so fft[i] is |sample_i| and "spectralCentroid"
// is really a magnitude-weighted sample index, not a spectral measure.
// detectSpeech's 50-200 band appears tuned against this behavior, so fixing
// it would change VAD results — confirm before replacing with a real FFT.
const calculateAudioFeatures = (audioData) => {
  const length = audioData.length
  
  // 1. RMS energy
  let sum = 0
  for (let i = 0; i < length; i++) {
    sum += audioData[i] * audioData[i]
  }
  const rms = Math.sqrt(sum / length)
  
  // 2. Zero-crossing rate (sign changes per sample)
  let zcr = 0
  for (let i = 1; i < length; i++) {
    if ((audioData[i] >= 0) !== (audioData[i-1] >= 0)) {
      zcr++
    }
  }
  zcr = zcr / length
  
  // 3. "Spectral centroid" — see NOTE above: time-domain only
  const fftSize = 1024
  const fft = new Float32Array(fftSize)
  const fftComplex = new Float32Array(fftSize * 2)
  
  // Copy samples into the interleaved real/imag buffer (no transform applied)
  for (let i = 0; i < Math.min(length, fftSize); i++) {
    fftComplex[i * 2] = audioData[i]
    fftComplex[i * 2 + 1] = 0
  }
  
  // Magnitudes of the (untransformed) buffer — equals |sample| per entry
  for (let i = 0; i < fftSize / 2; i++) {
    const real = fftComplex[i * 2]
    const imag = fftComplex[i * 2 + 1]
    fft[i] = Math.sqrt(real * real + imag * imag)
  }
  
  // Weighted-index "centroid" over those magnitudes
  let weightedSum = 0
  let magnitudeSum = 0
  for (let i = 0; i < fftSize / 2; i++) {
    weightedSum += i * fft[i]
    magnitudeSum += fft[i]
  }
  const spectralCentroid = magnitudeSum > 0 ? weightedSum / magnitudeSum : 0
  
  // 4. Local maxima above 2x RMS
  let peakCount = 0
  const threshold = rms * 2
  for (let i = 1; i < length - 1; i++) {
    if (audioData[i] > audioData[i-1] && audioData[i] > audioData[i+1] && audioData[i] > threshold) {
      peakCount++
    }
  }
  
  return {
    rms,
    zcr,
    spectralCentroid,
    peakCount,
    length
  }
}

// Multi-feature VAD decision: score four weighted features and compare the
// total against a 0.5 threshold. Returns true when the frame looks like speech.
const detectSpeech = (features) => {
  const { rms, zcr, spectralCentroid, peakCount } = features
  let score = 0
  
  // Energy (weight 0.4, with a half-credit band for quiet speech)
  if (rms > 0.02) {
    score += 0.4
  } else if (rms > 0.01) {
    score += 0.2
  }
  
  // Zero-crossing rate inside the speech-typical band (weight 0.2)
  if (zcr > 0.1 && zcr < 0.5) {
    score += 0.2
  }
  
  // Spectral centroid inside the speech-like band (weight 0.2)
  if (spectralCentroid > 50 && spectralCentroid < 200) {
    score += 0.2
  }
  
  // Enough local peaks above 2x RMS (weight 0.2)
  if (peakCount > 5) {
    score += 0.2
  }
  
  return score > 0.5
}

// Flush the accumulated VAD speech frames: merge them into one Float32Array,
// wrap as 16kHz WAV, base64-encode via FileReader, and send an 'audio_chunk'
// message over the voice WebSocket.
// NOTE(review): FileReader.onload fires asynchronously, so the `finally`
// below resets vadState to 'idle' before the send actually happens; the
// caller (processAudioDataWithVAD) clears audioBuffer afterwards.
const sendAudioBuffer = async () => {
  if (audioBuffer.length === 0) return
  
  try {
    vadState.value = 'processing'
    
    // Concatenate all buffered frames into one contiguous array
    const totalLength = audioBuffer.reduce((sum, buffer) => sum + buffer.length, 0)
    const mergedBuffer = new Float32Array(totalLength)
    let offset = 0
    
    for (const buffer of audioBuffer) {
      mergedBuffer.set(buffer, offset)
      offset += buffer.length
    }
    
    // Encode as a 16kHz mono WAV blob
    const wavBlob = float32ArrayToWav(mergedBuffer, 16000)
    
    // Base64-encode and ship over the voice WebSocket (if open)
    const reader = new FileReader()
    reader.onload = () => {
      const base64 = reader.result.split(',')[1]
      console.log('发送VAD音频数据，大小:', base64.length)
      
      if (voiceWs && voiceWs.readyState === WebSocket.OPEN) {
        voiceWs.send(JSON.stringify({
          type: 'audio_chunk',
          payload: base64,
          mime: 'audio/wav', // always WAV — matches the encoder above
          conversationId: conversationId.value
        }))
      }
    }
    reader.readAsDataURL(wavBlob)
    
  } catch (error) {
    console.error('发送音频数据失败:', error)
  } finally {
    vadState.value = 'idle'
  }
}

// Encode Float32 samples as a mono 16-bit PCM WAV blob at `sampleRate`.
const float32ArrayToWav = (float32Array, sampleRate) => {
  const sampleCount = float32Array.length
  const dataBytes = sampleCount * 2
  const buffer = new ArrayBuffer(44 + dataBytes)
  const view = new DataView(buffer)
  
  // Helper: write an ASCII tag byte-by-byte
  const putTag = (offset, tag) => {
    for (let i = 0; i < tag.length; i++) {
      view.setUint8(offset + i, tag.charCodeAt(i))
    }
  }
  
  // RIFF/WAVE header: PCM, mono, 16-bit little-endian
  putTag(0, 'RIFF')
  view.setUint32(4, 36 + dataBytes, true)
  putTag(8, 'WAVE')
  putTag(12, 'fmt ')
  view.setUint32(16, 16, true)             // fmt chunk size
  view.setUint16(20, 1, true)              // PCM format code
  view.setUint16(22, 1, true)              // 1 channel
  view.setUint32(24, sampleRate, true)
  view.setUint32(28, sampleRate * 2, true) // byte rate
  view.setUint16(32, 2, true)              // block align
  view.setUint16(34, 16, true)             // bits per sample
  putTag(36, 'data')
  view.setUint32(40, dataBytes, true)
  
  // Clamp each sample to [-1, 1] and scale to int16
  for (let i = 0; i < sampleCount; i++) {
    const sample = Math.max(-1, Math.min(1, float32Array[i]))
    view.setInt16(44 + i * 2, sample * 0x7FFF, true)
  }
  
  return new Blob([buffer], { type: 'audio/wav' })
}

// Build the full-duplex playback graph: a 16kHz AudioContext feeding a
// master mixer, with separate gain nodes for user and AI audio so either
// side can be ducked while the other speaks. Returns true on success.
const initDuplexAudio = async () => {
  try {
    // Dedicated context for duplex playback (separate from the VAD context)
    audioContextDuplex = new (window.AudioContext || window.webkitAudioContext)({
      sampleRate: 16000
    })
    
    // Master mixer into the output device
    audioMixer = audioContextDuplex.createGain()
    audioMixer.connect(audioContextDuplex.destination)
    
    // Per-source gain controls
    userAudioGain = audioContextDuplex.createGain()
    aiAudioGain = audioContextDuplex.createGain()
    
    // Defaults: user full volume, AI slightly ducked
    userAudioGain.gain.value = 1.0
    aiAudioGain.gain.value = 0.8
    
    // Route both sources through the mixer
    userAudioGain.connect(audioMixer)
    aiAudioGain.connect(audioMixer)
    
    console.log('全双工音频系统初始化成功')
    return true
    
  } catch (error) {
    console.error('全双工音频系统初始化失败:', error)
    return false
  }
}

// Enqueue an audio payload for playback, ordered by descending priority
// (stable among equal priorities), and kick the player if it is idle.
const addToAudioQueue = (audioData, type = 'ai', priority = 0) => {
  const entry = {
    id: `audio_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`,
    data: audioData,
    type, // 'user' or 'ai'
    priority, // larger number plays sooner
    timestamp: Date.now()
  }
  
  // Insert just before the first strictly-lower-priority item
  const queue = audioQueue.value
  const insertAt = queue.findIndex(item => item.priority < priority)
  if (insertAt === -1) {
    queue.push(entry)
  } else {
    queue.splice(insertAt, 0, entry)
  }
  
  console.log(`音频已加入队列: ${type}, 优先级: ${priority}, 队列长度: ${audioQueue.value.length}`)
  
  // Start draining if nothing is currently playing
  if (!isPlayingAudio.value) {
    playNextAudio()
  }
}

// Drain one item from the duplex audio queue: duck the opposite side's gain,
// play the item, then self-schedule the next item 100ms later (both on
// success and on failure, via `finally`). Clears the playing flags when the
// queue is empty.
const playNextAudio = async () => {
  if (audioQueue.value.length === 0) {
    isPlayingAudio.value = false
    currentAudioStream.value = null
    return
  }
  
  const audioItem = audioQueue.value.shift()
  isPlayingAudio.value = true
  currentAudioStream.value = audioItem
  
  try {
    // Duck whichever side is not currently playing
    if (audioItem.type === 'user') {
      userAudioGain.gain.value = 1.0
      aiAudioGain.gain.value = 0.3 // duck AI audio
    } else {
      userAudioGain.gain.value = 0.3 // duck user audio
      aiAudioGain.gain.value = 1.0
    }
    
    // Resolves when the clip finishes (or rejects on playback error)
    await playAudioItem(audioItem)
    
  } catch (error) {
    console.error('播放音频失败:', error)
  } finally {
    // Continue with the next queued item after a short gap
    setTimeout(() => {
      playNextAudio()
    }, 100)
  }
}

// Play one queue entry. Base64 string payloads are decoded into a blob and
// played through an HTML Audio element; AudioBuffer payloads go through the
// duplex gain graph. Resolves when playback ends, rejects on error.
// NOTE(review): the BufferSource created up front is unused on the base64
// path, and 'canplaythrough' can fire more than once (re-invoking play()) —
// confirm before relying on single-shot behavior.
const playAudioItem = async (audioItem) => {
  return new Promise((resolve, reject) => {
    try {
      // Buffer source for the AudioBuffer path (unused for base64 payloads)
      const audioSource = audioContextDuplex.createBufferSource()
      
      // Base64 payloads: decode -> Blob -> object URL -> <audio>
      if (typeof audioItem.data === 'string') {
        // Decode base64 into raw bytes
        const binaryString = atob(audioItem.data)
        const bytes = new Uint8Array(binaryString.length)
        for (let i = 0; i < binaryString.length; i++) {
          bytes[i] = binaryString.charCodeAt(i)
        }
        
        // Wrap as an MP3 blob and play via an HTML Audio element
        const blob = new Blob([bytes], { type: 'audio/mp3' })
        const audioUrl = URL.createObjectURL(blob)
        
        const audio = new Audio(audioUrl)
        audio.addEventListener('canplaythrough', () => {
          audio.play().then(() => {
            console.log(`音频开始播放: ${audioItem.type}`)
          }).catch(error => {
            console.error('音频播放失败:', error)
            URL.revokeObjectURL(audioUrl)
            reject(error)
          })
        })
        
        audio.addEventListener('ended', () => {
          console.log(`音频播放完成: ${audioItem.type}`)
          URL.revokeObjectURL(audioUrl)
          resolve()
        })
        
        audio.addEventListener('error', (error) => {
          console.error('音频播放错误:', error)
          URL.revokeObjectURL(audioUrl)
          reject(error)
        })
      } else {
        // AudioBuffer payloads: play through the per-source gain node
        audioSource.buffer = audioItem.data
        audioSource.connect(audioItem.type === 'user' ? userAudioGain : aiAudioGain)
        
        audioSource.onended = () => {
          console.log(`音频播放完成: ${audioItem.type}`)
          resolve()
        }
        
        audioSource.start()
      }
      
    } catch (error) {
      reject(error)
    }
  })
}

// Hard-stop every AI output path: current <audio> elements, the TTS queue,
// the duplex playback queue, LLM/TTS streaming flags, and VAD capture.
// Unlike interruptAudio, this does NOT notify the backend and resets the
// duplex state to fully idle. `reason` is used for logging only.
const stopAIPlayback = (reason = 'unknown') => {
  console.log(`${reason}：立即停止AI播放和生成`)
  
  // Stop the currently playing <audio>, if any
  if (currentAudio) {
    currentAudio.pause()
    currentAudio.currentTime = 0
    currentAudio = null
  }
  
  // Stop the in-flight TTS clip
  if (ttsCurrentAudio) {
    ttsCurrentAudio.pause()
    ttsCurrentAudio.currentTime = 0
    ttsCurrentAudio = null
  }
  
  // Drain the TTS queue, releasing every queued object URL
  while (ttsQueue.length > 0) {
    const url = ttsQueue.shift()
    URL.revokeObjectURL(url)
  }
  
  // Clear the duplex playback queue and reset all playback flags
  audioQueue.value = []
  playing = false
  ttsPlaying = false
  ttsAudioStarted = false
  isPlayingAudio.value = false
  currentAudioStream.value = null
  
  // Abort LLM streaming (local flags only — no message to the backend here)
  if (isLLMStreaming.value) {
    console.log(`${reason}：停止LLM流式生成`)
    shouldStopLLM.value = true
    isLLMStreaming.value = false
    isStreamingLLM.value = false
    streamingText.value = ''
  }
  
  // Abort TTS streaming
  if (isTTSStreaming.value) {
    console.log(`${reason}：停止TTS流式传输`)
    isTTSStreaming.value = false
    ttsPending.value = false
  }
  
  // Stop VAD capture as well
  if (isVadRecording) {
    console.log(`${reason}：停止VAD录音`)
    stopVADRecording()
  }
  
  // Back to fully idle
  duplexState.value = 'idle'
  isVoiceActive.value = false
  
  console.log(`${reason}：AI播放停止完成`)
}

// Barge-in handler: stop all AI audio because the user started speaking.
// Unlike stopAIPlayback, this sends a 'stop_llm' message to the backend,
// leaves VAD running, and sets duplexState to 'user_speaking' so capture
// continues seamlessly.
const interruptAudio = (reason = 'user_interrupt') => {
  console.log(`中断音频播放: ${reason}`)
  
  // Stop the currently playing <audio>, if any
  if (currentAudio) {
    currentAudio.pause()
    currentAudio.currentTime = 0
    currentAudio = null
  }
  
  // Stop the in-flight TTS clip
  if (ttsCurrentAudio) {
    ttsCurrentAudio.pause()
    ttsCurrentAudio.currentTime = 0
    ttsCurrentAudio = null
  }
  
  // Drain the TTS queue, releasing every queued object URL
  while (ttsQueue.length > 0) {
    const url = ttsQueue.shift()
    URL.revokeObjectURL(url)
  }
  
  // Clear the duplex playback queue
  audioQueue.value = []
  
  // Stop the active duplex buffer source, if it exposes one
  if (currentAudioStream.value && currentAudioStream.value.source) {
    currentAudioStream.value.source.stop()
  }
  
  playing = false
  ttsPlaying = false
  ttsAudioStarted = false
  isPlayingAudio.value = false
  currentAudioStream.value = null
  
  // If the LLM is still streaming, flag it stopped and tell the backend
  if (isLLMStreaming.value) {
    console.log('用户打断，停止LLM流式生成', {
      isLLMStreaming: isLLMStreaming.value,
      isStreamingLLM: isStreamingLLM.value,
      shouldStopLLM: shouldStopLLM.value,
      wsReady: voiceWs && voiceWs.readyState === WebSocket.OPEN
    })
    shouldStopLLM.value = true
    isLLMStreaming.value = false
    isStreamingLLM.value = false
    
    // Ask the backend to abort generation
    if (voiceWs && voiceWs.readyState === WebSocket.OPEN) {
      console.log('发送stop_llm信号到后端')
      voiceWs.send(JSON.stringify({
        type: 'stop_llm',
        reason: 'user_interrupt'
      }))
    } else {
      console.warn('WebSocket未连接，无法发送stop_llm信号')
    }
  }
  
  // Abort TTS streaming
  if (isTTSStreaming.value) {
    console.log('用户打断，停止TTS流式传输')
    isTTSStreaming.value = false
    ttsPending.value = false
  }
  
  // Discard any partial streamed text
  streamingText.value = ''
  
  // Hand the floor to the user
  duplexState.value = 'user_speaking'
}

// Load the signed-in user's display name and avatar from the backend.
const fetchUserInfo = async () => {
  try {
    const res = await fetch('/api/user/info', { method: 'GET', headers: authHeaders() })
    const body = await res.json()
    if (body.code !== 0) {
      console.error('获取用户信息失败:', body.message)
      return
    }
    userName.value = body.data.username || '用户'
    userAvatar.value = body.data.avatarUrl || ''
    console.log('用户信息:', body.data)
  } catch (error) {
    console.error('获取用户信息失败:', error)
  }
}

// Fetch the selectable TTS voice options from the backend into voiceList.
const fetchVoiceList = async () => {
  try {
    const headers = { 'Content-Type': 'application/json' }
    const response = await fetch('/api/voice/list', { headers })
    const result = await response.json()
    if (result.code !== 0) {
      console.error('获取音色列表失败:', result.message)
      return
    }
    // Backend response is already in the display shape
    voiceList.value = result.data || []
    console.log('获取音色列表成功:', voiceList.value)
  } catch (error) {
    console.error('获取音色列表错误:', error)
  }
}

// Show/hide the voice picker, lazily loading the list on first open.
const toggleVoiceSelector = () => {
  const opening = !showVoiceSelector.value
  showVoiceSelector.value = opening
  if (opening && voiceList.value.length === 0) {
    fetchVoiceList()
  }
}

// Commit the chosen voice and close the picker.
const selectVoice = (voiceType) => {
  console.log('选择音色:', voiceType)
  selectedVoice.value = voiceType
  showVoiceSelector.value = false
}

// Play a short sample of the given voice. The backend returns base64 WAV.
// FIX: use the shared authHeaders() helper instead of reading a 'token'
// localStorage key that nothing else in this file uses (the other auth'd
// requests here go through authHeaders / 'auth_token'), and revoke the
// object URL on playback error as well as on completion.
const previewVoice = async (voiceType) => {
  try {
    const response = await fetch('/api/voice/preview', {
      method: 'POST',
      headers: {
        ...authHeaders(),
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({ 
        voiceType: voiceType 
      })
    })
    const result = await response.json()
    if (result.code === 0) {
      // Decode the base64 payload and play it through an <audio> element
      const audioBlob = new Blob([Uint8Array.from(atob(result.data), c => c.charCodeAt(0))], { type: 'audio/wav' })
      const audioUrl = URL.createObjectURL(audioBlob)
      const audio = new Audio(audioUrl)
      audio.play()
      
      // Release the object URL whether playback finishes or fails
      audio.onended = () => {
        URL.revokeObjectURL(audioUrl)
      }
      audio.onerror = () => {
        URL.revokeObjectURL(audioUrl)
      }
    } else {
      console.error('音色试听失败:', result.message)
    }
  } catch (error) {
    console.error('音色试听失败:', error)
  }
}

// Begin hands-free VAD capture: bring up the duplex playback graph first,
// then the microphone + analyser chain. No-op if already recording.
const startVADRecording = async () => {
  if (isVadRecording) return
  
  await initDuplexAudio()
  
  if (await initVAD()) {
    isVadRecording = true
    vadState.value = 'listening'
    duplexState.value = 'idle'
    console.log('VAD录音已开始')
  }
}

// Full VAD + duplex-audio teardown: disconnect and null every node, close
// both AudioContexts, stop any playing audio, drain the TTS queue, and
// reset all playback flags. No-op if not recording. Teardown order follows
// the processing chain (processor -> analyser -> mic -> context).
const stopVADRecording = () => {
  if (!isVadRecording) return
  
  isVadRecording = false
  vadState.value = 'idle'
  duplexState.value = 'idle'
  
  // Tear down the VAD processing chain
  if (vadProcessor) {
    vadProcessor.disconnect()
    vadProcessor = null
  }
  if (analyser) {
    analyser.disconnect()
    analyser = null
  }
  if (microphone) {
    microphone.disconnect()
    microphone = null
  }
  if (audioContext) {
    audioContext.close()
    audioContext = null
  }
  
  // Tear down the duplex playback graph
  if (audioContextDuplex) {
    audioContextDuplex.close()
    audioContextDuplex = null
  }
  if (audioMixer) {
    audioMixer.disconnect()
    audioMixer = null
  }
  if (userAudioGain) {
    userAudioGain.disconnect()
    userAudioGain = null
  }
  if (aiAudioGain) {
    aiAudioGain.disconnect()
    aiAudioGain = null
  }
  
  // Stop the currently playing <audio>, if any
  if (currentAudio) {
    currentAudio.pause()
    currentAudio.currentTime = 0
    currentAudio = null
  }
  
  // Stop the in-flight TTS clip
  if (ttsCurrentAudio) {
    ttsCurrentAudio.pause()
    ttsCurrentAudio.currentTime = 0
    ttsCurrentAudio = null
  }
  
  // Drain the TTS queue, releasing every queued object URL
  while (ttsQueue.length > 0) {
    const url = ttsQueue.shift()
    URL.revokeObjectURL(url)
  }
  
  // Clear the duplex queue and reset all playback flags
  audioQueue.value = []
  playing = false
  ttsPlaying = false
  ttsAudioStarted = false
  isPlayingAudio.value = false
  currentAudioStream.value = null
  
  console.log('VAD录音和全双工音频系统已停止')
}

// Begin in-call audio capture. Capture is delegated entirely to the VAD
// pipeline (see startVADRecording) rather than a timed MediaRecorder.
const startCallRecording = () => {
  console.log('尝试开始通话录音...')
  console.log('localStream.value:', localStream.value)
  console.log('callState.value:', callState.value)
  console.log('pc:', pc)
  
  // Delegate to the VAD pipeline instead of interval-based recording
  startVADRecording()
  console.log('VAD录音已启动')
}

// Stop in-call audio capture (mirror of startCallRecording).
const stopCallRecording = () => {
  // Delegate to the VAD pipeline teardown
  stopVADRecording()
  console.log('VAD录音已停止')
}

// Initiate a voice call with the current role: validate preconditions,
// ensure the signaling WebSocket and RTCPeerConnection exist, create an
// SDP offer, and send a 'call_start' message carrying the offer. On any
// failure the call state is rolled back to 'idle'.
const startVoiceCall = async () => {
  if (!conversationId.value || !currentRole.value) {
    alert('请先选择一个角色开始对话')
    return
  }
  
  if (callState.value !== 'idle') {
    alert('当前已有通话进行中')
    return
  }
  
  try {
    // Make sure the signaling WebSocket is connected
    await ensureVoiceWsOpen()
    if (!(voiceWs && voiceWs.readyState === WebSocket.OPEN)) {
      alert('连接已断开，请刷新页面重试')
      return
    }
    
    // Lazily create the RTCPeerConnection
    await ensurePeer()
    
    const callId = `call_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`
    currentCallId.value = callId
    callState.value = 'calling'
    
    // Audio-only offer
    const offer = await pc.createOffer({
      offerToReceiveAudio: true,
      offerToReceiveVideo: false
    })
    await pc.setLocalDescription(offer)
    
    // Signal the callee with the call id and the SDP offer
    voiceWs.send(JSON.stringify({
      type: 'call_start',
      callId: callId,
      calleeId: currentRole.value.id.toString(),
      conversationId: conversationId.value,
      offer: offer
    }))
    
    console.log('发起语音通话:', callId)
    
  } catch (error) {
    console.error('发起通话失败:', error)
    alert('发起通话失败: ' + error.message)
    callState.value = 'idle'
  }
}

// Answer the incoming call identified by currentCallId. Creates an SDP
// answer only when the peer connection is in 'have-remote-offer'; in any
// other signaling state it sends a bare 'call_answer' without SDP. Errors
// are logged but not surfaced (this path is triggered automatically).
const answerCall = async () => {
  if (!currentCallId.value) return
  
  try {
    // Lazily create the RTCPeerConnection
    await ensurePeer()
    
    // Inspect the signaling state before answering
    console.log('当前信令状态:', pc.signalingState)
    
    // Only produce an Answer when a remote offer has been applied
    if (pc.signalingState === 'have-remote-offer') {
      const answer = await pc.createAnswer()
      await pc.setLocalDescription(answer)
      
      // Signal acceptance together with the SDP answer
      voiceWs?.send(JSON.stringify({
        type: 'call_answer',
        callId: currentCallId.value,
        answer: answer
      }))
      
      console.log('接听通话:', currentCallId.value)
    } else {
      console.log('跳过Answer创建，当前信令状态:', pc.signalingState)
      // Accept without SDP — the negotiation already happened elsewhere
      voiceWs?.send(JSON.stringify({
        type: 'call_answer',
        callId: currentCallId.value
      }))
    }
    
  } catch (error) {
    console.error('接听通话失败:', error)
    // Intentionally silent toward the user: answering is automatic
  }
}

// Decline a ringing call: notify the peer, then tear down the
// RTCPeerConnection, local media, quality monitoring, recording, and the
// call timer, returning the UI to 'idle'. Only valid while ringing.
const rejectCall = () => {
  if (callState.value !== 'ringing' || !currentCallId.value) return
  
  voiceWs?.send(JSON.stringify({
    type: 'call_reject',
    callId: currentCallId.value
  }))
  
  // Close the WebRTC connection
  if (pc) {
    pc.close()
    pc = null
  }
  
  // Release the local microphone stream
  if (localStream.value) {
    localStream.value.getTracks().forEach(track => track.stop())
    localStream.value = null
  }
  
  // Stop quality polling
  stopQualityMonitoring()
  
  // Stop in-call capture
  stopCallRecording()
  
  callState.value = 'idle'
  isInCall.value = false
  currentCallId.value = null
  stopCallTimer()
  console.log('拒绝通话')
}

// End the active call: notify the peer, hard-stop all AI playback and
// generation, then tear down the RTCPeerConnection, local media, quality
// monitoring, recording, and the call timer. Resets callState to 'idle'
// so the call button reappears.
const hangupCall = () => {
  if (!currentCallId.value) return
  
  voiceWs?.send(JSON.stringify({
    type: 'call_hangup',
    callId: currentCallId.value
  }))
  
  // Kill all AI audio/generation immediately
  stopAIPlayback('挂断通话')
  
  // Close the WebRTC connection
  if (pc) {
    pc.close()
    pc = null
  }
  
  // Release the local microphone stream
  if (localStream.value) {
    localStream.value.getTracks().forEach(track => track.stop())
    localStream.value = null
  }
  
  // Stop quality polling
  stopQualityMonitoring()
  
  // Stop in-call capture
  stopCallRecording()
  
  // Back to idle so the call button is shown again
  callState.value = 'idle'
  isInCall.value = false
  currentCallId.value = null
  stopCallTimer()
  console.log('挂断通话完成')
}

// Human-readable status line for the in-call UI, driven by duplex state.
const getCallStatusText = () => {
  const labels = new Map([
    ['user_speaking', '正在说话...'],
    ['ai_responding', 'AI正在回复...'],
    ['both_active', '双方都在说话'],
  ])
  return labels.get(duplexState.value) ?? '通话中'
}


// CSS class for the RTC connection indicator dot.
const getConnectionStatusClass = () => {
  const state = callQuality.value.connectionState
  if (state === 'connected') return 'status-connected'
  if (state === 'connecting') return 'status-connecting'
  if (state === 'disconnected' || state === 'failed') return 'status-error'
  return 'status-unknown'
}

// Grade the current packet-loss percentage into a quality CSS class.
const getPacketLossClass = () => {
  const loss = callQuality.value.packetLoss
  const grades = [
    [1, 'quality-excellent'],
    [3, 'quality-good'],
    [5, 'quality-fair'],
  ]
  for (const [limit, cls] of grades) {
    if (loss < limit) return cls
  }
  return 'quality-poor'
}


// Toggle microphone mute during a call.
// BUG FIX: the original only flipped the `muted` flag and never silenced
// anything — the peer kept receiving audio. Disabling the local audio
// tracks actually stops transmission while keeping the tracks attached,
// so unmuting is instant.
const toggleMute = () => {
  muted.value = !muted.value
  if (localStream.value) {
    localStream.value.getAudioTracks().forEach(track => {
      track.enabled = !muted.value
    })
  }
  console.log('静音状态:', muted.value)
}

// Placeholder: speaker/earpiece routing is not implemented yet.
const toggleSpeaker = () => {
  console.log('切换扬声器')
}

// Load up to 100 messages for the active conversation, coerce the
// isUserMessage flag into a boolean (the backend may send 0/1 numbers),
// and scroll the chat box to the latest message.
const fetchMessages = async () => {
  if (!conversationId.value) return
  const res = await fetch(`/api/conversations/${conversationId.value}/messages?pageNo=1&pageSize=100`, { headers: { ...authHeaders() } })
  const data = await res.json()
  messages.value = data.data?.records || []
  
  // Debug: verify messages carry audioUrl and the raw isUserMessage type
  console.log('加载的消息:', messages.value.map(m => ({ 
    id: m.id, 
    content: m.content?.substring(0, 20) + '...', 
    audioUrl: m.audioUrl,
    isUserMessage: m.isUserMessage,
    isUserMessageType: typeof m.isUserMessage
  })))
  
  // Coerce numeric 0/1 into a real boolean for template checks
  messages.value.forEach(m => {
    if (typeof m.isUserMessage === 'number') {
      m.isUserMessage = m.isUserMessage === 1
    }
  })
  
  await nextTick()
  if (chatBoxRef.value) {
    chatBoxRef.value.scrollTop = chatBoxRef.value.scrollHeight
  }
}

// Page through the user's conversation list (convPageSize per page).
// `reset` restarts from page 1 and clears the cache; a short page marks
// the end of the list. Re-entrant calls are rejected via convLoading.
// BUG FIX: reset the loading flag in `finally` — previously a failed
// fetch left convLoading stuck at true, blocking every future load.
const fetchConversations = async (reset = false) => {
  if (convLoading.value) return
  if (reset) {
    convPageNo.value = 1
    convHasMore.value = true
    conversations.value = []
  }
  if (!convHasMore.value) return
  convLoading.value = true
  
  try {
    const qs = new URLSearchParams({ pageNo: String(convPageNo.value), pageSize: String(convPageSize) })
    console.log('获取会话列表 - 查询参数:', qs.toString())
    
    const res = await fetch(`/api/conversations?${qs.toString()}`, { headers: { ...authHeaders() } })
    const data = await res.json()
    const records = data.data?.records || []
    
    console.log('获取会话列表 - 返回记录数:', records.length)
    console.log('获取会话列表 - 记录详情:', records.map(r => ({ id: r.id, roleName: r.roleName, roleId: r.roleId })))
    
    // A short page means there is nothing more to fetch
    if (records.length < convPageSize) convHasMore.value = false
    conversations.value = conversations.value.concat(records)
    convPageNo.value += 1
  } finally {
    convLoading.value = false
  }
}

// Infinite scroll: load the next conversation page once the sidebar is
// within 20px of the bottom.
const onSidebarScroll = (e) => {
  const { scrollTop, clientHeight, scrollHeight } = e.target
  const nearBottom = scrollTop + clientHeight >= scrollHeight - 20
  if (nearBottom) {
    fetchConversations(false)
  }
}

// Switch the chat panel to conversation `c` (no-op for null or reselecting
// the active one), updating the role header before loading messages.
const selectConversation = async (c) => {
  if (!c) return
  if (c.id === conversationId.value) return
  conversationId.value = c.id
  
  // Mirror the conversation's role info into the header
  if (c.roleId) {
    currentRole.value = {
      id: c.roleId,
      name: c.roleName,
      avatarUrl: c.roleAvatarUrl,
      description: c.roleDescription || 'AI助手'
    }
  }
  
  await fetchMessages()
  // Voice-room management is not implemented yet
}

// Delete a conversation after user confirmation; on success remove it from
// the sidebar and, if it was the active one, clear the chat panel.
// NOTE(review): reads 'auth_token' from localStorage directly instead of
// going through authHeaders() like the other requests — confirm they agree.
const deleteConversation = async (conversation) => {
  if (!confirm(`确定要删除与"${conversation.roleName}"的会话吗？此操作不可撤销。`)) {
    return
  }
  
  try {
    const token = localStorage.getItem('auth_token')
    if (!token) {
      console.error('用户未登录')
      return
    }
    
    const res = await fetch(`/api/conversations/${conversation.id}`, {
      method: 'DELETE',
      headers: { 'Authorization': `Bearer ${token}` }
    })
    
    const data = await res.json()
    if (data.code === 0) {
      // Remove the deleted conversation from the sidebar list
      conversations.value = conversations.value.filter(c => c.id !== conversation.id)
      
      // If it was the active conversation, clear the chat panel
      if (conversationId.value === conversation.id) {
        conversationId.value = null
        messages.value = []
        currentRole.value = null
      }
      
      console.log('会话删除成功')
    } else {
      console.error('删除会话失败:', data.message)
      alert('删除失败：' + data.message)
    }
  } catch (e) {
    console.error('删除会话失败', e)
    alert('删除失败，请重试')
  }
}

// Send the typed message: optimistically append it to the chat, then push
// it over the voice WebSocket as a 'text_message' for streaming LLM
// processing. During a voice call, a "keep it to one paragraph" hint is
// appended to the text sent to the LLM (not shown in the chat).
const send = async () => {
  if (!content.value.trim()) return
  
  const messageText = content.value.trim()
  content.value = '' // clear the input immediately for responsiveness
  
  // Optimistic UI: show the user's message right away
  if (messageText && messageText.trim()) {
    messages.value = messages.value.concat([{ 
      id: 'user_'+Date.now(), 
      isUserMessage: 1, 
      content: messageText,
      audioUrl: null
    }])
  }
  
  // Make sure the voice WebSocket is connected
  await ensureVoiceWsOpen()
  if (!(voiceWs && voiceWs.readyState === WebSocket.OPEN)) {
    alert('连接已断开，请刷新页面重试')
    return
  }
  
  try {
    // In-call: nudge the LLM toward a single spoken-friendly paragraph
    let textToSend = messageText
    if (isInCall.value) {
      textToSend = messageText + ' 不要分点，用一句或一段话来回复。'
      console.log('语音通话模式：已添加简化提示到LLM请求')
    }
    
    // Hand the text to the backend for streaming LLM processing
    voiceWs.send(JSON.stringify({
      type: 'text_message',
      text: textToSend,
      conversationId: conversationId.value
    }))
    
    // Mark streaming in progress; chunks arrive via the WebSocket handler
    isStreamingLLM.value = true
    streamingText.value = ''
    
  } catch (e) {
    console.error('发送消息失败:', e)
    alert('发送消息失败，请重试')
  }
}

// Reload role, conversation, and messages when the route's role changes.
// BUG FIX: the dynamic segment is named `roleId` (see route.params.roleId
// at the top of this script), so watching `route.params.id` never fired
// and navigating between roles left stale chat state.
watch(() => route.params.roleId, async v => {
  roleId.value = v
  conversationId.value = null
  messages.value = []
  await fetchCurrentRole(v)
  await ensureConversation()
  await fetchMessages()
})

// Component startup: read the cached user, make sure VAD is stopped, load
// user info (fire-and-forget), then conversations -> active conversation ->
// messages in order, open the voice WebSocket, and wire up the sidebar's
// infinite-scroll listener.
onMounted(async () => {
  try {
    const user = JSON.parse(localStorage.getItem('auth_user') || 'null')
    apiUserId.value = user?.id || null
  } catch (e) {
    apiUserId.value = null
  }
  
  // Defensive: make sure no VAD session survives a hot reload / remount
  stopVADRecording()
  
  // Fire-and-forget; the header fills in when it resolves
  fetchUserInfo()
  
  await fetchConversations(true)
  await ensureConversation()
  await fetchMessages()
  initVoiceWs()
  await nextTick()
  if (sidebarRef.value) sidebarRef.value.addEventListener('scroll', onSidebarScroll)
})

// Component teardown: detach the sidebar scroll listener, close the voice
// WebSocket (ignoring close errors), and stop the call timer and VAD capture.
onBeforeUnmount(() => {
  sidebarRef.value?.removeEventListener('scroll', onSidebarScroll)
  try {
    voiceWs?.close()
  } catch {}
  stopCallTimer()
  stopVADRecording()
})

// Relative timestamp for the conversation list: today -> HH:MM,
// yesterday -> "昨天", under a week -> "N天前", otherwise MM/DD.
const formatTime = (timeStr) => {
  if (!timeStr) return ''
  const then = new Date(timeStr)
  const elapsedDays = Math.floor((Date.now() - then) / (1000 * 60 * 60 * 24))
  
  switch (true) {
    case elapsedDays === 0:
      return then.toLocaleTimeString('zh-CN', { hour: '2-digit', minute: '2-digit' })
    case elapsedDays === 1:
      return '昨天'
    case elapsedDays < 7:
      return `${elapsedDays}天前`
    default:
      return then.toLocaleDateString('zh-CN', { month: '2-digit', day: '2-digit' })
  }
}

// 格式化消息时间
// Render a per-message timestamp as a zh-CN HH:mm:ss clock string.
// Returns '' for a missing/empty input.
const formatMessageTime = (timeStr) => {
  if (!timeStr) return ''
  const clockOpts = { hour: '2-digit', minute: '2-digit', second: '2-digit' }
  return new Date(timeStr).toLocaleTimeString('zh-CN', clockOpts)
}

// 清理TTS文本，移除Markdown格式和特殊字符
const cleanTextForTTS = (text) => {
  if (!text) return ''
  
  let cleaned = text
  
  // 移除Markdown格式
  cleaned = cleaned.replace(/\*\*(.*?)\*\*/g, '$1') // 粗体 **text** -> text
  cleaned = cleaned.replace(/\*(.*?)\*/g, '$1') // 斜体 *text* -> text
  cleaned = cleaned.replace(/`(.*?)`/g, '$1') // 代码 `text` -> text
  cleaned = cleaned.replace(/~~(.*?)~~/g, '$1') // 删除线 ~~text~~ -> text
  
  // 移除列表符号和特殊字符
  cleaned = cleaned.replace(/^[\s]*[-*+]\s+/gm, '') // 列表符号 - * +
  cleaned = cleaned.replace(/^[\s]*\d+\.\s+/gm, '') // 数字列表 1. 2. 3.
  cleaned = cleaned.replace(/^[\s]*[•·]\s+/gm, '') // 其他列表符号 • ·
  
  // 移除特殊符号
  cleaned = cleaned.replace(/[📌🔍💡⚠️✅❌🎯📝📊📈📉🔗]/g, '') // 移除emoji符号
  cleaned = cleaned.replace(/[【】《》「」『』〈〉]/g, '') // 移除中文括号
  cleaned = cleaned.replace(/[\[\](){}]/g, '') // 移除英文括号
  cleaned = cleaned.replace(/[#@$%^&*+=|\\~`]/g, '') // 移除特殊符号
  
  // 清理多余的空格和换行
  cleaned = cleaned.replace(/\s+/g, ' ') // 多个空格合并为一个
  cleaned = cleaned.replace(/\n\s*\n/g, '\n') // 多个换行合并为一个
  cleaned = cleaned.trim() // 去除首尾空格
  
  // 移除多余的标点符号
  cleaned = cleaned.replace(/[。，！？；：]{2,}/g, '。') // 多个标点合并为一个句号
  cleaned = cleaned.replace(/\s*[。，！？；：]\s*/g, '$&') // 标点符号前后只保留一个空格
  
  console.log('TTS文本清理:', { 原文: text.substring(0, 50) + '...', 清理后: cleaned.substring(0, 50) + '...' })
  
  return cleaned
}

// 获取当前角色信息
// Load the role referenced by `roleId` into the `currentRole` ref.
// Silently keeps the previous value on HTTP/network failure or a non-zero
// business code; network errors are logged to the console.
const fetchCurrentRole = async (roleId) => {
  if (!roleId) return
  try {
    const res = await fetch(`/api/roles/${roleId}`, { headers: authHeaders() })
    if (!res.ok) return
    const payload = await res.json()
    if (payload.code === 0) currentRole.value = payload.data
  } catch (err) {
    console.error('获取角色信息失败', err)
  }
}
</script>

<template>
  <div class="chat-page">
    <div class="chat-container">
      <!-- 左侧会话列表 -->
      <aside class="conversation-sidebar" ref="sidebarRef">
        <div class="sidebar-header">
          <div class="header-content">
            <h2 class="sidebar-title">💬 我的会话</h2>
            <button class="refresh-btn" @click="fetchConversations(true)" :disabled="convLoading">
              <span class="refresh-icon" :class="{ spinning: convLoading }">🔄</span>
            </button>
          </div>
        </div>
        
        <div class="conversation-list" ref="convListRef">
          <div v-if="conversations.length === 0 && !convLoading" class="empty-conversations">
            <div class="empty-icon">💭</div>
            <div class="empty-text">还没有会话记录</div>
            <div class="empty-hint">去角色广场开始对话吧</div>
      </div>
          
          <div v-for="c in conversations" :key="c.id" 
               :class="['conversation-item', c.id===conversationId?'active':'']">
            <div class="conversation-main" @click="selectConversation(c)">
              <div class="conversation-avatar">
                <img v-if="c.roleAvatarUrl" :src="c.roleAvatarUrl" :alt="c.roleName" class="avatar-image" />
                <div v-else class="avatar-placeholder">{{ c.roleName?.charAt(0) || '🤖' }}</div>
      </div>
              
              <div class="conversation-content">
                <div class="conversation-header">
                  <h3 class="role-name">{{ c.roleName || '未知角色' }}</h3>
                  <span class="conversation-time">{{ formatTime(c.startTime) }}</span>
    </div>
                <p class="conversation-title">{{ c.conversationTitle || '新对话' }}</p>
                <div class="conversation-footer">
                  <span class="conversation-status" :class="c.isActive ? 'active' : 'ended'">
                    {{ c.isActive ? '进行中' : '已结束' }}
                  </span>
                </div>
              </div>
            </div>
            
            <div class="conversation-actions">
              <button class="action-btn delete-btn" @click.stop="deleteConversation(c)" title="删除会话">
                🗑️
              </button>
            </div>
          </div>
        </div>
        
        <div class="load-more" v-if="convHasMore && !convLoading">
          <button class="load-more-btn" @click="fetchConversations(false)">加载更多</button>
        </div>
      </aside>

      <!-- 右侧聊天区域 -->
      <main class="chat-main">
        <!-- 聊天头部 -->
        <header class="chat-header">
          <div class="chat-info">
            <div class="current-role" v-if="currentRole">
              <img v-if="currentRole.avatarUrl" :src="currentRole.avatarUrl" :alt="currentRole.name" class="role-avatar" />
              <div v-else class="role-avatar-placeholder">{{ currentRole.name?.charAt(0) || '🤖' }}</div>
              <div class="role-details">
                <h3 class="role-name">{{ currentRole.name }}</h3>
                <p class="role-description">{{ currentRole.description || 'AI助手' }}</p>
              </div>
            </div>
            <div v-else class="no-role">
              <h3>选择一个角色开始对话</h3>
            </div>
          </div>
          
          <div class="chat-controls" style="display: none;">
            <div class="voice-controls">
              <button v-if="!isRecording" @click="startRecord" class="control-btn voice-btn" :disabled="wsStatus!=='connected'">
                <span class="btn-icon">🎙️</span>
                <span>开始录音</span>
              </button>
              <button v-else @click="stopRecord" class="control-btn voice-btn recording">
                <span class="btn-icon">⏹️</span>
                <span>结束录音</span>
              </button>
              
              <button class="control-btn" :class="muted?'muted':''" @click="muted = !muted">
                <span class="btn-icon">{{ muted ? '🔇' : '🔊' }}</span>
                <span>{{ muted ? '已静音' : '自动播放' }}</span>
              </button>
              
              <button class="control-btn" @click="playNext" :disabled="ttsPlaying || ttsQueue.length===0">
                <span class="btn-icon">▶️</span>
                <span>播放下一个 ({{ ttsQueue.length }})</span>
              </button>
            </div>
          </div>
        </header>


        <!-- 聊天消息区域 -->
        <div class="chat-messages" ref="chatBoxRef">
          <div v-if="!conversationId" class="welcome-screen">
            <div class="welcome-content">
              <div class="welcome-icon">✨</div>
              <h2>欢迎使用AI对话</h2>
              <p>选择一个会话或开始新的对话</p>
            </div>
          </div>
          
          <!-- 通话界面 -->
          <div v-else-if="callState === 'connected'" class="call-interface">
            <!-- 通话头部 -->
            <div class="call-header">
              <div class="call-avatar-container">
                <div class="call-avatar-ring" :class="{ active: isVoiceActive }">
                  <div class="call-avatar">
                    <img v-if="currentRole?.avatarUrl" :src="currentRole.avatarUrl" :alt="currentRole.name" class="call-avatar-img" />
                    <div v-else class="call-avatar-placeholder">{{ currentRole?.name?.charAt(0) || '🤖' }}</div>
                  </div>
                </div>
              </div>
              
              <div class="call-info">
                <h2 class="call-name">{{ currentRole?.name || 'AI助手' }}</h2>
                <div class="call-status-container">
                  <p class="call-status" :class="duplexState">
                    <span class="status-dot"></span>
                    {{ getCallStatusText() }}
                  </p>
                  <div class="call-duration">{{ formatCallDuration(callDuration) }}</div>
                </div>
              </div>
            </div>
            
            <!-- 语音可视化 -->
            <div class="call-visualizer">
              <div class="voice-visualizer">
                <div v-for="i in 8" :key="i" 
                     class="voice-bar" 
                     :class="{ active: isVoiceActive || duplexState === 'ai_responding' }"
                     :style="{ 
                       animationDelay: (i * 0.1) + 's',
                       height: isVoiceActive ? (20 + Math.random() * 40) + 'px' : '20px'
                     }">
                </div>
              </div>
            </div>
            
            <!-- 通话控制按钮 -->
            <div class="call-controls">
              <button @click="toggleMute" class="call-control-btn mute-btn" :class="{ active: muted }">
                <div class="control-icon">
                  <span class="icon">{{ muted ? '🔇' : '🎤' }}</span>
                </div>
                <span class="control-label">{{ muted ? '取消静音' : '静音' }}</span>
              </button>
              
              <button @click="toggleVoiceSelector" class="call-control-btn voice-btn" :class="{ active: showVoiceSelector }">
                <div class="control-icon">
                  <span class="icon">🎵</span>
                </div>
                <span class="control-label">音色</span>
              </button>
              
              <button @click="hangupCall" class="call-control-btn hangup-btn">
                <div class="control-icon">
                  <span class="icon">📞</span>
                </div>
                <span class="control-label">挂断</span>
              </button>
              
              <button @click="toggleSpeaker" class="call-control-btn speaker-btn" :class="{ active: !muted }">
                <div class="control-icon">
                  <span class="icon">🔊</span>
                </div>
                <span class="control-label">扬声器</span>
              </button>
            </div>
            
            <!-- 音色选择器 -->
            <div v-if="showVoiceSelector" class="voice-selector">
              <div class="voice-selector-header">
                <h4>选择音色</h4>
                <button @click="showVoiceSelector = false" class="close-btn">×</button>
              </div>
              <div class="voice-list">
                <div v-if="voiceList.length === 0" class="empty-voice-list">
                  <p>暂无可用音色</p>
                </div>
                <div v-for="voice in voiceList" :key="voice.voice_type" 
                     class="voice-item" 
                     :class="{ selected: selectedVoice === voice.voice_type }"
                     @click="selectVoice(voice.voice_type)">
                  <div class="voice-info">
                    <span class="voice-name">{{ voice.voice_name || voice.voice_type }}</span>
                    <span class="voice-description">{{ voice.category || voice.voice_type }}</span>
                  </div>
                  <div class="voice-preview">
                    <button @click.stop="previewVoice(voice.voice_type)" class="preview-btn">试听</button>
                  </div>
                </div>
              </div>
            </div>
            
            <div class="call-quality">
              <div class="quality-grid">
                <div class="quality-card">
                  <div class="quality-icon">📊</div>
                  <div class="quality-content">
                    <div class="quality-label">丢包率</div>
                    <div class="quality-value" :class="getPacketLossClass()">{{ callQuality.packetLoss.toFixed(2) }}%</div>
                  </div>
                </div>
                <div class="quality-card">
                  <div class="quality-icon">⚡</div>
                  <div class="quality-content">
                    <div class="quality-label">延迟</div>
                    <div class="quality-value" :class="getLatencyClass(callQuality.latency)">{{ callQuality.latency.toFixed(0) }}ms</div>
                  </div>
                </div>
                <div class="quality-card">
                  <div class="quality-icon">🎵</div>
                  <div class="quality-content">
                    <div class="quality-label">音频强度</div>
                    <div class="quality-value audio-level">{{ Math.round(callQuality.audioLevel * 100) }}%</div>
                  </div>
                </div>
              </div>
            </div>
          </div>
          
          <div v-else-if="messages.length === 0" class="empty-chat">
            <div class="empty-content">
              <div class="empty-icon">💬</div>
              <h3>开始你的对话吧！</h3>
              <p>输入消息或使用语音功能与AI交流</p>
            </div>
          </div>
          
          <div v-else class="messages-container">
            <div v-for="m in messages" :key="m.id" :class="['message-row', m.isUserMessage ? 'user' : 'assistant']">
              <!-- AI消息头像 -->
              <div v-if="!m.isUserMessage" class="message-avatar assistant-avatar">
                <img v-if="currentRole?.avatarUrl" :src="currentRole.avatarUrl" :alt="currentRole.name" class="avatar-img" />
                <div v-else class="avatar-placeholder">🤖</div>
              </div>
              
              <!-- 用户消息头像 -->
              <div v-if="m.isUserMessage" class="message-avatar user-avatar">
                <img v-if="userAvatar" :src="userAvatar" :alt="userName" class="avatar-img" />
                <div v-else class="avatar-placeholder">{{ userName?.charAt(0) || '👤' }}</div>
              </div>
              
              <!-- 消息内容 -->
              <div class="message-content" :class="m.isUserMessage ? 'user-message' : 'assistant-message'">
                <div class="message-bubble" :class="m.isUserMessage ? 'user-bubble' : 'assistant-bubble'">
                  <div class="message-text">{{ m.content }}</div>
                  <button v-if="!m.isUserMessage" class="tts-button" @click="playTts(m.content, m.id, m.audioUrl)" :title="getPlayButtonTitle(m.id)">
                    <span class="tts-icon">{{ getPlayButtonIcon(m.id) }}</span>
                  </button>
                </div>
                <div class="message-timestamp">{{ m.timestamp || formatMessageTime(m.createdAt) }}</div>
              </div>
            </div>
          </div>
        </div>

        <!-- 输入区域 -->
        <div class="chat-input">
          <div class="input-wrapper">
            <input v-model="content" 
                   @keyup.enter="send" 
                   placeholder="输入消息..." 
                   class="message-input" 
                   :disabled="loading" />
            <!-- 语音通话按钮 -->
            <button v-if="callState === 'idle'" @click="startVoiceCall" class="voice-button call-button" :disabled="wsStatus!=='connected'">
              <span class="voice-icon">📞</span>
            </button>
            <button v-else-if="callState === 'calling'" @click="hangupCall" class="voice-button calling-button">
              <span class="voice-icon">📞</span>
            </button>
            <button v-else-if="callState === 'ringing'" @click="hangupCall" class="voice-button calling-button">
              <span class="voice-icon">📞</span>
            </button>
            <button v-else-if="callState === 'connected'" @click="hangupCall" class="voice-button connected-button">
              <span class="voice-icon">📞</span>
            </button>
            
            <!-- 语音录制按钮（通话中隐藏） -->
            <button v-if="callState === 'idle' && !isRecording" @click="startRecord" class="voice-button record-button" :disabled="wsStatus!=='connected'">
              <span class="voice-icon">🎙️</span>
            </button>
            <button v-else-if="callState === 'idle' && isRecording" @click="stopRecord" class="voice-button recording">
              <span class="voice-icon">⏹️</span>
            </button>
            <button @click="send" :disabled="loading || !content.trim()" class="send-button">
              <span v-if="loading" class="loading-spinner">⏳</span>
              <span v-else class="send-icon">📤</span>
            </button>
          </div>
        </div>
      </main>
    </div>
    
    <audio ref="remoteAudio" style="display:none"></audio>
  </div>
</template>

<style scoped>
/* 全局样式 */
.chat-page {
  height: 100%;
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
  overflow: hidden;
}

.chat-container {
  display: grid;
  grid-template-columns: 360px 1fr;
  height: 100%;
  background: white;
  border-radius: 0;
  box-shadow: 0 0 50px rgba(0, 0, 0, 0.1);
  overflow: hidden;
}

/* 左侧会话列表 */
.conversation-sidebar {
  background: linear-gradient(180deg, #f8fafc 0%, #e2e8f0 100%);
  border-right: 1px solid #e5e7eb;
  display: flex;
  flex-direction: column;
  overflow: hidden;
}

.sidebar-header {
  padding: 16px 16px 12px;
  border-bottom: 1px solid #e5e7eb;
  background: white;
}

.header-content {
  display: flex;
  align-items: center;
  justify-content: space-between;
}

.sidebar-title {
  font-size: 16px;
  font-weight: 700;
  color: #1f2937;
  margin: 0;
}

.refresh-btn {
  background: none;
  border: none;
  padding: 8px;
  border-radius: 8px;
  cursor: pointer;
  transition: all 0.2s;
  color: #6b7280;
}

.refresh-btn:hover {
  background: #f3f4f6;
  color: #374151;
}

.refresh-icon {
  font-size: 16px;
  transition: transform 0.3s;
}

.refresh-icon.spinning {
  animation: spin 1s linear infinite;
}

@keyframes spin {
  from { transform: rotate(0deg); }
  to { transform: rotate(360deg); }
}

.conversation-list {
  flex: 1;
  overflow-y: auto;
  padding: 12px 16px;
  display: flex;
  flex-direction: column;
  gap: 8px;
}

.empty-conversations {
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  height: 200px;
  color: #6b7280;
  text-align: center;
}

.empty-conversations .empty-icon {
  font-size: 48px;
  margin-bottom: 16px;
  opacity: 0.6;
}

.empty-conversations .empty-text {
  font-size: 16px;
  font-weight: 600;
  color: #374151;
  margin-bottom: 8px;
}

.empty-conversations .empty-hint {
  font-size: 14px;
  color: #9ca3af;
}

.conversation-item {
  background: white;
  border-radius: 12px;
  padding: 12px;
  cursor: pointer;
  transition: all 0.3s ease;
  border: 2px solid transparent;
  box-shadow: 0 2px 8px rgba(0, 0, 0, 0.04);
  display: flex;
  align-items: center;
  gap: 12px;
}

.conversation-item:hover {
  transform: translateY(-2px);
  box-shadow: 0 8px 25px rgba(0, 0, 0, 0.1);
  border-color: #e5e7eb;
}

.conversation-item.active {
  border-color: #667eea;
  background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
  box-shadow: 0 8px 25px rgba(102, 126, 234, 0.15);
}

.conversation-main {
  flex: 1;
  display: flex;
  align-items: center;
  gap: 12px;
  cursor: pointer;
}

.conversation-actions {
  display: flex;
  align-items: center;
  gap: 8px;
  opacity: 0;
  transition: opacity 0.2s;
}

.conversation-item:hover .conversation-actions {
  opacity: 1;
}

.action-btn {
  width: 32px;
  height: 32px;
  border: none;
  border-radius: 6px;
  cursor: pointer;
  display: flex;
  align-items: center;
  justify-content: center;
  font-size: 14px;
  transition: all 0.2s;
}

.delete-btn {
  background: #fef2f2;
  color: #dc2626;
}

.delete-btn:hover {
  background: #fee2e2;
  transform: scale(1.1);
}

.conversation-avatar {
  width: 36px;
  height: 36px;
  border-radius: 50%;
  overflow: hidden;
  margin-bottom: 8px;
  background: linear-gradient(135deg, #f093fb, #f5576c);
  display: flex;
  align-items: center;
  justify-content: center;
}

.avatar-image {
  width: 100%;
  height: 100%;
  object-fit: cover;
}

.avatar-placeholder {
  font-size: 20px;
  color: white;
  font-weight: 600;
}

.conversation-content {
  flex: 1;
}

.conversation-header {
  display: flex;
  justify-content: space-between;
  align-items: flex-start;
  margin-bottom: 6px;
}

.role-name {
  font-size: 14px;
  font-weight: 600;
  color: #1f2937;
  margin: 0;
  line-height: 1.2;
}

.conversation-time {
  font-size: 12px;
  color: #9ca3af;
  white-space: nowrap;
}

.conversation-title {
  font-size: 12px;
  color: #6b7280;
  margin: 0 0 8px 0;
  line-height: 1.4;
  display: -webkit-box;
  -webkit-line-clamp: 2;
  -webkit-box-orient: vertical;
  overflow: hidden;
}

.conversation-footer {
  display: flex;
  justify-content: flex-end;
}

.conversation-status {
  padding: 4px 8px;
  border-radius: 12px;
  font-size: 11px;
  font-weight: 500;
}

.conversation-status.active {
  background: #dcfce7;
  color: #166534;
}

.conversation-status.ended {
  background: #f3f4f6;
  color: #6b7280;
}

.load-more {
  padding: 12px 16px;
  text-align: center;
  border-top: 1px solid #e5e7eb;
  background: white;
}

.load-more-btn {
  background: linear-gradient(135deg, #667eea, #764ba2);
  color: white;
  border: none;
  padding: 8px 16px;
  border-radius: 20px;
  font-size: 14px;
  cursor: pointer;
  transition: all 0.2s;
}

.load-more-btn:hover {
  transform: translateY(-1px);
  box-shadow: 0 4px 12px rgba(102, 126, 234, 0.3);
}

/* 右侧聊天区域 */
.chat-main {
  display: flex;
  flex-direction: column;
  background: white;
  overflow: hidden;
}

.chat-header {
  padding: 16px 32px;
  border-bottom: 1px solid #e5e7eb;
  background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
  display: flex;
  justify-content: space-between;
  align-items: center;
  flex-shrink: 0;
}

.chat-info {
  display: flex;
  align-items: center;
  gap: 16px;
}

.current-role {
  display: flex;
  align-items: center;
  gap: 12px;
}

.role-avatar {
  width: 48px;
  height: 48px;
  border-radius: 50%;
  object-fit: cover;
  border: 3px solid white;
  box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
}

.role-avatar-placeholder {
  width: 48px;
  height: 48px;
  border-radius: 50%;
  background: linear-gradient(135deg, #f093fb, #f5576c);
  display: flex;
  align-items: center;
  justify-content: center;
  color: white;
  font-size: 20px;
  font-weight: 600;
  border: 3px solid white;
  box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
}

.role-details h3 {
  font-size: 18px;
  font-weight: 600;
  color: #1f2937;
  margin: 0 0 4px 0;
}

.role-details p {
  font-size: 14px;
  color: #6b7280;
  margin: 0;
}

.no-role h3 {
  font-size: 18px;
  color: #6b7280;
  margin: 0;
}

.chat-controls {
  display: flex;
  gap: 12px;
}

.voice-controls {
  display: flex;
  gap: 8px;
}

.control-btn {
  display: flex;
  align-items: center;
  gap: 6px;
  padding: 8px 16px;
  background: white;
  border: 1px solid #e5e7eb;
  border-radius: 20px;
  font-size: 14px;
  cursor: pointer;
  transition: all 0.2s;
  color: #374151;
}

.control-btn:hover {
  background: #f9fafb;
  border-color: #d1d5db;
}

.control-btn:disabled {
  opacity: 0.5;
  cursor: not-allowed;
}

.control-btn.voice-btn {
  background: linear-gradient(135deg, #10b981, #059669);
  color: white;
  border-color: #10b981;
}

.control-btn.voice-btn.recording {
  background: linear-gradient(135deg, #ef4444, #dc2626);
  border-color: #ef4444;
  animation: pulse 1.5s infinite;
}

.control-btn.muted {
  background: linear-gradient(135deg, #6b7280, #4b5563);
  color: white;
  border-color: #6b7280;
}

@keyframes pulse {
  0%, 100% { opacity: 1; }
  50% { opacity: 0.7; }
}

.btn-icon {
  font-size: 16px;
}

/* 状态栏 */
.status-bar {
  display: flex;
  gap: 16px;
  padding: 8px 32px;
  background: #f9fafb;
  border-bottom: 1px solid #e5e7eb;
  flex-shrink: 0;
}

.status-item {
  display: flex;
  align-items: center;
  gap: 6px;
  font-size: 12px;
  color: #6b7280;
}

.status-dot {
  width: 8px;
  height: 8px;
  border-radius: 50%;
  background: #d1d5db;
}

.status-item.connected .status-dot {
  background: #10b981;
}

.status-item.disconnected .status-dot {
  background: #ef4444;
}

.status-item.processing .status-dot {
  background: #3b82f6;
  animation: pulse 1.5s infinite;
}

.status-item.playing .status-dot {
  background: #8b5cf6;
  animation: pulse 1.5s infinite;
}

.status-item.listening .status-dot {
  background: #10b981;
  animation: pulse 2s ease-in-out infinite;
}

.status-item.speaking .status-dot {
  background: #f59e0b;
  animation: pulse 0.8s ease-in-out infinite;
}

.status-item.user-speaking .status-dot {
  background: #3b82f6;
  animation: pulse 1s ease-in-out infinite;
}

.status-item.ai-responding .status-dot {
  background: #8b5cf6;
  animation: pulse 1.2s ease-in-out infinite;
}

.status-item.both-active .status-dot {
  background: #ef4444;
  animation: pulse 0.6s ease-in-out infinite;
}

.status-item.queue-status .status-dot {
  background: #10b981;
  animation: pulse 1.5s ease-in-out infinite;
}

/* 聊天消息区域 */
.chat-messages {
  flex: 1;
  overflow-y: auto;
  padding: 16px 32px;
  background: linear-gradient(180deg, #ffffff 0%, #fafafa 100%);
  min-height: 0;
}

.welcome-screen, .empty-chat {
  display: flex;
  align-items: center;
  justify-content: center;
  height: 100%;
  text-align: center;
}

.welcome-content, .empty-content {
  max-width: 400px;
}

.welcome-icon, .empty-icon {
  font-size: 64px;
  margin-bottom: 24px;
  opacity: 0.6;
}

.welcome-content h2, .empty-content h3 {
  font-size: 24px;
  font-weight: 600;
  color: #1f2937;
  margin: 0 0 12px 0;
}

.welcome-content p, .empty-content p {
  font-size: 16px;
  color: #6b7280;
  margin: 0;
}

.messages-container {
  display: flex;
  flex-direction: column;
  gap: 24px;
}

.message-row {
  display: flex;
  align-items: flex-start;
  gap: 16px;
}

.message-row.user {
  flex-direction: row-reverse;
}

.message-avatar {
  width: 40px;
  height: 40px;
  border-radius: 50%;
  flex-shrink: 0;
  display: flex;
  align-items: center;
  justify-content: center;
  border: 2px solid white;
  box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
}

.assistant-avatar {
  background: linear-gradient(135deg, #f093fb, #f5576c);
}

.user-avatar {
  background: linear-gradient(135deg, #667eea, #764ba2);
}

.message-avatar .avatar-img {
  width: 100%;
  height: 100%;
  border-radius: 50%;
  object-fit: cover;
}

.message-avatar .avatar-placeholder {
  color: white;
  font-size: 18px;
  font-weight: 600;
}

.message-content {
  max-width: 70%;
  min-width: 200px;
  display: flex;
  flex-direction: column;
  align-items: flex-start;
}

.message-content.user-message {
  align-items: flex-end;
}

.message-content.assistant-message {
  align-items: flex-start;
}

.message-bubble {
  padding: 16px 20px;
  border-radius: 20px;
  position: relative;
  box-shadow: 0 2px 12px rgba(0, 0, 0, 0.08);
  word-wrap: break-word;
  max-width: fit-content;
  width: auto;
}

.assistant-bubble {
  background: white;
  border: 1px solid #e5e7eb;
  color: #1f2937;
}

.user-bubble {
  background: linear-gradient(135deg, #667eea, #764ba2);
  color: white;
}

.message-text {
  font-size: 15px;
  line-height: 1.6;
  margin: 0;
}

.tts-button {
  position: absolute;
  top: 8px;
  right: 8px;
  background: rgba(255, 255, 255, 0.9);
  border: none;
  border-radius: 50%;
  width: 28px;
  height: 28px;
  display: flex;
  align-items: center;
  justify-content: center;
  cursor: pointer;
  transition: all 0.2s;
  opacity: 0;
}

.message-bubble:hover .tts-button {
  opacity: 1;
}

.tts-button:hover {
  background: white;
  transform: scale(1.1);
}

.tts-icon {
  font-size: 12px;
}

.message-timestamp {
  font-size: 11px;
  color: #9ca3af;
  margin-top: 4px;
  padding: 0 8px;
}

/* 输入区域 */
.chat-input {
  padding: 12px 32px;
  border-top: 1px solid #e5e7eb;
  background: white;
  flex-shrink: 0;
}

.input-wrapper {
  display: flex;
  gap: 8px;
  align-items: center;
}

.message-input {
  flex: 1;
  padding: 12px 16px;
  border: 2px solid #e5e7eb;
  border-radius: 25px;
  font-size: 15px;
  outline: none;
  transition: all 0.2s;
  background: #f9fafb;
}

.message-input:focus {
  border-color: #667eea;
  background: white;
  box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1);
}

.send-button {
  width: 40px;
  height: 40px;
  border-radius: 50%;
  border: none;
  background: linear-gradient(135deg, #667eea, #764ba2);
  color: white;
  cursor: pointer;
  transition: all 0.2s;
  display: flex;
  align-items: center;
  justify-content: center;
  box-shadow: 0 4px 12px rgba(102, 126, 234, 0.3);
}

.send-button:hover:not(:disabled) {
  transform: translateY(-2px);
  box-shadow: 0 6px 20px rgba(102, 126, 234, 0.4);
}

.send-button:disabled {
  opacity: 0.5;
  cursor: not-allowed;
  transform: none;
}

.send-icon, .loading-spinner {
  font-size: 16px;
}

.voice-input {
  display: flex;
  justify-content: center;
}

.voice-button {
  width: 40px;
  height: 40px;
  border-radius: 50%;
  background: linear-gradient(135deg, #10b981, #059669);
  color: white;
  border: none;
  cursor: pointer;
  transition: all 0.2s;
  display: flex;
  align-items: center;
  justify-content: center;
  box-shadow: 0 4px 12px rgba(16, 185, 129, 0.3);
}

.voice-button:hover:not(:disabled) {
  transform: translateY(-2px);
  box-shadow: 0 6px 20px rgba(16, 185, 129, 0.4);
}

.voice-button.recording {
  background: linear-gradient(135deg, #ef4444, #dc2626);
  box-shadow: 0 4px 12px rgba(239, 68, 68, 0.3);
  animation: pulse 1.5s infinite;
}

.voice-button:disabled {
  opacity: 0.5;
  cursor: not-allowed;
  transform: none;
}

/* 通话按钮样式 */
.voice-button.call-button {
  background: linear-gradient(135deg, #3b82f6, #1d4ed8);
  box-shadow: 0 4px 12px rgba(59, 130, 246, 0.3);
}

.voice-button.calling-button {
  background: linear-gradient(135deg, #f59e0b, #d97706);
  box-shadow: 0 4px 12px rgba(245, 158, 11, 0.3);
  animation: pulse 1.5s infinite;
}

.voice-button.answer-button {
  background: linear-gradient(135deg, #10b981, #059669);
  box-shadow: 0 4px 12px rgba(16, 185, 129, 0.3);
  animation: pulse 1.5s infinite;
}

.voice-button.connected-button {
  background: linear-gradient(135deg, #ef4444, #dc2626);
  box-shadow: 0 4px 12px rgba(239, 68, 68, 0.3);
}

/* 通话质量指示器样式 */
.call-quality {
  display: flex;
  gap: 8px;
  margin-top: 4px;
  font-size: 12px;
}

.quality-item {
  padding: 2px 6px;
  border-radius: 4px;
  font-weight: 500;
  white-space: nowrap;
}

.quality-excellent {
  background: rgba(16, 185, 129, 0.2);
  color: #059669;
}

.quality-good {
  background: rgba(59, 130, 246, 0.2);
  color: #2563eb;
}

.quality-fair {
  background: rgba(245, 158, 11, 0.2);
  color: #d97706;
}

.quality-poor {
  background: rgba(239, 68, 68, 0.2);
  color: #dc2626;
}

.voice-button.record-button {
  background: linear-gradient(135deg, #8b5cf6, #7c3aed);
  box-shadow: 0 4px 12px rgba(139, 92, 246, 0.3);
}

/* 通话状态样式 */
.status-item.calling .status-dot {
  background: #f59e0b;
  animation: pulse 1.5s infinite;
}

.status-item.ringing .status-dot {
  background: #10b981;
  animation: pulse 1.5s infinite;
}

.status-item.connected .status-dot {
  background: #3b82f6;
  animation: pulse 1.5s infinite;
}

.status-item.streaming .status-dot {
  background: #8b5cf6;
  animation: pulse 1.5s infinite;
}

.voice-icon {
  font-size: 18px;
}

/* 响应式设计 (responsive design) */
/* Mobile (<=768px): collapse the two-column grid to a single column,
   hide the conversation sidebar entirely, and tighten paddings. */
@media (max-width: 768px) {
  .chat-container {
    grid-template-columns: 1fr;
  }
  
  .conversation-sidebar {
    display: none;
  }
  
  .chat-header {
    padding: 16px 20px;
  }
  
  .chat-messages {
    padding: 16px 20px;
  }
  
  .chat-input {
    padding: 16px 20px;
  }
}

/* 通话界面样式 (call interface styles) */
/* Full-screen-ish (60vh) call overlay: purple gradient backdrop, white text,
   vertically stacked header / visualizer / controls. */
.call-interface {
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: space-between;
  height: 60vh;
  padding: 20px;
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
  color: white;
  position: relative;
  overflow: hidden;
}

/* Decorative soft highlights layered over the gradient; click-through. */
.call-interface::before {
  content: '';
  position: absolute;
  top: 0;
  left: 0;
  right: 0;
  bottom: 0;
  background: radial-gradient(circle at 30% 20%, rgba(255, 255, 255, 0.1) 0%, transparent 50%),
              radial-gradient(circle at 70% 80%, rgba(255, 255, 255, 0.05) 0%, transparent 50%);
  pointer-events: none;
}

/* z-index:1 keeps header content above the ::before overlay. */
.call-header {
  text-align: center;
  margin-bottom: 20px;
  position: relative;
  z-index: 1;
  flex-shrink: 0;
}

.call-avatar-container {
  position: relative;
  margin-bottom: 15px;
  flex-shrink: 0;
}

/* Circular halo ring around the avatar; recolored + animated when active. */
.call-avatar-ring {
  position: relative;
  width: 100px;
  height: 100px;
  margin: 0 auto;
  border-radius: 50%;
  background: linear-gradient(45deg, rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 0.1));
  display: flex;
  align-items: center;
  justify-content: center;
  transition: all 0.3s ease;
}

/* Active speech: green tint + expanding pulse-ring animation (defined below). */
.call-avatar-ring.active {
  animation: pulse-ring 2s infinite;
  background: linear-gradient(45deg, rgba(76, 175, 80, 0.3), rgba(139, 195, 74, 0.2));
}

/* 80px avatar centered inside the 100px ring. */
.call-avatar {
  width: 80px;
  height: 80px;
  border-radius: 50%;
  overflow: hidden;
  border: 2px solid rgba(255, 255, 255, 0.4);
  box-shadow: 0 6px 20px rgba(0, 0, 0, 0.3);
  background: rgba(255, 255, 255, 0.1);
}

.call-avatar-img {
  width: 100%;
  height: 100%;
  object-fit: cover;
}

/* Fallback when no avatar image: large initial/letter on a subtle gradient. */
.call-avatar-placeholder {
  width: 100%;
  height: 100%;
  display: flex;
  align-items: center;
  justify-content: center;
  font-size: 32px;
  font-weight: 600;
  background: linear-gradient(135deg, rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 0.1));
}

.call-info {
  text-align: center;
  margin-bottom: 15px;
  flex-shrink: 0;
}

/* Callee/role display name in the call header. */
.call-name {
  font-size: 24px;
  margin: 0 0 8px 0;
  font-weight: 700;
  text-shadow: 0 2px 10px rgba(0, 0, 0, 0.3);
  letter-spacing: -0.5px;
  color: #fff;
}

.call-status-container {
  display: flex;
  flex-direction: column;
  align-items: center;
  gap: 8px;
}

/* Frosted-glass pill showing the current call phase text + dot. */
.call-status {
  display: flex;
  align-items: center;
  justify-content: center;
  gap: 8px;
  font-size: 16px;
  margin: 0;
  font-weight: 600;
  padding: 8px 16px;
  background: rgba(255, 255, 255, 0.1);
  border-radius: 20px;
  backdrop-filter: blur(10px);
  border: 1px solid rgba(255, 255, 255, 0.2);
  transition: all 0.3s ease;
}

/* Phase tints — class names match state strings the component applies:
   user_speaking=green, ai_responding=orange, both_active=pink. */
.call-status.user_speaking {
  color: #4CAF50;
  background: rgba(76, 175, 80, 0.2);
  border-color: rgba(76, 175, 80, 0.3);
}

.call-status.ai_responding {
  color: #FF9800;
  background: rgba(255, 152, 0, 0.2);
  border-color: rgba(255, 152, 0, 0.3);
}

.call-status.both_active {
  color: #E91E63;
  background: rgba(233, 30, 99, 0.2);
  border-color: rgba(233, 30, 99, 0.3);
}

/* Small glowing dot that inherits the pill's current color. */
.status-dot {
  width: 8px;
  height: 8px;
  border-radius: 50%;
  background: currentColor;
  animation: status-pulse 2s infinite;
  box-shadow: 0 0 8px currentColor;
}

/* Elapsed-time readout; monospace keeps digits from jittering as they tick. */
.call-duration {
  font-size: 16px;
  font-family: 'SF Mono', 'Monaco', 'Inconsolata', 'Roboto Mono', monospace;
  font-weight: 600;
  color: rgba(255, 255, 255, 0.9);
  background: rgba(255, 255, 255, 0.1);
  padding: 6px 12px;
  border-radius: 12px;
  backdrop-filter: blur(10px);
  border: 1px solid rgba(255, 255, 255, 0.2);
  letter-spacing: 0.5px;
}

/* Container for the equalizer-style voice visualizer. */
.call-visualizer {
  margin-bottom: 20px;
  position: relative;
  z-index: 1;
  flex-shrink: 0;
}

/* Row of vertical bars, 60px tall. */
.voice-visualizer {
  display: flex;
  align-items: center;
  justify-content: center;
  gap: 4px;
  height: 60px;
  padding: 10px;
}

/* Idle bar: dim, fixed 20px height. */
.voice-bar {
  width: 8px;
  height: 20px;
  background: rgba(255, 255, 255, 0.3);
  border-radius: 4px;
  transition: all 0.3s ease;
  position: relative;
}

/* Active bar: green gradient, bouncing via voicePulse. */
.voice-bar.active {
  background: linear-gradient(to top, #4CAF50, #8BC34A);
  animation: voicePulse 0.8s ease-in-out infinite alternate;
  box-shadow: 0 0 20px rgba(76, 175, 80, 0.5);
}

/* Avatar-ring "sonar" effect: scales up while fading out. */
@keyframes pulse-ring {
  0% {
    transform: scale(1);
    opacity: 1;
  }
  100% {
    transform: scale(1.1);
    opacity: 0;
  }
}

/* Gentle opacity blink for the status dot. */
@keyframes status-pulse {
  0%, 100% {
    opacity: 1;
  }
  50% {
    opacity: 0.5;
  }
}

/* Bar bounce for .voice-bar.active.
   NOTE(review): peak height (80px) exceeds the 60px .voice-visualizer
   container, and scaleY(1.2) stretches it further — likely clipped or
   overflowing; confirm whether the 80px value is intentional. */
@keyframes voicePulse {
  0% { 
    height: 20px;
    transform: scaleY(1);
  }
  100% { 
    height: 80px;
    transform: scaleY(1.2);
  }
}

/* Frosted-glass tray holding the in-call control buttons (mute/hang-up/etc.). */
.call-controls {
  display: flex;
  gap: 24px;
  position: relative;
  z-index: 1;
  justify-content: center;
  margin-top: 20px;
  padding: 20px;
  background: rgba(255, 255, 255, 0.05);
  border-radius: 20px;
  backdrop-filter: blur(10px);
  border: 1px solid rgba(255, 255, 255, 0.1);
  flex-shrink: 0;
}

/* One control: icon above a text label, glassy pill. overflow:hidden
   clips the ::before shine sweep below. */
.call-control-btn {
  display: flex;
  flex-direction: column;
  align-items: center;
  gap: 10px;
  padding: 16px 20px;
  background: rgba(255, 255, 255, 0.1);
  border: 1px solid rgba(255, 255, 255, 0.2);
  border-radius: 20px;
  color: white;
  cursor: pointer;
  transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
  backdrop-filter: blur(20px);
  min-width: 80px;
  position: relative;
  overflow: hidden;
  box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1);
}

/* Shine sweep: a translucent band parked off-screen left... */
.call-control-btn::before {
  content: '';
  position: absolute;
  top: 0;
  left: -100%;
  width: 100%;
  height: 100%;
  background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.2), transparent);
  transition: left 0.5s ease;
}

/* ...which slides across the button on hover. */
.call-control-btn:hover::before {
  left: 100%;
}

.call-control-btn:hover {
  transform: translateY(-2px);
  background: rgba(255, 255, 255, 0.25);
  border-color: rgba(255, 255, 255, 0.4);
  box-shadow: 0 8px 25px rgba(0, 0, 0, 0.2);
}

/* Toggled-on state (e.g. mute engaged): green glow. */
.call-control-btn.active {
  background: rgba(76, 175, 80, 0.3);
  border-color: rgba(76, 175, 80, 0.6);
  box-shadow: 0 0 20px rgba(76, 175, 80, 0.3);
}

/* Hang-up gets destructive red styling. */
.call-control-btn.hangup-btn {
  background: rgba(244, 67, 54, 0.3);
  border-color: rgba(244, 67, 54, 0.6);
}

.call-control-btn.hangup-btn:hover {
  background: rgba(244, 67, 54, 0.4);
  border-color: rgba(244, 67, 54, 0.8);
}

/* Circular icon chip inside each control button. */
.control-icon {
  width: 44px;
  height: 44px;
  display: flex;
  align-items: center;
  justify-content: center;
  border-radius: 50%;
  background: rgba(255, 255, 255, 0.15);
  transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
  border: 1px solid rgba(255, 255, 255, 0.2);
}

.call-control-btn:hover .control-icon {
  background: rgba(255, 255, 255, 0.25);
  transform: scale(1.1);
  border-color: rgba(255, 255, 255, 0.4);
  box-shadow: 0 0 20px rgba(255, 255, 255, 0.2);
}

.control-icon .icon {
  font-size: 20px;
  display: block;
  filter: drop-shadow(0 2px 4px rgba(0, 0, 0, 0.2));
}

/* Text label under the icon. */
.control-label {
  font-size: 13px;
  font-weight: 600;
  text-align: center;
  white-space: nowrap;
  letter-spacing: 0.3px;
  text-shadow: 0 1px 2px rgba(0, 0, 0, 0.3);
}





.call-quality {
  display: flex;
  flex-direction: column;
  gap: 8px;
  font-size: 12px;
  opacity: 0.8;
}

.quality-grid {
  display: grid;
  grid-template-columns: repeat(3, 1fr);
  gap: 12px;
  margin-top: 16px;
  flex-shrink: 0;
}

@media (max-width: 768px) {
  .quality-grid {
    grid-template-columns: 1fr;
    gap: 8px;
  }
  
  .call-controls {
    gap: 16px;
    padding: 16px;
  }
  
  .call-control-btn {
    min-width: 70px;
    padding: 12px 16px;
  }
  
  .control-icon {
    width: 40px;
    height: 40px;
  }
  
  .control-icon .icon {
    font-size: 18px;
  }
  
  .control-label {
    font-size: 12px;
  }
}

/* Single metric card: icon chip + label/value, frosted-glass look. */
.quality-card {
  background: rgba(255, 255, 255, 0.1);
  backdrop-filter: blur(10px);
  border: 1px solid rgba(255, 255, 255, 0.2);
  border-radius: 12px;
  padding: 12px;
  display: flex;
  align-items: center;
  gap: 8px;
  transition: all 0.3s ease;
}

.quality-card:hover {
  background: rgba(255, 255, 255, 0.15);
  transform: translateY(-2px);
}

.quality-icon {
  font-size: 20px;
  width: 32px;
  height: 32px;
  display: flex;
  align-items: center;
  justify-content: center;
  background: rgba(255, 255, 255, 0.1);
  border-radius: 8px;
  flex-shrink: 0;
}

/* min-width:0 lets the label's text-overflow ellipsis work inside flex. */
.quality-content {
  flex: 1;
  min-width: 0;
}

.quality-label {
  font-size: 11px;
  color: rgba(255, 255, 255, 0.7);
  font-weight: 500;
  margin-bottom: 2px;
  white-space: nowrap;
  overflow: hidden;
  text-overflow: ellipsis;
}

/* Metric value; monospace keeps changing numbers visually stable. */
.quality-value {
  font-size: 14px;
  font-weight: 700;
  font-family: 'SF Mono', 'Monaco', 'Inconsolata', 'Roboto Mono', monospace;
  line-height: 1.2;
}

.quality-value.audio-level {
  color: #64B5F6;
}

/* NOTE(review): DUPLICATE SELECTORS — .quality-excellent/.quality-good/
   .quality-fair/.quality-poor are also defined earlier in this stylesheet
   as tinted badges. These !important colors override the earlier text
   colors everywhere both apply; consider scoping one of the two sets
   (e.g. `.quality-card .quality-excellent`) instead of !important. */
.quality-excellent {
  color: #4CAF50 !important;
}

.quality-good {
  color: #8BC34A !important;
}

.quality-fair {
  color: #FF9800 !important;
}

.quality-poor {
  color: #F44336 !important;
}

/* Connection-status text colors (WebSocket / signaling state readout). */
.status-connected {
  color: #4ade80;
}

.status-connecting {
  color: #fbbf24;
}

.status-error {
  color: #ef4444;
}

.status-unknown {
  color: #9ca3af;
}

/* 音色选择器样式 (TTS voice/timbre selector styles) */
/* Centered modal panel over the call UI; scrolls internally when the
   voice list exceeds 60vh. */
.voice-selector {
  position: absolute;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%);
  background: rgba(255, 255, 255, 0.95);
  color: #333;
  border-radius: 12px;
  padding: 20px;
  min-width: 400px;
  max-width: 500px;
  max-height: 60vh;
  overflow-y: auto;
  box-shadow: 0 10px 30px rgba(0, 0, 0, 0.3);
  z-index: 1000;
}

/* Title row with a close button, separated from the list by a rule. */
.voice-selector-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  margin-bottom: 20px;
  padding-bottom: 10px;
  border-bottom: 1px solid #eee;
}

.voice-selector-header h4 {
  margin: 0;
  color: #333;
  font-size: 18px;
}

/* Borderless "×" close button with a circular hover highlight. */
.close-btn {
  background: none;
  border: none;
  font-size: 24px;
  color: #666;
  cursor: pointer;
  padding: 0;
  width: 30px;
  height: 30px;
  display: flex;
  align-items: center;
  justify-content: center;
  border-radius: 50%;
  transition: background-color 0.2s;
}

.close-btn:hover {
  background-color: #f0f0f0;
}

/* Placeholder shown when no voices are available. */
.empty-voice-list {
  text-align: center;
  padding: 20px;
  color: #666;
}

.empty-voice-list p {
  margin: 0;
  font-size: 14px;
}

.voice-list {
  display: flex;
  flex-direction: column;
  gap: 10px;
}

/* One selectable voice row: name/description on the left, preview button
   on the right. */
.voice-item {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 12px 16px;
  border: 2px solid #eee;
  border-radius: 8px;
  cursor: pointer;
  transition: all 0.2s;
}

.voice-item:hover {
  border-color: #667eea;
  background-color: #f8f9ff;
}

/* Currently chosen voice: filled with the accent color, inverted text. */
.voice-item.selected {
  border-color: #667eea;
  background-color: #667eea;
  color: white;
}

.voice-info {
  display: flex;
  flex-direction: column;
  gap: 4px;
}

.voice-name {
  font-weight: 600;
  font-size: 14px;
}

.voice-description {
  font-size: 12px;
  opacity: 0.7;
}

/* "Play sample" button. */
.preview-btn {
  background: #667eea;
  color: white;
  border: none;
  padding: 6px 12px;
  border-radius: 6px;
  font-size: 12px;
  cursor: pointer;
  transition: background-color 0.2s;
}

.preview-btn:hover {
  background: #5a6fd8;
}

/* Inside a selected (accent-filled) row, invert the preview button so it
   stays visible against the accent background. */
.voice-item.selected .preview-btn {
  background: white;
  color: #667eea;
}

.voice-item.selected .preview-btn:hover {
  background: #f0f0f0;
}
</style>


