<template>
  <div class="voice-chat-container">
    <!-- Header: title plus live connection indicator -->
    <header class="chat-header">
      <h1>🎤 AI语音助手</h1>
      <div class="connection-status">
        <span :class="['status-indicator', connectionStatus]"></span>
        <span class="status-text">{{ connectionStatusText }}</span>
      </div>
    </header>

    <!-- Chat message area (auto-scrolled via messagesContainer ref) -->
    <div class="chat-messages" ref="messagesContainer">
      <div
        v-for="message in messages"
        :key="message.id"
        :class="['message', message.type]"
      >
        <div class="message-content">
          <div class="message-text">{{ message.text }}</div>
          <div class="message-time">{{ formatTime(message.timestamp) }}</div>
          
          <!-- Inline audio player, shown only for messages carrying TTS audio -->
          <div v-if="message.audioBase64" class="audio-player">
            <button
              @click="playAudio(message.audioBase64 || '')"
              :disabled="audioPlayer.state.value.isLoading"
              class="play-button"
            >
              <span v-if="audioPlayer.state.value.isLoading">⏳</span>
              <span v-else-if="audioPlayer.state.value.isPlaying">⏸️</span>
              <span v-else>▶️</span>
            </button>
            <div class="audio-info">
              <span class="audio-duration">{{ audioPlayer.durationFormatted.value }}</span>
              <div class="audio-progress">
                <div 
                  class="progress-bar"
                  :style="{ width: audioPlayer.progress.value + '%' }"
                ></div>
              </div>
            </div>
          </div>
        </div>
      </div>

      <!-- Loading state: typing indicator while a request is in flight -->
      <div v-if="isProcessing" class="message ai processing">
        <div class="message-content">
          <div class="typing-indicator">
            <span></span>
            <span></span>
            <span></span>
          </div>
          <div class="message-text">正在处理中...</div>
        </div>
      </div>
    </div>

    <!-- Input area -->
    <div class="chat-input">
      <!-- Recording controls -->
      <div class="recording-controls">
        <!-- Smart voice-call button (simple-VAD multi-turn mode) -->
        <button
          @click="toggleSmartCall"
          :disabled="isProcessing && !isSmartCallActive"
          :class="['smart-call-button', { 
            'active': isSmartCallActive, 
            'connecting': isConnecting,
            'disabled': !isConnected 
          }]"
          :title="!isConnected ? '请先连接WebSocket' : '智能语音通话 - 自然多轮对话'"
        >
          <span v-if="isConnecting">🔄</span>
          <span v-else-if="isSmartCallActive">📴</span>
          <span v-else>🎯</span>
          <span class="button-text">{{ smartCallButtonText }}</span>
        </button>

        <!-- Send-speech button (only visible while a smart call is active) -->
        <button
          v-if="isSmartCallActive"
          @click="sendCurrentSpeech"
          class="send-speech-button"
          title="发送当前录制的语音"
        >
          <span>📤</span>
          <span class="button-text">发送语音</span>
        </button>

        <!-- Call status: duration and sent/received/latency/error stats -->
        <div v-if="isSmartCallActive" class="call-status">
          <div class="call-info">
            <div class="call-duration">⏱️ {{ callDuration }}</div>
            <div class="call-stats">
              <span class="stat-item">📤 {{ audioQualityStats.sentChunks }}</span>
              <span class="stat-item">📥 {{ audioQualityStats.receivedResponses }}</span>
              <span class="stat-item">⚡ {{ audioQualityStats.avgResponseTime }}ms</span>
              <span class="stat-item" :class="{ 'error': audioQualityStats.errors > 0 }">
                ❌ {{ audioQualityStats.errors }}
              </span>
            </div>
          </div>
        </div>

        <!-- Push-to-record button for one-shot voice messages -->
        <!-- NOTE(review): isPhoneCallActive is not declared in the visible
             part of the script — confirm it is defined later in the file. -->
        <button
          @click="toggleRecording"
          :disabled="!recorder.state.value.isSupported || isPhoneCallActive"
          :class="['record-button', { 
            recording: recorder.state.value.isRecording,
            disabled: isPhoneCallActive 
          }]"
        >
          <span v-if="recorder.state.value.isRecording">⏹️</span>
          <span v-else>🎤</span>
          <span class="button-text">录音消息</span>
        </button>
        
        <div v-if="recorder.state.value.isRecording" class="recording-info">
          <span class="recording-duration">{{ recorder.formatDuration.value }}</span>
          <div class="recording-indicator">
            <div class="pulse"></div>
          </div>
        </div>
        
        <div v-if="recorder.state.value.error" class="error-message">
          {{ recorder.state.value.error }}
        </div>
      </div>

      <!-- Text input (for testing without a microphone) -->
      <div class="text-input-section">
        <input
          v-model="textInput"
          @keyup.enter="sendTextMessage"
          placeholder="输入文本测试（可选）"
          class="text-input"
        />
        <button @click="sendTextMessage" :disabled="!textInput.trim()" class="send-button">
          发送
        </button>
      </div>

      <!-- Connection controls -->
      <div class="connection-controls">
        <button
          @click="toggleConnection"
          :class="['connection-button', connectionStatus]"
        >
          {{ isConnected ? '断开连接' : '连接WebSocket' }}
        </button>
        <button @click="clearMessages" class="clear-button">
          清空聊天
        </button>
      </div>
    </div>
  </div>
</template>

<script setup lang="ts">
import { ref, onMounted, onUnmounted, nextTick, computed } from 'vue'
import { useAudioRecorder } from '../composables/useAudioRecorder'
import { useAudioPlayer } from '../composables/useAudioPlayer'
import { useVoiceActivityDetection } from '../composables/useVoiceActivityDetection'
import { voiceChatApi, VoiceChatWebSocket, type VoiceChatRequest, type VoiceChatResponse } from '../services/api'

// A single chat message rendered in the transcript list.
interface ChatMessage {
  id: string  // unique key for v-for (Date.now()-based in this file)
  type: 'user' | 'ai'  // which side of the conversation the bubble belongs to
  text: string  // display text (may carry emoji/status prefixes)
  audioBase64?: string  // optional TTS audio; enables the inline play button
  timestamp: Date  // creation time, rendered via formatTime
  processingTime?: number  // server-side processing time in ms, when reported
}

// Reactive component state.
const messages = ref<ChatMessage[]>([])  // chat transcript
const textInput = ref('')  // text-mode input box
const isProcessing = ref(false)  // drives the typing indicator
const messagesContainer = ref<HTMLElement>()  // scroll target for new messages
const sessionId = ref<string>()  // server-assigned conversation id

// Smart-call (realtime) state.
// NOTE(review): isPhoneCallActive is referenced throughout this file but is
// not declared in this visible section — confirm it is defined further down,
// otherwise this component will throw at runtime.
const isSmartCallActive = ref(false)  // simple-VAD call in progress
const isConnecting = ref(false)  // call setup in flight
const phoneCallRecorder = ref<MediaRecorder | null>(null)  // live-call recorder
const phoneCallStream = ref<MediaStream | null>(null)  // microphone stream
const currentCallAudio = ref<HTMLAudioElement | null>(null)  // reply currently playing
const callStartTime = ref<Date | null>(null)  // basis for the duration display
const callDuration = ref('')  // formatted mm:ss
const audioQualityStats = ref({
  sentChunks: 0,  // audio chunks sent upstream
  receivedResponses: 0,  // AI replies received
  avgResponseTime: 0,  // running average, ms
  errors: 0  // recorder/transport/processing errors
})

// Composable helpers for recording and playback.
const recorder = useAudioRecorder()
const audioPlayer = useAudioPlayer()

// WebSocket client handle plus derived connection state for the header.
let wsClient: VoiceChatWebSocket | null = null
const isConnected = ref(false)
const connectionStatus = computed(() => (isConnected.value ? 'connected' : 'disconnected'))
const connectionStatusText = computed(() => (isConnected.value ? '已连接' : '未连接'))

// Label for the smart-call button, reflecting connection and call state.
const smartCallButtonText = computed(() => {
  if (!isConnected.value) return '需要连接'
  if (isConnecting.value) return '连接中...'
  return isSmartCallActive.value ? `挂断 ${callDuration.value}` : '智能通话'
})

// Recompute the mm:ss call-duration label from the call start time.
// No-op unless a smart call is active and a start time has been set.
const updateCallDuration = () => {
  if (!callStartTime.value || !isSmartCallActive.value) return
  const elapsedMs = Date.now() - callStartTime.value.getTime()
  const pad = (v: number) => v.toString().padStart(2, '0')
  const minutes = Math.floor(elapsedMs / 60000)
  const seconds = Math.floor((elapsedMs % 60000) / 1000)
  callDuration.value = `${pad(minutes)}:${pad(seconds)}`
}

// Interval handle that drives updateCallDuration once per second.
let durationTimer: number | null = null

// Create the WebSocket client and register every event handler: connection
// lifecycle, voice responses, streaming-mode updates, and the simple-VAD
// multi-turn events. On failure the app keeps working via the HTTP API.
const initWebSocket = async () => {
  try {
    wsClient = new VoiceChatWebSocket()
    
    // Connection lifecycle events.
    wsClient.on('connected', () => {
      isConnected.value = true
      addSystemMessage('✅ WebSocket连接已建立')
      console.log('WebSocket连接成功')
    })
    
    wsClient.on('disconnected', () => {
      isConnected.value = false
      addSystemMessage('❌ WebSocket连接已断开')
      console.log('WebSocket连接断开')
      
      // Auto-hang-up if a realtime call is in progress when the link drops.
      // NOTE(review): isPhoneCallActive is not declared in the visible part
      // of this script — confirm it is defined later in the file.
      if (isPhoneCallActive.value) {
        endPhoneCall()
        addSystemMessage('📴 连接断开，通话已自动结束')
      }
    })
    
    wsClient.on('error', (error: Error) => {
      console.error('WebSocket错误:', error)
      isConnected.value = false
      const errorMessage = error?.message || 'WebSocket连接失败'
      addSystemMessage('❌ WebSocket连接错误: ' + errorMessage)
    })
    
    // Log every raw message for debugging.
    wsClient.on('message', (data: unknown) => {
      console.log('收到WebSocket消息:', data)
    })
    
    // Voice responses: realtime ones go to the low-latency handler,
    // everything else to the normal chat-response handler.
    wsClient.on('voice-response', (data: { data: VoiceChatResponse }) => {
      console.log('收到语音响应:', data)
      try {
        if (data.data.isRealtime && isPhoneCallActive.value) {
          handleRealtimeResponse(data.data)
        } else {
          handleVoiceResponse(data.data)
        }
      } catch (error) {
        console.error('处理语音响应失败:', error)
        audioQualityStats.value.errors++
        addSystemMessage('❌ 响应处理失败，请重试')
      }
    })
    
    // Streaming-mode status messages.
    wsClient.on('streaming_connected', (data: { content: string }) => {
      addSystemMessage('🌊 ' + data.content)
    })
    
    wsClient.on('streaming_recognition', (data: { content: string }) => {
      console.log('流式识别:', data.content)
      // Surface partial recognition results in real time.
      addSystemMessage('🎤 识别: ' + data.content)
    })
    
    wsClient.on('streaming_ai_thinking', (data: { content: string }) => {
      addSystemMessage('🤔 ' + data.content)
    })
    
    wsClient.on('streaming_ai_response', (data: { content: string }) => {
      console.log('流式AI回复:', data.content)
      // Append the streamed AI reply as a regular chat message.
      const aiMessage: ChatMessage = {
        id: Date.now().toString(),
        type: 'ai',
        text: '🌊 ' + data.content,
        timestamp: new Date()
      }
      messages.value.push(aiMessage)
      scrollToBottom()
    })
    
    wsClient.on('streaming_audio', (data: { audioBase64: string, processingTime: number }) => {
      console.log('收到流式音频，处理时间:', data.processingTime, 'ms')
      if (data.audioBase64 && data.audioBase64.length > 0) {
        playRealtimeAudio(data.audioBase64)
      }
    })
    
    // VAD conversation-state listeners (waiting / listening / processing).
    wsClient.on('streaming_waiting', (data: { content: string }) => {
      console.log('AI等待用户说话:', data.content)
      addSystemMessage('⏳ ' + data.content)
    })
    
    wsClient.on('streaming_listening', (data: { content: string }) => {
      console.log('正在听用户说话:', data.content)
      addSystemMessage('👂 ' + data.content)
    })
    
    wsClient.on('streaming_processing', (data: { content: string }) => {
      console.log('正在处理用户语音:', data.content)
      addSystemMessage('⚡ ' + data.content)
    })

    // Simplified-VAD (smart call) event listeners; each event carries the
    // dialogue round number.
    wsClient.on('simple_vad_ready', (data: { content: string, round: number }) => {
      console.log('简化VAD就绪:', data.content, '轮数:', data.round)
      addSystemMessage('🎯 ' + data.content)
    })

    wsClient.on('simple_vad_recognizing', (data: { content: string, round: number }) => {
      console.log('正在识别:', data.content, '轮数:', data.round)
      addSystemMessage('🔍 ' + data.content)
    })

    wsClient.on('simple_vad_recognized', (data: { content: string, round: number }) => {
      console.log('识别完成:', data.content, '轮数:', data.round)
      // Recognized speech becomes a user-side message tagged with the round.
      const userMessage: ChatMessage = {
        id: Date.now().toString(),
        type: 'user',
        text: `🎤 [第${data.round}轮] ${data.content}`,
        timestamp: new Date()
      }
      messages.value.push(userMessage)
      scrollToBottom()
    })

    wsClient.on('simple_vad_thinking', (data: { content: string, round: number }) => {
      console.log('AI思考中:', data.content, '轮数:', data.round)
      addSystemMessage('🤔 ' + data.content)
    })

    wsClient.on('simple_vad_ai_response', (data: { content: string, round: number }) => {
      console.log('AI回复:', data.content, '轮数:', data.round)
      const aiMessage: ChatMessage = {
        id: Date.now().toString(),
        type: 'ai',
        text: `🤖 [第${data.round}轮] ${data.content}`,
        timestamp: new Date()
      }
      messages.value.push(aiMessage)
      scrollToBottom()
    })

    wsClient.on('simple_vad_generating_audio', (data: { content: string, round: number }) => {
      console.log('生成语音:', data.content, '轮数:', data.round)
      addSystemMessage('🎵 ' + data.content)
    })

    wsClient.on('simple_vad_audio', (data: { audioBase64: string, processingTime: number, round: number }) => {
      console.log('收到VAD音频，轮数:', data.round, '处理时间:', data.processingTime, 'ms')
      if (data.audioBase64 && data.audioBase64.length > 0) {
        playRealtimeAudio(data.audioBase64)
      }
      audioQualityStats.value.receivedResponses++
    })

    wsClient.on('simple_vad_no_speech', (data: { content: string, round: number }) => {
      console.log('未检测到语音:', data.content, '轮数:', data.round)
      addSystemMessage('❌ ' + data.content)
    })

    wsClient.on('simple_vad_error', (data: { content: string, round: number }) => {
      console.log('VAD错误:', data.content, '轮数:', data.round)
      addSystemMessage('⚠️ ' + data.content)
    })

    wsClient.on('simple_vad_ended', (data: { content: string }) => {
      console.log('智能通话会话结束:', data.content)
      addSystemMessage('📴 ' + data.content)
      isSmartCallActive.value = false
    })
    
    // Server-side processing notifications.
    wsClient.on('processing', (data: { message: string }) => {
      addSystemMessage('🔄 ' + data.message)
    })
    
    // Application-level connection status messages.
    wsClient.on('connection', (data: { status: string; message: string }) => {
      console.log('连接状态消息:', data)
      if (data.status === 'connected') {
        addSystemMessage('🎉 ' + data.message)
      }
    })
    
    console.log('开始连接WebSocket...')
    await wsClient.connect()
  } catch (error) {
    console.error('WebSocket连接失败:', error)
    isConnected.value = false
    addSystemMessage('❌ WebSocket连接失败，将使用HTTP接口')
  }
}

// Connect or disconnect the WebSocket, depending on current state.
const toggleConnection = async () => {
  if (!isConnected.value) {
    await initWebSocket()
    return
  }
  wsClient?.disconnect()
}

// Start a one-shot recording, or stop the active one and send the clip.
const toggleRecording = async () => {
  if (!recorder.state.value.isRecording) {
    const started = await recorder.startRecording()
    if (!started) {
      addSystemMessage('录音启动失败，请检查麦克风权限')
    }
    return
  }
  recorder.stopRecording()
  // Give the recorder a moment to finalize the audio blob before sending.
  setTimeout(() => {
    if (recorder.state.value.audioBlob) {
      sendVoiceMessage()
    }
  }, 500)
}

// Send the recorded clip as a voice message. Uses the WebSocket when
// connected (the response arrives via the event listeners registered in
// initWebSocket), otherwise falls back to the HTTP API.
const sendVoiceMessage = async () => {
  if (!recorder.state.value.audioBlob) {
    addSystemMessage('没有录音数据')
    return
  }

  try {
    isProcessing.value = true
    
    // Append the user's message (shows the recording duration).
    const userMessage: ChatMessage = {
      id: Date.now().toString(),
      type: 'user',
      text: `🎤 语音消息 (${recorder.formatDuration.value})`,
      timestamp: new Date()
    }
    messages.value.push(userMessage)
    scrollToBottom()

    // Convert the recording to base64 for transport.
    const audioBase64 = await recorder.toBase64()
    
    const request: VoiceChatRequest = {
      audioBase64: audioBase64,
      sessionId: sessionId.value
    }

    let response: VoiceChatResponse
    
    if (isConnected.value && wsClient) {
      // Send over the WebSocket.
      // NOTE(review): this early return still runs the finally block, so
      // isProcessing is cleared (and the recorder cleaned up) before the
      // async response arrives — confirm that is intentional.
      wsClient.sendVoiceChat(request)
      return // WebSocket responses are handled by the event listeners
    } else {
      // Fall back to the HTTP API.
      response = await voiceChatApi.chat(request)
    }

    handleVoiceResponse(response)
  } catch (error) {
    console.error('发送语音消息失败:', error)
    addSystemMessage('发送失败: ' + (error as Error).message)
  } finally {
    isProcessing.value = false
    recorder.cleanup()
  }
}

// Send the typed text as a chat message. Mirrors sendVoiceMessage: the
// WebSocket path returns early and relies on the event listeners, while
// the HTTP path handles the response inline.
const sendTextMessage = async () => {
  if (!textInput.value.trim()) return

  try {
    isProcessing.value = true
    
    // Append the user's message to the transcript.
    const userMessage: ChatMessage = {
      id: Date.now().toString(),
      type: 'user',
      text: textInput.value,
      timestamp: new Date()
    }
    messages.value.push(userMessage)
    scrollToBottom()

    const request: VoiceChatRequest = {
      textInput: textInput.value,
      sessionId: sessionId.value
    }

    textInput.value = '' // clear the input box

    let response: VoiceChatResponse
    
    if (isConnected.value && wsClient) {
      // Send over the WebSocket.
      wsClient.sendVoiceChat(request)
      return // WebSocket responses are handled by the event listeners
    } else {
      // Fall back to the HTTP API.
      response = await voiceChatApi.chat(request)
    }

    handleVoiceResponse(response)
  } catch (error) {
    console.error('发送文本消息失败:', error)
    addSystemMessage('发送失败: ' + (error as Error).message)
  } finally {
    isProcessing.value = false
  }
}

// 处理语音响应
const handleVoiceResponse = (response: VoiceChatResponse) => {
  isProcessing.value = false
  
  if (response.status === 'success') {
    // 更新会话ID
    if (response.sessionId) {
      sessionId.value = response.sessionId
    }
    
    // 添加AI回复消息
    const aiMessage: ChatMessage = {
      id: Date.now().toString(),
      type: 'ai',
      text: response.aiResponse || '没有回复内容',
      audioBase64: response.audioBase64,
      timestamp: new Date(),
      processingTime: response.processingTime
    }
    messages.value.push(aiMessage)
    scrollToBottom()
    
    // 自动播放AI回复的语音
    if (response.audioBase64) {
      setTimeout(() => {
        playAudio(response.audioBase64!)
      }, 500)
    }
  } else {
    addSystemMessage('处理失败: ' + (response.errorMessage || '未知错误'))
  }
}

// Play a base64-encoded audio clip. Aliyun TTS returns raw PCM, so a
// PCM-to-WAV attempt runs first, then common container formats are tried
// as fallbacks before giving up.
const playAudio = async (base64Audio: string) => {
  try {
    console.log('开始播放音频，数据长度:', base64Audio.length)

    // Primary path: PCM converted to WAV by the player.
    if (await audioPlayer.playFromBase64(base64Audio, 'audio/pcm')) {
      console.log('音频播放成功 (PCM转WAV)')
      return
    }

    // Fallback formats, tried in order; per-format failures are non-fatal.
    for (const format of ['audio/wav', 'audio/mp3', 'audio/webm', 'audio/ogg']) {
      try {
        if (await audioPlayer.playFromBase64(base64Audio, format)) {
          console.log(`音频播放成功，格式: ${format}`)
          return
        }
      } catch (formatError) {
        console.warn(`格式 ${format} 播放失败:`, formatError)
      }
    }

    throw new Error('所有音频格式都无法播放')
  } catch (error) {
    console.error('音频播放失败:', error)
    addSystemMessage('❌ 音频播放失败: ' + (error as Error).message)
  }
}

// Start or end a realtime phone call (requires an open WebSocket).
const togglePhoneCall = async () => {
  if (!isConnected.value) {
    addSystemMessage('❌ 请先连接WebSocket')
    return
  }
  // Hang up when already in a call, otherwise dial.
  await (isPhoneCallActive.value ? endPhoneCall() : startPhoneCall())
}

// Start or end a streaming (VAD-driven) call (requires an open WebSocket).
const toggleStreamingCall = async () => {
  if (!isConnected.value) {
    addSystemMessage('❌ 请先连接WebSocket')
    return
  }
  // End the streaming call when active, otherwise start one.
  await (isPhoneCallActive.value ? endPhoneCall() : startStreamingCall())
}

// Start or end a smart (simple-VAD) call (requires an open WebSocket).
const toggleSmartCall = async () => {
  if (!isConnected.value) {
    addSystemMessage('❌ 请先连接WebSocket')
    return
  }
  await (isSmartCallActive.value ? endSmartCall() : startSmartCall())
}

// Start a realtime phone call: acquire the microphone, record with
// MediaRecorder in 1s chunks, batch chunks and stream them upstream over
// the WebSocket, and start the duration timer. Size limits guard against
// oversized payloads.
const startPhoneCall = async () => {
  try {
    isConnecting.value = true
    addSystemMessage('📞 正在启动实时通话...')

    // Reset the per-call statistics.
    audioQualityStats.value = {
      sentChunks: 0,
      receivedResponses: 0,
      avgResponseTime: 0,
      errors: 0
    }

    // Request microphone access with echo/noise processing enabled.
    phoneCallStream.value = await navigator.mediaDevices.getUserMedia({ 
      audio: {
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: true,
        sampleRate: 24000
      } 
    })

    // Create a MediaRecorder for continuous realtime capture.
    phoneCallRecorder.value = new MediaRecorder(phoneCallStream.value, {
      mimeType: 'audio/webm;codecs=opus'
    })

    let audioChunks: Blob[] = []
    let isFirstChunk = true
    let chunkCount = 0

    phoneCallRecorder.value.ondataavailable = async (event) => {
      if (event.data.size > 0 && event.data.size < 100000) { // cap a single chunk at 100KB
        audioChunks.push(event.data)
        chunkCount++
        
        // Send after batching ~2 chunks to reduce send frequency.
        // NOTE(review): since audioChunks is cleared after every send, both
        // halves of this condition trigger on the same even counts — confirm
        // the intended cadence.
        if (audioChunks.length >= 2 || chunkCount % 2 === 0) {
          const audioBlob = new Blob(audioChunks, { type: 'audio/webm' })
          
          // Skip the batch entirely when it exceeds the total-size cap.
          if (audioBlob.size < 200000) { // cap the batched payload at 200KB
            const sendTime = Date.now()
            await sendRealtimeAudio(audioBlob, isFirstChunk, sendTime)
            isFirstChunk = false
          } else {
            console.warn('音频块太大，跳过发送:', audioBlob.size)
            audioQualityStats.value.errors++
          }
          
          audioChunks = []
        }
      }
    }

    phoneCallRecorder.value.onstop = async () => {
      // Flush any remaining audio when recording stops.
      if (audioChunks.length > 0) {
        const audioBlob = new Blob(audioChunks, { type: 'audio/webm' })
        await sendRealtimeAudio(audioBlob, false, Date.now())
      }
    }

    phoneCallRecorder.value.onerror = (event) => {
      console.error('录音器错误:', event)
      audioQualityStats.value.errors++
      addSystemMessage('❌ 录音器出现错误，请重新开始通话')
    }

    // Start recording; emit a data chunk every 1000ms (reduced frequency).
    phoneCallRecorder.value.start(1000)
    
    // Mark the call as active.
    // NOTE(review): isPhoneCallActive is not declared in the visible part of
    // this script — confirm it is defined later in the file.
    isPhoneCallActive.value = true
    isConnecting.value = false
    callStartTime.value = new Date()
    
    // Start the duration ticker.
    durationTimer = setInterval(updateCallDuration, 1000)
    
    addSystemMessage('📞 实时通话已开始，请说话...')
    addSystemMessage(`🎯 音频质量: 回声消除已启用，噪声抑制已启用`)

  } catch (error) {
    console.error('启动实时通话失败:', error)
    isConnecting.value = false
    audioQualityStats.value.errors++
    
    const errorMessage = (error as Error).message
    if (errorMessage.includes('Permission denied') || errorMessage.includes('NotAllowedError')) {
      addSystemMessage('❌ 麦克风权限被拒绝，请允许访问麦克风后重试')
    } else if (errorMessage.includes('NotFoundError')) {
      addSystemMessage('❌ 未找到麦克风设备，请检查设备连接')
    } else {
      addSystemMessage('❌ 启动实时通话失败: ' + errorMessage)
    }
  }
}

// End a realtime phone call: stop the timer, recorder, mic tracks, and
// any playing reply audio; reset call state; report per-call statistics.
const endPhoneCall = async () => {
  try {
    addSystemMessage('📴 正在结束通话...')

    // Stop the duration ticker.
    if (durationTimer) {
      clearInterval(durationTimer)
      durationTimer = null
    }

    // Stop recording if the recorder is still running.
    if (phoneCallRecorder.value && phoneCallRecorder.value.state !== 'inactive') {
      phoneCallRecorder.value.stop()
    }

    // Release the microphone stream.
    if (phoneCallStream.value) {
      phoneCallStream.value.getTracks().forEach(track => track.stop())
      phoneCallStream.value = null
    }

    // Stop any reply audio that is still playing.
    if (currentCallAudio.value) {
      currentCallAudio.value.pause()
      currentCallAudio.value = null
    }

    // Reset call state.
    phoneCallRecorder.value = null
    isPhoneCallActive.value = false
    callStartTime.value = null
    callDuration.value = ''

    // Surface the per-call statistics.
    const stats = audioQualityStats.value
    addSystemMessage('📴 通话已结束')
    addSystemMessage(`📊 通话统计: 发送${stats.sentChunks}块音频, 收到${stats.receivedResponses}个回复, 平均响应${stats.avgResponseTime}ms, 错误${stats.errors}次`)

  } catch (error) {
    console.error('结束通话失败:', error)
    addSystemMessage('❌ 结束通话失败: ' + (error as Error).message)
  }
}

// Voice-activity-detection instance used by the streaming call mode.
const vad = useVoiceActivityDetection({
  silenceThreshold: 15, // volume threshold below which audio counts as silence
  silenceDuration: 2000, // auto-send after 2s of silence
  minSpeechDuration: 500, // require at least 0.5s of speech before sending
  sampleRate: 16000
})

// Start a streaming call driven by client-side VAD: acquire the mic,
// initialize the VAD over the stream, and wire its callbacks so complete
// utterances are sent automatically after a silence gap.
const startStreamingCall = async () => {
  try {
    isConnecting.value = true
    addSystemMessage('🌊 正在启动智能语音通话...')

    // Reset the per-call statistics.
    audioQualityStats.value = {
      sentChunks: 0,
      receivedResponses: 0,
      avgResponseTime: 0,
      errors: 0
    }

    // Request microphone access at the VAD's sample rate.
    phoneCallStream.value = await navigator.mediaDevices.getUserMedia({ 
      audio: {
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: true,
        sampleRate: 16000
      } 
    })

    // Initialize voice-activity detection over the stream.
    const vadInitialized = await vad.initVAD(phoneCallStream.value)
    if (!vadInitialized) {
      throw new Error('语音活动检测初始化失败')
    }

    // Wire VAD callbacks: speech start/end and live volume.
    vad.setCallbacks({
      onSpeechStart: () => {
        console.log('🎤 开始说话')
        addSystemMessage('🎤 正在听您说话...')
        // Tell the backend the user started speaking.
        sendVADMessage('speech_start')
      },
      onSpeechEnd: (audioBlob: Blob) => {
        console.log('✅ 说话结束，发送音频')
        addSystemMessage('🔄 正在处理您的话...')
        // Ship the complete utterance to the backend.
        sendCompleteAudio(audioBlob)
      },
      onVolumeChange: (volume: number) => {
        // Could drive a volume bar in the UI.
        updateVolumeDisplay(volume)
      }
    })
    
    // Mark the call as active.
    // NOTE(review): isPhoneCallActive is not declared in the visible part of
    // this script — confirm it is defined later in the file.
    isPhoneCallActive.value = true
    isConnecting.value = false
    callStartTime.value = new Date()
    
    // Start the duration ticker.
    durationTimer = setInterval(updateCallDuration, 1000)
    
    // Announce the connection to the backend.
    sendVADMessage('connected')
    
    addSystemMessage('🌊 智能语音通话已开始！')
    addSystemMessage('💡 特点：自动检测说话，停顿2秒后自动发送')
    addSystemMessage('🎯 请开始说话，AI会等您说完再回复')

  } catch (error) {
    console.error('启动流式通话失败:', error)
    isConnecting.value = false
    audioQualityStats.value.errors++
    addSystemMessage('❌ 启动智能通话失败: ' + (error as Error).message)
  }
}

// Send a VAD control signal (e.g. 'speech_start', 'connected') over the
// WebSocket, reusing the textInput field as the message-type channel.
const sendVADMessage = async (messageType: string) => {
  if (!wsClient || !isConnected.value) return

  wsClient.sendVoiceChat({
    sessionId: sessionId.value || Date.now().toString(),
    textInput: messageType, // message type travels in the textInput field
    isRealtime: true,
    sendTime: Date.now()
  })
  console.log('发送VAD消息:', messageType)
}

// Encode a finished utterance as base64 and send it over the WebSocket,
// tagged with textInput: 'speech_end' so the backend knows the utterance
// is complete. Counts sends and failures into audioQualityStats.
const sendCompleteAudio = async (audioBlob: Blob) => {
  try {
    const reader = new FileReader()
    reader.onload = async () => {
      // Strip the "data:...;base64," prefix from the data URL.
      const base64Audio = (reader.result as string).split(',')[1]
      
      if (wsClient && isConnected.value) {
        const request = {
          audioBase64: base64Audio,
          sessionId: sessionId.value || Date.now().toString(),
          textInput: 'speech_end', // marks the end of the utterance
          isRealtime: true,
          sendTime: Date.now()
        }
        
        wsClient.sendVoiceChat(request)
        audioQualityStats.value.sentChunks++
        
        console.log('发送完整音频，大小:', base64Audio.length)
      }
    }
    // Fix: FileReader failures fire asynchronously, so the surrounding
    // try/catch never sees them — count them into the error stats here.
    reader.onerror = () => {
      console.error('发送完整音频失败:', reader.error)
      audioQualityStats.value.errors++
    }
    reader.readAsDataURL(audioBlob)
  } catch (error) {
    console.error('发送完整音频失败:', error)
    audioQualityStats.value.errors++
  }
}

// Hook for surfacing the live microphone volume in the UI.
// Currently only logs; wire this to a volume bar if one is added.
const updateVolumeDisplay = (volume: number) => {
  // A volume-bar display could be rendered here.
  console.log('当前音量:', volume)
}

// Start a smart (simple-VAD) call: acquire the mic, record continuously
// with MediaRecorder, send the accumulated audio when the user presses
// "send speech" (the recorder's onstop handler), and notify the backend
// to open a simple-VAD session.
const startSmartCall = async () => {
  try {
    isConnecting.value = true
    addSystemMessage('🎯 正在启动智能语音通话...')

    // Reset the per-call statistics.
    audioQualityStats.value = {
      sentChunks: 0,
      receivedResponses: 0,
      avgResponseTime: 0,
      errors: 0
    }

    // Request microphone access.
    phoneCallStream.value = await navigator.mediaDevices.getUserMedia({ 
      audio: {
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: true,
        sampleRate: 16000
      } 
    })

    // Plain MediaRecorder; audio is accumulated and flushed on stop.
    phoneCallRecorder.value = new MediaRecorder(phoneCallStream.value, {
      mimeType: 'audio/webm;codecs=opus'
    })

    let audioChunks: Blob[] = []

    phoneCallRecorder.value.ondataavailable = async (event) => {
      if (event.data.size > 0) {
        audioChunks.push(event.data)
      }
    }

    phoneCallRecorder.value.onstop = async () => {
      if (audioChunks.length > 0) {
        const audioBlob = new Blob(audioChunks, { type: 'audio/webm' })
        if (audioBlob.size > 1000) { // only send at least 1KB of audio
          await sendSmartCallAudio(audioBlob)
        }
        audioChunks = [] // reset for the next utterance
      }
    }

    // Mark the call as active.
    isSmartCallActive.value = true
    isConnecting.value = false
    callStartTime.value = new Date()
    
    // Start the duration ticker.
    durationTimer = setInterval(updateCallDuration, 1000)
    
    // Begin recording.
    phoneCallRecorder.value.start()
    
    // Ask the backend to open a simple-VAD session.
    if (wsClient && isConnected.value) {
      const startRequest = {
        sessionId: sessionId.value || Date.now().toString(),
        textInput: 'simple_vad_start',
        isRealtime: true,
        sendTime: Date.now()
      }
      wsClient.sendVoiceChat(startRequest)
    }
    
    addSystemMessage('🎯 智能语音通话已开始！')
    addSystemMessage('💡 请说话，说完后点击"发送语音"按钮')
    addSystemMessage('🔄 系统会自动处理您的语音并回复')

  } catch (error) {
    console.error('启动智能通话失败:', error)
    isConnecting.value = false
    audioQualityStats.value.errors++
    addSystemMessage('❌ 启动智能通话失败: ' + (error as Error).message)
  }
}

// Encode a smart-call utterance as base64 and send it over the WebSocket,
// tagged with textInput: 'simple_vad' so the backend routes it through the
// simplified VAD pipeline. Counts sends and failures into audioQualityStats.
const sendSmartCallAudio = async (audioBlob: Blob) => {
  try {
    const reader = new FileReader()
    reader.onload = async () => {
      // Strip the "data:...;base64," prefix from the data URL.
      const base64Audio = (reader.result as string).split(',')[1]
      
      if (wsClient && isConnected.value) {
        const request = {
          audioBase64: base64Audio,
          sessionId: sessionId.value || Date.now().toString(),
          textInput: 'simple_vad', // routes through the simplified VAD pipeline
          isRealtime: true,
          sendTime: Date.now()
        }
        
        wsClient.sendVoiceChat(request)
        audioQualityStats.value.sentChunks++
        
        console.log('发送智能通话音频，大小:', base64Audio.length)
      }
    }
    // Fix: FileReader failures fire asynchronously, so the surrounding
    // try/catch never sees them — count them into the error stats here.
    reader.onerror = () => {
      console.error('发送智能通话音频失败:', reader.error)
      audioQualityStats.value.errors++
    }
    reader.readAsDataURL(audioBlob)
  } catch (error) {
    console.error('发送智能通话音频失败:', error)
    audioQualityStats.value.errors++
  }
}

// Manually flush the current utterance: stopping the recorder triggers its
// onstop handler (which sends the audio), then recording restarts after a
// short pause so the call can continue.
const sendCurrentSpeech = async () => {
  const rec = phoneCallRecorder.value
  if (!rec || rec.state !== 'recording') return

  rec.stop()
  addSystemMessage('📤 正在发送语音...')

  // Resume capture shortly afterwards if the call is still live.
  setTimeout(() => {
    if (isSmartCallActive.value && phoneCallRecorder.value) {
      phoneCallRecorder.value.start()
    }
  }, 1000)
}

// End a smart call: stop the recorder and mic tracks, clear the duration
// timer, reset call state, and report per-call statistics.
const endSmartCall = async () => {
  try {
    if (phoneCallRecorder.value && phoneCallRecorder.value.state === 'recording') {
      phoneCallRecorder.value.stop()
    }
    
    if (phoneCallStream.value) {
      phoneCallStream.value.getTracks().forEach(track => track.stop())
    }
    
    // Clear the duration ticker.
    if (durationTimer) {
      clearInterval(durationTimer)
      durationTimer = null
    }
    
    // Reset call state.
    isSmartCallActive.value = false
    isConnecting.value = false
    phoneCallRecorder.value = null
    phoneCallStream.value = null
    callStartTime.value = null
    callDuration.value = ''
    
    // Surface the per-call statistics.
    const stats = audioQualityStats.value
    addSystemMessage(`📊 通话统计: 发送${stats.sentChunks}块音频, 收到${stats.receivedResponses}个回复, 平均响应${stats.avgResponseTime}ms, 错误${stats.errors}次`)
    
    addSystemMessage('📴 智能通话已结束')
    
  } catch (error) {
    console.error('结束智能通话失败:', error)
    addSystemMessage('❌ 结束智能通话失败: ' + (error as Error).message)
  }
}

// Encode a streaming-mode audio chunk as base64 and send it over the
// WebSocket, tagged with textInput: '流式' so the backend uses streaming
// processing. Counts sends and failures into audioQualityStats.
const sendStreamingAudio = async (audioBlob: Blob, isFirstChunk: boolean, sendTime: number) => {
  try {
    const reader = new FileReader()
    reader.onload = async () => {
      // Strip the "data:...;base64," prefix from the data URL.
      const base64Audio = (reader.result as string).split(',')[1]
      
      if (wsClient && isConnected.value) {
        const request = {
          audioBase64: base64Audio,
          sessionId: sessionId.value || Date.now().toString(),
          isRealtime: true,
          isFirstChunk: isFirstChunk,
          sendTime: sendTime,
          textInput: '流式' // routes through streaming processing
        }
        
        wsClient.sendVoiceChat(request)
        audioQualityStats.value.sentChunks++
        
        console.log('发送流式音频块，大小:', base64Audio.length, '块数:', audioQualityStats.value.sentChunks)
      }
    }
    // Fix: FileReader failures fire asynchronously, so the surrounding
    // try/catch never sees them — count them into the error stats here.
    reader.onerror = () => {
      console.error('发送流式音频失败:', reader.error)
      audioQualityStats.value.errors++
    }
    reader.readAsDataURL(audioBlob)
  } catch (error) {
    console.error('发送流式音频失败:', error)
    audioQualityStats.value.errors++
  }
}

// Encode a realtime-call audio chunk as base64 and send it over the
// WebSocket. sendTime rides along so the backend/stats can compute
// round-trip latency. Counts sends and failures into audioQualityStats.
const sendRealtimeAudio = async (audioBlob: Blob, isFirstChunk: boolean, sendTime: number) => {
  try {
    // Convert the blob to a base64 data URL.
    const reader = new FileReader()
    reader.onload = async () => {
      // Strip the "data:...;base64," prefix from the data URL.
      const base64Audio = (reader.result as string).split(',')[1]
      
      if (wsClient && isConnected.value) {
        const request = {
          audioBase64: base64Audio,
          sessionId: sessionId.value || Date.now().toString(),
          isRealtime: true,
          isFirstChunk: isFirstChunk,
          sendTime: sendTime // used to compute response latency
        }
        
        wsClient.sendVoiceChat(request)
        audioQualityStats.value.sentChunks++
        
        console.log('发送实时音频块，大小:', base64Audio.length, '块数:', audioQualityStats.value.sentChunks)
      }
    }
    // Fix: FileReader failures fire asynchronously, so the surrounding
    // try/catch never sees them — count them into the error stats here.
    reader.onerror = () => {
      console.error('发送实时音频失败:', reader.error)
      audioQualityStats.value.errors++
    }
    reader.readAsDataURL(audioBlob)
  } catch (error) {
    console.error('发送实时音频失败:', error)
    audioQualityStats.value.errors++
  }
}

// 处理实时通话响应
const handleRealtimeResponse = async (response: VoiceChatResponse) => {
  try {
    // 更新统计信息
    audioQualityStats.value.receivedResponses++
    
    // 计算响应时间
    if (response.processingTime) {
      const totalResponseTime = audioQualityStats.value.avgResponseTime * (audioQualityStats.value.receivedResponses - 1) + response.processingTime
      audioQualityStats.value.avgResponseTime = Math.round(totalResponseTime / audioQualityStats.value.receivedResponses)
    }
    
    if (response.status === 'success') {
      console.log('收到实时AI语音响应，音频长度:', response.audioBase64?.length || 0, '响应时间:', response.processingTime, 'ms')
      
      // 播放AI回复的语音（如果有音频数据）
      if (response.audioBase64 && response.audioBase64.length > 0) {
        await playRealtimeAudio(response.audioBase64)
      } else if (response.aiResponse && response.aiResponse.trim()) {
        // 如果没有音频但有文本，显示提示
        console.log('收到文本回复但无音频:', response.aiResponse)
      }
      
      // 如果有识别的文字，显示在聊天中
      if (response.recognizedText && response.recognizedText.trim()) {
        const userMessage: ChatMessage = {
          id: Date.now().toString(),
          type: 'user',
          text: `🎤 ${response.recognizedText}`,
          timestamp: new Date()
        }
        messages.value.push(userMessage)
      }
      
      // 显示AI回复文字
      if (response.aiResponse && response.aiResponse.trim()) {
        const aiMessage: ChatMessage = {
          id: Date.now().toString() + '_ai',
          type: 'ai',
          text: response.aiResponse,
          timestamp: new Date()
        }
        messages.value.push(aiMessage)
      }
      
      scrollToBottom()
    } else if (response.status === 'error') {
      audioQualityStats.value.errors++
      console.error('实时通话响应错误:', response.errorMessage)
    }
  } catch (error) {
    console.error('处理实时响应失败:', error)
    audioQualityStats.value.errors++
  }
}

// 播放实时音频（优先级高，立即播放）
const playRealtimeAudio = async (base64Audio: string) => {
  try {
    // 停止当前播放的音频
    if (currentCallAudio.value) {
      currentCallAudio.value.pause()
      currentCallAudio.value = null
    }

    // 转换PCM为WAV并播放
    const binaryString = atob(base64Audio)
    const bytes = new Uint8Array(binaryString.length)
    for (let i = 0; i < binaryString.length; i++) {
      bytes[i] = binaryString.charCodeAt(i)
    }

    // PCM转WAV
    const audioBuffer = pcmToWav(bytes, 24000, 1, 16)
    const blob = new Blob([audioBuffer], { type: 'audio/wav' })
    const audioUrl = URL.createObjectURL(blob)

    currentCallAudio.value = new Audio(audioUrl)
    currentCallAudio.value.onended = () => {
      URL.revokeObjectURL(audioUrl)
    }
    
    await currentCallAudio.value.play()
    console.log('实时音频播放成功')
  } catch (error) {
    console.error('实时音频播放失败:', error)
  }
}

// PCM转WAV工具函数
const pcmToWav = (pcmData: Uint8Array, sampleRate: number = 24000, channels: number = 1, bitDepth: number = 16): ArrayBuffer => {
  const length = pcmData.length
  const buffer = new ArrayBuffer(44 + length)
  const view = new DataView(buffer)
  
  // WAV文件头
  const writeString = (offset: number, string: string) => {
    for (let i = 0; i < string.length; i++) {
      view.setUint8(offset + i, string.charCodeAt(i))
    }
  }
  
  // RIFF标识符
  writeString(0, 'RIFF')
  view.setUint32(4, 36 + length, true)
  writeString(8, 'WAVE')
  
  // fmt子块
  writeString(12, 'fmt ')
  view.setUint32(16, 16, true)
  view.setUint16(20, 1, true)
  view.setUint16(22, channels, true)
  view.setUint32(24, sampleRate, true)
  view.setUint32(28, sampleRate * channels * bitDepth / 8, true)
  view.setUint16(32, channels * bitDepth / 8, true)
  view.setUint16(34, bitDepth, true)
  
  // data子块
  writeString(36, 'data')
  view.setUint32(40, length, true)
  
  // 复制PCM数据
  const dataView = new Uint8Array(buffer, 44)
  dataView.set(pcmData)
  
  return buffer
}

// 添加系统消息
const addSystemMessage = (text: string) => {
  const systemMessage: ChatMessage = {
    id: Date.now().toString(),
    type: 'ai',
    text: `🔔 ${text}`,
    timestamp: new Date()
  }
  messages.value.push(systemMessage)
  scrollToBottom()
}

// 清空消息
const clearMessages = () => {
  messages.value = []
  sessionId.value = undefined
}

// 滚动到底部
const scrollToBottom = () => {
  nextTick(() => {
    if (messagesContainer.value) {
      messagesContainer.value.scrollTop = messagesContainer.value.scrollHeight
    }
  })
}

// 格式化时间
const formatTime = (date: Date) => {
  return date.toLocaleTimeString('zh-CN', {
    hour: '2-digit',
    minute: '2-digit',
    second: '2-digit'
  })
}

// 组件挂载
onMounted(() => {
  addSystemMessage('语音助手已启动')
  // 自动尝试连接WebSocket
  initWebSocket()
})

// 组件卸载
onUnmounted(() => {
  recorder.cleanup()
  audioPlayer.cleanup()
  
  // 清理实时通话资源
  if (isPhoneCallActive.value) {
    endPhoneCall()
  }
  
  // 清理音频播放
  if (currentCallAudio.value) {
    currentCallAudio.value.pause()
    currentCallAudio.value = null
  }
  
  // 断开WebSocket连接
  wsClient?.disconnect()
})
</script>

<style scoped>
/* Page shell: vertical flex column, centered with a max width */
.voice-chat-container {
  display: flex;
  flex-direction: column;
  height: 100vh;
  max-width: 800px;
  margin: 0 auto;
  background: #f5f5f5;
}

/* Top bar: title on the left, connection status on the right */
.chat-header {
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
  color: white;
  padding: 1rem;
  display: flex;
  justify-content: space-between;
  align-items: center;
  box-shadow: 0 2px 10px rgba(0,0,0,0.1);
}

.chat-header h1 {
  margin: 0;
  font-size: 1.5rem;
}

.connection-status {
  display: flex;
  align-items: center;
  gap: 0.5rem;
}

/* Connection indicator dot: red by default (disconnected) */
.status-indicator {
  width: 10px;
  height: 10px;
  border-radius: 50%;
  background: #ff4757;
}

/* Turns green when connected */
.status-indicator.connected {
  background: #2ed573;
}

/* Message list: fills remaining height, scrolls vertically */
.chat-messages {
  flex: 1;
  overflow-y: auto;
  padding: 1rem;
  display: flex;
  flex-direction: column;
  gap: 1rem;
}

/* Message row: capped width; per-sender alignment set below */
.message {
  display: flex;
  max-width: 70%;
}

/* User messages align right */
.message.user {
  align-self: flex-end;
}

/* AI messages align left */
.message.ai {
  align-self: flex-start;
}

/* Message bubble */
.message-content {
  background: white;
  padding: 0.75rem 1rem;
  border-radius: 1rem;
  box-shadow: 0 2px 5px rgba(0,0,0,0.1);
}

/* User bubbles use the brand gradient with white text */
.message.user .message-content {
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
  color: white;
}

.message-text {
  margin-bottom: 0.25rem;
}

.message-time {
  font-size: 0.75rem;
  opacity: 0.7;
}

/* Inline audio player shown under messages that carry audio */
.audio-player {
  margin-top: 0.5rem;
  display: flex;
  align-items: center;
  gap: 0.5rem;
}

.play-button {
  background: none;
  border: none;
  font-size: 1.2rem;
  cursor: pointer;
  padding: 0.25rem;
  border-radius: 50%;
  transition: background 0.2s;
}

.play-button:hover {
  background: rgba(0,0,0,0.1);
}

.play-button:disabled {
  cursor: not-allowed;
  opacity: 0.6;
}

.audio-info {
  display: flex;
  align-items: center;
  gap: 0.5rem;
  flex: 1;
}

.audio-duration {
  font-size: 0.75rem;
  opacity: 0.8;
}

/* Playback progress track */
.audio-progress {
  flex: 1;
  height: 4px;
  background: rgba(0,0,0,0.1);
  border-radius: 2px;
  overflow: hidden;
}

/* Fill bar; width is driven inline from the player's progress ref */
.progress-bar {
  height: 100%;
  background: #667eea;
  transition: width 0.1s;
}

/* Dimmed styling for the "processing" placeholder message */
.processing {
  opacity: 0.8;
}

/* Three bouncing dots shown while the AI is processing */
.typing-indicator {
  display: flex;
  gap: 0.25rem;
  margin-bottom: 0.5rem;
}

.typing-indicator span {
  width: 8px;
  height: 8px;
  border-radius: 50%;
  background: #667eea;
  animation: typing 1.4s infinite ease-in-out;
}

/* Negative delays stagger the dots so they bounce in sequence */
.typing-indicator span:nth-child(1) {
  animation-delay: -0.32s;
}

.typing-indicator span:nth-child(2) {
  animation-delay: -0.16s;
}

@keyframes typing {
  0%, 80%, 100% {
    transform: scale(0);
    opacity: 0.5;
  }
  40% {
    transform: scale(1);
    opacity: 1;
  }
}

/* Input area pinned below the message list */
.chat-input {
  background: white;
  padding: 1rem;
  border-top: 1px solid #e1e8ed;
  display: flex;
  flex-direction: column;
  gap: 1rem;
}

.recording-controls {
  display: flex;
  align-items: center;
  gap: 1rem;
}

/* Round push-to-talk record button */
.record-button {
  width: 60px;
  height: 60px;
  border-radius: 50%;
  border: none;
  background: linear-gradient(135deg, #ff6b6b 0%, #ee5a52 100%);
  color: white;
  font-size: 1.5rem;
  cursor: pointer;
  transition: all 0.3s ease;
  box-shadow: 0 4px 15px rgba(255, 107, 107, 0.4);
}

.record-button:hover {
  transform: translateY(-2px);
  box-shadow: 0 6px 20px rgba(255, 107, 107, 0.6);
}

/* While recording: darker gradient plus pulsing glow */
.record-button.recording {
  background: linear-gradient(135deg, #ff4757 0%, #c44569 100%);
  animation: pulse 1s infinite;
}

@keyframes pulse {
  0% {
    box-shadow: 0 0 0 0 rgba(255, 71, 87, 0.7);
  }
  70% {
    box-shadow: 0 0 0 10px rgba(255, 71, 87, 0);
  }
  100% {
    box-shadow: 0 0 0 0 rgba(255, 71, 87, 0);
  }
}

/* Native disabled state */
.record-button:disabled {
  background: #ccc;
  cursor: not-allowed;
  transform: none;
  box-shadow: none;
}

/* Class-based disabled look (dimmed; used alongside :disabled) */
.record-button.disabled {
  opacity: 0.5;
  cursor: not-allowed;
}

/* Phone call button styles */
.phone-button {
  width: 70px;
  height: 70px;
  border-radius: 50%;
  border: none;
  background: linear-gradient(135deg, #10ac84 0%, #1dd1a1 100%);
  color: white;
  font-size: 1.8rem;
  cursor: pointer;
  transition: all 0.3s ease;
  box-shadow: 0 4px 15px rgba(16, 172, 132, 0.4);
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  position: relative;
}

.phone-button:hover:not(:disabled) {
  transform: translateY(-2px);
  box-shadow: 0 6px 20px rgba(16, 172, 132, 0.6);
}

/* Active call: red gradient with pulsing glow */
.phone-button.active {
  background: linear-gradient(135deg, #ff3838 0%, #ff6b6b 100%);
  box-shadow: 0 4px 15px rgba(255, 56, 56, 0.4);
  animation: phone-active 2s infinite;
}

/* Connecting: orange gradient with a gentle scale animation */
.phone-button.connecting {
  background: linear-gradient(135deg, #ffa726 0%, #ffb74d 100%);
  animation: phone-connecting 1s infinite;
}

/* Disabled: class-based and native :disabled share one rule
   (previously two duplicated rule blocks with identical declarations) */
.phone-button.disabled,
.phone-button:disabled {
  background: #ccc;
  cursor: not-allowed;
  transform: none;
  box-shadow: none;
}

/* Small caption rendered inside the round button */
.button-text {
  font-size: 0.7rem;
  margin-top: 2px;
  font-weight: 500;
  text-align: center;
  line-height: 1;
}

@keyframes phone-active {
  0% {
    box-shadow: 0 0 0 0 rgba(255, 56, 56, 0.7);
  }
  70% {
    box-shadow: 0 0 0 15px rgba(255, 56, 56, 0);
  }
  100% {
    box-shadow: 0 0 0 0 rgba(255, 56, 56, 0);
  }
}

@keyframes phone-connecting {
  0%, 100% {
    transform: scale(1);
  }
  50% {
    transform: scale(1.05);
  }
}

/* Streaming call button styles */
.streaming-button {
  background: linear-gradient(135deg, #00d4ff 0%, #0099cc 100%);
  color: white;
  border: none;
  border-radius: 50px;
  padding: 1rem;
  width: 5rem;
  height: 5rem;
  font-size: 1.5rem;
  font-weight: 600;
  cursor: pointer;
  transition: all 0.3s cubic-bezier(0.25, 0.46, 0.45, 0.94);
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  position: relative;
  box-shadow: 0 4px 15px rgba(0, 212, 255, 0.3);
  margin: 0 0.5rem;
}

.streaming-button:hover:not(:disabled) {
  transform: translateY(-2px);
  box-shadow: 0 6px 20px rgba(0, 212, 255, 0.6);
}

/* Active stream: red gradient with pulsing glow */
.streaming-button.active {
  background: linear-gradient(135deg, #ff3838 0%, #ff6b6b 100%);
  box-shadow: 0 4px 15px rgba(255, 56, 56, 0.4);
  animation: streaming-active 2s infinite;
}

/* Connecting: blue gradient with a gentle scale animation */
.streaming-button.connecting {
  background: linear-gradient(135deg, #00d4ff 0%, #00b8e6 100%);
  animation: streaming-connecting 1s infinite;
}

/* Disabled: class-based and native :disabled share one rule
   (previously two duplicated rule blocks with identical declarations) */
.streaming-button.disabled,
.streaming-button:disabled {
  background: #ccc;
  cursor: not-allowed;
  transform: none;
  box-shadow: none;
}

@keyframes streaming-active {
  0% {
    box-shadow: 0 0 0 0 rgba(0, 212, 255, 0.7);
  }
  70% {
    box-shadow: 0 0 0 15px rgba(0, 212, 255, 0);
  }
  100% {
    box-shadow: 0 0 0 0 rgba(0, 212, 255, 0);
  }
}

@keyframes streaming-connecting {
  0%, 100% {
    transform: scale(1);
  }
  50% {
    transform: scale(1.05);
  }
}

/* Smart call button styles */
.smart-call-button {
  background: linear-gradient(135deg, #28a745 0%, #20c997 100%);
  color: white;
  border: none;
  border-radius: 50px;
  padding: 1rem;
  width: 5rem;
  height: 5rem;
  font-size: 1.5rem;
  font-weight: 600;
  cursor: pointer;
  transition: all 0.3s cubic-bezier(0.25, 0.46, 0.45, 0.94);
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  position: relative;
  box-shadow: 0 4px 15px rgba(40, 167, 69, 0.3);
  margin: 0 0.5rem;
}

.smart-call-button:hover:not(:disabled) {
  transform: translateY(-2px);
  box-shadow: 0 6px 20px rgba(40, 167, 69, 0.6);
}

/* Active call: red gradient with pulsing glow */
.smart-call-button.active {
  background: linear-gradient(135deg, #dc3545 0%, #e74c3c 100%);
  box-shadow: 0 4px 15px rgba(220, 53, 69, 0.4);
  animation: smart-call-active 2s infinite;
}

/* Connecting: stays green, adds a gentle scale animation */
.smart-call-button.connecting {
  background: linear-gradient(135deg, #28a745 0%, #20c997 100%);
  animation: smart-call-connecting 1s infinite;
}

/* Disabled: class-based and native :disabled share one rule
   (previously two duplicated rule blocks with identical declarations) */
.smart-call-button.disabled,
.smart-call-button:disabled {
  background: #ccc;
  cursor: not-allowed;
  transform: none;
  box-shadow: none;
}

@keyframes smart-call-active {
  0% {
    box-shadow: 0 0 0 0 rgba(40, 167, 69, 0.7);
  }
  70% {
    box-shadow: 0 0 0 15px rgba(40, 167, 69, 0);
  }
  100% {
    box-shadow: 0 0 0 0 rgba(40, 167, 69, 0);
  }
}

@keyframes smart-call-connecting {
  0%, 100% {
    transform: scale(1);
  }
  50% {
    transform: scale(1.05);
  }
}

/* Send-speech button styles */
.send-speech-button {
  background: linear-gradient(135deg, #ffc107 0%, #fd7e14 100%);
  color: white;
  border: none;
  border-radius: 25px;
  padding: 0.8rem 1.5rem;
  font-size: 1rem;
  font-weight: 600;
  cursor: pointer;
  transition: all 0.3s ease;
  display: flex;
  align-items: center;
  justify-content: center;
  gap: 0.5rem;
  box-shadow: 0 2px 8px rgba(255, 193, 7, 0.3);
  margin: 0 0.5rem;
}

.send-speech-button:hover {
  transform: translateY(-1px);
  box-shadow: 0 4px 12px rgba(255, 193, 7, 0.5);
}

.send-speech-button:active {
  transform: translateY(0);
  box-shadow: 0 2px 6px rgba(255, 193, 7, 0.4);
}

/* Call status panel styles */
.call-status {
  flex: 1;
  margin-left: 1rem;
  padding: 0.75rem;
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
  border-radius: 0.75rem;
  color: white;
  box-shadow: 0 2px 8px rgba(102, 126, 234, 0.3);
}

.call-info {
  display: flex;
  flex-direction: column;
  gap: 0.5rem;
}

.call-duration {
  font-size: 1rem;
  font-weight: bold;
  text-align: center;
}

/* Row of per-call quality statistics */
.call-stats {
  display: flex;
  justify-content: space-around;
  gap: 0.5rem;
  flex-wrap: wrap;
}

.stat-item {
  font-size: 0.75rem;
  padding: 0.25rem 0.5rem;
  background: rgba(255, 255, 255, 0.2);
  border-radius: 0.375rem;
  white-space: nowrap;
  transition: all 0.3s ease;
}

/* Error counter pulses red to draw attention */
.stat-item.error {
  background: rgba(255, 71, 87, 0.3);
  animation: error-pulse 2s infinite;
}

@keyframes error-pulse {
  0%, 100% {
    background: rgba(255, 71, 87, 0.3);
  }
  50% {
    background: rgba(255, 71, 87, 0.6);
  }
}

.recording-info {
  display: flex;
  align-items: center;
  gap: 0.5rem;
}

.recording-duration {
  font-weight: bold;
  color: #ff4757;
}

.recording-indicator {
  display: flex;
  align-items: center;
}

/* Blinking red dot shown while recording */
.pulse {
  width: 10px;
  height: 10px;
  border-radius: 50%;
  background: #ff4757;
  animation: pulse-dot 1s infinite;
}

@keyframes pulse-dot {
  0%, 100% {
    opacity: 1;
  }
  50% {
    opacity: 0.3;
  }
}

/* Text input row */
.text-input-section {
  display: flex;
  gap: 0.5rem;
}

.text-input {
  flex: 1;
  padding: 0.75rem;
  border: 2px solid #e1e8ed;
  border-radius: 0.5rem;
  font-size: 1rem;
  outline: none;
  transition: border-color 0.2s;
}

/* Brand-colored border on focus stands in for the removed outline */
.text-input:focus {
  border-color: #667eea;
}

.send-button {
  padding: 0.75rem 1.5rem;
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
  color: white;
  border: none;
  border-radius: 0.5rem;
  cursor: pointer;
  font-weight: bold;
  transition: all 0.2s;
}

.send-button:hover {
  transform: translateY(-1px);
  box-shadow: 0 4px 10px rgba(102, 126, 234, 0.3);
}

.send-button:disabled {
  background: #ccc;
  cursor: not-allowed;
  transform: none;
  box-shadow: none;
}

.connection-controls {
  display: flex;
  gap: 0.5rem;
}

/* Outlined connect/disconnect toggle; filled when connected */
.connection-button {
  padding: 0.5rem 1rem;
  border: 2px solid #667eea;
  border-radius: 0.5rem;
  background: white;
  color: #667eea;
  cursor: pointer;
  font-weight: bold;
  transition: all 0.2s;
}

.connection-button.connected {
  background: #667eea;
  color: white;
}

.connection-button:hover {
  background: #667eea;
  color: white;
}

/* Outlined destructive button for clearing the chat */
.clear-button {
  padding: 0.5rem 1rem;
  border: 2px solid #ff4757;
  border-radius: 0.5rem;
  background: white;
  color: #ff4757;
  cursor: pointer;
  font-weight: bold;
  transition: all 0.2s;
}

.clear-button:hover {
  background: #ff4757;
  color: white;
}

.error-message {
  color: #ff4757;
  font-size: 0.875rem;
  margin-top: 0.5rem;
}
</style>
