<template>
  <div class="voice-call-window">
    <!-- 加载提示 -->
    <div 
      v-if="loading" 
      class="loading-overlay"
      v-loading="loading"
      element-loading-text="正在加载通话配置..."
      element-loading-background="rgba(0, 0, 0, 0.8)"
    >
    </div>

    <!-- 通话界面 -->
    <div v-if="!loading && dialogConfig" class="call-container">
      <!-- 标题栏 -->
      <div class="call-header">
        <div class="header-info">
          <el-icon><Phone /></el-icon>
          <span class="title">AI语音通话</span>
        </div>
        
        <!-- TTS播放指示器 -->
        <span v-if="isPlayingTTS" class="header-tts-indicator">🎵</span>
      </div>

      <!-- 通话内容 -->
      <div class="call-content">
        <!-- 语音状态显示 -->
        <div class="voice-status">
          <div class="status-avatar">
            <el-icon class="avatar-icon" :class="{ 'speaking': isRecording || isPlayingTTS }">
              <Avatar />
            </el-icon>
          </div>
          
          <div class="status-info">
            <div class="status-text">
              <span v-if="isTTSBlocking && isPlayingTTS">🔊 AI正在回复...</span>
              <span v-else-if="isTTSBlocking && isProcessing">🤔 AI正在思考...</span>
              <span v-else-if="isRecording && accumulatedText">🎤 正在聆听...</span>
              <span v-else-if="isRecording">🎤 正在聆听...</span>
              <span v-else>💬 AI助手待命中</span>
            </div>
            
            <!-- 实时识别文本显示 -->
            <div v-if="accumulatedText" class="recognition-text">
              {{ accumulatedText }}
            </div>
            
            <!-- 对话记录简要显示 -->
            <div v-if="lastMessage" class="last-message">
              <div class="message-user">您：{{ lastUserMessage }}</div>
              <div class="message-ai">AI：{{ lastMessage }}</div>
            </div>
          </div>
        </div>
        
        <div class="call-timer">
          {{ formatTime(callDuration) }}
        </div>

        <div class="call-controls">
          <el-button 
            type="danger" 
            circle 
            @click="endCall"
            class="control-btn"
            title="结束通话"
          >
            <el-icon><SwitchButton /></el-icon>
          </el-button>
          
          <el-button 
            :type="isRecording ? 'success' : 'info'" 
            circle 
            @click="toggleRecording"
            class="control-btn"
            :title="isRecording ? '停止录音' : '开始录音'"
          >
            <el-icon><Microphone v-if="!isRecording" /><VideoPause v-else /></el-icon>
          </el-button>
          
          <el-button 
            type="primary" 
            circle 
            @click="showTTSSettingsMenu"
            class="control-btn"
            title="TTS语音设置"
          >
            <el-icon><Setting /></el-icon>
          </el-button>
        </div>
      </div>
    </div>

    <!-- 使用新的TTS设置组件 -->
    <TTSSettings 
      v-model="showTTSSettings"
      :settings="ttsSettings"
      storage-key="voice-call-tts-settings"
      @settings-changed="handleTTSSettingsChanged"
    />

    <!-- 错误状态 -->
    <div v-if="!loading && !dialogConfig" class="error-container">
      <el-result
        icon="error"
        title="加载失败"
        sub-title="无法加载通话配置"
      >
        <template #extra>
          <el-button type="primary" @click="reloadConfig">重新加载</el-button>
        </template>
      </el-result>
    </div>
  </div>
</template>

<script setup lang="ts">
import { ref, onMounted, onUnmounted, nextTick } from 'vue'
import { ElMessage } from 'element-plus'
import { 
  Phone, Microphone, Setting, SwitchButton, Connection, VideoPlay, VideoPause, 
  Avatar, Check, RefreshLeft
} from '@element-plus/icons-vue'
import { dialogApi, sceneApi } from '@/api/scene'
import { agentApi } from '@/api/agent'
import TTSSettings from './TTSSettings.vue'
// 导入音频工具类
import { 
  AudioRecognitionClient, 
  AudioRecognitionResult, 
  AudioRecognitionConfig,
  checkAudioSupport,
  generateSessionId 
} from '@/utils/audioUtils'
// 添加音频API导入
import axios from 'axios'

// Type definitions
// Shape of the per-scene dialog window configuration returned by dialogApi.getDialogByScene.
interface DialogConfig {
  id: number
  scene_id: string
  // Window dimensions — assumed to be pixels; TODO confirm against the dialog API contract.
  dialog_size: {
    width: number
    height: number
  }
  // Opacity as a percentage (0-100); values < 100 dim the whole document body (see loadDialogConfig).
  transparency: number
}

// Optional per-call overrides for TTS synthesis; unset fields fall back to ttsSettings.
interface TTSConfig {
  model?: string
  volume?: number
  speechRate?: number
  pitchRate?: number
}

// Reactive state
// True while the dialog configuration is being fetched; drives the loading overlay.
const loading = ref(true)
const dialogConfig = ref<DialogConfig | null>(null)
// Elapsed call time in seconds, incremented once per second by `timer`.
const callDuration = ref(0)
let timer: number | null = null

// Speech-recognition state
const audioClient = ref<AudioRecognitionClient | null>(null)
const audioStatus = ref<'disconnected' | 'connecting' | 'connected' | 'recording' | 'stopped' | 'error'>('disconnected')
const audioSupport = ref(checkAudioSupport())
// Interim (non-final) recognition text for the current utterance.
const currentRecognitionText = ref('')
const isConnectingAudio = ref(false)
const isRecording = ref(false)
const isProcessing = ref(false)

// Recognition timing control
let lastRecognitionTime = 0
let recognitionTimer: number | null = null
const RECOGNITION_TIMEOUT = 2000 // 2s of silence before accumulated text is sent to the AI
const accumulatedText = ref('') // accumulated final recognition results (reactive)

// TTS playback gating
const isTTSBlocking = ref(false) // while true, incoming recognition results are ignored (TTS speaking / AI replying)

// TTS synthesis settings
const isTTSEnabled = ref(true)  // TTS is on by default
const ttsSettings = ref({
  model: 'sambert-zhijia-v1',  // default voice model
  volume: 50,
  speechRate: 1.0,
  pitchRate: 1.0
})
const isPlayingTTS = ref(false)
const currentAudio = ref<HTMLAudioElement | null>(null)

// TTS settings dialog visibility (v-model for the TTSSettings component)
const showTTSSettings = ref(false)

// Conversation snippets shown in the UI
const lastUserMessage = ref('')
const lastMessage = ref('')
const selectedSceneId = ref<string>('')

// Debug: log the audio-support check result
console.log('语音通话-音频支持检查结果:', audioSupport.value)

// Methods
/**
 * Loads the dialog window configuration for the scene given in the
 * `scene_id` URL query parameter and applies the configured transparency.
 * On success sets `dialogConfig`; on failure leaves it null so the
 * error view (with a retry button) renders instead.
 */
const loadDialogConfig = async () => {
  try {
    loading.value = true
    const urlParams = new URLSearchParams(window.location.search)
    const sceneId = urlParams.get('scene_id')
    
    if (!sceneId) {
      throw new Error('缺少场景ID参数')
    }
    
    const response = await dialogApi.getDialogByScene(sceneId)
    dialogConfig.value = response
    
    // Apply the configured window transparency by dimming the whole body
    if (response.transparency < 100) {
      document.body.style.opacity = (response.transparency / 100).toString()
    }
    
  } catch (error) {
    console.error('加载通话配置失败:', error)
    ElMessage.error('加载配置失败')
  } finally {
    loading.value = false
  }
}

/** Retries loading the dialog configuration (bound to the "重新加载" button). */
const reloadConfig = (): void => {
  void loadDialogConfig()
}

/**
 * Formats a duration in whole seconds as a zero-padded "MM:SS" string,
 * e.g. 0 -> "00:00", 125 -> "02:05". Minutes are not wrapped at 60.
 */
const formatTime = (seconds: number): string => {
  const pad = (n: number): string => String(n).padStart(2, '0')
  return `${pad(Math.floor(seconds / 60))}:${pad(seconds % 60)}`
}

/** Stops the call timer and closes the window (hang-up button handler). */
const endCall = () => {
  if (timer !== null) {
    clearInterval(timer)
    timer = null
  }
  window.close()
}

/**
 * Toggles microphone recording on/off. Refuses with an error toast when
 * the browser lacks the required audio capabilities.
 */
const toggleRecording = async () => {
  const support = audioSupport.value
  if (!support.supported) {
    ElMessage.error(`浏览器不支持音频录制：${support.issues.join(', ')}`)
    return
  }

  // Flip between recording and idle.
  await (isRecording.value ? stopRecording() : startRecording())
}

// Start recording
/**
 * Starts a recording session: lazily creates the recognition client,
 * connects when not already connected, then sends the start_recording
 * control message. On failure resets the recording flag and marks the
 * audio status as 'error'.
 */
const startRecording = async () => {
  isConnectingAudio.value = true
  try {
    if (!audioClient.value) {
      await initializeAudioClient()
    }

    if (audioClient.value && audioClient.value.getConnectionStatus() !== 'connected') {
      await audioClient.value.connect()
    }

    if (audioClient.value) {
      await audioClient.value.sendControlMessage({
        type: 'start_recording'
      })
      ElMessage.success('🎤 开始录音')
    }
  } catch (error: any) {
    console.error('开始录音失败:', error)
    ElMessage.error(`开始录音失败: ${error.message || '未知错误'}`)
    isRecording.value = false
    audioStatus.value = 'error'
  } finally {
    isConnectingAudio.value = false
  }
}

// Stop recording
/**
 * Sends the stop_recording control message (when a client exists) and
 * resets all recognition state: recording flags, interim text, accumulated
 * text, and the pending silence-timeout timer.
 * Fix: removed the unused `currentSessionId` local that was computed and
 * never read.
 */
const stopRecording = async () => {
  console.log('🛑 开始停止录音...')
  
  if (audioClient.value) {
    await audioClient.value.sendControlMessage({
      type: 'stop_recording'
    })
    ElMessage.info('🔇 录音已停止')
  }
  
  // Manually reset state
  isRecording.value = false
  isConnectingAudio.value = false
  currentRecognitionText.value = ''
  accumulatedText.value = '' // clear accumulated recognition text
  
  // Cancel the pending recognition-timeout timer
  if (recognitionTimer) {
    clearTimeout(recognitionTimer)
    recognitionTimer = null
  }
  
  console.log('✅ 录音已停止，累积文本已清空')
}

/** Flips the TTS on/off flag and notifies the user of the new state. */
const toggleTTS = () => {
  const enabled = !isTTSEnabled.value
  isTTSEnabled.value = enabled
  ElMessage.info(enabled ? '已开启TTS' : '已关闭TTS')
}

// TTS settings helpers
// Opens the TTS settings dialog (drives the TTSSettings component's v-model).
const showTTSSettingsMenu = () => {
  showTTSSettings.value = true
}

/** Merges settings emitted by the TTSSettings component into the live config. */
const handleTTSSettingsChanged = (newSettings: TTSConfig) => {
  ttsSettings.value = { ...ttsSettings.value, ...newSettings }
  console.log('TTS设置已更新:', newSettings)
}

/** Persists the current TTS settings to localStorage (best-effort; failures are logged). */
const saveTTSSettings = () => {
  try {
    const serialized = JSON.stringify(ttsSettings.value)
    localStorage.setItem('voice-call-tts-settings', serialized)
  } catch (error) {
    console.warn('保存TTS设置失败:', error)
  }
}

const loadTTSSettings = () => {
  try {
    const saved = localStorage.getItem('voice-call-tts-settings')
    if (saved) {
      const settings = JSON.parse(saved)
      ttsSettings.value = { ...ttsSettings.value, ...settings }
    }
  } catch (error) {
    console.warn('加载TTS设置失败:', error)
  }
}

// Audio API helpers
const audioAPI = {
  /**
   * Closes a server-side audio session so backend resources are released.
   * Re-throws the HTTP error after logging it.
   */
  async closeSession(sessionId: string) {
    try {
      const response = await axios.delete(`/api/audio/sessions/${sessionId}`)
      return response.data
    } catch (error) {
      console.error('关闭音频会话API调用失败:', error)
      throw error
    }
  },

  tts: {
    /**
     * Synthesizes `text` to speech via the backend and returns the response
     * payload (expected to carry `success` and a base64 `data_url`).
     * Per-call `config` overrides fall back to the live `ttsSettings`.
     * Fix: uses `??` instead of `||` so explicit falsy overrides
     * (e.g. `volume: 0`) are honored rather than silently replaced
     * by the defaults.
     */
    async synthesizeToBase64(text: string, config: TTSConfig = {}) {
      try {
        const params = new URLSearchParams()
        params.append('text', text)
        params.append('model', config.model ?? ttsSettings.value.model)
        params.append('volume', String(config.volume ?? ttsSettings.value.volume))
        params.append('speech_rate', String(config.speechRate ?? ttsSettings.value.speechRate))
        params.append('pitch_rate', String(config.pitchRate ?? ttsSettings.value.pitchRate))
        params.append('format', 'wav')
        params.append('sample_rate', '48000')

        const response = await axios.post('/api/audio/tts/synthesize/base64', params, {
          headers: {
            'Content-Type': 'application/x-www-form-urlencoded'
          }
        })
        return response.data
      } catch (error) {
        console.error('TTS合成失败:', error)
        throw error
      }
    }
  }
}

// Speech-recognition helpers
/**
 * Creates a fresh AudioRecognitionClient with a new session id and wires
 * up the result/error/status callbacks. Does not connect — connection
 * happens lazily in startRecording/startAutoRecording.
 */
const initializeAudioClient = async () => {
  const sessionId = generateSessionId()
  const config: AudioRecognitionConfig = {}

  audioClient.value = new AudioRecognitionClient(
    sessionId,
    config,
    handleAudioRecognitionResult,
    handleAudioError,
    handleAudioStatusChange
  )
}

/**
 * Callback for every message from the recognition client. Shows interim
 * transcripts, accumulates final ones, and (re)arms the 2-second silence
 * timer that eventually sends the accumulated text to the AI.
 */
const handleAudioRecognitionResult = async (result: AudioRecognitionResult) => {
  console.log('语音识别结果:', result)
  
  // While TTS is playing / the AI is replying, drop recognition results so
  // the assistant does not transcribe (and respond to) its own speech.
  if (isTTSBlocking.value && result.type === 'recognition_result') {
    console.log('🎵 TTS播放中，暂停处理语音识别')
    return
  }
  
  switch (result.type) {
    case 'recognition_result':
      if (result.text) {
        console.log("🎵 result.text:", result.text)
        currentRecognitionText.value = result.text
        
        // Only final results are appended to the accumulated text
        if (result.is_final) {
          if (accumulatedText.value) {
            accumulatedText.value += ' ' + result.text
            console.log("🎵 accumulatedText.value:", accumulatedText.value)
          } else {
            accumulatedText.value = result.text
          }
          
          // Clear the interim recognition display
          currentRecognitionText.value = ''
          
          console.log(`✅ 最终识别结果已累加: "${result.text}" -> 累积文本: "${accumulatedText.value}" (总长度: ${accumulatedText.value.length})`)
        } else {
          console.log(`📝 实时识别中: "${result.text}" (非最终结果)`)
        }
        
        lastRecognitionTime = Date.now()
        
        // Restart the silence timer: every new result postpones the send
        if (recognitionTimer) {
          clearTimeout(recognitionTimer)
        }
        
        // After RECOGNITION_TIMEOUT (2s) of silence, send the text to the AI
        recognitionTimer = window.setTimeout(() => {
          handleRecognitionTimeout()
        }, RECOGNITION_TIMEOUT)
      }
      break
      
    case 'recognition_started':
      console.log('🎙️ 语音识别已启动')
      break
      
    case 'recognition_ended':
      console.log('⏹️ 语音识别已结束')
      break
      
    case 'recognition_complete':
      console.log('✅ 语音识别完成')
      break
      
    case 'error':
      ElMessage.error(`❌ 识别错误: ${result.message}`)
      break
  }
}

// Silence timeout: send the accumulated transcript to the AI.
/**
 * Fired RECOGNITION_TIMEOUT ms after the last recognition result.
 * Sends the accumulated text (if any) unless TTS is currently blocking.
 * NOTE(review): accumulatedText is cleared only AFTER sendToAI resolves —
 * the clearing claimed by the comment inside sendToAI is not visible here;
 * confirm no final results can arrive and be lost during that await.
 */
const handleRecognitionTimeout = async () => {
  if (accumulatedText.value.trim() && !isTTSBlocking.value) {
    console.log(`⏰ 识别超时(2秒)，发送文本给AI: ${accumulatedText.value}`)
    
    const textToSend = accumulatedText.value.trim()
    lastUserMessage.value = textToSend
    currentRecognitionText.value = ''
    
    // Cancel the timer so it cannot fire again for this utterance
    if (recognitionTimer) {
      clearTimeout(recognitionTimer)
      recognitionTimer = null
    }
    
    // Call the AI (sets isTTSBlocking while the reply is produced)
    await sendToAI(textToSend)
    
    // Clear the accumulated text
    accumulatedText.value = ''
  } else if (isTTSBlocking.value) {
    console.log('🚫 TTS阻塞中，跳过识别超时处理')
  }
}

/** Surfaces a client-reported audio error and drops back to the error state. */
const handleAudioError = (error: string) => {
  console.error('音频错误:', error)
  ElMessage.error(`🔴 音频错误: ${error}`)
  audioStatus.value = 'error'
  isRecording.value = false
}

/**
 * Mirrors the client's connection status into local state. For the
 * 'recording', 'stopped' and 'error' states it also syncs the recording
 * flag and clears the connecting indicator.
 */
const handleAudioStatusChange = (status: 'connecting' | 'connected' | 'recording' | 'stopped' | 'error') => {
  console.log('🔄 音频状态变化:', status)
  audioStatus.value = status

  if (status === 'recording' || status === 'stopped' || status === 'error') {
    isRecording.value = status === 'recording'
    isConnectingAudio.value = false
  }
}

// Send a message to the AI
/**
 * Sends the user's transcript to the agent via the streaming API.
 * Sets isTTSBlocking up front so new recognition results are ignored
 * while the reply (and its TTS playback) is in flight; the flag is
 * lifted by the TTS playback callbacks, or here on error.
 */
const sendToAI = async (message: string) => {
  if (!message.trim()) return

  try {
    isProcessing.value = true
    isTTSBlocking.value = true // block new recognition immediately
    
    console.log('🤖 开始调用AI接口，阻止新的语音识别')
    
    // Streaming endpoint; each chunk is dispatched to handleStreamMessage
    await agentApi.sendMessageStream(message, undefined, (streamData: any) => {
      handleStreamMessage(streamData)
    }, selectedSceneId.value)
    
  } catch (error) {
    console.error('发送消息给AI失败:', error)
    ElMessage.error('发送消息失败')
    // On failure, resume recognition
    isTTSBlocking.value = false
  } finally {
    isProcessing.value = false
  }
}

// Handle a chunk of the AI streaming response
/**
 * Only 'final_result' chunks are acted on: the displayed AI message is
 * updated and (when TTS is enabled) playback is scheduled after a short
 * delay; when TTS is disabled the recognition block is lifted right away.
 * Every other chunk type is logged and ignored.
 */
const handleStreamMessage = async (streamData: any) => {
  const { type, content } = streamData
  console.log("🎵 handleStreamMessage:", streamData)    

  if (type !== 'final_result') {
    console.log('🔍 忽略的消息类型:', type)
    return
  }

  // Final complete message
  lastMessage.value = content

  console.log('🎵 收到final_result，准备播放TTS:', content)

  // 🎵 Auto-play the reply via TTS
  if (isTTSEnabled.value && content.trim()) {
    setTimeout(() => {
      console.log('🎵 开始播放TTS音频:', content)
      playTTSAudio(content)
    }, 500)
  } else if (!isTTSEnabled.value) {
    // TTS disabled: resume recognition immediately
    console.log('🔕 TTS已关闭，立即恢复语音识别')
    isTTSBlocking.value = false
  }
}

// TTS playback
/**
 * Synthesizes `text` through the backend TTS endpoint and plays the
 * returned base64 data URL. isTTSBlocking (set earlier in sendToAI)
 * stays on for the whole playback and is cleared in onended/onerror/
 * catch so speech recognition resumes afterwards.
 * NOTE(review): oncanplaythrough can fire more than once in some
 * browsers, which would call play() again — confirm this is harmless.
 */
const playTTSAudio = async (text: string) => {
  if (!isTTSEnabled.value || !text.trim()) {
    return
  }

  try {
    stopCurrentAudio()
    
    console.log('🔊 开始TTS合成:', text.substring(0, 50) + '...')
    isPlayingTTS.value = true
    // isTTSBlocking was already set in sendToAI; no need to set it again

    const result = await audioAPI.tts.synthesizeToBase64(text)
    
    if (result.success && result.data_url) {
      const audio = new Audio(result.data_url)
      currentAudio.value = audio

      audio.oncanplaythrough = () => {
        console.log('✅ 音频加载完成，开始播放')
        audio.play().catch(error => {
          console.error('播放音频失败:', error)
          ElMessage.error('播放语音失败')
          isPlayingTTS.value = false
          isTTSBlocking.value = false // lift the TTS block
        })
      }

      audio.onended = () => {
        console.log('🎵 音频播放完成，恢复语音识别处理')
        isPlayingTTS.value = false
        isTTSBlocking.value = false // lift the TTS block
        currentAudio.value = null
      }

      audio.onerror = (error) => {
        console.error('音频播放错误:', error)
        ElMessage.error('语音播放出错')
        isPlayingTTS.value = false
        isTTSBlocking.value = false // lift the TTS block
        currentAudio.value = null
      }

    } else {
      throw new Error('TTS合成失败')
    }

  } catch (error: any) {
    console.error('TTS播放失败:', error)
    ElMessage.error(`语音播放失败: ${error.message || '未知错误'}`)
    isPlayingTTS.value = false
    isTTSBlocking.value = false // lift the TTS block
  }
}

/** Halts any in-flight TTS playback and clears the playing/blocking flags. */
const stopCurrentAudio = () => {
  const audio = currentAudio.value
  if (audio) {
    audio.pause()
    audio.currentTime = 0
    currentAudio.value = null
  }
  isPlayingTTS.value = false
  isTTSBlocking.value = false // make sure the recognition block is lifted
}

// Auto-start recording
/**
 * Starts recording automatically shortly after the window opens (called
 * from onMounted). Same connect/start sequence as startRecording, minus
 * the success toast.
 * NOTE(review): near-duplicate of startRecording — consider extracting
 * the shared connect/start flow into one helper.
 */
const startAutoRecording = async () => {
  if (!audioSupport.value.supported) {
    ElMessage.error(`浏览器不支持音频录制：${audioSupport.value.issues.join(', ')}`)
    return
  }

  try {
    isConnectingAudio.value = true
    
    if (!audioClient.value) {
      await initializeAudioClient()
    }
    
    if (audioClient.value && audioClient.value.getConnectionStatus() !== 'connected') {
      await audioClient.value.connect()
    }
    
    if (audioClient.value) {
      await audioClient.value.sendControlMessage({
        type: 'start_recording'
      })
    }
    
  } catch (error: any) {
    console.error('自动开启录音失败:', error)
    ElMessage.error(`开启录音失败: ${error.message || '未知错误'}`)
    isRecording.value = false
    audioStatus.value = 'error'
  } finally {
    isConnectingAudio.value = false
  }
}



// Shut down all audio features
/**
 * Full teardown used on unmount and before the page closes: stop TTS
 * playback, ask the client to stop recording, close the backend audio
 * session, disconnect/drop the client, and reset all audio state.
 * Each step is best-effort — failures are logged and do not abort the rest.
 */
const closeAllAudio = async () => {
  // Capture the session id before the client is torn down below
  const currentSessionId = audioClient.value?.getSessionId()
  
  // Stop TTS playback
  stopCurrentAudio()
  
  // Stop recording
  if (audioClient.value) {
    try {
      await audioClient.value.sendControlMessage({
        type: 'stop_recording'
      })
    } catch (error) {
      console.error('停止录音失败:', error)
    }
  }
  
  // Close the backend audio session
  if (currentSessionId) {
    try {
      await audioAPI.closeSession(currentSessionId)
      console.log('✅ 音频会话已关闭')
    } catch (error) {
      console.error('关闭音频会话失败:', error)
    }
  }
  
  // Drop the client and reset state
  if (audioClient.value) {
    audioClient.value.disconnect()
    audioClient.value = null
  }
  
  isRecording.value = false
  isConnectingAudio.value = false
  currentRecognitionText.value = ''
  audioStatus.value = 'disconnected'
}

// Lifecycle
onMounted(async () => {
  // Load the dialog configuration
  await loadDialogConfig()
  
  // Read the scene id from the URL
  const urlParams = new URLSearchParams(window.location.search)
  selectedSceneId.value = urlParams.get('scene_id') || ''
  
  // Restore persisted TTS settings
  loadTTSSettings()
  
  // Start the call-duration timer
  timer = window.setInterval(() => {
    callDuration.value++
  }, 1000)
  
  // Auto-start voice recording
  setTimeout(async () => {
    await startAutoRecording()
  }, 1000) // 1s delay so the page is fully loaded
  
  // Tear down audio when the page is closed
  window.addEventListener('beforeunload', handleBeforeUnload)
  
  // Window title
  document.title = 'AI语音通话'
})

onUnmounted(async () => {
  // Stop the call-duration timer
  if (timer) {
    clearInterval(timer)
    timer = null
  }
  
  // Cancel the pending recognition-timeout timer
  if (recognitionTimer) {
    clearTimeout(recognitionTimer)
    recognitionTimer = null
  }
  
  // Tear down all audio features
  await closeAllAudio()
  
  // Remove the page-close listener
  window.removeEventListener('beforeunload', handleBeforeUnload)
})

// Cleanup before the page closes
// NOTE(review): browsers do not await async beforeunload handlers, so this
// teardown may be cut short — consider navigator.sendBeacon for the
// session-close call if reliable cleanup is required.
const handleBeforeUnload = async () => {
  await closeAllAudio()
}
</script>

<style scoped>
.voice-call-window {
  height: 100vh;
  display: flex;
  flex-direction: column;
  background: #f5f7fa;
}

.loading-overlay {
  position: fixed;
  top: 0;
  left: 0;
  width: 100%;
  height: 100%;
  z-index: 9999;
  display: flex;
  align-items: center;
  justify-content: center;
}

.call-container {
  height: 100%;
  display: flex;
  flex-direction: column;
}

.call-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 12px 16px;
  background: linear-gradient(90deg, #4facfe 0%, #00f2fe 100%);
  color: white;
  flex-shrink: 0;
}

.header-info {
  display: flex;
  align-items: center;
  gap: 8px;
}

.title {
  font-weight: 600;
  font-size: 14px;
}

.call-content {
  flex: 1;
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  padding: 40px 20px;
  background: white;
}

.voice-status {
  display: flex;
  flex-direction: column;
  align-items: center;
  gap: 20px;
  margin-bottom: 40px;
}

.status-avatar {
  width: 120px;
  height: 120px;
  border-radius: 50%;
  background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%);
  display: flex;
  align-items: center;
  justify-content: center;
  border: 3px solid #e1f5fe;
  transition: all 0.3s ease;
  box-shadow: 0 4px 20px rgba(79, 172, 254, 0.2);
}

.avatar-icon {
  font-size: 60px;
  color: #64b5f6;
  transition: all 0.3s ease;
}

.avatar-icon.speaking {
  color: #2196f3;
  animation: speaking 1.5s ease-in-out infinite;
}

@keyframes speaking {
  0%, 100% { transform: scale(1); color: #2196f3; }
  50% { transform: scale(1.1); color: #1976d2; }
}

.status-info {
  display: flex;
  flex-direction: column;
  align-items: center;
  gap: 12px;
  text-align: center;
}

.status-text {
  font-size: 18px;
  font-weight: 500;
  color: #333;
}

.recognition-text {
  font-size: 16px;
  color: #666;
  background: #f5f5f5;
  padding: 8px 16px;
  border-radius: 20px;
  font-style: italic;
  max-width: 300px;
  word-wrap: break-word;
}



.last-message {
  display: flex;
  flex-direction: column;
  gap: 8px;
  max-width: 400px;
  background: #fafafa;
  padding: 16px;
  border-radius: 12px;
  border: 1px solid #e0e0e0;
}

.message-user, .message-ai {
  font-size: 14px;
  line-height: 1.4;
  padding: 4px 0;
}

.message-user {
  color: #2196f3;
  font-weight: 500;
}

.message-ai {
  color: #4caf50;
  font-weight: 500;
}

.call-timer {
  font-size: 28px;
  font-weight: 600;
  color: #333;
  margin-bottom: 40px;
  font-family: 'Courier New', monospace;
  background: linear-gradient(90deg, #4facfe, #00f2fe);
  -webkit-background-clip: text;
  -webkit-text-fill-color: transparent;
  background-clip: text;
}

.call-controls {
  display: flex;
  gap: 30px;
  align-items: center;
}

.call-controls .el-button {
  width: 60px;
  height: 60px;
  font-size: 24px;
  border-radius: 50%;
  border: 2px solid;
  transition: all 0.3s ease;
  box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
}

.call-controls .el-button:hover {
  transform: translateY(-2px);
  box-shadow: 0 6px 20px rgba(0, 0, 0, 0.2);
}

.call-controls .el-button.el-button--danger {
  background: linear-gradient(135deg, #ff6b6b, #ee5a52);
  border-color: #ff5252;
}

.call-controls .el-button.el-button--success {
  background: linear-gradient(135deg, #4caf50, #45a049);
  border-color: #4caf50;
}

.call-controls .el-button.el-button--info {
  background: linear-gradient(135deg, #9e9e9e, #757575);
  border-color: #9e9e9e;
}

.call-controls .el-button.el-button--warning {
  background: linear-gradient(135deg, #ff9800, #f57c00);
  border-color: #ff9800;
}

.error-container {
  height: 100%;
  display: flex;
  align-items: center;
  justify-content: center;
  background: white;
}



/* 动画效果 */
@keyframes pulse {
  0%, 100% { opacity: 1; }
  50% { opacity: 0.6; }
}

/* 响应式设计 */
@media (max-width: 768px) {
  .call-content {
    padding: 20px 16px;
  }
  
  .status-avatar {
    width: 100px;
    height: 100px;
  }
  
  .avatar-icon {
    font-size: 50px;
  }
  
  .call-timer {
    font-size: 24px;
  }
  
  .call-controls {
    gap: 20px;
  }
  
  .call-controls .el-button {
    width: 50px;
    height: 50px;
    font-size: 20px;
  }
}
</style> 