<template>
  <div class="voice-chat-wrapper">
    <div class="voice-chat">
      <!-- Top navigation bar: back button, agent name + connection state, history -->
      <div class="chat-header">
        <div class="header-left">
          <el-button @click="$router.push('/')" icon="ArrowLeft" circle/>
        </div>
        <div class="header-center">
          <div class="agent-info" v-if="currentAgent">
            <h3>{{ currentAgent.name }}</h3>
            <ConnectionStatus/>
          </div>
        </div>
        <div class="header-right">
          <el-button @click="$router.push('/history')" icon="ChatLineSquare" circle/>
        </div>
      </div>

      <!-- Main content area -->
      <div class="main-content">
        <!-- Agent avatar and status text -->
        <div class="user-avatar-section">
          <div class="agent-avatar-container">
            <el-avatar :src="currentAgent?.avatarUrl" :size="120" class="agent-avatar">
              <el-icon size="60">
                <Avatar/>
              </el-icon>
            </el-avatar>
            <!-- Animated ring shown while a call is connected -->
            <div v-if="isCallConnected" class="call-status-ring"></div>
          </div>
          
          <div class="agent-info-section">
            <h2 class="agent-name">{{ currentAgent?.name || '智能助手' }}</h2>
            <p class="call-status-text">
              <span v-if="!isCallConnected">{{ isConnecting ? '正在连接...' : '等待连接' }}</span>
              <span v-else-if="recordingState === RecordingState.RECORDING">正在说话...</span>
              <span v-else-if="recordingState === RecordingState.PROCESSING">正在思考...</span>
              <span v-else-if="recordingState === RecordingState.PLAYING">正在回复...</span>
              <span v-else>{{ formatCallDuration(currentCallDuration) }}</span>
            </p>
          </div>
        </div>

        <!-- Waveform visualiser (rendered only while connected) -->
        <div v-if="isCallConnected" class="audio-visualizer-section">
          <canvas ref="audioCanvas" class="audio-canvas"></canvas>
          <div class="audio-info">
            <span class="voice-status" :class="{ active: isVoiceDetected }">
              {{ isVoiceDetected ? '🎤 正在说话' : '🔇 静音中' }}
            </span>
          </div>
        </div>

        <!-- Mini transcript: only the last three messages -->
        <div v-if="messages.length > 0" class="chat-messages-mini" ref="messagesContainer">
          <div
              v-for="message in messages.slice(-3)"
              :key="message.messageId"
              :class="['message-mini', message.messageType]"
          >
            <p>{{ message.content }}</p>
          </div>
        </div>
      </div>

      <!-- Bottom call controls -->
      <div class="call-controls">
        <!-- Connect button (shown while no call is active) -->
        <div v-if="!isCallConnected" class="call-connection">
          <el-button
              type="success"
              size="large"
              round
              @click="startCall"
              :disabled="isConnecting"
              :loading="isConnecting"
              class="call-button"
          >
            <el-icon size="24">
              <Phone/>
            </el-icon>
            {{ getCallButtonText() }}
          </el-button>
        </div>

        <!-- In-call control bar -->
        <div v-else class="call-active-controls">
          <div class="control-buttons">
            <!-- Mute indicator (display-only: no click handler is bound) -->
            <!-- NOTE(review): Avatar, Speaker and MicrophoneSlash are used here but not
                 imported in this file's script block — confirm they are registered
                 globally. @element-plus/icons-vue has no "MicrophoneSlash" export;
                 "Mute" is the closest existing icon. -->
            <el-button
                size="large"
                circle
                class="control-btn mute-btn"
                :class="{ active: !isVoiceDetected }"
            >
              <el-icon size="24">
                <Microphone v-if="isVoiceDetected"/>
                <MicrophoneSlash v-else/>
              </el-icon>
            </el-button>

            <!-- Hang-up button -->
            <el-button
                type="danger"
                size="large"
                circle
                @click="endCall"
                class="control-btn hangup-btn"
            >
              <el-icon size="28">
                <Cellphone/>
              </el-icon>
            </el-button>

            <!-- Speaker button (display-only: no click handler is bound) -->
            <el-button
                size="large"
                circle
                class="control-btn speaker-btn"
            >
              <el-icon size="24">
                <Speaker/>
              </el-icon>
            </el-button>
          </div>
        </div>
      </div>
    </div>

    <!-- Debug panel — pinned to the bottom of the page -->
    <div class="debug-panel-bottom">
      <div class="debug-header">
        <span>🔧 调试信息</span>
        <div class="debug-actions">
          <el-button
              @click="testMP3Playback"
              size="small"
              type="primary"
              :disabled="recordingState === RecordingState.PLAYING"
          >
            测试播放
          </el-button>
          <el-button
              @click="debugMessages = []"
              size="small"
              type="text"
          >
            清空
          </el-button>
        </div>
      </div>
      <div class="debug-messages">
        <!-- Newest entries first; key is the original index so rows stay stable -->
        <div
            v-for="(message, index) in debugMessages.slice().reverse()"
            :key="debugMessages.length - index - 1"
            class="debug-message"
        >
          {{ message }}
        </div>
        <div v-if="debugMessages.length === 0" class="debug-empty">
          暂无调试信息
        </div>
      </div>
    </div>
  </div>
</template>

<script setup lang="ts">

import {Cellphone, Microphone, Phone} from '@element-plus/icons-vue';

import {computed, nextTick, onMounted, onUnmounted, ref, watch} from 'vue'
import {onBeforeRouteLeave, useRoute, useRouter} from 'vue-router'
import {ElMessage} from 'element-plus'
import {agentApi, chatApi, uploadApi} from '@/api'
import {voiceWebSocket} from '@/services/websocket'
import {AudioPlaybackService, VoiceRecordingService} from '@/services/voice'
import ConnectionStatus from '@/components/ConnectionStatus.vue'
import type {AiAgent, ChatMessage, ChatSession} from '@/types'
import {VoiceRecordingState} from '@/types'
import {useUserStore} from "@/stores/user.ts";

const router = useRouter()
const route = useRoute()
const userStore = useUserStore();

// Expose the recording-state enum to the template under a shorter alias.
const RecordingState = VoiceRecordingState


// --- Reactive component state ---
const currentAgent = ref<AiAgent | null>(null)
const currentSession = ref<ChatSession | null>(null)
const messages = ref<ChatMessage[]>([])
const recordingState = ref<VoiceRecordingState>(VoiceRecordingState.IDLE)
const isWebSocketConnected = ref(false)
const isAiThinking = ref(false)
const playingMessageId = ref<string | null>(null)
const messagesContainer = ref<HTMLElement>()
const isCallConnected = ref(false) // true while a call is active
const isConnecting = ref(false) // true while WebSocket/session setup is in flight

// Voice service instances (recording + playback)
const voiceRecorder = new VoiceRecordingService()
const audioPlayer = new AudioPlaybackService()

// Voice-activity-detection plumbing: Web Audio graph + MediaRecorder handles
const audioContext = ref<AudioContext | null>(null)
const analyser = ref<AnalyserNode | null>(null)
const microphone = ref<MediaStreamAudioSourceNode | null>(null)
const mediaRecorder = ref<MediaRecorder | null>(null)
const isVoiceDetected = ref(false)
const silenceTimer = ref<number | null>(null)
const recordingChunks = ref<Blob[]>([])

// VAD tuning parameters
const SILENCE_THRESHOLD = 0.01 // normalized (0-1) energy below which input counts as silence
const SILENCE_DURATION = 1500 // ms of continuous silence before a segment is considered finished
const MIN_RECORDING_DURATION = 500 // minimum segment length in ms (NOTE(review): declared but never enforced in this file)

// Waveform visualisation state
const audioCanvas = ref<HTMLCanvasElement | null>(null)
const canvasContext = ref<CanvasRenderingContext2D | null>(null)
const audioLevels = ref<number[]>([]) // rolling history of levels (0-255)
const currentAudioLevel = ref(0) // latest level (0-255)

// Debug panel + call timing state
const debugMessages = ref<string[]>([])
const recordingStartTime = ref<number | null>(null)
const callStartTime = ref<number | null>(null)
const currentCallDuration = ref(0) // elapsed call time in seconds
const callDurationTimer = ref<number | null>(null)

// lamejs is injected at runtime from a CDN (see loadLamejs); declare the global.
declare const lamejs: any

// Human-readable connection label
// NOTE(review): not referenced in the template shown above — possibly unused.
const connectionStatus = computed(() => {
  return isWebSocketConnected.value ? '已连接' : '连接中...'
})

// Agent id parsed from the route params
const agentId = computed(() => {
  return Number(route.params.agentId)
})

// Current user id taken from the user store
const userId = ref<number>(userStore.userId)

// 添加调试信息
// Append a timestamped line to the on-page debug panel (capped at 20 entries),
// mirror it to the console, and keep the panel scrolled to the newest entry.
const addDebugMessage = (message: string) => {
  const ts = new Date().toLocaleTimeString()
  debugMessages.value.push(`[${ts}] ${message}`)

  // Drop the oldest entry once the cap is exceeded.
  while (debugMessages.value.length > 20) {
    debugMessages.value.shift()
  }

  console.log(`[语音调试] ${message}`)

  // The panel renders newest-first, so "latest" means scrollTop = 0.
  nextTick(() => {
    const panel = document.querySelector('.debug-messages')
    if (panel) {
      panel.scrollTop = 0
    }
  })
}

// 格式化通话时长
// Format a duration in whole seconds as a zero-padded "MM:SS" string.
const formatCallDuration = (seconds: number) => {
  const pad = (n: number) => n.toString().padStart(2, '0')
  return `${pad(Math.floor(seconds / 60))}:${pad(seconds % 60)}`
}

// 获取通话按钮文本
// Returns the label for the call button: the "connecting" text while the
// WebSocket/session setup is in flight, otherwise the idle prompt.
// Fix: the original branched on isWebSocketConnected but both arms returned
// the identical string '开始通话' — the dead branch is removed.
const getCallButtonText = () => {
  return isConnecting.value ? '连接中...' : '开始通话'
}

// 更新通话时长
// Recompute the elapsed call time (whole seconds) from the stored start
// timestamp; does nothing when no call is in progress.
const updateCallDuration = () => {
  const startedAt = callStartTime.value
  if (!startedAt) return
  currentCallDuration.value = Math.floor((Date.now() - startedAt) / 1000)
}

// 初始化
// Component initialisation: load the agent and prepare the recorder, but do
// NOT open the WebSocket or create a session yet — that happens when the
// user presses "start call".
onMounted(async () => {
  addDebugMessage('🚀 页面初始化开始...')
  
  await loadAgent()
  // Note: the WebSocket is no longer initialised automatically and no session
  // is created here; connection happens only when the user starts a call.
  // NOTE(review): setupVoiceRecorder is async but not awaited — lamejs may
  // still be loading when the first call starts; confirm that is acceptable.
  setupVoiceRecorder()
  
  // React to tab switches / minimising (ends an active call when hidden).
  document.addEventListener('visibilitychange', handleVisibilityChange)
  
  addDebugMessage('✅ 页面初始化完成，等待用户开始通话')
})

// 清理资源的通用方法
// Tear down everything this view owns: any active call, the WebSocket
// connection, both voice services, and all reactive state.
const cleanupResources = () => {
  addDebugMessage('🧹 开始清理资源...')

  // An in-progress call must end first so media resources are released.
  if (isCallConnected.value) {
    endCall()
  }

  // Drop the WebSocket connection if one is open.
  if (isWebSocketConnected.value) {
    voiceWebSocket.disconnect()
    isWebSocketConnected.value = false
  }

  // Dispose the recording/playback service instances.
  voiceRecorder.dispose()
  audioPlayer.dispose()

  // Finally return all reactive state to its initial values.
  resetToInitialState()

  addDebugMessage('✅ 资源清理完成')
}

// 重置页面状态到初始值
// Return every piece of reactive state to its initial value. The message
// transcript is intentionally kept so it survives a reset.
const resetToInitialState = () => {
  // Call / UI flags
  isCallConnected.value = false
  isConnecting.value = false
  recordingState.value = VoiceRecordingState.IDLE
  isVoiceDetected.value = false
  isAiThinking.value = false
  playingMessageId.value = null

  // Web Audio / recorder handles
  audioContext.value = null
  analyser.value = null
  microphone.value = null
  mediaRecorder.value = null
  audioLevels.value = []
  currentAudioLevel.value = 0

  // Cancel any pending timers
  if (silenceTimer.value !== null) {
    clearTimeout(silenceTimer.value)
    silenceTimer.value = null
  }
  if (callDurationTimer.value !== null) {
    clearInterval(callDurationTimer.value)
    callDurationTimer.value = null
  }

  // Call timing
  callStartTime.value = null
  currentCallDuration.value = 0
  recordingStartTime.value = null

  // Buffered recording segments
  recordingChunks.value = []

  addDebugMessage('🔄 页面状态已重置到初始状态')
}

// 路由离开守卫 - 页面切换时自动清理
// Route-leave guard: end any active call and release all resources before
// navigating away, then allow the navigation to proceed.
onBeforeRouteLeave((to, from, next) => {
  addDebugMessage(`🚪 准备离开页面，目标路由: ${to.path}`)
  
  // End the call first if one is in progress.
  // NOTE(review): cleanupResources() below also ends an active call, so this
  // branch is redundant but harmless (endCall's steps are state-guarded).
  if (isCallConnected.value) {
    addDebugMessage('⚠️ 检测到正在通话中，自动结束通话')
    endCall()
  }
  
  // Release WebSocket, services, and reactive state.
  cleanupResources()
  
  // Allow the route change.
  next()
})

// 组件卸载时清理
// Final cleanup when the component is destroyed: detach the visibility
// listener registered in onMounted, then release every remaining resource.
onUnmounted(() => {
  addDebugMessage('🔚 组件卸载，执行最终清理')
  
  // Remove the page-visibility listener added in onMounted.
  document.removeEventListener('visibilitychange', handleVisibilityChange)
  
  cleanupResources()
})

// 页面可见性变化时的处理（可选）
// Page-visibility handler: an active call is ended automatically when the
// tab becomes hidden (switched away or minimised); becoming visible again
// only logs a debug line.
const handleVisibilityChange = () => {
  if (!document.hidden) {
    addDebugMessage('👁️ 页面变为可见')
    return
  }

  addDebugMessage('👁️ 页面变为不可见')
  if (isCallConnected.value) {
    addDebugMessage('⚠️ 页面不可见时自动结束通话')
    endCall()
  }
}


// 加载智能体信息
// Fetch the agent identified by the route param. On an API error or a
// network failure, notify the user and bounce back to the home page.
const loadAgent = async () => {
  try {
    const res = await agentApi.getAgentById(agentId.value)
    if (res.code !== 200) {
      ElMessage.error('加载智能体信息失败')
      router.push('/')
      return
    }
    currentAgent.value = res.data
  } catch (error) {
    console.error('加载智能体失败:', error)
    ElMessage.error('网络错误')
    router.push('/')
  }
}

// 初始化WebSocket连接
// Open the voice WebSocket (reconnecting if already open) and register all
// message handlers: AI text replies, raw/MP3 audio payloads, processing
// progress updates, and server-side errors.
const initWebSocket = async () => {
  try {
    addDebugMessage('🔌 开始初始化WebSocket连接...')
    
    // Drop any stale connection before reconnecting.
    if (isWebSocketConnected.value) {
      addDebugMessage('🔌 检测到已有连接，先断开...')
      voiceWebSocket.disconnect()
      isWebSocketConnected.value = false
    }
    
    await voiceWebSocket.connect({
      onOpen: () => {
        isWebSocketConnected.value = true
        addDebugMessage('✅ WebSocket连接成功')
        ElMessage.success('连接成功')
      },
      onClose: () => {
        isWebSocketConnected.value = false
        addDebugMessage('⚠️ WebSocket连接已断开')
        // Only warn when the drop happens mid-call (i.e. not user-initiated).
        if (isCallConnected.value) {
          ElMessage.warning('连接已断开')
        }
      },
      onError: (error) => {
        console.error('WebSocket错误:', error)
        addDebugMessage(`❌ WebSocket连接错误: ${error}`)
        ElMessage.error('连接失败')
      }
    })

    // Register the per-type message handlers.
    voiceWebSocket.onMessage('ai_reply', handleAiReply)
    voiceWebSocket.onMessage('audio_data', handleAudioData)
    voiceWebSocket.onMessage('mp3_audio', async (data: any) => {
      // MP3 audio from the backend may arrive in several shapes; each branch
      // normalises the payload to a Blob and hands it to the async player.
      if (data && data.audioData) {
        // Base64 string payload: decode to raw bytes first.
        if (typeof data.audioData === 'string') {
          const binaryString = atob(data.audioData)
          const bytes = new Uint8Array(binaryString.length)
          for (let i = 0; i < binaryString.length; i++) {
            bytes[i] = binaryString.charCodeAt(i)
          }
          const blob = new Blob([bytes], { type: 'audio/mp3' })
          addDebugMessage(`🔊 收到AI回复音频(Base64)，大小: ${(blob.size / 1024).toFixed(2)}KB`)
          // Play asynchronously so the message loop is not blocked.
          playMP3BlobAsync(blob)
        }
        // Raw ArrayBuffer payload.
        else if (data.audioData instanceof ArrayBuffer) {
          const blob = new Blob([data.audioData], { type: 'audio/mp3' })
          addDebugMessage(`🔊 收到AI回复音频(ArrayBuffer)，大小: ${(blob.size / 1024).toFixed(2)}KB`)
          // Play asynchronously so the message loop is not blocked.
          playMP3BlobAsync(blob)
        }
        // Typed-array payload.
        else if (data.audioData instanceof Uint8Array) {
          const blob = new Blob([data.audioData], { type: 'audio/mp3' })
          addDebugMessage(`🔊 收到AI回复音频(Uint8Array)，大小: ${(blob.size / 1024).toFixed(2)}KB`)
          // Play asynchronously so the message loop is not blocked.
          playMP3BlobAsync(blob)
        }
      }
      // The whole message may itself already be a Blob.
      else if (data instanceof Blob) {
        addDebugMessage(`🔊 收到AI回复音频(Blob)，大小: ${(data.size / 1024).toFixed(2)}KB`)
        // Play asynchronously so the message loop is not blocked.
        playMP3BlobAsync(data)
      }
    })
    voiceWebSocket.onMessage('voice_processing', () => {
      isAiThinking.value = true
      recordingState.value = VoiceRecordingState.PROCESSING
      addDebugMessage('🤖 后端正在处理语音...')
    })
    voiceWebSocket.onMessage('voice_processed', () => {
      isAiThinking.value = false
      addDebugMessage('✅ 后端处理完成')
    })
    voiceWebSocket.onMessage('error', (data) => {
      ElMessage.error(data.message)
      recordingState.value = VoiceRecordingState.IDLE
      isAiThinking.value = false
    })

  } catch (error) {
    console.error('WebSocket连接失败:', error)
    ElMessage.error('无法连接到服务器')
  }
}


// 创建聊天会话
// Create a backend chat session for this user/agent pair, then announce it
// over the WebSocket so the server binds this connection to the session.
// Throws on failure so the caller (startCall) can abort the call setup.
const createChatSession = async () => {
  try {
    addDebugMessage('💬 开始创建聊天会话...')

    const response = await chatApi.createSession(userId.value, agentId.value)
    if (response.code !== 200) {
      addDebugMessage(`❌ 创建会话失败: ${response.message}`)
      ElMessage.error('创建会话失败')
      throw new Error(response.message || '创建会话失败')
    }

    currentSession.value = response.data
    addDebugMessage(`✅ 聊天会话创建成功，会话ID: ${currentSession.value.sessionId}`)

    // Bind this WebSocket connection to the freshly created session.
    voiceWebSocket.sendMessage({
      type: 'init',
      userId: userId.value,
      agentId: agentId.value.toString(),
      chatSessionId: currentSession.value.sessionId
    })

    addDebugMessage('📤 WebSocket会话初始化消息已发送')
  } catch (error) {
    console.error('创建会话失败:', error)
    addDebugMessage(`❌ 创建会话异常: ${error}`)
    ElMessage.error('创建会话失败')
    throw error
  }
}

// 加载 lamejs 脚本
// Inject the lamejs MP3 encoder from its CDN; resolves once the global is
// available (immediately if a previous load already installed it), rejects
// if the script fails to load.
const loadLamejs = () => {
  return new Promise((resolve, reject) => {
    // Already present — nothing to do.
    if ((window as any).lamejs) {
      resolve(true)
      return
    }

    const tag = document.createElement('script')
    tag.src = 'https://cdn.jsdelivr.net/npm/lamejs@1.2.1/lame.min.js'
    tag.onload = () => {
      console.log('lamejs 加载成功')
      resolve(true)
    }
    tag.onerror = error => {
      console.error('lamejs 加载失败:', error)
      reject(error)
    }
    document.head.appendChild(tag)
  })
}

// 将音频数据转换为 PCM
// Decode a compressed audio Blob (WebM/Opus here) into floating-point PCM
// samples of its first channel.
// Fix: the decode-only AudioContext is now closed when done; previously every
// call leaked a context (browsers cap the number of live AudioContexts).
const convertToPCM = async (audioBlob: Blob): Promise<Float32Array> => {
  const context = new (window.AudioContext || (window as any).webkitAudioContext)()
  try {
    const arrayBuffer = await audioBlob.arrayBuffer()
    const audioBuffer = await context.decodeAudioData(arrayBuffer)
    // The AudioBuffer's sample data stays valid after the context is closed.
    // NOTE(review): only channel 0 is kept — stereo input loses its right channel.
    return audioBuffer.getChannelData(0)
  } finally {
    context.close()
  }
}

// 将 Float32Array 转换为 Int16Array
// Convert floating-point PCM samples to signed 16-bit PCM. Each sample is
// clamped to [-1, 1]; negatives scale by 0x8000 (min -32768), non-negatives
// by 0x7fff (max 32767).
const convertFloat32ToInt16 = (float32Array: Float32Array): Int16Array => {
  const out = new Int16Array(float32Array.length)
  float32Array.forEach((sample, idx) => {
    const clamped = Math.min(1, Math.max(-1, sample))
    out[idx] = clamped < 0 ? clamped * 0x8000 : clamped * 0x7fff
  })
  return out
}

// 音频格式转换：WebM -> MP3（使用lamejs库进行客户端转换）
// Audio format conversion: WebM -> MP3, done client-side with the lamejs
// encoder (loaded from CDN by loadLamejs).
// NOTE(review): the encoder is created at a hard-coded 44.1kHz, while
// convertToPCM decodes at the AudioContext's native rate — if the two differ
// the MP3 plays at the wrong pitch/speed. Confirm on target devices.
const convertToMP3 = async (audioBlob: Blob): Promise<Blob> => {
  try {
    addDebugMessage('🔄 开始WebM到MP3转换...')

    // Step 1: decode the WebM audio into floating-point PCM samples.
    const floatPCM = await convertToPCM(audioBlob)

    // Step 2: convert float PCM to 16-bit integer PCM (required by the encoder).
    const pcmData = convertFloat32ToInt16(floatPCM)

    // Step 3: create the MP3 encoder (mono, 44.1kHz, 128kbps).
    const encoder = new lamejs.Mp3Encoder(1, 44100, 128)
    const maxSamples = 1152  // samples per MP3 frame
    const mp3Data = []

    // Step 4: encode the PCM data frame by frame.
    for (let i = 0; i < pcmData.length; i += maxSamples) {
      const pcm = pcmData.subarray(i, Math.min(i + maxSamples, pcmData.length))
      const encoded = encoder.encodeBuffer(pcm)
      if (encoded.length > 0) {
        mp3Data.push(encoded)
      }
    }

    // Step 5: flush whatever remains in the encoder's internal buffer.
    const lastData = encoder.flush()
    if (lastData.length > 0) {
      mp3Data.push(lastData)
    }

    // Step 6: join all encoded chunks into the final MP3 blob.
    const blob = new Blob(mp3Data, {type: 'audio/mp3'})
    addDebugMessage(`✅ MP3转换完成，压缩比: ${((1 - blob.size / audioBlob.size) * 100).toFixed(1)}%`)
    return blob
  } catch (error) {
    addDebugMessage(`❌ MP3转换失败: ${error}`)
    throw error
  }
}

// 绘制音波 - 改进的条柱状图格式
// Render the audio visualiser onto the canvas: a scrolling bar-chart history
// of levels, a live level indicator on the right edge, and a dashed
// silence-threshold line with a label.
const drawAudioWave = () => {
  if (!canvasContext.value || !audioCanvas.value) return

  const canvas = audioCanvas.value
  const ctx = canvasContext.value
  const width = canvas.width
  const height = canvas.height

  // Clear the canvas
  ctx.clearRect(0, 0, width, height)

  // Paint the gradient background
  const gradient = ctx.createLinearGradient(0, 0, 0, height)
  gradient.addColorStop(0, '#f8f9fa')
  gradient.addColorStop(1, '#e9ecef')
  ctx.fillStyle = gradient
  ctx.fillRect(0, 0, width, height)

  // Draw the level-history bars
  if (audioLevels.value.length > 0) {
    const barCount = Math.min(audioLevels.value.length, 40) // at most 40 bars
    const barWidth = (width - 20) / barCount // leave side margins
    const barSpacing = 2 // gap between bars
    const actualBarWidth = barWidth - barSpacing

    // Draw starting from the newest samples
    const startIndex = Math.max(0, audioLevels.value.length - barCount)

    for (let i = 0; i < barCount; i++) {
      const dataIndex = startIndex + i
      if (dataIndex >= audioLevels.value.length) break

      const level = audioLevels.value[dataIndex]
      const normalizedLevel = level / 255 // normalise to 0-1

      // Bar height: at least 2px, at most 90% of the canvas height
      const minHeight = 2
      const maxHeight = height * 0.9
      const barHeight = Math.max(minHeight, normalizedLevel * maxHeight)

      // Bar position
      const x = 10 + i * barWidth // 10px left margin
      const y = height - barHeight // bars grow up from the bottom

      // Pick a gradient colour band by volume level
      let barGradient
      if (level > 80) {
        // Loud — red gradient
        barGradient = ctx.createLinearGradient(0, y, 0, y + barHeight)
        barGradient.addColorStop(0, '#ff6b6b')
        barGradient.addColorStop(1, '#e74c3c')
      } else if (level > 40) {
        // Medium — orange gradient
        barGradient = ctx.createLinearGradient(0, y, 0, y + barHeight)
        barGradient.addColorStop(0, '#feca57')
        barGradient.addColorStop(1, '#f39c12')
      } else if (level > 10) {
        // Quiet — blue gradient
        barGradient = ctx.createLinearGradient(0, y, 0, y + barHeight)
        barGradient.addColorStop(0, '#48dbfb')
        barGradient.addColorStop(1, '#0abde3')
      } else {
        // Silent — grey gradient
        barGradient = ctx.createLinearGradient(0, y, 0, y + barHeight)
        barGradient.addColorStop(0, '#ddd')
        barGradient.addColorStop(1, '#bdc3c7')
      }

      ctx.fillStyle = barGradient

      // Draw the bar with rounded top corners
      const radius = Math.min(actualBarWidth / 4, 3)
      ctx.beginPath()
      if (ctx.roundRect) {
        // Modern browsers: native roundRect
        ctx.roundRect(x, y, actualBarWidth, barHeight, [radius, radius, 0, 0])
      } else {
        // Fallback for older browsers: plain rectangle
        ctx.rect(x, y, actualBarWidth, barHeight)
      }
      ctx.fill()

      // Glow effect on the newest bar while the user is speaking
      if (isVoiceDetected.value && i === barCount - 1) {
        ctx.shadowColor = '#e74c3c'
        ctx.shadowBlur = 8
        ctx.shadowOffsetX = 0
        ctx.shadowOffsetY = 0
        ctx.fill()
        ctx.shadowBlur = 0
      }
    }
  }

  // Live level indicator (large bar on the right edge)
  const currentLevel = currentAudioLevel.value
  const indicatorWidth = 15
  const indicatorHeight = Math.max(2, (currentLevel / 255) * height * 0.9)
  const indicatorX = width - indicatorWidth - 5
  const indicatorY = height - indicatorHeight

  // Indicator gradient: red while speaking, grey otherwise
  const indicatorGradient = ctx.createLinearGradient(0, indicatorY, 0, indicatorY + indicatorHeight)
  if (isVoiceDetected.value) {
    indicatorGradient.addColorStop(0, '#ff6b6b')
    indicatorGradient.addColorStop(0.5, '#e74c3c')
    indicatorGradient.addColorStop(1, '#c0392b')
  } else {
    indicatorGradient.addColorStop(0, '#ecf0f1')
    indicatorGradient.addColorStop(1, '#bdc3c7')
  }

  ctx.fillStyle = indicatorGradient
  ctx.beginPath()
  if (ctx.roundRect) {
    // Modern browsers: native roundRect
    ctx.roundRect(indicatorX, indicatorY, indicatorWidth, indicatorHeight, [3, 3, 0, 0])
  } else {
    // Fallback for older browsers: plain rectangle
    ctx.rect(indicatorX, indicatorY, indicatorWidth, indicatorHeight)
  }
  ctx.fill()

  // Dashed silence-threshold line
  // NOTE(review): `SILENCE_THRESHOLD * 255 / 255` is a no-op — the line sits
  // at SILENCE_THRESHOLD (0-1) of 90% canvas height; confirm that is intended.
  const thresholdY = height - (SILENCE_THRESHOLD * 255 / 255 * height * 0.9)
  ctx.strokeStyle = '#3498db'
  ctx.lineWidth = 1
  ctx.setLineDash([4, 4])
  ctx.beginPath()
  ctx.moveTo(10, thresholdY)
  ctx.lineTo(width - 25, thresholdY)
  ctx.stroke()
  ctx.setLineDash([])

  // Label next to the threshold line
  ctx.fillStyle = '#3498db'
  ctx.font = '10px Arial'
  ctx.fillText('阈值', width - 45, thresholdY - 2)
}

// 语音活动检测 - 核心算法：实时监测用户是否在说话
// Voice activity detection — the core loop. Runs once per animation frame:
// samples the analyser, updates the level/waveform UI, and drives the
// start/stop of recording segments through a silence countdown.
const detectVoiceActivity = () => {
  if (!analyser.value) return

  // Grab the current frequency-domain data
  const bufferLength = analyser.value.frequencyBinCount
  const dataArray = new Uint8Array(bufferLength)
  analyser.value.getByteFrequencyData(dataArray)

  // Average energy across all bins
  let sum = 0
  for (let i = 0; i < bufferLength; i++) {
    sum += dataArray[i]
  }
  const average = sum / bufferLength / 255  // normalise to 0-1
  const averageLevel = Math.floor(average * 255)  // back to 0-255 for display

  // Current level for the UI
  currentAudioLevel.value = averageLevel

  // Rolling waveform history (for the visualiser)
  audioLevels.value.push(averageLevel)
  if (audioLevels.value.length > 50) {
    audioLevels.value.shift()  // keep the latest 50 samples
  }

  // Repaint the live waveform
  drawAudioWave()

  // Anything above the threshold counts as speech
  const hasVoice = average > SILENCE_THRESHOLD

  if (hasVoice && !isVoiceDetected.value) {
    // 🎤 Speech onset — start a recording segment
    isVoiceDetected.value = true
    recordingStartTime.value = Date.now()
    startRecordingSegment()
    addDebugMessage(`🎤 检测到语音开始 (音量: ${averageLevel})`)

    // Cancel any pending end-of-speech countdown
    if (silenceTimer.value) {
      clearTimeout(silenceTimer.value)
      silenceTimer.value = null
    }
  } else if (!hasVoice && isVoiceDetected.value) {
    // 🔇 Silence while speaking — start the countdown to end the segment
    if (!silenceTimer.value) {
      silenceTimer.value = window.setTimeout(() => {
        const duration = recordingStartTime.value ? Date.now() - recordingStartTime.value : 0
        addDebugMessage(`🔇 语音结束，录音时长: ${(duration / 1000).toFixed(1)}秒`)
        isVoiceDetected.value = false
        stopRecordingSegment()  // stop recording; triggers MP3 conversion + upload
        silenceTimer.value = null
      }, SILENCE_DURATION)  // the segment ends only after 1.5s of continuous silence
    }
  } else if (hasVoice && silenceTimer.value) {
    // 🎤 Speech resumed — cancel the end-of-segment countdown
    clearTimeout(silenceTimer.value)
    silenceTimer.value = null
  }

  // Schedule the next frame (~60fps) while the call stays connected
  if (isCallConnected.value) {
    requestAnimationFrame(detectVoiceActivity)
  }
}

// 开始录音片段 - 当检测到语音活动时调用
// Begin capturing a speech segment. Called by the VAD loop the moment voice
// activity is detected; ignored when no recorder exists or it is already
// recording.
const startRecordingSegment = () => {
  const recorder = mediaRecorder.value
  if (!recorder || recorder.state === 'recording') return

  // Start from a clean buffer; chunks accumulate via ondataavailable.
  recordingChunks.value = []
  recordingState.value = VoiceRecordingState.RECORDING

  try {
    recorder.start()
    addDebugMessage('📹 开始录音片段')
  } catch (error) {
    addDebugMessage(`❌ 开始录音失败: ${error}`)
  }
}

// 停止录音片段 - 当检测到语音结束时调用
// Finish the current speech segment. Stopping fires the recorder's onstop
// handler (wired in startCall), which assembles the buffered chunks and
// hands them to handleRecordingData.
const stopRecordingSegment = () => {
  const recorder = mediaRecorder.value
  if (!recorder || recorder.state !== 'recording') return

  try {
    recorder.stop()
    addDebugMessage('⏹️ 停止录音片段')
  } catch (error) {
    addDebugMessage(`❌ 停止录音失败: ${error}`)
  }
}

// 处理录音数据 - 转换为MP3并发送给后端
// Convert a finished WebM recording to MP3 and upload it; the backend replies
// asynchronously over the WebSocket (see the handlers in initWebSocket).
// Empty recordings are skipped.
// Fixes: strict equality for the response-code check; removed a stale
// commented-out fetch-based upload path that duplicated uploadApi.uploadVideo.
const handleRecordingData = async (audioBlob: Blob) => {
  if (audioBlob.size === 0) {
    addDebugMessage('⚠️ 录音数据为空，跳过处理')
    return
  }

  recordingState.value = VoiceRecordingState.PROCESSING
  addDebugMessage(`🔄 开始处理录音数据，原始WebM大小: ${(audioBlob.size / 1024).toFixed(2)}KB`)

  try {
    // Step 1: transcode WebM -> MP3 on the client.
    const mp3Blob = await convertToMP3(audioBlob)
    addDebugMessage(`🎵 MP3转换完成，压缩后大小: ${(mp3Blob.size / 1024).toFixed(2)}KB`)

    // Step 2: upload the MP3. The WebSocket session id tells the backend
    // which live connection should receive the processing result.
    addDebugMessage('📤 开始上传MP3文件到后端...')
    const response = await uploadApi.uploadVideo(mp3Blob, userId.value, agentId.value, currentSession.value?.sessionId || '', voiceWebSocket.getWebSocketSessionId() || '');

    if (response.code === 200) {
      addDebugMessage('✅ MP3文件上传成功，后端开始处理')
      // The result arrives later through the WebSocket message handlers.
    } else {
      throw new Error(`上传失败: ${response.code} ${response.message}`)
    }

  } catch (error) {
    addDebugMessage(`❌ 处理录音数据失败: ${error}`)
    ElMessage.error('语音处理失败')
  } finally {
    // NOTE(review): resetting to IDLE here can race with the
    // 'voice_processing' WebSocket handler, which sets PROCESSING —
    // confirm the intended ordering.
    recordingState.value = VoiceRecordingState.IDLE
  }
}

// 设置语音录制器
// Prepare the voice pipeline: load the lamejs encoder and wire the recording
// service's callbacks (state mirroring + streaming audio to the WebSocket).
const setupVoiceRecorder = async () => {
  try {
    // The MP3 encoder must be available before any segment is transcoded.
    await loadLamejs()

    // Mirror the recorder's internal state into the component state.
    voiceRecorder.onStateChanged((state) => {
      recordingState.value = state
    })

    // Forward raw audio frames straight to the server as binary frames.
    voiceRecorder.onAudioDataReceived((audioData) => {
      voiceWebSocket.sendBinaryData(audioData)
    })
  } catch (error) {
    console.error('设置语音录制器失败:', error)
    ElMessage.error('语音组件初始化失败')
  }
}

// 开始通话
// Start a call: (re)establish the WebSocket + chat session if needed, acquire
// the microphone, build the Web Audio analysis graph and MediaRecorder, start
// the call timer and the VAD loop, then tell the backend the call has begun.
const startCall = async () => {
  addDebugMessage('📞 开始通话流程...')
  
  // Show the connecting state in the UI
  isConnecting.value = true
  
  try {
    // Re-initialise the WebSocket and session when not connected yet
    if (!isWebSocketConnected.value) {
      addDebugMessage('🔌 WebSocket未连接，重新初始化...')
      try {
        await initWebSocket()
        await createChatSession()
        addDebugMessage('✅ WebSocket和会话重新初始化完成')
      } catch (error) {
        addDebugMessage(`❌ 重新初始化失败: ${error}`)
        ElMessage.error('连接初始化失败，请稍后重试')
        isConnecting.value = false
        return
      }
    }
    
    // Double-check the connection actually came up
    if (!isWebSocketConnected.value) {
      ElMessage.warning('连接未建立，请稍后重试')
      isConnecting.value = false
      return
    }

    // Request microphone access (mono, 44.1kHz, echo/noise processing on)
    const stream = await navigator.mediaDevices.getUserMedia({
      audio: {
        channelCount: 1,
        sampleRate: 44100,
        echoCancellation: true,
        noiseSuppression: true,
      }
    })

    // Build the audio context and analyser used by the VAD loop
    audioContext.value = new (window.AudioContext || (window as any).webkitAudioContext)()
    analyser.value = audioContext.value.createAnalyser()
    analyser.value.fftSize = 2048

    // Feed the microphone into the analyser
    microphone.value = audioContext.value.createMediaStreamSource(stream)
    microphone.value.connect(analyser.value)

    // Recorder for the actual speech segments (WebM/Opus, 128kbps)
    mediaRecorder.value = new MediaRecorder(stream, {
      mimeType: 'audio/webm;codecs=opus',
      audioBitsPerSecond: 128000,
    })

    // Collect emitted chunks while a segment records
    mediaRecorder.value.ondataavailable = (event) => {
      // Ignore empty chunks
      if (event.data.size > 0) {
        recordingChunks.value.push(event.data)
      }
    }

    // When a segment stops: merge the chunks and process them
    mediaRecorder.value.onstop = async () => {
      if (recordingChunks.value.length > 0) {
        // Join all chunks into a single WebM blob
        const audioBlob = new Blob(recordingChunks.value, {type: 'audio/webm;codecs=opus'})
        // Convert to MP3 and upload to the backend
        await handleRecordingData(audioBlob)
        // Clear the chunk buffer
        recordingChunks.value = []
      }
    }

    isCallConnected.value = true
    recordingState.value = VoiceRecordingState.IDLE

    // Prepare the waveform canvas (fixed 400x100 backing resolution)
    nextTick(() => {
      if (audioCanvas.value) {
        canvasContext.value = audioCanvas.value.getContext('2d')
        audioCanvas.value.width = 400
        audioCanvas.value.height = 100
      }
    })

    // Start the call-duration ticker (updates once per second)
    callStartTime.value = Date.now()
    currentCallDuration.value = 0
    callDurationTimer.value = window.setInterval(updateCallDuration, 1000)

    // Kick off the voice-activity-detection loop
    detectVoiceActivity()

    // Tell the backend the call has started
    voiceWebSocket.sendMessage({
      type: 'voice_start',
      userId: userId.value,
      agentId: agentId.value.toString(),
      sessionId: currentSession.value?.sessionId || ''
    })

    addDebugMessage('📞 通话已连接，开始监听语音')
    ElMessage.success('通话已连接，正在监听语音...')

  } catch (error) {
    console.error('开始通话失败:', error)
    ElMessage.error('无法访问麦克风，请检查权限设置')
    // NOTE(review): if a failure occurs after getUserMedia succeeds, the
    // acquired stream's tracks are not stopped here — possible mic-held leak.
    isCallConnected.value = false
  } finally {
    // Always clear the connecting flag, success or failure
    isConnecting.value = false
  }
}

// 结束通话
// End the active call: stop playback and recording, tear down the Web Audio
// graph and the media stream, clear timers and visualiser state, then notify
// the backend. Every step is guarded, so defensive re-entry is safe.
const endCall = () => {
  const totalDuration = callStartTime.value ? Math.floor((Date.now() - callStartTime.value) / 1000) : 0

  // Stop any AI audio still playing (before the state flags are reset)
  if (recordingState.value === VoiceRecordingState.PLAYING) {
    audioPlayer.stopPlayback()
  }

  isCallConnected.value = false
  recordingState.value = VoiceRecordingState.IDLE
  isVoiceDetected.value = false

  // Clear the pending timers
  if (silenceTimer.value) {
    clearTimeout(silenceTimer.value)
    silenceTimer.value = null
  }

  if (callDurationTimer.value) {
    clearInterval(callDurationTimer.value)
    callDurationTimer.value = null
  }

  // Stop an in-flight recording segment (triggers its onstop handler)
  if (mediaRecorder.value && mediaRecorder.value.state === 'recording') {
    mediaRecorder.value.stop()
  }

  // Disconnect the microphone node from the analyser
  if (microphone.value) {
    microphone.value.disconnect()
    microphone.value = null
  }

  if (audioContext.value) {
    audioContext.value.close()
    audioContext.value = null
  }

  // Release the underlying media stream (turns the mic indicator off)
  // NOTE(review): mediaRecorder.value itself is not nulled here; it is reset
  // only during full cleanup via resetToInitialState().
  if (mediaRecorder.value && mediaRecorder.value.stream) {
    mediaRecorder.value.stream.getTracks().forEach(track => track.stop())
  }

  // Reset visualiser and timing state
  audioLevels.value = []
  currentAudioLevel.value = 0
  callStartTime.value = null
  currentCallDuration.value = 0

  // Tell the backend the call has ended
  voiceWebSocket.sendMessage({type: 'voice_end'})

  addDebugMessage(`📞 通话结束，总时长: ${formatCallDuration(totalDuration)}`)
  ElMessage.info('通话已结束')
}

// 开始录音（已废弃，现在自动开始）
// Deprecated: recording now starts automatically when a call begins; this
// stub only logs and is kept so legacy callers don't break.
const startRecording = async () => {
  console.log('录音已自动开始，无需手动操作')
}

// 停止录音（手动停止 - 已废弃）
// Deprecated manual stop: segments are normally stopped by the VAD loop;
// this remains only as an escape hatch for manual intervention.
const stopRecording = () => {
  addDebugMessage('⚠️ 手动停止录音（自动模式下不推荐）')
  const recorder = mediaRecorder.value
  if (recorder && recorder.state === 'recording') {
    recorder.stop()
  }
}

// 处理AI回复
const handleAiReply = (data: any) => {
  isAiThinking.value = false

  const aiMessage: ChatMessage = {
    id: Date.now(),
    messageId: data.messageId,
    sessionId: currentSession.value?.sessionId || '',
    messageType: 'assistant',
    content: data.content,
    audioUrl: data.audioUrl,
    audioDuration: data.duration,
    status: 0,
    createTime: new Date().toISOString(),
    updateTime: new Date().toISOString()
  }

  messages.value.push(aiMessage)
  scrollToBottom()

  // 自动播放AI回复
  if (data.audioUrl) {
    recordingState.value = VoiceRecordingState.PLAYING
  }
}

// Play a raw AI-reply audio buffer via the Web Audio API.
// Fix: the temporary AudioContext is now closed when playback ends or when
// decoding fails — previously every reply leaked a live AudioContext, and
// browsers cap the number of concurrent contexts.
const handleAudioData = async (audioData: ArrayBuffer) => {
  const AudioCtx = window.AudioContext || (window as any).webkitAudioContext
  const playbackContext = new AudioCtx()
  try {
    recordingState.value = VoiceRecordingState.PLAYING

    // slice(0) copies the buffer: decodeAudioData detaches its input
    const audioBuffer = await playbackContext.decodeAudioData(audioData.slice(0))

    const source = playbackContext.createBufferSource()
    source.buffer = audioBuffer
    source.connect(playbackContext.destination)

    // Reset state and release the context once playback finishes
    source.onended = () => {
      recordingState.value = VoiceRecordingState.IDLE
      playbackContext.close()
      console.log('音频播放完成')
    }

    source.start(0)
    console.log('开始播放AI回复音频')

  } catch (error) {
    console.error('播放音频失败:', error)
    recordingState.value = VoiceRecordingState.IDLE
    playbackContext.close()
    ElMessage.error('音频播放失败')
  }
}

// Play MP3 audio delivered as an ArrayBuffer by wrapping it in a Blob.
// Fix: use the registered MIME type 'audio/mpeg' (RFC 3003) instead of the
// non-standard 'audio/mp3'; browsers accept both, but 'audio/mpeg' is the
// standard and more widely recognized by decoders.
// NOTE(review): playMP3Blob delegates to a fire-and-forget player, so this
// catch only covers synchronous failures (e.g. Blob construction).
const playMP3Data = async (mp3Data: ArrayBuffer) => {
  try {
    console.log('播放MP3数据(ArrayBuffer)')
    const blob = new Blob([mp3Data], {type: 'audio/mpeg'})
    await playMP3Blob(blob)
  } catch (error) {
    addDebugMessage(`❌ 播放MP3失败: ${error}`)
    recordingState.value = VoiceRecordingState.IDLE
    ElMessage.error('音频播放失败')
  }
}

// Asynchronously play an MP3 Blob without blocking the caller: the work is
// deferred with setTimeout(…, 0) and this function returns immediately.
// Fix: the Blob object URL is now revoked when audio.play() rejects and we
// fall back to the Web Audio API — previously that URL leaked, because the
// element's onerror handler does not fire for a rejected play() promise.
const playMP3BlobAsync = (mp3Blob: Blob) => {
  // Return right away; playback proceeds in the background
  setTimeout(async () => {
    try {
      console.log('异步播放MP3数据(Blob)', mp3Blob.size, 'bytes')
      recordingState.value = VoiceRecordingState.PLAYING

      // Preferred path: Blob URL + HTMLAudioElement
      const audioUrl = URL.createObjectURL(mp3Blob)
      const audio = new Audio(audioUrl)

      audio.onloadstart = () => {
        addDebugMessage('📥 音频开始加载...')
      }

      audio.oncanplay = () => {
        addDebugMessage('✅ 音频可以播放')
      }

      audio.onended = () => {
        recordingState.value = VoiceRecordingState.IDLE
        URL.revokeObjectURL(audioUrl)
        addDebugMessage('✅ AI音频播放完成')
      }

      audio.onerror = (error) => {
        console.error('音频播放错误:', error)
        addDebugMessage(`❌ MP3播放失败: ${audio.error?.message || 'Unknown error'}`)
        recordingState.value = VoiceRecordingState.IDLE
        URL.revokeObjectURL(audioUrl)
        ElMessage.error('音频播放失败')
      }

      try {
        addDebugMessage('🎵 开始播放AI回复音频')
        await audio.play()
        addDebugMessage('🎵 音频播放已开始')
      } catch (playError) {
        console.error('播放失败:', playError)
        addDebugMessage(`❌ 播放失败: ${playError}`)

        // Release the Blob URL before switching players (revoking twice is
        // a harmless no-op if onerror also fired)
        URL.revokeObjectURL(audioUrl)

        // Fallback: decode and play via the Web Audio API
        await playMP3BlobWithWebAudio(mp3Blob)
      }

    } catch (error) {
      addDebugMessage(`❌ 播放MP3失败: ${error}`)
      recordingState.value = VoiceRecordingState.IDLE
      ElMessage.error('音频播放失败')
    }
  }, 0)
}

// Compatibility shim kept for existing call sites: delegates to the
// fire-and-forget async player. Resolves immediately — it does NOT wait
// for playback to actually finish.
const playMP3Blob = async (mp3Blob: Blob) => playMP3BlobAsync(mp3Blob)

// Fallback player: decode the MP3 Blob with the Web Audio API and play it.
// Rethrows on failure so the caller's error path runs.
// Fix: the AudioContext is now closed after playback ends or on failure —
// previously each fallback attempt leaked a live AudioContext.
const playMP3BlobWithWebAudio = async (mp3Blob: Blob) => {
  const AudioCtx = window.AudioContext || (window as any).webkitAudioContext
  const ctx = new AudioCtx()
  try {
    addDebugMessage('🔄 尝试使用Web Audio API播放...')

    const arrayBuffer = await mp3Blob.arrayBuffer()
    const audioBuffer = await ctx.decodeAudioData(arrayBuffer)

    const source = ctx.createBufferSource()
    source.buffer = audioBuffer
    source.connect(ctx.destination)

    // Reset state and release the context when playback completes
    source.onended = () => {
      recordingState.value = VoiceRecordingState.IDLE
      ctx.close()
      addDebugMessage('✅ Web Audio API播放完成')
    }

    source.start(0)
    addDebugMessage('🎵 Web Audio API播放已开始')

  } catch (error) {
    addDebugMessage(`❌ Web Audio API播放失败: ${error}`)
    recordingState.value = VoiceRecordingState.IDLE
    ctx.close()
    throw error
  }
}

// Toggle per-message audio playback: clicking the currently-playing message
// stops it; clicking a different message that has audio starts it.
const toggleAudioPlayback = async (message: ChatMessage) => {
  const isCurrentlyPlaying = playingMessageId.value === message.messageId

  if (isCurrentlyPlaying) {
    audioPlayer.stopPlayback()
    playingMessageId.value = null
    return
  }

  if (!message.audioUrl) {
    return
  }

  try {
    playingMessageId.value = message.messageId
    // Fetching the actual audio payload is not implemented yet
    ElMessage.info('音频播放功能开发中')
  } catch (error) {
    console.error('播放失败:', error)
    playingMessageId.value = null
  }
}

// Stop any in-progress AI audio: halt the local player, tell the backend
// to stop streaming audio, then return the UI to the idle state.
const stopAudioPlayback = () => {
  audioPlayer.stopPlayback()
  voiceWebSocket.sendMessage({type: 'stop_audio'})
  recordingState.value = VoiceRecordingState.IDLE
}

// Human-readable label for the current recording state ('' when idle or in
// any unlisted state).
const getStatusText = () => {
  const labels: Record<string, string> = {
    [VoiceRecordingState.RECORDING]: '正在录音...',
    [VoiceRecordingState.PROCESSING]: '正在处理...',
    [VoiceRecordingState.PLAYING]: '正在播放...'
  }
  return labels[recordingState.value] ?? ''
}

// Format a timestamp string as a localized two-digit hour:minute time.
const formatTime = (timeString: string) => {
  const options: Intl.DateTimeFormatOptions = {
    hour: '2-digit',
    minute: '2-digit'
  }
  return new Date(timeString).toLocaleTimeString('zh-CN', options)
}

// Render an audio duration given in milliseconds as whole seconds,
// e.g. 1500 -> "1s".
const formatDuration = (duration: number) => `${Math.floor(duration / 1000)}s`

// Scroll the message list to its newest entry once the DOM has updated.
const scrollToBottom = () => {
  nextTick(() => {
    const container = messagesContainer.value
    if (container) {
      container.scrollTop = container.scrollHeight
    }
  })
}

// Auto-scroll whenever a new message is appended to the transcript
watch(() => messages.value.length, () => scrollToBottom())

// Dev-only: generate a 1-second 440 Hz sine tone and round-trip it through
// the MP3 playback path.
// Fix: the PCM samples are now wrapped in a proper 44-byte RIFF/WAVE header.
// The old code labelled a headerless PCM buffer as 'audio/wav', which no
// decoder can play, so the test always failed.
const testMP3Playback = async () => {
  try {
    addDebugMessage('🧪 开始测试MP3播放功能...')

    const sampleRate = 44100
    const duration = 1 // seconds
    const numSamples = sampleRate * duration
    const dataSize = numSamples * 2 // 16-bit mono PCM
    const buffer = new ArrayBuffer(44 + dataSize) // WAV header + sample data
    const view = new DataView(buffer)

    // Write the RIFF/WAVE header (PCM, mono, 16-bit)
    const writeTag = (offset: number, text: string) => {
      for (let i = 0; i < text.length; i++) {
        view.setUint8(offset + i, text.charCodeAt(i))
      }
    }
    writeTag(0, 'RIFF')
    view.setUint32(4, 36 + dataSize, true)   // RIFF chunk size
    writeTag(8, 'WAVE')
    writeTag(12, 'fmt ')
    view.setUint32(16, 16, true)             // fmt chunk size
    view.setUint16(20, 1, true)              // format: PCM
    view.setUint16(22, 1, true)              // channels: mono
    view.setUint32(24, sampleRate, true)     // sample rate
    view.setUint32(28, sampleRate * 2, true) // byte rate
    view.setUint16(32, 2, true)              // block align
    view.setUint16(34, 16, true)             // bits per sample
    writeTag(36, 'data')
    view.setUint32(40, dataSize, true)       // data chunk size

    // 440 Hz sine at low volume
    for (let i = 0; i < numSamples; i++) {
      const sample = Math.sin(2 * Math.PI * 440 * i / sampleRate) * 0.1
      const intSample = Math.max(-32768, Math.min(32767, sample * 32767))
      view.setInt16(44 + i * 2, intSample, true)
    }

    const testBlob = new Blob([buffer], { type: 'audio/wav' })

    // Convert to MP3 first when lamejs is available
    if ((window as any).lamejs) {
      const mp3Blob = await convertToMP3(testBlob)
      addDebugMessage('🎵 测试播放转换后的MP3...')
      await playMP3Blob(mp3Blob)
    } else {
      addDebugMessage('⚠️ lamejs未加载，直接测试WAV播放')
      await playMP3Blob(testBlob)
    }

  } catch (error) {
    addDebugMessage(`❌ 测试播放失败: ${error}`)
    console.error('测试播放失败:', error)
  }
}
</script>

<style scoped>
/* Outer container — full-viewport gradient backdrop that centers the card */
.voice-chat-wrapper {
  height: 100vh;
  width: 100%; /* was 100vw, which overflows horizontally when a vertical scrollbar is present */
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
  display: flex;
  justify-content: center;
  align-items: center;
  padding: 20px;
  box-sizing: border-box;
}

/* Main chat card — 60% of viewport width, centered, capped at 500px */
.voice-chat {
  width: 60%;
  max-width: 500px;
  min-width: 350px;
  height: 90vh;
  background: rgba(255, 255, 255, 0.95);
  border-radius: 20px;
  box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3);
  backdrop-filter: blur(10px);
  display: flex;
  flex-direction: column;
  overflow: hidden;
}

/* Header bar: back button | agent name + status | history button */
.chat-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 20px 24px;
  background: transparent;
  position: relative;
}

/* Side columns share remaining space equally so the title stays centered */
.header-left, .header-right {
  flex: 1;
}

.header-center {
  flex: 2;
  text-align: center;
}

.header-left {
  display: flex;
  justify-content: flex-start;
}

.header-right {
  display: flex;
  justify-content: flex-end;
}

.agent-info h3 {
  margin: 0;
  font-size: 1.1rem;
  color: #2c3e50;
  font-weight: 600;
}

/* Main content area — vertically centered column */
.main-content {
  flex: 1;
  display: flex;
  flex-direction: column;
  justify-content: center;
  align-items: center;
  padding: 40px 20px;
  text-align: center;
}

/* Agent avatar plus name/status block */
.user-avatar-section {
  display: flex;
  flex-direction: column;
  align-items: center;
  margin-bottom: 40px;
}

.agent-avatar-container {
  position: relative;
  margin-bottom: 20px;
}

.agent-avatar {
  border: 4px solid rgba(255, 255, 255, 0.8);
  box-shadow: 0 8px 32px rgba(0, 0, 0, 0.1);
}

/* Green pulsing ring shown around the avatar while the call is connected */
.call-status-ring {
  position: absolute;
  top: -8px;
  left: -8px;
  right: -8px;
  bottom: -8px;
  border: 3px solid #67c23a;
  border-radius: 50%;
  animation: pulse-ring 2s infinite;
}

/* Expanding, fading ring animation for the connected-call indicator */
@keyframes pulse-ring {
  0% {
    transform: scale(1);
    opacity: 1;
  }
  100% {
    transform: scale(1.1);
    opacity: 0;
  }
}

.agent-info-section {
  text-align: center;
}

.agent-name {
  font-size: 1.8rem;
  font-weight: 600;
  color: #2c3e50;
  margin: 0 0 8px 0;
}

.call-status-text {
  font-size: 1rem;
  color: #7f8c8d;
  margin: 0;
}

/* Waveform visualizer card (rendered only while the call is connected) */
.audio-visualizer-section {
  width: 100%;
  max-width: 300px;
  margin-bottom: 30px;
  padding: 20px;
  background: rgba(255, 255, 255, 0.5);
  border-radius: 16px;
  backdrop-filter: blur(10px);
}

.audio-canvas {
  width: 100%;
  height: 80px;
  border-radius: 8px;
  background: rgba(255, 255, 255, 0.8);
}

.audio-info {
  display: flex;
  justify-content: center;
  margin-top: 12px;
}

.voice-status {
  padding: 6px 16px;
  border-radius: 20px;
  background: rgba(255, 255, 255, 0.8);
  color: #6c757d;
  font-size: 0.9rem;
  transition: all 0.3s ease;
}

/* Highlighted (red, pulsing) while voice activity is detected */
.voice-status.active {
  background: #e74c3c;
  color: white;
  animation: pulse 1s infinite;
}

/* Compact chat transcript */
.chat-messages-mini {
  width: 100%;
  max-width: 280px;
  max-height: 120px;
  overflow-y: auto;
  padding: 16px;
  background: rgba(255, 255, 255, 0.3);
  border-radius: 12px;
  backdrop-filter: blur(5px);
}

.message-mini {
  margin-bottom: 8px;
  padding: 8px 12px;
  border-radius: 12px;
  font-size: 0.85rem;
  line-height: 1.4;
}

.message-mini.user {
  background: rgba(64, 158, 255, 0.8);
  color: white;
  text-align: right;
}

.message-mini.assistant {
  background: rgba(255, 255, 255, 0.8);
  color: #2c3e50;
  text-align: left;
}

.message-mini p {
  margin: 0;
}

/* Bottom call-control area */
.call-controls {
  padding: 30px 20px;
  background: transparent;
}

/* "Start call" button container */
.call-connection {
  text-align: center;
}

.call-button {
  width: 200px;
  height: 56px;
  font-size: 1.1rem;
  font-weight: 600;
  border-radius: 28px;
  box-shadow: 0 8px 24px rgba(103, 194, 58, 0.3);
  transition: all 0.3s ease;
}

.call-button:hover {
  transform: translateY(-2px);
  box-shadow: 0 12px 32px rgba(103, 194, 58, 0.4);
}

/* In-call controls (mute / hang up / speaker) */
.call-active-controls {
  display: flex;
  justify-content: center;
}

.control-buttons {
  display: flex;
  justify-content: center;
  align-items: center;
  gap: 40px;
}

.control-btn {
  width: 64px;
  height: 64px;
  border-radius: 50%;
  border: none;
  box-shadow: 0 6px 20px rgba(0, 0, 0, 0.15);
  transition: all 0.3s ease;
  position: relative;
}

.control-btn:hover {
  transform: translateY(-2px);
  box-shadow: 0 8px 25px rgba(0, 0, 0, 0.2);
}

.mute-btn {
  background: rgba(255, 255, 255, 0.9);
  color: #6c757d;
}

.mute-btn.active {
  background: #f56565;
  color: white;
}

.hangup-btn {
  width: 80px;
  height: 80px;
  background: #e53e3e;
  color: white;
}

.hangup-btn:hover {
  background: #c53030;
}

.speaker-btn {
  background: rgba(255, 255, 255, 0.9);
  color: #6c757d;
}

/* Gentle scale pulse used by the active voice-status pill */
@keyframes pulse {
  0% {
    transform: scale(1);
  }
  50% {
    transform: scale(1.05);
  }
  100% {
    transform: scale(1);
  }
}

/* Debug log panel pinned to the bottom of the viewport */
.debug-panel-bottom {
  position: fixed;
  bottom: 0;
  left: 50%;
  transform: translateX(-50%);
  width: 60%;
  max-width: 500px;
  min-width: 350px;
  border-top: 2px solid rgba(255, 255, 255, 0.3);
  background: rgba(0, 0, 0, 0.8);
  backdrop-filter: blur(10px);
  max-height: 200px;
  overflow: hidden;
  z-index: 1000;
  border-radius: 12px 12px 0 0;
  box-shadow: 0 -8px 32px rgba(0, 0, 0, 0.3);
}

.debug-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 8px 16px;
  background: rgba(255, 255, 255, 0.1);
  border-bottom: 1px solid rgba(255, 255, 255, 0.2);
  font-size: 0.9rem;
  font-weight: 500;
  color: white;
}

.debug-actions {
  display: flex;
  gap: 8px;
  align-items: center;
}

.debug-messages {
  max-height: 140px;
  overflow-y: auto;
  padding: 8px 16px;
  background: transparent;
}

.debug-message {
  font-size: 0.75rem;
  font-family: monospace;
  color: rgba(255, 255, 255, 0.9);
  margin-bottom: 4px;
  padding: 2px 4px;
  background: rgba(255, 255, 255, 0.1);
  border-radius: 4px;
  word-break: break-all;
}

.debug-empty {
  text-align: center;
  color: rgba(255, 255, 255, 0.6);
  font-size: 0.8rem;
  padding: 20px;
}

/* Slim custom scrollbar for the debug log */
.debug-messages::-webkit-scrollbar {
  width: 4px;
}

.debug-messages::-webkit-scrollbar-track {
  background: rgba(255, 255, 255, 0.1);
  border-radius: 2px;
}

.debug-messages::-webkit-scrollbar-thumb {
  background: rgba(255, 255, 255, 0.3);
  border-radius: 2px;
}

.debug-messages::-webkit-scrollbar-thumb:hover {
  background: rgba(255, 255, 255, 0.5);
}
