<template>
  <div class="chat-container">
    <!-- Top navigation bar: back button, app title, conversation actions -->
    <header class="nav-bar">
      <button class="nav-btn" @click="$router.push('/')">
        <span class="btn-icon">←</span>
        返回首页
      </button>
      <h1 class="logo">智能对话</h1>
      <div class="nav-actions">
        <button class="nav-btn icon-btn" title="清空对话" @click="clearConversation">
          <span class="btn-icon">🗑️</span>
        </button>
        <button class="nav-btn icon-btn" title="导出对话" @click="exportConversation">
          <span class="btn-icon">📤</span>
        </button>
        <button class="nav-btn">设置</button>
      </div>
    </header>

    <!-- Chat header: current role identity, voice toggle and call entry -->
    <div v-if="currentRole" class="chat-header">
      <div class="role-info">
        <img :src="currentRole.avatar" :alt="currentRole.name" class="role-avatar" />
        <div class="role-details">
          <h2>{{ currentRole.name }}</h2>
          <p class="role-status">
            <span class="status-dot online"></span>
            在线
          </p>
        </div>
      </div>
      <div class="header-actions">
        <button class="action-btn" @click="toggleRoleInfo">角色信息</button>
        <button class="action-btn" @click="toggleVoiceMode">
          {{ voiceMode ? '🔊 语音开' : '🔈 语音关' }}
        </button>
        <button
          class="action-btn call-btn"
          @click="openCallDialog"
        >
          ☎️ 通话
        </button>
      </div>
    </div>

    <!-- Role info panel (toggled from the chat header) -->
    <div v-if="showRoleInfo && currentRole" class="role-info-panel">
      <div class="panel-header">
        <h3>角色详细信息</h3>
        <button class="close-btn" @click="showRoleInfo = false">×</button>
      </div>
      <div class="panel-content">
        <img :src="currentRole.avatar" :alt="currentRole.name" class="info-avatar" />
        <div class="info-details">
          <h4>{{ currentRole.name }}</h4>
          <p class="info-desc">{{ currentRole.description }}</p>
          <div class="info-stats">
            <div class="stat-item">
              <span class="stat-label">对话次数</span>
              <span class="stat-value">{{ messageCount }}</span>
            </div>
            <div class="stat-item">
              <span class="stat-label">今日对话</span>
              <span class="stat-value">{{ todayMessageCount }}</span>
            </div>
          </div>
        </div>
      </div>
    </div>

    <!-- Message list -->
    <div ref="messagesContainer" class="messages-container">
      <!-- Banner shown while continuous call mode is active -->
      <div v-if="isCallMode && isInCall" class="call-status-banner">
        <div class="call-status-content">
          <span class="call-icon">📞</span>
          <span class="call-text">通话模式已开启 - 语音将自动连续录制</span>
          <span class="call-indicator"></span>
        </div>
      </div>

      <!-- Empty-state welcome card with quick suggestions -->
      <div class="welcome-message" v-if="messages.length === 0">
        <div class="welcome-content">
          <img :src="currentRole?.avatar" :alt="currentRole?.name" class="welcome-avatar" />
          <h3>开始与 {{ currentRole?.name }} 对话</h3>
          <p>{{ currentRole?.description }}</p>
          <div class="suggestions">
            <button
              v-for="(suggestion, index) in quickSuggestions"
              :key="index"
              class="suggestion-btn"
              @click="useSuggestion(suggestion)"
            >
              {{ suggestion }}
            </button>
          </div>
        </div>
      </div>

      <!-- Render only messages with non-empty content.
           NOTE(review): filtering inline re-runs on every render and the
           index key shifts when items are filtered out; consider a computed
           visibleMessages keyed by a stable id. -->
      <div v-for="(message, index) in messages.filter(msg => msg.content.trim() !== '')" :key="index" :class="['message', message.role]">
        <div class="message-avatar">
          <img
            :src="message.role === 'user' ? userAvatar : currentRole?.avatar"
            :alt="message.role === 'user' ? '我' : currentRole?.name"
            class="avatar-img"
          />
        </div>
        <div class="message-content">
          <div class="content-text">{{ message.content }}</div>
        </div>
      </div>

      <!-- Typing indicator while waiting for the first AI chunk -->
      <div v-if="isTyping" class="typing-indicator">
        <div class="typing-avatar">
          <img :src="currentRole?.avatar" alt="" class="avatar-img" />
        </div>
        <div class="typing-content">
          <div class="typing-dots">
            <span></span>
            <span></span>
            <span></span>
          </div>
        </div>
      </div>
    </div>

    <!-- Input area: tool row, auto-growing textarea, footer actions.
         maxlength added so the 500-char limit shown in the counter is enforced. -->
    <div class="input-area">
      <div class="input-tools">
        <button class="tool-btn" title="添加表情" @click="addEmoji">😊</button>
        <button class="tool-btn" title="上传图片" @click="uploadImage">📷</button>
        <button class="tool-btn" title="发送文件" @click="sendFile">📎</button>
        <button
          class="tool-btn voice-btn"
          :class="{
            'recording': isRecording,
            'processing': isProcessing,
            'permission-denied': microphonePermission === 'denied',
            'call-mode': isCallMode && isInCall
          }"
          :title="getVoiceButtonTitle()"
          @click="toggleVoiceInput"
          :disabled="isProcessing || microphonePermission === 'denied' || (isCallMode && isInCall)"
        >
          <span v-if="microphonePermission === 'denied'" class="permission-denied-indicator">🚫</span>
          <span v-else-if="isRecording" class="recording-indicator">🔴</span>
          <span v-else-if="isProcessing" class="processing-indicator">⏳</span>
          <span v-else>🎤</span>
          <span v-if="isRecording" class="recording-time">{{ formatRecordingTime(recordingTime) }}</span>
          <div v-if="isProcessing" class="progress-bar">
            <div class="progress-fill" :style="{ width: uploadProgress + '%' }"></div>
          </div>
        </button>
      </div>
      <div class="input-wrapper">
        <textarea
          v-model="inputText"
          @keydown.enter.exact.prevent="sendMessage"
          @input="autoResize"
          placeholder="输入消息... (Enter发送，Shift+Enter换行)"
          class="input-box"
          ref="textArea"
          rows="1"
          maxlength="500"
        />
        <button class="send-btn" @click="sendMessage" :disabled="!inputText.trim()">
          <span class="send-icon">↑</span>
        </button>
      </div>
      <div class="input-footer">
        <span class="char-count">{{ inputText.length }}/500</span>
        <div class="footer-actions">
          <button class="footer-btn" @click="clearInput">清空</button>
          <button class="footer-btn" @click="insertTemplate">模板</button>
        </div>
      </div>
    </div>

    <!-- Voice-call modal dialog -->
    <div v-if="showCallDialog" class="call-dialog-overlay" @click.self="closeCallDialog">
      <div class="call-dialog">
        <div class="call-dialog-header">
          <h3>🔊 语音通话</h3>
          <button class="close-dialog-btn" @click="closeCallDialog">×</button>
        </div>

        <div class="call-dialog-content">
          <div class="call-avatar-section">
            <img :src="currentRole?.avatar" :alt="currentRole?.name" class="call-avatar" />
            <h4>{{ currentRole?.name }}</h4>
            <p class="call-status-text">
              {{ getCallStatusText() }}
            </p>
          </div>

          <div class="call-controls">
            <button
              class="call-speak-btn"
              :class="{
                'speaking': isDialogRecording,
                'processing': isDialogProcessing,
                'waiting': isWaitingForResponse
              }"
              @click="toggleDialogRecording"
              :disabled="isDialogProcessing || isWaitingForResponse"
            >
              <span class="speak-btn-icon">
                {{ getDialogButtonIcon() }}
              </span>
              <span class="speak-btn-text">
                {{ getDialogButtonText() }}
              </span>
              <div v-if="isDialogRecording" class="recording-time-display">
                {{ formatRecordingTime(dialogRecordingTime) }}
              </div>
              <div v-if="isDialogProcessing" class="progress-indicator">
                <div class="progress-fill" :style="{ width: dialogUploadProgress + '%' }"></div>
              </div>
            </button>
          </div>

          <!-- In-call conversation history -->
          <div class="call-conversation">
            <div
              v-for="(msg, index) in callMessages"
              :key="index"
              class="call-message"
              :class="msg.role"
            >
              <!-- User turn (shows recognized speech text) -->
              <div v-if="msg.role === 'user'" class="call-msg-content user-voice">
                <div class="voice-icon">🎤</div>
                <div class="voice-text">{{ msg.content }}</div>
              </div>

              <!-- AI voice bubble -->
              <div
                v-else-if="msg.type === 'voice'"
                class="call-msg-content voice-bubble"
                :class="{ playing: msg.isPlaying }"
                @click="playVoiceMessage(msg, index)"
              >
                <div class="voice-play-icon">
                  {{ msg.isPlaying ? '⏹️' : '▶️' }}
                </div>
                <div class="voice-waves">
                  <div class="wave" v-for="i in 5" :key="i"></div>
                </div>
                <div class="voice-duration">{{ formatDuration(msg.duration || 3) }}"</div>
              </div>

              <!-- AI plain-text fallback -->
              <div v-else class="call-msg-content">{{ msg.content }}</div>
            </div>
          </div>
        </div>
      </div>
    </div>
  </div>
</template>

<script setup lang="ts">
import { ref, watch, nextTick, onMounted, computed } from 'vue'
import { useRoute, useRouter } from 'vue-router'
import { useConversationStore } from '../stores/conversation'
import { useRoleStore } from '@/stores/role'
import Recorder from 'js-audio-recorder'
// 使用更现代的lamejs版本进行MP3编码
import * as lamejs from '@breezystack/lamejs'
import { mockSpeechRecognition, uploadAudioForRecognition, validateAudioFile, callTextChatAPI, type UploadProgress, type TextChatResult, type StreamCallback } from '@/services/speechService'

// Shape of a single chat message stored in the conversation store.
interface Message {
  role: 'user' | 'assistant'
  content: string
}

// Router / store handles shared by this component.
const route = useRoute()
const router = useRouter()
const conversationStore = useConversationStore()
const roleStore = useRoleStore()

// Resolve the role for this chat from the route parameter.
const roleId = ref(route.params.roleId as string)
const currentRole = ref(roleStore.getRoleById(roleId.value))

// Unknown role id: go back to the home page. Navigation is async, so the
// rest of setup still runs; the template guards with v-if / optional chaining.
if (!currentRole.value) {
  router.push('/')
}

// Main chat view UI state.
const inputText = ref('')
const messagesContainer = ref<HTMLElement | null>(null) // message scroll container
const textArea = ref<HTMLTextAreaElement | null>(null) // auto-resizing input box
const showRoleInfo = ref(false) // role info panel visibility
const voiceMode = ref(false) // header voice on/off toggle
const isTyping = ref(false) // typing indicator while waiting for the AI

// Continuous call mode state (main view).
const isCallMode = ref(false)
const isInCall = ref(false)

// Call dialog (modal) state.
const showCallDialog = ref(false)
const isDialogRecording = ref(false)
const isDialogProcessing = ref(false)
const isWaitingForResponse = ref(false)
const callMessages = ref<CallMessage[]>([])

// Message shape used only by the call dialog.
interface CallMessage {
  role: 'user' | 'assistant'
  content: string
  type?: 'text' | 'voice'
  audioUrl?: string
  duration?: number
  isPlaying?: boolean
}

// Recording state dedicated to the call dialog.
const dialogRecordingTime = ref(0)
const dialogUploadProgress = ref(0)
const dialogMediaRecorder = ref<MediaRecorder | null>(null)
const dialogAudioChunks = ref<Blob[]>([])
const dialogRecordingTimer = ref<number | null>(null)

// Recording state for the main input bar.
const isRecording = ref(false)
const isProcessing = ref(false)
const recordingTime = ref(0)
const uploadProgress = ref(0)
const mediaRecorder = ref<MediaRecorder | null>(null)
const audioChunks = ref<Blob[]>([])
const recordingTimer = ref<number | null>(null)
const microphonePermission = ref<'granted' | 'denied' | 'prompt'>('prompt')

// js-audio-recorder instance.
// NOTE(review): appears unused in this file's visible code (recording goes
// through MediaRecorder below) — confirm before removing.
const recorder = new Recorder({
  sampleBits: 16, // sample width: 8 or 16 bits (default 16)
  sampleRate: 48000, // sample rate in Hz
  numChannels: 1 // channels: 1 or 2 (default 1)
})

// Messages for the current conversation.
// NOTE(review): the store does not partition messages per role, so every
// role currently shares the same list — confirm whether that is intended.
const messages = computed(() => {
  return conversationStore.messages
})
const messageCount = computed(() => messages.value.length)
// NOTE(review): hard-coded stub — messages carry no timestamps here, so a
// real "today" count cannot be derived yet.
const todayMessageCount = computed(() => {
  // const today = new Date().toDateString()
  return 1
})

// Fixed avatar used for the local user's messages.
const userAvatar =
  'https://images.unsplash.com/photo-1535713875002-d1d0cf377fde?w=40&h=40&fit=crop&crop=face'

// Quick-start suggestions defined on the role (empty list if none).
const quickSuggestions = computed(() => currentRole.value?.quickSuggestions || [])

// Grow the textarea with its content, capped at 120px tall.
const autoResize = () => {
  nextTick(() => {
    const el = textArea.value
    if (!el) return
    el.style.height = 'auto'
    const capped = Math.min(el.scrollHeight, 120)
    el.style.height = `${capped}px`
  })
}

// Send the typed message: push the user turn to the store, then stream the
// assistant reply from the text-chat API (SSE), falling back to a locally
// generated reply on failure.
const sendMessage = async () => {
  if (!inputText.value.trim()) return

  const userMessage: Message = {
    role: 'user',
    content: inputText.value,
  }

  // Persist the user turn in the conversation store.
  conversationStore.addMessage({
    role: userMessage.role,
    content: userMessage.content,
  })

  const messageContent = inputText.value
  inputText.value = ''
  autoResize()

  // Show the typing indicator until the first streamed chunk arrives.
  isTyping.value = true

  try {
    console.log('开始文本聊天请求:', messageContent)

    // Do not add an empty assistant message up front; create it lazily when
    // the first SSE chunk arrives (avoids rendering an empty bubble).
    let aiMessageIndex = -1

    // Streaming text-chat call; the callback fires once per SSE chunk.
    const result = await callTextChatAPI(
      messageContent,
      currentRole.value?.name || '助手',
      (chunk: string, isComplete: boolean) => {
        console.log('收到SSE数据块:', chunk, '是否完成:', isComplete)

        // First chunk: create the assistant message and hide the typing
        // indicator so its avatar is not shown twice.
        if (aiMessageIndex === -1 && chunk && !isComplete) {
          const aiMessage: Message = {
            role: 'assistant',
            content: chunk,
          }
          conversationStore.addMessage(aiMessage)
          aiMessageIndex = conversationStore.messages.length - 1
          isTyping.value = false
          console.log('创建AI消息，索引:', aiMessageIndex, '关闭加载状态')
        } else if (aiMessageIndex >= 0 && !isComplete) {
          // Subsequent chunks: append to the streaming message.
          conversationStore.messages[aiMessageIndex].content += chunk
        }
      }
    )

    console.log('文本聊天API结果:', result)

    if (result.success) {
      // If the API also returned a TTS audio URL, play it shortly after.
      // NOTE(review): playAudio is not defined in this chunk — presumably
      // declared further down the file; verify.
      if (result.audioUrl) {
        console.log('检测到AI音频回复，开始播放:', result.audioUrl)
        setTimeout(() => {
          playAudio(result.audioUrl!)
        }, 1000)
      }
    } else {
      // API reported failure: overwrite any streamed content with a locally
      // generated fallback reply.
      if (aiMessageIndex >= 0 && aiMessageIndex < conversationStore.messages.length) {
        conversationStore.messages[aiMessageIndex].content = generateAIResponse(messageContent, currentRole.value?.name || '助手')
      }
    }

  } catch (error) {
    console.error('文本聊天API调用失败:', error)
    // Network/API error: add a locally generated fallback reply.
    const aiMessage: Message = {
      role: 'assistant',
      content: generateAIResponse(messageContent, currentRole.value?.name || '助手'),
    }
    conversationStore.addMessage(aiMessage)
  } finally {
    // Always clear the typing indicator.
    isTyping.value = false
  }
}

// Local fallback reply generator, keyed by role name. Known roles get
// role-flavoured templates; unknown roles fall back to generic replies.
// One candidate is picked uniformly at random.
const generateAIResponse = (userMessage: string, roleName: string): string => {
  const responses: { [key: string]: string[] } = {
    苏格拉底: [
      `亲爱的朋友，你问"${userMessage}"这个问题很有趣。让我用苏格拉底式的提问来引导你思考：你认为这个问题的本质是什么？`,
      `作为一个追求真理的哲学家，我对"${userMessage}"这个问题很感兴趣。真理往往需要通过对话来发现，让我们一起来探讨吧。`,
      `你提出了"${userMessage}"这个问题。在回答之前，我想先问你：你自己对这个问题的看法是什么？`,
    ],
    吕布: [
      `哈哈哈！"${userMessage}"？在我吕布面前，任何问题都不堪一击！我的方天画戟所向披靡！`,
      `你问"${userMessage}"？我吕布征战沙场多年，这种问题对我来说小菜一碟！`,
      `"${userMessage}"？在我面前提这种问题，有意思！让我用我的武艺来为你解答！`,
    ],
  }

  const fallback = [
    `关于"${userMessage}"这个问题，我很乐意帮助你。`,
    `你问"${userMessage}"，这是一个很好的问题。`,
    `对于"${userMessage}"，我有一些想法可以和你分享。`,
  ]

  const pool = responses[roleName] ?? fallback
  const pick = Math.floor(Math.random() * pool.length)
  return pool[pick]
}

// Wipe the whole conversation, but only after the user confirms.
const clearConversation = () => {
  const ok = confirm('确定要清空对话记录吗？')
  if (!ok) return
  conversationStore.clearConversation()
}

const exportConversation = () => {
  const content = messages.value
    .map(
      (msg) => `${msg.role === 'user' ? '我' : currentRole.value?.name || '助手'}: ${msg.content}`,
    )
    .join('\n')

  const blob = new Blob([content], { type: 'text/plain' })
  const url = URL.createObjectURL(blob)
  const a = document.createElement('a')
  a.href = url
  a.download = `与${currentRole.value?.name || '助手'}的对话.txt`
  a.click()
  URL.revokeObjectURL(url)
}

// Show/hide the role info panel.
const toggleRoleInfo = () => {
  const next = !showRoleInfo.value
  showRoleInfo.value = next
}

// Flip the voice (TTS) mode flag and tell the user the new state.
const toggleVoiceMode = () => {
  const enabled = !voiceMode.value
  voiceMode.value = enabled
  alert(enabled ? '语音模式已开启' : '语音模式已关闭')
}

// Copy a quick-suggestion into the input box and focus it for editing.
const useSuggestion = (suggestion: string) => {
  inputText.value = suggestion
  textArea.value?.focus()
}

// Empty the input box and collapse the textarea back to one row.
const clearInput = () => {
  inputText.value = ''
  autoResize()
}

// Replace the input with a numbered question template, resize, and focus.
const insertTemplate = () => {
  inputText.value = '请问你能帮我解决以下问题：\n1. \n2. \n3. '
  autoResize()
  textArea.value?.focus()
}

// Append a smiley to the input and return focus to the textarea.
const addEmoji = () => {
  inputText.value = inputText.value + ' 😊'
  textArea.value?.focus()
}

// Placeholder: image upload is not implemented yet.
const uploadImage = () => {
  alert('图片上传功能开发中...')
}

// Placeholder: file sending is not implemented yet.
const sendFile = () => {
  alert('文件发送功能开发中...')
}

// Query the Permissions API for microphone state and cache it.
// Some browsers reject the 'microphone' permission name; in that case we
// optimistically report access as available.
const checkMicrophonePermission = async () => {
  try {
    const status = await navigator.permissions.query({ name: 'microphone' as PermissionName })
    microphonePermission.value = status.state
    return status.state === 'granted'
  } catch (error) {
    console.warn('无法检查麦克风权限:', error)
    return true // cannot check — assume permission is granted
  }
}

// Trigger the browser's microphone prompt by opening (and immediately
// closing) a test stream; records the outcome in microphonePermission.
const requestMicrophonePermission = async (): Promise<boolean> => {
  try {
    const probe = await navigator.mediaDevices.getUserMedia({ audio: true })
    // Release the test stream right away.
    for (const track of probe.getTracks()) {
      track.stop()
    }
    microphonePermission.value = 'granted'
    return true
  } catch (error) {
    console.error('麦克风权限被拒绝:', error)
    microphonePermission.value = 'denied'
    return false
  }
}

// Start a MediaRecorder session for the main input bar: request the mic,
// pick a supported container/codec, collect chunks, and run a 60s cap timer.
const startRecording = async () => {
  try {
    // Feature-detect getUserMedia.
    if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
      throw new Error('您的浏览器不支持录音功能')
    }

    // Mono 48kHz stream with echo/noise cleanup.
    const stream = await navigator.mediaDevices.getUserMedia({
      audio: {
        sampleRate: 48000,
        channelCount: 1,
        echoCancellation: true,
        noiseSuppression: true
      }
    })

    microphonePermission.value = 'granted'

    // Prefer WebM+Opus, then MP4, else the browser default (WebM).
    let mimeType = 'audio/webm'
    if (MediaRecorder.isTypeSupported('audio/webm;codecs=opus')) {
      mimeType = 'audio/webm;codecs=opus'
      console.log('使用WebM+Opus格式录音')
    } else if (MediaRecorder.isTypeSupported('audio/mp4')) {
      mimeType = 'audio/mp4'
      console.log('使用MP4格式录音')
    } else {
      console.log('使用默认WebM格式录音')
    }

    mediaRecorder.value = new MediaRecorder(stream, {
      mimeType: mimeType,
      audioBitsPerSecond: 128000
    })

    // Drop any chunks left over from a previous session.
    audioChunks.value = []

    // Collect data chunks as they are produced.
    mediaRecorder.value.ondataavailable = (event) => {
      if (event.data.size > 0) {
        audioChunks.value.push(event.data)
        console.log('收到录音数据块，大小:', event.data.size)
      }
    }

    mediaRecorder.value.onstop = () => {
      console.log('录音停止，共收集', audioChunks.value.length, '个数据块')
      // Release the microphone.
      stream.getTracks().forEach(track => track.stop())
    }

    mediaRecorder.value.onerror = (event) => {
      console.error('MediaRecorder错误:', event)
    }

    // timeslice=1000: emit a data chunk roughly every second.
    mediaRecorder.value.start(1000)
    isRecording.value = true
    recordingTime.value = 0

    console.log('开始MediaRecorder录音，格式:', mimeType)

    // Tick the on-screen timer; hard-stop at the 60-second cap.
    recordingTimer.value = window.setInterval(() => {
      recordingTime.value++
      if (recordingTime.value >= 60) {
        stopRecording()
        alert('录音时间已达到上限(60秒)')
      }
    }, 1000)

  } catch (error) {
    console.error('录音失败:', error)
    microphonePermission.value = 'denied'
    const errorMessage = error instanceof Error ? error.message : '无法开始录音，请检查麦克风权限'
    alert(errorMessage)
  }
}

// Open the voice-call dialog with a clean history.
const openCallDialog = () => {
  callMessages.value = []
  showCallDialog.value = true
  console.log('打开通话弹窗')
}

// Close the call dialog and tear down every piece of dialog state:
// in-flight recording, reactive flags, timer, MediaRecorder, playing audio.
const closeCallDialog = () => {
  // Stop an in-flight recording first (also schedules processing).
  if (isDialogRecording.value) {
    stopDialogRecording()
  }

  // Reset all dialog-scoped reactive state.
  showCallDialog.value = false
  isDialogRecording.value = false
  isDialogProcessing.value = false
  isWaitingForResponse.value = false
  dialogRecordingTime.value = 0
  dialogUploadProgress.value = 0
  dialogAudioChunks.value = []

  // Cancel the recording timer.
  if (dialogRecordingTimer.value) {
    clearInterval(dialogRecordingTimer.value)
    dialogRecordingTimer.value = null
  }

  // Stop the MediaRecorder if it is still running.
  if (dialogMediaRecorder.value) {
    try {
      if (dialogMediaRecorder.value.state === 'recording') {
        dialogMediaRecorder.value.stop()
      }
    } catch (error) {
      console.warn('停止弹窗MediaRecorder时出错:', error)
    }
    dialogMediaRecorder.value = null
  }

  // Silence any AI voice still playing and reset every bubble's state.
  stopCurrentAudio()
  callMessages.value.forEach(msg => {
    msg.isPlaying = false
  })

  console.log('关闭通话弹窗，已清理所有状态')
}

// Human-readable status line shown under the avatar in the call dialog.
// Precedence: processing > waiting for reply > recording > idle prompt.
const getCallStatusText = (): string => {
  if (isDialogProcessing.value) return '正在处理您的语音...'
  if (isWaitingForResponse.value) return '等待AI回复...'
  if (isDialogRecording.value) return '正在听您说话...'
  return '点击下方按钮开始说话'
}

// Icon for the big speak button, mirroring getCallStatusText precedence.
const getDialogButtonIcon = (): string => {
  switch (true) {
    case isDialogProcessing.value:
      return '⏳'
    case isWaitingForResponse.value:
      return '🤔'
    case isDialogRecording.value:
      return '🔴'
    default:
      return '🎤'
  }
}

// Label for the big speak button, mirroring getCallStatusText precedence.
const getDialogButtonText = (): string => {
  switch (true) {
    case isDialogProcessing.value:
      return '正在处理...'
    case isWaitingForResponse.value:
      return '等待回复...'
    case isDialogRecording.value:
      return '我说完了'
    default:
      return '我开始说了'
  }
}

// Module-level handle to the <audio> element currently playing, so a new
// playback (or dialog close) can stop the previous one.
let currentAudio: HTMLAudioElement | null = null

// Toggle playback of an AI voice bubble in the call dialog.
// `index` is unused here but kept for template-call compatibility.
const playVoiceMessage = (message: CallMessage, index: number) => {
  if (!message.audioUrl) {
    alert('暂无音频，请稍后再试')
    return
  }

  // Tapping the bubble that is already playing stops it.
  if (message.isPlaying) {
    stopCurrentAudio()
    message.isPlaying = false
    console.log('停止播放AI语音')
    return
  }

  // Only one bubble may animate at a time.
  for (const msg of callMessages.value) {
    msg.isPlaying = false
  }

  message.isPlaying = true
  console.log('开始播放AI语音:', message.audioUrl)

  playAudioWithCallback(message.audioUrl, () => {
    // Playback finished (or failed): stop the wave animation.
    message.isPlaying = false
  })
}

// Pause, rewind, and release whatever audio element is currently playing.
const stopCurrentAudio = () => {
  const audio = currentAudio
  if (!audio) return
  audio.pause()
  audio.currentTime = 0
  currentAudio = null
}

// Play an audio URL through a fresh HTMLAudioElement, invoking onEnded when
// playback finishes, fails, or cannot start (empty URL / autoplay blocked).
const playAudioWithCallback = (audioUrl: string, onEnded?: () => void) => {
  try {
    console.log('开始播放音频:', audioUrl)

    // Empty URL: nothing to play, but still fire the completion callback
    // so callers can reset their UI state.
    if (!audioUrl || audioUrl.trim() === '') {
      console.error('音频URL为空')
      onEnded?.()
      return
    }

    // Only one audio element plays at a time.
    stopCurrentAudio()

    currentAudio = new Audio(audioUrl)
    currentAudio.volume = 0.8

    currentAudio.oncanplay = () => {
      console.log('音频准备就绪，开始播放')
    }

    currentAudio.onplay = () => {
      console.log('音频开始播放')
    }

    currentAudio.onended = () => {
      console.log('音频播放完成')
      currentAudio = null
      onEnded?.()
    }

    currentAudio.onerror = (e) => {
      console.error('音频播放失败:', e)
      console.error('音频URL:', audioUrl)
      currentAudio = null
      onEnded?.()
    }

    // play() rejects e.g. when autoplay is blocked before a user gesture.
    currentAudio.play().catch(error => {
      console.error('播放失败:', error)
      currentAudio = null
      onEnded?.()
      if (error.name === 'NotAllowedError') {
        alert('请点击页面任意位置后再试，浏览器阻止了自动播放')
      }
    })

  } catch (error) {
    console.error('播放音频异常:', error)
    currentAudio = null
    onEnded?.()
  }
}

// Test stub for TTS: logs the text and returns no URL (empty string),
// since real AI voice URLs come from the backend.
const generateMockAudioUrl = (text: string): string => {
  console.log('生成模拟音频URL为:', text)
  return ''
}

// Render a duration in whole seconds as a bare number string.
const formatDuration = (seconds: number): string => {
  return `${seconds}`
}

// Start or stop the dialog recorder depending on current state.
const toggleDialogRecording = async () => {
  if (!isDialogRecording.value) {
    await startDialogRecording()
    return
  }
  stopDialogRecording()
}

// Start a recording session scoped to the call dialog: verify permission,
// open a mic stream, pick a supported format, and run the 60s cap timer.
const startDialogRecording = async () => {
  try {
    // Explicit permission check (also updates microphonePermission).
    const hasPermission = await requestMicrophonePermission()
    if (!hasPermission) {
      alert('需要麦克风权限才能进行语音对话')
      return
    }

    // Feature-detect getUserMedia.
    if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
      throw new Error('您的浏览器不支持录音功能')
    }

    // Mono 48kHz stream with echo/noise cleanup.
    const stream = await navigator.mediaDevices.getUserMedia({
      audio: {
        sampleRate: 48000,
        channelCount: 1,
        echoCancellation: true,
        noiseSuppression: true
      }
    })

    // Prefer WebM+Opus, then MP4, else the browser default.
    let mimeType = 'audio/webm'
    if (MediaRecorder.isTypeSupported('audio/webm;codecs=opus')) {
      mimeType = 'audio/webm;codecs=opus'
    } else if (MediaRecorder.isTypeSupported('audio/mp4')) {
      mimeType = 'audio/mp4'
    }

    // Dialog-scoped MediaRecorder instance.
    dialogMediaRecorder.value = new MediaRecorder(stream, {
      mimeType: mimeType,
      audioBitsPerSecond: 128000
    })

    // Drop chunks from any previous dialog session.
    dialogAudioChunks.value = []

    // Collect chunks as they arrive.
    dialogMediaRecorder.value.ondataavailable = (event) => {
      if (event.data.size > 0) {
        dialogAudioChunks.value.push(event.data)
        console.log('弹窗录音收到数据块，大小:', event.data.size)
      }
    }

    dialogMediaRecorder.value.onstop = () => {
      console.log('弹窗录音停止，共收集', dialogAudioChunks.value.length, '个数据块')
      // Release the microphone.
      stream.getTracks().forEach(track => track.stop())
    }

    dialogMediaRecorder.value.onerror = (event) => {
      console.error('弹窗MediaRecorder错误:', event)
    }

    // timeslice=1000: one data chunk roughly every second.
    dialogMediaRecorder.value.start(1000)
    isDialogRecording.value = true
    dialogRecordingTime.value = 0

    console.log('弹窗开始录音，格式:', mimeType)

    // Tick the timer; hard-stop at the 60-second cap.
    dialogRecordingTimer.value = window.setInterval(() => {
      dialogRecordingTime.value++
      if (dialogRecordingTime.value >= 60) {
        stopDialogRecording()
        alert('录音时间已达到上限(60秒)')
      }
    }, 1000)

  } catch (error) {
    console.error('弹窗录音失败:', error)
    const errorMessage = error instanceof Error ? error.message : '无法开始录音，请检查麦克风权限'
    alert(errorMessage)
  }
}

// Stop the dialog recorder and hand the collected chunks to processing.
const stopDialogRecording = () => {
  if (!isDialogRecording.value || !dialogMediaRecorder.value) return

  dialogMediaRecorder.value.stop()
  isDialogRecording.value = false

  // Cancel the on-screen timer.
  const timer = dialogRecordingTimer.value
  if (timer) {
    clearInterval(timer)
    dialogRecordingTimer.value = null
  }

  console.log('停止弹窗录音')
  // MediaRecorder delivers its final chunk asynchronously after stop();
  // wait a beat before reading dialogAudioChunks.
  setTimeout(() => {
    processDialogRecording()
  }, 100)
}

// Process the dialog recording: convert chunks to MP3, upload for speech
// recognition, then request an AI reply and append both turns to the
// call-dialog history (auto-playing the AI voice when available).
const processDialogRecording = async () => {
  // Nothing recorded — bail early.
  if (dialogAudioChunks.value.length === 0) {
    alert('未检测到录音数据，请重试')
    return
  }

  isDialogProcessing.value = true
  dialogUploadProgress.value = 0

  try {
    console.log('开始处理弹窗录音数据...')
    console.log('录音时长:', dialogRecordingTime.value, '秒')

    // Step 1 (progress 20%): encode the recorded chunks to MP3.
    dialogUploadProgress.value = 20
    const mp3Blob = await convertDialogToMp3()

    console.log('弹窗MP3转换完成，文件大小:', (mp3Blob.size / 1024).toFixed(2) + 'KB')

    // Reject invalid audio before uploading.
    const validation = validateAudioFile(mp3Blob)
    if (!validation.valid) {
      throw new Error(validation.error)
    }

    // Step 2 (progress 50-90%): upload for speech recognition.
    dialogUploadProgress.value = 50

    const onProgress = (progress: UploadProgress) => {
      dialogUploadProgress.value = 50 + (progress.percentage * 0.4) // map upload to 50-90%
      console.log('弹窗上传进度:', progress.percentage + '%')
    }

    // Toggle between the real backend and the mock recognizer.
    const useRealAPI = true

    let result
    if (useRealAPI) {
      console.log('弹窗使用真实后端API进行语音识别')
      result = await uploadAudioForRecognition(mp3Blob, onProgress, currentRole.value?.name || '吕布')
    } else {
      console.log('弹窗使用模拟数据进行测试')
      result = await mockSpeechRecognition(mp3Blob)
    }

    dialogUploadProgress.value = 100

    if (result.success && result.text) {
      console.log('弹窗语音识别成功，结果:', result)
      console.log('识别文本:', result.text)
      console.log('音频URL:', result.audioUrl)

      // Show the recognized text as the user's turn.
      console.log('添加用户消息到弹窗:', result.text)
      callMessages.value.push({
        role: 'user',
        content: result.text, // recognized text, not the MP3 URL
        type: 'text'
      })

      isWaitingForResponse.value = true

      try {
        // Ask the chat API for the assistant reply.
        const chatResult = await callTextChatAPI(result.text, currentRole.value?.name || '助手')

        let aiResponse: string
        let aiAudioUrl: string | undefined

        if (chatResult.success && chatResult.reply) {
          aiResponse = chatResult.reply
          aiAudioUrl = chatResult.audioUrl || result.audioUrl
        } else {
          // API failed: fall back to a locally generated reply.
          aiResponse = generateAIResponse(result.text, currentRole.value?.name || '助手')
          aiAudioUrl = result.audioUrl
        }

        // Assistant turn rendered as a voice bubble.
        const aiMessage: CallMessage = {
          role: 'assistant',
          content: aiResponse,
          type: 'voice',
          audioUrl: aiAudioUrl || generateMockAudioUrl(aiResponse),
          duration: Math.floor(aiResponse.length / 8) + 2, // rough estimate from text length
          isPlaying: false
        }

        callMessages.value.push(aiMessage)
        isWaitingForResponse.value = false

        // Auto-play the AI voice when a URL is available.
        if (aiAudioUrl) {
          console.log('检测到AI音频回复，自动播放:', aiAudioUrl)
          // Stop any other bubble's animation first.
          callMessages.value.forEach((msg) => {
            msg.isPlaying = false
          })
          // Small delay so the new bubble renders before it animates.
          setTimeout(() => {
            // NOTE(review): messageIndex is computed but never used.
            const messageIndex = callMessages.value.length - 1
            aiMessage.isPlaying = true
            playAudioWithCallback(aiAudioUrl!, () => {
              // Playback done: stop the wave animation.
              aiMessage.isPlaying = false
            })
          }, 500)
        }

      } catch (error) {
        console.error('文本聊天API调用失败:', error)
        // Chat API threw: fall back to a locally generated voice bubble.
        const aiResponse = generateAIResponse(result.text, currentRole.value?.name || '助手')

        const aiMessage: CallMessage = {
          role: 'assistant',
          content: aiResponse,
          type: 'voice',
          audioUrl: result.audioUrl || generateMockAudioUrl(aiResponse),
          duration: Math.floor(aiResponse.length / 8) + 2,
          isPlaying: false
        }

        callMessages.value.push(aiMessage)
        isWaitingForResponse.value = false
      }
    } else {
      throw new Error(result.error || '语音识别失败')
    }

  } catch (audioError) {
    console.error('弹窗音频处理或识别失败:', audioError)
    const errorMessage = audioError instanceof Error ? audioError.message : '音频处理失败，请重试'
    alert(`语音处理失败: ${errorMessage}`)
  } finally {
    isDialogProcessing.value = false
    dialogUploadProgress.value = 0
    dialogRecordingTime.value = 0
    // Drop the consumed chunks.
    dialogAudioChunks.value = []
  }
}

// 获取按钮图标

// 弹窗专用的MP3转换方法
const convertDialogToMp3 = async (): Promise<Blob> => {
  console.log('开始弹窗MP3转换...')

  try {
    // 使用弹窗的录音数据
    const audioBlob = new Blob(dialogAudioChunks.value, { type: 'audio/webm' })
    const audioBuffer = await audioBlob.arrayBuffer()

    // 使用AudioContext解码音频数据
    const audioContext = new AudioContext()
    const decodedData = await audioContext.decodeAudioData(audioBuffer)

    console.log('弹窗音频解码成功:', {
      duration: decodedData.duration,
      sampleRate: decodedData.sampleRate,
      numberOfChannels: decodedData.numberOfChannels,
      length: decodedData.length
    })

    // 获取左声道数据并转换为16位PCM
    const channelData = decodedData.getChannelData(0)
    const samples = new Int16Array(channelData.length)
    for (let i = 0; i < channelData.length; i++) {
      samples[i] = Math.max(-32768, Math.min(32767, channelData[i] * 32768))
    }

    const sampleRate = decodedData.sampleRate
    const channels = 1 // 使用单声道
    const kbps = 128

    console.log(`开始弹窗MP3编码: 采样率=${sampleRate}, 声道=${channels}, 码率=${kbps}kbps`)

    // 创建MP3编码器
    const mp3encoder = new lamejs.Mp3Encoder(channels, sampleRate, kbps)

    const mp3Data = []
    const sampleBlockSize = 1152 // MP3编码块大小

    // 分块编码
    for (let i = 0; i < samples.length; i += sampleBlockSize) {
      const sampleChunk = samples.subarray(i, i + sampleBlockSize)
      const mp3buf = mp3encoder.encodeBuffer(sampleChunk)
      if (mp3buf.length > 0) {
        mp3Data.push(new Int8Array(mp3buf))
      }
    }

    // 完成编码
    const mp3buf = mp3encoder.flush()
    if (mp3buf.length > 0) {
      mp3Data.push(new Int8Array(mp3buf))
    }

    // 创建MP3 Blob
    const mp3Blob = new Blob(mp3Data, { type: 'audio/mp3' })

    console.log('弹稗MP3转换完成！文件大小:', (mp3Blob.size / 1024).toFixed(2) + 'KB')

    if (mp3Blob.size === 0) {
      throw new Error('弹稗MP3转换失败，生成的文件为空')
    }

    return mp3Blob

  } catch (error) {
    console.error('弹稗MP3转换失败:', error)
    throw new Error(`弹稗MP3转换失败: ${error instanceof Error ? error.message : '未知错误'}`)
  }
}

// Call-mode switch: ends the active call if one is running, otherwise
// starts a new one.
const toggleCallMode = () => {
  if (!isInCall.value) {
    startCall()
    return
  }
  endCall()
}

// Begin a hands-free call session: verify microphone access, flip the
// call-state flags, and kick off the first recording. Surfaces failures
// to the user via alert().
const startCall = async () => {
  try {
    // Without microphone access a call is pointless — bail out early.
    const granted = await requestMicrophonePermission()
    if (!granted) {
      alert('需要麦克风权限才能进行通话')
      return
    }

    isInCall.value = true
    isCallMode.value = true

    // The first capture starts immediately.
    await startRecording()

    console.log('通话模式开始')
  } catch (error) {
    console.error('启动通话失败:', error)
    alert('启动通话失败，请检查麦克风设置')
  }
}

// Tear down the call session: clear both call flags and stop any
// capture that is still running.
const endCall = () => {
  isInCall.value = false
  isCallMode.value = false

  if (isRecording.value) stopRecording()

  console.log('通话模式结束')
}

// Stop an in-progress MediaRecorder capture (shared by chat and call mode).
// Clears the per-second recording timer, then schedules processing of the
// captured chunks.
const stopRecording = () => {
  if (isRecording.value && mediaRecorder.value) {
    mediaRecorder.value.stop()
    isRecording.value = false

    // Stop the elapsed-time ticker.
    if (recordingTimer.value) {
      clearInterval(recordingTimer.value)
      recordingTimer.value = null
    }

    console.log('停止MediaRecorder录音')
    // The original note says stopping triggers the recorder's onstop event,
    // which then calls processRecording — yet processRecording is ALSO
    // invoked manually below.
    // NOTE(review): if an onstop handler elsewhere calls processRecording
    // too, each recording would be processed twice; and the fixed 100ms
    // wait does not guarantee the final dataavailable event has fired.
    // Confirm against the MediaRecorder setup code (outside this view).
    setTimeout(() => {
      processRecording()
    }, 100) // wait for the last data chunk to be collected
  }
}

// Handle a finished recording: encode the captured chunks to MP3, upload
// them for speech recognition, then route the recognized text either into
// the call-dialog transcript or the normal chat input. Shared by the
// inline voice button and the call dialog.
const processRecording = async () => {
  // Nothing captured — tell the user and bail out.
  if (audioChunks.value.length === 0) {
    alert('未检测到录音数据，请重试')
    return
  }

  isProcessing.value = true
  isDialogProcessing.value = true
  uploadProgress.value = 0

  try {
    console.log('开始处理录音数据...')
    console.log('录音时长:', recordingTime.value, '秒')

    // Encode to MP3 before uploading.
    uploadProgress.value = 20
    const mp3Blob = await convertToMp3()

    console.log('MP3转换完成，文件大小:', (mp3Blob.size / 1024).toFixed(2) + 'KB')

    // Reject invalid audio before touching the network.
    const validation = validateAudioFile(mp3Blob)
    if (!validation.valid) {
      throw new Error(validation.error)
    }

    // Upload and run speech recognition.
    uploadProgress.value = 50

    // Map raw upload progress onto the 50–90% band of the overall bar.
    const onProgress = (progress: UploadProgress) => {
      uploadProgress.value = 50 + (progress.percentage * 0.4) // 50-90%
      console.log('上传进度:', progress.percentage + '%')
    }

    // Hard-coded backend switch.
    // NOTE(review): the mock branch is dead while this stays `true`;
    // consider driving it from an env flag.
    const useRealAPI = true

    let result
    if (useRealAPI) {
      console.log('使用真实后端API进行语音识别')
      result = await uploadAudioForRecognition(mp3Blob, onProgress, currentRole.value?.name || '吕布')
    } else {
      console.log('使用模拟数据进行测试')
      result = await mockSpeechRecognition(mp3Blob)
    }

    uploadProgress.value = 100

    if (result.success && result.text) {
      console.log('语音识别成功，结果:', result)
      console.log('识别文本:', result.text)
      console.log('音频URL:', result.audioUrl)

      // Call-dialog mode: append the transcript to the dialog history.
      if (showCallDialog.value) {
        callMessages.value.push({
          role: 'user',
          content: result.text
        })

        isWaitingForResponse.value = true

        // Simulated AI reply after a randomized 1–3s delay.
        setTimeout(() => {
          const aiResponse = generateAIResponse(result.text, currentRole.value?.name || '助手')
          callMessages.value.push({
            role: 'assistant',
            content: aiResponse
          })

          isWaitingForResponse.value = false

          // Play the AI's audio reply, if one came back.
          // NOTE(review): the block after this if/else ALSO schedules
          // playAudio(result.audioUrl), so in dialog mode the reply may
          // play twice — confirm whether that is intended.
          if (result.audioUrl) {
            console.log('检测到AI音频回复，开始播放:', result.audioUrl)
            setTimeout(() => {
              playAudio(result.audioUrl!)
            }, 500)
          }
        }, 1000 + Math.random() * 2000)
      } else {
        // Normal chat mode: drop the transcript into the input box…
        inputText.value = result.text
        // …resize the textarea to fit it…
        autoResize()
        // …focus it…
        textArea.value?.focus()

        // …and send automatically.
        await sendMessage()
      }

      // Play the AI audio reply (runs in BOTH dialog and chat modes).
      if (result.audioUrl) {
        console.log('检测到AI音频回复，开始播放:', result.audioUrl)
        // Small delay so the message send settles first.
        setTimeout(() => {
          playAudio(result.audioUrl!)
        }, 1500)
      } else {
        console.log('未检测到AI音频回复')
      }

      // Hands-free call mode: queue up the next recording automatically.
      if (isCallMode.value && isInCall.value) {
        setTimeout(() => {
          if (isInCall.value && !isRecording.value) {
            console.log('通话模式：准备下一次录音')
            // Extra pause to give the user time to react.
            setTimeout(() => {
              if (isInCall.value) {
                startRecording()
              }
            }, 2000)
          }
        }, result.audioUrl ? 3000 : 1000) // wait longer while the reply audio plays
      }
    } else {
      throw new Error(result.error || '语音识别失败')
    }

  } catch (audioError) {
    console.error('音频处理或识别失败:', audioError)
    const errorMessage = audioError instanceof Error ? audioError.message : '音频处理失败，请重试'
    alert(`语音处理失败: ${errorMessage}`)
  } finally {
    isProcessing.value = false
    isDialogProcessing.value = false
    uploadProgress.value = 0
    recordingTime.value = 0
    // Drop the captured chunks so the next recording starts clean.
    audioChunks.value = []
  }
}

// One-shot audio playback helper: creates a detached <audio> element for
// the given URL and starts it immediately. Problems are only logged; a
// blocked autoplay (NotAllowedError) prompts the user to interact with
// the page first.
const playAudio = (audioUrl: string) => {
  try {
    console.log('开始播放音频:', audioUrl)

    // Guard against empty / whitespace-only URLs.
    if (!audioUrl || audioUrl.trim() === '') {
      console.error('音频URL为空')
      return
    }

    const player = new Audio(audioUrl)
    player.volume = 0.8

    // Lifecycle hooks are logging-only; no playback state is tracked here.
    player.oncanplay = () => console.log('音频准备就绪，开始播放')
    player.onplay = () => console.log('音频开始播放')
    player.onended = () => console.log('音频播放完成')
    player.onerror = (e) => {
      console.error('音频播放失败:', e)
      console.error('音频URL:', audioUrl)
    }

    // Kick off playback.
    player.play().catch(error => {
      console.error('播放失败:', error)
      // Autoplay policies require a prior user gesture.
      if (error.name === 'NotAllowedError') {
        alert('请点击页面任意位置后再试，浏览器阻止了自动播放')
      }
    })

  } catch (error) {
    console.error('播放音频异常:', error)
  }
}

// 使用更现代的lamejs库进行MP3转换
const convertToMp3 = async (): Promise<Blob> => {
  console.log('开始使用新版lamejs进行MP3转换...')

  try {
    // 优先使用MediaRecorder的数据
    let audioBuffer: ArrayBuffer

    if (audioChunks.value.length > 0) {
      console.log('使用MediaRecorder录制的数据')
      const audioBlob = new Blob(audioChunks.value, { type: 'audio/webm' })
      audioBuffer = await audioBlob.arrayBuffer()
    } else {
      console.log('使用js-audio-recorder的WAV数据')
      const wavBlob = recorder.getWAVBlob()
      audioBuffer = await wavBlob.arrayBuffer()
    }

    // 使用AudioContext解码音频数据
    const audioContext = new AudioContext()
    const decodedData = await audioContext.decodeAudioData(audioBuffer)

    console.log('音频解码成功:', {
      duration: decodedData.duration,
      sampleRate: decodedData.sampleRate,
      numberOfChannels: decodedData.numberOfChannels,
      length: decodedData.length
    })

    // 获取左声道数据并转换为16位PCM
    const channelData = decodedData.getChannelData(0)
    const samples = new Int16Array(channelData.length)
    for (let i = 0; i < channelData.length; i++) {
      samples[i] = Math.max(-32768, Math.min(32767, channelData[i] * 32768))
    }

    const sampleRate = decodedData.sampleRate
    const channels = 1 // 使用单声道
    const kbps = 128

    console.log(`开始MP3编码: 采样率=${sampleRate}, 声道=${channels}, 码率=${kbps}kbps`)

    // 创建MP3编码器
    const mp3encoder = new lamejs.Mp3Encoder(channels, sampleRate, kbps)

    const mp3Data = []
    const sampleBlockSize = 1152 // MP3编码块大小

    // 分块编码
    for (let i = 0; i < samples.length; i += sampleBlockSize) {
      const sampleChunk = samples.subarray(i, i + sampleBlockSize)
      const mp3buf = mp3encoder.encodeBuffer(sampleChunk)
      if (mp3buf.length > 0) {
        mp3Data.push(new Int8Array(mp3buf))
      }
    }

    // 完成编码
    const mp3buf = mp3encoder.flush()
    if (mp3buf.length > 0) {
      mp3Data.push(new Int8Array(mp3buf))
    }

    // 创建MP3 Blob
    const mp3Blob = new Blob(mp3Data, { type: 'audio/mp3' })

    console.log('MP3转换完成！文件大小:', (mp3Blob.size / 1024).toFixed(2) + 'KB')

    if (mp3Blob.size === 0) {
      throw new Error('MP3转换失败，生成的文件为空')
    }

    return mp3Blob

  } catch (error) {
    console.error('MP3转换失败:', error)
    throw new Error(`MP3转换失败: ${error instanceof Error ? error.message : '未知错误'}`)
  }
}

// Cheap RIFF/WAVE sniff: a genuine WAV file starts with the ASCII tag
// 'RIFF' at byte 0 and 'WAVE' at byte 8. Any failure (e.g. a buffer too
// short to read) yields false.
const checkIfRealWav = (buffer: ArrayBuffer): boolean => {
  try {
    const bytes = new Uint8Array(buffer)
    const tagAt = (start: number): string =>
      String.fromCharCode(bytes[start], bytes[start + 1], bytes[start + 2], bytes[start + 3])
    return tagAt(0) === 'RIFF' && tagAt(8) === 'WAVE'
  } catch {
    return false
  }
}

// WAV转MP3转换函数（由于lamejs兼容性问题，暂时禁用）
/*
const convertWavToMp3 = (wavData: ArrayBuffer): Blob => {
  try {
    // 检查lamejs是否可用
    if (!lamejs || !lamejs.WavHeader || !lamejs.Mp3Encoder) {
      throw new Error('lamejs库未正确加载')
    }

    // 读取WAV头信息
    const wav = lamejs.WavHeader.readHeader(new DataView(wavData))
    console.log('WAV信息:', wav)

    // 验证WAV数据
    if (!wav || !wav.channels || !wav.sampleRate || !wav.dataLen) {
      throw new Error('WAV文件格式不正确')
    }

    // 创建MP3编码器
    const mp3Encoder = new lamejs.Mp3Encoder(wav.channels, wav.sampleRate, 128)

    // 获取音频样本数据
    const samples = new Int16Array(wavData, wav.dataOffset, wav.dataLen / 2)

    const mp3Data = []
    let remaining = samples.length
    const maxSamples = 1152

    // 分块编码成MP3
    for (let i = 0; i < remaining; i += maxSamples) {
      const left = samples.subarray(i, i + maxSamples)
      const mp3buf = mp3Encoder.encodeBuffer(left)
      if (mp3buf.length > 0) {
        mp3Data.push(mp3buf)
      }
    }

    // 完成编码
    const enc = mp3Encoder.flush()
    if (enc.length > 0) {
      mp3Data.push(enc)
    }

    const mp3Blob = new Blob(mp3Data, { type: 'audio/mp3' })
    console.log('MP3转换完成，大小:', mp3Blob.size)
    return mp3Blob

  } catch (error) {
    console.error('WAV到MP3转换失败:', error)
    throw error
  }
}
*/

// Toggle the inline voice-input recorder. Manual recording is refused
// while an active call session owns the microphone.
const toggleVoiceInput = async () => {
  if (isRecording.value) {
    stopRecording()
    return
  }
  if (isCallMode.value && isInCall.value) {
    alert('通话模式下请使用通话按钮控制')
    return
  }
  await startRecording()
}

// Tooltip text for the voice button, chosen by the current recorder
// state (call mode > permission denied > processing > recording > idle).
const getVoiceButtonTitle = (): string => {
  const callOwnsMic = isCallMode.value && isInCall.value
  if (callOwnsMic) {
    return '通话模式下请使用通话按钮控制录音'
  }
  if (microphonePermission.value === 'denied') {
    return '麦克风权限被拒绝，请在浏览器设置中允许麦克风访问'
  }
  if (isProcessing.value) {
    return '正在处理音频...'
  }
  return isRecording.value ? '点击停止录音' : '点击开始语音输入'
}

// Render an elapsed-seconds count as "m:ss" (seconds zero-padded).
const formatRecordingTime = (seconds: number): string => {
  const minutes = Math.floor(seconds / 60)
  const paddedSeconds = (seconds % 60).toString().padStart(2, '0')
  return `${minutes}:${paddedSeconds}`
}
// Decode an arbitrary audio buffer to mono 16-bit PCM and wrap it in a
// WAV container. Rejects with "音频解码失败: …" when decoding (or any
// later conversion step) fails, mirroring the original promise chain.
// Fixes: replaces the explicit-Promise-constructor antipattern with
// async/await, and closes the AudioContext when done (resource leak).
const convertToWav = async (audioBuffer: ArrayBuffer): Promise<ArrayBuffer> => {
  console.log('开始转换为WAV格式...')

  let audioContext: AudioContext
  try {
    audioContext = new AudioContext()
  } catch (error) {
    // Context creation itself failed (e.g. unsupported environment).
    console.error('convertToWav出错:', error)
    throw error
  }

  try {
    // .slice(0) hands the decoder its own copy so the caller's buffer
    // is not detached.
    const decodedData = await audioContext.decodeAudioData(audioBuffer.slice(0))
    console.log('音频解码成功:', {
      duration: decodedData.duration,
      sampleRate: decodedData.sampleRate,
      numberOfChannels: decodedData.numberOfChannels,
      length: decodedData.length
    })

    // Use the first channel only (mono output).
    const samples = decodedData.getChannelData(0)
    const sampleRate = decodedData.sampleRate
    const numChannels = 1

    console.log('开始转换PCM数据...')
    // Convert float samples to clamped 16-bit PCM.
    const int16Array = new Int16Array(samples.length)
    for (let i = 0; i < samples.length; i++) {
      int16Array[i] = Math.max(-32768, Math.min(32767, samples[i] * 32768))
    }

    console.log('创建WAV文件...')
    const wavBuffer = createWavFile(int16Array, sampleRate, numChannels)
    console.log('WAV文件创建成功，大小:', wavBuffer.byteLength)
    return wavBuffer
  } catch (error) {
    console.error('音频解码失败:', error)
    throw new Error(`音频解码失败: ${error instanceof Error ? error.message : String(error)}`)
  } finally {
    // Release the AudioContext.
    audioContext.close().catch(() => {})
  }
}

// Build a canonical 44-byte-header PCM WAV file (little-endian) around
// the given 16-bit samples and return it as an ArrayBuffer.
const createWavFile = (samples: Int16Array, sampleRate: number, numChannels: number): ArrayBuffer => {
  const dataBytes = samples.length * 2
  const buffer = new ArrayBuffer(44 + dataBytes)
  const view = new DataView(buffer)

  // Write a four-character ASCII chunk tag at the given offset.
  const putTag = (offset: number, tag: string) => {
    for (let i = 0; i < tag.length; i++) {
      view.setUint8(offset + i, tag.charCodeAt(i))
    }
  }

  putTag(0, 'RIFF')
  view.setUint32(4, 36 + dataBytes, true)        // remaining file size
  putTag(8, 'WAVE')
  putTag(12, 'fmt ')
  view.setUint32(16, 16, true)                   // fmt chunk size
  view.setUint16(20, 1, true)                    // audio format: PCM
  view.setUint16(22, numChannels, true)
  view.setUint32(24, sampleRate, true)
  view.setUint32(28, sampleRate * numChannels * 2, true) // byte rate
  view.setUint16(32, numChannels * 2, true)      // block align
  view.setUint16(34, 16, true)                   // bits per sample
  putTag(36, 'data')
  view.setUint32(40, dataBytes, true)

  // Sample payload follows the 44-byte header.
  samples.forEach((sample, i) => {
    view.setInt16(44 + i * 2, sample, true)
  })

  return buffer
}

// On mount: reset conversation state, check microphone permission, and
// probe which recording backends/formats this browser supports.
onMounted(() => {

  // Clear the previous role's chat history.
  conversationStore.clearConversation()

  checkMicrophonePermission()

  // Probe MediaRecorder support (primary recording backend).
  try {
    if (typeof MediaRecorder === 'undefined') {
      console.warn('MediaRecorder API不支持，将使用js-audio-recorder作为备选')
    } else {
      console.log('MediaRecorder API支持，可以使用现代录音方案')

      // Formats to probe, in order of preference.
      const supportedTypes = [
        'audio/webm;codecs=opus',
        'audio/webm',
        'audio/mp4',
        'audio/wav'
      ]

      // Logging only — the actual format choice happens elsewhere.
      supportedTypes.forEach(type => {
        if (MediaRecorder.isTypeSupported(type)) {
          console.log('支持的音频格式:', type)
        }
      })
    }

    // js-audio-recorder is the fallback recording backend.
    if (recorder) {
      console.log('js-audio-recorder库加载成功，可作为备选方案')
    }
  } catch (error) {
    console.error('音频功能初始化失败:', error)
  }
})

// Auto-scroll the message list to the bottom whenever the conversation
// changes. Deep watch so in-place mutations (e.g. streamed content
// updates) also trigger; nextTick waits for the new message to render
// before measuring scrollHeight.
watch(
  messages,
  () => {
    nextTick(() => {
      if (messagesContainer.value) {
        messagesContainer.value.scrollTo({
          top: messagesContainer.value.scrollHeight,
          behavior: 'smooth',
        })
      }
    })
  },
  { deep: true },
)

</script>

<style scoped>
.chat-container {
  display: flex;
  flex-direction: column;
  height: 100vh;
  background: linear-gradient(135deg, #f9fafb 0%, #e0e7ff 100%);
  font-family:
    -apple-system, BlinkMacSystemFont, 'SF Pro Display', 'Segoe UI', Roboto, Helvetica, Arial,
    sans-serif;
}

/* 导航栏 */
.nav-bar {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 12px 20px;
  background: rgba(255, 255, 255, 0.95);
  backdrop-filter: blur(15px);
  border-bottom: 1px solid rgba(0, 0, 0, 0.08);
  position: sticky;
  top: 0;
  z-index: 100;
}

.logo {
  font-size: 1.1rem;
  font-weight: 600;
  color: #1d1d1f;
}

.nav-actions {
  display: flex;
  gap: 8px;
}

.nav-btn {
  display: flex;
  align-items: center;
  gap: 6px;
  background: #007aff;
  color: white;
  border: none;
  border-radius: 18px;
  padding: 8px 16px;
  cursor: pointer;
  font-size: 14px;
  transition: all 0.2s ease;
}

.nav-btn:hover {
  background: #005ecc;
  transform: translateY(-1px);
}

.icon-btn {
  padding: 8px;
}

.btn-icon {
  font-size: 16px;
}

/* 聊天头部 */
.chat-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 16px 20px;
  background: rgba(255, 255, 255, 0.9);
  border-bottom: 1px solid rgba(0, 0, 0, 0.06);
}

.role-info {
  display: flex;
  align-items: center;
  gap: 12px;
}

.role-avatar {
  width: 44px;
  height: 44px;
  border-radius: 50%;
  object-fit: cover;
  border: 2px solid #007aff;
}

.role-details h2 {
  font-size: 1.1rem;
  font-weight: 600;
  color: #1d1d1f;
  margin: 0 0 4px 0;
}

.role-status {
  display: flex;
  align-items: center;
  gap: 6px;
  font-size: 0.85rem;
  color: #6b7280;
  margin: 0;
}

.status-dot {
  width: 8px;
  height: 8px;
  border-radius: 50%;
}

.status-dot.online {
  background: #34c759;
}

.status-dot.offline {
  background: #ff3b30;
}

.status-dot.busy {
  background: #ff9500;
}

.header-actions {
  display: flex;
  gap: 8px;
}

.action-btn {
  background: rgba(0, 122, 255, 0.1);
  color: #007aff;
  border: 1px solid rgba(0, 122, 255, 0.2);
  border-radius: 16px;
  padding: 6px 12px;
  font-size: 0.85rem;
  cursor: pointer;
  transition: all 0.2s ease;
}

.action-btn:hover {
  background: rgba(0, 122, 255, 0.2);
}

/* 通话按钮样式 */
.call-btn {
  position: relative;
  transition: all 0.3s ease;
}

.call-btn.in-call {
  background: rgba(255, 59, 48, 0.1) !important;
  color: #ff3b30 !important;
  border-color: rgba(255, 59, 48, 0.3) !important;
  animation: callPulse 2s infinite;
}

@keyframes callPulse {
  0%, 100% {
    transform: scale(1);
    box-shadow: 0 0 0 0 rgba(255, 59, 48, 0.4);
  }
  50% {
    transform: scale(1.05);
    box-shadow: 0 0 0 8px rgba(255, 59, 48, 0);
  }
}

/* 角色信息面板 */
.role-info-panel {
  background: rgba(255, 255, 255, 0.95);
  border-bottom: 1px solid rgba(0, 0, 0, 0.06);
  padding: 20px;
  animation: slideDown 0.3s ease;
}

@keyframes slideDown {
  from {
    opacity: 0;
    transform: translateY(-10px);
  }
  to {
    opacity: 1;
    transform: translateY(0);
  }
}

.panel-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  margin-bottom: 16px;
}

.panel-header h3 {
  margin: 0;
  font-size: 1rem;
  color: #1d1d1f;
}

.close-btn {
  background: none;
  border: none;
  font-size: 1.5rem;
  cursor: pointer;
  color: #6b7280;
}

.panel-content {
  display: flex;
  gap: 16px;
  align-items: flex-start;
}

.info-avatar {
  width: 80px;
  height: 80px;
  border-radius: 50%;
  object-fit: cover;
}

.info-details h4 {
  margin: 0 0 8px 0;
  font-size: 1.2rem;
  color: #1d1d1f;
}

.info-desc {
  margin: 0 0 16px 0;
  color: #6b7280;
  line-height: 1.5;
}

.info-stats {
  display: flex;
  gap: 20px;
}

.stat-item {
  display: flex;
  flex-direction: column;
  align-items: center;
}

.stat-label {
  font-size: 0.8rem;
  color: #6b7280;
  margin-bottom: 4px;
}

.stat-value {
  font-size: 1.1rem;
  font-weight: 600;
  color: #007aff;
}

/* 消息区 */
.messages-container {
  flex: 1;
  overflow-y: auto;
  padding: 20px;
  display: flex;
  flex-direction: column;
  gap: 16px;
}

/* 通话状态横幅 */
.call-status-banner {
  background: linear-gradient(135deg, #ff3b30, #ff6b47);
  color: white;
  border-radius: 12px;
  padding: 12px 16px;
  margin-bottom: 8px;
  animation: slideInDown 0.3s ease;
  box-shadow: 0 4px 12px rgba(255, 59, 48, 0.3);
}

.call-status-content {
  display: flex;
  align-items: center;
  gap: 8px;
  font-size: 0.9rem;
  font-weight: 500;
}

.call-icon {
  font-size: 1.1rem;
  animation: bounce 2s infinite;
}

.call-indicator {
  width: 8px;
  height: 8px;
  border-radius: 50%;
  background: white;
  animation: blink 1.5s infinite;
  margin-left: auto;
}

@keyframes slideInDown {
  from {
    opacity: 0;
    transform: translateY(-20px);
  }
  to {
    opacity: 1;
    transform: translateY(0);
  }
}

@keyframes bounce {
  0%, 20%, 50%, 80%, 100% {
    transform: translateY(0);
  }
  40% {
    transform: translateY(-3px);
  }
  60% {
    transform: translateY(-2px);
  }
}

.welcome-message {
  display: flex;
  justify-content: center;
  align-items: center;
  padding: 40px 20px;
}

.welcome-content {
  text-align: center;
  max-width: 400px;
}

.welcome-avatar {
  width: 80px;
  height: 80px;
  border-radius: 50%;
  object-fit: cover;
  margin-bottom: 16px;
  border: 3px solid #007aff;
}

.welcome-content h3 {
  margin: 0 0 8px 0;
  color: #1d1d1f;
}

.welcome-content p {
  margin: 0 0 24px 0;
  color: #6b7280;
  line-height: 1.5;
}

.suggestions {
  display: flex;
  flex-wrap: wrap;
  gap: 8px;
  justify-content: center;
}

.suggestion-btn {
  background: rgba(0, 122, 255, 0.1);
  color: #007aff;
  border: 1px solid rgba(0, 122, 255, 0.2);
  border-radius: 16px;
  padding: 8px 16px;
  font-size: 0.9rem;
  cursor: pointer;
  transition: all 0.2s ease;
}

.suggestion-btn:hover {
  background: rgba(0, 122, 255, 0.2);
  transform: translateY(-1px);
}

.message {
  display: flex;
  gap: 12px;
  max-width: 70%;
  animation: messageAppear 0.3s ease;
}

@keyframes messageAppear {
  from {
    opacity: 0;
    transform: translateY(10px);
  }
  to {
    opacity: 1;
    transform: translateY(0);
  }
}

.message.user {
  align-self: flex-end;
  flex-direction: row-reverse;
}

.message.assistant {
  align-self: flex-start;
}

.message-avatar {
  flex-shrink: 0;
}

.avatar-img {
  width: 36px;
  height: 36px;
  border-radius: 50%;
  object-fit: cover;
}

.message-content {
  background: #f2f2f7;
  padding: 12px 16px;
  border-radius: 18px;
  position: relative;
}

.message.user .message-content {
  background: #007aff;
  color: white;
}

.content-text {
  line-height: 1.5;
  white-space: pre-wrap;
}

.message-meta {
  display: flex;
  justify-content: space-between;
  align-items: center;
  margin-top: 6px;
  font-size: 0.75rem;
  opacity: 0.7;
}

.message.user .message-meta {
  flex-direction: row-reverse;
}

/* 输入指示器 */
.typing-indicator {
  display: flex;
  gap: 12px;
  align-self: flex-start;
  max-width: 70%;
}

.typing-content {
  background: #f2f2f7;
  padding: 12px 16px;
  border-radius: 18px;
}

.typing-dots {
  display: flex;
  gap: 4px;
}

.typing-dots span {
  width: 6px;
  height: 6px;
  border-radius: 50%;
  background: #6b7280;
  animation: typing 1.4s infinite ease-in-out;
}

.typing-dots span:nth-child(1) {
  animation-delay: -0.32s;
}
.typing-dots span:nth-child(2) {
  animation-delay: -0.16s;
}

@keyframes typing {
  0%,
  80%,
  100% {
    transform: scale(0.8);
    opacity: 0.5;
  }
  40% {
    transform: scale(1);
    opacity: 1;
  }
}

/* 输入区 */
.input-area {
  background: rgba(255, 255, 255, 0.95);
  border-top: 1px solid rgba(0, 0, 0, 0.08);
  padding: 16px 20px;
}

.input-tools {
  display: flex;
  gap: 8px;
  margin-bottom: 12px;
}

.tool-btn {
  background: none;
  border: none;
  font-size: 1.2rem;
  cursor: pointer;
  padding: 4px 8px;
  border-radius: 6px;
  transition: background 0.2s ease;
}

.tool-btn:hover {
  background: rgba(0, 0, 0, 0.05);
}

/* 语音录制按钮样式 */
.voice-btn {
  position: relative;
  transition: all 0.3s ease;
}

.voice-btn.recording {
  background: rgba(255, 59, 48, 0.1);
  color: #ff3b30;
  animation: pulse 1s infinite;
}

.voice-btn.processing {
  background: rgba(255, 149, 0, 0.1);
  color: #ff9500;
}

.voice-btn.permission-denied {
  background: rgba(142, 142, 147, 0.1);
  color: #8e8e93;
}

.voice-btn.call-mode {
  background: rgba(142, 142, 147, 0.1);
  color: #8e8e93;
  cursor: not-allowed;
}

.voice-btn:disabled {
  opacity: 0.6;
  cursor: not-allowed;
}

.recording-indicator {
  animation: blink 1s infinite;
}

.processing-indicator {
  animation: rotate 1s linear infinite;
}

.recording-time {
  position: absolute;
  top: -8px;
  right: -8px;
  background: #ff3b30;
  color: white;
  border-radius: 8px;
  padding: 2px 6px;
  font-size: 0.7rem;
  font-weight: 600;
  min-width: 30px;
  text-align: center;
}

.progress-bar {
  position: absolute;
  bottom: -2px;
  left: 2px;
  right: 2px;
  height: 3px;
  background: rgba(255, 149, 0, 0.2);
  border-radius: 2px;
  overflow: hidden;
}

/* Fill bar of the inline voice button's upload progress. Scoped under
   .progress-bar because an identically-named .progress-fill rule later in
   this style block (call dialog, white background) would otherwise
   override this orange fill. */
.progress-bar .progress-fill {
  height: 100%;
  background: #ff9500;
  border-radius: 2px;
  transition: width 0.3s ease;
}

@keyframes pulse {
  0% {
    transform: scale(1);
  }
  50% {
    transform: scale(1.1);
  }
  100% {
    transform: scale(1);
  }
}

@keyframes blink {
  0%, 50% {
    opacity: 1;
  }
  51%, 100% {
    opacity: 0.3;
  }
}

@keyframes rotate {
  from {
    transform: rotate(0deg);
  }
  to {
    transform: rotate(360deg);
  }
}

.input-wrapper {
  display: flex;
  gap: 12px;
  align-items: flex-end;
}

.input-box {
  flex: 1;
  border: 1px solid rgba(0, 0, 0, 0.1);
  padding: 12px 16px;
  border-radius: 20px;
  font-size: 14px;
  resize: none;
  background: #f9fafb;
  outline: none;
  transition: border-color 0.2s ease;
  max-height: 120px;
  line-height: 1.5;
}

.input-box:focus {
  border-color: #007aff;
}

.send-btn {
  background: #007aff;
  color: white;
  border: none;
  border-radius: 50%;
  width: 40px;
  height: 40px;
  cursor: pointer;
  display: flex;
  align-items: center;
  justify-content: center;
  transition: all 0.2s ease;
  flex-shrink: 0;
}

.send-btn:hover:not(:disabled) {
  background: #005ecc;
  transform: scale(1.05);
}

.send-btn:disabled {
  background: #ccc;
  cursor: not-allowed;
  transform: none;
}

.send-icon {
  font-size: 16px;
  font-weight: bold;
}

.input-footer {
  display: flex;
  justify-content: space-between;
  align-items: center;
  margin-top: 8px;
  font-size: 0.8rem;
  color: #6b7280;
}

.footer-actions {
  display: flex;
  gap: 8px;
}

.footer-btn {
  background: none;
  border: none;
  color: #007aff;
  cursor: pointer;
  padding: 4px 8px;
  border-radius: 4px;
  font-size: 0.8rem;
  transition: background 0.2s ease;
}

.footer-btn:hover {
  background: rgba(0, 122, 255, 0.1);
}

/* 通话弹窗样式 */
.call-dialog-overlay {
  position: fixed;
  top: 0;
  left: 0;
  right: 0;
  bottom: 0;
  background: rgba(0, 0, 0, 0.5);
  display: flex;
  align-items: center;
  justify-content: center;
  z-index: 1000;
  backdrop-filter: blur(4px);
}

.call-dialog {
  background: white;
  border-radius: 20px;
  width: 90%;
  max-width: 480px;
  max-height: 80vh;
  overflow: hidden;
  box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3);
  animation: dialogAppear 0.3s ease;
}

@keyframes dialogAppear {
  from {
    opacity: 0;
    transform: scale(0.9) translateY(-20px);
  }
  to {
    opacity: 1;
    transform: scale(1) translateY(0);
  }
}

.call-dialog-header {
  background: linear-gradient(135deg, #007aff, #5ac8fa);
  color: white;
  padding: 20px;
  display: flex;
  justify-content: space-between;
  align-items: center;
}

.call-dialog-header h3 {
  margin: 0;
  font-size: 1.2rem;
  font-weight: 600;
}

.close-dialog-btn {
  background: none;
  border: none;
  color: white;
  font-size: 1.5rem;
  cursor: pointer;
  width: 32px;
  height: 32px;
  border-radius: 50%;
  display: flex;
  align-items: center;
  justify-content: center;
  transition: background 0.2s ease;
}

.close-dialog-btn:hover {
  background: rgba(255, 255, 255, 0.2);
}

.call-dialog-content {
  padding: 30px;
  display: flex;
  flex-direction: column;
  gap: 24px;
}

.call-avatar-section {
  text-align: center;
}

.call-avatar {
  width: 80px;
  height: 80px;
  border-radius: 50%;
  object-fit: cover;
  border: 4px solid #007aff;
  margin-bottom: 12px;
}

.call-avatar-section h4 {
  margin: 0 0 8px 0;
  color: #1d1d1f;
  font-size: 1.1rem;
}

.call-status-text {
  margin: 0;
  color: #6b7280;
  font-size: 0.9rem;
}

.call-controls {
  display: flex;
  justify-content: center;
}

.call-speak-btn {
  background: linear-gradient(135deg, #007aff, #5ac8fa);
  color: white;
  border: none;
  border-radius: 60px;
  padding: 16px 32px;
  font-size: 1rem;
  font-weight: 600;
  cursor: pointer;
  transition: all 0.3s ease;
  position: relative;
  min-width: 200px;
  display: flex;
  align-items: center;
  justify-content: center;
  gap: 8px;
  box-shadow: 0 4px 16px rgba(0, 122, 255, 0.3);
}

.call-speak-btn:hover:not(:disabled) {
  transform: translateY(-2px);
  box-shadow: 0 6px 20px rgba(0, 122, 255, 0.4);
}

.call-speak-btn.speaking {
  background: linear-gradient(135deg, #ff3b30, #ff6b47);
  animation: speakingPulse 1.5s infinite;
}

.call-speak-btn.processing {
  background: linear-gradient(135deg, #ff9500, #ffb340);
}

.call-speak-btn.waiting {
  background: linear-gradient(135deg, #34c759, #62d496);
}

.call-speak-btn:disabled {
  opacity: 0.8;
  cursor: not-allowed;
  transform: none;
}

@keyframes speakingPulse {
  0%, 100% {
    transform: scale(1);
    box-shadow: 0 4px 16px rgba(255, 59, 48, 0.3);
  }
  50% {
    transform: scale(1.05);
    box-shadow: 0 8px 24px rgba(255, 59, 48, 0.5);
  }
}

.speak-btn-icon {
  font-size: 1.2rem;
}

.speak-btn-text {
  font-size: 1rem;
}

.recording-time-display {
  position: absolute;
  top: -12px;
  right: -12px;
  background: #ff3b30;
  color: white;
  border-radius: 12px;
  padding: 4px 8px;
  font-size: 0.8rem;
  font-weight: 600;
  min-width: 40px;
  text-align: center;
}

.progress-indicator {
  position: absolute;
  bottom: -2px;
  left: 4px;
  right: 4px;
  height: 4px;
  background: rgba(255, 255, 255, 0.3);
  border-radius: 2px;
  overflow: hidden;
}

/* Fill bar of the call-dialog speak button's progress indicator. Scoped
   under .progress-indicator because a bare .progress-fill selector would
   clash with the earlier orange .progress-fill rule in this style block
   (the later rule silently wins for both elements). */
.progress-indicator .progress-fill {
  height: 100%;
  background: white;
  border-radius: 2px;
  transition: width 0.3s ease;
}

.call-conversation {
  max-height: 200px;
  overflow-y: auto;
  display: flex;
  flex-direction: column;
  gap: 12px;
  padding: 16px;
  background: #f9fafb;
  border-radius: 12px;
}

.call-message {
  display: flex;
  max-width: 80%;
}

.call-message.user {
  align-self: flex-end;
}

.call-message.assistant {
  align-self: flex-start;
}

.call-msg-content {
  background: #f2f2f7;
  padding: 8px 12px;
  border-radius: 12px;
  font-size: 0.9rem;
  line-height: 1.4;
}

.call-message.user .call-msg-content {
  background: #007aff;
  color: white;
}

/* 用户语音消息样式 */
.user-voice {
  display: flex;
  align-items: center;
  gap: 8px;
  background: #007aff !important;
  color: white !important;
  padding: 10px 12px;
}

.voice-icon {
  font-size: 14px;
}

.voice-text {
  flex: 1;
  font-size: 0.9rem;
}

/* NOTE(review): duplicates the .welcome-avatar rule defined earlier in
   this style block; this later copy wins and additionally centers the
   avatar horizontally. Consider merging the two rules. */
.welcome-avatar {
  width: 80px;
  height: 80px;
  border-radius: 50%;
  object-fit: cover;
  margin-bottom: 16px;
  border: 3px solid #007aff;
  /* center horizontally */
  position: relative;
  left: 50%;
  transform: translateX(-50%);
}

/* AI语音气泡样式 */
.voice-bubble {
  display: flex;
  align-items: center;
  gap: 8px;
  background: #f2f2f7 !important;
  color: #333 !important;
  padding: 10px 12px;
  cursor: pointer;
  transition: all 0.2s ease;
  min-width: 120px;
  position: relative;
}

.voice-bubble:hover {
  background: #e8e8e8 !important;
}

.voice-bubble.playing {
  background: #e3f2fd !important;
}

.voice-play-icon {
  font-size: 16px;
  width: 20px;
  text-align: center;
}

.voice-waves {
  display: flex;
  align-items: center;
  gap: 2px;
  flex: 1;
  height: 20px;
}

.wave {
  width: 3px;
  background: #007aff;
  border-radius: 2px;
  transition: height 0.2s ease;
}

.voice-bubble:not(.playing) .wave {
  height: 8px;
}

.voice-bubble.playing .wave {
  animation: waveAnimation 1s infinite ease-in-out;
}

.voice-bubble.playing .wave:nth-child(1) {
  animation-delay: 0s;
  height: 12px;
}

.voice-bubble.playing .wave:nth-child(2) {
  animation-delay: 0.1s;
  height: 16px;
}

.voice-bubble.playing .wave:nth-child(3) {
  animation-delay: 0.2s;
  height: 20px;
}

.voice-bubble.playing .wave:nth-child(4) {
  animation-delay: 0.3s;
  height: 16px;
}

.voice-bubble.playing .wave:nth-child(5) {
  animation-delay: 0.4s;
  height: 12px;
}

@keyframes waveAnimation {
  0%, 100% {
    height: 8px;
  }
  50% {
    height: 20px;
  }
}

.voice-duration {
  font-size: 0.8rem;
  color: #666;
  min-width: 20px;
  text-align: right;
}

/* 响应式设计 */
@media (max-width: 480px) {
  .call-dialog {
    width: 95%;
    margin: 20px;
  }

  .call-dialog-content {
    padding: 20px;
  }

  .call-speak-btn {
    min-width: 180px;
    padding: 14px 24px;
  }
}

/* 响应式设计 */
@media (max-width: 768px) {
  .nav-bar {
    padding: 10px 16px;
  }

  .chat-header {
    padding: 12px 16px;
    flex-direction: column;
    gap: 12px;
    align-items: flex-start;
  }

  .header-actions {
    align-self: flex-end;
  }

  .message {
    max-width: 85%;
  }

  .input-area {
    padding: 12px 16px;
  }
}
</style>
