<template>
  <div class="voice-chat-app">
    <!-- Main content container -->
    <div class="main-container">
      <!-- Sidebar -->
      <div class="sidebar">
      <!-- User info area -->
      <div class="user-info">
        <div class="avatar">
          <svg width="32" height="32" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
            <circle cx="12" cy="8" r="4" fill="currentColor"/>
            <path d="M6 21v-2a4 4 0 0 1 4-4h4a4 4 0 0 1 4 4v2" stroke="currentColor" stroke-width="2" fill="none"/>
          </svg>
        </div>
        <div class="user-details">
          <div class="username">欢迎你，{{ currentUser.username || userInfo.username || '用户' }}</div>
          <button @click="logout" class="logout-btn">退出登录</button>
        </div>
      </div>

      <!-- "New session" button -->
      <div class="new-chat-section">
        <button @click="showCreateSessionModal" class="new-chat-btn">
          <svg width="16" height="16" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
            <path d="M12 5v14M5 12h14" stroke="currentColor" stroke-width="2"/>
          </svg>
          新建会话
        </button>
      </div>

      <!-- "New session" modal (overlay click closes; inner click is stopped) -->
      <div v-if="showModal" class="modal-overlay" @click="closeModal">
        <div class="modal-content" @click.stop>
          <div class="modal-header">
            <h3>新建会话</h3>
            <button @click="closeModal" class="close-btn">×</button>
          </div>
          <div class="modal-body">
            <div class="input-group">
              <label for="rolePrefix">角色描述：</label>
              <div class="role-input-container">
                <span class="role-prefix">扮演一个</span>
                <input 
                  id="rolePrefix"
                  v-model="rolePrefix" 
                  type="text" 
                  placeholder="幽默的"
                  class="role-input prefix-input"
                  @keyup.enter="createNewSession"
                />
                <span class="role-suffix">的</span>
                <!-- NOTE(review): this input has no associated <label>; the visible label's `for` points at #rolePrefix only — consider an aria-label. -->
                <input 
                  id="roleSuffix"
                  v-model="roleSuffix" 
                  type="text" 
                  placeholder="苏格拉底"
                  class="role-input suffix-input"
                  @keyup.enter="createNewSession"
                />
              </div>
            </div>
            <div class="example-text">
              示例：幽默的 + 鲁迅、温柔的 + 老师、专业的 + 程序员
            </div>
          </div>
          <div class="modal-footer">
            <button @click="closeModal" class="cancel-btn">取消</button>
            <button @click="createNewSession" class="confirm-btn" :disabled="!rolePrefix.trim() || !roleSuffix.trim()">
              创建会话
            </button>
          </div>
        </div>
      </div>

      <!-- Session history list -->
      <div class="sessions-list">
        <div class="sessions-header">
          <h3>会话历史 ({{ sessions.length }})</h3>
        </div>

        <div class="sessions-container">
          <div 
            v-for="session in sessions" 
            :key="session.id"
            :class="['session-item', { active: currentSessionId === session.id }]"
            @click="switchSession(session.id)"
          >
            <div class="session-title">{{ session.title }}</div>
            <div class="session-time">{{ formatTime(session.lastMessageTime) }}</div>
          </div>
        </div>
      </div>
    </div>

    <!-- Main content area -->
    <div class="main-content">
      <!-- Chat area -->
      <div class="chat-area">
        <div class="chat-header">
          <h2>{{ currentSession?.title || '新会话' }}</h2>
        </div>
        
        <div class="messages-container" ref="messagesContainer">
          <div 
            v-for="message in currentMessages" 
            :key="message.id"
            :class="['message', message.type]"
          >
            <div class="message-content">
              <div class="message-text">{{ message.text }}</div>
              <div class="message-time">{{ formatTime(message.timestamp) }}</div>
              <!-- Speech playback toggle (AI messages only); shows a pause icon while playing -->
              <button 
                v-if="message.type === 'ai'" 
                @click="playAudio(message)"
                class="play-audio-btn"
                :class="{ playing: playingAudioId === message.id }"
              >
                <svg width="16" height="16" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
                  <polygon v-if="playingAudioId !== message.id" points="5,3 19,12 5,21" fill="currentColor"/>
                  <template v-else>
                    <rect x="6" y="4" width="4" height="16" fill="currentColor"/>
                    <rect x="14" y="4" width="4" height="16" fill="currentColor"/>
                  </template>
                </svg>
              </button>
            </div>
          </div>
        </div>
      </div>

      <!-- Voice input area -->
      <div class="voice-input-area">
        <!-- Input-mode toggle (push-to-talk vs voice activation) -->
        <div class="mode-switch">
          <button 
            @click="toggleVoiceMode" 
            :class="['mode-btn', { active: voiceActivationMode }]"
          >
            <svg width="16" height="16" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
              <path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z" fill="currentColor"/>
              <path d="M19 10v2a7 7 0 0 1-14 0v-2" stroke="currentColor" stroke-width="2" fill="none"/>
              <circle cx="12" cy="12" r="2" fill="currentColor" v-if="voiceActivationMode"/>
            </svg>
            切换模式
          </button>
        </div>

        <div class="input-container">
          <!-- Push-to-talk mode. NOTE(review): only mouse events are bound; touch devices would need touchstart/touchend — confirm mobile requirements. -->
          <button 
            v-if="!voiceActivationMode"
            @mousedown="startRecording" 
            @mouseup="stopRecording"
            @mouseleave="stopRecording"
            :class="['record-btn', { recording: isRecording }]"
            :disabled="isProcessing || isWaitingResponse"
          >
            <svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
              <path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z" fill="currentColor"/>
              <path d="M19 10v2a7 7 0 0 1-14 0v-2M12 19v4M8 23h8" stroke="currentColor" stroke-width="2" fill="none"/>
            </svg>
            <span v-if="!isRecording && !isProcessing && !isWaitingResponse">按住说话</span>
            <span v-else-if="isRecording">录音中...</span>
            <span v-else-if="isWaitingResponse">AI正在思考中，请稍候...</span>
            <span v-else>处理中...</span>
          </button>

          <!-- Voice-activation mode -->
          <button 
            v-else
            @click="toggleVoiceActivation"
            :class="['voice-activation-btn', { 
              listening: isListening && !voiceDetected && !isWaitingResponse, 
              speaking: voiceDetected && !isWaitingResponse,
              processing: isProcessing,
              waiting: isWaitingResponse
            }]"
            :disabled="isProcessing || isWaitingResponse"
          >
            <svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
              <path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z" fill="currentColor"/>
              <path d="M19 10v2a7 7 0 0 1-14 0v-2" stroke="currentColor" stroke-width="2" fill="none"/>
              <circle cx="12" cy="12" r="3" fill="none" stroke="currentColor" stroke-width="2" v-if="isListening"/>
            </svg>
            <span v-if="isWaitingResponse">AI正在思考中，请稍候...</span>
            <span v-else-if="!isListening && !isProcessing">开始监听</span>
            <span v-else-if="isListening && !voiceDetected">监听中...</span>
            <span v-else-if="voiceDetected">检测到语音</span>
            <span v-else>处理中...</span>
          </button>
        </div>
        
        <!-- Recording status indicator. NOTE(review): in push-to-talk mode the first caption also shows while isWaitingResponse is true — verify intended. -->
        <div v-if="isRecording || (voiceActivationMode && isListening) || isWaitingResponse" class="recording-indicator">
          <div class="recording-wave">
            <div class="wave-bar" :style="{ height: `${Math.max(20, audioLevel * 100)}%` }"></div>
            <div class="wave-bar" :style="{ height: `${Math.max(15, audioLevel * 80)}%` }"></div>
            <div class="wave-bar" :style="{ height: `${Math.max(25, audioLevel * 120)}%` }"></div>
            <div class="wave-bar" :style="{ height: `${Math.max(18, audioLevel * 90)}%` }"></div>
          </div>
          <span v-if="!voiceActivationMode">正在录音，松开发送</span>
          <span v-else-if="isWaitingResponse">AI正在处理您的语音，请稍候...</span>
          <span v-else-if="!voiceDetected">监听中，开始说话...</span>
          <span v-else>检测到语音，继续说话...</span>
        </div>
      </div>
      </div>
    </div>
  </div>
</template>

<script setup>
import { ref, reactive, computed, onMounted, nextTick } from 'vue'
import { userApi, sessionApi, talkApi } from '../api/index.js'

// Events emitted to the parent component ('logout' returns to the login view).
const emit = defineEmits(['logout'])

// Fallback display name sourced from the "remember me" value in localStorage.
const userInfo = reactive({
  username: localStorage.getItem('rememberedUsername') || '用户'
})

// Profile of the logged-in user, populated by fetchCurrentUser().
// NOTE: this is a reactive object, not a ref — access fields directly.
const currentUser = reactive({
  username: '',
  email: ''
})

// Session list fetched from the API (see fetchSessions()).
const sessions = ref([])

// "New session" modal state and its two role-description inputs.
const showModal = ref(false)
const rolePrefix = ref('')
const roleSuffix = ref('')

const currentSessionId = ref(null)
const isRecording = ref(false)
const isProcessing = ref(false)
const isWaitingResponse = ref(false) // true while waiting for the AI reply
const playingAudioId = ref(null)
const currentAudio = ref(null)
const messagesContainer = ref(null)
const mediaRecorder = ref(null)
const audioChunks = ref([])
const recordingStream = ref(null)

// Voice-activation (VAD) mode state.
const voiceActivationMode = ref(false) // toggle: push-to-talk vs voice activation
const isListening = ref(false) // whether the analyser loop is monitoring the mic
const audioContext = ref(null)
const analyser = ref(null)
const microphone = ref(null)
const silenceTimer = ref(null)
const voiceDetected = ref(false)
const audioLevel = ref(0) // current normalized volume level (0-1)

// Voice-activity-detection tuning.
const VAD_CONFIG = {
  silenceThreshold: 1500, // ms of silence before the utterance is considered finished
  volumeThreshold: 0.05, // normalized volume above which speech is detected
  smoothingTimeConstant: 0.8, // analyser smoothing constant
  fftSize: 256 // analyser FFT size
}

// Derived state: the selected session and the messages it holds.
const currentSession = computed(() =>
  sessions.value.find((session) => session.id === currentSessionId.value)
)

const currentMessages = computed(() => {
  const session = currentSession.value
  return (session && session.messages) || []
})

// Fetch the user's session history and populate the sidebar list.
// Sessions are sorted newest-first; when nothing is selected yet the most
// recent session is auto-selected and its chat history loaded.
// Fix: removed debug logging that printed the auth token from localStorage
// to the console, plus the rest of the leftover debug noise.
const fetchSessions = async () => {
  try {
    const response = await sessionApi.getAllSessions()

    // The response interceptor already unwraps the payload, so a successful
    // call yields the raw session array.
    if (Array.isArray(response)) {
      sessions.value = response
        .map(session => ({
          id: session.id,
          title: session.topic || session.title || '新会话', // prefer the topic field
          lastMessageTime: new Date(session.updatedTime || session.createdTime).getTime(),
          messages: [] // filled lazily by switchSession()
        }))
        .sort((a, b) => b.lastMessageTime - a.lastMessageTime) // newest first

      // Auto-select the most recent session and load its chat history.
      if (sessions.value.length > 0 && !currentSessionId.value) {
        await switchSession(sessions.value[0].id)
      }
    } else {
      console.warn('获取会话历史: 响应格式不正确', response)
      sessions.value = []
    }
  } catch (error) {
    console.error('获取会话历史失败:', error)
    sessions.value = []
  }
}

// Open the "new session" dialog with cleared role inputs.
const showCreateSessionModal = () => {
  rolePrefix.value = ''
  roleSuffix.value = ''
  showModal.value = true
}

// Hide the "new session" dialog and reset its inputs.
const closeModal = () => {
  rolePrefix.value = ''
  roleSuffix.value = ''
  showModal.value = false
}

// Create a new chat session from the two role-description inputs, refresh
// the session list, and switch to the newly created session.
const createNewSession = async () => {
  const prefix = rolePrefix.value.trim()
  const suffix = roleSuffix.value.trim()
  if (!prefix || !suffix) return

  try {
    // Combine the two inputs into the full role prompt.
    const fullRoleDescription = `扮演一个${prefix}的${suffix}`
    console.log('创建新会话...', fullRoleDescription)

    const response = await sessionApi.createSession(fullRoleDescription)
    console.log('创建会话API响应:', response)

    closeModal()
    await fetchSessions()

    // Prefer the id returned by the API; otherwise fall back to the newest
    // session (first entry after the refresh).
    if (response && response.id) {
      await switchSession(response.id)
    } else if (sessions.value.length > 0) {
      await switchSession(sessions.value[0].id)
    }

    console.log('会话创建成功，当前会话ID:', currentSessionId.value)
  } catch (error) {
    console.error('创建新会话失败:', error)
    closeModal()
    alert('创建会话失败，请稍后重试')
  }
}

// Make the given session current and load its chat history from the API.
const switchSession = async (sessionId) => {
  try {
    console.log('切换到会话:', sessionId)
    currentSessionId.value = sessionId

    const chatHistory = await sessionApi.getChatHistory(sessionId)
    console.log('获取到的聊天记录:', chatHistory)

    const index = sessions.value.findIndex(s => s.id === sessionId)
    if (index !== -1 && Array.isArray(chatHistory)) {
      // Map API messages into the shape the template renders.
      const formattedMessages = chatHistory.map(msg => ({
        id: msg.id,
        type: msg.messageType === 'USER' ? 'user' : 'ai',
        text: msg.content,
        timestamp: new Date(msg.createdTime).getTime()
      }))
      sessions.value[index].messages = formattedMessages
      console.log('更新会话消息:', formattedMessages)
    }

    nextTick(() => scrollToBottom())
  } catch (error) {
    console.error('获取聊天记录失败:', error)
    // Still switch sessions even when loading the history failed.
    currentSessionId.value = sessionId
    nextTick(() => scrollToBottom())
  }
}

// Log out: best-effort server call, then clear local credentials and notify
// the parent component to return to the login screen.
const logout = async () => {
  try {
    await userApi.logout()
    console.log('服务端退出登录成功')
  } catch (error) {
    // Local cleanup proceeds even when the server call fails.
    console.error('服务端退出登录失败:', error)
  }

  localStorage.removeItem('token')
  localStorage.removeItem('rememberedUsername')
  emit('logout')
}

// Render a timestamp (ms since epoch) as a relative Chinese label,
// falling back to a locale date string after 24 hours.
const formatTime = (timestamp) => {
  const MINUTE = 1000 * 60
  const HOUR = MINUTE * 60
  const DAY = HOUR * 24
  const elapsed = new Date().getTime() - timestamp

  if (elapsed < MINUTE) return '刚刚'
  if (elapsed < HOUR) return `${Math.floor(elapsed / MINUTE)}分钟前`
  if (elapsed < DAY) return `${Math.floor(elapsed / HOUR)}小时前`
  return new Date(timestamp).toLocaleDateString()
}

// Begin a push-to-talk recording: request the microphone, wire up a
// MediaRecorder, and start capturing audio chunks.
// Fix: if MediaRecorder construction (or anything after getUserMedia)
// throws, the acquired stream was previously never stopped, leaving the
// microphone open — the catch block now releases it.
// NOTE(review): releasing the button before getUserMedia resolves leaves
// stopRecording() a no-op (no recorder yet) — confirm whether that race
// needs handling in the UI flow.
const startRecording = async () => {
  try {
    isRecording.value = true
    audioChunks.value = []

    // Ask for the microphone with basic speech-friendly processing.
    const stream = await navigator.mediaDevices.getUserMedia({
      audio: {
        echoCancellation: true,
        noiseSuppression: true,
        sampleRate: 44100
      }
    })

    recordingStream.value = stream

    const recorder = new MediaRecorder(stream, {
      mimeType: 'audio/webm;codecs=opus'
    })
    mediaRecorder.value = recorder

    // Collect emitted chunks for assembly in stopRecording().
    recorder.ondataavailable = (event) => {
      if (event.data.size > 0) {
        audioChunks.value.push(event.data)
      }
    }

    recorder.start()
    console.log('开始录音...')
  } catch (error) {
    console.error('录音失败:', error)
    isRecording.value = false

    // Release the microphone if the stream was acquired before the failure,
    // so the browser's mic indicator turns off.
    if (recordingStream.value) {
      recordingStream.value.getTracks().forEach(track => track.stop())
      recordingStream.value = null
    }

    if (error.name === 'NotAllowedError') {
      alert('请允许访问麦克风权限')
    } else if (error.name === 'NotFoundError') {
      alert('未找到麦克风设备')
    } else {
      alert('录音功能初始化失败')
    }
  }
}

// Finish a push-to-talk recording: stop the recorder, wait for the final
// data flush, then hand the collected audio to the backend.
// Fixes: (a) the 'stop' handler is now attached BEFORE calling stop() so
// the event cannot be missed; (b) if the button was released before the
// recorder existed, state is reset and any acquired stream released instead
// of leaving the UI stuck in "recording"; (c) the mic tracks are stopped
// only after the recorder has fully flushed its data.
const stopRecording = async () => {
  if (!isRecording.value) return

  isRecording.value = false

  // Quick-release case: no recorder yet (getUserMedia still pending or
  // failed). Release whatever stream exists and bail out cleanly.
  if (!mediaRecorder.value) {
    if (recordingStream.value) {
      recordingStream.value.getTracks().forEach(track => track.stop())
      recordingStream.value = null
    }
    return
  }

  isProcessing.value = true

  try {
    // Attach the handler before stop() and wait for the final flush.
    const stopped = new Promise((resolve) => {
      mediaRecorder.value.onstop = resolve
    })
    mediaRecorder.value.stop()
    await stopped

    // Release the microphone after the recorder has fully stopped.
    if (recordingStream.value) {
      recordingStream.value.getTracks().forEach(track => track.stop())
      recordingStream.value = null
    }

    const audioBlob = new Blob(audioChunks.value, { type: 'audio/webm' })
    console.log('录音完成，音频大小:', audioBlob.size, 'bytes')

    // Reject recordings too short to contain usable speech.
    if (audioBlob.size < 1000) {
      alert('录音时间太短，请重新录音')
      return
    }

    await sendAudioToBackend(audioBlob)
  } catch (error) {
    console.error('处理录音失败:', error)
    alert('录音处理失败，请重试')
  } finally {
    isProcessing.value = false
    mediaRecorder.value = null
    audioChunks.value = []
  }
}

// Upload a recorded audio blob to the talk API for the current session,
// play the AI's spoken reply, and refresh the chat history.
// Fix: `wasListening` was declared inside the try block but referenced in
// the catch block, which threw a ReferenceError on any upload failure.
// It is now captured before the try so both paths can restore monitoring.
const sendAudioToBackend = async (audioBlob) => {
  // Remember monitoring state up front for both success and error paths.
  const wasListening = isListening.value

  try {
    console.log('发送音频到后端...')

    // Pause voice monitoring while the request is in flight so the mic
    // doesn't pick up the AI's reply.
    if (voiceActivationMode.value && wasListening) {
      console.log('发送请求前暂停语音监听')
      stopVoiceActivation()
    }

    isWaitingResponse.value = true

    // A session is required before any audio can be sent.
    if (!currentSession.value) {
      alert('请先选择或创建一个会话')
      isWaitingResponse.value = false
      return
    }

    const response = await talkApi.talkInSession(audioBlob, currentSession.value.id)
    console.log('语音对话响应:', response)

    isWaitingResponse.value = false

    // Resume monitoring as soon as the request finishes.
    if (voiceActivationMode.value && wasListening) {
      console.log('请求完成，立即开启语音监听')
      await startVoiceActivation()
    }

    // The response interceptor already unwrapped the payload; a successful
    // reply is the audio URL string itself.
    if (response && typeof response === 'string' && response.startsWith('http')) {
      const audioUrl = response
      console.log('获取到AI回复音频URL:', audioUrl)

      // Refresh the session so the new user/AI messages appear.
      await switchSession(currentSession.value.id)

      // Monitoring was already restarted above, so playback does not need
      // to restore it afterwards.
      await playAudioFromUrl(audioUrl, false)

      // Keep the newest message in view.
      nextTick(() => {
        scrollToBottom()
      })
    } else {
      console.error('语音对话失败: 响应格式不正确或URL无效', response)
      alert('语音对话失败，请重试')
    }
  } catch (error) {
    console.error('发送音频失败:', error)

    if (error.code === 'ECONNABORTED' || error.message?.includes('timeout')) {
      alert('语音处理超时，AI可能需要更多时间思考，请稍后重试')
    } else if (error.response?.status === 500) {
      alert('服务器处理出错，请稍后重试')
    } else if (error.response?.status === 413) {
      alert('语音文件过大，请缩短录音时间')
    } else {
      alert('发送音频失败，请检查网络连接')
    }

    isWaitingResponse.value = false

    // Restore monitoring after a failed upload.
    if (voiceActivationMode.value && wasListening) {
      console.log('发送失败，恢复语音监听')
      await startVoiceActivation()
    }
  }
}

// Play an audio clip from a URL, replacing any clip currently playing.
// When shouldRestoreListening is true and voice-activation mode is on,
// monitoring is restarted once playback ends or fails (guarded by
// !isListening so it is never started twice).
const playAudioFromUrl = async (audioUrl, shouldRestoreListening = false) => {
  try {
    console.log('播放音频:', audioUrl)
    
    // Stop whatever is currently playing before starting a new clip.
    if (currentAudio.value) {
      currentAudio.value.pause()
      currentAudio.value = null
    }
    
    // Create the audio element and track it so it can be interrupted.
    const audio = new Audio(audioUrl)
    currentAudio.value = audio
    
    // Playback finished normally: clear state, optionally resume monitoring.
    audio.addEventListener('ended', async () => {
      console.log('音频播放完成')
      currentAudio.value = null
      
      if (shouldRestoreListening && voiceActivationMode.value && !isListening.value) {
        console.log('音频播放完成，恢复语音监听')
        await startVoiceActivation()
      }
    })
    
    // Playback errored: same cleanup/restore path.
    audio.addEventListener('error', async (error) => {
      console.error('音频播放错误:', error)
      currentAudio.value = null
      
      if (shouldRestoreListening && voiceActivationMode.value && !isListening.value) {
        console.log('音频播放出错，恢复语音监听')
        await startVoiceActivation()
      }
    })
    
    // Start playback (play() itself can reject, e.g. autoplay policy).
    await audio.play()
    console.log('开始播放AI回复音频')
    
  } catch (error) {
    console.error('播放音频失败:', error)
    
    // play() rejected: still restore monitoring when requested.
    if (shouldRestoreListening && voiceActivationMode.value && !isListening.value) {
      console.log('音频播放失败，恢复语音监听')
      await startVoiceActivation()
    }
  }
}

// Toggle text-to-speech playback for an AI message bubble. Clicking a
// message that is already playing stops it; otherwise the audio URL is
// fetched from the talk API and played.
// Fix: `currentUser` is a reactive object, not a ref — the old
// `currentUser.value?.id` was always undefined.
const playAudio = async (message) => {
  if (playingAudioId.value === message.id) {
    // Second click on the same message: stop playback.
    if (currentAudio.value) {
      currentAudio.value.pause()
      currentAudio.value = null
    }
    playingAudioId.value = null
    console.log('停止播放音频:', message.id)
    return
  }

  try {
    // Stop any other message's audio first.
    if (currentAudio.value) {
      currentAudio.value.pause()
      currentAudio.value = null
    }

    playingAudioId.value = message.id
    console.log('开始获取音频URL:', message.id, message.text)

    // NOTE(review): currentUser only exposes username/email in this file —
    // confirm what identifier getAudioUrl actually expects here.
    const response = await talkApi.getAudioUrl(message.id, message.text, currentUser.id)

    console.log('API响应:', response)

    // The backend has returned several shapes; normalize them all down to a
    // plain URL string (stray backticks stripped).
    let audioUrl = null
    if (typeof response === 'string') {
      // Shape 1: the URL string itself.
      audioUrl = response.toString().trim().replace(/`/g, '')
    } else if (response && response.success && response.data) {
      // Shape 2: { success: true, data: url }
      audioUrl = response.data.toString().trim().replace(/`/g, '')
    } else if (response && response.data) {
      // Shape 3: { data: url }
      audioUrl = response.data.toString().trim().replace(/`/g, '')
    }

    if (audioUrl && audioUrl.startsWith('http')) {
      console.log('获取到音频URL:', audioUrl)

      const audio = new Audio(audioUrl)
      currentAudio.value = audio

      // Start playback once enough data has loaded.
      audio.addEventListener('loadeddata', () => {
        audio.play().catch(error => {
          console.error('音频播放失败:', error)
          playingAudioId.value = null
          currentAudio.value = null
        })
      })

      // Reset state when playback finishes…
      audio.addEventListener('ended', () => {
        playingAudioId.value = null
        currentAudio.value = null
        console.log('音频播放完成')
      })

      // …or errors out.
      audio.addEventListener('error', (error) => {
        console.error('音频播放错误:', error)
        playingAudioId.value = null
        currentAudio.value = null
      })
    } else {
      console.error('无法获取有效的音频URL', response)
      playingAudioId.value = null
    }
  } catch (error) {
    console.error('播放音频失败:', error)
    playingAudioId.value = null
    currentAudio.value = null
  }
}

// Pin the message list to its newest entry.
const scrollToBottom = () => {
  const container = messagesContainer.value
  if (!container) return
  container.scrollTop = container.scrollHeight
}

// Load the logged-in user's profile, tolerating the several response shapes
// the backend has been observed to return. Falls back to the remembered
// username from localStorage when the request fails or the shape is unknown.
// Fix: removed debug logging that printed the auth token from localStorage
// to the console, plus the rest of the leftover debug noise.
const fetchCurrentUser = async () => {
  try {
    const response = await userApi.getCurrentUser()

    let userData = null
    if (response && response.success && response.data) {
      // Shape 1: { success: true, data: { username, email } }
      userData = response.data
    } else if (response && response.username) {
      // Shape 2: the user object itself { username, email }
      userData = response
    } else if (response && response.data && response.data.username) {
      // Shape 3: { data: { username, email } }
      userData = response.data
    } else {
      console.warn('未识别的响应格式或响应为空:', response)
    }

    if (userData && userData.username) {
      currentUser.username = userData.username
      currentUser.email = userData.email || ''
    } else {
      // Unknown shape: fall back to the locally remembered username.
      currentUser.username = localStorage.getItem('rememberedUsername') || '用户'
    }
  } catch (error) {
    console.error('获取用户信息失败:', error)
    // Request failed: fall back to the locally remembered username.
    currentUser.username = localStorage.getItem('rememberedUsername') || '用户'
  }
}

// Toggle between push-to-talk and voice-activation input modes.
const toggleVoiceMode = () => {
  const activationEnabled = !voiceActivationMode.value
  voiceActivationMode.value = activationEnabled
  console.log('切换语音模式:', activationEnabled ? '语音感应' : '按住录音')

  // Leaving voice-activation mode must also shut down active monitoring.
  if (!activationEnabled && isListening.value) {
    stopVoiceActivation()
  }
}

// Start or stop voice-activation monitoring depending on current state.
const toggleVoiceActivation = async () => {
  if (!isListening.value) {
    await startVoiceActivation()
    return
  }
  stopVoiceActivation()
}

// Begin hands-free monitoring: open the microphone and feed it into a
// WebAudio analyser so startAudioAnalysis() can detect speech.
const startVoiceActivation = async () => {
  try {
    console.log('开始语音感应监听')
    isListening.value = true
    voiceDetected.value = false

    // Request the microphone with speech-friendly processing enabled.
    const stream = await navigator.mediaDevices.getUserMedia({
      audio: {
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: true
      }
    })
    recordingStream.value = stream

    // Build the analysis graph: mic source -> analyser.
    const AudioCtx = window.AudioContext || window.webkitAudioContext
    const ctx = new AudioCtx()
    const analyserNode = ctx.createAnalyser()
    analyserNode.fftSize = VAD_CONFIG.fftSize
    analyserNode.smoothingTimeConstant = VAD_CONFIG.smoothingTimeConstant

    const source = ctx.createMediaStreamSource(stream)
    source.connect(analyserNode)

    audioContext.value = ctx
    analyser.value = analyserNode
    microphone.value = source

    // Kick off the sampling loop.
    startAudioAnalysis()

    console.log('语音感应监听已启动')
  } catch (error) {
    console.error('启动语音感应失败:', error)
    isListening.value = false
    alert('无法访问麦克风，请检查权限设置')
  }
}

// Tear down voice-activation monitoring and release all audio resources.
const stopVoiceActivation = () => {
  console.log('停止语音感应监听')
  isListening.value = false
  voiceDetected.value = false

  // Cancel any pending end-of-speech timer.
  if (silenceTimer.value) {
    clearTimeout(silenceTimer.value)
    silenceTimer.value = null
  }

  // Close the WebAudio graph.
  if (audioContext.value) {
    audioContext.value.close()
    audioContext.value = null
  }

  // Release the microphone.
  const stream = recordingStream.value
  if (stream) {
    stream.getTracks().forEach(track => track.stop())
    recordingStream.value = null
  }

  analyser.value = null
  microphone.value = null
  audioLevel.value = 0
}

// Continuously sample the analyser via requestAnimationFrame, compute an
// average volume level, and drive the voice-activity state machine:
// rising above the volume threshold starts a VAD recording (interrupting
// any AI audio playback); falling below it arms a silence timer that,
// after VAD_CONFIG.silenceThreshold ms, stops the recording and sends it.
const startAudioAnalysis = () => {
  if (!analyser.value || !isListening.value) return
  
  const bufferLength = analyser.value.frequencyBinCount
  const dataArray = new Uint8Array(bufferLength)
  
  const analyze = () => {
    // Stop the loop as soon as monitoring is turned off.
    if (!isListening.value) return
    
    analyser.value.getByteFrequencyData(dataArray)
    
    // Average the frequency bins to get a rough volume estimate.
    let sum = 0
    for (let i = 0; i < bufferLength; i++) {
      sum += dataArray[i]
    }
    const average = sum / bufferLength
    audioLevel.value = average / 255 // normalize to 0-1
    
    // Voice activity = volume above the configured threshold.
    const isVoiceActive = audioLevel.value > VAD_CONFIG.volumeThreshold
    
    if (isVoiceActive) {
      // Rising edge: speech just started.
      if (!voiceDetected.value) {
        console.log('检测到语音开始')
        voiceDetected.value = true
        
        // Barge-in: interrupt any AI audio currently playing.
        if (currentAudio.value) {
          console.log('打断当前播放的音频')
          currentAudio.value.pause()
          currentAudio.value = null
          playingAudioId.value = null
        }
        
        // Begin capturing the utterance.
        startRecordingForVAD()
      }
      
      // Speech continues: cancel any pending end-of-speech timer.
      if (silenceTimer.value) {
        clearTimeout(silenceTimer.value)
        silenceTimer.value = null
      }
    } else if (voiceDetected.value) {
      // Speech paused: start the silence countdown once.
      if (!silenceTimer.value) {
        silenceTimer.value = setTimeout(() => {
          console.log('检测到静音，发送录音')
          voiceDetected.value = false
          stopRecordingForVAD()
        }, VAD_CONFIG.silenceThreshold)
      }
    }
    
    // Schedule the next sample.
    requestAnimationFrame(analyze)
  }
  
  analyze()
}

// Start capturing audio from the already-open monitoring stream once the
// analyser has detected speech.
const startRecordingForVAD = async () => {
  try {
    const stream = recordingStream.value
    if (!stream) return

    // Fresh chunk buffer for this utterance.
    audioChunks.value = []

    const recorder = new MediaRecorder(stream, {
      mimeType: 'audio/webm;codecs=opus'
    })
    recorder.ondataavailable = (event) => {
      if (event.data.size > 0) {
        audioChunks.value.push(event.data)
      }
    }
    mediaRecorder.value = recorder

    recorder.start(100) // flush a chunk every 100ms
    console.log('VAD录音已开始')
  } catch (error) {
    console.error('VAD录音启动失败:', error)
  }
}

// Stop the active VAD recording, assemble the collected chunks into a Blob,
// and ship it to the backend. Recordings under 1000 bytes are discarded as
// too short to contain speech. Recorder state is always cleared in `finally`.
const stopRecordingForVAD = async () => {
  try {
    if (!mediaRecorder.value || mediaRecorder.value.state === 'inactive') return

    // Attach the onstop handler BEFORE calling stop() so the stop event
    // can never be dispatched while nothing is listening for it.
    // (The original code called stop() first and assigned onstop afterwards.)
    const stopped = new Promise((resolve) => {
      mediaRecorder.value.onstop = resolve
    })
    mediaRecorder.value.stop()

    // Wait until the recorder has flushed its final dataavailable chunk
    await stopped

    // Assemble the captured chunks into a single audio file
    const audioBlob = new Blob(audioChunks.value, { type: 'audio/webm' })

    // Ignore recordings too small to plausibly contain speech
    if (audioBlob.size < 1000) {
      console.log('录音太短，忽略')
      return
    }

    console.log('VAD录音完成，大小:', audioBlob.size, 'bytes')

    // Hand the finished recording to the backend
    await sendAudioToBackend(audioBlob)

  } catch (error) {
    console.error('VAD录音处理失败:', error)
  } finally {
    mediaRecorder.value = null
    audioChunks.value = []
  }
}

// Component mount hook: scroll the message list to the bottom, then load
// the current user and the session history. Note fetchSessions is invoked
// without await, so the final log only marks the call, not its completion.
onMounted(() => {
  console.log('=== VoiceChat组件已挂载 ===')
  scrollToBottom()
  fetchCurrentUser()
  console.log('=== 准备调用fetchSessions ===')
  fetchSessions() // fetch session history
  console.log('=== fetchSessions调用完成 ===')
})
</script>

<style scoped>
/* Root layout: full-viewport flex row hosting the sidebar and main content. */
.voice-chat-app {
  display: flex;
  height: 100vh;
  background: #f5f5f5;
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}

/* Main content area */
.main-container {
  display: flex;
  flex: 1;
}

.main-container > .main-content {
  flex: 1;
}

/* Sidebar styles */
.sidebar {
  width: 300px;
  background: #2c3e50;
  color: white;
  display: flex;
  flex-direction: column;
  border-right: 1px solid #34495e;
}

/* User info row at the top of the sidebar: avatar + details side by side. */
.user-info {
  padding: 20px;
  border-bottom: 1px solid #34495e;
  display: flex;
  align-items: center;
  gap: 12px;
}

.avatar {
  width: 40px;
  height: 40px;
  background: #3498db;
  border-radius: 50%;
  display: flex;
  align-items: center;
  justify-content: center;
  color: white;
}

.user-details {
  flex: 1;
}

.username {
  font-weight: 600;
  margin-bottom: 4px;
}

.logout-btn {
  background: none;
  border: 1px solid #7f8c8d;
  color: #bdc3c7;
  padding: 4px 8px;
  border-radius: 4px;
  font-size: 12px;
  cursor: pointer;
  transition: all 0.2s;
}

/* Logout turns red on hover to signal a destructive action. */
.logout-btn:hover {
  background: #e74c3c;
  border-color: #e74c3c;
  color: white;
}

.new-chat-section {
  padding: 20px;
  border-bottom: 1px solid #34495e;
}

.new-chat-btn {
  width: 100%;
  background: #3498db;
  border: none;
  color: white;
  padding: 12px;
  border-radius: 8px;
  font-size: 14px;
  cursor: pointer;
  display: flex;
  align-items: center;
  justify-content: center;
  gap: 8px;
  transition: background 0.2s;
}

.new-chat-btn:hover {
  background: #2980b9;
}

/* Session history list: header stays fixed, container below scrolls. */
.sessions-list {
  flex: 1;
  overflow: hidden;
  display: flex;
  flex-direction: column;
}

.sessions-header {
  padding: 20px 20px 10px;
}

.sessions-header h3 {
  margin: 0;
  font-size: 14px;
  color: #bdc3c7;
  font-weight: 500;
}

.sessions-container {
  flex: 1;
  overflow-y: auto;
  padding: 0 10px;
}

.session-item {
  padding: 12px 10px;
  margin: 2px 0;
  border-radius: 6px;
  cursor: pointer;
  transition: background 0.2s;
}

.session-item:hover {
  background: #34495e;
}

.session-item.active {
  background: #3498db;
}

/* Single-line session title with ellipsis truncation. */
.session-title {
  font-size: 14px;
  margin-bottom: 4px;
  white-space: nowrap;
  overflow: hidden;
  text-overflow: ellipsis;
}

.session-time {
  font-size: 12px;
  color: #95a5a6;
}

/* Main content area styles */
.main-content {
  flex: 1;
  display: flex;
  flex-direction: column;
  background: white;
}

/* Chat column: fixed header, scrollable messages, input pinned below. */
.chat-area {
  flex: 1;
  display: flex;
  flex-direction: column;
  overflow: hidden;
}

.chat-header {
  padding: 20px;
  border-bottom: 1px solid #ecf0f1;
  background: white;
}

.chat-header h2 {
  margin: 0;
  font-size: 18px;
  color: #2c3e50;
}

.messages-container {
  flex: 1;
  overflow-y: auto;
  padding: 20px;
  display: flex;
  flex-direction: column;
  gap: 16px;
}

/* Message bubbles: user messages align right, assistant messages left. */
.message {
  display: flex;
  max-width: 70%;
}

.message.user {
  align-self: flex-end;
}

.message.assistant {
  align-self: flex-start;
}

.message-content {
  background: #f8f9fa;
  padding: 12px 16px;
  border-radius: 18px;
  position: relative;
}

.message.user .message-content {
  background: #3498db;
  color: white;
}

.message-text {
  margin-bottom: 4px;
  line-height: 1.4;
}

.message-time {
  font-size: 11px;
  opacity: 0.7;
}

/* Circular play button overlapping the bubble's top-right corner. */
.play-audio-btn {
  position: absolute;
  top: -8px;
  right: -8px;
  width: 32px;
  height: 32px;
  border-radius: 50%;
  background: #2ecc71;
  border: none;
  color: white;
  cursor: pointer;
  display: flex;
  align-items: center;
  justify-content: center;
  transition: all 0.2s;
  box-shadow: 0 2px 8px rgba(0,0,0,0.1);
}

.play-audio-btn:hover {
  background: #27ae60;
  transform: scale(1.1);
}

/* Red while audio is playing (acts as a stop affordance). */
.play-audio-btn.playing {
  background: #e74c3c;
}

/* Voice input area styles */
.voice-input-area {
  padding: 20px;
  border-top: 1px solid #ecf0f1;
  background: white;
}

/* Mode switch button styles (manual record vs. voice-activation mode) */
.mode-switch {
  display: flex;
  justify-content: center;
  margin-bottom: 15px;
}

.mode-btn {
  background: #f8f9fa;
  border: 2px solid #e9ecef;
  color: #6c757d;
  padding: 8px 16px;
  border-radius: 25px;
  font-size: 14px;
  cursor: pointer;
  display: flex;
  align-items: center;
  gap: 6px;
  transition: all 0.2s;
  user-select: none;
}

.mode-btn:hover {
  background: #e9ecef;
  border-color: #dee2e6;
}

.mode-btn.active {
  background: #007bff;
  border-color: #007bff;
  color: white;
}

.input-container {
  display: flex;
  justify-content: center;
  margin-bottom: 10px;
}

/* Manual push-to-record button. */
.record-btn {
  background: #3498db;
  border: none;
  color: white;
  padding: 16px 24px;
  border-radius: 50px;
  font-size: 16px;
  cursor: pointer;
  display: flex;
  align-items: center;
  gap: 8px;
  transition: all 0.2s;
  user-select: none;
}

.record-btn:hover {
  background: #2980b9;
  transform: translateY(-2px);
}

/* Pulses red while actively recording. */
.record-btn.recording {
  background: #e74c3c;
  animation: pulse 1.5s infinite;
}

.record-btn:disabled {
  background: #95a5a6;
  cursor: not-allowed;
  transform: none;
}

/* Voice-activation button styles (one modifier class per VAD state) */
.voice-activation-btn {
  background: #28a745;
  border: none;
  color: white;
  padding: 16px 24px;
  border-radius: 50px;
  font-size: 16px;
  cursor: pointer;
  display: flex;
  align-items: center;
  gap: 8px;
  transition: all 0.2s;
  user-select: none;
}

.voice-activation-btn:hover {
  background: #218838;
  transform: translateY(-2px);
}

/* Teal pulse: microphone is open and waiting for speech. */
.voice-activation-btn.listening {
  background: #17a2b8;
  animation: listening-pulse 2s infinite;
}

/* Orange pulse: speech detected, recording in progress. */
.voice-activation-btn.speaking {
  background: #fd7e14;
  animation: speaking-pulse 1s infinite;
}

/* Purple: captured audio is being processed by the backend. */
.voice-activation-btn.processing {
  background: #6f42c1;
}

/* Waiting state: amber pulse with dark text for contrast on the light
   background. Merged from two back-to-back duplicate `.waiting` rules —
   the first was fully shadowed by the second; the combined declarations
   below are exactly what the cascade resolved to before. */
.voice-activation-btn.waiting {
  background: #ffc107;
  color: #212529;
  animation: waiting-pulse 1.5s infinite;
}

/* Grey out and lock the button while voice activation is unavailable. */
.voice-activation-btn:disabled {
  background: #95a5a6;
  cursor: not-allowed;
  transform: none;
}

/* Inline "recording…" status row shown under the input controls. */
.recording-indicator {
  display: flex;
  align-items: center;
  justify-content: center;
  gap: 12px;
  color: #e74c3c;
  font-size: 14px;
}

/* Animated equalizer: four bars sharing one animation, each offset by
   0.1s to produce a rolling wave effect. */
.recording-wave {
  display: flex;
  gap: 3px;
  align-items: center;
}

.wave-bar {
  width: 3px;
  height: 20px;
  background: #e74c3c;
  border-radius: 2px;
  animation: wave 1.2s infinite ease-in-out;
}

.wave-bar:nth-child(2) {
  animation-delay: 0.1s;
}

.wave-bar:nth-child(3) {
  animation-delay: 0.2s;
}

.wave-bar:nth-child(4) {
  animation-delay: 0.3s;
}

/* Red halo pulse for the manual recording state. */
@keyframes pulse {
  0% {
    box-shadow: 0 0 0 0 rgba(231, 76, 60, 0.7);
  }
  70% {
    box-shadow: 0 0 0 10px rgba(231, 76, 60, 0);
  }
  100% {
    box-shadow: 0 0 0 0 rgba(231, 76, 60, 0);
  }
}

/* Wider teal halo for the passive listening state. */
@keyframes listening-pulse {
  0% {
    box-shadow: 0 0 0 0 rgba(23, 162, 184, 0.7);
  }
  70% {
    box-shadow: 0 0 0 15px rgba(23, 162, 184, 0);
  }
  100% {
    box-shadow: 0 0 0 0 rgba(23, 162, 184, 0);
  }
}

/* Orange halo plus slight scale-up while speech is being captured. */
@keyframes speaking-pulse {
  0% {
    box-shadow: 0 0 0 0 rgba(253, 126, 20, 0.8);
    transform: scale(1);
  }
  50% {
    box-shadow: 0 0 0 8px rgba(253, 126, 20, 0.3);
    transform: scale(1.05);
  }
  100% {
    box-shadow: 0 0 0 0 rgba(253, 126, 20, 0);
    transform: scale(1);
  }
}

/* Gentle amber halo for the waiting state. */
@keyframes waiting-pulse {
  0% {
    box-shadow: 0 0 0 0 rgba(255, 193, 7, 0.6);
    transform: scale(1);
  }
  50% {
    box-shadow: 0 0 0 12px rgba(255, 193, 7, 0.2);
    transform: scale(1.02);
  }
  100% {
    box-shadow: 0 0 0 0 rgba(255, 193, 7, 0);
    transform: scale(1);
  }
}

/* Vertical bounce of the equalizer bars (scaled on the Y axis). */
@keyframes wave {
  0%, 40%, 100% {
    transform: scaleY(0.4);
  }
  20% {
    transform: scaleY(1);
  }
}

/* Scrollbar styles (WebKit-only): thin thumbs matched to each panel's
   background — dark thumb in the sidebar, light thumb in the chat area. */
.sessions-container::-webkit-scrollbar,
.messages-container::-webkit-scrollbar {
  width: 6px;
}

.sessions-container::-webkit-scrollbar-track,
.messages-container::-webkit-scrollbar-track {
  background: transparent;
}

.sessions-container::-webkit-scrollbar-thumb {
  background: #34495e;
  border-radius: 3px;
}

.messages-container::-webkit-scrollbar-thumb {
  background: #bdc3c7;
  border-radius: 3px;
}

.sessions-container::-webkit-scrollbar-thumb:hover,
.messages-container::-webkit-scrollbar-thumb:hover {
  background: #7f8c8d;
}

/* Modal (new-session dialog) styles */
.modal-overlay {
  position: fixed;
  top: 0;
  left: 0;
  right: 0;
  bottom: 0;
  background: rgba(0, 0, 0, 0.5);
  display: flex;
  align-items: center;
  justify-content: center;
  z-index: 1000;
}

.modal-content {
  background: white;
  border-radius: 12px;
  width: 90%;
  max-width: 500px;
  box-shadow: 0 10px 30px rgba(0, 0, 0, 0.3);
  animation: modalSlideIn 0.3s ease-out;
}

/* Entrance animation: dialog drops in while fading and scaling up. */
@keyframes modalSlideIn {
  from {
    opacity: 0;
    transform: translateY(-50px) scale(0.9);
  }
  to {
    opacity: 1;
    transform: translateY(0) scale(1);
  }
}

.modal-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 20px 24px 16px;
  border-bottom: 1px solid #ecf0f1;
}

.modal-header h3 {
  margin: 0;
  color: #2c3e50;
  font-size: 18px;
  font-weight: 600;
}

.close-btn {
  background: none;
  border: none;
  font-size: 24px;
  color: #95a5a6;
  cursor: pointer;
  padding: 0;
  width: 30px;
  height: 30px;
  display: flex;
  align-items: center;
  justify-content: center;
  border-radius: 50%;
  transition: all 0.2s;
}

.close-btn:hover {
  background: #ecf0f1;
  color: #7f8c8d;
}

.modal-body {
  padding: 24px;
}

.input-group {
  margin-bottom: 16px;
}

.input-group label {
  display: block;
  margin-bottom: 8px;
  color: #2c3e50;
  font-weight: 500;
  font-size: 14px;
}

/* Role description row: "扮演一个 [input] 的" inline layout, wrapping on
   narrow screens. */
.role-input-container {
  display: flex;
  align-items: center;
  gap: 8px;
  flex-wrap: wrap;
}

.role-prefix, .role-suffix {
  color: #2c3e50;
  font-weight: 500;
  font-size: 14px;
  white-space: nowrap;
}

.prefix-input, .suffix-input {
  flex: 1;
  min-width: 80px;
  padding: 12px 16px;
  border: 2px solid #ecf0f1;
  border-radius: 8px;
  font-size: 14px;
  transition: all 0.2s;
  box-sizing: border-box;
}

.prefix-input:focus, .suffix-input:focus {
  outline: none;
  border-color: #3498db;
  box-shadow: 0 0 0 3px rgba(52, 152, 219, 0.1);
}

/* NOTE(review): the template applies both .role-input and .prefix-input to
   the same element; .role-input is declared later so its width: 100% wins
   over the flex sizing above — confirm this overlap is intentional. */
.role-input {
  width: 100%;
  padding: 12px 16px;
  border: 2px solid #ecf0f1;
  border-radius: 8px;
  font-size: 14px;
  transition: all 0.2s;
  box-sizing: border-box;
}

.role-input:focus {
  outline: none;
  border-color: #3498db;
  box-shadow: 0 0 0 3px rgba(52, 152, 219, 0.1);
}

.example-text {
  font-size: 12px;
  color: #7f8c8d;
  line-height: 1.4;
  background: #f8f9fa;
  padding: 12px;
  border-radius: 6px;
  border-left: 3px solid #3498db;
}

.modal-footer {
  display: flex;
  justify-content: flex-end;
  gap: 12px;
  padding: 16px 24px 24px;
  border-top: 1px solid #ecf0f1;
}

.cancel-btn, .confirm-btn {
  padding: 10px 20px;
  border: none;
  border-radius: 6px;
  font-size: 14px;
  font-weight: 500;
  cursor: pointer;
  transition: all 0.2s;
}

.cancel-btn {
  background: #ecf0f1;
  color: #7f8c8d;
}

.cancel-btn:hover {
  background: #d5dbdb;
}

.confirm-btn {
  background: #3498db;
  color: white;
}

.confirm-btn:hover:not(:disabled) {
  background: #2980b9;
}

.confirm-btn:disabled {
  background: #bdc3c7;
  cursor: not-allowed;
}
</style>