<template>
  <!-- Message list -->
  <div class="message-list" ref="messageList">
    <!-- Render AI and user messages -->
    <div
      v-for="(message, index) in messages"
      :key="index"
      :class="['message-bubble', message.sender]"
    >
      <div class="avatar-container">
        <a-avatar :size="48" :src="message.sender === 'ai' ? interviewerAvatar : userAvatar" />
        <div class="avatar-glow"></div>
      </div>
      <div class="message-content">
        <!-- User content -->
        <div class="message-text" v-if="message.sender === 'user'">{{ message.text }}</div>

        <!-- AI content: autoplaying audio reply plus its transcript -->
        <div v-else>
          <audio
            v-if="message.audioUrl"
            :src="message.audioUrl"
            autoplay
            @play="onInterviewerStatus(2)"
            @ended="audioEnd"
          ></audio>

          <div class="message-text">{{ message.text }}</div>
        </div>
      </div>
    </div>

    <!-- "AI is typing" indicator -->
    <div v-if="isTyping" class="message-bubble ai">
      <div class="avatar-container">
        <a-avatar :size="48" :src="interviewerAvatar" />
        <div class="avatar-glow"></div>
      </div>
      <div class="message-content">
        <div class="message-text typing"><span></span><span></span><span></span></div>
      </div>
    </div>
  </div>

  <!-- Audio visualizer area (shown only while recording) -->
  <div v-if="isRecording" class="audio-visualizer-container">
    <div class="visualizer-header">
      <div class="recording-indicator">
        <span class="recording-pulse"></span>
        <span>{{ language === 'chinese' ? '正在录音...' : 'Recording...' }}</span>
      </div>
      <div class="timer">{{ formatRecordingTime(recordingTime) }}</div>
    </div>
    <canvas ref="visualizer" class="audio-visualizer" width="600" height="100"></canvas>
  </div>

  <!-- Input area -->
  <div class="input-area">
    <!-- Speech-recognition status indicator -->
    <div
      v-if="isRecognizing"
      class="recognition-status"
      :class="{ 'recognition-end': recognitionEnded }"
    >
      <div class="sound-wave">
        <span></span><span></span><span></span><span></span><span></span>
      </div>
      <span>{{
        recognitionEnded
          ? language === 'chinese'
            ? '语音识别完成'
            : 'Recognition complete'
          : language === 'chinese'
          ? '正在识别语音...'
          : 'Recognizing speech...'
      }}</span>
    </div>

    <a-textarea
      v-model:value="inputMessage"
      :placeholder="language === 'chinese' ? '请输入内容' : 'Please enter the content'"
      :auto-size="{ minRows: 2, maxRows: 15 }"
      @pressEnter="handleSend"
      class="message-input"
      :class="{ recognizing: isRecognizing }"
      :readonly="isRecognizing && !recognitionEnded"
    />
    <a-flex justify="space-between" align="center" gap="12px">
      <!-- Send-button area; flex-grow lets it occupy most of the space -->
      <div class="input-buttons-container">
        <!-- Voice button (toggles to a red "stop" button while recording) -->
        <div class="voice-button-wrapper">
          <a-button
            v-if="!isRecording"
            type="primary"
            @click="handleVideoSend"
            :disabled="!isStartInterview"
            class="voice-button"
            size="large"
          >
            <template #icon>
              <AudioOutlined />
            </template>
            <span>{{ language === 'chinese' ? '语音' : 'Voice' }}</span>
          </a-button>
          <a-button
            v-else
            type="primary"
            danger
            @click="stopRecording"
            class="voice-button recording"
            size="large"
          >
            <template #icon>
              <AudioOutlined />
            </template>
            <span>{{ language === 'chinese' ? '结束' : 'Stop' }}</span>
          </a-button>
        </div>

        <!-- Send button -->
        <a-button
          type="primary"
          @click="handleSend"
          :disabled="!inputMessage.trim() || !isStartInterview"
          :loading="isSending"
          class="send-button"
        >
          {{ language === 'chinese' ? '发送' : 'Send' }}
        </a-button>
      </div>
    </a-flex>
  </div>
</template>

<script setup lang="ts">
import { ref, nextTick, onMounted, watch, onBeforeUnmount } from 'vue'
import { AudioOutlined } from '@ant-design/icons-vue'
import ClientWebSocket from '@/utils/ClientWebSocket'
import { audioTecInterviewUsingGet } from '@/api/tecInterviewController'
import { audioHrInterviewUsingGet } from '@/api/hrInterviewController'
import { useCommonStore } from '@/stores/useCommonStore'
import { getLocalCache } from '@/utils/LocalStorageUtils'

// Static asset path: avatar shown next to the candidate's messages
const userAvatar =
  'https://gw.alipayobjects.com/zos/antfincdn/LlvErxo8H9/photo-1503185912284-5271ff81b9a8.png'

/**
 * Props
 * State and callbacks passed down from the parent interview page.
 */
interface Props {
  // Whether the interview has started (gates the voice/send buttons)
  isStartInterview: boolean
  // UI language: 'chinese' selects Chinese strings, anything else English
  language: string
  // Full chat history rendered in the template
  messages: Message[]
  // 0 = technical interview, 1 = HR interview (selects the audio API)
  interviewType: number
  interviewerAvatar: string
  interviewEnd: () => void
  // Update the interviewer's status animation (values 1/2/3 used at call sites)
  onInterviewerStatus: (status: number) => void
  // Append a chat message to the conversation
  addMessage: (message: Message) => void
  // Candidate starts speaking (parent begins face tracking)
  startSpeaking: () => void
  // Candidate stops speaking
  stopSpeaking: () => void
  // Show the "generating question" loading animation
  showGeneratingQuestion: () => void
  // Hide the "generating question" loading animation
  hideGeneratingQuestion: () => void
}

// Chat message shape
interface Message {
  // Sender identity: 'ai' or 'user'
  sender: string
  // Message text
  text: string
  // Audio URL — present only on AI messages that carry a spoken reply
  audioUrl?: string
}

// Prop defaults
const props = withDefaults(defineProps<Props>(), {
  isStartInterview: false,
})

const commonStore = useCommonStore()

// Whether the server has flagged the interview as finished (see 'finish' event)
const interviewEnd = ref(false)
// Text currently in the input box (typed or voice-transcribed)
const inputMessage = ref<string>('')
// Buffered text of the AI's current question (arrives before its audio)
const currentAiQuestion = ref<string>('')
// Send-in-progress flag (drives the send button's loading state)
const isSending = ref(false)
// Whether the "AI is typing" indicator is visible
const isTyping = ref(false)
// Template ref: scrollable message-list element
const messageList = ref(null)
// Whether the microphone is currently recording
const isRecording = ref(false)
// Whether speech recognition is in progress
const isRecognizing = ref(false)
// Whether speech recognition has finished
const recognitionEnded = ref(false)
// Elapsed recording time, in seconds
const recordingTime = ref(0)
// Interval id for the one-second recording timer
let recordingTimer: number | null = null
// Template ref: visualizer canvas element
const visualizer = ref(null)
// Web Audio objects used by the visualizer, created per recording session
let audioContext: any = null
let analyser: any = null
let dataArray: any = null
let animationId: any = null
// Websocket connection to the interview backend
let websocket: ClientWebSocket | null

// Audio chunks captured each second by the MediaRecorder
let audioBufferQueue: Blob[] = []
// Index of the next chunk in audioBufferQueue to upload
let currentOffset = 0
// Active MediaRecorder instance
let mediaRecorder: MediaRecorder | null = null

// Interviewer gender (read from local cache on mount)
const interviewerGender = ref('')

/**
 * Format a recording duration as a zero-padded "MM:SS" string.
 * @param seconds total elapsed seconds
 */
const formatRecordingTime = (seconds: number) => {
  const minutes = String(Math.floor(seconds / 60)).padStart(2, '0')
  const remainder = String(seconds % 60).padStart(2, '0')
  return `${minutes}:${remainder}`
}

// Keep the view pinned to the newest message.
// Watch via a getter so the watcher survives the parent replacing the
// messages array with a new instance — watching props.messages directly
// only ever tracks the original array object.
watch(
  () => props.messages,
  () => {
    scrollToBottom()
  },
  { deep: true }
)

/**
 * Send the candidate's answer (typed or voice-transcribed) to the interviewer.
 *
 * Appends the user bubble, flips the UI into "AI is thinking" mode, then
 * requests the interviewer's audio reply (tech vs HR endpoint chosen by
 * props.interviewType). The reply itself arrives later over the websocket
 * 'audio' event, which clears isTyping.
 *
 * Fix: isSending was checked as a re-entry guard and bound to the send
 * button's loading state but was never set, so neither worked. It is now
 * set for the duration of the request and released in `finally`.
 */
const handleSend = async () => {
  // Capture once so the message added matches what is sent, even if the
  // input changes while the request is in flight.
  const content = inputMessage.value.trim()
  if (!content) return
  if (isSending.value) return
  isSending.value = true

  // Show the "interviewer is thinking" indicator
  isTyping.value = true

  // Show the "generating question" loading animation
  props.showGeneratingQuestion()

  // Switch the interviewer status animation
  props.onInterviewerStatus(1)

  // Append the user's message bubble
  props.addMessage({
    sender: 'user',
    text: content,
  })

  try {
    // Fetch the interviewer's audio reply: 0 = technical, otherwise HR
    if (props.interviewType === 0) {
      await audioTecInterviewUsingGet({
        userContent: content,
        gender: interviewerGender.value,
      })
    } else {
      await audioHrInterviewUsingGet({
        userContent: content,
        gender: interviewerGender.value,
      })
    }
  } finally {
    // Always release the send lock, even if the request rejects
    isSending.value = false
  }

  // Clear the input and keep the newest message visible
  inputMessage.value = ''
  scrollToBottom()
}

/**
 * Scroll the message pane to its newest entry, after the DOM has been
 * updated with any freshly appended message.
 */
const scrollToBottom = () => {
  nextTick(() => {
    const pane = messageList.value
    if (!pane) return
    pane.scrollTop = pane.scrollHeight
  })
}

/**
 * Toggle voice input.
 *
 * Already recording → stop. Otherwise start a new recording session:
 * reset the recognition UI, clear the input, open the microphone and
 * start the one-second elapsed-time counter.
 */
const handleVideoSend = () => {
  // Parent starts face tracking; interviewer switches to "listening"
  props.startSpeaking()
  props.onInterviewerStatus(3)

  if (isRecording.value) {
    // Already recording — this call stops it
    stopRecording()
    return
  }

  // Enter recording mode and reset the recognition indicator
  isRecording.value = true
  isRecognizing.value = true
  recognitionEnded.value = false
  console.log('开始录音')
  // Wipe any previous input before transcription appends to it
  inputMessage.value = ''
  // Open the microphone and begin streaming audio
  startRecording()

  // Restart the elapsed-time counter (one tick per second)
  recordingTime.value = 0
  recordingTimer = window.setInterval(() => {
    recordingTime.value += 1
  }, 1000)
}

/**
 * Begin the interview: show the typing bubble and the "generating
 * question" overlay while the first question is being produced.
 * Exposed to the parent via defineExpose.
 */
const startAiInterview = () => {
  // Three-dot typing indicator in the chat list
  isTyping.value = true
  // Parent-level loading animation
  props.showGeneratingQuestion()
}

/**
 * Start capturing microphone audio and stream it to the backend.
 *
 * Requests mic access, records webm chunks once per second via
 * MediaRecorder, wires the stream into an AnalyserNode for the canvas
 * visualizer, and uploads each chunk over the websocket in slices of at
 * most 8KB, preceded by a 'start' control message.
 */
// The MediaRecorder instance controls starting/stopping the recording.
// Request mic permission, create the recorder, record and send data every second.
async function startRecording() {
  try {
    // Ask for microphone permission
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true })

    // Create the MediaRecorder instance (the recorder)
    mediaRecorder = new MediaRecorder(stream, {
      mimeType: 'audio/webm',
      audioBitsPerSecond: 128000,
    })

    // Set up the audio analyser used by the visualizer
    // Core Web Audio instance
    audioContext = new window.AudioContext()
    // Analyser node, used to sample the user's audio for drawing
    analyser = audioContext.createAnalyser()
    // Connect the microphone stream to the analyser
    const source = audioContext.createMediaStreamSource(stream)
    source.connect(analyser)
    analyser.fftSize = 256
    const bufferLength = analyser.frequencyBinCount
    dataArray = new Uint8Array(bufferLength)

    // Start the visualization once the canvas is in the DOM
    nextTick(() => {
      if (visualizer.value) {
        initializeVisualizer()
        drawVisualizer()
      }
    })

    // Maximum websocket message size is 8KB
    const MAX_MESSAGE_SIZE = 8192

    // Recorder callback (fires once per second with the latest chunk)
    mediaRecorder.ondataavailable = (event: BlobEvent) => {
      // console.log(event.data.type) // expected to print "audio/webm"
      if (event.data.size > 0) {
        // Queue the one-second chunk
        audioBufferQueue.push(event.data)
        // Kick off sliced upload
        sendAudioData()
      }
    }

    // Upload the queued audio chunks in <= MAX_MESSAGE_SIZE slices
    function sendAudioData() {
      // Nothing queued (or everything already sent) — stop
      if (audioBufferQueue.length === 0 || currentOffset >= audioBufferQueue.length) {
        return // no data to send
      }

      // Current chunk to upload (currentOffset is a module-level cursor)
      const audioData = audioBufferQueue[currentOffset]

      // Read the chunk into an ArrayBuffer before slicing
      const reader = new FileReader()
      // Fires once the chunk is fully read
      reader.onload = () => {
        // Raw bytes of the audio chunk
        const arrayBuffer = reader.result as ArrayBuffer
        // Byte offset within this chunk
        let offset = 0

        // Send the chunk in fixed-size slices
        while (offset < arrayBuffer.byteLength) {
          // Size of the next slice (last slice may be smaller)
          const sliceSize = Math.min(MAX_MESSAGE_SIZE, arrayBuffer.byteLength - offset)
          // Cut the slice out of the buffer
          const slicedData = arrayBuffer.slice(offset, offset + sliceSize)
          // Ship it over the websocket
          websocket?.sendMessage(slicedData)
          // Advance within the chunk
          offset += sliceSize
        }

        // Advance the queue cursor to the next chunk
        currentOffset += 1

        // More chunks queued — recurse to send the next one
        if (currentOffset < audioBufferQueue.length) {
          sendAudioData()
        }
      }

      // Read the chunk; triggers reader.onload above
      reader.readAsArrayBuffer(audioData)
    }

    websocket?.sendMessage('start')
    // Start recording with a 1s timeslice (ondataavailable fires per slice)
    mediaRecorder.start(1000)
  } catch (error) {
    console.error('无法访问麦克风:', error)
    alert('无法访问麦克风，请确保已授予权限')
  }
}

/**
 * Sync the canvas drawing-buffer size with its CSS layout size so the
 * visualizer renders without stretching.
 */
const initializeVisualizer = () => {
  const canvas = visualizer.value
  if (!canvas) return
  canvas.width = canvas.offsetWidth
  canvas.height = canvas.offsetHeight
}

/**
 * Stop voice capture and tear down everything startRecording set up:
 * the MediaRecorder and its tracks, the elapsed-time interval, the
 * visualizer animation loop, and the AudioContext. Also tells the
 * backend (via 'stop') that the audio stream is complete.
 *
 * Fix: cancelAnimationFrame was called with a possibly-null handle and
 * animationId was never reset, leaving a stale id behind.
 */
const stopRecording = () => {
  // Report how long the candidate spoke
  commonStore.setGlobalTime(recordingTime.value)
  // Parent stops face tracking; interviewer returns to "thinking"
  props.stopSpeaking()
  props.onInterviewerStatus(1)

  if (mediaRecorder && mediaRecorder.state !== 'inactive') {
    mediaRecorder.stop()
    // Release the microphone
    mediaRecorder.stream.getTracks().forEach((track) => track.stop())
  }

  if (recordingTimer) {
    clearInterval(recordingTimer)
    recordingTimer = null
  }

  // Stop the visualizer loop (it may never have started)
  if (animationId !== null) {
    cancelAnimationFrame(animationId)
    animationId = null
  }
  isRecording.value = false

  websocket?.sendMessage('stop')

  if (audioContext) {
    audioContext.close()
    audioContext = null
  }

  // Reset the upload queue so the next session doesn't resend old chunks
  audioBufferQueue = []
  currentOffset = 0

  mediaRecorder = null
}

/**
 * Draw one frame of the audio-frequency bar visualization and schedule
 * the next via requestAnimationFrame. The loop stops itself once
 * isRecording flips to false.
 */
function drawVisualizer() {
  if (!isRecording.value) return
  // Schedule the next frame before doing this frame's work
  animationId = requestAnimationFrame(drawVisualizer)

  if (!analyser || !visualizer.value) return

  // Fill dataArray with the current frequency magnitudes (0-255 per bin)
  analyser.getByteFrequencyData(dataArray)
  // Draw onto the visualizer canvas
  const canvas = visualizer.value
  const canvasContext = canvas.getContext('2d')
  const width = canvas.width
  const height = canvas.height

  // Clear the previous frame
  canvasContext.clearRect(0, 0, width, height)

  // Faint horizontal gradient background
  const gradient = canvasContext.createLinearGradient(0, 0, width, 0)
  gradient.addColorStop(0, 'rgba(24, 144, 255, 0.1)')
  gradient.addColorStop(1, 'rgba(104, 195, 248, 0.1)')
  canvasContext.fillStyle = gradient
  canvasContext.fillRect(0, 0, width, height)

  // Bar-chart layout parameters
  const barCount = 64 // number of bars
  const barSpacing = 2
  const barWidth = width / barCount - barSpacing

  // Subsample dataArray so barCount bars cover the whole spectrum
  const step = Math.floor(dataArray.length / barCount)

  // Draw each bar
  for (let i = 0; i < barCount; i++) {
    // Frequency magnitude normalized to [0, 1]
    const index = i * step
    const value = dataArray[index] / 255.0

    // Bar height (squared for a punchier non-linear response)
    const barHeight = value * value * height * 0.8

    // Bar position (bars sit on the bottom edge)
    const x = i * (barWidth + barSpacing)
    const y = height - barHeight

    // Color varies with amplitude within the blue range
    const hue = 200 + value * 40 // blue hues
    const saturation = 70 + value * 30 // more saturated when louder
    const lightness = 50 + value * 20 // brighter when louder

    canvasContext.fillStyle = `hsl(${hue}, ${saturation}%, ${lightness}%)`

    // Bar body with rounded top corners
    const radius = Math.min(barWidth / 2, 4)
    roundedRect(canvasContext, x, y, barWidth, barHeight, radius)

    // Translucent overlay fading toward the baseline for a glow effect
    const gradientReflection = canvasContext.createLinearGradient(0, height - barHeight, 0, height)
    gradientReflection.addColorStop(0, `hsla(${hue}, ${saturation}%, ${lightness}%, 0.5)`)
    gradientReflection.addColorStop(1, `hsla(${hue}, ${saturation}%, ${lightness}%, 0)`)

    canvasContext.fillStyle = gradientReflection
    canvasContext.fillRect(x, height - barHeight, barWidth, barHeight * 0.3)
  }
}

/**
 * Trace and fill a bar shape whose top two corners are rounded and whose
 * bottom edge is square, so bars sit flush on the canvas baseline.
 *
 * Fix: all six parameters were implicitly `any`; they are now typed.
 *
 * @param ctx    target 2D rendering context (already holds fillStyle)
 * @param x      left edge of the bar
 * @param y      top edge of the bar
 * @param width  bar width
 * @param height bar height
 * @param radius corner radius applied to the two top corners
 */
function roundedRect(
  ctx: CanvasRenderingContext2D,
  x: number,
  y: number,
  width: number,
  height: number,
  radius: number
) {
  ctx.beginPath()
  ctx.moveTo(x + radius, y)
  ctx.lineTo(x + width - radius, y)
  // Round the top-right corner
  ctx.quadraticCurveTo(x + width, y, x + width, y + radius)
  ctx.lineTo(x + width, y + height)
  ctx.lineTo(x, y + height)
  ctx.lineTo(x, y + radius)
  // Round the top-left corner back to the start point
  ctx.quadraticCurveTo(x, y, x + radius, y)
  ctx.closePath()
  ctx.fill()
}

/**
 * Fired when an interviewer audio clip finishes playing.
 * If the server already flagged the interview as over, bubble that up to
 * the parent; otherwise hand the turn back to the candidate by opening
 * voice input automatically.
 */
const audioEnd = () => {
  if (!interviewEnd.value) {
    // Candidate's turn: interviewer listens, microphone opens
    props.onInterviewerStatus(3)
    handleVideoSend()
    return
  }
  // Interview is over — notify the parent
  props.interviewEnd()
}

/**
 * (Re)initialize the websocket connection to the interview backend and
 * register handlers for every server-push event. Returns the connection.
 * Exposed to the parent via defineExpose.
 *
 * Fix: the 'text' handler used to arm an unconditional 3s "hide
 * indicator" timeout on every frame before parsing it, so a pending
 * timer could switch the recognition UI off while partial results were
 * still arriving. The hide timer is now armed only on the final frame
 * (code 2) and cancelled whenever a new partial frame (code 1) arrives.
 */
const createWsConnection = () => {
  // Drop any stale connection before opening a new one
  if (websocket) websocket.disconnect()
  websocket = new ClientWebSocket()
  websocket.connect()

  // Pending timer that hides the recognition indicator after completion
  let recognitionHideTimer: ReturnType<typeof setTimeout> | null = null

  // AI audio reply: pair the buffered question text with the audio URL
  websocket.on('audio', (message: any) => {
    // Take the buffered answer text, then clear the buffer
    const audioText = currentAiQuestion.value
    currentAiQuestion.value = ''
    props.addMessage({
      sender: 'ai',
      audioUrl: message.data,
      text: audioText,
    })
    // Stop the typing indicator
    isTyping.value = false

    // Hide the "generating question" loading animation
    props.hideGeneratingQuestion()
  })
  // Speech-recognition frames: code 1 = partial text, code 2 = final
  websocket.on('text', (message: any) => {
    const data = JSON.parse(message.data)
    if (data.code === 1) {
      // Still recognizing: cancel any pending hide and append the text
      if (recognitionHideTimer !== null) {
        clearTimeout(recognitionHideTimer)
        recognitionHideTimer = null
      }
      isRecognizing.value = true
      recognitionEnded.value = false
      inputMessage.value += data.text
    } else if (data.code === 2) {
      // Recognition finished: show "complete", then hide after 2s
      recognitionEnded.value = true
      recognitionHideTimer = setTimeout(() => {
        isRecognizing.value = false
        recognitionEnded.value = false
        recognitionHideTimer = null
      }, 2000)
    }
  })
  // Text content of the AI's spoken question (arrives before its audio)
  websocket.on('audio_text', (message: any) => {
    currentAiQuestion.value = message.data
  })
  // Speaking-rate event: recognition is complete at this point, so the
  // transcribed answer is submitted automatically
  websocket.on('speed', (message: any) => {
    commonStore.setAudioSpeed(message.data)
    handleSend()
  })
  // Detected voice emotion
  websocket.on('audio_emotion', (message: any) => {
    const data = JSON.parse(message.data)
    commonStore.setAudioEmotion(data.emotion)
  })
  // Server flags the interview as over; acted on when the last clip
  // finishes playing (see audioEnd)
  websocket.on('finish', () => {
    interviewEnd.value = true
  })
  return websocket
}

// On mount: read the cached interviewer gender and keep the visualizer
// canvas sized to its container (also on window resizes).
onMounted(() => {
  interviewerGender.value = getLocalCache('interviewerGender') as any
  console.log('面试官性别:' + interviewerGender.value)

  if (!visualizer.value) return
  initializeVisualizer()
  window.addEventListener('resize', initializeVisualizer)
})

// On teardown: close the websocket and release the microphone if a
// recording is still in progress when the component unmounts.
onBeforeUnmount(() => {
  websocket?.disconnect()
  if (mediaRecorder) {
    mediaRecorder.stop()
    for (const track of mediaRecorder.stream.getTracks()) {
      track.stop()
    }
  }
})

// Expose to the parent: websocket bootstrap and the first-question loader
defineExpose({
  createWsConnection,
  startAiInterview,
})
</script>

<style scoped>
/* Global styles */
.full-height {
  height: 100vh;
}

/* Message list area */
.message-list {
  flex: 1;
  padding: 20px;
  overflow-y: auto;
  display: flex;
  flex-direction: column;
  gap: 20px;
  background: linear-gradient(135deg, #f5f7fa 0%, #eef2f8 100%);
  scrollbar-width: thin;
  scrollbar-color: rgba(0, 0, 0, 0.1) transparent;
}

.message-list::-webkit-scrollbar {
  width: 4px;
}

.message-list::-webkit-scrollbar-track {
  background: transparent;
}

.message-list::-webkit-scrollbar-thumb {
  background-color: rgba(0, 0, 0, 0.1);
  border-radius: 10px;
}

/* Message bubble base style */
.message-bubble {
  display: flex;
  gap: 16px;
  max-width: 80%;
  align-items: flex-start;
}

/* AI messages on the left */
.message-bubble.ai {
  align-self: flex-start;
}

/* User messages on the right */
.message-bubble.user {
  align-self: flex-end;
  flex-direction: row-reverse;
}

/* Avatar container */
.avatar-container {
  position: relative;
  flex-shrink: 0;
  margin: 0 6px;
}

/* Avatar glow effect */
.avatar-glow {
  position: absolute;
  top: -5px;
  left: -5px;
  right: -5px;
  bottom: -5px;
  border-radius: 50%;
  z-index: -1;
  background: radial-gradient(
    circle,
    rgba(24, 144, 255, 0.15) 0%,
    rgba(65, 117, 232, 0.05) 60%,
    transparent 70%
  );
  opacity: 0;
  transition: opacity 0.3s ease;
}

.message-bubble:hover .avatar-glow {
  opacity: 1;
}

.message-bubble.ai .avatar-glow {
  animation: pulse-gentle 2.5s infinite ease-in-out;
}

/* Message content area */
.message-content {
  display: flex;
  flex-direction: column;
  background-color: #f5f7fa;
  padding: 12px 16px;
  border-radius: 12px;
  box-shadow: 0 2px 6px rgba(0, 0, 0, 0.05);
  position: relative;
}

/* Message text style */
.message-text {
  line-height: 1.6;
  font-size: 15px;
  white-space: pre-wrap;
}

/* AI message bubble style */
.message-bubble.ai .message-content {
  background-color: #fff;
  border-bottom-left-radius: 4px;
  border-left: 3px solid rgba(24, 144, 255, 0.5);
}

/* User message bubble style */
.message-bubble.user .message-content {
  background: linear-gradient(135deg, rgba(24, 144, 255, 0.1), rgba(65, 117, 232, 0.1));
  border-bottom-right-radius: 4px;
}

/* Input area style */
.input-area {
  padding: 16px 20px;
  border-top: 1px solid rgba(0, 0, 0, 0.05);
  background-color: white;
  position: relative;
  border-radius: 0 0 12px 12px;
}

.message-input {
  margin-bottom: 14px;
  border-radius: 10px;
  resize: none;
  transition: all 0.3s ease;
  border: 1px solid rgba(0, 0, 0, 0.1);
  box-shadow: 0 2px 6px rgba(0, 0, 0, 0.03);
}

.message-input:focus {
  border-color: rgba(24, 144, 255, 0.3);
  box-shadow: 0 2px 8px rgba(24, 144, 255, 0.1);
}

.message-input.recognizing {
  border-color: rgba(24, 144, 255, 0.6);
  background-color: rgba(24, 144, 255, 0.03);
}

/* Button container */
.input-buttons-container {
  display: flex;
  justify-content: flex-end;
  align-items: center;
  gap: 16px;
  width: 100%;
}

.voice-button-wrapper {
  display: flex;
  align-items: center;
}

.voice-button {
  color: white;
  height: 44px;
  width: auto;
  min-width: 200px;
  border-radius: 22px;
  font-size: 15px;
  padding: 0 16px;
  display: flex;
  justify-content: center;
  align-items: center;
  gap: 8px;
  transition: all 0.3s ease;
  background: linear-gradient(135deg, rgba(24, 144, 255, 0.9), rgba(65, 117, 232, 0.9));
  border: none;
  box-shadow: 0 2px 6px rgba(65, 117, 232, 0.3);
}

.voice-button:hover {
  transform: translateY(-2px);
  box-shadow: 0 4px 8px rgba(65, 117, 232, 0.4);
  background: linear-gradient(135deg, rgb(24, 144, 255), rgb(65, 117, 232));
}

.voice-button:active {
  transform: translateY(0);
}

.voice-button :deep(.anticon) {
  font-size: 18px;
}

.voice-button.recording {
  background: linear-gradient(135deg, #ff7c7c, #ff4747);
  box-shadow: 0 2px 6px rgba(255, 76, 76, 0.3);
  animation: pulse-recording 1.5s infinite;
}

.send-button {
  height: 40px;
  border-radius: 8px;
  font-size: 14px;
  transition: all 0.3s ease;
  background: linear-gradient(135deg, rgba(24, 144, 255, 0.9), rgba(65, 117, 232, 0.9));
  color: white;
  border: none;
  box-shadow: 0 2px 6px rgba(65, 117, 232, 0.2);
  padding: 0 24px;
  min-width: 88px;
}

.send-button:hover {
  background: linear-gradient(135deg, rgb(24, 144, 255), rgb(65, 117, 232));
  box-shadow: 0 4px 8px rgba(65, 117, 232, 0.3);
  transform: translateY(-2px);
}

/* Speech-recognition status */
.recognition-status {
  display: flex;
  align-items: center;
  padding: 8px 12px;
  margin-bottom: 12px;
  background-color: rgba(24, 144, 255, 0.05);
  border-radius: 8px;
  color: rgba(24, 144, 255, 0.9);
  font-size: 14px;
  transition: all 0.3s ease;
  border: 1px solid rgba(24, 144, 255, 0.1);
}

.recognition-status.recognition-end {
  background-color: rgba(82, 196, 26, 0.15);
  color: rgb(82, 196, 26);
  border: 1px solid rgba(82, 196, 26, 0.2);
  position: relative;
  padding-left: 32px;
}

.recognition-status.recognition-end::before {
  content: '✓';
  position: absolute;
  left: 12px;
  font-size: 16px;
  font-weight: bold;
}

.sound-wave {
  display: flex;
  align-items: center;
  margin-right: 10px;
}

.sound-wave span {
  display: inline-block;
  width: 3px;
  height: 12px;
  margin-right: 3px;
  background: rgba(24, 144, 255, 0.7);
  border-radius: 1px;
  animation: sound-wave 1.2s infinite ease-in-out both;
}

.sound-wave span:nth-child(1) {
  animation-delay: -0.4s;
  height: 10px;
}

.sound-wave span:nth-child(2) {
  animation-delay: -0.3s;
  height: 16px;
}

.sound-wave span:nth-child(3) {
  animation-delay: -0.2s;
  height: 12px;
}

.sound-wave span:nth-child(4) {
  animation-delay: -0.1s;
  height: 14px;
}

.sound-wave span:nth-child(5) {
  animation-delay: 0s;
  height: 8px;
}

.recognition-end .sound-wave span {
  animation: none;
  background: rgba(82, 196, 26, 0.9);
  height: 12px;
}

/* Typing animation */
.typing {
  display: flex;
  align-items: center;
  height: 24px;
}

.typing span {
  display: inline-block;
  width: 8px;
  height: 8px;
  margin-right: 5px;
  background: rgba(24, 144, 255, 0.7);
  border-radius: 50%;
  animation: typing 1.4s infinite ease-in-out both;
}

.typing span:nth-child(1) {
  animation-delay: -0.32s;
}

.typing span:nth-child(2) {
  animation-delay: -0.16s;
}

/* Audio visualizer area */
.audio-visualizer-container {
  padding: 16px;
  background: white;
  border-radius: 10px;
  margin: 0 24px 16px;
  box-shadow: 0 2px 10px rgba(0, 0, 0, 0.05);
  border: 1px solid rgba(24, 144, 255, 0.1);
}

.visualizer-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  margin-bottom: 12px;
}

.recording-indicator {
  display: flex;
  align-items: center;
  font-size: 14px;
  color: #333;
}

.recording-pulse {
  display: inline-block;
  width: 10px;
  height: 10px;
  border-radius: 50%;
  background-color: #f5222d;
  margin-right: 8px;
  animation: pulse 1.5s infinite;
}

.timer {
  font-family: 'Monaco', monospace;
  font-size: 16px;
  font-weight: 500;
  color: #333;
}

.audio-visualizer {
  width: 100%;
  height: 100px;
  background: rgba(245, 247, 250, 0.3);
  border-radius: 8px;
}

/* Animations */
@keyframes typing {
  0%,
  80%,
  100% {
    transform: scale(0.6);
    opacity: 0.6;
  }
  40% {
    transform: scale(1);
    opacity: 1;
  }
}

@keyframes sound-wave {
  0%,
  100% {
    transform: scaleY(0.6);
  }
  50% {
    transform: scaleY(1);
  }
}

@keyframes pulse {
  0% {
    opacity: 1;
    transform: scale(1);
  }
  50% {
    opacity: 0.5;
    transform: scale(1.1);
  }
  100% {
    opacity: 1;
    transform: scale(1);
  }
}

@keyframes pulse-gentle {
  0% {
    opacity: 0.5;
  }
  50% {
    opacity: 0.8;
  }
  100% {
    opacity: 0.5;
  }
}

@keyframes pulse-recording {
  0% {
    box-shadow: 0 0 0 0 rgba(255, 76, 76, 0.4);
  }
  70% {
    box-shadow: 0 0 0 10px rgba(255, 76, 76, 0);
  }
  100% {
    box-shadow: 0 0 0 0 rgba(255, 76, 76, 0);
  }
}
</style>
