<template>
  <div class="layout-chat">
    <ElDrawer v-model="isDrawerVisible" :size="isMobile ? '100%' : '480px'" :with-header="false">
      <!-- Drawer header: bot identity plus online/offline indicator -->
      <div class="header">
        <div class="header-left">
          <span class="name">Art Bot</span>
          <div class="status">
            <div class="dot" :class="{ online: isOnline, offline: !isOnline }"></div>
            <span class="status-text">{{ isOnline ? '在线' : '离线' }}</span>
          </div>
        </div>
        <div class="header-right">
          <ElIcon class="icon-close" :size="20" @click="closeChat">
            <Close />
          </ElIcon>
        </div>
      </div>
      <div class="chat-container">
        <!-- Chat message list -->
        <div class="chat-messages" ref="messageContainer">
          <template v-for="(message, index) in messages" :key="index">
            <div :class="['message-item', message.isMe ? 'message-right' : 'message-left']">
              <ElAvatar :size="32" :src="message.avatar" class="message-avatar" />
              <div class="message-content">
                <div class="message-info">
                  <span class="sender-name">{{ message.sender }}</span>
                  <span class="message-time">{{ message.time }}</span>
                </div>
                <div class="message-text">{{ message.content }}</div>
              </div>
            </div>
          </template>
        </div>

        <!-- Chat input area -->
        <!-- NOTE(review): the send/mic controls appear both in the #append slot and in
             .chat-input-actions below; confirm the #append slot actually renders for
             type="textarea" inputs, otherwise the slot block is dead markup -->
        <div class="chat-input">
          <ElInput
            v-model="messageText"
            type="textarea"
            :rows="3"
            placeholder="输入消息"
            resize="none"
            @keyup.enter.prevent="sendMessage"
          >
            <template #append>
              <div class="input-actions">
                <ElButton :icon="Paperclip" circle plain />
                <ElButton :icon="Picture" circle plain />
                <ElButton
                  :icon="Microphone"
                  :type="isRecording ? 'success' : 'default'"
                  circle
                  plain
                  @click="toggleSpeechRecognition"
                  :loading="isConnecting"
                />
                <ElButton type="primary" @click="sendMessage" v-ripple>发送</ElButton>
              </div>
            </template>
          </ElInput>
          <div class="chat-input-actions">
            <div class="left">
              <i class="iconfont-sys"></i>
              <i class="iconfont-sys"></i>
              <i
                class="iconfont-sys"
                :class="{ 'voice-recording': isRecording }"
                @click="toggleSpeechRecognition"
                >🎤</i
              >
            </div>
            <ElButton type="primary" @click="sendMessage" v-ripple>发送</ElButton>
          </div>
        </div>
      </div>
    </ElDrawer>

    <!-- Voice recording dialog (modal while a speech session is active) -->
    <ElDialog
      v-model="isRecordingDialogVisible"
      title="语音识别"
      width="400px"
      :close-on-click-modal="false"
      :close-on-press-escape="false"
      :show-close="false"
      center
    >
      <div class="recording-dialog">
        <!-- Live audio waveform (heights driven by startAudioWaveAnimation) -->
        <div class="audio-wave">
          <div class="wave-container">
            <div
              v-for="(bar, index) in audioBars"
              :key="index"
              class="wave-bar"
              :style="{ height: bar.height + 'px' }"
            ></div>
          </div>
        </div>

        <!-- Recognition transcript display -->
        <div class="recognition-result">
          <div class="result-text">{{ dialogRecognitionText }}</div>
          <div class="recording-status">
            <ElIcon class="recording-icon" :size="16">
              <Microphone />
            </ElIcon>
            <span class="status-text">{{ isRecording ? '正在录音...' : '录音已停止' }}</span>
          </div>
        </div>

        <!-- Dialog action buttons -->
        <div class="dialog-actions">
          <ElButton type="danger" @click="stopRecordingAndCloseDialog" :loading="isConnecting">
            停止录音
          </ElButton>
          <ElButton type="primary" @click="sendDialogMessage" :disabled="!dialogRecognitionText">
            发送
          </ElButton>
        </div>
      </div>
    </ElDialog>
  </div>
</template>

<script setup lang="ts">
  // 资源导入
  import aiAvatar from '@/assets/img/avatar/avatar10.webp'
  import meAvatar from '@/assets/img/avatar/avatar5.webp'

  // 工具和消息总线
  import { mittBus } from '@/utils/sys'

  // Element Plus 组件
  import { ElMessage } from 'element-plus'

  // Element Plus 图标
  import { Close, Microphone, Paperclip, Picture } from '@element-plus/icons-vue'

  defineOptions({ name: 'ArtChatWindow' })

  // Shape of one chat bubble in the message list.
  interface ChatMessage {
    id: number
    sender: string
    content: string
    time: string // preformatted display time, e.g. "10:00"
    isMe: boolean // true for messages sent by the local user (right-aligned)
    avatar: string
  }

  // Constants
  const MOBILE_BREAKPOINT = 500 // px; below this the drawer goes full-width
  const SCROLL_DELAY = 100 // ms to wait before scrolling to the newest message
  const BOT_NAME = 'Art Bot'
  const USER_NAME = 'Ricky'

  // Responsive layout
  // (useWindowSize/ref/computed/nextTick are presumably auto-imported via
  // unplugin-auto-import — they are used but not imported explicitly here)
  const { width } = useWindowSize()
  const isMobile = computed(() => width.value < MOBILE_BREAKPOINT)

  // Drawer / presence state
  const isDrawerVisible = ref(false)
  const isOnline = ref(true)

  // Message composer state
  const messageText = ref('')
  const messageId = ref(10) // next outgoing message id — TODO confirm why the seed is 10
  const messageContainer = ref<HTMLElement | null>(null)

  // Voice input state
  const isRecording = ref(false)
  const isConnecting = ref(false)
  const tempMessageText = ref('')
  let speechRecognitionService: any = null

  // Recording dialog state
  const isRecordingDialogVisible = ref(false)
  const dialogRecognitionText = ref('')
  const audioBars = ref(Array.from({ length: 20 }, () => ({ height: 2 }))) // waveform bar heights (px)
  let audioAnimationInterval: any = null
  let keywordDetectionInterval: any = null

  // WebSocket connection helper for the speech-recognition server.
  // (Ported from the reference "vue实时语音识别.html" implementation.)
  //
  // stateHandle receives numeric connection-state codes:
  //   0 = connected, 1 = closed, 2 = error.
  // msgHandle receives the raw MessageEvent for every server message.
  class WebSocketConnectMethod {
    private speechSokt: WebSocket | null = null
    private msgHandle: any
    private stateHandle: any
    private useItn: string = 'true'
    private asrMode: string = 'online'
    // May legitimately be cleared to null via setConfig(), so the type must
    // admit null (previously declared as plain `string`).
    private hotwords: string | null = '考勤排班'
    private config: any = null

    constructor(config: any) {
      this.msgHandle = config.msgHandle
      this.stateHandle = config.stateHandle
    }

    /**
     * Open the WebSocket connection. Returns 1 when the connection attempt
     * was initiated. Throws when the URL is not a ws:// or wss:// address,
     * or when the environment lacks WebSocket support.
     */
    wsStart(serverUrl: string): number {
      // Anchored so only URLs that *start* with ws:// or wss:// pass — the
      // previous unanchored pattern accepted the scheme anywhere in the string.
      if (!serverUrl.match(/^wss?:\S*/)) {
        throw new Error('请检查WebSocket地址正确性')
      }

      if ('WebSocket' in window) {
        this.speechSokt = new WebSocket(serverUrl)
        this.speechSokt.onopen = () => this.onOpen()
        this.speechSokt.onclose = () => this.onClose()
        this.speechSokt.onmessage = (e) => this.onMessage(e)
        this.speechSokt.onerror = () => this.onError()
        return 1
      } else {
        throw new Error('当前浏览器不支持 WebSocket')
      }
    }

    /** Close the socket if one is open; safe to call when not connected. */
    wsStop() {
      if (this.speechSokt) {
        this.speechSokt.close()
        this.speechSokt = null
      }
    }

    /** Send data only when the socket exists and is OPEN; otherwise a no-op. */
    wsSend(data: any) {
      if (this.speechSokt && this.speechSokt.readyState === 1) {
        this.speechSokt.send(data)
      }
    }

    /** On open: push the recognizer configuration, then report state 0. */
    onOpen() {
      let request: any

      if (this.config) {
        // JSON-format configuration with defaults for any missing field
        request = {
          chunk_size: this.config.chunk_size || [5, 10, 5],
          wav_name: this.config.wav_name || 'h5',
          is_speaking: this.config.is_speaking !== undefined ? this.config.is_speaking : true,
          wav_format: this.config.wav_format || 'pcm',
          chunk_interval: this.config.chunk_interval || 10,
          itn: this.config.itn !== undefined ? this.config.itn : true,
          mode: this.config.mode || '2pass',
          hotwords: this.config.hotwords || null
        }
      } else {
        // Legacy configuration path built from the individual fields
        request = {
          chunk_size: [5, 10, 5],
          wav_name: 'h5',
          is_speaking: true,
          chunk_interval: 10,
          mode: this.asrMode,
          itn: this.useItn === 'true'
        }

        if (this.hotwords) {
          request.hotwords = this.hotwords
        }
      }

      console.log('发送配置参数:', request)
      this.speechSokt?.send(JSON.stringify(request))
      this.stateHandle(0) // connected
    }

    onClose() {
      this.stateHandle(1) // connection closed
    }

    onMessage(e: any) {
      this.msgHandle(e) // forward the raw message event
    }

    onError() {
      this.stateHandle(2) // connection error
    }

    /**
     * Accept either a JSON config object (preferred) or the legacy positional
     * string arguments (itn, mode, hotwords).
     */
    setConfig(config: any) {
      if (typeof config === 'object') {
        this.useItn = config.itn ? 'true' : 'false'
        this.asrMode = config.mode || '2pass'
        this.hotwords = config.hotwords || null

        // Keep the full object so onOpen() can send it verbatim
        this.config = config
      } else {
        // Legacy positional-string call path
        this.useItn = arguments[0]
        this.asrMode = arguments[1]
        this.hotwords = arguments[2]
      }
    }
  }

  // SpeechRecognitionService — captures microphone audio, converts it to 16KHz
  // Int16 PCM and streams it to the ASR server through WebSocketConnectMethod.
  // (Ported from the reference "vue实时语音识别.html" implementation.)
  class SpeechRecognitionService {
    private wsconnecter: WebSocketConnectMethod | null = null
    private mediaStream: MediaStream | null = null
    private audioContext: AudioContext | null = null
    private audioSource: MediaStreamAudioSourceNode | null = null
    private scriptProcessor: ScriptProcessorNode | null = null
    private isRecording = false
    private callbacks: any = null
    // Audio buffering state — mirrors the reference HTML implementation
    private audioBuffer: Int16Array[] = []
    private sampleBuf = new Int16Array() // rolling sample buffer, as in the HTML reference
    private CHUNK_SIZE = 960 // fixed send-chunk size, kept identical to the HTML reference
    private targetSampleRate = 16000 // target sample rate: 16KHz

    // Register the { onResult, onStatus, onError } callback object used to
    // report recognition results and connection state to the caller.
    registerCallbacks(callbacks: any) {
      this.callbacks = callbacks
    }

    // Initialize the service: create the WebSocket connector, push the ASR
    // configuration and open the connection.
    async init() {
      try {
        console.log('初始化语音识别服务...')

        // Create the WebSocket connector instance (HTML-reference implementation)
        this.wsconnecter = new WebSocketConnectMethod({
          msgHandle: this.handleMessage.bind(this),
          stateHandle: this.handleConnectionState.bind(this)
        })

        // Configure the recognizer using the JSON parameter format
        this.wsconnecter.setConfig({
          chunk_size: [5, 10, 5],
          wav_name: 'h5',
          is_speaking: true,
          wav_format: 'pcm',
          chunk_interval: 10,
          itn: true,
          mode: '2pass',
          hotwords: '{"阿里巴巴":20,"hello world":40}'
        })

        // Open the WebSocket connection.
        // NOTE(review): hard-coded plain ws:// endpoint — pages served over
        // https cannot open insecure WebSockets; consider making this configurable.
        this.wsconnecter.wsStart('ws://120.46.221.22:10095/')
      } catch (error: any) {
        console.error('语音识别服务初始化失败:', error)
        if (this.callbacks?.onError) {
          this.callbacks.onError('初始化失败: ' + error.message)
        }
        throw error
      }
    }

    // Map connector state codes to onStatus/onError callbacks.
    private handleConnectionState(state: number) {
      switch (state) {
        case 0: // connected
          if (this.callbacks?.onStatus) {
            this.callbacks.onStatus('connected')
          }
          break
        case 1: // connection closed
          if (this.callbacks?.onStatus) {
            this.callbacks.onStatus('disconnected')
          }
          break
        case 2: // connection error
          if (this.callbacks?.onError) {
            this.callbacks.onError('WebSocket连接错误')
          }
          break
      }
    }

    // Parse a server message and forward any recognition text to onResult.
    private handleMessage(e: any) {
      try {
        console.log('收到WebSocket消息:', e.data)
        const message = JSON.parse(e.data)

        if (message.text) {
          // A recognition result — the mode field tells whether this is the
          // corrected (offline) pass or a streaming partial
          const isOfflineMode = message.mode === '2pass-offline'
          console.log(
            '语音识别结果:',
            message.text,
            '模式:',
            message.mode,
            '是否修正结果:',
            isOfflineMode
          )

          if (this.callbacks?.onResult) {
            this.callbacks.onResult(message.text, isOfflineMode)
          }
        } else if (message.timestamp) {
          // Timestamp-only payload — currently just logged
          console.log('时间戳信息:', message)
        } else {
          console.log('其他类型的消息:', message)
        }
      } catch (error: any) {
        console.error('解析消息失败:', error)
      }
    }

    // Start recording (no-op when already recording). Reports 'connecting',
    // then acquires the microphone and begins streaming audio.
    async start() {
      if (this.isRecording) {
        console.log('录音已在进行中')
        return
      }

      try {
        this.isRecording = true
        console.log('开始语音识别录音...')
        if (this.callbacks?.onStatus) {
          this.callbacks.onStatus('connecting')
        }

        // Acquire audio input (method name kept from the HTML reference, but
        // it now records from the microphone — see below)
        await this.startSystemAudioRecording()
        console.log('音频录制启动成功')
      } catch (error: any) {
        console.error('开始录音失败:', error)
        if (this.callbacks?.onError) {
          this.callbacks.onError('开始录音失败: ' + error.message)
        }
        this.isRecording = false
        throw error
      }
    }

    // Acquire the microphone stream. Despite the legacy name, system-audio
    // capture was dropped in favor of plain microphone recording.
    private async startSystemAudioRecording() {
      try {
        // Request the microphone directly with audio processing enabled
        this.mediaStream = await navigator.mediaDevices.getUserMedia({
          audio: {
            echoCancellation: true,
            noiseSuppression: true,
            autoGainControl: true
          }
        })

        console.log('麦克风媒体流获取成功，音频轨道数量:', this.mediaStream.getAudioTracks().length)

        // Log the state of the first audio track for debugging
        const audioTracks = this.mediaStream.getAudioTracks()
        const activeAudioTrack = audioTracks[0]
        console.log('音频轨道状态:', activeAudioTrack.readyState)
        console.log('音频轨道标签:', activeAudioTrack.label)

        console.log('麦克风音频流准备完成，开始音频处理')

        // Hook the stream into the Web Audio processing graph
        this.setupAudioProcessing()
      } catch (error: any) {
        console.error('麦克风录制失败:', error)
        // Retry with simplified getUserMedia constraints
        await this.tryFallbackToMicrophone()
      }
    }

    // Fallback: request the microphone with the simplest possible constraints.
    private async tryFallbackToMicrophone() {
      try {
        this.mediaStream = await navigator.mediaDevices.getUserMedia({
          audio: true // simplified constraints for maximum compatibility
        })

        // Hook the stream into the Web Audio processing graph
        this.setupAudioProcessing()
      } catch (error: any) {
        console.error('麦克风录制失败:', error)
        throw new Error('所有录制方式都失败了: ' + error.message)
      }
    }

    // Build the Web Audio graph that converts microphone input to 16KHz PCM
    // chunks and streams them over the WebSocket.
    // NOTE(review): ScriptProcessorNode is deprecated in favor of
    // AudioWorkletNode; kept here to match the reference implementation.
    private setupAudioProcessing() {
      try {
        // Make sure an AudioContext exists
        if (!this.initAudioContext()) {
          throw new Error('无法初始化音频上下文')
        }

        // Resume the context if the browser auto-suspended it
        if (this.audioContext && this.audioContext.state === 'suspended') {
          this.audioContext.resume()
        }

        // Source node fed by the microphone stream
        this.audioSource = this.audioContext!.createMediaStreamSource(this.mediaStream!)

        // ScriptProcessor exposes raw PCM frames for forwarding
        const bufferSize = 4096
        this.scriptProcessor = this.audioContext!.createScriptProcessor(bufferSize, 1, 1)

        // Wire up the graph: mic -> processor -> destination
        this.audioSource.connect(this.scriptProcessor)
        this.scriptProcessor.connect(this.audioContext!.destination)

        // Per-frame processing, following the HTML reference implementation
        this.scriptProcessor.onaudioprocess = (event) => {
          if (!this.isRecording) return

          const inputBuffer = event.inputBuffer
          const channelData = inputBuffer.getChannelData(0)
          const int16Data = new Int16Array(channelData.length)

          // Convert Float32 samples in [-1, 1] to Int16 PCM
          for (let i = 0; i < channelData.length; i++) {
            const s = Math.max(-1, Math.min(1, channelData[i]))
            int16Data[i] = s < 0 ? s * 0x8000 : s * 0x7fff
          }

          // Resample from the context's native rate down to 16KHz
          const resampledData = this.resampleAudioData(
            int16Data,
            this.audioContext!.sampleRate,
            this.targetSampleRate
          )

          // Append to the rolling buffer (HTML-reference style)
          this.sampleBuf = Int16Array.from([...this.sampleBuf, ...resampledData])

          // Flush complete CHUNK_SIZE blocks to the server
          while (this.sampleBuf.length >= this.CHUNK_SIZE) {
            const sendBuf = this.sampleBuf.slice(0, this.CHUNK_SIZE)
            this.sampleBuf = this.sampleBuf.slice(this.CHUNK_SIZE, this.sampleBuf.length)

            // Int16Array -> ArrayBuffer (raw bytes)
            const arrayBuffer = sendBuf.buffer

            // Send the audio chunk as binary data
            if (this.wsconnecter) {
              this.wsconnecter.wsSend(arrayBuffer)
            }
          }

          // NOTE(review): the result of this silence check is discarded —
          // it appears to be leftover debug code
          channelData.some((value) => Math.abs(value) > 0.001)
        }

        if (this.callbacks?.onStatus) {
          this.callbacks.onStatus('recording')
        }
      } catch (error: any) {
        console.error('音频处理设置失败:', error)
        throw error
      }
    }

    // Linearly interpolate Int16 samples from sourceSampleRate to
    // targetSampleRate; returns the input unchanged when the rates match.
    private resampleAudioData(
      inputData: Int16Array,
      sourceSampleRate: number,
      targetSampleRate: number
    ): Int16Array {
      if (sourceSampleRate === targetSampleRate) {
        return inputData
      }

      const ratio = sourceSampleRate / targetSampleRate
      const newLength = Math.round(inputData.length / ratio)
      const result = new Int16Array(newLength)

      for (let i = 0; i < newLength; i++) {
        const index = i * ratio
        const lowerIndex = Math.floor(index)
        const upperIndex = Math.min(Math.ceil(index), inputData.length - 1)
        const fraction = index - lowerIndex

        if (lowerIndex === upperIndex) {
          result[i] = inputData[lowerIndex]
        } else {
          // Linear interpolation between the two neighbouring samples
          result[i] = Math.round(
            inputData[lowerIndex] * (1 - fraction) + inputData[upperIndex] * fraction
          )
        }
      }

      return result
    }

    // Create the AudioContext (with webkit fallback). Returns false on failure.
    private initAudioContext(): boolean {
      try {
        // Prefer the standard AudioContext, fall back to webkitAudioContext
        const AudioContextClass = window.AudioContext || (window as any).webkitAudioContext
        if (!AudioContextClass) {
          throw new Error('浏览器不支持Web Audio API')
        }

        // Create a fresh audio context
        this.audioContext = new AudioContextClass()
        console.log('音频上下文初始化成功，采样率:', this.audioContext.sampleRate)

        // Target sample rate: 16KHz (matches the server requirement)
        this.targetSampleRate = 16000
        console.log('目标采样率:', this.targetSampleRate)

        return true
      } catch (error: any) {
        console.error('初始化音频上下文失败:', error)
        return false
      }
    }

    // Flush audio accumulated in audioBuffer in fixed-size blocks.
    // NOTE(review): nothing in this class ever pushes into audioBuffer, so
    // this private method is effectively dead code — kept for parity with the
    // reference HTML implementation.
    private sendBufferedAudioData() {
      if (!this.wsconnecter || this.audioBuffer.length === 0) {
        return
      }

      // Merge all buffered chunks into one contiguous array
      const totalLength = this.audioBuffer.reduce((sum, arr) => sum + arr.length, 0)
      const mergedData = new Int16Array(totalLength)

      let offset = 0
      for (const buffer of this.audioBuffer) {
        mergedData.set(buffer, offset)
        offset += buffer.length
      }

      // Send in fixed CHUNK_SIZE=960 blocks, as in the HTML reference
      const CHUNK_SIZE = 960
      let sampleBuf = mergedData

      // Flush complete blocks
      while (sampleBuf.length >= CHUNK_SIZE) {
        const sendBuf = sampleBuf.slice(0, CHUNK_SIZE)
        sampleBuf = sampleBuf.slice(CHUNK_SIZE, sampleBuf.length)

        // Int16Array -> ArrayBuffer (raw bytes)
        const arrayBuffer = sendBuf.buffer

        this.wsconnecter.wsSend(arrayBuffer)
      }

      // Drop whatever remains
      this.clearAudioBuffer()
    }

    // Reset both audio buffers.
    private clearAudioBuffer() {
      this.audioBuffer = []
      this.sampleBuf = new Int16Array()
    }

    // Stop recording: flush remaining samples, signal end-of-speech to the
    // server, then release all media/WebSocket resources.
    stop() {
      if (!this.isRecording) {
        return
      }

      this.isRecording = false

      // Flush any samples still in the rolling buffer
      if (this.sampleBuf.length > 0 && this.wsconnecter) {
        const arrayBuffer = this.sampleBuf.buffer
        console.log('发送剩余音频数据，长度:' + this.sampleBuf.length)
        this.wsconnecter.wsSend(arrayBuffer)
        this.sampleBuf = new Int16Array()
      }

      // Tell the server the utterance is finished
      if (this.wsconnecter) {
        const endSignal = { is_speaking: false }
        console.log('发送结束标志:', endSignal)
        this.wsconnecter.wsSend(JSON.stringify(endSignal))
      }

      // Release microphone / audio-graph / WebSocket resources
      this.cleanupResources()

      if (this.callbacks?.onStatus) {
        this.callbacks.onStatus('stopped')
      }
    }

    // Tear down the audio graph, close the AudioContext, stop all media
    // tracks and close the WebSocket. Each step is individually guarded so a
    // failure in one cannot prevent the others from running.
    private cleanupResources() {
      // Disconnect the script processor node
      if (this.scriptProcessor) {
        try {
          this.scriptProcessor.disconnect()
          this.scriptProcessor = null
        } catch (e: any) {
          console.warn('断开脚本处理器连接时出错:', e.message)
        }
      }

      if (this.audioSource) {
        try {
          this.audioSource.disconnect()
          this.audioSource = null
        } catch (e: any) {
          console.warn('断开音频源连接时出错:', e.message)
        }
      }

      // Close the audio context
      if (this.audioContext && this.audioContext.state !== 'closed') {
        try {
          this.audioContext.close()
          this.audioContext = null
        } catch (e: any) {
          console.warn('关闭音频上下文时出错:', e.message)
        }
      }

      // Stop every media track
      if (this.mediaStream) {
        try {
          this.mediaStream.getTracks().forEach((track) => {
            try {
              track.stop()
            } catch (e: any) {
              console.warn('停止媒体轨道时出错:', e.message)
            }
          })
          this.mediaStream = null
        } catch (e: any) {
          console.warn('清理媒体流时出错:', e.message)
        }
      }

      // Close the WebSocket connection
      if (this.wsconnecter) {
        this.wsconnecter.wsStop()
        this.wsconnecter = null
      }
    }
  }

  // Seed conversation shown when the chat window first opens.
  const initializeMessages = (): ChatMessage[] => {
    const botGreeting: ChatMessage = {
      id: 1,
      sender: BOT_NAME,
      content: '你好！我是你的AI助手，有什么我可以帮你的吗？',
      time: '10:00',
      isMe: false,
      avatar: aiAvatar
    }

    const userReply: ChatMessage = {
      id: 2,
      sender: USER_NAME,
      content: '我想了解一下系统的使用方法。',
      time: '10:01',
      isMe: true,
      avatar: meAvatar
    }

    return [botGreeting, userReply]
  }

  const messages = ref<ChatMessage[]>(initializeMessages())

  // Utility helpers
  // Format "now" as a short 2-digit hour:minute string in the runtime locale.
  const formatCurrentTime = (): string =>
    new Date().toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })

  // Scroll the message list to its newest entry once the DOM has settled.
  const scrollToBottom = (): void => {
    nextTick(() => {
      setTimeout(() => {
        const container = messageContainer.value
        if (container) {
          container.scrollTop = container.scrollHeight
        }
      }, SCROLL_DELAY)
    })
  }

  // Accumulates corrected ("2pass-offline") recognition text across callbacks.
  let cachedOfflineResult = ''

  // Wire the speech-recognition service callbacks to the component's UI state.
  const registerSpeechRecognitionCallbacks = () => {
    if (!speechRecognitionService) return

    speechRecognitionService.registerCallbacks({
      onResult: (text: string, isOfflineMode: boolean) => {
        console.log('语音识别结果:', text, '是否修正模式:', isOfflineMode)

        tempMessageText.value = text

        if (isOfflineMode) {
          // 2pass-offline: corrected transcript replaces the live text
          console.log('修正识别结果:', text)
          cachedOfflineResult += text
          messageText.value = cachedOfflineResult
          dialogRecognitionText.value = cachedOfflineResult
        } else {
          // 2pass-online: streaming partials are appended as they arrive
          console.log('实时识别结果:', text)
          messageText.value += text
          dialogRecognitionText.value += text
        }
      },
      onStatus: (status: string) => {
        console.log('语音识别状态:', status)

        if (status === 'connecting') {
          isConnecting.value = true
        } else if (status === 'connected') {
          isConnecting.value = false
          ElMessage.success('语音识别连接成功')
        } else if (status === 'disconnected' || status === 'stopped') {
          isConnecting.value = false
          isRecording.value = false
          ElMessage.info('语音识别已停止')
        } else if (status === 'error') {
          isConnecting.value = false
          isRecording.value = false
        }
      },
      onError: (error: string) => {
        console.error('语音识别错误:', error)

        // Map raw browser errors to actionable user-facing hints
        let userFriendlyError = error
        if (error.includes('Requested device not found')) {
          userFriendlyError = '未找到麦克风设备，请检查设备连接并授权麦克风权限'
        } else if (error.includes('Permission denied')) {
          userFriendlyError = '麦克风权限被拒绝，请在浏览器设置中允许麦克风访问'
        } else if (error.includes('NotSupportedError')) {
          userFriendlyError = '浏览器不支持语音识别功能'
        }

        ElMessage.error(userFriendlyError)
        isConnecting.value = false
        isRecording.value = false
        isRecordingDialogVisible.value = false
      }
    })
  }

  // Open the recording dialog and start a fresh speech-recognition session.
  const startRecordingWithDialog = async () => {
    try {
      // Reset dialog state before connecting
      dialogRecognitionText.value = ''
      isRecordingDialogVisible.value = true

      // New service instance per session so stale sockets never leak in
      speechRecognitionService = new SpeechRecognitionService()

      registerSpeechRecognitionCallbacks()

      await speechRecognitionService.init()

      await speechRecognitionService.start()
      isRecording.value = true

      // Drive the waveform animation and trigger-phrase polling
      startAudioWaveAnimation()
      startKeywordDetection()
    } catch (error: any) {
      console.error('语音识别启动失败:', error)
      isRecordingDialogVisible.value = false

      // error.message may be undefined when a non-Error value is thrown;
      // guard it once so the .includes() checks below cannot throw.
      const rawMessage: string = error?.message ?? ''
      let userFriendlyError = rawMessage || '语音识别启动失败'

      if (rawMessage.includes('Requested device not found')) {
        // Fixed: this branch previously used escaped "\\n" and showed literal
        // backslash-n sequences to the user instead of line breaks.
        userFriendlyError =
          '未找到音频设备，请检查：\n1. 确保音频设备已正确连接\n2. 在浏览器设置中允许音频访问\n3. 检查设备管理器中的音频设备状态'
      } else if (rawMessage.includes('Permission denied')) {
        userFriendlyError =
          '音频权限被拒绝，请按以下步骤操作：\n1. 点击地址栏左侧的锁形图标\n2. 选择"网站设置"\n3. 将音频权限改为"允许"'
      } else if (rawMessage.includes('NotSupportedError')) {
        userFriendlyError = '浏览器不支持语音识别功能，建议使用Chrome、Edge或Firefox等现代浏览器'
      } else if (rawMessage.includes('WebSocket')) {
        userFriendlyError = '无法连接到语音识别服务器，请检查网络连接'
      }

      ElMessage.error(userFriendlyError)
      isRecording.value = false
      isConnecting.value = false
    }
  }

  // Tear down the active recording session and dismiss the dialog.
  const stopRecordingAndCloseDialog = async () => {
    await speechRecognitionService?.stop()

    // Drop cached transcript state and hide the dialog
    cachedOfflineResult = ''
    isRecording.value = false
    isRecordingDialogVisible.value = false

    // Halt the waveform animation and trigger-phrase polling
    stopAudioWaveAnimation()
    stopKeywordDetection()
  }

  // Push the dialog transcript into the composer and send it as a message.
  const sendDialogMessage = () => {
    const transcript = dialogRecognitionText.value
    if (!transcript.trim()) return

    messageText.value = transcript
    sendMessage()
    stopRecordingAndCloseDialog()
  }

  // Animate the dialog waveform with random bar heights every 100ms.
  const startAudioWaveAnimation = () => {
    audioAnimationInterval = setInterval(() => {
      const randomized = audioBars.value.map(() => ({
        height: 10 + Math.random() * 40
      }))
      audioBars.value = randomized
    }, 100)
  }

  // Stop the waveform animation and flatten the bars back to baseline.
  const stopAudioWaveAnimation = () => {
    if (!audioAnimationInterval) return

    clearInterval(audioAnimationInterval)
    audioAnimationInterval = null
    audioBars.value = audioBars.value.map(() => ({ height: 2 }))
  }

  // Poll the live transcript every 500ms for "send it" trigger phrases.
  const startKeywordDetection = () => {
    keywordDetectionInterval = setInterval(() => {
      const transcript = dialogRecognitionText.value
      const lowered = transcript.toLowerCase()

      for (const keyword of ['发送', '完毕', '说完了']) {
        // Match the LAST occurrence so the trigger phrase itself is dropped
        const position = lowered.lastIndexOf(keyword.toLowerCase())
        if (position === -1) continue

        // Send everything spoken before the trigger phrase
        messageText.value = transcript.substring(0, position).trim()
        sendMessage()
        stopRecordingAndCloseDialog()
        break
      }
    }, 500)
  }

  // Cancel the trigger-phrase polling loop if it is running.
  const stopKeywordDetection = () => {
    if (!keywordDetectionInterval) return

    clearInterval(keywordDetectionInterval)
    keywordDetectionInterval = null
  }

  // Toggle voice input: stop the session when recording, otherwise start one.
  const toggleSpeechRecognition = async () => {
    if (isRecording.value) {
      await stopRecordingAndCloseDialog()
      return
    }
    await startRecordingWithDialog()
  }

  // Append the current composer text as an outgoing message, then reset
  // the composer and any speech-recognition leftovers.
  const sendMessage = (): void => {
    const content = messageText.value.trim()
    if (!content) return

    messages.value.push({
      id: messageId.value++,
      sender: USER_NAME,
      content,
      time: formatCurrentTime(),
      isMe: true,
      avatar: meAvatar
    })

    messageText.value = ''
    tempMessageText.value = ''
    cachedOfflineResult = ''
    scrollToBottom()
  }

  // Chat window controls
  // Show the drawer and jump to the newest message.
  const openChat = (): void => {
    isDrawerVisible.value = true
    scrollToBottom()
  }

  // Hide the drawer, stopping any recording in progress first.
  const closeChat = (): void => {
    if (isRecording.value && speechRecognitionService) {
      speechRecognitionService.stop()
    }
    isDrawerVisible.value = false
  }

  // Lifecycle: register/unregister the global "openChat" event, and make sure
  // any in-flight recording is stopped when the component is destroyed.
  onMounted(() => {
    scrollToBottom()
    mittBus.on('openChat', openChat)
  })

  onUnmounted(() => {
    mittBus.off('openChat', openChat)
    // Release microphone/WebSocket resources if a recording is still active
    if (isRecording.value && speechRecognitionService) {
      speechRecognitionService.stop()
    }
  })
</script>

<style lang="scss">
  // Unscoped: lighten the Element Plus overlay behind the chat drawer
  .layout-chat {
    .el-overlay {
      background-color: rgb(0 0 0 / 20%) !important;
    }
  }
</style>

<style lang="scss" scoped>
  @use './style';

  // Recording dialog styles
  .recording-dialog {
    text-align: center;
    padding: 20px 0;

    // Animated waveform shown while recording
    .audio-wave {
      margin-bottom: 20px;

      .wave-container {
        display: flex;
        align-items: center;
        justify-content: center;
        gap: 2px;
        height: 60px;

        .wave-bar {
          width: 4px;
          background: linear-gradient(180deg, #409eff 0%, #67c23a 100%);
          border-radius: 2px;
          transition: height 0.1s ease;
          min-height: 2px;
        }
      }
    }

    // Transcript box plus the pulsing "recording" status line
    .recognition-result {
      margin-bottom: 20px;

      .result-text {
        min-height: 80px;
        max-height: 120px;
        overflow-y: auto;
        padding: 15px;
        background-color: #f5f7fa;
        border-radius: 8px;
        border: 1px solid #e4e7ed;
        font-size: 14px;
        line-height: 1.5;
        text-align: left;
        word-wrap: break-word;
      }

      .recording-status {
        display: flex;
        align-items: center;
        justify-content: center;
        gap: 8px;
        margin-top: 10px;
        color: #909399;
        font-size: 12px;

        .recording-icon {
          color: #f56c6c;
          animation: pulse 1.5s infinite;
        }

        @keyframes pulse {
          0% {
            opacity: 1;
          }
          50% {
            opacity: 0.5;
          }
          100% {
            opacity: 1;
          }
        }
      }
    }

    // Stop / send buttons at the bottom of the dialog
    .dialog-actions {
      display: flex;
      gap: 10px;
      justify-content: center;

      .el-button {
        min-width: 100px;
      }
    }
  }
</style>
