<script setup lang="ts">
import { useUserMedia, useDevicesList } from '@vueuse/core'
import { useControlWebsocket, useDeviceWebsocket } from '@/hooks'
import { voiceToText } from '@/api/user/audio'
import { SESSION } from '@/utils/storage'
import type { WebSocketStatus } from '@vueuse/core'
import type { WatchHandle } from 'vue'
import type { AIWorkerWsResponse, AIWorkerSendData } from '@/types/ws'
import aiLogo from '@/assets/ai-logo.png'
import userAvatar from '@/assets/user-avatar.png'
import aiAvatar from '@/assets/ai-avatar.png'

// ==================== Constants ====================
/** Session countdown duration, in seconds */
const SESSION_TIMEOUT = 180

// ==================== Router ====================
const router = useRouter()

// ==================== Reactive Data ====================
/** Remaining countdown time in seconds; shown in the header and drives the redirect to /finish */
const countdownRemaining = ref(SESSION_TIMEOUT)

// ==================== WebSocket Related ====================
/** Device (AI_Worker data channel) WebSocket connection status */
const deviceWebSocketStatus = ref<WebSocketStatus>('CONNECTING')
/** Control-channel WebSocket connection status */
const controlWebSocketStatus = ref<WebSocketStatus>('CONNECTING')

// ==================== Audio Recording Related ====================
/** Whether audio is currently being recorded */
const isRecording = ref(false)
/** Audio chunks collected for the in-progress recording */
const audioChunks = ref<Blob[]>([])
/** Active MediaRecorder instance (null until the first recording starts) */
const mediaRecorder = ref<MediaRecorder | null>(null)

// ==================== Conversation Related ====================
/** One entry in the conversation history */
interface ConversationItem {
  role: 'user' | 'assistant'
  content: string
}

/** Conversation history rendered in the chat area */
const conversations = ref<ConversationItem[]>([])

// ==================== Processing State ====================
/** True from "audio sent for transcription" until the AI reply arrives or an error occurs */
const isProcessing = ref(false)

// ==================== WebSocket Variables ====================
/** Closes the device WebSocket; assigned in initializeAIWorkerDataConnection */
let deviceWebSocketClose: (code?: number, reason?: string) => void
/** Sends data over the device WebSocket; assigned in initializeAIWorkerDataConnection */
let deviceWebSocketSend: (data: string | ArrayBuffer | Blob, useBuffer?: boolean) => boolean
/** Handle for the effect mirroring the device WebSocket status into local state */
let webSocketStatusWatcher: WatchHandle

// ==================== Device & Media Setup ====================
/**
 * Enumerate media devices and request microphone permission up front.
 */
useDevicesList({
  requestPermissions: true,
  constraints: {
    audio: true,
    video: false,
  },
})

/**
 * Audio-only user media stream, used as the MediaRecorder source.
 */
const { stream, start, stop } = useUserMedia({
  constraints: {
    audio: true,
    video: false,
  },
})

// ==================== Computed Properties ====================
/**
 * Whether the current browser can record audio/webm.
 *
 * Guards against environments where `MediaRecorder` itself is undefined
 * (older Safari / embedded WebViews) — the previous code would throw a
 * ReferenceError there when accessing `MediaRecorder.isTypeSupported`.
 * The result is also coerced to a strict boolean instead of leaking
 * `MediaDevices | undefined` through the `&&` chain.
 */
const isRecordingSupported = computed<boolean>(() => {
  return (
    !!navigator.mediaDevices &&
    typeof MediaRecorder !== 'undefined' &&
    MediaRecorder.isTypeSupported('audio/webm')
  )
})

/**
 * Whether the system is ready: both the control and the device
 * WebSocket connections must be OPEN.
 */
const isSystemReady = computed(() => {
  return controlWebSocketStatus.value === 'OPEN' && deviceWebSocketStatus.value === 'OPEN'
})

// ==================== WebSocket Control ====================
/**
 * Step1: start AI_Worker device control.
 *
 * On a successful start response, the AI_Worker data WebSocket URL is
 * cached in session storage and the data connection is initialised.
 */
const { send: controlWebSocketSend, status: controlStatus } = useControlWebsocket({
  onMessage: (response) => {
    if (response?.status === 'success') {
      // Ignore acknowledgements of a stop_device command.
      // FIX: the previous check `message?.indexOf('stopped') !== -1`
      // evaluated to true whenever `message` was undefined
      // (`undefined !== -1`), short-circuiting the ws_url handling below.
      if (response.message?.includes('stopped')) {
        return
      }
      if (!response.data?.ws_url) {
        ElMessage.error('未获取到 AI_Worker websocket 地址')
        return
      }
      SESSION.set('ai_worker_ws_url', response.data.ws_url)
      initializeAIWorkerDataConnection()
    }
  },
  onAlreadyConnected: () => {
    initializeAIWorkerDataConnection()
  },
})

/** Whether the LLM has announced it is ready to receive messages */
const aiIsReady = ref(false)

/** Handle of the session countdown interval; null when not running.
 *  Initialised to null so the `if (countdownTimer)` checks are safe
 *  before the timer is ever started. */
let countdownTimer: number | null = null

/**
 * Open the AI_Worker data WebSocket connection.
 *
 * The URL comes from session storage (cached by the control-channel
 * response handler). Also starts the session countdown once the LLM
 * reports it is ready.
 */
const initializeAIWorkerDataConnection = () => {
  const url = SESSION.get('ai_worker_ws_url') || ''
  const { open, close, send, status } = useDeviceWebsocket(url, {
    deviceName: 'AI_Worker',
    onMessage: (aiResponse: AIWorkerWsResponse) => {
      if (aiResponse?.data?.content === 'LLM is ready to take your message.') {
        aiIsReady.value = true
        // FIX: start the countdown exactly once. The ready message can
        // arrive again (e.g. after a reconnect) and the old code stacked
        // an additional interval on every occurrence.
        if (!countdownTimer) {
          countdownTimer = setInterval(() => {
            if (countdownRemaining.value > 0) {
              countdownRemaining.value--
            } else if (countdownTimer) {
              clearInterval(countdownTimer)
              countdownTimer = null
              router.push('/finish')
            }
          }, 1000)
        }
      }
      if (aiResponse.type === 'device_data' && aiResponse.data?.type === 'response') {
        // Append the AI reply to the conversation history.
        conversations.value.push({
          role: 'assistant',
          content: aiResponse.data.content,
        })

        isProcessing.value = false
      }
    },
    connectOnMount: false,
    disConnectOnDestroy: true,
  })

  deviceWebSocketClose = close
  deviceWebSocketSend = send
  // Mirror the device WebSocket status into local reactive state.
  webSocketStatusWatcher = watchEffect(() => {
    deviceWebSocketStatus.value = status.value
  })
  open()
}

/**
 * Send a device-control command for AI_Worker over the control channel.
 * Factored out of the start/stop functions, whose bodies were identical
 * except for the command verb.
 * @param command control verb understood by the backend
 */
const sendAIWorkerCommand = (command: 'start_device' | 'stop_device') => {
  controlWebSocketSend(
    JSON.stringify({
      command,
      device_name: 'AI_Worker',
      token: SESSION.get('access_token') || '',
    }),
  )
}

/** Start the AI_Worker device. */
const startAIWorkerDevice = () => {
  sendAIWorkerCommand('start_device')
}

/** Stop the AI_Worker device. */
const stopAIWorkerDevice = () => {
  sendAIWorkerCommand('stop_device')
}

/**
 * 监听控制WebSocket状态变化
 */
watchEffect(() => {
  controlWebSocketStatus.value = controlStatus.value
})

// ==================== Audio Recording Functions ====================
/**
 * Step2: recording & processing flow — start recording.
 *
 * Acquires the microphone stream, wires up a MediaRecorder collecting
 * 100ms audio/webm chunks, and hands the assembled Blob to
 * `processAudioData` once recording stops.
 */
const startRecording = async () => {
  if (!isRecordingSupported.value) {
    ElMessage.error('当前浏览器不支持录音功能')
    return
  }

  if (!isSystemReady.value) {
    ElMessage.error('系统未就绪，请稍候')
    return
  }

  if (isProcessing.value) {
    ElMessage.warning('系统正在处理中，请稍候')
    return
  }

  // FIX: re-entrancy guard. mousedown can fire again (second pointer,
  // repeated events) before mouseup — the old code then created a second
  // MediaRecorder and orphaned the first one's chunks.
  if (isRecording.value) {
    return
  }

  try {
    // Acquire the media stream.
    await start()

    if (!stream.value) {
      ElMessage.error('无法获取麦克风权限')
      return
    }

    // Drop chunks left over from any previous recording.
    audioChunks.value = []

    // Create a MediaRecorder for this recording session.
    mediaRecorder.value = new MediaRecorder(stream.value, {
      mimeType: 'audio/webm',
    })

    // Collect chunks as they become available.
    mediaRecorder.value.ondataavailable = (event) => {
      if (event.data.size > 0) {
        audioChunks.value.push(event.data)
      }
    }

    // When recording stops, assemble the chunks and process the audio.
    mediaRecorder.value.onstop = () => {
      const audioBlob = new Blob(audioChunks.value, { type: 'audio/webm' })
      processAudioData(audioBlob)
    }

    // Start recording; emit a data chunk every 100ms.
    mediaRecorder.value.start(100)
    isRecording.value = true
  } catch (error) {
    console.error('启动录音失败:', error)
    ElMessage.error('启动录音失败，请检查麦克风权限')
  }
}

/**
 * Stop an in-progress recording and release the media stream.
 * No-op when nothing is being recorded.
 */
const stopRecording = () => {
  if (!isRecording.value || !mediaRecorder.value) {
    return
  }
  mediaRecorder.value.stop()
  isRecording.value = false
  stop()
}

/**
 * Process recorded audio: transcribe it via the voice-to-text API,
 * append the recognised text to the conversation, then forward it to
 * AI_Worker over the device WebSocket.
 *
 * `isProcessing` stays true until the AI reply arrives (reset in the
 * device WebSocket onMessage handler) or until any failure below.
 * @param audioBlob the recorded audio (audio/webm)
 */
const processAudioData = async (audioBlob: Blob) => {
  try {
    isProcessing.value = true

    // Build the multipart payload for the transcription API.
    const formData = new FormData()
    const audioFile = new File([audioBlob], 'recording.webm', { type: 'audio/webm' })
    formData.append('file', audioFile)

    // Call the voice-to-text endpoint.
    const response = await voiceToText(formData)

    if (response?.status === 'success' || response?.code === 200) {
      const text = response.payload?.text || ''

      if (!text.trim()) {
        ElMessage.warning('未识别到语音内容，请重新录音')
        isProcessing.value = false
        return
      }

      // Append the user's message to the conversation history.
      conversations.value.push({
        role: 'user',
        content: text,
      })

      // Forward the text to AI_Worker over the device WebSocket.
      const sendData: AIWorkerSendData = {
        command: 'talk',
        message: text,
      }

      // FIX: the send function returns false when the socket is not
      // open; the old code ignored that and left `isProcessing` stuck
      // at true forever (no AI reply would ever reset it).
      const sent = deviceWebSocketSend?.(JSON.stringify(sendData))
      if (!sent) {
        ElMessage.error('语音处理失败')
        isProcessing.value = false
      }
    } else {
      ElMessage.error(response?.message || '语音识别失败')
      isProcessing.value = false
    }
  } catch (error: unknown) {
    console.error('处理音频失败:', error)
    // Narrow `unknown` instead of using `any` for the caught value.
    ElMessage.error((error instanceof Error && error.message) || '语音处理失败')
    isProcessing.value = false
  }
}

// ==================== Cleanup and Lifecycle ====================
/**
 * Release all session resources: stop recording, close the device
 * WebSocket, stop the AI_Worker device, clear the countdown timer and
 * release the media stream. Safe to call more than once (it runs on
 * both unmount and page unload).
 *
 * Note: previously declared `async` despite containing no awaits;
 * now synchronous — callers never awaited it.
 */
const cleanupResources = () => {
  // Stop an in-progress recording and keep the flag consistent.
  if (mediaRecorder.value && isRecording.value) {
    mediaRecorder.value.stop()
    isRecording.value = false
  }

  // Close the device WebSocket and stop mirroring its status.
  if (deviceWebSocketClose) {
    deviceWebSocketClose()
  }
  if (webSocketStatusWatcher) {
    webSocketStatusWatcher.stop()
  }

  // Ask the backend to stop the AI_Worker device.
  stopAIWorkerDevice()

  // Stop the session countdown.
  if (countdownTimer) {
    clearInterval(countdownTimer)
    countdownTimer = null
  }

  // Release the microphone stream.
  stop()
}

// ==================== Lifecycle Hooks ====================
/** Named handler so the beforeunload listener can be removed on unmount. */
const handleBeforeUnload = () => {
  cleanupResources()
}

/** On mount: start the AI_Worker device and register page-close cleanup. */
onMounted(() => {
  startAIWorkerDevice()
  // Clean up when the tab/window is closed.
  window.addEventListener('beforeunload', handleBeforeUnload)
})

/**
 * On unmount: clean up and remove the page-close listener.
 * FIX: the listener was previously registered at setup scope and never
 * removed, leaking one listener (and a stale closure over this
 * component's state) per visit under SPA navigation.
 */
onBeforeUnmount(() => {
  window.removeEventListener('beforeunload', handleBeforeUnload)
  cleanupResources()
})
</script>

<template>
  <div class="h-full flex flex-col">
    <!-- Header: countdown on the left, logo + title on the right -->
    <div class="flex items-center justify-between p-4 border-b border-gray-200">
      <span class="text-red-500 font-semibold text-sm">倒计时剩余：{{ countdownRemaining }}秒</span>
      <span class="flex items-center text-lg font-bold text-gray-800">
        <img :src="aiLogo" width="32" height="32" alt="AI Logo" class="mr-2" />
        AI智能问答
      </span>
    </div>

    <!-- Conversation area -->
    <div class="flex-1 flex flex-col bg-gray-50">
      <!-- Status hint, shown only while there is no conversation yet -->
      <div class="flex-1 flex min-w-0 justify-center py-8">
        <div v-if="conversations.length === 0" class="text-center">
          <div v-if="isSystemReady" class="space-y-4">
            <div class="text-gray-600 text-lg">系统连接成功，请按下发送语音问题</div>
          </div>
          <div v-else class="space-y-4">
            <div class="text-gray-600 text-lg">系统连接中 请稍后......</div>
          </div>
        </div>

        <!-- Conversation history list -->
        <div v-else class="w-full space-y-4 max-h-full overflow-y-auto">
          <div
            v-for="(conversation, index) in conversations"
            :key="index"
            class="flex items-start gap-3 px-4"
            :class="conversation.role === 'user' ? 'justify-end' : 'justify-start'"
          >
            <!-- AI avatar (assistant messages only) -->
            <img
              v-if="conversation.role === 'assistant'"
              :src="aiAvatar"
              class="w-8 h-8 rounded-full flex-shrink-0"
              alt="AI"
            />

            <!-- Message bubble; styling depends on the sender's role -->
            <div
              class="max-w-1/2 px-4 py-2 rounded-lg text-sm"
              :class="
                conversation.role === 'user'
                  ? 'bg-blue-500 text-white rounded-br-none'
                  : 'bg-white text-gray-800 rounded-bl-none border border-gray-200'
              "
            >
              {{ conversation.content }}
            </div>

            <!-- User avatar (user messages only) -->
            <img
              v-if="conversation.role === 'user'"
              :src="userAvatar"
              class="w-8 h-8 rounded-full flex-shrink-0"
              alt="User"
            />
          </div>
        </div>
      </div>
    </div>

    <!-- Push-to-talk record button: hold to record, release to send -->
    <el-button
      type="primary"
      class="w-full mt-4 py-6!"
      :disabled="!aiIsReady || !isSystemReady || isProcessing"
      @mousedown="startRecording"
      @mouseup="stopRecording"
      @mouseleave="stopRecording"
    >
      <template v-if="!isSystemReady">系统未连接</template>
      <template v-else-if="!aiIsReady">AI初始化中...</template>
      <template v-else-if="isProcessing">系统处理中...</template>
      <template v-else>
        {{ isRecording ? '松开结束录音' : '按住发送语音' }}
      </template>
    </el-button>
  </div>
</template>
