<template>
  <div class="task-main">
    <!-- Render the component matching the current task's type. Using v-else-if
         keeps the chain mutually exclusive and stops evaluating once one matches. -->
    <template v-if="taskIndex <= taskList.length - 1">
      <FirstDesc v-if="taskList[taskIndex].type === 'first-desc'" :key="taskList[taskIndex].id"/>
      <LxDesc v-else-if="taskList[taskIndex].type === 'lx-desc'" :key="taskList[taskIndex].id"/>
      <LxImgTask v-else-if="taskList[taskIndex].type === '罗夏墨迹'" :task="taskList[taskIndex]" :key="taskList[taskIndex].id"/>
      <!-- :key added for consistency with siblings so the component remounts per task -->
      <TATDesc v-else-if="taskList[taskIndex].type === 'tat-desc'" :task="taskList[taskIndex]" :key="taskList[taskIndex].id"/>
      <TATTask v-else-if="taskList[taskIndex].type === 'tat'" :task="taskList[taskIndex]" :key="taskList[taskIndex].id"/>
      <VideoDesc v-else-if="taskList[taskIndex].type === 'video-desc'" :key="taskList[taskIndex].id"/>
      <VideoTask v-else-if="taskList[taskIndex].type === '观影'" :task="taskList[taskIndex]" :key="taskList[taskIndex].id"/>
      <InterviewTask v-else-if="taskList[taskIndex].type === '访谈'" :task="taskList[taskIndex]" :key="taskList[taskIndex].id"/>
    </template>

    <!-- All tasks completed -->
    <TaskEnd v-else/>

    <!-- Hidden elements used by face detection: camera feed + scratch canvas -->
    <video ref="videoRef" autoplay muted playsinline style="display: none;"></video>
    <canvas ref="canvasRef" style="display: none;"></canvas>
  </div>
</template>

<script setup>
import useTaskStore from '@/stores/task.js'
import {computed, ref, onMounted, onUnmounted} from 'vue'
import { onBeforeRouteLeave } from 'vue-router'

import TaskEnd from "./TaskEnd.vue";
import FirstDesc from "@/views/task/component/desc/FirstDesc.vue";
import LxImgTask from "@/views/task/component/task/LxImgTask.vue";
import LxDesc from "@/views/task/component/desc/LxDesc.vue";
import TATDesc from "@/views/task/component/desc/TATDesc.vue";
import TATTask from "@/views/task/component/task/TATTask.vue";
import VideoDesc from "@/views/task/component/desc/VideoDesc.vue";
import VideoTask from "@/views/task/component/task/VideoTask.vue";
import InterviewTask from "@/views/task/component/task/InterviewTask.vue";


import { ElNotification, ElMessageBox } from 'element-plus'

const taskStore = useTaskStore()
// Task list (reactive view over the store)
const taskList = computed(() => taskStore.taskList)
// Index of the task currently being shown
const taskIndex = computed(() => taskStore.taskIndex)

// --- Face-detection state ---
const videoRef = ref(null)            // hidden <video> that receives the camera stream
const canvasRef = ref(null)           // hidden <canvas> sized to the video frame
let model = null                      // loaded BlazeFace model (null until loadFaceDetectionModel succeeds)
let isDetecting = false               // true while the rAF detection loop is running
let stream = null                     // active MediaStream from getUserMedia, stopped on teardown
let animationId = null                // pending requestAnimationFrame id, for cancellation
let lastNotificationTime = 0          // timestamp of the last user warning
const notificationInterval = 5000     // do not repeat a warning within 5 s
let faceDetectionEnabled = true       // flips to false when the device can't support detection
let consecutiveErrors = 0             // consecutive detection failures (reset on success)
const maxConsecutiveErrors = 3        // after this many consecutive errors, detection is disabled
let lastDetectionTime = 0             // timestamp of the last actual model inference
const detectionInterval = 5000        // run inference at most once every 5 s

/**
 * Resolve a resource path for both the dev server and a packaged Electron build.
 * Inside Electron an absolute "/…" path is rewritten as "./…" so assets load
 * relative to the app bundle; in a plain browser the path is returned untouched.
 */
const getResourcePath = (path) => {
  const inElectron = Boolean(window.electronAPI || window.require)
  if (!inElectron) {
    // Dev / plain-web environment: the original path works as-is.
    return path
  }
  // Electron: make absolute paths relative to the packaged bundle.
  return path.startsWith('/') ? `.${path}` : path
}

/**
 * Configure the TensorFlow.js compute backend with graceful fallback.
 * Electron: always CPU (avoids WebGL driver issues in packaged builds).
 * Browser: probe for WebGL support; use it when available, otherwise CPU.
 * Any failure disables face detection entirely (faceDetectionEnabled = false)
 * rather than breaking the page.
 */
const configureTensorFlowBackend = async () => {
  try {
    if (window.tf) {
      // In Electron, prefer the CPU backend to avoid WebGL problems.
      if (window.electronAPI || window.require) {
        console.log('Electron环境检测到，配置CPU后端')
        await window.tf.setBackend('cpu')
      } else {
        // In the browser, probe WebGL support before choosing a backend.
        const canvas = document.createElement('canvas')
        const gl = canvas.getContext('webgl') || canvas.getContext('experimental-webgl')
        
        if (!gl) {
          console.warn('设备不支持WebGL，直接使用CPU后端')
          await window.tf.setBackend('cpu')
        } else {
          // Try WebGL first; fall back to CPU if initialization fails.
          try {
            await window.tf.setBackend('webgl')
            console.log('使用WebGL后端')
          } catch (webglError) {
            console.warn('WebGL后端初始化失败，降级到CPU后端:', webglError)
            await window.tf.setBackend('cpu')
          }
        }
      }
      await window.tf.ready()
      console.log('TensorFlow.js后端配置完成:', window.tf.getBackend())
    }
  } catch (error) {
    console.error('配置TensorFlow.js后端失败:', error)
    faceDetectionEnabled = false
  }
}

/**
 * Ensure TensorFlow.js and BlazeFace are available (loading the locally
 * bundled scripts on demand), configure the TF backend, then load the
 * face-detection model. Any failure disables detection instead of throwing.
 */
const loadFaceDetectionModel = async () => {
  try {
    const librariesMissing =
        typeof window.tf === 'undefined' || typeof window.blazeface === 'undefined'
    if (librariesMissing) {
      // Pull in the bundled TF.js and BlazeFace scripts dynamically.
      await loadScript(getResourcePath('/libs/tf.min.js'))
      await loadScript(getResourcePath('/libs/blazeface.min.js'))
    }

    // Pick a compute backend (may disable detection on unsupported devices).
    await configureTensorFlowBackend()

    if (!faceDetectionEnabled) {
      console.warn('人脸检测功能已禁用')
      return
    }

    // Load the BlazeFace model from the bundled weights.
    model = await window.blazeface.load({
      modelUrl: getResourcePath('/libs/model.json')
    })
    console.log('人脸检测模型加载完成')
  } catch (error) {
    console.error('加载人脸检测模型失败:', error)
    faceDetectionEnabled = false
  }
}

/**
 * Inject a <script> tag into <head> and resolve once it has loaded
 * (rejects if the script fails to load).
 */
const loadScript = (src) =>
    new Promise((resolve, reject) => {
      const tag = document.createElement('script')
      tag.onload = resolve
      tag.onerror = reject
      tag.src = src
      document.head.appendChild(tag)
    })

/**
 * Start the webcam and wire the stream into the hidden <video> element.
 * Once video metadata is available, size the canvas to the frame and start
 * the face-detection loop (only if the model loaded and detection is enabled).
 * On failure, notifies the user and logs the error.
 */
const startCamera = async () => {
  try {
    // Request a camera stream; 640x480 is plenty for BlazeFace.
    stream = await navigator.mediaDevices.getUserMedia({
      video: {
        width: { ideal: 640 },
        height: { ideal: 480 }
      }
    })

    if (videoRef.value) {
      videoRef.value.srcObject = stream

      // Wait for metadata so videoWidth/videoHeight are valid before detecting.
      videoRef.value.onloadedmetadata = () => {
        if (canvasRef.value && videoRef.value) {
          canvasRef.value.width = videoRef.value.videoWidth
          canvasRef.value.height = videoRef.value.videoHeight
        }

        // Start detection only when the model is ready and not disabled.
        if (model && faceDetectionEnabled) {
          startFaceDetection()
        }
      }
    }
  } catch (error) {
    // Bug fix: the original called ElMessage.error, but ElMessage is never
    // imported (only ElNotification/ElMessageBox are), so this path threw a
    // ReferenceError. Use ElNotification, consistent with the rest of the file.
    ElNotification({
      title: '摄像头启动失败',
      message: '启动摄像头失败，请检查设备权限',
      type: 'error',
      duration: 5000,
      position: 'top-right'
    })
    console.error('启动摄像头失败:', error)
  }
}

/**
 * Start the detection loop, unless one is already running (idempotent).
 */
const startFaceDetection = () => {
  if (isDetecting) {
    return
  }
  isDetecting = true
  detectFaces()
}

/**
 * Stop the detection loop and cancel any pending animation frame.
 */
const stopFaceDetection = () => {
  isDetecting = false
  if (animationId) {
    cancelAnimationFrame(animationId)
    animationId = null
  }
}

/**
 * Main face-detection loop, driven by requestAnimationFrame.
 * Runs an actual model inference at most once per `detectionInterval` ms;
 * between inferences it only schedules the next frame. When no face is
 * visible, or the face drifts too far off center, the user is warned (rate-
 * limited by `notificationInterval`). WebGL/texture errors trigger a CPU
 * backend fallback; repeated failures disable detection entirely.
 */
const detectFaces = async () => {
  if (!isDetecting || !videoRef.value || !canvasRef.value || !model || !faceDetectionEnabled) {
    return
  }

  const video = videoRef.value
  const currentTime = Date.now()

  // Throttle: skip the expensive inference until the interval has elapsed.
  if (currentTime - lastDetectionTime < detectionInterval) {
    if (isDetecting && faceDetectionEnabled) {
      animationId = requestAnimationFrame(detectFaces)
    }
    return
  }

  // Guard against a stream that isn't ready yet (prevents "texture size [0x0]" errors).
  if (!video.videoWidth || !video.videoHeight || video.videoWidth === 0 || video.videoHeight === 0) {
    console.warn('视频流尺寸无效，跳过本次检测')
    if (isDetecting && faceDetectionEnabled) {
      animationId = requestAnimationFrame(detectFaces)
    }
    return
  }

  // Record when this inference ran, for throttling the next one.
  lastDetectionTime = currentTime

  try {
    // Run BlazeFace on the current video frame.
    const predictions = await model.estimateFaces(video, {
      returnTensors: false,
      flipHorizontal: true
    })

    // A successful inference resets the consecutive-error counter.
    consecutiveErrors = 0

    let shouldNotify = false
    let notificationMessage = ''

    if (predictions.length === 0) {
      // No face in frame at all.
      shouldNotify = true
      notificationMessage = '请确保人脸在摄像头范围内'
    } else {
      // Check whether the first detected face is roughly centered.
      const face = predictions[0]
      let start, end

      // BlazeFace may return tensors or plain arrays depending on configuration.
      if (face.topLeft && face.topLeft.arraySync) {
        start = face.topLeft.arraySync()
        end = face.bottomRight.arraySync()
      } else if (Array.isArray(face.topLeft)) {
        start = face.topLeft
        end = face.bottomRight
      } else {
        // Unknown prediction format — skip this frame (loop resumes next interval).
        return
      }

      // Face bounding box in video pixel coordinates.
      const faceBox = {
        x: start[0],
        y: start[1],
        width: end[0] - start[0],
        height: end[1] - start[1]
      }

      // Face center point.
      const faceCenterX = faceBox.x + faceBox.width / 2
      const faceCenterY = faceBox.y + faceBox.height / 2

      // Frame center point.
      const videoCenterX = video.videoWidth / 2
      const videoCenterY = video.videoHeight / 2

      // Offset from center scaled to [0, 50], where 50 == face center at the frame edge.
      const horizontalOffset = Math.abs(faceCenterX - videoCenterX) / (video.videoWidth / 2) * 50
      const verticalOffset = Math.abs(faceCenterY - videoCenterY) / (video.videoHeight / 2) * 50

      // Warn when the offset exceeds 25 (i.e. past halfway to the frame edge).
      // (The original comment claimed a threshold of 15, contradicting the code.)
      if (horizontalOffset > 25) {
        shouldNotify = true
        notificationMessage = '请调整位置，保持人脸水平居中'
      } else if (verticalOffset > 25) {
        shouldNotify = true
        notificationMessage = '请调整位置，保持人脸垂直居中'
      }
    }

    // Rate-limit warnings so the user isn't spammed every cycle.
    if (shouldNotify && (currentTime - lastNotificationTime > notificationInterval)) {
      ElNotification({
        title: '人脸检测提醒',
        message: notificationMessage,
        type: 'warning',
        duration: 3000,
        position: 'top-right'
      })
      lastNotificationTime = currentTime
    }

  } catch (error) {
    console.error('人脸检测错误:', error)

    // Bug fix: `error.message` may be undefined when a non-Error value is
    // thrown; coerce safely instead of crashing inside the error handler.
    const errMsg = String((error && error.message) || '')

    // Classify the error and react accordingly.
    if (errMsg.includes('texture') || errMsg.includes('WebGL') || errMsg.includes('WebCL')) {
      console.warn('检测到WebGL/WebCL相关错误，尝试切换到CPU后端')
      try {
        await window.tf.setBackend('cpu')
        await window.tf.ready()
        console.log('已切换到CPU后端')
        // Reset the error count to give the CPU backend a fresh chance.
        consecutiveErrors = 0
      } catch (backendError) {
        console.error('切换到CPU后端失败:', backendError)
        faceDetectionEnabled = false
        stopFaceDetection()
        ElNotification({
          title: '人脸检测不可用',
          message: '设备不支持人脸检测功能',
          type: 'warning',
          duration: 5000,
          position: 'top-right'
        })
        return
      }
    } else {
      consecutiveErrors++
    }

    // Too many consecutive failures: disable detection for this session.
    if (consecutiveErrors >= maxConsecutiveErrors) {
      console.warn(`连续${maxConsecutiveErrors}次人脸检测错误，禁用人脸检测功能`)
      faceDetectionEnabled = false
      stopFaceDetection()

      // Friendly notice: the rest of the page keeps working.
      ElNotification({
        title: '人脸检测暂时不可用',
        message: '检测到设备兼容性问题，人脸检测功能已暂时关闭，不影响其他功能使用',
        type: 'info',
        duration: 5000,
        position: 'top-right'
      })
      return
    }
  }

  // Schedule the next frame of the loop.
  if (isDetecting && faceDetectionEnabled) {
    animationId = requestAnimationFrame(detectFaces)
  }
}

// On mount: load the detection model, then start the camera.
// Failures are logged only — face detection is optional and must not block the page.
onMounted(async () => {
  try {
    await loadFaceDetectionModel()
    await startCamera()
  } catch (error) {
    console.error('组件初始化失败:', error)
    // Even if face detection fails, the page should still render normally.
  }
})

// On unmount: tear down the detection loop and release every camera track.
onUnmounted(() => {
  stopFaceDetection()
  if (stream) {
    for (const track of stream.getTracks()) {
      track.stop()
    }
  }
})

/**
 * Ask for confirmation before navigating away while tasks remain unfinished.
 * Confirmed departure releases the camera and detection loop; cancelling
 * blocks the navigation. With all tasks done, navigation proceeds silently.
 */
onBeforeRouteLeave(async (to, from) => {
  const tasksRemaining = taskIndex.value < taskList.value.length
  if (!tasksRemaining) {
    // Nothing in progress — allow navigation without prompting.
    return
  }
  try {
    await ElMessageBox.confirm(
        '您确定要离开当前页面吗？离开后当前进度将会保存。',
        '离开确认',
        {
          confirmButtonText: '确定',
          cancelButtonText: '取消',
          type: 'warning',
          center: true
        }
    )
    // Confirmed: stop detection and release the camera before leaving.
    stopFaceDetection()
    if (stream) {
      stream.getTracks().forEach(track => track.stop())
    }
    return true
  } catch {
    // User cancelled the dialog — stay on the page.
    return false
  }
})

</script>

<style lang="scss" scoped>
// Fill the parent so the active task view can size itself to the viewport.
.task-main{
  height: 100%;
}
</style>
