<template>
<div class="camara-manage">
  <div class="title">视频输入设备确认</div>

  <div class="flex">
    <!-- Left panel: device picker + live quality metrics -->
    <div class="left">
      <div class="sub-title">选择输入设备</div>
      <el-select v-model="selectedDevice" placeholder="请选择" style="width: 2.4rem; margin-bottom: 24px">
        <el-option
            v-for="device in camaraDevices"
            :key="device.deviceId"
            :label="device.label || `Device ${device.deviceId}`"
            :value="device.deviceId">
        </el-option>
      </el-select>

      <div class="sub-title">视频测试质量</div>

      <!-- Metric items turn green when the value falls inside its range -->
      <div class="values">
        <CamaraParamsItem label="人脸质量" :range="[10, 100]" :value="faceQuality"/>
        <CamaraParamsItem label="人脸个数" :range="[0, 1]" :value="faceCount"/>
        <CamaraParamsItem label="人脸大小" :range="[10, 25]" :value="faceSize"/>
        <CamaraParamsItem label="光线强度" :range="[30, 100]" :value="lightIntensity"/>
        <CamaraParamsItem label="人脸水平居中" :range="[-1, 15]" :value="faceHorizontalCenter"/>
        <CamaraParamsItem label="人脸垂直居中"  :range="[-1, 15]" :value="faceVerticalCenter"/>
      </div>
      <div>请尽量保持上方6个数字处于绿色状态，在测评过程当中保持良好的摄录状态有助于提升报告的准确性。</div>

      <!-- Status line: loading / load failure / waiting, mutually exclusive before detection starts -->
      <div style="font-size: 16px; margin-top: 0.14rem; color: rgb(82, 155, 46);" v-if="loading && !detectionStarted">模型加载中...</div>
      <div style="font-size: 16px; margin-top: 0.14rem; color: rgb(255, 87, 87);" v-if="modelLoadError && !detectionStarted">模型加载失败</div>
      <div style="font-size: 16px; margin-top: 0.14rem; color: rgb(82, 155, 46);" v-if="!loading && !modelLoadError && !detectionStarted">等待开始检测...</div>
    </div>
    <div class="right">
      <!-- Live camera preview with the face-box overlay canvas on top -->
      <div class="video-container">
        <video ref="videoRef" autoplay muted playsinline class="video-preview"></video>
        <canvas ref="canvasRef" class="detection-canvas"></canvas>
      </div>
    </div>
  </div>
</div>
</template>

<script setup>
import {ref, onMounted, nextTick, watch, onUnmounted} from 'vue'
import CamaraParamsItem from "@/views/deviceManage/component/CamaraParamsItem.vue";
import useCamaras from "@/views/deviceManage/use/useCamaras.js";
import { ElMessage } from 'element-plus'

const loading = ref(true) // initial state: the detection model is loading
const modelLoadError = ref(false) // model failed to load (or detection was disabled)
const detectionStarted = ref(false) // whether the detection loop has started

const selectedDevice = ref(null)
const camaraDevices = ref([])
const videoRef = ref(null)
const canvasRef = ref(null)

// Detection metrics shown in the left-hand panel
const faceQuality = ref(0)
const faceCount = ref(0)
const faceSize = ref(0)
const lightIntensity = ref(0)
const faceHorizontalCenter = ref(0)
const faceVerticalCenter = ref(0)


// Face-detection related module-level state (not reactive on purpose)
let model = null
let isDetecting = false
let stream = null
let animationId = null
let faceDetectionEnabled = true // whether face detection is enabled
let consecutiveErrors = 0 // consecutive detection-error counter
const maxConsecutiveErrors = 3 // max consecutive errors before disabling detection

// Target area definition (ratios relative to the video dimensions)
const targetAreaRatio = {
  x: 0.2,    // starts at 20% of the video width
  y: 0.1,    // starts at 10% of the video height
  width: 0.6, // spans 60% of the video width
  height: 0.8 // spans 80% of the video height
}

/**
 * Resolve a resource path for both environments.
 *
 * In a packaged Electron build assets are loaded from the local filesystem,
 * so the absolute web path is turned into a relative one; in the dev server
 * the path is returned untouched.
 *
 * @param {string} path - absolute web path, e.g. "/libs/tf.min.js"
 * @returns {string} path usable in the current environment
 */
const getResourcePath = (path) => {
  // Presence of either bridge object marks an Electron renderer process.
  const runningInElectron = Boolean(window.electronAPI || window.require)
  return runningInElectron ? `.${path}` : path
}

/**
 * Configure the TensorFlow.js compute backend.
 *
 * Electron renderers get the CPU backend directly (WebGL is unreliable
 * there); in the browser, WebGL is probed and used when available, with a
 * CPU fallback. On any failure the module-level faceDetectionEnabled flag
 * is cleared so the detection loop never starts.
 */
const configureTensorFlowBackend = async () => {
  try {
    if (window.tf) {
      // In Electron, prefer the CPU backend to avoid WebGL problems
      if (window.electronAPI || window.require) {
        console.log('Electron环境检测到，配置CPU后端')
        await window.tf.setBackend('cpu')
      } else {
        // In the browser: probe WebGL support first, then pick the backend
        const canvas = document.createElement('canvas')
        const gl = canvas.getContext('webgl') || canvas.getContext('experimental-webgl')

        if (!gl) {
          console.warn('设备不支持WebGL，直接使用CPU后端')
          await window.tf.setBackend('cpu')
        } else {
          // Try WebGL; on failure, fall back to CPU
          try {
            await window.tf.setBackend('webgl')
            console.log('使用WebGL后端')
          } catch (webglError) {
            console.warn('WebGL后端初始化失败，降级到CPU后端:', webglError)
            await window.tf.setBackend('cpu')
          }
        }
      }
      await window.tf.ready()
      console.log('TensorFlow.js后端配置完成:', window.tf.getBackend())
    }
  } catch (error) {
    console.error('配置TensorFlow.js后端失败:', error)
    faceDetectionEnabled = false
  }
}

/**
 * Load TensorFlow.js and the BlazeFace model.
 *
 * Dynamically injects the tf/blazeface scripts if the globals are missing,
 * configures the backend, then loads the local BlazeFace model. Updates the
 * loading / modelLoadError refs so the template can show progress, and
 * disables detection entirely on failure.
 */
const loadFaceDetectionModel = async () => {
  try {
    loading.value = true
    modelLoadError.value = false
    // Only inject the script tags when the globals are not present yet
    if (typeof window.tf === 'undefined' || typeof window.blazeface === 'undefined') {
      // Dynamically load TensorFlow.js and BlazeFace
      await loadScript(getResourcePath('/libs/tf.min.js'))
      await loadScript(getResourcePath('/libs/blazeface.min.js'))
    }

    // Configure the TensorFlow.js backend (may disable detection on failure)
    await configureTensorFlowBackend()

    if (!faceDetectionEnabled) {
      console.warn('人脸检测功能已禁用')
      loading.value = false
      modelLoadError.value = true
      return
    }

    // Load the BlazeFace model from the bundled model file
    model = await window.blazeface.load({
      modelUrl: getResourcePath('/libs/model.json')
    })
    console.log('人脸检测模型加载完成')
    loading.value = false
  } catch (error) {
    console.error('加载人脸检测模型失败:', error)
    ElMessage.error('人脸检测模型加载失败')
    loading.value = false
    modelLoadError.value = true
    faceDetectionEnabled = false
  }
}

/**
 * Dynamically inject a script tag and wait for it to load.
 *
 * @param {string} src - script URL
 * @returns {Promise<Event>} resolves on load, rejects on a load error
 */
const loadScript = (src) =>
  new Promise((resolve, reject) => {
    const scriptEl = document.createElement('script')
    scriptEl.onload = resolve
    scriptEl.onerror = reject
    scriptEl.src = src
    document.head.appendChild(scriptEl)
  })

/**
 * Initialize the camera device list.
 *
 * Fills camaraDevices from the useCamaras() helper, auto-selects the first
 * device and starts its stream. Warns the user when no camera is found.
 */
const initCameraDevices = async () => {
  try {
    // NOTE(review): useCamaras() appears to return a Vue ref wrapping an
    // array of device objects with deviceId/label — confirm against helper.
    const devices = await useCamaras()
    camaraDevices.value = devices.value
    if(camaraDevices.value && camaraDevices.value.length > 0){
      selectedDevice.value = camaraDevices.value[0].deviceId
      await nextTick()
      await startCamera()
    } else {
      ElMessage.warning('没有可用的摄像头设备，请检查设备连接。')
    }
  } catch (error) {
    console.error('初始化摄像头设备失败:', error)
  }
}

/**
 * Start (or restart) the camera stream for the selected device.
 *
 * Stops any previous stream, requests a new one via getUserMedia, attaches
 * it to the <video> element, sizes the overlay canvas to the displayed
 * video size once metadata arrives, and kicks off face detection if the
 * model is ready.
 */
const startCamera = async () => {
  try {
    // Stop the previous stream so the device is released before re-opening
    if (stream) {
      stream.getTracks().forEach(track => track.stop())
    }

    // Request a new camera stream for the selected device
    stream = await navigator.mediaDevices.getUserMedia({
      video: {
        deviceId: selectedDevice.value ? { exact: selectedDevice.value } : undefined,
        width: { ideal: 640 },
        height: { ideal: 480 },
        frameRate: { ideal: 30 } // request 30fps
      }
    })



    if (videoRef.value) {
      videoRef.value.srcObject = stream

      // Wait for video metadata before sizing the canvas
      videoRef.value.onloadedmetadata = () => {
        if (canvasRef.value && videoRef.value) {
          // Match the canvas to the *displayed* size; detectFaces() scales
          // detection coordinates from intrinsic video size accordingly
          const rect = videoRef.value.getBoundingClientRect()
          canvasRef.value.width = rect.width
          canvasRef.value.height = rect.height
        }

        // Begin face detection (no-op until the model has loaded)
        if (model && faceDetectionEnabled) {
          startFaceDetection()
        }
      }
    }
  } catch (error) {
    ElMessage.error('启动摄像头失败，请检查设备权限')
    console.error('启动摄像头失败:', error)
  }
}

/**
 * Begin the face-detection loop (idempotent: a second call while detection
 * is already running does nothing).
 */
const startFaceDetection = () => {
  if (isDetecting) {
    return
  }
  isDetecting = true
  detectionStarted.value = true
  detectFaces()
}

/**
 * Stop the face-detection loop and cancel any pending animation frame.
 */
const stopFaceDetection = () => {
  isDetecting = false
  if (!animationId) return
  cancelAnimationFrame(animationId)
  animationId = null
}


/**
 * 计算光线强度
 */
const calculateLightIntensity = (canvas, ctx) => {
  const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height)
  const data = imageData.data
  let sum = 0

  for (let i = 0; i < data.length; i += 4) {
    // 计算RGB的平均值作为亮度
    const brightness = (data[i] + data[i + 1] + data[i + 2]) / 3
    sum += brightness
  }

  return Math.round((sum / (data.length / 4)) / 255 * 100)
}

/**
 * Decide whether a face box counts as "inside" the target area.
 *
 * True when either the face-box center lies within the target rectangle, or
 * more than 40% of the face box's area overlaps the target.
 *
 * @param {{x,y,width,height}} faceBox - face bounding box
 * @param {{x,y,width,height}} targetArea - target rectangle, same coordinate space
 * @returns {boolean}
 */
const isFaceInsideTarget = (faceBox, targetArea) => {
  const centerX = faceBox.x + faceBox.width / 2
  const centerY = faceBox.y + faceBox.height / 2
  const targetRight = targetArea.x + targetArea.width
  const targetBottom = targetArea.y + targetArea.height

  const centerInside =
    centerX >= targetArea.x && centerX <= targetRight &&
    centerY >= targetArea.y && centerY <= targetBottom

  // Axis-aligned rectangle intersection, clamped to zero when disjoint
  const overlapW = Math.max(0, Math.min(faceBox.x + faceBox.width, targetRight) - Math.max(faceBox.x, targetArea.x))
  const overlapH = Math.max(0, Math.min(faceBox.y + faceBox.height, targetBottom) - Math.max(faceBox.y, targetArea.y))
  const faceArea = faceBox.width * faceBox.height
  const overlapRatio = faceArea > 0 ? (overlapW * overlapH) / faceArea : 0

  return centerInside || overlapRatio > 0.4
}

/**
 * Main face-detection loop.
 *
 * Runs one BlazeFace inference on the current video frame, draws the face
 * boxes on the overlay canvas, updates the metric refs (count, size,
 * centering, light level, quality score), then schedules the next frame via
 * requestAnimationFrame. WebGL-related failures trigger a one-time switch
 * to the CPU backend; repeated other failures disable detection entirely.
 */
const detectFaces = async () => {
  if (!isDetecting || !videoRef.value || !canvasRef.value || !model || !faceDetectionEnabled) {
    return
  }

  const video = videoRef.value
  const canvas = canvasRef.value
  const ctx = canvas.getContext('2d')

  // Validate the video stream to prevent "texture size [0x0]" errors
  if (!video.videoWidth || !video.videoHeight || video.videoWidth === 0 || video.videoHeight === 0) {
    console.warn('视频流尺寸无效，跳过本次检测')
    if (isDetecting && faceDetectionEnabled) {
      animationId = requestAnimationFrame(detectFaces)
    }
    return
  }

  // Clear the previous frame's overlay
  ctx.clearRect(0, 0, canvas.width, canvas.height)

  // Scale factors from intrinsic video size to the displayed canvas size
  const scaleX = canvas.width / video.videoWidth
  const scaleY = canvas.height / video.videoHeight

  // Drawing of the fixed target area was intentionally removed

  try {
    // Detect faces in the current frame
    const predictions = await model.estimateFaces(video, {
      returnTensors: false,
      flipHorizontal: true
    })

    // A successful inference resets the consecutive-error counter
    consecutiveErrors = 0

    let detectedFaceCount = predictions.length
    let totalFaceSize = 0
    let horizontalCenterSum = 0
    let verticalCenterSum = 0

    // Process each detected face
    for (const face of predictions) {
      let start, end

      // topLeft/bottomRight may be tensors or plain arrays depending on config
      if (face.topLeft && face.topLeft.arraySync) {
        start = face.topLeft.arraySync()
        end = face.bottomRight.arraySync()
      } else if (Array.isArray(face.topLeft)) {
        start = face.topLeft
        end = face.bottomRight
      } else {
        continue
      }

      // Raw face box (in intrinsic video coordinates)
      const originalFaceBox = {
        x: start[0],
        y: start[1],
        width: end[0] - start[0],
        height: end[1] - start[1]
      }

      // Scaled face box (in displayed canvas coordinates)
      const faceBox = {
        x: originalFaceBox.x * scaleX,
        y: originalFaceBox.y * scaleY,
        width: originalFaceBox.width * scaleX,
        height: originalFaceBox.height * scaleY
      }

      // Face size as a percentage of the video frame area
      const faceArea = originalFaceBox.width * originalFaceBox.height
      const videoArea = video.videoWidth * video.videoHeight
      const faceSizePercent = (faceArea / videoArea) * 100
      totalFaceSize += faceSizePercent

      // Face center point (center of the bounding box)
      const faceCenterX = originalFaceBox.x + originalFaceBox.width / 2
      const faceCenterY = originalFaceBox.y + originalFaceBox.height / 2

      // Video frame center point
      const videoCenterX = video.videoWidth / 2
      const videoCenterY = video.videoHeight / 2

      // Offset of face center from video center, as a percentage:
      // 0 = perfectly centered, 50 = at the very edge
      const horizontalOffset = Math.abs(faceCenterX - videoCenterX) / (video.videoWidth / 2) * 50
      const verticalOffset = Math.abs(faceCenterY - videoCenterY) / (video.videoHeight / 2) * 50

      horizontalCenterSum += horizontalOffset
      verticalCenterSum += verticalOffset

      // Draw the face box (using display-scaled coordinates)
      ctx.beginPath()
      ctx.rect(faceBox.x, faceBox.y, faceBox.width, faceBox.height)
      ctx.lineWidth = 2
      ctx.strokeStyle = 'green'
      ctx.stroke()
    }

    // Measure light level on a temporary canvas so the face boxes on the
    // overlay canvas are not overwritten
    const tempCanvas = document.createElement('canvas')
    tempCanvas.width = canvas.width
    tempCanvas.height = canvas.height
    const tempCtx = tempCanvas.getContext('2d')
    tempCtx.drawImage(video, 0, 0, canvas.width, canvas.height)
    const lightLevel = calculateLightIntensity(tempCanvas, tempCtx)



    // Update the detection metric refs (offsets default to 50 = worst
    // centering when no face is visible)
    faceCount.value = detectedFaceCount
    faceSize.value = detectedFaceCount > 0 ? Math.round(totalFaceSize / detectedFaceCount) : 0
    lightIntensity.value = lightLevel
    faceHorizontalCenter.value = detectedFaceCount > 0 ? Math.round(horizontalCenterSum / detectedFaceCount) : 50
    faceVerticalCenter.value = detectedFaceCount > 0 ? Math.round(verticalCenterSum / detectedFaceCount) : 50

    // Composite face-quality score (max 100)
    let qualityScore = 0
    if (detectedFaceCount === 1) {
      qualityScore += 30 // exactly one face present
    }
    if (faceSize.value >= 15 && faceSize.value <= 20) {
      qualityScore += 25 // face size in the ideal range
    }
    if (lightIntensity.value >= 40 && lightIntensity.value <= 80) {
      qualityScore += 25 // lighting in the ideal range
    }
    if (faceHorizontalCenter.value <= 15) {
      qualityScore += 10 // horizontally centered
    }
    if (faceVerticalCenter.value <= 15) {
      qualityScore += 10 // vertically centered
    }

    faceQuality.value = qualityScore

  } catch (error) {
    console.error('人脸检测错误:', error)

    // Classify the error and handle accordingly
    if (error.message.includes('texture') || error.message.includes('WebGL') || error.message.includes('WebCL')) {
      console.warn('检测到WebGL/WebCL相关错误，尝试切换到CPU后端')
      try {
        await window.tf.setBackend('cpu')
        await window.tf.ready()
        console.log('已切换到CPU后端')
        // Reset the error counter to give the CPU backend a chance
        consecutiveErrors = 0
      } catch (backendError) {
        console.error('切换到CPU后端失败:', backendError)
        faceDetectionEnabled = false
        stopFaceDetection()
        modelLoadError.value = true
        ElMessage({
          message: '设备不支持人脸检测功能',
          type: 'warning',
          duration: 5000
        })
        return
      }
    } else {
      consecutiveErrors++
    }

    // Too many consecutive errors: disable face detection altogether
    if (consecutiveErrors >= maxConsecutiveErrors) {
      console.warn(`连续${maxConsecutiveErrors}次人脸检测错误，禁用人脸检测功能`)
      faceDetectionEnabled = false
      stopFaceDetection()
      modelLoadError.value = true

      // Show a user-friendly notice
      ElMessage({
        message: '检测到设备兼容性问题，人脸检测功能已暂时关闭',
        type: 'info',
        duration: 5000
      })
      return
    }
  }

  // Schedule the next detection frame
  if (isDetecting && faceDetectionEnabled) {
    animationId = requestAnimationFrame(detectFaces)
  }
}

// Restart the camera stream whenever the user switches input devices
watch(selectedDevice, async (newDevice) => {
  if (newDevice) {
    await startCamera()
  }
})

// On mount: load the detection model first, then enumerate and start cameras
onMounted(async () => {
  await loadFaceDetectionModel()
  await initCameraDevices()
})

// On unmount: stop the detection loop and release the camera stream
onUnmounted(() => {
  stopFaceDetection()
  if (stream) {
    stream.getTracks().forEach(track => track.stop())
  }
})

</script>

<style scoped lang="scss">
.camara-manage{
  border-bottom: 1px solid #fff;
  .title{
    font-size: 20px;
    margin-top: 40px;
    margin-bottom: 24px;
  }
  .flex {
    display: flex;
    align-items: flex-start;
    height: 400px; // 固定整体高度
  }
  .left{
    width: 440px;
    height: 100%;
    display: flex;
    flex-direction: column;
  }
  .right{
    flex: 1;
    padding-left: 44px;
    height: 100%;
    display: flex;
    align-items: center;


    .video-container {
      position: relative;
      width: 100%;
      max-width: 480px; // 缩小视频显示区域
      height: 360px; // 固定视频容器高度

      .video-preview {
        width: 100%;
        height: 100%;
        object-fit: cover; // 保持比例填充
        border-radius: 8px;
        background: #000;
      }

      .detection-canvas {
        position: absolute;
        top: 0;
        left: 0;
        width: 100%;
        height: 100%;
        pointer-events: none;
        border-radius: 8px;
      }
    }
  }
}

.sub-title{
  font-size: 16px;
  margin-bottom: 12px;
}

.values{
  display: grid;
  grid-template-columns: 1fr 1fr;
  grid-template-rows: repeat(3, auto);
  gap: 18px; // 缩小间距
  align-content: start;
  margin-bottom: 24px;
}
</style>

<style lang="scss" scoped>
@use '@/styles/variables.scss' as *;
// Element Plus select overrides so the control matches the page's dark theme
:deep(.el-select__wrapper){
  background: $content-bg-color !important;
  border-color: #DCDCDC !important;
}
:deep(.el-select__placeholder) {
  color: #fff !important;
}
</style>
