<!-- 人脸检测 -->
<script setup>
import { getFaceInfoAPI } from '@/api/user'
import { onBeforeUnmount, onMounted, ref } from 'vue'
import * as faceAPI from 'face-api.js'
import { ElMessage } from 'element-plus'
import { updateFaceImageAPI, updateFaceInfoAPI } from '../api/user'

defineProps(['modelValue'])
const emits = defineEmits(['update:modelValue', 'handleSuccess'])

const userInfo = ref(null)
const getFaceInfo = async () => {
  const { data: res } = await getFaceInfoAPI()
  if (res.status === 200) {
    userInfo.value = res.data
    if (!userInfo.value.face_info) {
      hasFaceInfo.value = true
    }
  }
}
getFaceInfo()

const hasFaceInfo = ref(false) // 用户首次进行人脸识别提示
const imgUrl = ref('') // 首次人脸的照片存放
const videoRef = ref(null)
const canvasRef = ref(null)
const imageRef = ref(null)
const videoShow = ref(false)
const imageShow = ref(false)
const loading = ref(true) // 加载控件

onMounted(() => {
  initModels()
})

onBeforeUnmount(() => {
  clearTimeout(timeout.value)
  if (videoRef.value && videoRef.value.srcObject) {
    // 确保 videoRef.value 和 videoRef.value.srcObject 都存在
    videoRef.value.srcObject.getTracks()[0].stop()
    videoRef.value.pause()
    videoRef.value.srcObject = null // 清除 srcObject，可选
  } else if (stream.value) {
    stream.value.getTracks()[0].stop()
  }
})

const option = ref({}) // faceAPI模型配置
const initModels = async () => {
  // 初始化faceAPI的模型
  await faceAPI.nets.ssdMobilenetv1.loadFromUri('/models')
  await faceAPI.loadFaceLandmarkModel('/models')
  await faceAPI.nets.faceLandmark68Net.loadFromUri('/models')
  await faceAPI.nets.faceRecognitionNet.loadFromUri('/models')
  option.value = new faceAPI.SsdMobilenetv1Options({
    minConfidence: 0.5
  })
  checkCamera()
}

// 检测用户是否有摄像头
const stream = ref(null)
const checkCamera = async () => {
  // 获取媒体信息
  const navigator = window.navigator.mediaDevices
  // 判断用户是否有摄像头
  const devices = await navigator?.enumerateDevices()
  if (devices) {
    videoShow.value = true // 显示视频画面
    stream.value = await navigator.getUserMedia({
      audio: false,
      video: {
        width: 500,
        height: 500
      }
    })
    try {
      imgUrl.value = ''
      if (videoRef.value) {
        videoRef.value.srcObject = stream.value
        loading.value = false
        videoRef.value.play()
        detectFace()
      }
    } catch (e) {
      //
    }
  } else {
    return ElMessage.error('摄像头不可用，不支持本次考试！！！')
  }
}

// 人脸识别方法
const timeout = ref(0)
const noOne = ref(null)
const moreThanOne = ref(null)
const detectFace = async () => {
  if (videoRef.value.paused) {
    // 判断摄像头是否停止使用
    return clearTimeout(timeout.value)
  }
  const ctx = canvasRef.value.getContext('2d')
  ctx.drawImage(videoRef.value, 0, 0, 500, 500)
  const res = await faceAPI
    .detectAllFaces(canvasRef.value, option.value)
    .withFaceLandmarks()

  if (res.length === 0) {
    if (moreThanOne.value) {
      moreThanOne.value.close()
      moreThanOne.value = null
    }
    !noOne.value && (noOne.value = ElMessage.warning('未识别到人脸'))
  } else if (res.length > 1) {
    if (noOne.value) {
      noOne.value.close()
      noOne.value = null
    }
    !moreThanOne.value &&
      (moreThanOne.value = ElMessage.warning('检测到镜头中存在多人'))
  } else {
    if (noOne.value) {
      noOne.value.close()
      noOne.value = null
    }
    if (moreThanOne.value) {
      moreThanOne.value.close()
      moreThanOne.value = null
    }
  }
  // 绘制人脸框
  res.forEach(({ detection }) => {
    const drawBox = new faceAPI.draw.DrawBox(detection.box, { boxColor: 'red' })
    canvasRef.value && drawBox.draw(canvasRef.value)
  })
  // timeout.value = setTimeout(() => {
  //   return detectFace()
  // })
  requestAnimationFrame(detectFace)
}

// 拍摄,截取摄像头当前帧并显示在页面上
const shoot = async () => {
  if (noOne.value) {
    return ElMessage.warning('未检测到人脸')
  }
  if (moreThanOne.value) {
    return ElMessage.warning('检测到多张人脸')
  }
  // 将画布内容转为base64并存放
  imgUrl.value = canvasRef.value.toDataURL('image/png')
  videoRef.value.srcObject.getTracks()[0].stop()
  videoRef.value.pause()
  videoShow.value = false
  imageShow.value = true

  // 将 Base64 数据设置为图片的 src 属性
  imageRef.value.src = imgUrl.value
  getDescriptors()
}

// 重新拍照
const shootAgain = () => {
  imageShow.value = false
  imgUrl.value = ''
  loading.value = true
  resultShow.value = false
  checkCamera()
}

// 获取图片的人脸描述符，进行下一步操作
const descriptors = ref(null)
const getDescriptors = async () => {
  imageShow.value = false
  loading.value = true
  const img = await faceAPI.fetchImage(imgUrl.value)
  const res = await faceAPI
    .detectAllFaces(img, option.value)
    .withFaceLandmarks()
    .withFaceDescriptors()
  descriptors.value = res.map((detection) => detection.descriptor)

  // 如果考生信息内没有人脸描述符信息，则将描述符上传数据库记录，否则直接对比
  if (!userInfo.value.face_info) {
    const { data: res } = await updateFaceInfoAPI({
      id: userInfo.value.id,
      face_info: JSON.stringify(descriptors.value[0])
    })
    if (res.status !== 200) {
      return ElMessage.error(res.message)
    }

    // 上传人脸照片
    const faceImage = new FormData()
    faceImage.append('faceImage', imgUrl.value)
    faceImage.append('id', userInfo.value.id)
    const { data: res2 } = await updateFaceImageAPI(faceImage)
    if (res2.status !== 200) {
      return ElMessage.error(res.message)
    }
    result.value = true
    resultShow.value = true
  } else {
    // 进行余弦相似度计算
    if (!descriptors.value[0]) {
      checkCamera()
      return ElMessage.warning('请重新拍摄')
    }
    calculateSimilarity(
      descriptors.value[0],
      JSON.parse(userInfo.value.face_info)
    )
  }
}

// 计算两个人脸描述符之间的余弦相似度
const resultShow = ref(false)
const minConfidence = 0.45
const result = ref(false)
const calculateSimilarity = (newDesc, oldDesc) => {
  oldDesc.length = Object.keys(oldDesc).length
  const res = faceAPI.euclideanDistance(newDesc, oldDesc)

  resultShow.value = true
  loading.value = false
  if (res > minConfidence) {
    result.value = false
  } else {
    result.value = true
  }
}

// 处理通过
const handleSuccess = () => {
  if (!result.value) {
    return
  }
  emits('handleSuccess', result.value)
  emits('update:modelValue', false)
}
</script>
<template>
  <el-dialog
    title="人脸识别"
    :model-value="modelValue"
    @close="emits('update:modelValue', false)"
    :close-on-click-modal="false"
    :close-on-press-escape="false"
    :show-close="false"
  >
    <div class="face-detect-dialog-content">
      <p class="tip" v-if="hasFaceInfo">
        此账号首次进行人脸识别匹配,请先拍照上传人脸信息
      </p>
      <div
        class="face-detect-video-box"
        v-if="!resultShow"
        v-loading="loading"
        element-loading-text="加载中..."
      >
        <video ref="videoRef" style="display: none" />
        <canvas
          ref="canvasRef"
          width="500"
          height="500"
          v-show="videoShow"
        ></canvas>
        <img ref="imageRef" v-show="imageShow" />
      </div>
      <div class="face-detect-result" v-if="resultShow">
        <el-result
          :icon="result ? 'success' : 'error'"
          :title="result ? '检测通过' : '人脸不匹配'"
        >
        </el-result>
      </div>
      <div class="face-detect--btn" v-if="!result">
        <el-button type="primary" @click="shoot" :disabled="!!imgUrl">
          点击拍照
        </el-button>
        <el-button :disabled="!imgUrl" @click="shootAgain">
          重新拍照
        </el-button>
      </div>
    </div>

    <template #footer>
      <el-button type="primary" :disabled="!imgUrl" @click="handleSuccess">
        确定
      </el-button>
    </template>
  </el-dialog>
</template>

<style lang="scss" scoped>
.face-detect-dialog-content {
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  margin-bottom: 0.2rem;
  .tip {
    font-size: 0.24rem;
  }
  .face-detect-video-box {
    min-width: 5rem;
    min-height: 5rem;
    margin: 0.2rem 0;
  }
  .face-detect--btn {
    display: flex;
    justify-content: center;
    align-items: center;
    margin-bottom: 0.2rem;
  }
}
.face-detect-result {
  min-width: 5rem;
  min-height: 5rem;
  display: flex;
  align-items: center;
  justify-content: center;
}
</style>
