import * as faceapi from '@vladmandic/face-api/dist/face-api.esm.js'
// import * as faceapi from 'face-api.js'
import { VideoController } from '@renderer/components/VideoController'
import { useEffect, useRef } from 'react'
import { message } from 'antd'
const msgDura = 1
const modelUrl = import.meta.env.BASE_URL + '/models'
function useFaceApi(width = 500, height = 500) {
  const video = useRef<null | VideoController>(null)
  const container = useRef<null | HTMLDivElement>(null) //video容器
  let valve = true
  Promise.all([
    faceapi.nets.tinyFaceDetector.loadFromUri(modelUrl),
    faceapi.nets.faceLandmark68Net.loadFromUri(modelUrl),
    faceapi.nets.faceRecognitionNet.loadFromUri(modelUrl),
    faceapi.nets.faceExpressionNet.loadFromUri(modelUrl),
    faceapi.nets.ageGenderNet.loadFromUri(modelUrl),
    faceapi.nets.ssdMobilenetv1.loadFromUri(modelUrl)
  ])
    .then(() => {
      console.log('加载模型数据成功')
      // loadCamera()
    })
    .catch((err) => {
      message.error('加载模型数据失败', msgDura)
      console.log('人脸识别数据加载失败', err)
    })
  // 加载模型和摄像头
  function loadCamera() {
    video.current = new VideoController(width, height, container.current as HTMLDivElement)
    // 动态创建video容器
    video.current.createVideo()
    console.log('创建video容器成功')
    // 开启摄像头并将摄像头流加载到video
    video.current.getCamera()
    // 监听动态创建video的play事件
    video.current.videoElement.addEventListener('play', () => {
      window.setTimeout(() => {
        // 这里开始获取摄像头的画面
        message.success('开始人脸识别', msgDura)
        console.log('开始人脸识别')
        detectiveFace()
      }, 500) //视频轨道开始播放需要时间
    })
  }
  async function detectiveFace() {
    const detections = await faceapi
      .detectAllFaces(video.current!.videoElement, new faceapi.TinyFaceDetectorOptions()) // 添加各种识别信息
      .withFaceLandmarks() // 人脸区域
      .withFaceExpressions() // 人脸表情
      .withFaceDescriptors() //
      .withAgeAndGender() // 年龄
    if (detections.length < 1) {
      console.log('未检测到人脸')
    }
    // const faceMatcher = new faceapi.FaceMatcher(detections)
    // detections.forEach((fd) => {
    //   const bestMatch = faceMatcher.findBestMatch(fd.descriptor)
    //   console.log('bestMatch', bestMatch)
    // })
    drawDetections(detections)
  }

  function drawDetections(
    detections: faceapi.WithAge<
      faceapi.WithGender<
        faceapi.WithFaceDescriptor<
          faceapi.WithFaceExpressions<
            faceapi.WithFaceLandmarks<{ detection: faceapi.FaceDetection }, faceapi.FaceLandmarks68>
          >
        >
      >
    >[]
  ) {
    // 调整检测到的框的大小，以防您显示的图像与原始图像的大小不同
    const toSize = {
      width: video.current?.videoElement.width as number,
      height: video.current?.videoElement.height as number
    }
    const resizedResults = faceapi.resizeResults(detections, toSize)
    // 设置展示的canvas和输入大小保持相同
    faceapi.matchDimensions(video.current!.overlay!, video.current!.videoElement!)
    // 展示矩形框
    faceapi.draw.drawDetections(video.current!.overlay!, resizedResults)
    // 展示人脸标记
    faceapi.draw.drawFaceLandmarks(video.current!.overlay!, resizedResults)
    // 展示表情
    faceapi.draw.drawFaceExpressions(video.current!.overlay!, resizedResults)
    // 展示年龄和性别
    resizedResults.forEach(
      (result: { detection?: any; age?: any; gender?: any; genderProbability?: any }) => {
        const { age, gender, genderProbability } = result
        new faceapi.draw.DrawTextField(
          [
            `${Math.round(age)} 岁`,
            `${gender === 'male' ? '男' : '女'} 准确率(${Math.round(genderProbability)})`
          ],
          result.detection.box.topRight
        ).draw(video.current!.overlay!)
      }
    )
    if (valve) {
      window.setTimeout(() => {
        detectiveFace()
      }, 100)
    }
  }
  // 停止检测
  function stop() {
    //@ts-ignore
    video.current && video.current.videoElement.srcObject!.getTracks()[0].stop()
    valve = false
    container.current?.removeChild(video.current!.overlay!)
    container.current?.removeChild(video.current!.videoElement!)
  }
  // 开始检测
  function start() {
    if (container.current?.childNodes.length) return
    valve = true
    loadCamera()
  }
  return {
    container,
    video,
    stop,
    start
  }
}

export { useFaceApi }
