<!DOCTYPE html>
<html lang="en">

<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Face Detection Demo (face-api.js)</title>
  <script src="https://cdn.jsdelivr.net/npm/face-api.js@0.22.2/dist/face-api.min.js"></script>
  <style>
    body {
      margin: 0;
      padding: 0;
      height: 100vh;
      display: flex;
      justify-content: center;
      align-items: center;
    }

    .video-wrap {
      width: 800px;
      height: 600px;
      border: 1px solid black;
      position: relative;
    }
    .video, #canvas {
      position: absolute;
      top: 0;
      left: 0;
      width: 100%;
      height: 100%;
      z-index: 1;
    }
    #canvas {
      z-index: 2;
    }
    #tip {
      font-size: 26px;
      text-align: center;
      line-height: 2;
    }
  </style>
</head>

<body>
  <div>
  <div class="video-wrap">
    <video class="video" autoplay muted playsinline style="width: 800px; height: 600px;"></video>
  </div>
  <div id="tip">未检测到人脸</div>
  </div>
  <script>
    const video = document.querySelector('.video');
    const tip = document.querySelector('#tip')
    const MODEL_PATH = '/models/weights'
    // Attach the user's webcam stream to the <video> element.
    // Logs (instead of throwing) when the camera is missing, permission is
    // denied, or autoplay is blocked.
    async function getCamera() {
      try {
        // Request a video-only media stream.
        const mediaStream = await navigator.mediaDevices.getUserMedia({
          video: true
        })
        video.srcObject = mediaStream;
        // play() returns a promise; await it so autoplay-policy rejections
        // land in the catch below instead of becoming unhandled rejections.
        await video.play();
      } catch (e) {
        console.error(e)
      }
    }
    // Fetch all required face-api.js model weights from MODEL_PATH.
    // The four downloads are independent, so run them in parallel instead of
    // serially awaiting each one — same end state, shorter startup.
    async function loadModels() {
      await Promise.all([
        faceapi.loadTinyFaceDetectorModel(MODEL_PATH),  // face detector (tiny variant)
        faceapi.loadFaceLandmarkTinyModel(MODEL_PATH),  // 68-point landmarks (tiny variant)
        faceapi.loadFaceExpressionModel(MODEL_PATH),    // facial expressions
        faceapi.loadAgeGenderModel(MODEL_PATH),         // age & gender
      ]);
    }

    // Run a repeating detection pass over the live video, updating the #tip
    // status line and drawing boxes/landmarks/expressions on the overlay canvas.
    //   canvas      — overlay canvas aligned with the video
    //   displaySize — { width, height } of the rendered video
    function detectFace(canvas, displaySize) {
      const ctx = canvas.getContext("2d");
      const DETECT_INTERVAL_MS = 200; // pause between detection passes
      const MAX_YAW_DEGREES = 30;     // allowed head-turn before warning

      const tick = async () => {
        try {
          const detections = await faceapi
            .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
            .withFaceLandmarks(true) // true => use the tiny landmark model
            .withFaceExpressions()
            .withAgeAndGender();

          const resizedDetections = faceapi.resizeResults(detections, displaySize)

          if (detections && detections[0]) {
            const landmarks = detections[0].landmarks;
            const leftEye = landmarks.getLeftEye()
            const rightEye = landmarks.getRightEye()
            const nose = landmarks.getNose()

            // Midpoint between the first point of each eye contour.
            // NOTE(review): [0] is an eye-corner landmark, not the pupil
            // center — an approximation; confirm it is accurate enough.
            const eyeCenterX = (leftEye[0]._x + rightEye[0]._x) / 2;
            const eyeCenterY = (leftEye[0]._y + rightEye[0]._y) / 2;

            // Vector from the eye midpoint toward the nose.
            const eyeNoseVecX = nose[0]._x - eyeCenterX;
            const eyeNoseVecY = nose[0]._y - eyeCenterY;

            // Angle of that vector measured from straight-down (vertical).
            // BUGFIX: the original used atan2(vecY, vecX), which measures
            // from the horizontal axis — a frontal face (vecX≈0, vecY>0)
            // yielded ~90° and was always flagged as "not frontal".
            const yawRadians = Math.atan2(eyeNoseVecX, eyeNoseVecY);
            const yawDegrees = yawRadians * 180 / Math.PI;

            // Slightly lenient rounding before comparing to the threshold.
            const yawAngle = Math.ceil(Math.abs(yawDegrees));
            tip.innerHTML = yawAngle < MAX_YAW_DEGREES ? '请保持姿势' : '请正对摄像头';
          } else {
            tip.innerHTML = '未检测到人脸'
          }

          ctx.clearRect(0, 0, displaySize.width, displaySize.height);
          faceapi.draw.drawDetections(canvas, resizedDetections);
          faceapi.draw.drawFaceLandmarks(canvas, resizedDetections);
          faceapi.draw.drawFaceExpressions(canvas, resizedDetections);
        } finally {
          // Re-arm only after this pass completed, so slow detections never
          // overlap (setInterval + async callback could pile up calls).
          setTimeout(tick, DETECT_INTERVAL_MS);
        }
      };
      tick();
    }
    // Once the video is actually playing, build the overlay canvas and start
    // the detection loop.
    video.addEventListener('play', () => {
      // 'play' also fires on every pause/resume; guard so we do not stack
      // extra overlay canvases and detection loops.
      if (document.querySelector('#canvas')) return;

      const { offsetWidth: width, offsetHeight: height } = video;
      const displaySize = { width, height };

      // Overlay canvas sized to match the rendered video.
      const canvas = faceapi.createCanvas(video);
      canvas.id = "canvas";
      canvas.width = width;
      canvas.height = height;
      document.querySelector(".video-wrap").append(canvas);
      faceapi.matchDimensions(canvas, displaySize);

      detectFace(canvas, displaySize)
    })
    // Entry point. Load the models BEFORE starting the camera: playback fires
    // the 'play' event, which kicks off detectFace(), and faceapi calls fail
    // while the detector weights are still downloading.
    async function start() {
      await loadModels();
      await getCamera();
    }

    start();
    // credit: https://github.com/TsMask/face-api-demo-vue
  </script>
</body>

</html>