<template>
  <div class="face-track">
    <div class="face-content">
      <div>
        <div class="tip-text">请将人脸对准摄像头</div>
        <!-- muted + playsinline are required for autoplay to actually start
             under modern browser autoplay policies (notably iOS Safari and
             Chrome); the mirror transform flips the selfie view. -->
        <video id="video" style="transform:rotateY(180deg)" autoplay muted playsinline></video>
      </div>
      <div>
        <div class="tip-text">人脸检测结果</div>
        <canvas id="canvas" width="300" height="300" style="transform:rotateY(180deg)"></canvas>
      </div>
    </div>
    <div class="face-operate">
      <div v-if="takePhotoShow">
        <!-- type="button" prevents accidental form submission if this
             component is ever placed inside a <form> -->
        <button class="btn" type="button" @click="takePhoto">拍 照</button>
      </div>
    </div>
  </div>
</template>
<script>
import { userMedia } from "../utils/utils";
require("tracking/build/tracking-min.js");
require("tracking/build/data/face-min.js");
import * as faceApi from "face-api.js";
export default {
  name: "FaceTrack",
  data() {
    // Component state. mouth_Y / eye_Y start as "" — a sentinel meaning
    // "no previous measurement yet" — and later hold numeric pixel spans.
    return {
      photoCanvas: null, // <canvas id="canvas"> element, resolved in openCamera()
      videoObj: null, // <video id="video"> element, resolved in openCamera()
      trackerTask: null, // tracking.js task handle, stopped in handleCancel()
      takePhotoShow: false, // true while a face is tracked; shows the photo button
      faceApi: {
        options: null, // reserved for face-api.js detector options (unused here)
      },
      mouth_Y: "", // previous mouth height (max y - min y) from isOpenMouth()
      eye_Y: "", // previous left-eye height from isOpenEye()
    };
  },
  created() {
    // Kick off face-api.js model loading early. Not awaited, so models may
    // still be downloading when the first frames arrive; detectFaces() will
    // simply start working once they are ready.
    this.initFaceApi();
  },
  mounted() {
    // DOM is available now; grab the <video>/<canvas> and start tracking.
    this.openCamera();
  },
  beforeDestroy() {
    // Release the camera and stop the tracker when the component goes away.
    this.handleCancel();
  },
  methods: {
    /**
     * Resolves the <video>/<canvas> elements, starts tracking.js face
     * tracking on the webcam stream, and mirrors each video frame (plus a
     * bounding box per detected face) onto the canvas. While at least one
     * face is tracked, runs the face-api.js landmark checks and shows the
     * photo button; otherwise the button is hidden.
     */
    openCamera() {
      // Other buttons may trigger a re-acquisition, so look the elements up
      // again on every call, after the DOM has settled.
      this.$nextTick(() => {
        this.photoCanvas = document.getElementById("canvas");
        const context = this.photoCanvas.getContext("2d");
        this.videoObj = document.getElementById("video");
        // eslint-disable-next-line no-undef
        const tracker = new tracking.ObjectTracker("face"); // detect faces
        tracker.setInitialScale(4);
        tracker.setStepSize(2);
        tracker.setEdgesDensity(0.1);

        // eslint-disable-next-line no-undef
        this.trackerTask = tracking.track("#video", tracker, { camera: true });

        const constraints = {
          video: { width: 300, height: 300 },
          audio: false,
        };

        // NOTE(review): tracking.track(..., { camera: true }) already requests
        // the camera itself; this second getUserMedia call looks redundant —
        // confirm whether both are needed before removing either.
        userMedia(constraints, this.success, this.error);

        tracker.on("track", (event) => {
          event.data.forEach((rect) => {
            // Mirror the current video frame onto the canvas, then outline
            // the tracked face rectangle on top of it.
            context.drawImage(
              this.videoObj,
              0,
              0,
              this.photoCanvas.width,
              this.photoCanvas.height
            );
            context.font = "16px Helvetica";
            context.strokeStyle = "#1890ff";
            context.strokeRect(rect.x, rect.y, rect.width, rect.height);
          });

          if (event.data.length !== 0) {
            // At least one face in frame: run landmark checks, enable photo.
            this.detectFaces(this.photoCanvas);
            // this.recognizeEmotion(this.photoCanvas);
            this.takePhotoShow = true;
          } else {
            this.takePhotoShow = false;
          }
        });
      });
    },
    handleCancel() {
      this.videoObj.srcObject.getTracks()[0].stop();
      this.trackerTask.stop();
    },
    /**
     * 拍照上传
     */
    async takePhoto() {
      // 拿到图片的base64
      const canvas = this.photoCanvas.toDataURL("image/png");
      if (canvas) {
        // 拍照将base64转为file流文件
        const blob = this.dataURLtoBlob(canvas);
        // console.log(blob)
        const file = this.blobToFile(blob, "imgName");
        // console.log(file)
        // // 将blob图片转化路径图片
        // const image = window.URL.createObjectURL(file)
        // console.log(image)
        // 将拍照后的图片发送给后端
        const formData = new FormData();
        formData.append("file", file);
        console.log("formData", formData);
        this.handleCancel();
      } else {
        alert("人脸照片生成失败");
      }
    },
    /**
     * 将图片转为blob格式
     * dataurl 拿到的base64的数据
     */
    dataURLtoBlob(dataurl) {
      const arr = dataurl.split(",");
      const mime = arr[0].match(/:(.*?);/)[1];
      const bstr = atob(arr[1]);
      let n = bstr.length;
      const u8arr = new Uint8Array(n);
      while (n--) {
        u8arr[n] = bstr.charCodeAt(n);
      }
      return new Blob([u8arr], {
        type: mime,
      });
    },
    /**
     * 生成文件信息
     * theBlob 文件
     * fileName 文件名字
     */
    blobToFile(theBlob, fileName) {
      theBlob.lastModifiedDate = new Date().toLocaleDateString();
      theBlob.name = fileName;
      return theBlob;
    },
    // 成功显示
    success(stream) {
      this.videoObj.srcObject = stream;
      this.videoObj.play();
    },
    /**
     * getUserMedia failure callback — typically no camera present, or the
     * user denied the camera permission. Logs the detail and alerts the user.
     */
    error(error) {
      const { name, message } = error;
      console.log(`访问用户媒体设备失败${name}, ${message}`);
      alert("访问用户媒体设备失败");
    },
    // 加载faceapi模型
    async initFaceApi() {
      await faceApi.nets.tinyFaceDetector.loadFromUri("/models");
      await faceApi.nets.faceLandmark68Net.loadFromUri("/models");
      await faceApi.nets.faceRecognitionNet.loadFromUri("/models");
      // await faceApi.nets.faceExpressionNet.loadFromUri("/models");
      // await faceApi.nets.ageGenderNet.loadFromUri("/models");
    },
    // 人脸检测是face-api.js最基本的功能之一。可以通过faceapi.detectAllFaces函数实现
    async detectFaces(inputImageElement) {
      const detections = await faceApi
        .detectAllFaces(
          inputImageElement,
          new faceApi.TinyFaceDetectorOptions()
        )
        .withFaceLandmarks()
        .withFaceDescriptors();
      // 处理检测结果
      // console.log("检测结果", detections);
      if (detections.length > 0) {
        const landmarks = detections[0].landmarks;
        const mouth = landmarks.getMouth();
        // console.log("嘴巴", mouth);
        const leftEye = landmarks.getLeftEye();
        // console.log("leftEye", leftEye);
        // const rightEye = landmarks.getRightEye();
        // console.log("rightEye", rightEye);
        // const leftEyeBbrow = landmarks.getLeftEyeBrow();
        // console.log("leftEyeBbrow", leftEyeBbrow);
        // const rightEyeBrow = landmarks.getRightEyeBrow();
        // console.log("rightEyeBrow", rightEyeBrow);
        this.isOpenMouth(mouth);
        this.isOpenEye(leftEye);
      }
    },
    isOpenMouth(mouth) {
      const mouth_Y_list = mouth.map((item) => {
        return item.y;
      });
      const max = Math.max(...mouth_Y_list);
      const min = Math.min(...mouth_Y_list);
      const _y = max - min;
      if (this.mouth_Y === "") {
        this.mouth_Y = _y;
      }
      if (Math.abs(this.mouth_Y - _y) > 10) {
        console.log("检测到嘴巴张开");
      }
      this.mouth_Y = _y;
    },
    isOpenEye(eye) {
      const eye_Y_list = eye.map((item) => {
        return item.y;
      });
      const max = Math.max(...eye_Y_list);
      const min = Math.min(...eye_Y_list);
      const _y = max - min;
      if (this.eye_Y === "") {
        this.eye_Y = _y;
      }
      if (Math.abs(this.eye_Y - _y) > 0.6) {
        console.log("检测到眨眼睛");
      }
      this.eye_Y = _y;
    },
    async recognizeEmotion(inputImageElement) {
      const detections = await faceApi
        .detectAllFaces(
          inputImageElement,
          new faceApi.TinyFaceDetectorOptions()
        )
        .withFaceLandmarks()
        .withFaceExpressions();
      detections.forEach((detection) => {
        // console.log("表情", detection.expressions);
        // 输出表情识别结果，如：'happy', 'sad', 'angry' 等
        const expressionKeys = Object.keys(detection.expressions).filter(
          (key) => detection.expressions[key] > 0.5
        );
        console.log("表情：", expressionKeys);
      });
    },
  },
};
</script>

<style scoped>
/* Full-size wrapper for the face-tracking page */
.face-track {
  width: 100%;
  height: 100%;
}

/* Side-by-side layout: live video on the left, detection canvas on the right */
.face-content {
  display: flex;
  justify-content: space-between;
  padding: 50px 200px;
}

/* Caption above the video / canvas */
.tip-text {
  font-size: 14px;
  text-align: center;
  margin-bottom: 20px;
}

/* Centered row holding the photo button */
.face-operate {
  width: 100%;
  height: 100px;
  display: flex;
  justify-content: center;
  align-items: center;
}

/* The "take photo" button */
.btn {
  width: 80px;
  height: 30px;
  line-height: 30px;
  cursor: pointer;
}
</style>