<template>
  <div class="video_face_recognition">
    <div class="option">
      <div>
        <label>视频控制：</label>
        <button @click="fnPaused">暂停Or播放</button>
      </div>
    </div>
    <div class="see">
      <!-- Reference image with an overlay canvas for the sample-face boxes -->
      <div class="org">
        <img id="orgImg" src="../../public/images/2.jpeg" alt="人脸样本图">
        <canvas id="orgCanvas"></canvas>
      </div>
      <!-- Video with an overlay canvas for the live detection boxes -->
      <div class="det">
        <video id="myVideo" src="videos/test.mp4" muted playsinline></video>
        <canvas id="myCanvas"></canvas>
      </div>
    </div>
  </div>
</template>

<script>
import * as faceapi from "face-api.js";
export default {
  name: "VideoAgeAndGenderRecognition",
  data() {
    return {
      nets: "tinyFaceDetector", // 算法模型
      options: null, // 模型参数
      detectFace: "detectAllFaces", // 单（detectSingleFace）or多人脸（detectAllFaces）
      videoEl: null,
      canvasEl: null,
	  orgImgEl: null,
	  orgCanvasEl:null,
      timeout: 0,
	  faceMatcher: null, // 原图样本人脸矩阵结果
    };
  },
  mounted() {
    this.$nextTick(() => {
      this.fnInit();
    });
  },
  methods: {
    // 初始化模型加载
    async fnInit() {
      await faceapi.nets[this.nets].loadFromUri("/models"); // 算法模型
      await faceapi.loadFaceLandmarkModel("/models"); // 轮廓模型
      await faceapi.loadAgeGenderModel("/models"); // 年龄模型
	  await faceapi.loadFaceRecognitionModel("/models");//人脸识别
      // 根据模型参数识别调整结果
     
     this.options = new faceapi.TinyFaceDetectorOptions({
       inputSize: 512, // 160 224 320 416 512 608
       scoreThreshold: 0.5, // 0.1 ~ 0.9
     });
       
      // 节点属性化
      this.videoEl = document.getElementById("myVideo");
      this.canvasEl = document.getElementById("myCanvas");
	  this.orgImgEl = document.getElementById("orgImg");
	  this.orgCanvasEl = document.getElementById("orgCanvas");
	 
	  // 原图人脸识别
	  await this.fnRunOrg();
	  // 视频人脸识别
	  await this.fnRun();
	
    },
	
	// 执行原图样本识别绘制
	async fnRunOrg() {
	  const fullFaceDescriptions = await faceapi
	    .detectAllFaces(this.orgImgEl, this.options)
	    .withFaceLandmarks()  //人脸特征
		 .withFaceDescriptors();//人脸识别描述
	  if (!fullFaceDescriptions.length) {
	    this.faceMatcher = null;
	    return;
	  }
	  
	  // 原图人脸矩阵结果
	  this.faceMatcher = await new faceapi.FaceMatcher(fullFaceDescriptions);
	
	 // 识别图像绘制
	  //我们希望将画布与其显示尺寸匹配，
	  //我们可以使用faceapi.matchDimensions(canvas，displaySize)来做到这一点
	  faceapi.matchDimensions(this.orgCanvasEl, this.orgImgEl);
	 // 创建一个调整大小的检测变量，
	 //并将其设置为等于(faceapi.resizeResults(detections，displaySize)) ，
	 //以便将框调整为我们的脸部大小
	  const resizedResults = faceapi.resizeResults(
	    fullFaceDescriptions,
	    this.orgImgEl
	  );
	  //遍历人像框 绘制内容
	  resizedResults.forEach(({ detection, descriptor }) => {
	    let { label } = this.faceMatcher.findBestMatch(descriptor);
	    new faceapi.draw.DrawBox(detection.box, { label }).draw(
	      this.orgCanvasEl
	    );
	  });
	},
    // 年龄性别识别绘制
    async fnRunFaceAgeAndGender() {
      console.log("RunFaceAgeAndGender");
	  //判断原图是否识别有 人脸矩阵
	  if (!this.faceMatcher) return;
	  
      //判断视频是否暂停
	  if (this.videoEl.paused) return clearTimeout(this.timeout);
      // 识别人脸信息
      const result = await faceapi[this.detectFace](this.videoEl, this.options)
        .withFaceLandmarks()//人脸特征（用来识别人脸）
		.withFaceDescriptors()//人脸描述（用来描述人脸的信息）
        .withAgeAndGender();//性别年龄
      //如果检测到人脸 则绘制
	  if (result) {
        const dims = faceapi.matchDimensions(this.canvasEl, this.videoEl, true);
        const resizeResult = faceapi.resizeResults(result, dims);
       //绘制人像框
	   faceapi.draw.drawDetections(this.canvasEl, resizeResult)
        //绘制信息  
        if (Array.isArray(resizeResult)) {
          resizeResult.forEach((result) => {
			  console.info(result)
			  
            const {detection, descriptor, age, gender, genderProbability } = result;
			let label = this.faceMatcher.findBestMatch(descriptor).toString();
			new faceapi.draw.DrawBox(detection.box, { label }).draw(this.canvasEl);
			
            new faceapi.draw.DrawTextField(
              [
                `${Math.round(age, 0)} years`,
                `${gender} (${Math.round(genderProbability)})`,
              ],
              result.detection.box.bottomLeft
            ).draw(this.canvasEl);
          });
        } else {
          const { age, gender, genderProbability } = resizeResult;
          new faceapi.draw.DrawTextField(
            [
              `${Math.round(age, 0)} years`,
              `${gender} (${Math.round(genderProbability)})`,
            ],
            resizeResult.detection.box.bottomLeft
          ).draw(this.canvasEl);
        }
      } else {
        this.canvasEl
          .getContext("2d")
          .clearRect(0, 0, this.canvasEl.width, this.canvasEl.height);
      }
      this.timeout = setTimeout(() => this.fnRunFaceAgeAndGender());
    },
    // 执行检测识别类型
    fnRun() {
        this.fnRunFaceAgeAndGender();
    },
    // 视频暂停播放
    fnPaused() {
      if (this.videoEl.paused) {
        this.videoEl.play();
        setTimeout(() => this.fnRun(), 300);
      } else {
        this.videoEl.pause();
      }
    },
   
  },
  beforeDestroy() {
    this.videoEl.pause();
    clearTimeout(this.timeout);
  },
};
</script>

<style scoped>
/* Containers anchor the absolutely-positioned overlay canvases. */
.see .org,
.see .det {
  position: relative;
}
/* Overlay each canvas exactly on top of its image / video. */
.see .org canvas,
.see .det canvas {
  position: absolute;
  top: 0;
  left: 0;
}
.option {
  padding-bottom: 20px;
}
.option div {
  padding: 10px;
  border-bottom: 2px #42b983 solid;
}
.option div label {
  margin-right: 20px;
}
</style>
