<template>
  <div class="webrtc_face_recognition">
    <!-- Control panel: start / stop the webcam media stream -->
    <div class="option">
      <div>
        <label>面板操作：</label>
        <button @click="fnOpen">启动摄像头视频媒体</button>
        <button @click="fnClose">结束摄像头视频媒体</button>
      </div>
    </div>
    <!-- Video feed with the detection canvas absolutely positioned on top
         (overlay positioning comes from the .see styles below) -->
    <div class="see">
      <!-- muted + playsinline allow inline playback on mobile browsers;
           @loadedmetadata kicks off the recognition loop via fnRun -->
      <video
        id="myVideo"
        poster="https://dummyimage.com/1280x720"
        muted
        loop
        playsinline
        @loadedmetadata="fnRun"
      ></video>
      <canvas id="myCanvas" />
    </div>
  </div>
</template>

<script>
import * as faceapi from "face-api.js";
export default {
  name: "WebRTCFaceRecognition",
  data() {
    return {
      options: null, // TinyFaceDetector options, built in fnInit()
      detection: "landmark", // detection-mode flag (informational only in this block)
      videoEl: null, // <video id="myVideo"> element, cached in fnInit()
      canvasEl: null, // <canvas id="myCanvas"> overlay element, cached in fnInit()
      timeout: 0, // timer id shared by the recognition loop and the open-debounce
      // getUserMedia constraints
      constraints: {
        audio: false,
        video: {
          // "ideal" is the value the browser tries hardest to honor
          width: {
            min: 320,
            ideal: 1280,
            max: 1920,
          },
          height: {
            min: 240,
            ideal: 720,
            max: 1080,
          },
          // lower frame rates may suit bandwidth-constrained transport
          frameRate: {
            min: 15,
            ideal: 30,
            max: 60,
          },
          // camera facing mode: rear-facing when available
          facingMode: "environment",
        },
      },
    };
  },
  mounted() {
    this.$nextTick(() => {
      this.fnInit();
    });
  },
  methods: {
    /**
     * Load the face-api.js models and cache the DOM nodes.
     * Must complete before fnRunFaceAgeAndGender() can detect anything.
     */
    async fnInit() {
      await faceapi.nets.tinyFaceDetector.loadFromUri("/models"); // TinyFaceDetector weights
      await faceapi.loadFaceLandmarkModel("/models"); // 68-point landmark model
      await faceapi.loadAgeGenderModel("/models"); // age / gender model
      // Detector options: inputSize is one of 160/224/320/416/512/608,
      // scoreThreshold ranges 0.1 ~ 0.9.
      this.options = new faceapi.TinyFaceDetectorOptions({
        inputSize: 512,
        scoreThreshold: 0.5,
      });

      // Cache element references
      this.videoEl = document.getElementById("myVideo");
      this.canvasEl = document.getElementById("myCanvas");
    },

    /**
     * Draw an age/gender text field under one resized detection.
     * Extracted helper: the original duplicated this code in both the
     * array and single-object branches of fnRunFaceAgeAndGender().
     * @param {Object} item one resized face-api detection result
     */
    fnDrawAgeGender(item) {
      const { age, gender, genderProbability } = item;
      new faceapi.draw.DrawTextField(
        [
          // FIX: Math.round(age, 0) passed a second argument Math.round ignores.
          `${Math.round(age)} years`,
          // FIX: Math.round(genderProbability) collapsed the 0..1 probability
          // to 0 or 1; show it to two decimals instead.
          `${gender} (${genderProbability.toFixed(2)})`,
        ],
        item.detection.box.bottomLeft
      ).draw(this.canvasEl);
    },

    /**
     * Run one detect-and-draw cycle (boxes + age/gender labels),
     * then reschedule itself via setTimeout.
     */
    async fnRunFaceAgeAndGender() {
      console.log("RunFaceAgeAndGender");
      // Video paused: stop the loop.
      if (this.videoEl.paused) return clearTimeout(this.timeout);
      // Detect faces with landmarks and age/gender attributes.
      const result = await faceapi
        .detectAllFaces(this.videoEl, this.options)
        .withFaceLandmarks()
        .withAgeAndGender();
      if (result && !this.videoEl.paused) {
        // Match the canvas to the video's display size so the drawn boxes
        // line up with the rendered frame (faceapi.matchDimensions).
        const dims = faceapi.matchDimensions(this.canvasEl, this.videoEl, true);
        // Scale detections from the internal input size to the display size.
        const resizeResults = faceapi.resizeResults(result, dims);
        // Draw the bounding boxes.
        faceapi.draw.drawDetections(this.canvasEl, resizeResults);

        // detectAllFaces normally yields an array, but the original also
        // handled a bare object — keep both paths via the shared helper.
        if (Array.isArray(resizeResults)) {
          resizeResults.forEach((item) => this.fnDrawAgeGender(item));
        } else {
          this.fnDrawAgeGender(resizeResults);
        }
      } else {
        // No result (or no camera): clear any stale drawings.
        this.canvasEl
          .getContext("2d")
          .clearRect(0, 0, this.canvasEl.width, this.canvasEl.height);
      }
      this.timeout = setTimeout(() => this.fnRunFaceAgeAndGender());
    },

    // Entry point invoked by the video's @loadedmetadata event.
    fnRun() {
      this.fnRunFaceAgeAndGender();
    },

    /**
     * Start the webcam media stream (debounced by 300 ms).
     * No-op when a stream is already active (window.stream is an object).
     */
    fnOpen() {
      if (typeof window.stream === "object") return;
      clearTimeout(this.timeout);
      this.timeout = setTimeout(() => {
        clearTimeout(this.timeout);
        // navigator.mediaDevices provides access to camera/microphone
        // input devices (and screen sharing).
        navigator.mediaDevices
          .getUserMedia(this.constraints) // media constraints declared in data()
          .then(this.fnSuccess) // stream acquired
          .catch(this.fnError); // acquisition failed
      }, 300);
    },

    /**
     * Stream acquired: expose it on window (for console debugging and the
     * fnOpen guard), attach it to the video element and start playback.
     * @param {MediaStream} stream
     */
    fnSuccess(stream) {
      window.stream = stream;
      this.videoEl.srcObject = stream;
      this.videoEl.play();
    },

    /**
     * Stream acquisition failed: log and alert the user.
     * @param {Error} error
     */
    fnError(error) {
      console.log(error);
      alert("视频媒体流获取错误" + error);
    },

    /**
     * Stop the webcam: clear the canvas, pause the video, cancel the
     * recognition loop and release every media track.
     */
    fnClose() {
      // Wipe the overlay canvas.
      this.canvasEl
        .getContext("2d")
        .clearRect(0, 0, this.canvasEl.width, this.canvasEl.height);
      this.videoEl.pause();
      clearTimeout(this.timeout);
      if (typeof window.stream === "object") {
        // Release the camera hardware.
        window.stream.getTracks().forEach((track) => track.stop());
        // Reset to "" (NOT null: typeof null === "object" would keep the
        // fnOpen guard tripped and block reopening).
        window.stream = "";
        this.videoEl.srcObject = null;
      }
    },
  },
  beforeDestroy() {
    // Make sure the camera is released when the component goes away.
    this.fnClose();
  },
};
</script>

<style scoped>
/* Control-panel buttons */
button {
  height: 30px;
  border: 2px #42b983 solid;
  border-radius: 4px;
  background: #42b983;
  color: white;
  margin: 10px;
}

/* Positioning context for the canvas overlay */
.see {
  position: relative;
}

/* Pin the detection canvas over the top-left corner of the video */
.see canvas {
  position: absolute;
  top: 0;
  left: 0;
}

.option {
  padding-bottom: 20px;
}

.option div {
  padding: 10px;
  border-bottom: 2px #42b983 solid;
}

.option div label {
  margin-right: 20px;
}
</style>
