<template>
  <div class="container">
    <video v-show="initialized" ref="video-ref" controls></video>
    <div v-show="!initialized" style="text-align: center">
      <span style="color: aliceblue">初始化中......</span>
    </div>
    <canvas ref="canvas-ref" :width="canvasWidth" :height="canvasHeight"></canvas>
  </div>
</template>
<script>
import * as faceapi from 'face-api.js'
export default {
  name: 'face',
  data() {
    return {
      myVideo: null,        // <video> element (refs resolved once camera is acquired)
      myCanvas: null,       // <canvas> element the frames + overlays are drawn on
      timer: null,          // interval id driving the draw/detect loop; cleared on destroy
      initialized: false,   // true once all face-api model weights have loaded
      canvasWidth: 0,       // canvas size, set after video metadata is available
      canvasHeight: 0,
      isProcessing: false,  // guards against overlapping async detections
      userMedia: null       // MediaStream from getUserMedia; tracks stopped on destroy
    }
  },
  mounted() {
    this.initModels()
  },
  beforeDestroy() {
    // Stop the draw/detect loop and release the camera so the hardware
    // indicator light turns off when the component is torn down.
    if (this.timer) {
      clearInterval(this.timer)
      this.timer = null
    }
    if (this.userMedia) {
      this.userMedia.getTracks().forEach(track => track.stop())
      this.userMedia = null
    }
  },
  methods: {
    // Load the face-api.js model weights, then start the camera.
    // Optional nets (ageGenderNet, faceExpressionNet, tinyYolov2) can be
    // added to the Promise.all the same way if needed.
    initModels() {
      Promise.all([
        faceapi.nets.faceLandmark68Net.loadFromUri('/models/weights'),
        faceapi.nets.faceLandmark68TinyNet.loadFromUri('/models/weights'),
        faceapi.nets.faceRecognitionNet.loadFromUri('/models/weights'),
        faceapi.nets.mtcnn.loadFromUri('/models/weights'),
        faceapi.nets.ssdMobilenetv1.loadFromUri('/models/weights'),
        faceapi.nets.tinyFaceDetector.loadFromUri('/models/weights')
      ]).then(() => {
        this.initialized = true
        this.getUserMedia()
      }).catch(err => {
        // Without this, a failed model fetch leaves the "initializing"
        // placeholder on screen forever with no diagnostic.
        console.error('initModels-err', err)
      })
    },
    // Alternative loop driver: run draw/detect only while the video is
    // playing. Currently unused — the loop is started from getUserMedia.
    bindVideoEvent() {
      this.myVideo.addEventListener('play', () => {
        this.timer = setInterval(() => {
          this.drawCanvas()
          this.faceTrack()
        }, 50)
      })
      this.myVideo.addEventListener('pause', () => {
        clearInterval(this.timer)
      })
      this.myVideo.addEventListener('ended', () => {
        clearInterval(this.timer)
      })
    },
    // Detect faces + landmarks on the current canvas frame and draw the
    // overlays back onto the same canvas.
    async faceTrack() {
      // Skip this tick if the previous detection is still in flight.
      if (this.isProcessing) return
      this.isProcessing = true
      try {
        const detections = await faceapi.detectAllFaces(this.myCanvas).withFaceLandmarks()
        const resizedDetections = faceapi.resizeResults(detections, { width: this.canvasWidth, height: this.canvasHeight })
        faceapi.draw.drawDetections(this.myCanvas, resizedDetections)
        faceapi.draw.drawFaceLandmarks(this.myCanvas, resizedDetections)
      } finally {
        // Always release the guard — if detection throws, leaving it set
        // would permanently dead-lock the loop.
        this.isProcessing = false
      }
    },
    // Copy the current video frame onto the canvas.
    drawCanvas() {
      // Skip until the video is wired up and its dimensions are known;
      // drawing into a 0x0 target is pointless.
      if (!this.myVideo || !this.canvasWidth || !this.canvasHeight) return
      const context = this.myCanvas.getContext('2d')
      context.drawImage(this.myVideo, 0, 0, this.canvasWidth, this.canvasHeight)
    },
    // Acquire the camera stream, attach it to the <video>, size the canvas
    // once metadata is available, and start the draw/detect loop.
    getUserMedia() {
      navigator.mediaDevices.getUserMedia({ video: true }).then(stream => {
        this.myVideo = this.$refs['video-ref']
        this.myCanvas = this.$refs['canvas-ref']
        // Keep the stream so beforeDestroy can stop its tracks.
        this.userMedia = stream
        this.myVideo.srcObject = stream
        // clientWidth/clientHeight are only reliable after metadata loads;
        // reading them synchronously here could yield 0.
        this.myVideo.addEventListener('loadedmetadata', () => {
          this.canvasWidth = this.myVideo.clientWidth || this.myVideo.videoWidth
          this.canvasHeight = this.myVideo.clientHeight || this.myVideo.videoHeight
        }, { once: true })
        // Store the interval id (the original discarded it, leaking an
        // interval that could never be cleared).
        if (this.timer) clearInterval(this.timer)
        this.timer = setInterval(() => {
          this.drawCanvas()
          this.faceTrack()
        }, 100)
      }).catch(err => {
        console.log('getUserMedia-err', err)
      })
    }
  }
}
</script>
<style lang="scss" scoped>
.container {
  width: 100%;
  height: 100%;
}
</style>
