<template>
  <el-card shadow="never">
    <el-row style="margin-bottom: 20px;">
      <el-button type="danger" @click="switchPlay">打开/关闭摄像头</el-button>
      <el-button type="danger" @click="detect">检测</el-button>
    </el-row>
    <el-row :gutter="20">
      <el-col :span="12">
        <!-- muted: the captured stream is played back locally, so audio must
             never loop out of the speakers; playsinline: required by mobile
             Safari to play inside the page instead of going fullscreen -->
        <video ref="video" width="100%" muted playsinline />
      </el-col>
      <el-col ref="box" :span="12">
        <!-- detect() draws the current frame plus face boxes/landmarks here -->
        <canvas ref="canvas" />
      </el-col>
    </el-row>
  </el-card>
</template>

<script>
import * as blazeface from '@/components/FaceDetector/blazeface.js'

export default {
  data() {
    return {
      video: null,
      canvas: null,
      playing: false,
      videoWidth: 0,
      videoHeight: 0,
      canvasBox: null,
      // ctx: null,
      // width: 0,
      // height: 0,
      model: null
    }
  },
  created() {},
  mounted() {
    // Cache element/component refs once the DOM is available.
    this.video = this.$refs['video']
    this.canvas = this.$refs['canvas']
    this.canvasBox = this.$refs['box']
  },
  beforeDestroy() {
    // Release the camera when the component is torn down.
    this.tearDownCamera()
  },
  methods: {
    switchPlay() {
      if (!this.playing) {
        this.playing = true
        this.setUpPage()
      } else {
        this.playing = false
        this.tearDownCamera()
      }
    },
    async setUpPage() {
      await this.setupCamera()
      this.videoWidth = this.video.videoWidth
      this.videoHeight = this.video.videoHeight
      // console.log(this.videoWidth, this.videoHeight)
      this.video.play()
    },
    async setupCamera() {
      const constraints = { audio: true, video: { width: 1280, height: 720 }}
      const mediaStream = await navigator.mediaDevices.getUserMedia(constraints)
      this.video.srcObject = mediaStream
      return new Promise(resolve => {
        this.video.onloadedmetadata = () => {
          resolve(this.video)
        }
      })
    },
    tearDownCamera() {
      const mediaStream = this.video.srcObject
      if (mediaStream) {
        const tracks = mediaStream.getTracks()
        tracks.forEach(track => {
          track.stop()
        })
      }
    },
    async detect() {
      const width = this.canvasBox.$el.clientWidth - 20
      const height = width * 720 / 1280
      const widthCoef = width / 1280
      const heightCoef = height / 720
      this.canvas.width = width
      this.canvas.height = height
      const ctx = this.canvas.getContext('2d', { alpha: false })
      // ctx.translate(this.canvas.width, 0)
      // ctx.scale(-1, 1)
      // console.log(this.width, this.height)
      // console.log(this.video)
      if (!this.model) {
        this.model = await blazeface.load()
      }
      this.model.estimateFaces(this.video).then(predictions => {
        ctx.drawImage(this.video, 0, 0, Math.floor(width), Math.floor(height))
        console.log(predictions)
        if (predictions.length > 0) {
          for (let i = 0; i < predictions.length; i++) {
            // bounding box
            const start = predictions[i].topLeft
            const end = predictions[i].bottomRight
            const size = [end[0] - start[0], end[1] - start[1]]
            ctx.lineWidth = '3'
            ctx.strokeStyle = 'rgba(255, 0, 0, 0.5)'
            ctx.strokeRect(start[0] * widthCoef, start[1] * heightCoef, size[0] * widthCoef, size[1] * heightCoef)
            // landmarks
            ctx.beginPath()
            const landmarks = predictions[i].landmarks
            ctx.fillStyle = 'blue'
            for (let j = 0; j < landmarks.length; j++) {
              const x = landmarks[j][0] * widthCoef
              const y = landmarks[j][1] * heightCoef
              // var circle = new Path2D()
              // circle.moveTo(x, y)
              // ctx.arc(x, y, 2, 0, 2 * Math.PI)
              // ctx.fill(circle)
              ctx.fillRect(x, y, 5, 5)
            }
          }
        }
      })
    }
  }
}
</script>
