<template>
  <div class="container">
    <!-- Hidden mirror canvas: the current video frame is drawn here and fed to the detector -->
    <canvas id="output"></canvas>
    <!-- Transparent overlay canvas where keypoints and the skeleton are rendered -->
    <canvas id="point"></canvas>
    <!-- Camera preview; the playsinline attribute variants keep mobile browsers
         (iOS Safari, WeChat/X5 webview) from forcing fullscreen playback -->
    <video
      id="video"
      x5-video-player-fullscreen="true"
      x5-playsinline
      playsinline
      webkit-playsinline
    ></video>
    <div id="test"></div>

    <!-- Info panel: live keypoint names and integer pixel coordinates -->
    <div class="info-box">
      <div v-for="(item, index) in poseData" :key="index">
        <span class="name">{{ item.name }}</span>
        :
        <span class="x">{{ parseInt(item.x) }}</span>
        ,
        <span class="y">{{ parseInt(item.y) }}</span>
      </div>
    </div>
  </div>
</template>

<script>
import * as poseDetection from '@tensorflow-models/pose-detection'
import '@tensorflow/tfjs-backend-webgl'
export default {
  data() {
    return {
      videoEl: {},
      canvasEl: {},
      canvasCtx: {},
      pointCanvasEl: {},
      pointCanvasCtx: {},
      detector: {},
      model: poseDetection.SupportedModels.PoseNet,
      DEFAULT_LINE_WIDTH: 1,
      DEFAULT_RADIUS: 2,
      SCORE_THRESHOLD: 0.5,
      isShowLoading: true,
      poseData: [],
      fpsNum: 0,
      oldTime: parseInt(new Date().getTime() / 1000),
    }
  },
  mounted() {
    setTimeout(() => {
      this.init()
    }, 1000)
    setTimeout(() => {
      // this.$router.push('/loginOut')
    }, 5000)
  },
  methods: {
    async init() {
      // 获取dom
      this.canvasEl = document.getElementById('output')
      this.pointCanvasEl = document.getElementById('point')
      this.videoEl = document.getElementById('video')
      // 获取画布
      this.canvasCtx = this.canvasEl.getContext('2d')
      this.pointCanvasCtx = this.pointCanvasEl.getContext('2d')
      // 设置视频源，这里使用摄像头
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: false,
        video: {
          facingMode: 'user',
          // facingMode: { exact: 'environment' },
          width: {
            ideal: window.innerHeight,
          },
          height: {
            ideal: window.innerWidth,
          },
        },
      })
      console.log('相机打开')
      // 设置流
      this.videoEl.srcObject = stream
      // 视频加载后执行
      // this.videoEl.onloadeddata = async function () {
      document.getElementById('video').play()
      const { width, height } = this.videoEl.getBoundingClientRect()
      this.canvasEl.width = width
      this.canvasEl.height = height
      this.pointCanvasEl.width = width
      this.pointCanvasEl.height = height
      // 加载模型,model 在顶级变量里已经设置为poseDetection.SupportedModels.PoseNet
      this.detector = await poseDetection.createDetector(this.model, {
        architecture: 'MobileNetV1',
        outputStride: 16,
        inputResolution: {
          width: 640,
          height: 480,
        },
        multiplier: 0.5,
      })
      this.isShowLoading = false
      // 开始检测
      this.startDetect()
      // }
    },

    //检测一秒的帧率
    checkFPS() {
      if (this.oldTime == parseInt(new Date().getTime() / 1000)) {
        this.fpsNum++
      } else {
        globalVue.toast('一秒钟结束，本秒帧率为' + this.fpsNum)
        this.oldTime = parseInt(new Date().getTime() / 1000)
        this.fpsNum = 0
      }
    },
    // 开始检测
    async startDetect() {
      // this.checkFPS()
      // 检测画布动作
      const poses = await this.detector.estimatePoses(this.canvasEl, {
        flipHorizontal: false, // 是否水平翻转
        imageScaleFactor: 1.0, //数值范围 0.2 到 1.0，默认 0.50。用于在图像输入网络前调整图像比例。这个值设置的越小将会缩小图像比例，增加速度，但是会牺牲准确性
        outputStride: 8, //在通过模型提供图像时，输出的期望步幅。必须是 32、16、8。默认为 16。数字越高，性能越快，准确度越低，反之亦然
        maxPoses: 1, // 最大检测人数
        scoreThreshold: 0.5, // 置信度
        nmsRadius: 5, // 非极大值抑制
      })
      // 绘制视频
      this.canvasCtx.drawImage(
        this.videoEl,
        0,
        0,
        this.canvasEl.width,
        this.canvasEl.height
      )
      // 画第一个人的姿势 poses[0]
      // 画点
      // console.log(poses[0].keypoints)
      this.drawKeypoints(this.pointCanvasCtx, poses[0].keypoints)
      // localStorage.setItem('body', JSON.stringify(poses[0].keypoints))
      this.poseData = poses[0].keypoints
      // 画骨骼
      this.drawSkeleton(this.pointCanvasCtx, poses[0].keypoints)
      // 一帧执行一次  可替换为setTimeout方案: setTimeout(()=>startDetect(),1000/16)
      // requestID = requestAnimationFrame(() => this.startDetect())
      window.requestAnimationFrame(this.startDetect)
    },
    // 画点
    drawKeypoints(ctx, keypoints) {
      //清除上一次画板
      ctx.clearRect(0, 0, this.pointCanvasEl.width, this.pointCanvasEl.height)
      // keypointInd 主要按left middle right  返回索引，left是单数索引，right是双数索引，打印一下你就知道了
      const keypointInd = poseDetection.util.getKeypointIndexBySide(this.model)
      // console.log(keypointInd)
      ctx.strokeStyle = 'Red'
      ctx.lineWidth = this.DEFAULT_LINE_WIDTH

      ctx.fillStyle = 'Red'
      for (const i of keypointInd.middle) {
        this.drawKeypoint(ctx, keypoints[i])
      }

      ctx.fillStyle = 'Red'
      for (const i of keypointInd.left) {
        this.drawKeypoint(ctx, keypoints[i])
      }

      ctx.fillStyle = 'Red'
      for (const i of keypointInd.right) {
        this.drawKeypoint(ctx, keypoints[i])
      }
    },
    drawKeypoint(ctx, keypoint) {
      // If score is null, just show the keypoint.
      const score = keypoint.score != null ? keypoint.score : 1
      // console.log(score)
      if (score >= this.SCORE_THRESHOLD) {
        const circle = new Path2D()
        circle.arc(keypoint.x, keypoint.y, this.DEFAULT_RADIUS, 0, 2 * Math.PI)
        ctx.fill(circle)
        ctx.stroke(circle)
      }
    },
    // 画骨架
    drawSkeleton(ctx, keypoints) {
      // Each poseId is mapped to a color in the color palette.
      const color = 'Red'
      ctx.fillStyle = color
      ctx.strokeStyle = color
      ctx.lineWidth = this.DEFAULT_LINE_WIDTH

      poseDetection.util.getAdjacentPairs(this.model).forEach(([i, j]) => {
        const kp1 = keypoints[i]
        const kp2 = keypoints[j]
        // If score is null, just show the keypoint.
        const score1 = kp1.score != null ? kp1.score : 1
        const score2 = kp2.score != null ? kp2.score : 1

        if (score1 >= this.SCORE_THRESHOLD && score2 >= this.SCORE_THRESHOLD) {
          ctx.beginPath()
          ctx.moveTo(kp1.x, kp1.y)
          ctx.lineTo(kp2.x, kp2.y)
          ctx.stroke()
        }
      })
    },
  },
}
</script>

<style lang="less">
.container {
  max-height: 100vh;
  max-width: 100vw;
  overflow: hidden;
  /* background-color: #000; */
}
#video {
  margin-left: -0.15rem;
  margin-top: -0.3rem;
  width: 105vw;
  height: 105vh;
}
/* Hidden mirror canvas used as the detector's input */
#output {
  position: absolute;
  left: 20px;
  top: 0;
  width: 100vw;
  height: 100vh;
  background-color: #fff;
  display: none;
}
/* Transparent overlay where keypoints/skeleton are drawn */
#point {
  position: absolute;
  left: 0.1rem;
  top: -0.1rem;
  width: 100vw;
  height: 100vh;
  background-color: transparent;
  /* opacity: 0.5; */
  /* display: none; */
}
.info-box {
  position: absolute;
  right: 0;
  bottom: 0;
  width: 40vw;
  height: 30vh;
  background-color: rgba(0, 0, 0, 0.5);
  font-size: 0.1rem;
  color: greenyellow;
  text-align: left;
  /* These are <span> elements, which are inline by default, so the
     `width` declarations below previously had no effect. inline-block
     makes the fixed column widths apply. */
  .name {
    display: inline-block;
    width: 20vw;
  }
  .x {
    display: inline-block;
    width: 10vw;
  }
  .y {
    display: inline-block;
    width: 10vw;
  }
}
</style>
