<script lang='ts' setup>
import * as posenet from '@tensorflow-models/posenet';
import { Pose } from '@tensorflow-models/posenet';
import { Vector2D } from '@tensorflow-models/posenet/dist/types';
import * as tf from '@tensorflow/tfjs';
import { concat, filter, find, forEach, has, map, mean, values } from 'lodash';
import { Random } from 'mockjs';
import { onMounted, ref } from 'vue';
import { KeyPointBodyType, link_map_body, part_map_body } from './body';

// 容器大小
const width = ref(document.body.clientWidth);
const height = ref(document.body.clientHeight);

const videoDom = ref();
const canvasDom = ref();

onMounted(async () => {
  // Touch the TF.js backend so it is initialized before model load.
  // NOTE(review): this only reads the backend name; if explicit readiness is
  // needed, `await tf.ready()` would be the stronger call — confirm intent.
  tf.getBackend();
  // Load the PoseNet model.
  const net = await posenet.load();

  // Camera feed element.
  const video = videoDom.value;
  // Overlay canvas element.
  const canvas = canvasDom.value;

  // Use the webcam as the video source. Handle rejection explicitly so a
  // denied permission / missing device does not fail silently.
  navigator.mediaDevices
    .getUserMedia({ video: true })
    .then((stream) => {
      video.srcObject = stream;
    })
    .catch((err) => {
      console.error('getUserMedia failed:', err);
    });

  // Run pose detection on every animation frame once playback starts.
  video.addEventListener('play', () => {
    const ctx = canvas.getContext('2d') as CanvasRenderingContext2D;

    const animate = async () => {
      // Skeleton color. The original `'blue' || Random.color()` always
      // short-circuited to 'blue', so the Random.color() call was dead code.
      const color = 'blue';
      // Detect a single pose, mirrored to match the user's view.
      const pose: Pose = await net.estimateSinglePose(video, { flipHorizontal: true });

      // Average x or y of two named keypoints (used to synthesize joints
      // PoseNet does not report directly).
      const get_avg_position_fn = (part: (KeyPointBodyType['part'])[], pos: keyof Vector2D) => mean(
        [
          find(pose.keypoints, ['part', part[0]])?.position[pos],
          find(pose.keypoints, ['part', part[1]])?.position[pos]
        ]
      );
      // Average confidence of two named keypoints.
      const get_avg_score_fn = (part: (KeyPointBodyType['part'])[]) => mean(
        [
          find(pose.keypoints, ['part', part[0]])?.score,
          find(pose.keypoints, ['part', part[1]])?.score
        ]
      );
      // Synthetic "neck" joint: midpoint of the shoulders.
      const neck = {
        part: 'neck',
        score: get_avg_score_fn(['leftShoulder', 'rightShoulder']),
        position: {
          x: get_avg_position_fn(['leftShoulder', 'rightShoulder'], 'x'),
          y: get_avg_position_fn(['leftShoulder', 'rightShoulder'], 'y'),
        }
      };
      // Synthetic "belly" joint: midpoint of the hips.
      const belly = {
        part: 'belly',
        score: get_avg_score_fn(['leftHip', 'rightHip']),
        position: {
          x: get_avg_position_fn(['leftHip', 'rightHip'], 'x'),
          y: get_avg_position_fn(['leftHip', 'rightHip'], 'y'),
        }
      };
      // Keep confident keypoints only, attach the skeleton parent link and a
      // display label, and convert position to an [x, y] tuple (y shifted up
      // by 80px to align the overlay with the video).
      const keypoints = map(
        filter(
          concat(pose.keypoints, neck, belly),
          (keypoint: KeyPointBodyType) => keypoint.score > 0.6
        ),
        (keypoint: KeyPointBodyType) => ({
          ...keypoint,
          parent: link_map_body[keypoint.part],
          label: '玥哥' + part_map_body[keypoint.part],
          position: map(values(keypoint.position), (n: number, index: number) => index === 0 ? n : n - 80)
        })
      ) as [];

      // Schedule the next frame, then redraw this one.
      requestAnimationFrame(animate);
      ctx.clearRect(0, 0, canvas.width, canvas.height);
      forEach(keypoints, (keypoint: KeyPointBodyType) => {
        // Draw the joint node.
        ctx.beginPath();
        ctx.fillStyle = color;
        ctx.arc(...(keypoint.position as [number, number]), 10, 0, 2 * Math.PI);
        ctx.fill();
        if (keypoint.part === 'nose') {
          ctx.fillStyle = 'black';
          ctx.font = '40px Georgia';
          ctx.fillText('你玥哥', ...(map(keypoint.position, (n: number, index: number) => n + (index === 0 ? -50 : -80)) as [number, number]));
        }
        // Draw the bone to this node's parent joint, if the parent survived
        // the confidence filter.
        const node = find(keypoints, ['part', keypoint.parent]) as KeyPointBodyType | undefined;

        if (node) {
          // Start a fresh path: without beginPath() the stroke would also
          // re-stroke the arc left in the current path by the node drawing.
          ctx.beginPath();
          ctx.strokeStyle = color;
          ctx.lineWidth = 4;
          ctx.moveTo(...(node.position as [number, number]));
          ctx.lineTo(...(keypoint.position as [number, number]));
          ctx.stroke();
        }
      });
    };

    animate();
  });
});
</script>

<template>
  <video class='video' ref='videoDom' :width='width' :height='height' autoplay></video>
  <canvas class='canvas' ref='canvasDom' :width='width' :height='height'></canvas>
</template>

<style lang='less' src='./style.less' scoped />