<script setup lang='ts'>
import * as faceLandmarksDetection from '@tensorflow-models/face-landmarks-detection';
import * as handPoseDetection from '@tensorflow-models/hand-pose-detection';
import * as posenet from '@tensorflow-models/posenet';
import { Pose } from '@tensorflow-models/posenet';
import { Vector2D } from '@tensorflow-models/posenet/dist/types';
import * as tf from '@tensorflow/tfjs';
import { concat, filter, find, findIndex, forEach, has, head, includes, isEmpty, isEqual, last, map, mean, sortBy, values } from 'lodash';
import { Random } from 'mockjs';
import { onMounted, ref } from 'vue';
import { link_map_finger } from '../gesture/finger';
import { KeyPointBodyType, link_map_body } from '../movenet/body';

// Size the video/canvas pair to fill the document body.
const { clientWidth, clientHeight } = document.body;
const width = ref(clientWidth);
const height = ref(clientHeight);

// Template refs bound to the <video> source and the drawing <canvas> overlay.
const videoDom = ref();
const canvasDom = ref();

// Detector configs are typed `any` because the upstream config interfaces
// are not imported in this file.
// MediaPipe FaceMesh runtime, solution files served from the jsDelivr CDN.
const detector_config_face: any = {
  runtime: 'mediapipe',
  solutionPath: 'https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh',
};

// Hand-pose detector: pure tfjs runtime with the full-accuracy model.
const detector_config_gesture: any = {
  runtime: 'tfjs',
  modelType: 'full',
};

onMounted(async () => {
  // NOTE(review): getBackend() only READS the current backend name and the
  // result is discarded — this was likely meant to be `await tf.ready()` or
  // `tf.setBackend(...)`; confirm intent.
  tf.getBackend();
  // 加载PoseNet模型 (load the PoseNet whole-body model)
  const net_body = await posenet.load();

  // Video element that receives the webcam stream.
  const video = videoDom.value;
  // Canvas element the landmarks are drawn onto.
  const canvas = canvasDom.value;

  const model_face = faceLandmarksDetection.SupportedModels.MediaPipeFaceMesh;
  const model_hand = handPoseDetection.SupportedModels.MediaPipeHands;

  // Pipe the webcam stream into the video element.
  navigator.mediaDevices.getUserMedia({ video: true }).then((stream) => {
    video.srcObject = stream;
  });

  video.addEventListener('play', async () => {
    const ctx = canvas.getContext('2d') as CanvasRenderingContext2D;
    // One random color per detected face, indexed by face number.
    const colorList = map(new Array(10).fill(0), () => Random.color());
    // NOTE(review): hand and body both take head(colorList), i.e. the SAME
    // color — if they were meant to differ, use distinct indices.
    const color_hand = head(colorList) as string;
    const color_body = head(colorList) as string;
    // 脸 — face landmark detector (MediaPipe FaceMesh runtime)
    const detector_face = await faceLandmarksDetection.createDetector(model_face, detector_config_face);
    // 手 — hand landmark detector (was misnamed `detector_head`)
    const detector_hand = await handPoseDetection.createDetector(model_hand, detector_config_gesture);

    // Per-frame estimation + drawing loop, re-scheduled via requestAnimationFrame.
    const animate = async () => {
      // 脸 — face keypoints for the current frame
      const faces_data = await detector_face.estimateFaces(video);
      // 手 — hand keypoints for the current frame
      const hands_data: handPoseDetection.Hand[] = await detector_hand.estimateHands(video);
      // 身体 — single-person body pose for the current frame
      const body_data: Pose = await net_body.estimateSinglePose(video, { flipHorizontal: false });
      // Average x or y position of two named body parts (used to synthesize
      // virtual keypoints such as the neck and belly).
      const get_avg_position_fn = (part: (KeyPointBodyType['part'])[], pos: keyof Vector2D) => mean(
        [
          find(body_data.keypoints, ['part', part[0]])?.position[pos],
          find(body_data.keypoints, ['part', part[1]])?.position[pos]
        ]
      );
      // Average confidence score of two named body parts.
      const get_avg_score_fn = (part: (KeyPointBodyType['part'])[]) => mean(
        [
          find(body_data.keypoints, ['part', part[0]])?.score,
          find(body_data.keypoints, ['part', part[1]])?.score
        ]
      );
      // Virtual "neck" keypoint: midpoint of the two shoulders.
      const neck = {
        part: 'neck',
        score: get_avg_score_fn(['leftShoulder', 'rightShoulder']),
        position: {
          x: get_avg_position_fn(['leftShoulder', 'rightShoulder'], 'x'),
          y: get_avg_position_fn(['leftShoulder', 'rightShoulder'], 'y'),
        }
      };
      // Virtual "belly" keypoint: midpoint of the two hips.
      const belly = {
        part: 'belly',
        score: get_avg_score_fn(['leftHip', 'rightHip']),
        position: {
          x: get_avg_position_fn(['leftHip', 'rightHip'], 'x'),
          y: get_avg_position_fn(['leftHip', 'rightHip'], 'y'),
        }
      };
      body_data.keypoints = concat(body_data.keypoints, neck, belly);

      // Virtual face anchor: midpoint of neck and nose; the detected face
      // mesh is translated so its lowest point (chin) lands here.
      const face_oval_body = {
        part: 'faceOval',
        score: get_avg_score_fn(['neck', 'nose']),
        position: {
          x: get_avg_position_fn(['neck', 'nose'], 'x'),
          y: get_avg_position_fn(['neck', 'nose'], 'y'),
        }
      };
      body_data.keypoints = concat(body_data.keypoints, face_oval_body);

      // Schedule the next frame before drawing this one.
      requestAnimationFrame(animate);
      ctx.clearRect(0, 0, canvas.clientWidth, canvas.clientHeight);

      // 脸 — draw each face mesh, scaled 3x and translated onto the body anchor
      forEach(faces_data, (face_item, n: number) => {
        const keypoints = map((face_item ?? {})?.keypoints, i => ({...i, x: i.x*3, y: i.y*3 }));
        // Lowest mesh point (max y) ≈ the chin / face-oval bottom.
        const face_oval_node = last(sortBy(keypoints, 'y'));
        const offset_x = face_oval_body.position.x - (face_oval_node?.x ?? 1);
        const offset_y = face_oval_body.position.y - (face_oval_node?.y ?? 1);

        forEach(keypoints, keypoints_item => {
          ctx.beginPath();
          ctx.fillStyle = colorList[n];
          ctx.arc(keypoints_item.x + offset_x, keypoints_item.y + offset_y, 4, 0, 2 * Math.PI);
          ctx.fill();
        });
      });

      // 手 — draw hand keypoints and the skeleton lines between them
      forEach(hands_data, hand_item => {
        forEach(hand_item?.keypoints, keypoints_item => {
          // 绘制节点 (draw the joint dot)
          ctx.beginPath();
          ctx.fillStyle = color_hand;
          ctx.arc(keypoints_item.x, keypoints_item.y, 5, 0, 2 * Math.PI);
          ctx.fill();
          // 绘制连线 (draw the bone to this joint's predecessor in link_map_finger)
          const node_y = findIndex(link_map_finger, i => includes(i, keypoints_item.name));
          const node_x = findIndex(link_map_finger[node_y], i => isEqual(i, keypoints_item.name));
          const node = keypoints_item?.name === head(head(link_map_finger)) ?
            find(hand_item?.keypoints, i => isEqual(i.name, last(head(link_map_finger)))) :
            find(hand_item?.keypoints, i => isEqual(i.name, link_map_finger[node_y][node_x - 1]));

          if (!isEmpty(node)) {
            // Start a fresh path: without this, stroke() would also re-stroke
            // the arc built above (the moveTo/lineTo extends the same path).
            ctx.beginPath();
            ctx.strokeStyle = color_hand;
            ctx.lineWidth = 4;
            ctx.moveTo(node.x, node.y);
            ctx.lineTo(keypoints_item.x, keypoints_item.y);
            ctx.stroke();
          }
        });
      });

      // 身体 — keep only confident keypoints and attach each one's parent link
      const keypoints_body = map(
        filter(
          body_data.keypoints,
          (item: KeyPointBodyType) => item.score > 0.8
        ),
        (item: KeyPointBodyType) => {
          let position = values(item.position);

          // Replace PoseNet wrist positions with the more precise hand-model
          // wrist. NOTE(review): handedness is intentionally crossed
          // ('Left' hand → rightWrist) — mirrored camera view; confirm.
          if (item.part === 'rightWrist') {
            const wrist_node = find(find(hands_data, ['handedness', 'Left'])?.keypoints, ['name', 'wrist']);

            if (!isEmpty(wrist_node)) {
              position = [wrist_node.x, wrist_node.y]
            }
          }

          if (item.part === 'leftWrist') {
            const wrist_node = find(find(hands_data, ['handedness', 'Right'])?.keypoints, ['name', 'wrist']);

            if (!isEmpty(wrist_node)) {
              position = [wrist_node.x, wrist_node.y]
            }
          }

          // Re-parent the neck onto the synthetic faceOval anchor.
          const link_map = {
            ...link_map_body,
            neck: 'faceOval'
          };
          return {
            ...item,
            position,
            parent: link_map[item.part],
          };
        }
      ) as [];

      forEach(
        filter(keypoints_body, (i: KeyPointBodyType) => !includes(['nose', 'leftEye', 'leftEar', 'rightEye', 'rightEar'], i.part)),
        (item: KeyPointBodyType) => {
        // 绘制节点 (draw the joint dot; the faceOval anchor is drawn smaller)
        ctx.beginPath();
        ctx.fillStyle = color_body;
        ctx.arc(...(item.position as [number, number]), includes(['faceOval'], item.part) ? 5 : 10, 0, 2 * Math.PI);
        ctx.fill();
        // 绘制连线 (draw the bone to the parent joint, if it survived the filter)
        const node = find(keypoints_body, ['part', item.parent]) as KeyPointBodyType | undefined;

        if (has(node, 'position')) {
          // Fresh path so the stroke does not re-stroke the arc above.
          ctx.beginPath();
          ctx.strokeStyle = color_body;
          ctx.lineWidth = 4;
          ctx.moveTo(...(node.position as [number, number]));
          ctx.lineTo(...(item.position as [number, number]));
          ctx.stroke();
        }
      });
    };

    animate();
  });
});
</script>

<template>
  <!-- Webcam preview with a same-size <canvas> overlay; landmarks are drawn
       onto the canvas by the animation loop in <script setup>. Both elements
       are sized from document.body via the width/height refs. -->
  <video class="video" ref="videoDom" :width='width' :height='height' autoplay></video>
  <canvas class='canvas' ref='canvasDom' :width='width' :height='height'></canvas>
</template>

<style lang='less' src='./style.less' scoped />