<template>
  <section>
    <!-- Overlay spinner shown until the first detection frame renders -->
    <t-loading :loading="loading" text="准备中，记得允许开启摄像头呦～" show-overlay>
      <div class="video-container card">
        <!-- Live camera feed; canvas is absolutely positioned on top of it -->
        <video ref="video" width="720" height="560" autoplay muted></video>
        <canvas ref="canvas"></canvas>
      </div>
    </t-loading>
  </section>
</template>

<script lang="ts" setup>
import { defineComponent, onMounted, onUnmounted, ref } from 'vue';
import * as faceapi from 'face-api.js';
const video = ref();
const canvas = ref();
let loading = ref(true);
onMounted(async () => {
  await faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
    await faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
    await faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
    await faceapi.nets.faceExpressionNet.loadFromUri('/models');
  startVideo();
  const canvasElement = canvas.value;
  faceapi.matchDimensions(canvasElement, {
    width: video.value.width,
    height: video.value.height,
  });

  const drawFace = async () => {
    const detections = await faceapi
      .detectAllFaces(video.value, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
      .withFaceExpressions();
    const resizedDetections = faceapi.resizeResults(detections, {
      width: video.value.width,
      height: video.value.height,
    });
    canvasElement
      .getContext('2d')
      .clearRect(0, 0, canvasElement.width, canvasElement.height);
    faceapi.draw.drawDetections(canvasElement, resizedDetections);
    faceapi.draw.drawFaceLandmarks(canvasElement, resizedDetections);
    faceapi.draw.drawFaceExpressions(canvasElement, resizedDetections);
    loading.value = false;
  };

  setInterval(drawFace, 100);
});

/**
 * Request camera access and pipe the resulting MediaStream into the
 * <video> element.
 *
 * Bug fix: `navigator.getUserMedia` (the callback-style API the original
 * called) is deprecated and absent in modern browsers, so the camera never
 * started. Use the standard `navigator.mediaDevices.getUserMedia` promise
 * API, keeping the legacy call only as a fallback for very old browsers.
 */
function startVideo() {
  if (navigator.mediaDevices?.getUserMedia) {
    navigator.mediaDevices
      .getUserMedia({ video: true })
      .then((stream) => (video.value.srcObject = stream))
      .catch((error) => console.log(error));
  } else {
    // Legacy prefixed API — callback style, kept for backward compatibility.
    (navigator as any).getUserMedia(
      { video: true },
      (stream: any) => (video.value.srcObject = stream),
      (error: any) => console.log(error),
    );
  }
}
</script>

<style scoped>
/* Absolutely position the canvas so it overlays the <video> element,
   letting detection boxes/landmarks draw on top of the live feed. */
canvas {
  position: absolute;
}
/* Center the video feed inside the card; fills the parent so the
   absolutely-positioned canvas has a sensible containing area. */
.video-container {
  display: flex;
  justify-content: center;
  align-items: center;
  width: 100%;
  height: 100%;
}
</style>
