<template>
  <div class="app-container">
    <div>{{ title }}</div>
    <div class="x-face-detect-modal">
      <!-- Camera preview. The detection loop is started from onMounted, so no
           canplay handler is needed (the previous :onCanplay bound an
           undefined handleVideoCanPlay). -->
      <video autoplay id="camera" />
      <!-- Overlay canvas; its width/height are assigned at runtime by
           initViewFinder / matchDimensions. The previous width="{this.width}"
           was React-style interpolation and only set a literal string. -->
      <canvas id="canvas" />
    </div>
  </div>
</template>
<script setup >
import { ref, onMounted } from "vue";
// import { uploadFile } from "@/api/base"; // 这里需要使用 图片对比接口
import {
  detectSingleFace,
  nets,
  matchDimensions,
  resizeResults,
  draw,
  SsdMobilenetv1Options,
  Box,
} from "face-api.js";
// SSD MobileNet v1 detector options.
const options = new SsdMobilenetv1Options({
  // Minimum confidence threshold (default: 0.5).
  minConfidence: 0.5,
});
const formId = "x-face-detect-form"; // NOTE(review): not referenced in the visible code — confirm before removing
const title = ref("人脸识别"); // page title ("face recognition")
const canvas = ref("canvas"); // overlay canvas; replaced with the real DOM node in onMounted
const video = ref("video"); // camera video element; replaced with the real DOM node in onMounted
const stream = ref(null); // current camera MediaStream, kept so the tracks can be stopped later
const getUserMediaFail = ref(false); // true when the camera could not be opened
const boxObject = ref({ width: 100, height: 100 }); // view-finder box size in px
// Corner coordinates of the view-finder rectangle; filled in by initViewFinder.
const viewFinderBox = ref({
  topLeft: {
    x: 0,
    y: 0,
  },
  topRight: {
    x: 0,
    y: 0,
  },
  bottomLeft: {
    x: 0,
    y: 0,
  },
  bottomRight: {
    x: 0,
    y: 0,
  },
});

// Load the detection model. The model files live under public/models.
// (Contact the author for the files.)
// !Model files are not available, so init() stays disabled.
// const init = async () => {
//   await nets.ssdMobilenetv1.loadFromUri("/models");
// };

/**
 * @name Open the camera
 * Requests a user-facing camera stream sized to the current canvas and
 * delivers it through the callbacks, falling back through the legacy
 * prefixed getUserMedia APIs for older browsers.
 * @param {(stream: MediaStream) => void} success called with the stream
 * @param {(err: any) => void} error called with the failure reason
 */
const getUserMedia = (
  success,
  error
) => {
  // Prefer the front camera: { video: { facingMode: "user" } }
  // Force the rear camera:  { video: { facingMode: { exact: "environment" } } }
  // "ideal" asks for, but does not require, the given resolution.
  const constraints = {
    video: {
      facingMode: "user",
      width: { ideal: canvas.value.width },
      height: { ideal: canvas.value.height },
    },
  };
  // Bug fix: guard navigator.mediaDevices itself — it is undefined on
  // insecure (non-HTTPS) contexts and the old code threw before reaching
  // any fallback branch.
  if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    // Modern standard API
    navigator.mediaDevices.getUserMedia(constraints).then(success).catch(error);
  } else if (navigator.webkitGetUserMedia) {
    // WebKit-based browsers
    navigator.webkitGetUserMedia(constraints, success, error);
  } else if (navigator.mozGetUserMedia) {
    // Firefox
    navigator.mozGetUserMedia(constraints, success, error);
  } else if (navigator.getUserMedia) {
    // Legacy unprefixed API
    navigator.getUserMedia(constraints, success, error);
  } else {
    // Bug fix: previously no branch matched and the caller was never
    // notified at all.
    error(new Error("getUserMedia is not supported in this browser"));
  }
};
/**
 * @name Initialize the view-finder rectangle
 * Centers a boxObject-sized rectangle inside the video frame, stores its
 * four corners in viewFinderBox, and syncs the canvas size to the video.
 */
const initViewFinder = () => {
  if (!video.value) return;
  const marginLeft = (video.value.width - boxObject.value.width) / 2;
  // Bug fix: the vertical margin must come from the video *height*;
  // the original divided (width - boxHeight) / 2.
  const marginTop = (video.value.height - boxObject.value.height) / 2;
  if (canvas.value) {
    // Keep the overlay canvas exactly the size of the video.
    canvas.value.width = video.value.width;
    canvas.value.height = video.value.height;
  }
  viewFinderBox.value = {
    topLeft: {
      x: marginLeft,
      y: marginTop,
    },
    topRight: {
      x: marginLeft + boxObject.value.width,
      y: marginTop,
    },
    bottomLeft: {
      x: marginLeft,
      y: marginTop + boxObject.value.height,
    },
    bottomRight: {
      x: marginLeft + boxObject.value.width,
      y: marginTop + boxObject.value.height,
    },
  };
};

/**
 * @name Draw the view-finder
 * Clears the overlay canvas, draws the hint text, and strokes an L-shaped
 * corner mark at each of the four viewFinderBox corners.
 */
const drawViewFinder = () => {
  const context = canvas.value?.getContext("2d");
  if (!context) return;
  const cornerLength = 50; // length of each corner-mark leg, in px
  context.clearRect(0, 0, canvas.value?.width || 0, canvas.value?.height || 0);
  // Roughly center the hint text (assumed ~200px wide).
  const fontLeft = video.value ? (video.value.width - 200) / 2 : 200;
  context.font = "20px Arial";
  context.fillText("请保持脸部在取景框内", fontLeft, 50);
  // Bug fix: start a fresh path. Without beginPath(), every call kept
  // extending the previous path and stroke() redrew all accumulated
  // segments on top of each other.
  context.beginPath();
  for (const [corner, point] of Object.entries(viewFinderBox.value)) {
    if (!point) continue;
    // Each leg points toward the interior of the box: right corners draw
    // their horizontal leg leftward, bottom corners draw their vertical
    // leg upward.
    const dx = corner.endsWith("Right") ? -cornerLength : cornerLength;
    const dy = corner.startsWith("bottom") ? -cornerLength : cornerLength;
    context.moveTo(point.x, point.y);
    context.lineTo(point.x + dx, point.y);
    context.moveTo(point.x, point.y);
    context.lineTo(point.x, point.y + dy);
  }
  context.lineWidth = 2;
  context.strokeStyle = "white";
  context.stroke();
};

/**
 * @name Capture a snapshot
 * Draws the region of `video` around `startPoint` — padded by 40px on each
 * side so the whole face fits — onto an offscreen canvas and encodes it
 * as a JPEG.
 * @param {HTMLVideoElement} video source video element
 * @param {{x: number, y: number}} startPoint top-left corner of the face box
 * @param {number} width face-box width
 * @param {number} height face-box height
 * @returns {Promise<Blob|null>} JPEG blob, or null if encoding failed
 */
const cameraShoot = (
  video,
  startPoint,
  width,
  height
) => {
  const canvas = document.createElement("canvas");
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  canvas
    .getContext("2d")
    ?.drawImage(
      video,
      // Source rect: detected box padded by 40px on every side.
      startPoint.x - 40,
      startPoint.y - 40,
      width + 80,
      height + 80,
      // Destination rect: fill the whole snapshot canvas.
      0,
      0,
      canvas.width,
      canvas.height
    );
  // Bug fix: `new Promise<Blob | null>` is TypeScript generic syntax; this
  // <script> block is plain JavaScript, so it was a syntax error.
  return new Promise((resolve) => canvas.toBlob(resolve, "image/jpeg"));
};
/**
 * Draw a labelled detection box on the overlay canvas.
 * @param {Box} box face-api.js detection box
 * @param {string} label text shown on the box
 */
const drawBox = (box, label) => {
  if (!canvas.value) return;
  const context = canvas.value.getContext("2d");
  context?.clearRect(box.x, box.y, box.width, box.height);
  // Renamed from `drawBox`, which shadowed this function itself.
  const labelledBox = new draw.DrawBox(box, {
    label: label,
  });
  labelledBox.draw(canvas.value);
};

// Stop the camera: end every track on the current media stream (if any).
const handleStopVideo = () => {
  const activeStream = stream.value;
  if (!activeStream) return;
  for (const track of activeStream.getTracks()) {
    track.stop();
  }
};

/**
 * @name Face-detection loop
 * Self-scheduling async loop: waits for a frame, detects the most confident
 * face, draws its box, snapshots the face region, uploads it, then resumes.
 */
const detectFace = async () => {
  // Crucial: yield until the next animation frame so the loop never
  // hard-blocks the main thread.
  await new Promise((resolve) => requestAnimationFrame(resolve));
  // drawViewFinder()
  // Only detect while the video is actually playing.
  if (
    !canvas.value ||
    !video.value ||
    !video.value.currentTime ||
    video.value.paused ||
    video.value.ended
  )
    return detectFace();
  // Detect the single face with the highest confidence score.
  const result = await detectSingleFace(video.value, options);
  if (!result) return detectFace();
  // Sync the canvas dimensions with the displayed video.
  const dims = matchDimensions(canvas.value, video.value, true);
  // Rescale the detection box in case the displayed size differs from the
  // native video size.
  const resizedResult = resizeResults(result, dims);
  const box = resizedResult.box;
  // if (!checkInViewFinder(box)) return detectFace()
  // drawViewFinder()
  // draw.drawDetections could render the raw detections directly; we draw
  // a labelled box instead.
  drawBox(box, "识别中");
  video.value.pause();
  // Capture the face region as a JPEG snapshot.
  const image = await cameraShoot(
    video.value,
    resizedResult.box.topLeft,
    resizedResult.box.width,
    resizedResult.box.height
  );
  if (!image) {
    drawBox(box, "识别失败");
    // Bug fix: `delay` was never defined anywhere; inline an explicit
    // one-second pause before retrying.
    await new Promise((resolve) => setTimeout(resolve, 1000));
    video.value.play();
    return detectFace();
  }
  const file = new window.File([image], "人脸头像.jpeg", {
    type: "image/jpeg",
  });
  // Upload the captured face for comparison. Bug fix: `uploadFile` was
  // called but its import is commented out, which crashed the loop with a
  // ReferenceError. Guard and trap failures so the loop keeps running.
  // TODO: restore the real face-comparison endpoint import.
  try {
    if (typeof uploadFile === "function") {
      await uploadFile({ file });
    }
  } catch (err) {
    console.error("face upload failed", err);
    // drawBox(box, '识别失败')
  }
  video.value.play();
  return detectFace();
  // handleStopVideo()
};
// Component setup: grab the DOM elements, open the camera, and start the
// detection loop and view-finder drawing.
onMounted(() => {
  // Bug fix: #camera and #canvas ARE the <video>/<canvas> elements; the old
  // getElementsByTagName(...) calls searched *inside* them and always
  // produced undefined.
  video.value = document.getElementById("camera");
  canvas.value = document.getElementById("canvas");
  console.log("mounted", canvas.value, video.value);
  // Request the camera stream.
  getUserMedia(
    (streams) => {
      // Keep the stream so it can be stopped later.
      stream.value = streams;
      // Show the live video in the <video> element.
      if (video.value) {
        video.value.srcObject = streams;
      }
    },
    () => (getUserMediaFail.value = true)
  );
  // Model files are not available locally, so loading stays disabled:
  // init();
  detectFace();
  drawViewFinder();
});
</script>

<style lang="scss">

.x-face-detect-modal {
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  position: relative;
  // Mirror the preview horizontally so it behaves like a selfie camera.
  transform: rotateY(180deg);
  canvas {
    // Overlay the drawing canvas on top of the video.
    position: absolute;
    top: 0;
  }
  video {
    // Stretch the video to fill its box.
    object-fit: fill;
  }
}
</style>
