<template>
  <a-card class="page">
    <!-- Spinner shown while the face-api models are loading -->
    <a-spin :spinning="state.netsLoadModel" tip="模型加载中">
    </a-spin>
    <div class="page_draw" v-show="!state.netsLoadModel">
      <a-row>
        <a-col :span="8">
          <h4>识别目标图像：</h4>
          <div class="page_draw-target">
            <!-- Target face image; canvas overlay draws the detection box -->
            <img id="page_draw-img-target"
                 style="width: 280px;max-height: 320px" :src="imageBase64"
                 alt="识别目标图像" />
            <canvas id="page_draw-canvas-target"></canvas>
          </div>
        </a-col>
        <a-col :span="13" :offset="2">
          <h4>识别匹配视频：</h4>
          <div class="page_draw-discern">
            <!-- Live camera feed; canvas overlay draws match boxes -->
            <video
              id="page_draw-video"
              poster="../../assets/img/face.png"
              muted
              playsinline
            ></video>
            <canvas id="page_draw-video-canvas"></canvas>
          </div>
        </a-col>
      </a-row>
      <div>
        <a-flex justify="space-between" align="center">
          <div>
            <a-space style="margin-top: 15px" size="middle">
              <a-button @click="fnOpen()" type="primary" danger :disabled="state.stream !== null">
                开始验证
              </a-button>
              <a-button @click="fnClose()">结束验证</a-button>
            </a-space>
          </div>
          <div>
            <a-button @click="toExam" type="primary" v-if="matchPass">
              去考试
            </a-button>
          </div>
        </a-flex>
      </div>
    </div>
  </a-card>
</template>

<script lang="ts" setup>
import { onMounted, onUnmounted, reactive, ref, watch } from "vue";
import * as faceapi from "@vladmandic/face-api";
import { useMessage } from "@/hooks/web/useMessage";

// Emitted after a successful face match, when the user clicks "去考试".
const emit = defineEmits(["goToExam"]);
// Remaining detection frames before verification is reported as failed.
const retryCount = ref<number>(10);
// True once the live face matched the target image (distance < 0.35).
const matchPass = ref<boolean>(false);
const { createMessage } = useMessage();
const props = defineProps({
  // Image source (e.g. base64 data URL) of the target face to verify against.
  imageBase64: {
    type: String,
    default: ""
  }
});

/** Reactive component state. */
const state = reactive<any>({
  /** True while the face-api models are loading (shows the spinner). */
  netsLoadModel: true,
  /** Active detector; key into netsOptions. */
  netsType: "ssdMobilenetv1",
  /** Detector options, populated by fnLoadModel(). */
  netsOptions: {
    ssdMobilenetv1: undefined,
    tinyFaceDetector: undefined
  },
  /** FaceMatcher built from the target image (null when no face was found). */
  faceMatcher: {},
  /** Target <img> element. */
  targetImgEl: null,
  /** Canvas overlay for the target image. */
  targetCanvasEl: null,
  /** Live <video> element. */
  discernVideoEl: null,
  /** Canvas overlay for the video. */
  discernCanvasEl: null,
  /** setTimeout handle for the detection loop. */
  timer: 0,
  /** getUserMedia constraints. */
  constraints: {
    audio: false,
    video: {
      /** "ideal" lets the browser pick the closest supported value. */
      width: {
        min: 320,
        ideal: 720,
        max: 1280
      },
      height: {
        min: 200,
        ideal: 480,
        max: 720
      },
      /** Lower frame rates suit constrained-bandwidth transmission. */
      frameRate: {
        min: 7,
        ideal: 15,
        max: 30
      },
      /** Camera selection: "environment" = rear, "user" = front. */
      facingMode: "environment"
    }
  },
  /** Active MediaStream, a placeholder object while opening, or null. */
  stream: null
});

/** Load the face-api models, build detector options and cache DOM elements. */
async function fnLoadModel() {
  // Vite's BASE_URL always ends with "/", so `${BASE_URL}/models` produced
  // "//models" — a protocol-relative URL that fetches from the wrong host.
  // Strip any trailing slash before appending the models directory.
  const modelsPath = `${import.meta.env.BASE_URL.replace(/\/+$/, "")}/models`;

  // The four model loads are independent — fetch them in parallel.
  await Promise.all([
    // Facial landmark model (required by withFaceLandmarks()).
    faceapi.nets.faceLandmark68Net.load(modelsPath),
    // Face recognition model (required by withFaceDescriptors()).
    faceapi.nets.faceRecognitionNet.load(modelsPath),
    // Detector model: ssdMobilenetv1.
    faceapi.nets.ssdMobilenetv1.load(modelsPath),
    // Detector model: tinyFaceDetector.
    faceapi.nets.tinyFaceDetector.load(modelsPath)
  ]);

  // Detector options — ssdMobilenetv1.
  state.netsOptions.ssdMobilenetv1 = new faceapi.SsdMobilenetv1Options({
    minConfidence: 0.5, // 0 ~ 1
    maxResults: 50 // 0 ~ 100
  });
  // Detector options — tinyFaceDetector.
  state.netsOptions.tinyFaceDetector = new faceapi.TinyFaceDetectorOptions({
    inputSize: 416, // 160 224 320 416 512 608
    scoreThreshold: 0.5 // 0 ~ 1
  });

  // Log library / backend versions for debugging.
  console.log(
    `FaceAPI Version: ${
      faceapi?.version || "(not loaded)"
    } \nTensorFlow/JS Version: ${
      faceapi.tf?.version_core || "(not loaded)"
    } \nBackend: ${
      faceapi.tf?.getBackend() || "(not loaded)"
    } \nModels loaded: ${faceapi.tf.engine().state.numTensors} tensors`
  );

  // Cache the DOM elements used by the draw/detect routines.
  state.targetImgEl = document.getElementById("page_draw-img-target");
  state.targetCanvasEl = document.getElementById("page_draw-canvas-target");
  state.discernVideoEl = document.getElementById("page_draw-video");
  state.discernCanvasEl = document.getElementById("page_draw-video-canvas");

  // Hide the loading spinner.
  state.netsLoadModel = false;
}

/**
 * Detect every face in the target image, build the FaceMatcher used by the
 * video loop, and draw a labelled box around each detected face.
 */
async function fnRedrawTarget() {
  // Extract descriptors for each face in the target image.
  const detections = await faceapi
    .detectAllFaces(state.targetImgEl, state.netsOptions[state.netsType])
    .withFaceLandmarks() // requires faceLandmark68Net
    .withFaceDescriptors(); // requires faceRecognitionNet
  if (detections.length === 0) {
    // Nothing to match against — disable the video-side comparison.
    state.faceMatcher = null;
    return;
  }

  // Matcher seeded with the target image's own face descriptors.
  state.faceMatcher = new faceapi.FaceMatcher(detections);

  // Scale results to the on-screen canvas and draw one box per face.
  const displaySize = faceapi.matchDimensions(state.targetCanvasEl, state.targetImgEl);
  const resized = faceapi.resizeResults(detections, displaySize);
  for (const { detection, descriptor } of resized) {
    // Best match among the prepared labels for this descriptor.
    const match = state.faceMatcher.findBestMatch(descriptor);
    new faceapi.draw.DrawBox(detection.box, {
      label: match.label,
      boxColor: "#55b881"
    }).draw(state.targetCanvasEl);
  }
}

/**
 * Detect faces in the current video frame, draw labelled match boxes and
 * decide pass/fail against the target image. Reschedules itself until the
 * match succeeds, the retries run out, or the video is paused.
 */
async function fnRedrawDiscern() {
  // No target descriptors to compare against (no face in the target image).
  if (!state.faceMatcher) return;

  // fnClose() pauses the video; stop the loop when that happens.
  if (state.discernVideoEl.paused) {
    clearTimeout(state.timer);
    state.timer = 0;
    return;
  }

  // Detect every face in the frame with landmarks + descriptors.
  const detect = await faceapi
    .detectAllFaces(state.discernVideoEl, state.netsOptions[state.netsType])
    .withFaceLandmarks()
    .withFaceDescriptors();

  // NOTE: the original `if (!detect)` branch here was unreachable —
  // detectAllFaces resolves to an array, which is always truthy. An empty
  // result simply falls through: matchDistance stays 1 and the frame counts
  // as a miss, which is the behavior the component actually exhibited.

  // Size the overlay canvas to the video element (3rd arg as in face-api's
  // matchDimensions; presumably "use media dimensions" — confirm with docs).
  const dims = faceapi.matchDimensions(
    state.discernCanvasEl,
    state.discernVideoEl,
    true
  );
  const result = faceapi.resizeResults(detect, dims);
  // Smallest (best) match distance across every face in the frame.
  let matchDistance = 1;
  result.forEach(({ detection, descriptor }) => {
    const best = state.faceMatcher.findBestMatch(descriptor);
    // Use the public `distance` getter instead of the private `_distance`.
    matchDistance = Math.min(matchDistance, best.distance);
    // Draw the box with the "label (distance)" text for this face.
    new faceapi.draw.DrawBox(detection.box, { label: best.toString() }).draw(
      state.discernCanvasEl
    );
  });

  if (matchDistance < 0.35) {
    // Close enough to the target face: verification passes.
    createMessage.success("人脸校验通过");
    matchPass.value = true;
    fnClose();
    return;
  }

  if (retryCount.value > 0) {
    retryCount.value--;
  } else {
    // Retries exhausted without a good match: verification fails.
    createMessage.error("人脸校验不通过");
    matchPass.value = false;
    fnClose();
    return;
  }

  // Schedule the next frame.
  state.timer = setTimeout(() => fnRedrawDiscern(), 0);
}

/** Start the camera stream and kick off the recognition loop. */
async function fnOpen() {
  if (state.stream !== null) return;
  try {
    retryCount.value = 10;
    // Truthy placeholder so a second click is ignored while getUserMedia is
    // still pending (the button is also disabled via `state.stream !== null`).
    state.stream = {};
    const stream = await navigator.mediaDevices.getUserMedia(state.constraints);
    state.stream = stream;
    state.discernVideoEl.srcObject = stream;
    // play() returns a promise; await it so autoplay/permission rejections
    // land in the catch below instead of an unhandled rejection.
    await state.discernVideoEl.play();
    // Give the video a moment to render frames before detecting.
    setTimeout(() => fnRedrawDiscern(), 500);
  } catch (error) {
    state.stream = null; // clear placeholder so the button works again
    console.error(error);
    alert("视频媒体流获取错误: " + error);
  }
}

/** Stop the camera, cancel the detection loop and clear the video overlay. */
function fnClose() {
  if (state.stream === null) return;
  state.discernVideoEl.pause();
  state.discernVideoEl.srcObject = null;
  // While fnOpen() is awaiting getUserMedia, state.stream is a plain
  // placeholder object with no getTracks(); calling it would throw. Only
  // stop tracks on a real MediaStream.
  if (typeof state.stream.getTracks === "function") {
    state.stream.getTracks().forEach((track) => track.stop());
  }
  state.stream = null;
  clearTimeout(state.timer);
  state.timer = 0;

  // Clear the overlay shortly after, so any in-flight draw has finished.
  setTimeout(() => {
    state.discernCanvasEl
      .getContext("2d")
      .clearRect(
        0,
        0,
        state.discernCanvasEl.width,
        state.discernCanvasEl.height
      );
  }, 480);
}

/**
 * Replace the target image with a user-selected file and re-run detection.
 * @param e change event, presumably from an <input type="file"> — no caller
 *          is visible in this file; confirm where it is wired up.
 */
async function fnChangeTarget(e) {
  if (!state.targetImgEl || !state.targetCanvasEl) return;
  // Optional chaining: `e.target.files` can be undefined for non-file
  // targets, which made the original `.length` access throw.
  if (!e.target?.files?.length) return;
  // Display the selected file as the target image, then re-detect.
  const img = await faceapi.bufferToImage(e.target.files[0]);
  state.targetImgEl.src = img.src;
  fnRedrawTarget();
}

// Restart the camera when the facing mode (front/rear) changes so the new
// constraint takes effect. When the camera is off there is nothing to do:
// the original also called fnClose() in that case, which returns immediately
// for a null stream, so the dead call is removed.
watch(
  () => state.constraints.video.facingMode,
  () => {
    if (state.stream !== null) {
      fnClose();
      fnOpen();
    }
  }
);

onMounted(async () => {
  // Load the models first, then detect faces in the initial target image.
  await fnLoadModel();
  fnRedrawTarget();
});

onUnmounted(() => {
  // Release the camera and cancel timers when leaving the page.
  fnClose();
});

/** Notify the parent to navigate to the exam (button shown once matchPass). */
function toExam() {
  emit("goToExam");
}
</script>

<style scoped>
/* Page-wide styles for the face-verification card. */
.page {
  min-height: 350px;
}

/* Relative containers so the canvas overlays can be absolutely positioned. */
.page .page_draw-target,
.page .page_draw-discern,
.page .page_draw {
  position: relative;
}

/* NOTE(review): #page_draw-img matches no id in the template above
   (the image is #page_draw-img-target) — possibly a stale selector. */
.page .page_draw #page_draw-img,
.page .page_draw #page_draw-video {
  max-width: 480px;
  max-height: 320px;
}

/* Overlay canvases sit on top of their media element.
   NOTE(review): #page_draw-canvas-discern and #page_draw-canvas match no id
   in the template above — possibly stale selectors. */
.page .page_draw #page_draw-canvas-target,
.page .page_draw #page_draw-canvas-discern,
.page .page_draw #page_draw-canvas {
  position: absolute;
  top: 0;
  left: 0;
}

/* Video overlay canvas, capped to the same box as the video. */
.page .page_draw #page_draw-video-canvas {
  position: absolute;
  top: 0;
  left: 0;
  max-width: 480px;
  max-height: 320px;
}

/* NOTE(review): #page_draw-canvas-box matches no id in the template above. */
.page .page_draw #page_draw-canvas-box {
  border: 1px #9a9a9a solid;
}

</style>
