<script setup>
import * as faceapi from "@vladmandic/face-api";
import { onMounted, onUnmounted, reactive, watch, ref } from "vue";
import { login } from "@/api/user";
import router from "@/router";
/**
 * Rate-limiter: wraps `fn` so it runs at most once every `delay` ms.
 * Calls arriving before the window has elapsed are silently dropped
 * (leading-edge throttle; no trailing call).
 */
function throttle(fn, delay) {
  let previousCall = 0;
  return function (...args) {
    const current = Date.now();
    if (current - previousCall < delay) return;
    // Invoke first, then stamp — matches original behavior if fn throws.
    fn.apply(this, args);
    previousCall = current;
  };
}
/**
 * Persist a session-scoped cookie on the site root path.
 * Both name and value are URI-encoded so delimiter characters
 * (";", "=", ",") in a token cannot corrupt the cookie string.
 */
function setCookie(name, value) {
  document.cookie = `${encodeURIComponent(name)}=${encodeURIComponent(value)}; path=/;`;
}

/**
 * Log face-detection results to the console and, when a detection matches a
 * known reference face ("person 1" / "person 2") with confidence above 90%,
 * log the corresponding user in and redirect to the home page.
 *
 * @param {Array} detections - face-api detection results; each entry may carry
 *   a `best` FaceMatch attached by the recognition loop.
 */
function logDetectionInfo(detections) {
  if (!detections || !detections.length) {
    console.log("No faces detected");
    return;
  }

  // Authenticate as the given user, store the token, and navigate home.
  // NOTE(review): password is hard-coded — confirm this demo credential is intentional.
  function loginAndRedirect(userName) {
    login({ userName, userPassword: "123456" })
      .then((res) => {
        if (res.data.token == null) {
          // Login rejected — server returned no token; nothing to store.
        } else {
          // Save the token to a cookie so subsequent requests are authenticated.
          setCookie("token", res.data.token);
          console.log(res.data.token);
          router.push("/");
        }
      })
      .catch((error) => {
        console.error(error);
      });
  }

  detections.forEach((detection, index) => {
    const confidence = Math.round(detection.detection._score * 100);
    console.log(`Face ${index + 1}:`);
    console.log(`- Confidence: ${confidence}%`);
    console.log(`- Position: x=${Math.round(detection.detection._box.x)}, y=${Math.round(detection.detection._box.y)}`);

    if (detection.best) {
      const matchLabel = detection.best.toString();
      console.log(`- Match: ${matchLabel}`);

      // BUG FIX: compare against the number 90, not the string "90" — the
      // original relied on implicit string-to-number coercion.
      if (matchLabel.includes("person 1") && confidence > 90) {
        // Face 1 recognized with >90% confidence.
        console.log("检测到人脸1");
        loginAndRedirect("张易良");
      } else if (matchLabel.includes("person 2") && confidence > 90) {
        // Face 2 recognized with >90% confidence.
        console.log("检测到人脸2");
        loginAndRedirect("肖胜杰");
      } else {
        // Matched label is unknown (or confidence too low).
        console.log("检测到未知人脸");
      }
    }
  });
}

// Throttled wrapper: detection info is logged at most once per second.
const throttledLogDetection = throttle(logDetectionInfo, 1000);

// Template refs for the DOM elements driven by face-api.
const videoRef = ref(null); // live camera <video>
const videoCanvasRef = ref(null); // overlay canvas drawn over the video
const targetImgRef = ref(null); // hidden reference <img> used to build the matcher
const targetCanvasRef = ref(null); // canvas for boxes drawn on the reference image

// Shared reactive state for model loading, detection options, and the camera.
const state = reactive({
  netsLoadModel: true, // true while models are still loading (shows spinner)
  netsType: "tinyFaceDetector", // which detector's options to use below
  netsOptions: {
    // populated in fnLoadModel() once the models are ready
    ssdMobilenetv1: undefined,
    tinyFaceDetector: undefined,
  },
  faceMatcher: null, // FaceMatcher built from the reference image
  timer: 0, // setTimeout handle for the recognition loop
  stream: null, // active MediaStream, {} while opening, null when closed
  isInitialized: false, // set true after models load successfully
  constraints: {
    // getUserMedia() constraints for the camera
    audio: false,
    video: {
      width: { min: 320, ideal: 720, max: 1280 },
      height: { min: 200, ideal: 480, max: 720 },
      frameRate: { min: 7, ideal: 15, max: 30 },
      facingMode: "user",
    },
  },
});

/**
 * Load all face-api models from /models in parallel and build the detector
 * option objects. Flags the UI as ready on success.
 *
 * @returns {Promise<boolean>} true when every model loaded, false on error
 *   (errors are logged, never thrown).
 */
async function fnLoadModel() {
  const MODELS_PATH = "/models";
  try {
    const nets = [
      faceapi.nets.faceLandmark68Net,
      faceapi.nets.faceRecognitionNet,
      faceapi.nets.ssdMobilenetv1,
      faceapi.nets.tinyFaceDetector,
    ];
    await Promise.all(nets.map((net) => net.load(MODELS_PATH)));

    // Detector options; which one is used is selected via state.netsType.
    state.netsOptions.ssdMobilenetv1 = new faceapi.SsdMobilenetv1Options({
      minConfidence: 0.5,
      maxResults: 50,
    });
    state.netsOptions.tinyFaceDetector = new faceapi.TinyFaceDetectorOptions({
      inputSize: 416,
      scoreThreshold: 0.5,
    });

    state.netsLoadModel = false;
    state.isInitialized = true;
    console.log("Models loaded successfully");
    return true;
  } catch (error) {
    console.error("Error loading models:", error);
    return false;
  }
}

/**
 * Detect faces in the reference image, build the FaceMatcher used by the
 * live recognition loop, and draw a labelled box for each detected face
 * onto the target canvas. Clears the matcher when no face is found.
 */
async function fnRedrawTarget() {
  const img = targetImgRef.value;
  const canvas = targetCanvasRef.value;
  if (!img || !canvas) {
    console.log("Target elements not ready");
    return;
  }

  try {
    const detections = await faceapi
      .detectAllFaces(img, state.netsOptions[state.netsType])
      .withFaceLandmarks()
      .withFaceDescriptors();

    if (detections.length === 0) {
      state.faceMatcher = null;
      console.log("No faces detected in target image");
      return;
    }

    state.faceMatcher = new faceapi.FaceMatcher(detections);
    const dims = faceapi.matchDimensions(canvas, img);
    const resized = faceapi.resizeResults(detections, dims);

    throttledLogDetection(resized);

    for (const { detection, descriptor } of resized) {
      const best = state.faceMatcher.findBestMatch(descriptor);
      new faceapi.draw.DrawBox(detection.box, {
        label: best.label,
        boxColor: "#55b881",
      }).draw(canvas);
    }
  } catch (error) {
    console.error("Error in target detection:", error);
  }
}

/**
 * One pass of the live recognition loop: detect faces in the video frame,
 * match each against the reference FaceMatcher, draw labelled boxes on the
 * overlay canvas, then schedule the next pass (~100 ms later).
 * Stops rescheduling while the video is paused.
 */
async function fnRedrawDiscern() {
  const video = videoRef.value;
  const canvas = videoCanvasRef.value;
  if (!video || !canvas || !state.faceMatcher) return;

  if (video.paused) {
    clearTimeout(state.timer);
    state.timer = 0;
    return;
  }

  const scheduleNext = () => {
    state.timer = setTimeout(() => fnRedrawDiscern(), 100);
  };

  try {
    const detections = await faceapi
      .detectAllFaces(video, state.netsOptions[state.netsType])
      .withFaceLandmarks()
      .withFaceDescriptors();

    if (!detections || detections.length === 0) {
      scheduleNext();
      return;
    }

    const dims = faceapi.matchDimensions(canvas, video, true);
    const resized = faceapi.resizeResults(detections, dims);

    // Pair every detection with its best match so logging and drawing agree.
    const matched = resized.map(({ detection, descriptor }) => ({
      detection,
      descriptor,
      best: state.faceMatcher.findBestMatch(descriptor),
    }));

    throttledLogDetection(matched);

    // Wipe the previous frame's boxes before drawing the new ones.
    canvas.getContext("2d").clearRect(0, 0, canvas.width, canvas.height);

    for (const { detection, best } of matched) {
      new faceapi.draw.DrawBox(detection.box, {
        label: best.toString(),
        // Green for a close match, red otherwise.
        boxColor: best.distance < 0.5 ? "#55b881" : "#ff5555",
      }).draw(canvas);
    }

    scheduleNext();
  } catch (error) {
    console.error("Error in video detection:", error);
    scheduleNext();
  }
}

/**
 * Open the user-facing camera, attach the stream to the <video> element,
 * start playback, and kick off the recognition loop shortly afterwards.
 * No-op when the video ref is missing or a stream already exists.
 */
async function fnOpen() {
  if (!videoRef.value || state.stream !== null) return;

  // Placeholder marks the stream as "opening" so a re-entrant call is a no-op.
  state.stream = {};
  try {
    const mediaStream = await navigator.mediaDevices.getUserMedia(state.constraints);
    state.stream = mediaStream;
    videoRef.value.srcObject = mediaStream;
    await videoRef.value.play();
    // Small delay lets the first frames arrive before detection starts.
    setTimeout(() => fnRedrawDiscern(), 300);
  } catch (error) {
    state.stream = null;
    console.error("Camera error:", error);
    alert("无法访问摄像头: " + error.message);
  }
}

/**
 * Shut the camera down: pause the video, release every media track,
 * cancel the recognition timer, and clear the overlay canvas.
 * No-op when nothing is open.
 */
function fnClose() {
  if (state.stream === null) return;

  const video = videoRef.value;
  if (video) {
    video.pause();
    video.srcObject = null;
  }

  // state.stream may be the {} placeholder from fnOpen, hence the guard.
  if (state.stream && state.stream.getTracks) {
    for (const track of state.stream.getTracks()) {
      track.stop();
    }
  }

  state.stream = null;
  clearTimeout(state.timer);
  state.timer = 0;

  const canvas = videoCanvasRef.value;
  if (canvas) {
    canvas.getContext("2d").clearRect(0, 0, canvas.width, canvas.height);
  }
}

/**
 * File-input handler for swapping the reference image: render the chosen
 * file into the target <img> and rebuild the FaceMatcher from it.
 *
 * @param {Event} e - change event from an <input type="file">.
 */
async function fnChangeTarget(e) {
  if (!targetImgRef.value || !targetCanvasRef.value) return;
  if (!e.target || !e.target.files.length) return;

  try {
    const [file] = e.target.files;
    const image = await faceapi.bufferToImage(file);
    targetImgRef.value.src = image.src;
    await fnRedrawTarget();
  } catch (error) {
    console.error("Error changing target:", error);
  }
}

// Restart the camera whenever the requested facing mode changes, but only
// if a stream is currently open.
watch(
  () => state.constraints.video.facingMode,
  () => {
    if (state.stream === null) return;
    fnClose();
    fnOpen();
  }
);

// On mount: load models first; only then build the matcher and open the camera.
onMounted(async () => {
  const ok = await fnLoadModel();
  if (!ok) return;
  await fnRedrawTarget();
  await fnOpen();
});

// Release the camera and timers when the component is torn down.
onUnmounted(fnClose);
</script>

<template>
  <div class="page">
    <!-- Loading screen shown until face-api models finish loading. -->
    <div class="page_load" v-if="state.netsLoadModel">
      <div class="loading-text">正在加载人脸识别模型...</div>
    </div>

    <div v-else class="page_draw">
      <!-- Hidden reference image + canvas: only used to build the FaceMatcher. -->
      <div class="page_draw-target" style="display: none">
        <img ref="targetImgRef" src="/images/facescan.jpg" crossorigin="anonymous" />
        <canvas ref="targetCanvasRef"></canvas>
      </div>
      <!-- Live camera feed with the recognition overlay canvas on top. -->
      <div class="page_draw-discern">
        <video
          ref="videoRef"
          poster="/images/loading.gif"
          muted
          playsinline
          @loadedmetadata="() => console.log('Video metadata loaded')"
          @error="(e) => console.error('Video error:', e)"
        ></video>
        <canvas ref="videoCanvasRef"></canvas>
      </div>
    </div>
  </div>
</template>

<style scoped>
/* Full-viewport centered container. */
.page {
  width: 100%;
  height: 100vh;
  display: flex;
  justify-content: center;
  align-items: center;
  background: #f5f5f5;
}

.loading-text {
  font-size: 1.2rem;
  color: #666;
}

.page_draw {
  width: 100%;
  max-width: 720px;
  margin: 0 auto;
}

/* 4:3 aspect-ratio box via the padding-bottom trick (75% = 3/4). */
.page_draw-discern {
  position: relative;
  width: 100%;
  height: 0;
  padding-bottom: 75%;
}

/* Stack the overlay canvas exactly on top of the video. */
.page_draw-discern video,
.page_draw-discern canvas {
  position: absolute;
  top: 0;
  left: 0;
  width: 100%;
  height: 100%;
  object-fit: cover;
}

canvas {
  position: absolute;
  top: 0;
  left: 0;
}
</style>
