<template>
  <div class="face-verify">
    <!-- 视频区域 -->
    <!-- <el-dialog title="人脸验证" v-model="showDialog" width="350px" class="face-verify-dialog" @close="closeVideo" :close-on-click-modal="false"> -->
      <el-dialog title="人脸验证" v-model="showDialog" width="350px" class="face-verify-dialog" @close="closeVideo" :close-on-click-modal="false">
      <video ref="videoRef" class="video-box" autoplay muted playsinline></video>
      <!-- <canvas ref="canvasRef" width="400" height="400" style="position: absolute;left: 400px;top: 50px;background: white;"></canvas> -->
      <!-- 状态提示 -->
      <div class="status">
        <div v-if="!isModelLoaded" class="loading">模型加载中...<progress :value="modelLoadProgress" max="100"></progress></div>
        <div v-else-if="!isVideo" class="loading">摄像头初始化中...</div>
        <div v-else-if="!isFaceDetected" class="prompt">请正对摄像头，检测人脸数据中</div>
        <div v-else-if="!isFrontalFace" class="prompt">请保持正脸</div>
        <div v-else-if="!isOneFace" class="prompt">请保证屏幕内只有一张人脸</div>
        <div v-else-if="currentActionIndex < selectedActions.length" class="detecting">
          <progress :value="currentActionIndex" max="2"></progress>
          <p>请完成动作: {{ selectedActions[currentActionIndex] }}</p>
        </div>
        <div v-else-if="isWaitingForDirectLook" class="detecting">
          <progress :value="3 - directLookCountdown" max="3" style="display: none;"></progress>
          <p>请正对摄像头</p>
        </div>
        <div v-else class="success">正在检测中</div>
      </div>
      <div class="tips">
        <p class="title">请保持正脸在取景框中并根据屏幕指示完成识别</p>
        <div class="icon-list">
          <div class="icon-item">
            <div class="icon-bg">
              <el-icon><Camera /></el-icon>
            </div>
            <p>允许使用相机</p>
          </div>
          <div class="icon-item">
            <div class="icon-bg">
              <el-icon><FullScreen /></el-icon>
            </div>
            <p>避免遮挡</p>
          </div>
          <div class="icon-item">
            <div class="icon-bg">
              <el-icon><Sunny /></el-icon>
            </div>
            <p>光线充足</p>
          </div>
        </div>
      </div>
    </el-dialog>
  </div>
</template>

<script setup>
import { ref, onMounted, onUnmounted, watch, defineProps } from 'vue';
import * as faceapi from "face-api.js";
import { Camera, FullScreen, Sunny } from '@element-plus/icons-vue';
import { facialIdentificationByUser } from "@/api/student"
// `verifyCounter` is incremented by the parent each time it wants to start
// a new verification round (consumed by the watcher below).
const props = defineProps({
  verifyCounter: {
    type: Number,
    default: 0
  }
})
// 'successFaceVerify' carries the final result payload back to the parent.
// NOTE(review): 'closeFaceVerify' is declared but never emitted in this
// file chunk — confirm whether it is still needed.
const emit = defineEmits(['closeFaceVerify', 'successFaceVerify'])

// Each increment of `verifyCounter` kicks off a fresh verification round.
// The newValue > oldValue guard ensures only genuine increments (not
// re-initialization) start the camera.
watch(() => props.verifyCounter, (newValue, oldValue) => {
  if (newValue > oldValue) {
    console.log("触发人脸验证");
    // Reset state to support repeated invocations
    // closeVideo();
    // Start camera capture
    startCapture();
  }
})

// ---- Reactive state for the verification flow ----
const videoStream = ref(null);   // MediaStream from getUserMedia (reused across rounds)
const videoRef = ref(null);      // <video> element ref
// const canvasRef = ref(null);
const showDialog = ref(false);         // dialog visibility
const isModelLoaded = ref(false);      // all three face-api models loaded
const isFaceDetected = ref(false);     // at least one face currently in frame
const isVideo = ref(false);            // camera stream attached to the <video>
const isFrontalFace = ref(false);      // frontal-face flag (check currently disabled in detectFaces)
const isOneFace = ref(true);           // exactly one face in frame
const currentActionIndex = ref(0);     // index into selectedActions
const selectedActions = ref([]);       // the randomly chosen liveness actions
const isWaiting = ref(false);          // 1s pause between completed actions
let detectionInterval = null;          // setInterval handle for the detection loop
const isWaitingForDirectLook = ref(false); // final "look straight" countdown stage
const directLookCountdown = ref(3);        // ticks remaining in that stage
const capturedImage = ref('');             // base64 JPEG frame sent to the backend
const actions = ['点头', '向左转头', '向右转头', '张嘴']; // blink detection performs poorly, disabled for now

// const loadModels = async () => {
//   const MODEL_URL = "/models"; // 确保将模型文件放在 public/models 目录下
//   await faceapi.nets.ssdMobilenetv1.loadFromUri(MODEL_URL);
//   await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL);
//   await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL);
//   isModelLoaded.value = true;
//   console.log("模型加载完成");
// };

const modelLoadProgress = ref(0); // overall load progress, 0-100

/**
 * Loads the face-api.js models (detector, 68-point landmarks, recognition)
 * from the public /models directory, updating `modelLoadProgress` (0-100)
 * and setting `isModelLoaded` once all three are ready.
 *
 * NOTE(review): face-api.js `loadFromUri` is not documented to accept an
 * `{ onProgress }` option, so the byte-level callback may never fire — the
 * per-model bump below guarantees the bar still advances. Confirm against
 * the face-api.js version in use.
 */
const loadModels = async () => {
  const MODEL_URL = "/models"; // model files must live under public/models
  const totalModels = 3;
  let loadedModels = 0;

  // Byte-level progress within the current model (if the loader supports it).
  // Overall = (fully loaded models + fraction of the current one) / total.
  // (The original computed `loadedModels + percent/total`, which reported
  // e.g. ~21% instead of ~53% while the second model was at 60%.)
  const onProgress = (event) => {
    if (event.lengthComputable) {
      const percentComplete = (event.loaded / event.total) * 100;
      modelLoadProgress.value = Number(((loadedModels * 100 + percentComplete) / totalModels).toFixed(2));
    }
  };

  // Advance the bar after each model completes, so progress is visible even
  // when onProgress never fires.
  const markModelLoaded = () => {
    loadedModels++;
    modelLoadProgress.value = Number(((loadedModels * 100) / totalModels).toFixed(2));
  };

  await faceapi.nets.ssdMobilenetv1.loadFromUri(MODEL_URL, { onProgress });
  markModelLoaded();
  await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL, { onProgress });
  markModelLoaded();
  await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL, { onProgress });
  markModelLoaded();

  isModelLoaded.value = true;
  console.log("模型加载完成");
};

/**
 * Opens the verification dialog, attaches (or requests) the camera stream,
 * waits for video metadata, then starts the detection loop and picks the
 * random liveness actions for this round.
 *
 * NOTE(review): `videoRef` is read immediately after `showDialog` flips to
 * true — this relies on the el-dialog content already being in the DOM.
 * Confirm the dialog is not lazily rendered on first open.
 */
const startCapture = async () => {
  showDialog.value = true;
  isVideo.value = false;
  try {
    // Reuse an existing stream when we still hold one; otherwise request
    // camera access (triggers the browser permission prompt).
    if (!videoStream.value) {
      videoStream.value = await navigator.mediaDevices.getUserMedia({
        video: true,
      });
    }
    videoRef.value.srcObject = videoStream.value;
    isVideo.value = true;

    // Wait for video metadata. If it is already available (e.g. the stream
    // is being reused), resolve immediately — otherwise 'loadedmetadata'
    // may never fire again and this promise would hang forever.
    await new Promise((resolve) => {
      if (videoRef.value.readyState >= HTMLMediaElement.HAVE_METADATA) {
        resolve();
      } else {
        videoRef.value.addEventListener('loadedmetadata', resolve, { once: true });
      }
    });

    // Fixed square viewport used by the detector.
    videoRef.value.width = 400;
    videoRef.value.height = 400;
    // Start the face-detection loop.
    detectFaces();
    // Randomly choose the liveness actions for this round.
    selectedActions.value = getRandomActions();
  } catch (error) {
    // Permission denied or no camera: dialog stays in "initializing" state.
    console.error("无法访问摄像头", error);
  }
};
// Stop capturing: cancel the detection timer, hide the dialog, and release
// every active media track so the camera indicator turns off.
const stopCapture = () => {
  clearInterval(detectionInterval);
  showDialog.value = false;
  const stream = videoStream.value;
  if (stream) {
    for (const track of stream.getTracks()) {
      track.stop();
    }
    videoStream.value = null;
    videoRef.value.srcObject = null;
  }
};
/**
 * Picks `count` distinct random actions from `pool` (defaults to the
 * module-level `actions` list and the original count of 2).
 *
 * Fixes two defects of the original: `Array.prototype.sort` with a random
 * comparator produces a biased shuffle, and sorting mutated the shared
 * `actions` array in place. Uses an unbiased Fisher–Yates shuffle on a copy.
 */
const getRandomActions = (pool = actions, count = 2) => {
  const shuffled = [...pool];
  for (let i = shuffled.length - 1; i > 0; i--) {
    const j = Math.floor(Math.random() * (i + 1));
    [shuffled[i], shuffled[j]] = [shuffled[j], shuffled[i]];
  }
  return shuffled.slice(0, count);
};
// Start the 200ms face-detection loop on the live <video>. This drives the
// whole state machine: face presence, single-face check, the liveness
// action stage, and the final "look straight" countdown that captures a
// frame. The loop is re-entrant: it clears its own interval and calls
// detectFaces() again between stages.
const detectFaces = () => {
  const video = videoRef.value;
  // const canvas = canvasRef.value;
  // if (!video || !canvas) {
  //   console.error("视频或画布元素未正确加载");
  //   return;
  // }
  console.log("开始检测人脸");
  
  // const displaySize = { width: 400, height: 400 };
  // faceapi.matchDimensions(canvas, displaySize);

  detectionInterval = setInterval(async () => {
    console.log("开启定时器检测人脸");
    
    if (video.readyState === video.HAVE_ENOUGH_DATA) {
      // Detect every face in the frame, plus landmarks and descriptors.
      const detections = await faceapi
        .detectAllFaces(video)
        .withFaceLandmarks()
        .withFaceDescriptors();

      console.log("加载faceapi完成");
      
      /**** Canvas drawing disabled
      // // Clear the canvas
      // const ctx = canvas.getContext("2d");
      // ctx.clearRect(0, 0, canvas.width, canvas.height);

      // // Draw the detection results on the canvas
      // const resizedDetections = faceapi.resizeResults(
      //   detections,
      //   displaySize
      // );
      // faceapi.draw.drawDetections(canvas, resizedDetections);
      // faceapi.draw.drawFaceLandmarks(canvas, resizedDetections);

      *******/
      // At least one face currently visible?
      isFaceDetected.value = detections.length > 0;

      if (isFaceDetected.value) {
        // Only the first detected face is used for action checks.
        const landmarks = detections[0].landmarks;
        // isFrontalFace.value = isFrontal(landmarks);
        // NOTE(review): the frontal-face check is disabled and the flag is
        // forced true, so the "请保持正脸" prompt can never appear.
        isFrontalFace.value = true;
        console.log("检测到",detections.length,"张人脸");
        if (detections.length > 1) {
          isOneFace.value = false
        } else {
          isOneFace.value = true
        }
        // Liveness stage: wait for the user to perform the current action.
        if (isFrontalFace.value && currentActionIndex.value < selectedActions.value.length) {
          const currentAction = selectedActions.value[currentActionIndex.value];
          if (checkAction(currentAction, landmarks)) {
            // Action recognized: pause 1s, then either continue with the
            // next action or move on to the look-straight countdown.
            clearInterval(detectionInterval);
            isWaiting.value = true;
            currentActionIndex.value++;
            setTimeout(() => {
              isWaiting.value = false;
              if (currentActionIndex.value < selectedActions.value.length) {
                detectFaces();
              } else {
                startDirectLookCountdown();
              }
            }, 1000);
          }
        } else if (isWaitingForDirectLook.value) {
          // Countdown stage (starts at 3, decremented each tick): the frame
          // is captured on the tick where the countdown has reached 2, i.e.
          // the second tick after the stage begins.
          if (directLookCountdown.value === 2) {
            captureImage();
          }
          if (directLookCountdown.value > 0) {
            directLookCountdown.value--;
          } else {
            clearInterval(detectionInterval);
            isWaitingForDirectLook.value = false;
          }
        }
      }
    }
  }, 200); // run detection every 200ms
};


//检测正脸
// const isFrontal = (landmarks) => {
//   const leftEye = landmarks.getLeftEye();
//   const rightEye = landmarks.getRightEye();
//   const nose = landmarks.getNose();

//   const eyeCenterX = (leftEye[0].x + rightEye[3].x) / 2;
//   const noseX = nose[3].x;

//   const angle = Math.atan2(rightEye[0].y - leftEye[3].y, rightEye[0].x - leftEye[3].x) * (180 / Math.PI);

//   return Math.abs(eyeCenterX - noseX) < 20 && Math.abs(angle) < 10;
// };

// Dispatches an action label to its landmark-based detector.
// Returns false for any unknown action name.
const checkAction = (action, landmarks) => {
  const detectors = {
    '点头': detectNod,
    '向左转头': detectLeftTurn,
    '向右转头': detectRightTurn,
    '张嘴': detectMouthOpen,
    '眨眼': detectBlink,
  };
  const detector = detectors[action];
  return detector ? detector(landmarks) : false;
};

// Nod check: treats the face as nodding when the vertical span between the
// top and bottom nose landmarks exceeds 35px (the span grows as the head
// tilts down toward the camera).
const detectNod = (landmarks) => {
  console.log("检测点头动作");
  const nose = landmarks.getNose();
  const verticalSpan = nose[3].y - nose[0].y;
  return verticalSpan > 35;
};

// Left-turn check: requires both a horizontal eye offset (left eye's outer
// corner well left of the right eye's outer corner) and an eye-line tilt
// beyond +10 degrees.
const detectLeftTurn = (landmarks) => {
  console.log("检测向左转头动作");
  const leftEye = landmarks.getLeftEye();
  const rightEye = landmarks.getRightEye();
  const deltaY = rightEye[0].y - leftEye[3].y;
  const deltaX = rightEye[0].x - leftEye[3].x;
  const angle = Math.atan2(deltaY, deltaX) * (180 / Math.PI);
  console.log("左眼", leftEye);
  console.log("右眼：", rightEye);
  console.log("角度：", angle);
  
  const hasHorizontalOffset = leftEye[0].x < rightEye[3].x - 30;
  const isTilted = angle > 10;
  return hasHorizontalOffset && isTilted;
};

// Right-turn check: mirror of detectLeftTurn — the right eye's inner corner
// must sit well right of the left eye's outer corner AND the eye line must
// tilt past -10 degrees.
const detectRightTurn = (landmarks) => {
  console.log("检测向右转头动作");
  const leftEye = landmarks.getLeftEye();
  const rightEye = landmarks.getRightEye();
  const deltaY = rightEye[0].y - leftEye[3].y;
  const deltaX = rightEye[0].x - leftEye[3].x;
  const angle = Math.atan2(deltaY, deltaX) * (180 / Math.PI);

  console.log("左眼", leftEye);
  console.log("右眼：", rightEye);
  console.log("角度：", angle);
  
  const hasHorizontalOffset = rightEye[0].x > leftEye[3].x + 30;
  const isTilted = angle < -10;
  return hasHorizontalOffset && isTilted;
};

// Open-mouth check: the gap between the upper (index 1) and lower (index 7)
// outer-lip landmarks must exceed 20px.
const detectMouthOpen = (landmarks) => {
  console.log("检测张嘴");
  const mouth = landmarks.getMouth();
  const gap = mouth[7].y - mouth[1].y;
  return gap > 20;
};

// Blink check: the eyes count as closed when the mean eye aspect ratio
// (EAR) of both eyes drops below 0.2.
const detectBlink = (landmarks) => {
  console.log("检测眨眼");
  const [leftEAR, rightEAR] = [landmarks.getLeftEye(), landmarks.getRightEye()].map(eyeAspectRatio);
  const meanEAR = (leftEAR + rightEAR) / 2;
  return meanEAR < 0.2;
};

// Eye aspect ratio (EAR) from the six eye landmarks: the mean of the two
// vertical lid distances divided by the horizontal eye width. Roughly 0.3
// for an open eye, falling toward 0 as the lid closes.
const eyeAspectRatio = (eye) => {
  const dist = (p, q) => Math.sqrt(Math.pow(p.x - q.x, 2) + Math.pow(p.y - q.y, 2));
  const verticalA = dist(eye[1], eye[5]);
  const verticalB = dist(eye[2], eye[4]);
  const horizontal = dist(eye[0], eye[3]);
  return (verticalA + verticalB) / (2.0 * horizontal);
};

// Final stage: ask the user to look straight at the camera and restart the
// detection loop. detectFaces decrements the countdown each tick and calls
// captureImage on the tick where the countdown has reached 2.
const startDirectLookCountdown = () => {
  isWaitingForDirectLook.value = true;
  directLookCountdown.value = 3;
  detectFaces();
};

/**
 * Grabs the current video frame as a base64 JPEG, sends it to the backend
 * for face comparison, then closes the dialog and reports the outcome to
 * the parent via 'successFaceVerify' ({code: 20000} pass, {code: 50000} fail).
 */
const captureImage = async () => {
  const video = videoRef.value;
  const canvas = document.createElement('canvas');
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  const ctx = canvas.getContext('2d');
  ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
  // Strip the data-URL prefix: the backend expects the bare base64 payload.
  // (The original encoded the frame twice, immediately overwriting the
  // first result, and then logged the whole base64 string to the console.)
  capturedImage.value = canvas.toDataURL('image/jpeg').replace('data:image/jpeg;base64,', '');
  clearInterval(detectionInterval);
  // Ask the backend to compare the captured frame against the user's record.
  // FIXME(review): realName / idCardNumber are hard-coded test identity data
  // — they must come from the logged-in user before this ships.
  const res = await facialIdentificationByUser({
    realName: "石中伟",
    idCardNumber: "510121200206131211",
    imageBase64: capturedImage.value
  })
  // const res = {code:20000}

  console.log("检测人脸返回",res);
  // Map the backend code to the result payload the parent expects.
  let result = ""
  if (res.code === 20000) {
    result = { code:20000,message: '人脸验证通过' };
  } else {
    result = { code:50000,message: '人脸验证未通过，请重试' };
  }
  closeVideo()
  emit('successFaceVerify', result);
};

// Resets all verification state and tears down the camera/dialog. Bound to
// the dialog's @close and also called before emitting the final result, so
// it must be safe to call repeatedly.
const closeVideo = () => {
  console.log("关闭");
  
  // Reset every flag that drives the status prompts in the template.
  showDialog.value = false;
  // isModelLoaded.value = false;
  isFaceDetected.value = false;
  isVideo.value = false;
  isFrontalFace.value = false;
  isOneFace.value = true;
  currentActionIndex.value = 0;
  selectedActions.value = [];
  isWaiting.value = false;
  isWaitingForDirectLook.value = false;
  directLookCountdown.value = 3;
  capturedImage.value = '';
  // props.startFaceVerify = false
  // Release the camera stream and cancel the detection timer.
  stopCapture();
};
onMounted(() => {
  // Preload the face-api.js models on mount so the dialog can open without
  // a model-download delay later.
  loadModels();
});

// Release the camera and detection timer when the component is destroyed.
onUnmounted(() => {
  stopCapture();
});

// Expose methods to the parent component (currently nothing is exposed).
defineExpose({
  
  // totalPages
})
</script>
    
<style scoped lang="scss">
// Styles for the face-verification dialog.
// NOTE(review): $black, $black_2, $channalPage and $placeholderColor are
// SCSS variables defined outside this file (presumably auto-injected by the
// build config) — confirm they resolve in scoped styles.
.face-verify-dialog{
  // Circular camera viewport.
  .video-box {
    width: 250px;
    height: 250px;
    background: #fff;
    border-radius: 50%;
    border: 4px solid #ddd;
    margin: 0 auto;
    display: block;
    object-fit: cover;
  }
  // Status prompt under the viewport (loading / action instructions).
  .status {
    margin-top: 10px;
    text-align: center;
    font-size: 18px;
    color: $black;
  }
  // Bottom hint area with the three icon tips.
  .tips{
    margin-top: 10px;
    .title{
      font-size: 14px;
      color: $black_2;
      text-align: center;
    }
    .icon-list{
      margin-top: 10px;
      display: flex;
      justify-content: space-between;
      .icon-item{
        display: flex;
        flex-direction: column;
        align-items: center;
        justify-content: center;
        // Circular icon badge.
        .icon-bg{
          width: 40px;
          height: 40px;
          display: flex;
          align-items: center;
          justify-content: center;
          text-align: center;
          border-radius: 50%;
          background: $channalPage;
          i{
            font-size: 24px;
            color: $placeholderColor;
          }
        }
        p{
          margin-top: 5px;
          color: var(--el-text-color-regular);
        }
      }
    }
  }
}

</style>
    