<template>
  <!-- Main container: red background, centered column layout -->
  <div ref="wrapper" class="w-full h-full bg-red flex flex-col items-center">
    <!-- Visible camera preview. `muted` + `playsinline` are required so the
         programmatic play() in checkCamera() is not blocked by browser
         autoplay policies (the stream is captured with audio:false anyway),
         and so iOS Safari does not force the video fullscreen. -->
    <video class="user-video" id="userVideo" ref="videoEl" muted playsinline />
    <!-- Hidden full-size video used as the face-api detection source -->
    <video class="user-video2" id="userVideo2" ref="videoEl2" muted playsinline />
<!--    <img :src="avatarImg" class="canvasoImG" alt="" />-->
  </div>
  <TipsModal :appendToBody="true" v-model:show="isShowTips" message="人脸照片不清晰，请靠近一点点重试" :showFooter="true" :isShowCancelBtn="false" confirmText="确定" @confirm="confirmInit" type="error" />
</template>

<script setup>
import { ref, onMounted, defineEmits, nextTick, defineExpose } from "vue"
import ApiPublic from "@/api/public"
import * as faceApi from "face-api.js"

import avater from "/src/assets/images/dialogue/avatar-ex.png"
import TipsModal from "@/components/tips-modal.vue";

// Directory (served statically) containing the face-api model weight files.
const MODEL_PATH = "/models"

// getCamera: reports camera availability (true/false).
// getAvatar: delivers the generated avatar URL, or the string "notFace".
const emits = defineEmits(["getCamera", "getAvatar"])
// Template refs used to reach DOM elements
const wrapper = ref()
// Visible preview <video>
const videoEl = ref(null)
// Hidden full-size <video> used as the face-detection source
const videoEl2 = ref(null)
// Interval id for the periodic detection loop (see detectFaces)
const checkFaceTimer = ref(null)
// True while a face is currently being detected
let isCheckFace = ref(false)
// Latest captured face snapshot as a JPEG data URL
let avatarImg = ref("")
// Controls the "face unclear, please retry" dialog
let isShowTips = ref(false)
// Milliseconds accumulated with no face detected (300ms per failed tick)
let checkFaceNum = ref(0)

// After mount, load the detection models; loadModel() calls checkCamera()
// itself once the weights are ready.
onMounted(() => {
  loadModel()
  // checkCamera()
})

// 检查摄像头并设置视频流
const checkCamera = async () => {
  const video = document.getElementById("userVideo")
  const { width, height } = video.getBoundingClientRect()
  // 获取媒体设备信息
  const navigator = window.navigator.mediaDevices
  //判断用户是否拥有摄像头
  const devices = await navigator.enumerateDevices()
  // 如果存在媒体设备，获取摄像头视频流
  if (devices) {
    emits("getCamera", true)
    // 请求用户媒体设备（摄像头）
    const stream = await navigator.getUserMedia({
        audio: false,
        video: {
          width: 500, // 设置视频宽度
          height: 350, // 设置视频高度
          facingMode: "user" // 使用前置摄像头
        }
      })
      .catch((error) => {
        console.log("error", error)
      })

    // 如果视频元素存在，将视频流绑定到视频元素上并开始播放
    if (videoEl.value) {
      videoEl.value.srcObject = stream
      videoEl.value.play()
      videoEl2.value.srcObject = stream
      videoEl2.value.play()
      setTimeout(() => {
        detectFaces()
      }, 1000)
    }
  } else {
    emits("getCamera", false)
  }
}

/**
 * Load every face-api model required for detection (tiny detector,
 * landmarks, expressions, age/gender) sequentially from MODEL_PATH,
 * then hand off to checkCamera() to start the video pipeline.
 */
const loadModel = async () => {
  const modelLoaders = [
    faceApi.loadTinyFaceDetectorModel,
    faceApi.loadFaceLandmarkTinyModel,
    faceApi.loadFaceExpressionModel,
    faceApi.loadAgeGenderModel
  ]
  for (const loadWeights of modelLoaders) {
    await loadWeights(MODEL_PATH)
  }
  // Alternative nets API, kept for reference:
  // await faceApi.nets.ssdMobilenetv1.loadFromUri(MODEL_PATH);
  // await faceApi.nets.faceLandmark68Net.loadFromUri(MODEL_PATH);
  // await faceApi.nets.faceRecognitionNet.loadFromUri(MODEL_PATH);
  await checkCamera()
}

// Restart face detection after the user confirms the "face unclear" dialog.
function confirmInit() {
  detectFaces()
}

// Start the periodic face-detection loop against the hidden video element.
// Runs detection every 300ms; on success, crops/exports the face via
// exportBase64(); after >3000ms with no face, stops and emits "notFace".
// type === "delayCheck" additionally schedules an automatic stop after 3s.
// NOTE(review): each call appends a new canvas to document.body and starts a
// new interval without clearing any previous one — repeated calls leak both;
// confirm callers only invoke this once per session.
const detectFaces = async (type) => {
  if(type === "delayCheck") {
    setTimeout(() => {
      clearCheck()
    },3000)
  }

  // Canvas sized from the hidden video; hidden via the .face-block CSS class.
  const canvas = faceApi.createCanvasFromMedia(videoEl2.value)
  canvas.className = "face-block"
  const ctx = canvas.getContext("2d")
  document.body.append(canvas)

  checkFaceTimer.value = setInterval(async () => {
    // Single-face detection with landmarks (tiny model), expressions, age/gender.
    const detections = await faceApi.detectSingleFace(videoEl2.value, new faceApi.TinyFaceDetectorOptions()).withFaceLandmarks(true)
        .withFaceExpressions().withAgeAndGender()
    if (detections) {
      isCheckFace.value = true
      // NOTE(review): results are resized to a target larger than the canvas
      // (+100 / +50 px) — presumably padding for the crop margins used in
      // exportBase64; confirm these offsets stay in sync with that function.
      const resizedDetections = faceApi.resizeResults(detections, { width: canvas.width + 100, height: canvas.height + 50 })
      ctx.clearRect(0, 0, canvas.width, canvas.height)
      const { age, gender, genderProbability, landmarks } = resizedDetections
      // faceApi.draw.drawDetections(canvas, resizedDetections)
      // Walk the result object's keys, exporting any entry carrying a box.
      for (let resKey in resizedDetections) {
        // First point of each landmark group is enough for the in-frame check.
        const leftEye = landmarks.getLeftEye()[0]
        const rightEye = landmarks.getRightEye()[0]
        const mouth = landmarks.getMouth()[0]

        if (resizedDetections[resKey].box) {
          exportBase64(resizedDetections[resKey].box, canvas, leftEye, rightEye, mouth)
        }
        // new faceApi.draw.DrawTextField([`${~~age} years`, `${gender} {${genderProbability.toFixed(1)}`], resizedDetections.detection.box.bottomRight).draw(canvas)
      }
    } else {
      // No face this tick: reset state and accumulate elapsed time (300ms/tick).
      isCheckFace.value = false
      avatarImg.value = ""
      checkFaceNum.value = checkFaceNum.value + 300
      if (checkFaceNum.value > 3000) {
        // isShowTips.value = true
        clearInterval(checkFaceTimer.value)
        emits("getAvatar","notFace")
        console.log("未检测到人脸")
      }
    }
  }, 300)
}
// Crop the detected face region (with margin) out of the hidden video, check
// whether the face is fully in frame, and if so send the snapshot to the
// avatar-generation API (debounced by the avatarImg guard + 600ms timeout).
// target: detection bounding box; canvas: detection canvas (bounds math);
// leftEye/rightEye/mouth: first landmark point of each feature.
const exportBase64 = async (target, canvas, leftEye, rightEye, mouth) => {
  // Offscreen canvas that will hold the cropped face.
  let canvasElement = document.createElement("canvas")
  let canvasContext = canvasElement.getContext("2d")
  // Crop origin in video coordinates, padded beyond the detection box.
  let startX = target.left - 150
  let startY = target.top - 80
  canvasElement.width = target.width + 160
  canvasElement.height = target.height + 80
  // BUG FIX: the original relied on the implicit `window.userVideo2` id-global;
  // use the template ref so this keeps working if the element id changes.
  canvasContext.drawImage(videoEl2.value, startX, startY, canvasElement.width, canvasElement.height, 0, 0, canvasElement.width, canvasElement.height)

  if (
    (leftEye.x < canvas.width / 2.5 && rightEye.x < canvas.width / 2.5) || // both eyes on the left edge
    (leftEye.x > (2 * canvas.width) / 2.5 && rightEye.x > (2 * canvas.width) / 2.5) || // both eyes on the right edge
    // NOTE(review): both operands below test mouth.y — one was likely meant to
    // be an eye's y coordinate; kept as-is to preserve behavior. Confirm the
    // intended "face too high / too low" logic.
    (mouth.y < canvas.height / 2.5 && mouth.y < canvas.height / 2.5) || // mouth near the top edge
    (mouth.y > (2 * canvas.height) / 2.5 && mouth.y > (2 * canvas.height) / 2.5) // mouth near the bottom edge
  ) {
    // Only part of the face is in frame — skip this capture.
    // console.log("半张人脸")
  } else {
    console.log("完整人脸")
    // Only one capture/upload at a time; avatarImg doubles as an in-flight flag.
    if (!avatarImg.value) {
      avatarImg.value = canvasElement.toDataURL("image/jpeg")
      let params = {
          // style: "handdrawn",
          style: "hongkong",
          base64: avatarImg.value
      }

      setTimeout(async () => {
        // Stop the detection loop before uploading the snapshot.
        clearInterval(checkFaceTimer.value)
        const res = await ApiPublic.generateAvatar(params)
        if(res.code === 200) {
          emits("getAvatar",res.data.Data.ImageURL)
          avatarImg.value = ""
        } else if(res.code === 400){
          // Backend rejected the photo — prompt the user to retry.
          isShowTips.value = true
          avatarImg.value = ""
        }
      }, 600)
    }
  }
  // NOTE(review): this unconditionally overwrites avatarImg — even in the
  // half-face branch — which defeats the !avatarImg.value guard on the next
  // tick; kept as-is to preserve existing behavior, but confirm intent.
  avatarImg.value = canvasElement.toDataURL("image/jpeg")
}

// Stop the periodic face-detection loop, if one is running.
function clearCheck() {
  clearInterval(checkFaceTimer.value)
}

// Expose detection start/stop so the parent can drive this component imperatively.
defineExpose({
  detectFaces,
  clearCheck
})
</script>
<style lang="scss">
// Visible camera preview shown to the user.
.user-video {
  width: 500px;
  height: 350px;
  border-radius: 28px;
}
// Detection-source video: pushed far off-screen (left: 2000px) at a large
// size so it keeps rendering live frames for face-api without being visible.
.user-video2 {
  position: absolute;
  left: 2000px;
  width: 5000px;
  height: 3500px;
  border-radius: 28px;
}
// Canvas appended to <body> by detectFaces(); kept hidden.
.face-block {
  display: none;
  position: absolute;
  right: 90px;
  top: 380px;
  //width: 500px !important;
  //height: 350px !important;
}
// Styling for the (currently commented-out) avatar preview <img> in the template.
.canvasoImG {
  position: relative;
  top: 100px;
  right: 850px;
  width: 212px;
  height: 212px;
}
</style>
