<template>
  <PageHeaderWrapper title="贴图主页" :left-arrow="false">
    <!-- Camera preview area: <video> renders the stream, <canvas> overlays the face effects -->
    <div class="video_wrapper" id="video_wrapper">
      <video :ref="el => videoRef = el"></video>
      <!-- Status overlay. NOTE(review): this is hidden exactly when
           loaded && !useAble — i.e. the failure case — so the failure text set
           in the script never displays, while an empty div shows on success.
           The condition looks inverted; confirm intended logic. -->
      <div class="video_wrapper_spin" v-if="!(deviceOptions.loaded && !deviceOptions.useAble)">{{ deviceOptions.text }}
      </div>
      <canvas :ref="el => canvasRef = el"></canvas>
    </div>
    <!-- Shutter button, visible only once the camera is confirmed usable -->
    <div v-if="deviceOptions.loaded && deviceOptions.useAble" class="take_photo" @click="takePhoto"></div>
  </PageHeaderWrapper>
</template>

<script setup lang="ts">
import { ref, onMounted } from "vue";
import { onBeforeRouteLeave } from "vue-router";
import * as faceApi from "face-api.js";
import PageHeaderWrapper from "@/components/PageHeaderWrapper/index.vue";
import { showToast } from "vant";
import { IPos } from "@/types";
import { imageUtil, isMobile } from "@/utils";
import hatImage from "./images/hat.png";
import maroonLeft from "./images/maroon_left.png";
import maroonRight from "./images/maroon_right.png";

/** Base URI from which face-api.js downloads its model weight files. */
const MODEL_PATH = "./model";

/** Video element instance (bound via template ref) */
const videoRef = ref();
/** Canvas element instance used to draw the overlays */
const canvasRef = ref();
/** requestAnimationFrame handle of the detection loop (0 = not running) */
const animateRef = ref<number>(0);
/** Camera/device state driving the template */
const deviceOptions = ref({
  useAble: false,  // true once the camera stream is live
  text: "正在初始化前置摄像头",  // status text shown in the overlay
  loaded: false,  // true once initialization finished (success or failure)
})


/**
 * Request the front-facing camera and attach its stream to the <video> element.
 *
 * Resolves with the playing video element after a ~1.5 s settle delay (so the
 * first frames are available before face detection starts). Rejects with an
 * Error when getUserMedia is unsupported, permission is denied, playback
 * fails, or the video ref is not mounted — the original silently left the
 * promise pending forever in the missing-ref case, which kept the caller's
 * `.finally` from ever firing and leaked the acquired stream.
 */
const getMediaCamera = (): Promise<HTMLVideoElement> => {
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    console.log("不支持摄像头!");
    return Promise.reject(new Error("getUserMedia is not supported"));
  }
  return new Promise((res, rej) => {
    navigator.mediaDevices.getUserMedia({ video: { facingMode: "user" } })
      .then(function (stream) {
        console.log("视频流初始化成功")
        const video = videoRef.value as HTMLVideoElement;
        if (!video) {
          // Release the camera we just acquired, then settle the promise.
          stream.getTracks().forEach((track) => track.stop());
          rej(new Error("video element not mounted"));
          return;
        }
        video.srcObject = stream;
        // Register the handler BEFORE calling play() so the event cannot be missed.
        video.onplay = () => {
          console.log("视频播放")
          // Give the stream time to settle before detection begins.
          setTimeout(() => {
            res(video)
          }, 1500);
        }
        // play() returns a promise; autoplay-policy failures must reject too.
        video.play().catch(rej);
      })
      .catch((err) => {
        console.error("Error getting video:", err);
        rej(err instanceof Error ? err : new Error(String(err)))
      });
  })
}

/**
 * Load all face-api.js model weights needed by the detection loop.
 *
 * Returns a promise that resolves (with null, preserving the original
 * contract) once every model has loaded, and rejects with the first load
 * error. The original wrapped Promise.all in `new Promise` — the explicit
 * promise-construction anti-pattern — which is removed here.
 */
const faceApiInit = (): Promise<null> => {
  const modelList = [
    faceApi.nets.tinyFaceDetector.loadFromUri(MODEL_PATH),
    faceApi.nets.faceLandmark68Net.loadFromUri(MODEL_PATH),
    faceApi.nets.faceLandmark68TinyNet.loadFromUri(MODEL_PATH),
    faceApi.nets.faceExpressionNet.loadFromUri(MODEL_PATH)
  ]
  return Promise.all(modelList).then(() => null)
}

/**
 * Axis-aligned bounding box of a set of landmark points.
 * Shared by the three overlay-placement functions below, which previously
 * each duplicated this min/max scan verbatim.
 * For an empty list the bounds stay at +/-Infinity.
 */
const getFaceBounds = (positions: Array<faceApi.Point>) => {
  let min_x = Infinity, max_x = -Infinity, min_y = Infinity, max_y = -Infinity;
  for (const { x, y } of positions) {
    if (x < min_x) min_x = x;
    if (x > max_x) max_x = x;
    if (y < min_y) min_y = y;
    if (y > max_y) max_y = y;
  }
  return { min_x, max_x, min_y, max_y };
}

/**
 * Hat position/size derived from the face landmarks.
 * The hat is 2.5 face-widths wide; 360x200 is the hat image's aspect ratio.
 */
const getHatDisypay = (positions: Array<faceApi.Point>) => {
  const { min_x, max_x, min_y, max_y } = getFaceBounds(positions);
  const faceWidth = max_x - min_x;
  const width = Math.floor(faceWidth * 2.5);
  const height = Math.floor(width * 200 / 360);
  // Raise the hat above the face, offset down by a fraction of face height.
  const top = Math.floor(min_y - height + (max_y - min_y) / 2.8);
  const left = Math.floor(min_x - (width - faceWidth) / 2 - faceWidth * 1.2);
  return { width, height, top, left };
}

/**
 * Left firecracker position/size derived from the face landmarks.
 * 145x178 is the source image's aspect ratio.
 */
const getMaroonLeftDisplay = (positions: Array<faceApi.Point>) => {
  const { min_x, max_x, min_y } = getFaceBounds(positions);
  const width = Math.floor((max_x - min_x) / 2);
  const height = Math.floor(width * 178 / 145);
  const top = Math.floor(min_y + height * 1.5);
  const left = Math.floor(min_x - width * 4);
  return { width, height, top, left }
}

/**
 * Right firecracker position/size derived from the face landmarks.
 * 114x118 is the source image's aspect ratio.
 */
const getMaroonRightDisplay = (positions: Array<faceApi.Point>) => {
  const { min_x, max_x, min_y } = getFaceBounds(positions);
  const width = Math.floor((max_x - min_x) / 2);
  const height = Math.floor(width * 118 / 114);
  const top = Math.floor(min_y + height * 2);
  const left = Math.floor(max_x - width * 1.5);
  return { width, height, top, left }
}

/** 绘制图片 */
const drawImageToCanvas = (ctx: CanvasRenderingContext2D, imageUrl: string, pos: IPos) => {
  const { width, height, top, left } = pos;
  /** 画图片 */
  const image = new Image();
  image.src = imageUrl;
  image.onload = () => {
    ctx?.drawImage(image, left, top, width, height);
  }
}

/** 处理特效 */
const faceAnimationHandler = (result: faceApi.WithFaceExpressions<faceApi.WithFaceLandmarks<{
  detection: faceApi.FaceDetection;
}, faceApi.FaceLandmarks68>> | undefined, video: HTMLVideoElement, canvas: HTMLCanvasElement) => {
  const ctx = canvas.getContext("2d", { willReadFrequently: true });
  if (!ctx) return;
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  if (!result) {
    showToast({ message: "未检测到人脸" });
  }
  else {
    const { expressions: { happy }, landmarks: { positions } } = result;
    if (happy <= 0.5) {
      showToast({ message: "请保持微笑后显示特效" })
      return;
    }
    drawImageToCanvas(ctx, hatImage, getHatDisypay(positions));
    drawImageToCanvas(ctx, maroonLeft, getMaroonLeftDisplay(positions))
    drawImageToCanvas(ctx, maroonRight, getMaroonRightDisplay(positions))
  }
}


/** 拍照 */
const takePhoto = () => {
  if (isMobile) {
    showToast({ message: "移动端拍照正在研发" })
    return;
  }
  imageUtil.getHtmlPicture(".video_wrapper").then(url => {
    imageUtil.downLoadImg(url, "照片")
  })
}

onMounted(() => {
  // Boot sequence: load the face-api models, then open the camera, then
  // start the per-frame detection loop.
  // NOTE(review): a faceApiInit() rejection is unhandled here — a model-load
  // failure leaves deviceOptions.loaded false forever; consider a .catch.
  faceApiInit().then(() => {
    console.log("face-api初始化成功")
    getMediaCamera().then((video) => {
      console.log("实时视频传输中");
      deviceOptions.value.useAble = true;
      deviceOptions.value.text = "";
      const canvas = canvasRef.value as HTMLCanvasElement;
      if (!canvas) {
        console.log("画布未找到")
        return;
      }
      // Size the canvas backing store to the video's rendered (CSS) size.
      // NOTE(review): landmark coordinates come from the video's intrinsic
      // resolution; if that differs from offsetWidth/offsetHeight the overlay
      // positions may be offset — confirm on devices where they don't match.
      canvas.width = video.offsetWidth;
      canvas.height = video.offsetHeight;
      console.log("开始执行人脸检测");
      // Detection loop: one detectSingleFace per animation frame; the handle
      // is kept in animateRef so the route-leave hook can cancel it.
      const run = async () => {
        const result = await faceApi.detectSingleFace(video, new faceApi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions();
        faceAnimationHandler(result, video, canvas);
        animateRef.value = requestAnimationFrame(run);
      }
      run();
    }).catch(() => {
      console.log("媒体初始化失败");
      deviceOptions.value.text = "摄像头初始化失败，可能您拒绝了权限或设备不支持";
      deviceOptions.value.useAble = false;
    }).finally(() => {
      // Mark init finished regardless of outcome so the UI can settle.
      deviceOptions.value.loaded = true;
    })
  });
})

onBeforeRouteLeave(() => {
  cancelAnimationFrame(animateRef.value);
  animateRef.value = 0;
})

</script>

<style lang="less" scoped>
@import "./index.less";

.video_wrapper {
  width: 100%;
  height: 100%;
  overflow: hidden;
  position: relative;
  background-color: #000;

  .video_wrapper_spin {
    position: absolute;
    top: 50%;
    left: 50%;
    color: #fff;
    padding: 3px 5px;
    font-weight: 600;
    transform: translate(-50%, -50%);
  }

  video {
    width: 100%;
    height: 100%;
    object-fit: cover;
    overflow: hidden;
    //opacity: 0;
  }

  canvas {
    position: absolute;
    top: 0%;
    left: 0;
    width: 100%;
    height: 100%;
    z-index: 10;
  }
}

.take_photo {
  position: fixed;
  width: 70px;
  height: 70px;
  border-radius: 50%;
  overflow: hidden;
  background-image: url('./images/photo.svg');
  background-size: 100% 100%;
  background-repeat: no-repeat;
  left: calc(50% - 35px);
  bottom: 25px;
  z-index: 50;

  &:active {
    background-image: url('./images/photo_active.svg');
  }
}
</style>
