// ref https://github.com/WebDevSimplified/Face-Detection-JavaScript.git
// Grab the <video> element that will display the webcam stream.
const video = document.getElementById("video");

// Where the pretrained face-api.js model weights live.
const MODEL_URL = "./models";

// Load every model in parallel, then start the webcam pipeline.
Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
  faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
  faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL),
  faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL),
  faceapi.nets.ssdMobilenetv1.loadFromUri(MODEL_URL),
]).then(startVideo);

/**
 * Starts the webcam stream, builds a face matcher from the labeled
 * reference images, and continuously draws recognition boxes over the video.
 *
 * Side effects: sets `video.srcObject`, appends "Loaded" text and a canvas
 * to `document.body`, and starts a 100 ms detection interval.
 */
async function startVideo() {
  // Attach the webcam stream. `navigator.getUserMedia` is deprecated and
  // removed from modern browsers; use the promise-based MediaDevices API.
  try {
    const stream = await navigator.mediaDevices.getUserMedia({ video: {} });
    video.srcObject = stream;
  } catch (err) {
    console.error(err);
    return; // no camera — nothing more to do
  }

  // Compute a face descriptor set from the labeled reference images.
  const labeledFaceDescriptors = await loadLabeledImages();

  // Round-trip the descriptors through JSON (demonstrates how descriptors
  // could be persisted and restored). The restored matcher is unused here.
  const json_str = JSON.stringify({ parent: labeledFaceDescriptors });
  const contentJson = JSON.parse(json_str);
  await createFaceMatcher(contentJson);

  // 0.6 is the face-api.js default distance threshold for a "match".
  const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, 0.6);
  document.body.append("Loaded");

  // Wait until the video is actually playing before sizing the overlay
  // canvas — more reliable than a fixed setTimeout delay.
  video.addEventListener("play", () => {
    const canvas = faceapi.createCanvasFromMedia(video);
    document.body.append(canvas);
    const displaySize = { width: video.width, height: video.height };
    faceapi.matchDimensions(canvas, displaySize);

    setInterval(async () => {
      // Detect all faces in the current frame with landmarks + descriptors.
      const detections = await faceapi
        .detectAllFaces(video)
        .withFaceLandmarks()
        .withFaceDescriptors();
      const resizedDetections = faceapi.resizeResults(detections, displaySize);
      const results = resizedDetections.map((d) =>
        faceMatcher.findBestMatch(d.descriptor)
      );

      // Clear the previous frame's boxes before drawing the new ones.
      const context = canvas.getContext("2d");
      context.clearRect(0, 0, canvas.width, canvas.height);

      results.forEach((result, i) => {
        const box = resizedDetections[i].detection.box;
        const drawBox = new faceapi.draw.DrawBox(box, {
          label: result.toString(),
        });
        drawBox.draw(canvas);
      });
    }, 100);
  });
}

/**
 * Loads the labeled reference photos and computes a face descriptor set
 * for each person.
 *
 * Expects two images per label at `/labeled_images/<label>/<1|2>.jpg`.
 *
 * @returns {Promise<faceapi.LabeledFaceDescriptors[]>} one entry per label.
 * @throws {Error} if no face can be detected in a reference image
 *   (previously this crashed with an opaque TypeError on `undefined.descriptor`).
 */
function loadLabeledImages() {
  const labels = [
    "John",
    "Kim",
    "Trump",
    "HongShuPen",
    "ZhiQiang",
    "ZhengXing",
  ];
  return Promise.all(
    labels.map(async (label) => {
      const descriptions = [];
      for (let i = 1; i <= 2; i++) {
        const img = await faceapi.fetchImage(
          `/labeled_images/${label}/${i}.jpg`
        );
        const detection = await faceapi
          .detectSingleFace(img)
          .withFaceLandmarks()
          .withFaceDescriptor();
        // detectSingleFace returns undefined when no face is found —
        // fail with a message naming the offending image.
        if (!detection) {
          throw new Error(`No face detected in /labeled_images/${label}/${i}.jpg`);
        }
        descriptions.push(detection.descriptor);
      }

      return new faceapi.LabeledFaceDescriptors(label, descriptions);
    })
  );
}

/**
 * Rebuilds a FaceMatcher from JSON-parsed descriptor data
 * (the inverse of serializing LabeledFaceDescriptors with JSON.stringify).
 *
 * @param {{parent: Array<{label: string, descriptors: number[][]}>}} data
 *   parsed JSON; each entry carries a label and its raw descriptor arrays.
 * @returns {Promise<faceapi.FaceMatcher>} matcher over the restored descriptors.
 */
async function createFaceMatcher(data) {
  // The map callback is fully synchronous, so no Promise.all is needed;
  // JSON turns Float32Arrays into plain arrays, so convert them back.
  const labeledFaceDescriptors = data.parent.map((entry) => {
    const descriptors = entry.descriptors.map(
      (descriptor) => new Float32Array(descriptor)
    );
    return new faceapi.LabeledFaceDescriptors(entry.label, descriptors);
  });
  return new faceapi.FaceMatcher(labeledFaceDescriptors);
}
