<template>
  <view class="wapper">
    <view class="box"> </view>
    <view class="text">请根据语音提示完成以下动作：左转、右转、抬头、低头、正视</view>
    <view>
      <view class="btn btn1" @click="shibie()">开始验证</view>
      <view class="btn btn2" @click="back()">返回</view>
    </view>
  </view>

  <tabBar :currentPage="4"></tabBar>
  <view class="mask" v-show="isShow">
    <view class="quxiao" @click="quxiao()"> X </view>
    <!-- 微信小程序 -->
    <camera
      v-if="isWxMiniProgram"
      device-position="back"
      flash="off"
      binderror="onError"
    ></camera>
    <!-- H5 -->
    <div v-if="isH5" class="camera-container photo">
      <video style="width: 320px;height:240px;"  id="camera" v-show="isVideoShow" autoplay></video>
      <!-- <button @click="capture">Capture</button> -->
      <view class="box" v-show="isVideoShow">
        <view class="smilBox"></view>
      </view>
      <canvas v-show="!isVideoShow" id="canvas" ></canvas>
    </div>
    <view v-show="!isVideoShow" style="text-align:center;font-size: 30rpx;color: #fff;">
      人脸识别完成
      <view @click="quxiao()" style="font-size: 35rpx;background: #007AFF;padding: 10rpx 20rpx;border-radius: 10rpx;margin: 20rpx;"> 确认</view>
    </view>
    <!-- App -->
    <view v-if="isApp">
      <button @click="openCamera">Open Camera</button>
    </view>

    <!-- <view>
      测试拍照图片
      <image
      class="img"
        :src="image"
        mode="scaleToFill"
      />
    </view> -->
  </view>
</template>

<script setup>
import { ref, onMounted } from "vue";
import { onHide } from "@dcloudio/uni-app";
import tabBar from "/components/tabBar/tabBar";

// 引入face-api.js中的方法

import {
  detectAllFaces,  //
  TinyFaceDetectorOptions,
  bufferToImage,
  detectSingleFace,
  nets,
  matchDimensions,
  resizeResults,  //
  draw,
  SsdMobilenetv1Options, //引入ssd模型
  Box,
} from "face-api.js";

// Platform detection — uni-app sets VUE_APP_PLATFORM at build time.
let isWxMiniProgram = process.env.VUE_APP_PLATFORM === "mp-weixin";
let isH5 = process.env.VUE_APP_PLATFORM === "h5";
let isApp = process.env.VUE_APP_PLATFORM === "app-plus";

const options = new SsdMobilenetv1Options({
  // Minimum confidence threshold for the SSD MobileNet v1 detector.
  // Library default: 0.5
  minConfidence: 0.5,
});
const videoRef = ref(null);  // the <video> DOM element (resolved in onMounted)
const canvasRef = ref(null);  // the <canvas> DOM element (resolved in onMounted)
let isShow = ref(false); // whether the face-recognition mask overlay is visible
let isVideoShow = ref(true) // whether the live video (vs. the captured canvas) is shown
let image = ref(); // captured frame as a data URL (set by capture())
const boxObject = ref({ width: 100, height: 100 }); // detection box size — appears unused in this file; verify before removing
const viewFinderBox = ref({
  topLeft: {
    x: 0,
    y: 0,
  },
  topRight: {
    x: 0,
    y: 0,
  },
  bottomLeft: {
    x: 0,
    y: 0,
  },
  bottomRight: {
    x: 0,
    y: 0,
  },
}); // viewfinder corner coordinates — appears unused in this file; verify before removing

onMounted(async () => {
  // NOTE(review): these lookups assume the element with id "camera"/"canvas"
  // is a wrapper that CONTAINS a native <video>/<canvas> child (uni-app's H5
  // renderer wraps media tags in custom elements). In plain HTML,
  // getElementById("camera") would already BE the <video> and the nested
  // getElementsByTagName would return undefined — confirm on the H5 build.
  videoRef.value = document.getElementById("camera").getElementsByTagName("video")[0];
  canvasRef.value = document.getElementById("canvas").getElementsByTagName("canvas")[0];
  // Camera initialization is deferred to shibie(); the call below stays disabled.
  // initH5Camera();
});

// H5 only: request camera permission and pipe the media stream into the
// <video> element held by videoRef. Alerts the user when access fails.
function initH5Camera() {
  const mediaDevices = navigator.mediaDevices;

  // Guard: bail out early on browsers without getUserMedia support.
  if (!mediaDevices || !mediaDevices.getUserMedia) {
    console.error("getUserMedia not supported in this browser.");
    alert("浏览器不支持摄像头访问，请使用支持的浏览器。");
    return;
  }

  mediaDevices
    .getUserMedia({ video: true })
    .then((stream) => {
      videoRef.value.srcObject = stream;
      // Start playback once the stream metadata (dimensions etc.) is ready.
      videoRef.value.onloadedmetadata = function (e) {
        videoRef.value.play();
      };
    })
    .catch((err) => {
      if (err.name === "NotAllowedError") {
        console.error("Permission denied: ", err);
        alert("摄像头权限被拒绝，请在浏览器设置中允许访问摄像头。");
      } else {
        console.error("Error accessing camera: ", err);
      }
    });

  // NOTE(review): face-api model loading used to live here and is still
  // disabled — detectFace() cannot work until the models are loaded, e.g.:
  //   Promise.all([
  //     nets.ssdMobilenetv1.loadFromUri('/models'),
  //     nets.tinyFaceDetector.loadFromUri('/models'),
  //     nets.faceLandmark68Net.loadFromUri('/models'),
  //     nets.faceRecognitionNet.loadFromUri('/models'),
  //   ])
}

// Render the current video frame onto the canvas, store it as a PNG data
// URL in `image`, then stop the camera and release the stream.
function capture() {
  // Size the canvas to the native video resolution so nothing is cropped.
  canvasRef.value.width = videoRef.value.videoWidth;
  canvasRef.value.height = videoRef.value.videoHeight;
  console.log(canvasRef.value.width , canvasRef.value.height,'视频的宽高');
  const ctx = canvasRef.value.getContext("2d");
  // Draw at the full canvas size. The destination size was hard-coded to
  // 200x150, which left the rest of the full-resolution canvas blank.
  ctx.drawImage(videoRef.value, 0, 0, canvasRef.value.width, canvasRef.value.height);
  image.value = canvasRef.value.toDataURL("image/png");
  // Stop the camera tracks and hide the live video.
  close()
  // Detach the stream so the browser can free the device.
  videoRef.value.srcObject = null;
}
// Draw a labelled rectangle around a detected face on the capture canvas.
const drawBox = (box, label) => {
  const canvas = canvasRef.value;
  if (!canvas) return;
  // Erase whatever was previously drawn in this region before redrawing.
  const context = canvas.getContext('2d');
  context?.clearRect(box.x, box.y, box.width, box.height);
  new draw.DrawBox(box, { label: label }).draw(canvas);
};

// Face-detection loop: grab frames from the live video, find the most
// confident face, snapshot it, extract its descriptor (feature vector) and
// upload it for server-side verification. Recurses until a face is handled.
const detectFace = async () => {
  // Yield to the browser between iterations — prevents the recursive loop
  // from starving the render thread (critical: avoids freezing the page).
  await new Promise((resolve) => requestAnimationFrame(resolve))

  if (
    !canvasRef.value ||
    !videoRef.value ||
    !videoRef.value.currentTime ||
    videoRef.value.paused ||
    videoRef.value.ended
  )
    return detectFace()

  // Detect the single face with the highest confidence score.
  const result = await detectSingleFace(videoRef.value, options)
  if (!result) return detectFace()

  // Match the canvas to the displayed video size, then rescale the detection
  // box in case the displayed image differs from the original resolution.
  const dims = matchDimensions(canvasRef.value, videoRef.value, true)
  const resizedResult = resizeResults(result, dims)
  const box = resizedResult.box
  drawBox(box, '识别中')
  // was `video.value.pause()` — `video` is undefined; the ref is videoRef.
  videoRef.value.pause()

  // Snapshot the face from the paused video.
  // NOTE(review): cameraShoot is not defined anywhere in this file — confirm
  // it is provided by a shared helper before relying on this path.
  const snapshot = await cameraShoot(videoRef.value)

  // Failure check moved BEFORE the snapshot is used (it originally ran after
  // the blob had already been wrapped in a File and detected against).
  if (!snapshot) {
    drawBox(box, '识别失败')
    // was `delay(1000)` — delay is undefined; use an inline timeout promise.
    await new Promise((resolve) => setTimeout(resolve, 1000))
    videoRef.value.play()
    return detectFace()
  }

  // Blob -> File so face-api can convert it into an <img> element.
  // (renamed from `image`, which shadowed the module-level `image` ref)
  let files = new window.File([snapshot], '人脸头像.jpeg', {
    type: 'image/jpeg'
  })
  let img = await bufferToImage(files)

  // Extract descriptors for every face found in the snapshot.
  const detections = await detectAllFaces(img, new TinyFaceDetectorOptions()).withFaceLandmarks().withFaceDescriptors()

  if (detections.length > 0) {
    // was `ctx.drawImage(img, 0, 0, canvas.width, canvas.height)` with
    // undefined `ctx`/`canvas` — use the canvas ref instead.
    const ctx = canvasRef.value.getContext('2d')
    ctx.drawImage(img, 0, 0, canvasRef.value.width, canvasRef.value.height)
    // Sequential for..of instead of forEach(async ...) so each upload
    // completes before the next starts and errors propagate to the caller.
    for (const detection of detections) {
      let data = detection.descriptor
      console.log(data)
      // Upload the descriptor; the server compares it and returns a verdict.
      // NOTE(review): uploadImg is not defined in this file — confirm.
      const detectResult = await uploadImg(data)
    }
  } else {
    console.log('未检测到人脸')
    videoRef.value.play()
    return detectFace()
  }
}


// App (plus runtime) only: take a photo with the native camera.
function openCamera() {
  const onSuccess = (path) => {
    plus.io.resolveLocalFileSystemURL(path, (entry) => {
      const localUrl = entry.toLocalURL();
      // TODO: handle the captured image path.
    });
  };
  const onFailure = (error) => {
    console.error("Capture image failed: ", error);
  };
  plus.camera.getCamera().captureImage(onSuccess, onFailure);
}

// Stop the camera stream attached to the <video> element and hide the video.
function close() {
  if(!videoRef.value.srcObject){
    // No active stream — nothing to stop.
    return
  }
  // Stop EVERY track in the stream. The original stopped only the first
  // track, which can leave other tracks (e.g. audio) live and keep the
  // camera/mic indicator on.
  videoRef.value.srcObject.getTracks().forEach((track) => track.stop());
  isVideoShow.value = false  // hide the live video, reveal the captured canvas
}
// Open the face-verification overlay; on H5, also start the camera and
// capture a frame after the stream has had time to warm up.
function shibie() {
  isShow.value = true;
  isVideoShow.value = true;
  if (!isH5) return;
  //#ifdef H5
  initH5Camera();
  // Give the stream ~2s before grabbing a frame.
  setTimeout(() => {
    capture();
  }, 2000);
  //#endif
}

// Close the face-verification overlay; on H5 also stop the camera first.
function quxiao() {
  if (isH5) {
    close();
  }
  // Reset unconditionally. It was only reset inside the H5 branch, which
  // left the mask stuck open on mini-program and app builds.
  isShow.value = false;
}

onHide(() => {
  // uni-app lifecycle: when the page is hidden, release the camera and
  // close the overlay.
  quxiao();
});

// Navigate back to the previous page in the uni-app page stack.
function back() {
  uni.navigateBack();
}
</script>

<style lang="scss" scoped>
// 人脸识别遮罩层
.mask {
  position: fixed;
  left: 0;
  top: 0;
  width: 100vw;
  height: 100vh;
  background-color: rgba(0, 0, 0, 0.5);
  z-index: 999;
}

.wapper {
  background-color: rgb(242, 242, 242);
  height: calc(100vh - 91rpx);
  overflow: hidden;

  .box {
    height: 600rpx;
    width: 450rpx;
    // background-color: rgb(209, 236, 255);
    background-image: url("https://cdn7.axureshop.com/demo/1996612/images/%E4%BA%BA%E8%84%B8%E8%AF%86%E5%88%AB%E9%80%9A%E8%A1%8C/u10681.png");
    background-size: cover;

    margin: 200rpx auto;
    margin-bottom: 50rpx;
    position: relative;
  }
  .text {
    width: 80%;
    margin: auto;
    color: rgb(80, 79, 79);
    font-size: 25rpx;
    margin-bottom: 50rpx;
  }
  .btn {
    height: 90rpx;
    width: 100vw;
    background-color: aqua;
    line-height: 90rpx;
    text-align: center;
    margin: 30rpx 0;
    color: #fff;
  }
  .btn1 {
    background-color: rgb(245, 154, 35);
  }
  .btn2 {
    background-color: rgb(170, 170, 170);
  }
}

// 取消按钮的样式
.quxiao {
  margin: 30rpx;
  font-size: 50rpx;
  color: #fff;
  text-align: end;
}


.photo {
  width: 100vw;
  height: 100vw;
  // background-color: #fff;
  display: flex;
  // flex-direction: column;
  // align-items: center;
  // justify-content: center;
  position: relative;
  // 视频的位置
  #camera{
    width: 320px;
    height: 240px;
    // background-color: aqua;
    // position: absolute;
    top: 0;
    left: 0;
    right: 0;
    bottom: 0;
    margin: auto;
    // margin-left: -160px;
    video {
    width: 100px;
    height: 100px;
    // object-fit: fill;
    object-fit:contain;
    // transform: scaleX(-1);
  }
  }
  // 扫描层的位置
  .box{
    // display: none;
    width: 200px;
    height:200px;
    // position: absolute;
    // background-color: aqua;
    border: 1px solid blue;
    top: 50%;
    left: 50%;
    // bottom: 50%;
    // right: 50%;
    position: absolute;
    margin-top: -100px;
    margin-left: -100px;
    .smilBox{
      width: 80%;
      height: 1px;
      background-color: aqua;
      margin: auto;
    }
    // 定义一个小盒子的关键帧动画
    @keyframes move {
      0% {
        // opacity: 1;
        transform: translateY(0);
      }
      50% {
        transform: translateY(100px);
        // opacity: 0;
      }
      100% {
        transform: translateY(200px);
        // opacity: 1;
      }
    }
    .smilBox{
      animation: move 1.5s linear infinite alternate;
    }
  }

  img{
    width: 100%;
  }

  // 画布的位置
#canvas{
  // background-color: aqua;
  width: 320px;
  height: 240px;
    top: 0;
    left: 0;
    right: 0;
    bottom: 0;
    margin: auto;

  canvas {
    width: 100%;
    height: 100%;
    position: absolute;
 
  }
}

  

}



</style>
