<template>
  <van-nav-bar title="人脸识别" fixed placeholder left-arrow @click-left="onBack"> </van-nav-bar>
  <div class="wrapper">
    <div v-show="showContainer" class="face-capture" id="face-capture">
      <p class="tip">请保持人像在取景框内</p>

      <video
        ref="videoRef"
        id="video"
        :width="vwidth"
        :height="vheight"
        playsinline
        webkit-playsinline
      ></video>

      <canvas ref="canvasRef" id="refCanvas" :width="cwidth" :height="cheight"></canvas>

      <img class="img-cover" src="@/assets/yuanxingtouming.png" alt="" />

      <p class="contentp">{{ scanTip }}</p>
    </div>

    <div v-if="!showContainer" class="img-face">
      <img class="imgurl" :src="imgUrl" />
    </div>
  </div>
</template>

<script setup lang="ts">
import '@/assets/js/tracking-min.js'
import '@/assets/js/data/face-min.js'
import '@/assets/js/data/eye-min.js'
import '@/assets/js/data/mouth-min.js'
import { reactive, ref } from 'vue'
import { onMounted } from 'vue'
import { uploadFile, arcFace, getQiniuToken } from '@/api/realName'
import { showLoadingToast, closeToast, showFailToast } from 'vant'
import 'vant/es/toast/style/index'
// import { faceInfo, conFace } from "@/request/api/my.js";
import { useRoute, useRouter } from 'vue-router'
import { unref } from 'vue'
import { useAppStore } from '@/store/modules/app'

// Pinia app store — supplies the logged-in user's info (username, id)
// used later to name the uploaded photo and call the face-compare API.
const appStore = useAppStore()

const userInfo = appStore.getUserInfo

const { push, back } = useRouter()
// Nav-bar back-arrow handler: return to the previous route.
const onBack = () => {
  back()
}

// Qiniu upload token, fetched once during component setup and used by
// the later photo upload in tackPhoto().
const token = ref()
const getUploadToken = async () => {
  try {
    const res = await getQiniuToken()
    // The backend returns the token in the `message` field.
    token.value = res.message
  } catch (e) {
    // Without a token the photo upload will fail; log so the failure
    // is visible instead of an unhandled rejection.
    console.error('getQiniuToken failed', e)
  }
}
getUploadToken()

const route = useRoute()
// Physical screen size, used to scale the capture area to the device.
const screenSize = reactive({
  width: window.screen.width,
  height: window.screen.height
})
const videoRef = ref(null) // <video> element showing the camera stream
const canvasRef = ref(null) // overlay canvas for detection boxes / snapshot
let URL: any = null // window.URL / webkitURL handle (legacy fallback)
let streamIns: any = null // active MediaStream from getUserMedia
const showContainer = ref(true) // true: live capture view; false: captured photo view
let tracker: any = null // tracking.js ObjectTracker instance
const tipFlag = ref(false) // set after the photo fires; suppresses the "hold still" tip
const flag = ref(false) // ensures the delayed photo is scheduled only once
let context: any = null // 2D context of the overlay canvas
let removePhotoID: any = null // setTimeout id for the delayed photo (cleared in close())
const scanTip = ref('人脸识别中···') // status text shown under the viewfinder
const imgUrl = ref('') // captured photo as a base64 data-URL
let canvas: any = null // raw canvas DOM element (same node as canvasRef)
let trackertask: any = null // handle returned by tracking.track(); used to stop tracking
// Viewfinder dimensions: 266px on a 375px design width, rescaled on mount.
const vwidth = ref(266)
const vheight = ref(266)
const cwidth = ref(266)
const cheight = ref(266)
const orderData = ref({}) // NOTE(review): appears unused in this file — confirm before removing

onMounted(() => {
  // Scale the 266px square capture area from the 375px design width to
  // the actual device width, then start the camera.
  const side = (screenSize.width / 375) * 266
  vwidth.value = side
  vheight.value = side
  cwidth.value = side
  cheight.value = side
  playVideo()
})

// Request the camera with a 500x500 capture area, preferring the
// front-facing camera, then hand control to the success/error callbacks.
const playVideo = () => {
  const constraints = {
    video: {
      width: 500,
      height: 500,
      facingMode: 'user' // front camera preferred
    }
  }
  getUserMedia(constraints, success, error)
}

// 访问用户媒体设备
// Request access to the user's camera.
// constrains: MediaStreamConstraints; success/error: result callbacks.
// Handles both the modern promise-based API and the legacy
// vendor-prefixed APIs.
const getUserMedia = (constrains: any, success: any, error: any) => {
  const nav = navigator as any
  if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    // Modern standard API (promise-based). Guard mediaDevices itself:
    // it is undefined on very old browsers / insecure origins.
    navigator.mediaDevices.getUserMedia(constrains).then(success).catch(error)
    return
  }
  // The legacy vendor APIs are CALLBACK-based, not promise-based: they
  // take (constraints, successCb, errorCb) and return undefined, so the
  // original `.then()` chain on their return value would throw.
  const legacy = nav.webkitGetUserMedia || nav.mozGetUserMedia || nav.getUserMedia
  if (legacy) {
    legacy.call(navigator, constrains, success, error)
  } else {
    scanTip.value = '你的浏览器不支持访问用户媒体设备'
  }
}
// Camera permission granted: attach the stream to the <video> element
// and start face tracking.
const success = (stream: any) => {
  streamIns = stream
  const video = document.getElementById('video') as any
  // Object-URL fallback for older WebKit browsers without srcObject.
  URL = window.URL || window.webkitURL
  if ('srcObject' in video) video.srcObject = stream
  else video.src = URL.createObjectURL(stream)

  // iOS permission dialogs block the JS thread; defer playback slightly
  // so it starts after the dialog is dismissed.
  setTimeout(() => {
    video.play()
    initTracker() // begin face capture
  }, 100)
}

// Camera access denied or unavailable: show a status message.
const error = (e: any) => {
  scanTip.value = '访问用户媒体失败'
}

// 人脸捕捉 设置各种参数 实例化人脸捕捉实例对象,注意canvas上面的动画效果。

// Face capture: configure the tracking.js ObjectTracker, start tracking
// the <video> element, and draw detection rectangles on the overlay
// canvas. Once a face is seen, schedules a single photo after 3 seconds.
const initTracker = function () {
  const canvasDom = unref(canvasRef) as any
  context = canvasDom.getContext('2d') // 2D drawing context for the overlay canvas

  canvas = document.getElementById('refCanvas')

  tracker = new window.tracking.ObjectTracker('face') // tracking.js face tracker instance

  tracker.setInitialScale(4)

  tracker.setStepSize(3) // detection window step size

  tracker.setEdgesDensity(0.1)

  try {
    trackertask = window.tracking.track('#video', tracker) // start tracking the video element
  } catch (e) {
    scanTip.value = '访问用户媒体失败，请重试'
  }

  // Track events fire continuously until a face is detected.

  tracker.on('track', (e: any) => {
    // Clear the canvas before redrawing the detection rectangles.

    context.clearRect(0, 0, canvas.width, canvas.height)

    if (e.data.length === 0) {
      scanTip.value = '未检测到人脸'
    } else {
      e.data.forEach((rect: any) => {
        // Draw a green box around each detected face.

        context.strokeStyle = '#42e365'

        context.lineWidth = 2

        context.strokeRect(rect.x, rect.y, rect.width, rect.height)
      })

      if (!tipFlag.value) {
        scanTip.value = '检测成功，正在拍照，请保持不动3秒'
      }

      // Delay the photo so the user has time to hold still.

      // `flag` ensures the photo is scheduled only once even though
      // track events keep firing until the stream is stopped.

      if (!flag.value) {
        scanTip.value = '拍照中...'

        flag.value = true

        removePhotoID = setTimeout(() => {
          const videoDom = unref(videoRef) as any
          videoDom.pause()
          tackPhoto()
          tipFlag.value = true
        }, 3000)
      }
    }
  })
}

// 拍照

const tackPhoto = function () {
  // 在画布上面绘制拍到的照片

  context.drawImage(document.getElementById('video'), 0, 0, vwidth.value, vwidth.value)

  // 保存为base64格式

  imgUrl.value = saveAsPNG(document.getElementById('refCanvas'))
  showLoadingToast({
    message: '验证中',
    duration: 0
  })
  const domainUrl = 'http://etc.img.xingchuangke.net/'
  let file = getBlobBydataURI(imgUrl.value, 'jpg')
  uploadFile({
    file: file,
    token: token.value,
    key: 'base/' + userInfo.username + '人脸' + new Date().getTime() + '.jpg'
  }).then((res) => {
    arcFace({
      imgFacePath: domainUrl + res.key,
      userId: userInfo.id
    }).then((res) => {
      console.log(res)
      if (res.statusCode == 200) {
        push({
          name: 'esign'
        })

        closeToast()
      } else {

        closeToast()
        showFailToast({ message: res.message, duration: 2000 })
      }
    })
  })
  /** 拿到base64格式图片之后就可以在this.compare方法中去调用后端接口比较了，也可以调用getBlobBydataURI方法转化成文件再去比较

   * 我们项目里有一个设置个人头像的地方，先保存一下用户的图片，然后去拿这个图片的地址和当前拍照图片给后端接口去比较。

   * */

  // this.compare(imgUrl)

  //判断图片大小

  imgSize()

  //  faceToTengXun(); // 人脸比对

  close()
}

// Log the approximate byte size of the captured base64 image.
// Estimate: strip base64 '=' padding, then realBytes ≈ len - len/8*2
// (original formula kept; the two former branches were identical except
// for the padding strip, so they are merged here).
const imgSize = function () {
  if (!imgUrl.value) return

  const equalIndex = imgUrl.value.indexOf('=') // index of first '=' padding char
  // Strip padding when present; otherwise measure the whole string.
  const str = equalIndex > 0 ? imgUrl.value.substring(0, equalIndex) : imgUrl.value
  const strLength = str.length
  const fileLength = strLength - (strLength / 8) * 2 // estimated raw byte size
  const size = Math.floor(fileLength / 1024) // KB, rounded down
  console.log('size', size + 'KB')

  // if (size > 1024) {
  //   // 图片超过1M 按比例压缩
  //   imgUrl.value = document.getElementById('refCanvas')?.toDataURL('image/png', 1024 / size)
  // }
}

// Base64转文件

// Convert a base64 data-URL into a Blob with the given MIME type.
// NOTE(review): the caller currently passes 'jpg', which is not a valid
// MIME type ('image/jpeg' / 'image/png') — confirm the upload backend
// ignores the Blob type before fixing the call site.
const getBlobBydataURI = function (dataURI: any, type: any) {
  // Strip the "data:...;base64," prefix and decode the payload.
  const binary = window.atob(dataURI.split(',')[1])
  // Fill a typed array directly instead of pushing onto a number[]
  // and copying (avoids the intermediate array).
  const bytes = new Uint8Array(binary.length)
  for (let i = 0; i < binary.length; i++) {
    bytes[i] = binary.charCodeAt(i)
  }
  return new Blob([bytes], {
    type: type
  })
}

// compare(url) {

//   let blob = this.getBlobBydataURI(url, 'image/png')

//   let formData = new FormData()

//   formData.append("file", blob, "file_" + Date.parse(new Date()) + ".png")

//   // TODO 得到文件后进行人脸识别

// },

// 保存为png,base64格式图片

// Serialize a canvas to a base64 PNG data-URL at full quality.
const saveAsPNG = (c: any) => c.toDataURL('image/png', 1)

// Tear down the capture session: reset flags, switch to the photo view,
// cancel the pending photo timer, and release camera/tracker resources.
const close = function () {
  flag.value = false
  tipFlag.value = false
  showContainer.value = false
  context = null
  scanTip.value = '人脸识别中...'

  clearTimeout(removePhotoID)

  if (streamIns) {
    streamIns.enabled = false
    // Stop EVERY track — the original only stopped the first one
    // (getTracks()[0]), which could leave the camera running when the
    // stream carries more than one track.
    streamIns.getTracks().forEach((track: any) => track.stop())
  }
  streamIns = null

  // trackertask stays null if window.tracking.track() threw in
  // initTracker; guard so close() doesn't crash in that path.
  if (trackertask) {
    trackertask.stop()
  }
  tracker = null
}
</script>

<style>
.wrapper {
  min-height: 100vh;
  background-color: #fff;
}
.face-capture {
  display: flex;

  flex-direction: column;

  align-items: center;

  justify-content: center;
}

.tip {
  position: fixed;

  top: 45px;

  z-index: 5;

  font-size: 18px;

  font-family:
    PingFangSC-Medium,
    PingFang SC;

  font-weight: 500;

  color: #333333;

  line-height: 25px;
}

.face-capture video,
.face-capture canvas {
  position: fixed;

  top: 117.5px;

  object-fit: cover;

  z-index: 2;

  background-repeat: no-repeat;

  background-size: 100% 100%;
}

.face-capture .img-cover {
  position: fixed;

  top: 63px;

  width: 375px;

  height: 375px;

  object-fit: cover;

  z-index: 3;

  background-repeat: no-repeat;

  background-size: 100% 100%;
}

.face-capture .contentp {
  position: fixed;

  top: 438px;

  font-size: 18px;

  font-weight: 500;

  color: #333333;
}

.face-capture .rect {
  border: 2px solid #0aeb08;

  position: fixed;

  z-index: 4;
}

.img-face {
  display: flex;

  flex-direction: column;

  align-items: center;

  justify-content: center;
}

.img-face .imgurl {
  position: fixed;

  top: 117.5px;

  width: 266px;

  height: 266px;

  border-radius: 133px;
}
</style>
