<template>
  <div class="login">
    <div class="logo"></div>
    <!-- Login panel -->
    <div class="login-box">
      <h1 style="margin-bottom: 0.18rem; text-align: left; color: blue">
        <p class="title">黄金璀璨后台管理系统</p>
      </h1>
      <el-input
        v-model="user"
        style="width: 80%; height: 10%; margin-bottom: 7%"
        placeholder="请输入用户名"
      />
      <el-input
        type="password"
        v-model="password"
        placeholder="请输入密码"
        style="width: 80%; height: 10%; margin-bottom: 7%"
      /><br />
      <p class="forget" @click="tiaozhuan()">忘记密码？点击这里</p>
      <br />
      <div style="width: 100%; display: flex; justify-content: space-around">
        <el-button
          style="width: 28%; background-color: rgb(172, 96, 57); color: white; font-weight: 700"
          @click="userlogin()"
          >登录</el-button
        >
        <el-button
          style="width: 28%; background-color: rgb(172, 96, 57); color: white; font-weight: 700"
          @click="chufa()"
          >人脸识别</el-button
        >
      </div>
    </div>

    <!-- Overlay shown while face recognition is active -->
    <van-overlay :show="show">
      <div class="wrapper">
        <!-- Face-recognition widget -->
        <div @click.stop>
          <div>{{ title }}</div>
          <div class="x-face-detect-modal">
            <!-- BUG FIX: removed `:onCanplay="handleVideoCanPlay"` — no such
                 handler exists in <script setup>, so the binding was an
                 undefined reference. -->
            <video ref="video" autoplay />
            <!-- BUG FIX: `width="{this.width}"` / `height="{this.height}"` were
                 React-style literal attributes (`this` is not available in
                 <script setup>); the browser could not parse them and fell back
                 to the default 300x150 canvas. Removed — detectFace() resizes
                 the canvas via matchDimensions() anyway. Bind
                 :width="boxObject.width" here if a fixed size is wanted. -->
            <canvas ref="canvas" />
          </div>
          <el-button type="primary" @click="facelogin()">开始识别</el-button>
          <el-button type="primary" @click="handleStopVideo()">取消</el-button>
        </div>
        <!-- end -->
      </div>
    </van-overlay>
  </div>
</template>

<script setup  lang="ts">
import './Css/Login.css'
import { useRouter } from 'vue-router'
import { onMounted, ref } from 'vue'
import { Overlay } from 'vant'
import { login, face } from '@/request/api/api.ts'
import {
  detectSingleFace,
  nets,
  matchDimensions,
  resizeResults,
  draw,
  SsdMobilenetv1Options,
  Box
} from 'face-api.js'

const router = useRouter() // router instance for programmatic navigation

let user = ref('') // username input model
let password = ref('') // password input model
const show = ref(false) // whether the face-recognition overlay is visible

// Username/password login.
// Validates the two fields, calls the login API, persists the returned
// tokens/profile, shows a toast, and routes according to the identity.
let userlogin = async () => {
  // Front-end validation before hitting the API (guard clauses).
  if (user.value == '' && password.value == '') {
    ElMessage({ message: `账号和密码不能为空`, type: 'error', plain: true })
    return
  }
  if (user.value == '') {
    ElMessage({ message: `请输入用户名`, type: 'error', plain: true })
    return
  }
  if (password.value == '') {
    ElMessage({ message: `请输入密码`, type: 'error', plain: true })
    return
  }
  let data: any = await login({ user: user.value, password: password.value })
  if (data.code == 200) {
    // Persist tokens and profile info for subsequent requests/pages.
    localStorage.setItem('access_token', data.data.access_token)
    localStorage.setItem('REFRESH_token', data.data.REFRESH_token)
    sessionStorage.setItem('identity', JSON.stringify(data.identity))
    sessionStorage.setItem('authentic_name', JSON.stringify(data.authentic_name))
    sessionStorage.setItem('avatar', JSON.stringify(data.avatar))
    // BUG FIX: the original condition `data.identity == '老板' || '经理'` is
    // always truthy ('经理' is a non-empty string literal), so EVERY identity
    // was routed to the home page. Compare each identity explicitly.
    if (data.identity == '老板' || data.identity == '经理') {
      router.push('/xxk/xxk_home')
      ElMessage({ message: `${data.msg}`, type: 'success', plain: true })
    } else if (data.identity == '客服') {
      router.push('xxk/xxk_order')
      ElMessage({ message: `${data.msg}`, type: 'success', plain: true })
    } else if (data.identity == '柜台姐') {
      router.push('/xxk/Product_inventory')
      ElMessage({ message: `${data.msg}`, type: 'success', plain: true })
    }
  } else {
    ElMessage({ message: `${data.msg}`, type: 'error', plain: true })
  }
}
// Navigate to the phone-verification ("forgot password") page.
let tiaozhuan = () => {
  return router.push('/Forgot_password')
}

// ---- Face-recognition state ----
const formId = 'x-face-detect-form'
const title = ref('人脸识别') // overlay title text
// BUG FIX: the template refs were initialised with the truthy strings
// 'canvas' / 'video', so guards like `if (!canvas.value) return` passed
// before the elements were mounted and calls such as `.getContext('2d')`
// would then run against a string. Initialise with null (the idiomatic Vue
// template-ref initial value) so those guards actually protect anything
// that runs before mount.
const canvas = ref<HTMLCanvasElement | null>(null) // drawing canvas (template ref)
const video = ref<HTMLVideoElement | null>(null) // camera preview element (template ref)
const stream = ref<MediaStream | null>(null) // active camera stream, kept for teardown
const getUserMediaFail = ref(false) // set when camera access is denied/fails
const boxObject = ref({ width: 100, height: 100 }) // default detection-box size
let imageurl = ref('') // base64 JPEG payload of the captured face (no data: prefix)
// Corner coordinates of the view finder (currently unused drawing helpers).
const viewFinderBox = ref({
  topLeft: { x: 0, y: 0 },
  topRight: { x: 0, y: 0 },
  bottomLeft: { x: 0, y: 0 },
  bottomRight: { x: 0, y: 0 }
})
// Load the SSD MobileNet v1 face-detection model; the weight files live in
// the public folder under /weights.
const init = () => nets.ssdMobilenetv1.loadFromUri('/weights')

/**
 * @name Request camera access
 * Requests a camera stream and invokes `success` with it, or `error` when the
 * request fails; falls back through legacy vendor-prefixed getUserMedia APIs.
 * NOTE(review): reads `canvas.value.width/height` for the ideal resolution —
 * assumes the canvas template ref is mounted when this is called; verify.
 */
const getUserMedia = (
  success: NavigatorUserMediaSuccessCallback,
  error: NavigatorUserMediaErrorCallback
) => {
  // Prefer the front camera if available: { video: { facingMode: "user" } }
  // Force the rear camera: { video: { facingMode: { exact: "environment" } } }
  // video: {
  //    width: { min: 1024, ideal: 1280, max: 1920 },
  //    height: { min: 776, ideal: 720, max: 1080 }
  // }
  // "ideal" = the value the browser should try hardest to satisfy.
  const constraints = {
    video: {
      facingMode: 'user',
      width: { ideal: canvas.value.width },
      height: { ideal: canvas.value.height }
    }
  }
  if (navigator.mediaDevices.getUserMedia) {
    // Modern standard API (promise-based)
    navigator.mediaDevices.getUserMedia(constraints).then(success).catch(error)
  } else if (navigator.webkitGetUserMedia) {
    // WebKit-based browsers (legacy callback API)
    navigator.webkitGetUserMedia(constraints, success, error)
  } else if (navigator.mozGetUserMedia) {
    // Firefox (legacy callback API)
    navigator.mozGetUserMedia(constraints, success, error)
  } else if (navigator.getUserMedia) {
    // Oldest unprefixed legacy API
    navigator.getUserMedia(constraints, success, error)
  }
}

/**
 * @name Capture a face snapshot
 * Crops a padded face rectangle out of the current video frame onto an
 * off-screen canvas, stores the base64 JPEG payload in `imageurl`, and
 * resolves with the full canvas content as a Blob.
 * NOTE(review): the source rect (face box + 40px padding on each side) is
 * drawn into a destination sized to the full video, i.e. the crop is
 * stretched — confirm this is what the backend expects.
 */
const cameraShoot = (
  video: HTMLVideoElement,
  startPoint: { x: number; y: number },
  width: number,
  height: number
) => {
  const canvas = document.createElement('canvas')
  canvas.width = video.videoWidth
  canvas.height = video.videoHeight
  canvas
    .getContext('2d')
    ?.drawImage(
      video,
      startPoint.x - 40, // source rect: detected box expanded by 40px
      startPoint.y - 40,
      width + 80,
      height + 80,
      0, // destination rect: the entire off-screen canvas
      0,
      canvas.width,
      canvas.height
    )
  // Keep only the base64 payload (strip the "data:image/jpeg;base64," prefix).
  imageurl.value = canvas.toDataURL('image/jpeg').split(',')[1]
  console.log(imageurl.value)

  return new Promise<Blob | null>((resolve) =>
    // eslint-disable-next-line no-promise-executor-return
    canvas.toBlob(resolve, 'image/jpeg')
  )
}


// Draw a labelled detection box onto the overlay canvas.
const drawBox = (box, label) => {
  if (!canvas.value) return
  // Clear the region the box occupies before drawing the fresh one.
  const ctx = canvas.value.getContext('2d')
  if (ctx) {
    ctx.clearRect(box.x, box.y, box.width, box.height)
  }
  const labelledBox = new draw.DrawBox(box, { label })
  labelledBox.draw(canvas.value)
}

/**
 * @name Face-detection loop
 * Self-recursing async loop: once per animation frame it detects the highest
 * confidence face in the video, draws its box, pauses the preview to capture
 * a snapshot (which updates `imageurl`), resumes, and loops again.
 */
const detectFace = async () => {
  // eslint-disable-next-line no-promise-executor-return
  // IMPORTANT: yield to the next animation frame on every pass — without this
  // the recursion would hard-block the UI thread.
  await new Promise((resolve) => requestAnimationFrame(resolve))
  // Draw the view finder
  // drawViewFinder()
  // Wait (by looping) until the canvas/video are mounted and actually playing.
  if (
    !canvas.value ||
    !video.value ||
    !video.value.currentTime ||
    video.value.paused ||
    video.value.ended
  )
    return detectFace()
  // Detect the face with the highest confidence score in the frame.
  const result = await detectSingleFace(video.value) //options
  if (!result) return detectFace()
  // Match the canvas dimensions to the video element.
  const dims = matchDimensions(canvas.value, video.value, true)
  // Resize the detected box in case the displayed size differs from the original.
  const resizedResult = resizeResults(result, dims)
  const box = resizedResult.box
  // Check that the detection box lies inside the view finder.
  // if (!checkInViewFinder(box)) return detectFace()
  // drawViewFinder()
  // Raw detections could be drawn directly onto the canvas here instead:
  // draw.drawDetections(this.canvas, resizedResult.box);
  drawBox(box, '识别中')
  // Pause so the snapshot is taken from a stable frame.
  video.value.pause()

  // Capture the face snapshot (side effect: updates `imageurl`).
  const image = await cameraShoot(
    video.value,
    resizedResult.box.topLeft,
    resizedResult.box.width,
    resizedResult.box.height
  )
  // console.log(image);

  // Resume the preview and keep looping; facelogin() — not this loop — sends
  // the captured image to the backend.
  video.value.play()
  return detectFace()
}

// Trigger face recognition: open the overlay, request the camera stream,
// load the detection model, then start the detection loop.
let chufa = async () => {
  show.value = true

  // Request the user's camera and wire the stream into the <video> element.
  getUserMedia(
    (streams: any) => {
      stream.value = streams // kept so the tracks can be stopped on close
      if (video.value) {
        video.value['srcObject'] = streams
      }
    },
    () => (getUserMediaFail.value = true)
  )

  // BUG FIX: the original called init() and detectFace() back to back without
  // awaiting, so detectSingleFace() could run before the SSD MobileNet model
  // finished loading. Wait for the model first.
  await init()
  detectFace()
}

// Close the overlay and release every track of the active camera stream.
const handleStopVideo = () => {
  show.value = false
  const active = stream.value
  if (!active) return
  for (const track of active.getTracks()) {
    track.stop()
  }
}

// Face login: send the captured face image to the backend; on success persist
// tokens/profile, route by identity, and shut the camera down.
let facelogin = async () => {
  // BUG FIX: the original sent the ref object itself (`{ imageurl: imageurl }`).
  // JSON-serialising a ref does not yield the base64 string — unwrap .value.
  let data: any = await face({ imageurl: imageurl.value })
  if (data.code == 200) {
    // Persist tokens and profile info for subsequent requests/pages.
    localStorage.setItem('access_token', data.data.access_token)
    localStorage.setItem('REFRESH_token', data.data.REFRESH_token)
    sessionStorage.setItem('identity', JSON.stringify(data.identity))
    sessionStorage.setItem('authentic_name', JSON.stringify(data.authentic_name))
    sessionStorage.setItem('avatar', JSON.stringify(data.avatar))
    // BUG FIX: `data.identity == '老板' || '经理'` is always truthy, so every
    // identity was routed to the home page. Compare each value explicitly.
    if (data.identity == '老板' || data.identity == '经理') {
      router.push('/xxk/xxk_home')
      ElMessage({ message: `${data.msg}`, type: 'success', plain: true })
    } else if (data.identity == '客服') {
      router.push('xxk/xxk_order')
      ElMessage({ message: `${data.msg}`, type: 'success', plain: true })
    } else if (data.identity == '柜台姐') {
      router.push('/xxk/Product_inventory')
      ElMessage({ message: `${data.msg}`, type: 'success', plain: true })
    }
    handleStopVideo() // release the camera and close the overlay
  } else {
    ElMessage({ message: `${data.msg}`, type: 'error', plain: true })
  }
}
</script>

<style lang="scss" scoped>
</style>
