<!--
  人脸识别 (Face recognition) — captures the user's face via webcam and
  compares it against the registered photo before entering the exam.
-->
<template lang="pug">
  div.face-recognise
    div.video-w
      video#video.video(:width="videoWidth" :height="videoHeight")
      div.tt 请目视摄像头调整到合适的位置
      div.user-mk
      div.msg
        div {{faceMessage.msgStr}}
</template>

<script type="text/ecmascript-6">
  import * as faceapi from 'face-api.js'
  import Ftp from 'ftp'
  import fs from 'fs'
  import {mapGetters} from 'vuex'

  export default {
    name: 'FaceRecognise',
    props: {
      // Base file name (without extension) used for the locally saved photo
      // and as the prefix of the uploaded FTP file name (see savePhoto).
      photoSaveName: {
        type: String,
        default: ''
      }
    },
    data() {
      return {
        // face-api.js detector to use: 'ssdMobilenetv1' | 'tinyFaceDetector' | 'mtcnn'
        nets: 'ssdMobilenetv1',
        faceMatcher: null,
        detImgEl: null, // <img> holding the cropped face area fed to computeFaceDescriptor
        userVideo: null, // the <video id="video"> element, set in openCamera()
        // euclidean distance between live face and registered photo; 1 is the
        // "no match / fallback" value forced by autoIntoExam()
        distance: 1,
        videoWidth: 800,
        videoHeight: 600,
        second: 3,
        // UI state machine driving faceMessage: 0 starting camera, 2 no face,
        // 3 hold still, 4 recognising, 5 camera open, 6/7 done
        hasFaceState: 0,
        faceMtSecond: 3,
        faceMt: null, // timer handle cleared in faceRecognition()
        faceOptions: null, // detector options built in initExtraction()
        sampleArr: [],
        checkFaceCnt: 0, // consecutive frames with a face; >= 10 triggers recognition
        fileName: '',
        msg: '',
        // local save directory (Electron __static); Windows-style separators
        photoSavePath: `${__static}\\photo\\`,
        videoStream: null, // MediaStream from getUserMedia, stopped in handlerCloseCamera()
        faceApiServer: window.KalixConfig.faceModelsURL, // base URL of the face-api models
        userPhotoUrl: window.KalixConfig.photoURL, // base URL of registered user photos
        userPhotoImg: new Image(), // registered photo, loaded in getUserPhoto()
        faceErrorCnt: 0, // consecutive frames without 10 stable detections (see imgElOnLoad)
        mtAutoIntoExam: null, // 60 s fallback timer that saves a photo regardless of match
        userPhotoImgFlag: true, // true when the registered photo loaded successfully
        saveFlag: false // set once savePhoto() ran; stops the screenshot loop
      }
    },
    mounted() {
      this.$nextTick(() => {
        this.initExtraction().then(() => {
          this.openCamera().then(() => {
            if (this.userPhotoImgFlag) {
              setTimeout(() => {
                this.videoScreenshot()
              }, 200)
            }
            this.autoIntoExam()
          }).catch(() => {
            this.autoIntoExam()
          })
        })
      })
    },
    methods: {
      autoIntoExam() {
        // Fallback: after 60 seconds force the "no match" distance and save
        // the photo anyway, so recognition failures never block the user.
        this.mtAutoIntoExam = setTimeout(() => {
          // auto-enter the exam after 60 seconds
          this.distance = 1
          this.savePhoto()
        }, 1000 * 60)
      },
      /**
       * Face-extraction entry point — grabs one screenshot of the video feed;
       * imgEl.onload (imgElOnLoad) then keeps the detect/recognise loop alive.
       */
      faceExtraction() {
        this.videoScreenshot()
      },
      /**
       * 初始化参数
       */
      async initExtraction() {
        // console.log('0,初始化参数')
        await faceapi.nets[this.nets].loadFromUri(this.faceApiServer)
        await faceapi.loadFaceLandmarkModel(this.faceApiServer)
        await faceapi.loadFaceRecognitionModel(this.faceApiServer)
        this.canvasEl = document.createElement(`canvas`)
        this.canvasE2 = document.createElement(`canvas`)
        this.context = this.canvasEl.getContext('2d')
        this.context2 = this.canvasE2.getContext('2d')
        this.canvasEl.width = this.videoWidth
        this.canvasEl.height = this.videoHeight
        this.canvasE2.width = 300
        this.canvasE2.height = 400
        switch (this.nets) {
          case 'ssdMobilenetv1':
            this.faceOptions = new faceapi.SsdMobilenetv1Options({
              minConfidence: 0.5
            })
            break
          case 'tinyFaceDetector':
            this.faceOptions = new faceapi.TinyFaceDetectorOptions({
              inputSize: 512,
              scoreThreshold: 0.5
            })
            break
          case 'mtcnn':
            this.faceOptions = new faceapi.MtcnnOptions({
              minFaceSize: 20, // 1 ~ 50
              scaleFactor: 0.709
            })
            break
        }
        this.imgEl = new Image()
        this.detImgEl = new Image()
        this.imgEl.onload = () => {
          this.imgElOnLoad()
        }
        this.userPhotoImgFlag = await this.getUserPhoto()
      },
      // Runs every time a full video frame has been written to imgEl (see
      // videoScreenshot). Crops the central 300x400 face area into canvasE2,
      // feeds it to detImgEl (used later by computeFaceDescriptor) and advances
      // the detection counters:
      //  - after 10 consecutive frames in the "face present" state (3),
      //    recognition starts via getFace();
      //  - below that threshold (and while nothing has been saved yet) the
      //    frame goes to checkFace(), which schedules the next screenshot,
      //    keeping the loop alive until saveFlag is set.
      imgElOnLoad() {
        // source rect (250,0,300,400) — the area under the on-screen face mask
        this.context2.drawImage(this.imgEl, 250, 0, 300, 400, 0, 0, 300, 400)
        const faceImgSrc = this.canvasE2.toDataURL('image/png')
        this.detImgEl.src = faceImgSrc
        if (this.hasFaceState === 3) {
          this.checkFaceCnt += 1
          this.faceErrorCnt = 0
          if (this.checkFaceCnt >= 10) {
            // enough stable frames — run recognition on the full frame
            this.getFace(this.imgEl.src)
          }
        }
        if (this.checkFaceCnt < 10 && !this.saveFlag) {
          this.faceErrorCnt += 1
          this.checkFace(faceImgSrc)
        }
      },
      /**
       * 获取用户登记照片
       */
      getUserPhoto() {
        return new Promise((resolve) => {
          this.userPhotoImg.crossOrigin = 'anonymous'
          this.userPhotoImg.src = `${this.userPhotoUrl}${this.userLoginName}.gif`
          this.userPhotoImg.onload = () => {
            resolve(true)
          }
          this.userPhotoImg.onsuccess = () => {
            resolve(true)
          }
          this.userPhotoImg.onerror = () => {
            resolve(false)
          }
        })
      },
      /**
       * 截取视频图像
       */
      videoScreenshot() {
        this.context.drawImage(this.userVideo, 0, 0, 800, 600)
        this.imgEl.src = this.canvasEl.toDataURL('image/png')
        // this.imgEl.onload = () => {
        //   this.context2.drawImage(this.imgEl, 250, 0, 300, 400, 0, 0, 300, 400)
        //   const faceImgSrc = this.canvasE2.toDataURL('image/png')
        //   this.detImgEl.src = faceImgSrc
        //   if (this.hasFaceState === 3) {
        //     this.checkFaceCnt += 1
        //     this.faceErrorCnt = 0
        //     if (this.checkFaceCnt >= 10) {
        //       // 获取人脸
        //       this.getFace(videoImg)
        //     }
        //   }
        //   if (this.checkFaceCnt < 10) {
        //     this.faceErrorCnt += 1
        //     this.checkFace(faceImgSrc)
        //   }
        //   console.log('faceErrorCnt', this.faceErrorCnt)
        //   if (this.faceErrorCnt === 15) {
        //     this.distance = 1
        //     this.savePhoto()
        //   }
        // }
      },
      /**
       * 检查人脸
       */
      checkFace(faceImgSrc) {
        // console.log('3,检查人脸')
        const tempFaceImg = new Image()
        tempFaceImg.src = faceImgSrc
        tempFaceImg.onload = async () => {
          const detections = await faceapi.detectSingleFace(tempFaceImg, this.faceOptions)
          if (detections) {
            this.hasFaceState = 3
          } else {
            this.hasFaceState = 2
            this.checkFaceCnt = 0
          }
          setTimeout(() => {
            this.videoScreenshot()
          }, 300)
        }
      },
      async checkHasFace(img) {
        const detections = await faceapi.detectSingleFace(img, this.faceOptions)
        return detections
      },
      /**
       * 获取人脸
       * @returns {Promise<void>}
       */
      getFace(faceImgSrc) {
        // console.log('获取人脸')
        const faceImg = new Image()
        faceImg.src = faceImgSrc
        faceImg.onload = async () => {
          this.faceRecognition()
        }
      },
      /**
       * 人脸识别
       */
      async faceRecognition() {
        clearInterval(this.faceMt)
        this.hasFaceState = 4
        setTimeout(async () => {
          // this.orgImgEl = this.orgImg()
          this.desc = [
            await faceapi.computeFaceDescriptor(this.userPhotoImg),
            await faceapi.computeFaceDescriptor(this.detImgEl)
          ]
          this.distance = await faceapi
            .euclideanDistance(this.desc[0], this.desc[1])
            .toFixed(2)
          this.hasFaceState = 6
          // 取消自动进程
          // clearTimeout(this.mtAutoIntoExam)
          this.savePhoto()
        }, 1000)
      },
      /**
       * Open the webcam via getUserMedia, attach the stream to the <video>
       * element and start playback. On any failure (no camera, permission
       * denied) an alert is shown and the error is swallowed — the returned
       * promise still resolves.
       * @returns {Promise<void>}
       */
      async openCamera() {
        try {
          this.videoStream = await navigator.mediaDevices.getUserMedia({
            video: {
              width: this.videoWidth,
              height: this.videoHeight
            },
            audio: false
          })
          this.userVideo = document.querySelector('#video')
          this.userVideo.srcObject = this.videoStream
          this.userVideo.onloadedmetadata = () => {
            this.userVideo.play()
            this.hasFaceState = 5 // "camera open" message
          }
        } catch (e) {
          // message: "no camera detected" — shown for ANY getUserMedia failure
          alert('没有检查到摄像头！')
        }
      },
      orgImg() {
        const tempImg = new Image()
        tempImg.src = `${this.userPhotoUrl}${this.userLoginName}.jpg`
        tempImg.crossOrigin = 'anonymous'
        return tempImg
      },
      /**
       * 保存照片
       */
      async savePhoto() {
        let data = this.canvasEl.toDataURL('image/png', 1)
        const photoName = `${this.photoSaveName}-${this.distance}-${new Date().getTime()}`
        this.filePath = this.photoSavePath + `${this.photoSaveName}.png`
        let base64 = data.replace(/^data:image\/\w+;base64,/, '')
        let dataBuffer = Buffer.alloc(base64.length, base64, 'base64')
        let err = fs.writeFileSync(this.filePath, dataBuffer)
        if (err) {
          // console.log(err)
        } else {
          // console.log('写入成功！')
        }
        await this.uploadFtp(photoName)
        this.saveFlag = true
        this.handlerCloseCamera()
      },
      /**
       * Upload the locally saved photo to the configured FTP server.
       * Always resolves (with the error, if any) — never rejects — so the
       * caller's `await` cannot throw.
       * @param {string} _fileName remote file name without extension
       * @returns {Promise<Error|undefined>}
       */
      uploadFtp(_fileName) {
        return new Promise(resolve => {
          let c = new Ftp()
          c.on('ready', () => {
            // NOTE(review): the remote path uses backslashes — this presumably
            // targets a Windows FTP server; confirm against the server config.
            c.put(this.filePath, `\\photo\\${_fileName}.png`, err => {
              if (err) {
                // upload failed; error is passed back through resolve(err)
              }
              c.end()
              resolve(err)
            })
          })
          c.connect({
            host: window.KalixConfig.ftphost,
            user: window.KalixConfig.ftpuser,
            password: window.KalixConfig.ftppwd
          })
          c.on('error', (err) => {
            resolve(err)
          })
        })
      },
      handlerCloseCamera() {
        const stream = this.videoStream.getTracks()[0]
        stream && stream.stop()
        this.$emit('photographed')
      }
    },
    computed: {
      ...mapGetters(['userLoginName']),
      faceMessage() {
        let msgStr = ''
        let msgState = ''
        switch (this.hasFaceState) {
          case 0:
            msgStr = '正在启动摄像头'
            break
          case 2:
            msgStr = '没有检测到人脸'
            break
          case 3:
            msgStr = '请保持当前姿势'
            break
          case 4:
            msgStr = '正在识别中'
            break
          case 5:
            msgStr = '摄像头已打开'
            break
          case 6:
            msgStr = '完成，谢谢配合。'
            break
          case 7:
            msgStr = '完成，谢谢配合。'
            break
          default :
            break
        }
        return {msgStr, msgState}
      }
    }
  }
</script>

<style scoped lang="stylus" rel="stylesheet/stylus">
  // full-screen fixed overlay hosting the camera view
  .face-recognise
    position fixed
    top 0
    left 0
    width 100%
    height 100%

  // 800x600 bordered video frame, centred horizontally
  .video-w {
    position: relative;
    width: 800px;
    height: 600px;
    margin: 50px auto;
    box-sizing: content-box;
    border: 5px solid #FF3e50;
    background-color #000000;
  }

  // layers stacked over the video element
  .video, .msg, .user-mk {
    position: absolute;
    top: 0;
    left: 0;
  }

  // instruction line above the frame
  .tt
    position absolute
    width: 800px;
    text-align center
    font-size 20px
    height 20px
    top -35px
    left 0

  // face-alignment mask image overlay
  .user-mk {
    background: url("../assets/images/user.png") 0 0 no-repeat;
    background-size: 100% 100%;
    width: 100%;
    height: 100%;
  }

  // status message bar below the frame
  .msg {
    width: 100%;
    color: #333333;
    top: auto;
    font-size: 40px;
    bottom: -65px;
    height: 60px;
    text-align: center;
    line-height: 60px;

    &.error {
      color: #ff0000;
    }

    &.success {
      color: #00ff00;
    }

    .second {
      font-family: "Times New Roman";
      padding: 0 10px;
    }
  }


</style>
