<template>
  <div>
    <h3>FR 人脸识别</h3>
    <button @click="initEngine">初始化引擎</button>
    <button @click="getVersion">获取版本信息</button>
    <hr>
    <h3>FD 人脸检测</h3>
    <button @click="FD_init">初始化引擎</button>
    <button @click="FD_un">销毁引擎</button>
    <hr>
    <h3>
      <button @click="startFace">实例</button>
      <button @click="match">人脸比对（图片）</button>
      <button @click="readFace">人脸比对（文件）</button>
      <button @click="FT_Detect">人脸跟踪（视频）</button>
      <button @click="openVideo">打开摄像头</button>
    </h3>
    <div class="face-warp">
      <!-- Left column: images to compare, plus the live-video preview -->
      <div class="left">
        <b>被比较的人脸</b>
        <div class="img-list">
          <!-- keyed v-for added; alt="" marks the demo images as decorative -->
          <div v-show="false" class="img" style="width: 100%" v-for="(item, index) in left_img" :key="index"><img :src="item" alt=""></div>
          <div class="video-warp">
            <!-- rectangles tracking faces in the live video (see getBorder) -->
            <div class="border-warp" v-if="faces.length">
              <div class="border" v-for="(item, index) in faces" :key="index" :style="getBorder(item)"></div>
            </div>
            <video class="video"></video>
          </div>
          <div class="img">
            <div class="rectangle"></div>
          </div>
        </div>
      </div>
      <!-- Right column: reference images ("known face features") -->
      <div class="right">
        <b>已有脸部特征</b>
        <div class="img-list">
          <div class="img" v-for="(item, index) in right_img" :key="index"><img :src="item" alt=""></div>
        </div>
      </div>
    </div>
  </div>
</template>

<script>
  import ref from 'ref'
  import FaceBase from '../../sdk/arcface/FaceBase'
  import FaceFR from '../../sdk/arcface/FaceFR'
  import FaceFD from '../../sdk/arcface/FaceFD'
  import FaceFT from '../../sdk/arcface/FaceFT'
  import fs from 'fs'
  import ffi from 'ffi'
  import Datastore from 'nedb'

  export default {
    name: 'Home',
    data() {
      // Component state: ArcSoft credentials, native engine handles and
      // the image lists / face rectangles rendered by the template.
      return {
        // ArcSoft application ID (shared by all three engines)
        APPID: 'HFJf1mwGzwmK9NRSFRidjomtVt2bJwKXbKScRw2NxZfa',
        // Face-recognition (FR) SDK key
        FR_SDKKEY: '9vzwxdiQSxCdbkT1sUraGWqPYxf7m6jyd4Bvc9oSVXH6',
        // Face-detection (FD) SDK key
        FD_SDKKEY: '9vzwxdiQSxCdbkT1sUraGWptuMcRhSTbKUPyDM4K37mH',
        // Face-tracking (FT) SDK key
        FT_SDKKEY: '9vzwxdiQSxCdbkT1sUraGWpmjxMG8Vp5FhaYUqfEBrt6',
        // Native handle of the face-recognition engine (set by initEngine)
        hFREngine: null,
        // Native handle of the face-detection engine (set by FD_init)
        hFDEngine: null,
        // Native handle of the face-tracking engine (set by FT_init)
        hFTEngine: null,
        // Images shown in the left column ("faces to compare")
        left_img: ['./static/face/1.jpg'],
        // Reference images shown in the right column
        right_img: [
          './static/face/4.jpg',
          './static/face/5.jpg',
          './static/face/1.jpg',
          './static/face/7.jpg'
        ],
        // Face rectangles detected in the live video (filled by openVideo)
        faces: []
      }
    },
    created() {
      // Bring up all three ArcSoft engines as soon as the component exists.
      this.initEngine()
      this.FD_init()
      this.FT_init()

      // Load the text-to-speech DLL via ffi; 'speak' takes a C string and
      // returns an int status code.
      let libname = './libs/voice.dll'
      let Library = ffi.Library(libname, {
        'speak': [ref.types.int, [ref.types.CString]]
      })
      console.log(Library)
      // let a = Library.speak('从结果中我们看到')
      // console.log(a)
      // this.openDB()
    },
    methods: {
      openDB() {
        let db = {}
        db.users = new Datastore('./users.db')
        db.users.loadDatabase()
        let doc = {
          hello: 'world',
          n: 5,
          today: new Date(),
          nedbIsAwesome: true,
          notthere: null,
          fruits: ['apple', 'orange', 'pear'],
          infos: {name: 'nedb'}
        }
        db.users.insert(doc, function (err, newDoc) {
          console.log(err)
          console.log(newDoc)
        })
      },
      // 初始化人脸识别引擎
      initEngine() {
        // 分配给引擎使用的内存大小
        let FR_WORKBUF_SIZE = 400 * 1024 * 1024

        // 分配给引擎使用的内存地址
        let pFRWorkMem = FaceBase.malloc(FR_WORKBUF_SIZE)

        // 引擎handle
        let phFREngine = ref.ref(Buffer.alloc(FaceBase.MIntPtr_t.size))
        var ret = FaceFR.AFR_FSDK_InitialEngine(this.APPID, this.FR_SDKKEY, pFRWorkMem, FR_WORKBUF_SIZE, phFREngine)
        console.log('初始化人脸识别引擎:' + ret)
        if (ret !== 0) {
          FaceBase.free(pFRWorkMem)
          console.log('初始化引擎失败:' + ret)
          process.exit()
        }
        this.hFREngine = ref.deref(phFREngine)
      },
      // 人脸识别引擎版本
      getVersion() {
        let pVersionFR = FaceFR.AFR_FSDK_GetVersion(this.hFREngine)
        let versuonFR = pVersionFR.deref()
        console.log(versuonFR)
      },
      FD_un() {
        let ret = FaceFR.AFR_FSDK_UninitialEngine(this.hFREngine)
        console.log(ret)
      },
      // 获取脸部特征（图片文件）
      img_extractFRFeature(imgpath, callback) {
        FaceBase.loadImage(imgpath, (err, inputImage) => {
          if (err) throw err
          FaceFD.process(this.hFDEngine, inputImage, (err, asvl, faces) => {
            if (err) throw err
            this.FR_extractFRFeature(asvl, faces, faceFeature => {
              callback(faceFeature)
            })
          })
        })
      },
      // Extract the FR feature of the FIRST detected face and hand a
      // heap-backed copy of it to `callback(faceFeatureCopy)`.
      // asvl  - native image structure from FaceFD.process / FaceFT.detect
      // faces - detection result; only faces.info[0] is consumed
      // On a non-zero SDK return code only a message is logged and the
      // callback is never invoked.
      FR_extractFRFeature(asvl, faces, callback) {
        // Bounding box and orientation of the first detected face
        let face = faces.info[0]
        let faceRes = new FaceFR.AFR_FSDK_FACERES()
        faceRes.rcFace.left = face.left
        faceRes.rcFace.top = face.top
        faceRes.rcFace.right = face.right
        faceRes.rcFace.bottom = face.bottom
        faceRes.lOrient = face.orient

        // Output model, filled by the extract call below
        let faceFeature = new FaceFR.AFR_FSDK_FACEMODEL()
        faceFeature.lFeatureSize = 0
        faceFeature.pbFeature = ref.NULL

        let ret = FaceFR.AFR_FSDK_ExtractFRFeature(this.hFREngine, asvl.ref(), faceRes.ref(), faceFeature.ref())

        if (ret !== 0) {
          console.log('获取脸部特征失败:' + ret)
        } else {
          // Copy the feature bytes out of the SDK-filled buffer before
          // handing them to the caller.
          // NOTE(review): the malloc'd copy is never freed anywhere visible
          // here — confirm the leak-per-extraction is acceptable for this demo.
          let faceFeatureCopy = new FaceFR.AFR_FSDK_FACEMODEL()
          faceFeatureCopy.lFeatureSize = faceFeature.lFeatureSize
          faceFeatureCopy.pbFeature = FaceBase.malloc(faceFeatureCopy.lFeatureSize)
          FaceBase.memcpy(faceFeatureCopy.pbFeature.address(), faceFeature.pbFeature.address(), faceFeatureCopy.lFeatureSize)
          // console.log(faceFeature)
          callback(faceFeatureCopy)
        }
      },
      // 初始化人脸检测引擎
      FD_init() {
        let FD_WORKBUF_SIZE = 20 * 1024 * 1024
        let pFDWorkMem = FaceBase.malloc(FD_WORKBUF_SIZE)
        let phFDEngine = ref.ref(Buffer.alloc(FaceBase.MIntPtr_t.size))
        let ret = FaceFD.AFD_FSDK_InitialFaceEngine(this.APPID, this.FD_SDKKEY, pFDWorkMem, FD_WORKBUF_SIZE, phFDEngine, FaceFD.OrientPriority.AFD_FSDK_OPF_0_HIGHER_EXT, 16, 50)
        console.log('初始化人脸检测引擎:' + ret)
        if (ret !== 0) {
          FaceBase.free(pFDWorkMem)
          console.log('初始化人脸检测引擎失败:' + ret)
          process.exit()
        }
        this.hFDEngine = ref.deref(phFDEngine)
      },
      // 据输入的图像检测出人脸位置，一般用于静态图像检测
      faceDetection(img_path, callback) {
        FaceBase.loadImage(img_path, (err, inputImage) => {
          if (err) throw err
          FaceFD.process(this.hFDEngine, inputImage, (err, asvl, faces) => {
            if (err) throw err
            callback(asvl, faces)
          })
        })
      },
      startFace() {
        this.left_img.forEach((value, index) => {
          this.faceDetection(value, (asvl, faces) => {
            if (faces.nFace) {
              let face = faces.info[0]
              let img = document.querySelectorAll('.left .img img')[index]
              this.addBorder(face, img)
            } else {
              console.log('未检测到人脸')
            }
          })
        })
        this.right_img.forEach((value, index) => {
          this.faceDetection(value, (asvl, faces) => {
            if (faces.nFace) {
              let face = faces.info[0]
              let img = document.querySelectorAll('.right .img img')[index]
              this.addBorder(face, img)
            } else {
              console.log('未检测到人脸')
            }
          })
        })
      },
      // 人脸比对
      match() {
        let a, b
        this.img_extractFRFeature('./static/face/5.jpg', (faceFeature) => {
          a = faceFeature
          let data = Buffer.alloc(a.lFeatureSize)
          FaceBase.memcpy(data.address(), a.pbFeature.address(), a.lFeatureSize)
          fs.writeFile('./1.dat', data, err => {
            if (err) throw err
          })
          this.img_extractFRFeature('./static/face/7.jpg', (faceFeature) => {
            b = faceFeature

            let data = Buffer.alloc(b.lFeatureSize)
            FaceBase.memcpy(data.address(), b.pbFeature.address(), b.lFeatureSize)
            fs.writeFile('./2.dat', data, err => {
              if (err) throw err
            })

            let pfSimilScore = Buffer.alloc(ref.sizeof.float)
            pfSimilScore.type = ref.refType(ref.types.float)
            pfSimilScore.writeFloatLE(0, 0.0)
            let ret = FaceFR.AFR_FSDK_FacePairMatching(this.hFREngine, a.ref(), b.ref(), pfSimilScore)
            if (ret !== 0) {
              console.log('脸部特征比较失败:' + ret)
            }
            console.log('图片相似度', pfSimilScore.readFloatLE(0))
          })
        })
      },
      readFace() {
        fs.readFile('./1.dat', (err, data) => {
          if (err) throw err
          // 提取的脸部特征信息
          let faceFeatureA = new FaceFR.AFR_FSDK_FACEMODEL()
          faceFeatureA.lFeatureSize = data.length
          faceFeatureA.pbFeature = data
          console.log('本地脸部特征信息A', faceFeatureA)

          fs.readFile('./2.dat', (err, data) => {
            if (err) throw err
            // 提取的脸部特征信息
            let faceFeatureB = new FaceFR.AFR_FSDK_FACEMODEL()
            faceFeatureB.lFeatureSize = data.length
            faceFeatureB.pbFeature = data
            console.log('本地脸部特征信息B', faceFeatureA)

            let pfSimilScore = Buffer.alloc(ref.sizeof.float)
            pfSimilScore.type = ref.refType(ref.types.float)
            pfSimilScore.writeFloatLE(0, 0.0)
            let ret = FaceFR.AFR_FSDK_FacePairMatching(this.hFREngine, faceFeatureA.ref(), faceFeatureB.ref(), pfSimilScore)
            if (ret !== 0) {
              console.log('脸部特征比较失败:' + ret)
            }
            console.log('本地相似度', pfSimilScore.readFloatLE(0))
          })
        })
      },
      addBorder(face, img) {
        let img_warp = img.parentElement
        let width = img.naturalWidth
        let height = img.naturalHeight
        let display_width = img.clientWidth
        let display_height = img.clientHeight

        let l = (face.left * display_width / width) | 0
        let r = (face.right * display_width / width) | 0
        let t = (face.top * display_height / height) | 0
        let b = (face.bottom * display_height / height) | 0

        let face_rectangle = document.createElement('div')
        face_rectangle.setAttribute('class', 'rectangle')
        face_rectangle.setAttribute('style', 'top: ' + (t) + 'px; left:' + (l) + 'px;width:' + (r - l) + 'px;height:' + (b - t) + 'px;')
        img_warp.appendChild(face_rectangle)
      },
      FT_init() {
        let FT_WORKBUF_SIZE = 30 * 1024 * 1024
        let pFTWorkMem = FaceBase.malloc(FT_WORKBUF_SIZE)
        let phFTEngine = ref.ref(Buffer.alloc(FaceBase.MIntPtr_t.size))
        let ret = FaceFT.AFT_FSDK_InitialFaceEngine(this.APPID, this.FT_SDKKEY, pFTWorkMem, FT_WORKBUF_SIZE, phFTEngine, FaceFD.OrientPriority.AFD_FSDK_OPF_0_HIGHER_EXT, 16, 50)
        console.log('初始化人脸跟踪引擎:' + ret)
        if (ret !== 0) {
          FaceBase.free(pFTWorkMem)
          console.log('初始化人脸跟踪引擎失败:' + ret)
          process.exit()
        }
        this.hFTEngine = ref.deref(phFTEngine)
      },
      // 人脸跟踪
      FT_Detect() {
        FaceBase.loadImage(this.left_img[0], (err, img) => {
          if (err) throw err
          FaceFT.detect(this.hFTEngine, img, (err, asvl, faces) => {
            if (err) throw err
            this.FR_extractFRFeature(asvl, faces, faceFeature => {
              let data = Buffer.alloc(faceFeature.lFeatureSize)
              FaceBase.memcpy(data.address(), faceFeature.pbFeature.address(), faceFeature.lFeatureSize)
              fs.writeFile('./3.dat', data, err => {
                if (err) throw err
                console.log('人脸特征已经保存在文件./3.dat中')
              })
            })
          })
        })
      },
      // Open the webcam and run live face tracking on the preview: on every
      // recorder data tick (60 ms timeslice) the current video frame is
      // drawn onto an offscreen canvas, encoded to JPEG bytes, fed to the FT
      // engine, and the detected rectangles are published to this.faces for
      // the template overlay.
      openVideo() {
        let _this = this
        // Requested capture resolution
        let width = 470
        let height = 280
        let constraints = {audio: false, video: {width, height}}
        navigator.mediaDevices.getUserMedia(constraints).then(mediaStream => {
          var options = {
            mimeType: 'video/webm;codecs=h264'
          }
          let video = document.querySelector('.video-warp .video')
          // The MediaRecorder serves only as a periodic tick source here:
          // the event payload `e` is never read below.
          let mediaRecorder = new MediaRecorder(mediaStream, options)
          // Offscreen canvas used to snapshot video frames
          let canvas = document.createElement('canvas')
          let ctx = canvas.getContext('2d')
          // var reader = new FileReader()
          video.width = width
          video.height = height
          canvas.width = width
          canvas.height = height
          video.srcObject = mediaStream
          video.onloadedmetadata = function (e) {
            video.play()
          }

          // Fire ondataavailable roughly every 60 ms
          mediaRecorder.start(60)
          mediaRecorder.ondataavailable = function (e) {
            _this.faces = []
            ctx.clearRect(0, 0, width, height)
            ctx.drawImage(video, 0, 0, width, height)
            // Strip the data-URL prefix and decode the base64 JPEG payload
            let data = canvas.toDataURL('image/jpg')
            data = Buffer.from(data.replace(/.+,/, ''), 'base64')
            FaceBase.loadImage(data, (err, img) => {
              if (err) throw err
              FaceFT.detect(_this.hFTEngine, img, (err, asvl, faces) => {
                if (err) throw err
                // NOTE(review): asvl is discarded here; nothing below uses
                // it, but confirm the native struct needs no cleanup.
                asvl = {}
                if (faces.nFace === 0) {
                  console.log('没有检测到人脸')
                }
                _this.faces = faces.info
              })
            })
          }
        }).catch(err => {
          console.log('打开摄像头失败', err.name + ':' + err.message)
        })
      },
      getBorder(info) {
        return {
          top: `${info.top}px`,
          left: `${info.left}px`,
          width: `${info.right - info.left}px`,
          height: `${info.bottom - info.top}px`
        }
      }
    }
  }
</script>

<style>
  /* NOTE(review): "warp" in these class names is presumably a typo for
     "wrap"; the names are referenced from the template and from JS
     querySelector calls, so they are left unchanged. */
  /* Two-column layout: left "faces to compare", right "known features". */
  .face-warp {
    display: flex;
    width: 100%;
    height: 300px;
    overflow: hidden;
  }

  /* The two columns share the row equally */
  .left, .right {
    flex: 1;
    padding: 10px;
  }

  .img-list {
    border: 1px solid #ccc;
    height: 100%;
  }

  /* Image tile; relative so .rectangle can be absolutely positioned over it */
  .img-list .img {
    position: relative;
    float: left;
    width: 50%;
    overflow: hidden;
  }

  .img-list img {
    display: inline-block;
    width: 100%;
  }

  /* Red box drawn by addBorder() around a detected face */
  .rectangle {
    position: absolute;
    border: 2px solid #f00;
  }

  .video-warp {
    width: 100%;
    height: 100%;
    position: relative;
  }

  /* Video, its overlay layer and canvas all fill the wrapper */
  .video-warp .video, .video-warp .border-warp, .video-warp .canvas {
    position: absolute;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
  }

  /* Live tracking rectangles positioned by getBorder() */
  .video-warp .border-warp .border {
    position: absolute;
    display: flex;
    border: 1px solid #f00;
    z-index: 999;
  }
</style>
