<script setup lang="ts">
import { nextTick, onMounted, ref, shallowRef, unref } from 'vue'
import {
  loadTinyFaceDetectorModel,
  detectSingleFace,
  TinyFaceDetectorOptions
} from 'face-api.js'

// Events: 'notSupported' when the camera can't be opened, 'finished' with the recorded file
const emit = defineEmits(['notSupported', 'finished'])

// Bound to the <video> :src but never reassigned — the stream is attached via srcObject instead
const url = ref('')
// Whether a face is currently detected in frame (drives the overlay hint + styling)
const hasFace = ref(false)
// Recording state machine tokens; Symbols guarantee each state is unique
const RECORD_STATUS = shallowRef({
  default: Symbol(),
  inRecording: Symbol(),
  end: Symbol()
})
// Current recording state, starts idle
const recordStatus = ref(unref(RECORD_STATUS).default)


/**
 * 获取当前设备支持的视频编码和格式
 */
/**
 * Probe which video container/codec combinations this device's
 * MediaRecorder can record.
 *
 * @returns the supported MIME-type strings, in probe order (preferred
 *          container/codec combinations first).
 */
const getSupportedMimeTypes = (): string[] => {
  const VIDEO_TYPES = ['webm', 'ogg', 'mp4', 'x-matroska']
  const VIDEO_CODECS = [
    'vp9',
    'vp9.0',
    'vp8',
    'vp8.0',
    'avc1',
    'av1',
    'h265',
    'h.265',
    'h264',
    'h.264',
    'opus'
  ]

  // Explicitly typed: an untyped `[]` is implicitly any[] under strict mode
  const supportedTypes: string[] = []
  VIDEO_TYPES.forEach(videoType => {
    const type = `video/${videoType}`
    VIDEO_CODECS.forEach(codec => {
      // Try both the standard `codecs=` form and the legacy `codecs:` form,
      // in lower and upper case — browser support is inconsistent.
      const variations = [
        `${type}`,
        `${type};codecs=${codec}`,
        `${type};codecs:${codec}`,
        `${type};codecs=${codec.toUpperCase()}`,
        `${type};codecs:${codec.toUpperCase()}`
      ]
      variations.forEach(variation => {
        // MediaRecorder.isTypeSupported reports whether this exact string is recordable
        if (MediaRecorder.isTypeSupported(variation))
          supportedTypes.push(variation)
      })
    })
  })
  return supportedTypes
}
// Template ref for the <video> element
const refVideo = ref()
// Active camera stream and its recorder; shallowRef — no deep reactivity needed
const mediaStream = shallowRef<MediaStream>()
const mediaRecorder = shallowRef<MediaRecorder>()

/**
 * @description 打开摄像头后获得媒体流，初始化MediaRecorder
 */
const openUserMedia = () => {
  const supportedMimeTypes = getSupportedMimeTypes()
  console.log(
    '支持的编码有 : '
  )
  console.table(supportedMimeTypes)
  const video = unref(refVideo)
  window.navigator.mediaDevices
    .getUserMedia({
      video: {
        aspectRatio: { ideal: 1 },
        facingMode: 'user' // 优先调前置摄像头
      },
      audio: false
    })
    .then(stream => {
      mediaStream.value = stream
      let winURL = window.URL || window.webkitURL
      if ('srcObject' in unref(refVideo)) {
        video.srcObject = stream
      } else {
        video.src = winURL.createObjectURL(stream)
      }
      video.muted = true
      video.onloadedmetadata = () => {
        if (video) {
          video.play()
          nextTick(onPlay)
        }
      }
      mediaRecorder.value = new MediaRecorder(stream, {
        mimeType: supportedMimeTypes[0],
        videoBitsPerSecond: 2500000
      })
    })
    .catch((err) => {
      console.error(err)
      video?.pause()
      closeStream()
      emit('notSupported')
    })
}
/**
 * @description 关闭媒体流输出
 */
/**
 * Stop every track on the active media stream, releasing the camera.
 */
const closeStream = () => {
  const stream = unref(mediaStream)
  if (stream?.getTracks) {
    for (const track of stream.getTracks()) {
      track.stop()
    }
  }
}

const recordedBlob = ref<BlobPart[]>([])
/**
 * 开始录制
 */
const startRecorder = () => {
  if (unref(recordStatus) !== unref(RECORD_STATUS).default) {
    return
  }
  // 每次数据准备好后将切片数据保存，录制结束后合并
  unref(mediaRecorder)!.ondataavailable = ({ data }) => {
    recordedBlob.value.push(data)
  }
  // 指定时间切片，1s生成一个BlobPart
  unref(mediaRecorder)!.start(1000)
  // 设置录制状态为录制中
  recordStatus.value = unref(RECORD_STATUS).inRecording
  // 倒计时
  countDown(recordEnd)()
}
// Handle of the pending countdown timeout (null when no countdown is running)
let countTimer: ReturnType<typeof setTimeout> | null = null
// Seconds remaining in the current recording countdown
const time = ref(5)
/**
 * Build a self-rescheduling one-second countdown that decrements `time`
 * and invokes `endCallback` once it reaches zero.
 *
 * @param endCallback invoked when the countdown finishes
 * @returns a function that starts (or continues) the countdown
 */
const countDown = (endCallback?: () => void) => {
  return function tick() {
    countTimer = setTimeout(() => {
      // NOTE: no clearTimeout here — the timer has already fired, clearing
      // it (as the original did) was dead code.
      time.value--
      // `<= 0` so a countdown that somehow starts at 0 still terminates
      if (time.value <= 0) {
        endCallback && endCallback()
      } else {
        tick()
      }
    }, 1000)
  }
}
/**
 * Stop the recorder, merge the recorded chunks into a named mp4 file,
 * and emit 'finished' with the file plus a reset callback.
 */
const recordEnd = () => {
  recordStatus.value = unref(RECORD_STATUS).end
  const recorder = unref(mediaRecorder)
  if (!recorder) {
    return
  }
  recorder.onstop = () => {
    // File is a Blob with a real `name` property — replaces the type-unsafe
    // `blob.name = 'face.mp4'` assignment on a plain Blob.
    const blob = new File(recordedBlob.value, 'face.mp4', { type: 'video/mp4' })
    emit('finished', {
      blob,
      reset,
      from: 'videoByRtc'
    })
  }
  if (recorder.state !== 'inactive') {
    recorder.stop()
  }
}
/**
 * Face-detection loop: while the video is available and the recording has
 * not ended, run TinyFaceDetector roughly every 100ms and update
 * `hasFace` so the UI can prompt the user to stay in frame.
 */
const onPlay = async () => {
  const video = unref(refVideo)
  if (unref(recordStatus) === unref(RECORD_STATUS).end || !video) {
    return
  }
  // Skip detection while the video is paused or ended; retry shortly
  if (video.paused || video.ended) {
    return setTimeout(() => onPlay(), 100)
  }
  // Continuously scan for a face; the UI warns when none is in frame
  const faceDetectionTask = await detectSingleFace(
    video,
    new TinyFaceDetectorOptions({
      inputSize: 512,
      scoreThreshold: 0.1 // low threshold: tolerate weak detections rather than nag the user
    })
  )
  // (leftover debug console.log removed)
  hasFace.value = !!faceDetectionTask
  setTimeout(() => onPlay(), 100)
}
const reset = () => {
  recordedBlob.value = []
  recordStatus.value = unref(RECORD_STATUS).default
  onPlay()
}
onMounted(async () => {
  // Load the face-api.js TinyFaceDetector model BEFORE opening the camera:
  // openUserMedia() starts the onPlay detection loop as soon as the video
  // plays, and detectSingleFace fails if the model is not loaded yet.
  await loadTinyFaceDetectorModel(
    '/webrtc/models'
  )
  openUserMedia()
})

</script>

<template>
  <div class="video-verify-container">
    <div class="video-container">
      <!-- Live camera preview; 'not-face' styling is applied while no face is detected -->
      <video
        ref="refVideo"
        class="video"
        playsinline
        :class="{ 'not-face': !hasFace }"
        :src="url"
      ></video>
      <!-- Prompt the user to move their face into the viewfinder -->
      <div class="face-tip" v-if="!hasFace">请调整脸部位置，置于取景框内</div>
    </div>
    <!-- Shooting tips: face in frame, no hat/glasses, good lighting -->
    <div class="tip-box flex flex-col items-start!">
      <div>1、面孔处于取景框内</div>
      <div>2、请取下帽子和眼镜</div>
      <div>3、拍摄光线充足</div>
    </div>
    <!-- Start button; shows a spinner with the countdown while recording -->
    <div class="action-box">
      <div class="btn" @click="startRecorder">
        <template v-if="recordStatus === RECORD_STATUS.default">
          开始录制
        </template>
        <van-loading v-else size="18" color="#fff" type="spinner">录制中{{time}}</van-loading>
      </div>
    </div>
  </div>
</template>

<style scoped lang="scss">
@import "../faceRecognition.scss";
</style>
