<template>
  <!-- Webcam feed with a face-api.js overlay canvas and a fatigue alert. -->
  <div id="expression" style="position: relative; width: 1124px; height: 600px; overflow: hidden;">
    <div class="rec-label">REC</div>
    <!-- width/height attributes take unitless pixel values ("800px" is invalid
         and falls back to the default size); muted + playsinline keep autoplay
         from being blocked by browser media policies. -->
    <video ref="video" width="800" height="660" autoplay muted playsinline></video>
    <canvas ref="canvas" style="position: absolute;"></canvas>
    <div class="alert-container">
      <el-alert v-if="fatigue" title="别打哈欠，认真听课" type="warning"></el-alert>
    </div>
  </div>
</template>

<script>
import * as faceapi from 'face-api.js';

export default {
  data() {
    return {
      countTired:0,
      mouthOpen: false,
      mouthOpenStartTime: null,
      fatigue: false, // 新增的疲劳状态变量
      scale:1,
      inputSize:512,

    };
  },
  mounted() {
    this.init();
  
  },
  beforeDestroy() {
    // 清理定时器
    clearInterval(this.intervalId);
  },
  methods: {
    async init() {
      await this.loadModels();

      const video = this.$refs.video;
      const canvas = this.$refs.canvas;
      canvas.width = 800;
canvas.height = 660;
      this.startVideo(video, canvas);
    },
    async loadModels() {
      try {
        await Promise.all([
          faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
          faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
          faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
          faceapi.nets.faceExpressionNet.loadFromUri('/models'),
          console.log('model')
        ]);
      } catch (error) {
        console.error('加载 face-api.js 模型时出错:', error);
        throw error;
      }
    },
    startVideo(video, canvas) {
      navigator.mediaDevices.getUserMedia({ video: true })
        .then(stream => {
          video.srcObject = stream;

          video.addEventListener('loadedmetadata', () => {
            this.initVideo(video, canvas);
          });
        })
        .catch(err => {
          console.error('获取摄像头流时出错:', err);
        });
    },
    initVideo(video, canvas) {
      const videoWidth = video.videoWidth;
      const videoHeight = video.videoHeight;

      if (videoWidth === 0 || videoHeight === 0) {
        video.addEventListener('loadedmetadata', () => {
          this.initVideoAfterMetadata(video, canvas);
        });
      } else {
        this.initVideoAfterMetadata(video, canvas);
      }
    },
    initVideoAfterMetadata(video, canvas) {
      const videoWidth = video.videoWidth;
      const videoHeight = video.videoHeight;

      if (videoWidth === 0 || videoHeight === 0) {
        console.error('无效的视频尺寸');
        return;
      }

      const displaySize = {
        width: videoWidth,
        height: videoHeight
      };

      faceapi.matchDimensions(canvas, displaySize);
 // 在视频上方左上角添加 "REC" 标签
 const recLabel = document.querySelector('.rec-label');
      recLabel.style.position = 'absolute';
      recLabel.style.top = '10px';
      recLabel.style.left = '10px';
      recLabel.style.fontSize = '20px';
      recLabel.style.color = 'red';
      recLabel.style.zIndex = '2'; // 设置 "REC" 标签在视频上方
      this.intervalId = setInterval(async () => {
        // console.log('定时器执行中');

        try {
          const detections = await faceapi.detectAllFaces(video, 
          new faceapi.TinyFaceDetectorOptions({ 
            inputSize:256,
             scoreThreshold: 0.5,
              scale: 1 }))
            .withFaceLandmarks()
            .withFaceExpressions();

          // console.log('检测结果：', detections);

          if (detections.length === 0) {
            // console.log('未检测到人脸');
          } else {
            console.log('检测到人脸数量：', detections.length);
          }

          const resizedDetections = faceapi.resizeResults(detections, displaySize);

          // console.log('调整大小后的检测结果：', resizedDetections);

          canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height);

          faceapi.draw.drawDetections(canvas, resizedDetections);
          faceapi.draw.drawFaceLandmarks(canvas, resizedDetections);
          faceapi.draw.drawFaceExpressions(canvas, resizedDetections);

          // 获取嘴巴张开概率
          const mouthOpenProbability = resizedDetections[0]?.expressions?.surprised ?? 0;

          // 设置一个阈值，当嘴巴张开概率大于该阈值时认为嘴巴张开
          const mouthOpenThreshold = 0.2;

          if (mouthOpenProbability > mouthOpenThreshold) {
            // 如果嘴巴之前是闭合状态，记录下开始时间
            if (!this.mouthOpen) {
              this.mouthOpenStartTime = new Date();
            }

            this.mouthOpen = true;
          } else {
            this.mouthOpen = false;
               
             

            // 如果嘴巴之前是张开状态，计算时间差
            if (this.mouthOpenStartTime) {
              const mouthOpenTime = new Date() - this.mouthOpenStartTime;
               console.log(mouthOpenTime)
              // 设置一个时间阈值，当嘴巴张开时间超过该阈值时认为疲劳
              const fatigueThreshold = 1000; // 毫秒

              if (mouthOpenTime > fatigueThreshold) {
                console.log('疲劳状态');
               
                // 在这里触发疲劳状态的处理逻辑
                this.countTired++
                this.fatigue = true; // 设置疲劳状态变量为 true
              } else {
                this.fatigue = false; // 否则设置为 false
              }

              this.mouthOpenStartTime = null;
            }
          }
        } catch (error) {
          console.error('人脸检测时出错:', error);
        }
        //给父组件传状态数组
        this.$emit('my-event', );
      }, 100);
    }
  }
};
</script>

<style>
/* Center the video inside the fixed-size container. */
#expression {
  margin: -52px;
  padding: 52px;
  display: flex;
  align-items: center;
  justify-content: center;
}

.fatigue-text {
  font-size: 24px;
  color: red;
  position: absolute;
  top: 20px;
  left: 20px;
}

/* Center the warning alert over the video, above it in stacking order. */
.alert-container {
  position: absolute;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%);
  width: 50%; /* half of the video width */
  z-index: 1;
}

/* "REC" badge in the top-left corner of the video.
   The original selector used `::v-deep`, which is only valid inside a
   *scoped* <style> block; in this global stylesheet the browser treats it
   as an unknown pseudo-element and drops the entire rule. */
#expression .rec-label {
  position: absolute;
  top: 10px;
  left: 10px;
  font-size: 20px;
  color: red;
  z-index: 299;
}
</style>







