<!-- Face detection (video feed + landmark overlay) -->
<!-- Body / posture detection (child component) -->
<template>
  <div class="container">
    <div class="video-canvas-container">
      <!-- Mirrored webcam feed; the canvas draws the face mesh on top of it -->
      <video ref="videoRef" autoplay playsinline></video>
      <canvas ref="canvasRef" class="output_canvas canvas"></canvas>
    </div>
    <!-- Torso/pose detection; its events are re-emitted as face-action-detected -->
    <body-detection
      ref="bodyRef"
      :interview-type="interviewType"
      :video-ref="videoRef"
      @body-action-detected="handleBodyActionDetected"
    />
  </div>
</template>

<script setup lang="ts">
/**
 * Blink / lip-press (face action) detection
 */
//============================================================================================================
import { ref, onMounted, onBeforeUnmount } from 'vue'
import vision from 'https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.3'

const { FaceLandmarker, FilesetResolver, DrawingUtils } = vision

interface Props {
  interviewType: string
}

const commonStore = useCommonStore()
// Provide default values for props
const props = withDefaults(defineProps<Props>(), {})

// Emits: emotion-detected carries the emotion label plus per-emotion percentages
const emit = defineEmits(['emotion-detected', 'face-action-detected'])

// <video> element
const videoRef = ref(null)
// <canvas> overlay element
const canvasRef = ref(null)
// BodyDetection child component
const bodyRef = ref(null)

// Latch: currently in a lip-press (prevents double counting within one gesture)
let isMouthPucker = false
// Latch: currently in a blink
let isEyeBlink = false
// Latch: currently looking toward the upper left
let isEyeUpLeft = false
// Latch: currently looking toward the upper right
let isEyeUpRight = false
// Blink counter
let blinkCount = 0
// Lip-press counter
let mouthPuckerCount = 0
// Look-up-left counter
let eyeUpLeftCount = 0
// Look-up-right counter
let eyeUpRightCount = 0

// Action decision thresholds (tune empirically)
const BLINK_THRESHOLD = 0.5 // blink blendshape score
const PUCKER_THRESHOLD = 0.12 // lip-press blendshape score
const EYE_UP_LEFT_THRESHOLD = 0.6 // look up-left score
const EYE_UP_RIGHT_THRESHOLD = 0.6 // look up-right score

let faceLandmarker = null
let runningMode = 'VIDEO'

// Master switch for all detection loops
const shouldDetect = ref(false)
// Off-screen double-buffer canvas (created lazily in predictWebcam)
let bufferCanvas

/**
 * Upload the collected face-action statistics to the backend.
 * Routes to the technical-interview endpoint when interviewType is '0',
 * otherwise to the HR-interview endpoint.
 * @param faceActionList list of { faceAction, counter } entries
 * @returns {Promise<void>}
 */
const sendFaceAction = async (faceActionList) => {
  const payload = { faceActionList: faceActionList }
  if (props.interviewType === '0') {
    // Technical interview
    await getFaceActionUsingPost(payload)
  } else {
    // HR interview
    await getHrFaceActionUsingPost(payload)
  }
}

/**
 * Begin face-action detection, body detection and micro-expression analysis.
 * Counters are NOT reset here — only the per-gesture latches are cleared.
 */
const startFace = () => {
  shouldDetect.value = true

  // Clear the per-gesture latches; the counters keep accumulating on purpose.
  isMouthPucker = false
  isEyeBlink = false
  isEyeUpLeft = false
  isEyeUpRight = false

  // Facial landmark / action loop
  predictWebcam()

  // Torso detection in the child component
  bodyRef.value?.startBodyDetection()

  // Start micro-expression detection slightly later so the two model
  // pipelines do not initialise at the same instant and interfere.
  setTimeout(() => {
    runDetection()
    console.log('微表情检测已启动')
  }, 200)

  console.log('面部检测和微表情检测已开始')
}

/**
 * Stop all detection (face actions, body, micro-expressions), clear the
 * overlay, release the render loop and upload the face-action statistics.
 *
 * Note: EYE_BLINK is reported as a rate (blinkCount divided by the elapsed
 * store time, as a string), while the other actions are raw counts.
 */
const stopFace = () => {
  // Elapsed time from the shared store, used to normalise the blink count
  const time = commonStore.globalTime
  shouldDetect.value = false

  // Clear the overlay canvas (and the off-screen buffer)
  clearCanvas()

  // Stop body detection; the child component uploads its own data
  bodyRef.value?.stopBodyDetection()

  // Release the render loop
  if (animationFrameId) {
    cancelAnimationFrame(animationFrameId)
  }

  // Stop micro-expression detection (this uploads the emotion samples)
  stopDetection()

  // Drop the cached landmarks so a restart begins fresh
  lastValidLandmarks = null

  // Guard against a zero/missing elapsed time: dividing by 0 would produce
  // "Infinity"/"NaN" and send that garbage to the server.
  const blinkRate = time > 0 ? (blinkCount / time).toFixed(1) : '0.0'

  // Upload the face-action statistics
  sendFaceAction([
    {
      faceAction: 'EYE_BLINK',
      counter: blinkRate,
    },
    {
      faceAction: 'MOUTH_PUFF',
      counter: mouthPuckerCount,
    },
    {
      faceAction: 'EYE_UP_LEFT',
      counter: eyeUpLeftCount,
    },
    {
      faceAction: 'EYE_UP_RIGHT',
      counter: eyeUpRightCount,
    },
  ])

  console.log('面部检测和微表情检测已停止，动作计数已发送')
}

/**
 * 初始化人脸检测器
 * @returns {Promise<void>}
 */
async function createFaceLandmarker() {
  const resolver = await FilesetResolver.forVisionTasks(
    'https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.3/wasm'
  )
  faceLandmarker = await FaceLandmarker.createFromOptions(resolver, {
    baseOptions: {
      modelAssetPath:
        'https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task',
      delegate: 'GPU',
    },
    outputFaceBlendshapes: true,
    runningMode,
    numFaces: 1,
  })
  console.log('✅微表情动作识别模型加载完成')
}

// Render-loop caches (module level so stopFace can reset/cancel them)
let lastValidLandmarks = null // last smoothed landmark set; kept so the mesh stays drawn while detection pauses
let animationFrameId = null // requestAnimationFrame handle, cancelled in stopFace
const SMOOTHING_FACTOR = 0.7 // exponential-smoothing weight given to the previous frame

/**
 * Drive face-landmark detection and the overlay render loop.
 *
 * Sets up the display canvas plus an off-screen buffer, then runs a
 * requestAnimationFrame loop that (a) while shouldDetect is true, queries the
 * MediaPipe FaceLandmarker at most every 100 ms, smooths the landmarks and
 * updates the blink / lip-press / gaze counters (emitting
 * face-action-detected), and (b) always redraws the last known face mesh so
 * it stays visible while detection is paused.
 * @returns {Promise<void>}
 */
async function predictWebcam() {
  const video = videoRef.value
  const canvas = canvasRef.value
  if (!video || !canvas) return

  const ctx = canvas.getContext('2d')

  // Give the canvas exactly the video's intrinsic size so the mesh lines up 1:1
  canvas.width = video.videoWidth
  canvas.height = video.videoHeight

  // Off-screen buffer canvas for double-buffered (flicker-free) drawing
  bufferCanvas = document.createElement('canvas')
  bufferCanvas.width = canvas.width
  bufferCanvas.height = canvas.height
  const bufferCtx = bufferCanvas.getContext('2d')

  // Throttle: minimum interval between model invocations
  let lastOutputTime = 0
  /**
   * Per-frame callback: detect (when enabled) and redraw.
   * @returns {Promise<void>}
   */
  const detectFrame = async () => {
    // Keep drawing even when not detecting so the face mesh stays on screen
    try {
      const startTimeMs = performance.now()

      // Only refresh detection results while enabled, at most every 100 ms
      if (shouldDetect.value && startTimeMs - lastOutputTime >= 100) {
        lastOutputTime = startTimeMs
        const results = await faceLandmarker.detectForVideo(video, startTimeMs)

        // Cache / smooth the landmarks
        if (results.faceLandmarks?.length) {
          if (!lastValidLandmarks) {
            lastValidLandmarks = results.faceLandmarks[0]
          } else {
            // Exponential smoothing toward the new frame
            results.faceLandmarks[0].forEach((point, i) => {
              lastValidLandmarks[i].x =
                SMOOTHING_FACTOR * lastValidLandmarks[i].x + (1 - SMOOTHING_FACTOR) * point.x
              lastValidLandmarks[i].y =
                SMOOTHING_FACTOR * lastValidLandmarks[i].y + (1 - SMOOTHING_FACTOR) * point.y
            })
          }
        }

        // Action statistics from the blendshape scores
        if (results.faceBlendshapes?.[0]?.categories) {
          const blendshapes = results.faceBlendshapes[0].categories

          // 1. Current-frame feature scores
          const blinkLeft = blendshapes.find((c) => c.categoryName === 'eyeBlinkLeft')?.score || 0
          const blinkRight = blendshapes.find((c) => c.categoryName === 'eyeBlinkRight')?.score || 0
          // NOTE(review): "pucker" is read from the mouthDimple* blendshapes —
          // confirm this is the intended signal for the lip-press action
          const puckerLeft =
            blendshapes.find((c) => c.categoryName === 'mouthDimpleLeft')?.score || 0
          const puckerRight =
            blendshapes.find((c) => c.categoryName === 'mouthDimpleRight')?.score || 0
          const eyeLookInLeft =
            blendshapes.find((c) => c.categoryName === 'eyeLookInLeft')?.score || 0
          const eyeLookOutRight =
            blendshapes.find((c) => c.categoryName === 'eyeLookOutRight')?.score || 0
          const eyeLookInRight =
            blendshapes.find((c) => c.categoryName === 'eyeLookInRight')?.score || 0
          const eyeLookOutLeft =
            blendshapes.find((c) => c.categoryName === 'eyeLookOutLeft')?.score || 0

          // 2. Action decisions (both sides must fire for one count); each
          //    action latches until its score drops below half the threshold
          //    (hysteresis against flicker)
          // Blink check
          if (blinkLeft > BLINK_THRESHOLD && blinkRight > BLINK_THRESHOLD) {
            if (!isEyeBlink) {
              isEyeBlink = true
              blinkCount++
              console.log(`检测到眨眼！当前计数: ${blinkCount}`)
              // Notify the parent component
              emit('face-action-detected', { type: 'EYE_BLINK', index: 0 })
            }
          } else if (
            isEyeBlink &&
            (blinkLeft < BLINK_THRESHOLD * 0.5 || blinkRight < BLINK_THRESHOLD * 0.5)
          ) {
            isEyeBlink = false
          }

          // Lip-press check
          if (puckerLeft > PUCKER_THRESHOLD && puckerRight > PUCKER_THRESHOLD) {
            if (!isMouthPucker) {
              isMouthPucker = true
              mouthPuckerCount++
              console.log(`检测到抿嘴！当前计数: ${mouthPuckerCount}`)
              // Notify the parent component
              emit('face-action-detected', { type: 'MOUTH_PUFF', index: 1 })
            }
          } else if (
            isMouthPucker &&
            (puckerLeft < PUCKER_THRESHOLD * 0.5 || puckerRight < PUCKER_THRESHOLD * 0.5)
          ) {
            isMouthPucker = false
          }

          // Look up-LEFT check: left eye looking inward (toward the nose) plus
          // right eye looking outward means the gaze points left
          if (eyeLookInLeft > EYE_UP_LEFT_THRESHOLD && eyeLookOutRight > EYE_UP_RIGHT_THRESHOLD) {
            if (!isEyeUpLeft) {
              isEyeUpLeft = true
              eyeUpLeftCount++
              console.log(`检测到眼睛向左斜上方看！当前计数: ${eyeUpLeftCount}`)
              // NOTE(review): this LEFT-look branch emits the EYE_UP_RIGHT event.
              // The original comments mark the swap as intentional (possibly due
              // to the mirrored video) — confirm against the parent's mapping.
              emit('face-action-detected', { type: 'EYE_UP_RIGHT', index: 3 })
            }
          } else if (
            isEyeUpLeft &&
            (eyeLookInLeft < EYE_UP_LEFT_THRESHOLD * 0.5 ||
              eyeLookOutRight < EYE_UP_RIGHT_THRESHOLD * 0.5)
          ) {
            isEyeUpLeft = false
          }

          // Look up-RIGHT check: right eye looking inward plus left eye looking
          // outward means the gaze points right
          if (eyeLookInRight > EYE_UP_RIGHT_THRESHOLD && eyeLookOutLeft > EYE_UP_LEFT_THRESHOLD) {
            if (!isEyeUpRight) {
              isEyeUpRight = true
              eyeUpRightCount++
              console.log(`检测到眼睛向右斜上方看！当前计数: ${eyeUpRightCount}`)
              // NOTE(review): this RIGHT-look branch emits the EYE_UP_LEFT event —
              // swap marked intentional by the original author; verify with the parent.
              emit('face-action-detected', { type: 'EYE_UP_LEFT', index: 2 })
            }
          } else if (
            isEyeUpRight &&
            (eyeLookInRight < EYE_UP_RIGHT_THRESHOLD * 0.5 ||
              eyeLookOutLeft < EYE_UP_LEFT_THRESHOLD * 0.5)
          ) {
            isEyeUpRight = false
          }
        }
      }

      // Draw onto the off-screen buffer first
      bufferCtx.clearRect(0, 0, bufferCanvas.width, bufferCanvas.height)

      // As long as valid landmarks exist, keep drawing (even while paused)
      if (lastValidLandmarks) {
        const draw = new DrawingUtils(bufferCtx)

        // Face mesh tessellation
        draw.drawConnectors(lastValidLandmarks, FaceLandmarker.FACE_LANDMARKS_TESSELATION, {
          color: '#C0C0C070',
          lineWidth: 1,
        })
        draw.drawConnectors(lastValidLandmarks, FaceLandmarker.FACE_LANDMARKS_RIGHT_EYE, {
          color: '#FF3030',
        })
        draw.drawConnectors(lastValidLandmarks, FaceLandmarker.FACE_LANDMARKS_LEFT_EYE, {
          color: '#30FF30',
        })
        draw.drawConnectors(lastValidLandmarks, FaceLandmarker.FACE_LANDMARKS_LIPS, {
          color: '#E0E0E0',
        })
        draw.drawConnectors(lastValidLandmarks, FaceLandmarker.FACE_LANDMARKS_RIGHT_IRIS, {
          color: '#FF3030',
        })
        draw.drawConnectors(lastValidLandmarks, FaceLandmarker.FACE_LANDMARKS_LEFT_IRIS, {
          color: '#30FF30',
        })
        // Eyebrows
        draw.drawConnectors(lastValidLandmarks, FaceLandmarker.FACE_LANDMARKS_LEFT_EYEBROW, {
          color: '#10FF10', // bright green (slightly lighter than the eye color)
          lineWidth: 3, // slightly thicker than the mesh lines
        })
        draw.drawConnectors(lastValidLandmarks, FaceLandmarker.FACE_LANDMARKS_RIGHT_EYEBROW, {
          color: '#FF1010', // bright red (slightly lighter than the eye color)
          lineWidth: 3,
        })
      }

      // Blit the buffer to the visible canvas
      ctx.clearRect(0, 0, canvas.width, canvas.height)
      ctx.drawImage(bufferCanvas, 0, 0)
    } catch (error) {
      console.error('Detection error:', error)
    }

    // Schedule the next frame
    animationFrameId = requestAnimationFrame(detectFrame)
  }

  // Start the loop
  await detectFrame()
}

/**
 * 清除画布方法
 */
function clearCanvas() {
  const canvas = canvasRef.value
  if (!canvas) return

  const ctx = canvas.getContext('2d')
  ctx.clearRect(0, 0, canvas.width, canvas.height)

  // 同时清除缓冲画布（如果有）
  if (bufferCanvas) {
    const bufferCtx = bufferCanvas.getContext('2d')
    bufferCtx.clearRect(0, 0, bufferCanvas.width, bufferCanvas.height)
  }
}

/**
 * Acquire the user-facing camera and start playback on the <video> element.
 * Always resolves — even on permission/playback failure — so component
 * initialisation is never blocked by a missing camera.
 * @returns {Promise<void>}
 */
async function initCamera() {
  // Constrain resolution and frame rate so the downstream models stay cheap
  const constraints = {
    video: {
      width: { ideal: 640, max: 1280 },
      height: { ideal: 480, max: 720 },
      facingMode: 'user',
      frameRate: { ideal: 30, max: 60 },
    },
  }

  return new Promise((resolve) => {
    navigator.mediaDevices
      .getUserMedia(constraints)
      .then((stream) => {
        console.log('成功获取摄像头权限')
        const video = videoRef.value
        if (!video) {
          console.error('视频元素不存在')
          resolve()
          return
        }
        video.srcObject = stream
        video.addEventListener('loadedmetadata', () => {
          video
            .play()
            .then(() => {
              console.log('视频开始播放')
              resolve()
            })
            .catch((err) => {
              console.error('视频播放失败:', err)
              resolve()
            })
        })
      })
      .catch((err) => {
        console.error('获取摄像头失败:', err)
        // Resolve anyway so the caller can continue without a camera
        resolve()
      })
  })
}

/**
 * Micro-expression emotion detection
 */
//=====================================================================================================================
import * as faceapi from 'face-api.js'
import BodyDetection from '@/components/BodyDetection.vue'
import { getHrFaceActionUsingPost, getHrFaceEmotionUsingPost } from '@/api/hrInterviewController'
import { getFaceActionUsingPost, getFaceEmotionUsingPost } from '@/api/tecInterviewController'
import { useCommonStore } from '@/stores/useCommonStore'
// Accumulated emotion labels, flushed to the server by stopDetection
let faceEmotionList = []
// 1. Element references and state
// setTimeout handle for the emotion-detection loop
let detectionTimer = null

// 2. Model loading
/**
 * Load the face-api.js models (tiny detector, 68-point landmarks, expression
 * net) used for micro-expression analysis, then run one throw-away detection
 * to warm up and verify the models.
 * @throws re-throws any model-loading failure
 */
const loadModels = async () => {
  // Models are served from the app's own /models directory
  const MODEL_URL = '/models'
  try {
    await Promise.all([
      faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
      faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
      faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL),
    ])

    console.log('✅ 微表情情感模型加载完成')

    // Smoke test: run a single detection if the video already has data
    if (videoRef.value && videoRef.value.readyState > 1) {
      try {
        const testResult = await faceapi
          .detectAllFaces(videoRef.value, new faceapi.TinyFaceDetectorOptions())
          .withFaceExpressions()
        console.log('初始测试检测结果:', testResult)
      } catch (error) {
        console.error('初始测试检测失败:', error)
      }
    }
  } catch (error) {
    console.error('❌ 模型加载失败:', error)
    throw error
  }
}
// 3. Detection results
// Emotion-detection loop (roughly every 1500 ms)
const runDetection = () => {
  const video = videoRef.value
  let consecutiveFailures = 0 // consecutive failed detections
  let lastDetectedEmotion = 'calm' // last successfully detected emotion
  let lastPercentages = { confident: 33, nervous: 33, calm: 34 } // last successful percentage split

  const detect = async () => {
    if (!shouldDetect.value) return // bail out when detection has been switched off

    try {
      // Wait until the video element has enough data to sample
      if (!video || !video.readyState || video.readyState < 2) {
        console.log('视频元素未准备好，等待...')
        detectionTimer = setTimeout(detect, 500)
        return
      }

      // Detect straight from the raw <video>, not the overlay canvas, so the
      // drawn face mesh cannot interfere with emotion detection
      const results = await faceapi
        .detectAllFaces(
          video,
          new faceapi.TinyFaceDetectorOptions({
            // Low confidence threshold to raise the detection rate
            scoreThreshold: 0.1,
          })
        )
        .withFaceLandmarks()
        .withFaceExpressions()

      // On success: reset the failure counter and publish the result
      if (results && results?.length > 0) {
        consecutiveFailures = 0
        const { emotion, confidence, percentages } = classifyEmotion(results[0])
        console.log(`🎯 情绪：${emotion}（置信度 ${confidence}）百分比：`, percentages)

        // Remember the latest good values as a fallback
        lastDetectedEmotion = emotion
        lastPercentages = percentages

        /**
         * Record the emotion sample for the end-of-interview upload
         */
        faceEmotionList.push(emotion)
        // Notify the parent with the label and the percentage breakdown
        emit('emotion-detected', emotion, percentages)
      } else {
        consecutiveFailures++
        console.log(`未检测到面部表情，连续失败次数：${consecutiveFailures}`)

        // After a few misses, re-emit the last known emotion instead of a default
        if (consecutiveFailures > 2) {
          // Low threshold: fall back to the cached value quickly
          console.log('使用最后一次检测到的情绪值:', lastDetectedEmotion)
          // Keep the UI continuous by resending the last good reading
          emit('emotion-detected', lastDetectedEmotion, lastPercentages)
          consecutiveFailures = 1 // keep some failure history rather than fully resetting
        }
      }
    } catch (error) {
      console.error('微表情检测失败:', error)
      consecutiveFailures++

      // Recovery path once errors pile up
      if (consecutiveFailures > 3) {
        // Low threshold: recover quickly
        console.log('微表情检测连续失败，尝试恢复，使用上次检测结果')
        // Fall back to the last good reading
        emit('emotion-detected', lastDetectedEmotion, lastPercentages)

        // Partially reset the failure count after a short delay
        setTimeout(() => {
          if (shouldDetect.value) {
            consecutiveFailures = 1 // keep some failure history
          }
        }, 1000) // recovery delay
      }
    } finally {
      // Always schedule the next pass unless detection has been stopped
      if (shouldDetect.value) {
        // Back off when failing repeatedly; otherwise keep a CPU-friendly cadence
        const interval = consecutiveFailures > 3 ? 2000 : 1500
        detectionTimer = setTimeout(detect, interval)
      }
    }
  }

  // Run the first detection immediately
  detect()
}
// 4. Stop micro-expression emotion detection
/**
 * Stop the emotion-detection loop and upload the collected samples.
 *
 * The timer is cleared BEFORE the awaited upload: this function is exposed to
 * the parent (defineExpose), and in the original order a still-armed timer
 * could schedule further detections during the await, pushing samples into a
 * list that is reset afterwards — losing or duplicating data.
 * @returns {Promise<void>}
 */
const stopDetection = async () => {
  // Stop the scheduler first so no new detections fire while uploading
  if (detectionTimer) {
    clearTimeout(detectionTimer)
    detectionTimer = null
  }

  // Snapshot and reset before awaiting, so late results cannot land in the
  // batch that is currently being sent
  const emotions = faceEmotionList
  faceEmotionList = []

  // Upload the emotion samples to the matching interview endpoint
  if (props.interviewType === '0') {
    await getFaceEmotionUsingPost({
      faceEmotions: emotions,
    })
  } else {
    await getHrFaceEmotionUsingPost({
      faceEmotions: emotions,
    })
  }
}

/**
 * Classify an interview emotion ('confident' | 'nervous' | 'calm') from a
 * face-api.js detection result.
 *
 * Blends raw expression probabilities with geometric cues taken from the
 * 68-point landmarks (brow lift, brow furrow, mouth stretch, smile lift, eye
 * openness). Nervousness is deliberately weighted up and confidence is made
 * harder to reach; a strong brow furrow short-circuits straight to 'nervous'.
 * When no landmarks are present, a simpler expression-only fallback decides
 * the label (the reported confidence still comes from the score-based pass).
 *
 * @param result face-api.js result carrying { expressions, landmarks }
 * @returns { emotion, confidence, percentages } — percentages is the
 *          confident/nervous/calm split in whole percent (each at least 1)
 */
function classifyEmotion(result) {
  const expr = result.expressions
  const lm = result.landmarks

  // 1. Rank the raw expressions by probability (top entry kept for parity)
  const ranked = Object.entries(expr || {}).sort((a, b) => Number(b[1]) - Number(a[1]))
  const [topLabel, topConfidence] = ranked[0] || ['neutral', 0.5]

  // 2. Geometric facial metrics (all stay 0 when landmarks are missing)
  let browLift = 0 // eyebrow-to-eye distance (raised brows)
  let mouthStretch = 0 // mouth width / height ratio (tight lips)
  let eyeOpenness = 0 // summed eyelid gap of both eyes
  let smileLift = 0 // how far the mouth corners sit above the lower lip
  let browFurrow = 0 // how tightly the inner brows are pulled together

  if (lm && lm.positions && lm.positions.length > 0) {
    const p = lm.positions

    // Brow lift: distance from each mid-brow point to the upper eyelid
    if (p[19] && p[37]) {
      browLift += Math.abs(Number(p[19].y) - Number(p[37].y))
    }
    if (p[24] && p[44]) {
      browLift += Math.abs(Number(p[24].y) - Number(p[44].y))
    }

    // Brow furrow: the closer the inner brow points, the higher the score
    if (p[21] && p[22]) {
      const innerGapX = Math.abs(Number(p[21].x) - Number(p[22].x))
      browFurrow = Math.max(0, 1 - innerGapX / 30)

      // Inner brows sitting on the same horizontal line also suggest a frown
      const innerGapY = Math.abs(Number(p[21].y) - Number(p[22].y))
      if (innerGapY < 5) {
        browFurrow += 0.3
      }
    }

    // Mouth stretch: corner-to-corner width over the lip gap
    if (p[48] && p[54]) {
      const lipSpan = Math.abs(Number(p[48].x) - Number(p[54].x))
      if (p[51] && p[57]) {
        const lipGap = Math.abs(Number(p[51].y) - Number(p[57].y))
        mouthStretch = lipSpan / (lipGap || 1) // guard against divide-by-zero
      }
    }

    // Smile lift: mouth corners rising above the lower-lip midpoint
    if (p[48] && p[54] && p[57]) {
      const leftCornerY = Number(p[48].y)
      const rightCornerY = Number(p[54].y)
      const lowerLipY = Number(p[57].y)
      smileLift = (lowerLipY - (leftCornerY + rightCornerY) / 2) / 10
    }

    // Eye openness: summed upper/lower lid distance of both eyes
    if (p[37] && p[41]) {
      eyeOpenness += Math.abs(Number(p[37].y) - Number(p[41].y))
    }
    if (p[44] && p[46]) {
      eyeOpenness += Math.abs(Number(p[44].y) - Number(p[46].y))
    }
  }

  // 3. Base expression probabilities
  const happy = Number(expr.happy || 0)
  const surprised = Number(expr.surprised || 0)
  const sad = Number(expr.sad || 0)
  const angry = Number(expr.angry || 0)
  const fearful = Number(expr.fearful || 0)
  const disgusted = Number(expr.disgusted || 0)
  const neutral = Number(expr.neutral || 0)

  // Composite scores: confidence narrowed, nervous/calm widened

  // Confident: strong happiness plus a clear smile
  const confidentScore =
    happy * 0.7 +
    surprised * 0.1 +
    (smileLift > 1.5 ? 0.3 : 0) -
    sad * 0.3 -
    fearful * 0.3 -
    angry * 0.2

  // Nervous: negative expressions + tense brows/mouth + furrowed brow
  const nervousScore =
    sad * 0.5 +
    angry * 0.4 +
    fearful * 0.6 +
    disgusted * 0.4 +
    (browLift < 8 ? 0.4 : 0) +
    (mouthStretch < 1.0 ? 0.4 : 0) +
    (browFurrow > 0.3 ? 0.8 : 0) +
    surprised * 0.3 -
    happy * 0.3

  // Calm: dominated by the neutral expression plus a relaxed face
  const calmScore =
    neutral * 0.7 +
    (eyeOpenness > 3 && eyeOpenness < 12 ? 0.3 : 0) +
    (mouthStretch > 1.0 && mouthStretch < 1.6 ? 0.2 : 0) -
    happy * 0.2 -
    sad * 0.3 -
    fearful * 0.3 -
    angry * 0.3

  // Clamp at zero; globally boost nervous (x1.4) and damp calm (x0.9)
  const confident = Math.max(0, confidentScore)
  const nervous = Math.max(0, nervousScore * 1.4)
  const calm = Math.max(0, calmScore * 0.9)

  // Total used for the percentage split below
  const totalScore = confident + nervous + calm

  // 4. Pick the final label; a strong brow furrow wins outright
  let emotion = 'calm'
  let confidenceScore = 0

  if (browFurrow > 0.5) {
    emotion = 'nervous'
    confidenceScore = nervous
  } else if (
    confident > nervous * 1.3 && // confidence must clearly dominate
    confident > calm * 1.3 &&
    happy > 0.4
  ) {
    emotion = 'confident'
    confidenceScore = confident
  } else if (
    nervous > confident * 0.9 && // nervousness needs only near-parity
    nervous > calm * 0.9 &&
    (sad > 0.15 || angry > 0.15 || fearful > 0.15 || disgusted > 0.15 || browFurrow > 0.3)
  ) {
    emotion = 'nervous'
    confidenceScore = nervous
  } else {
    emotion = 'calm'
    confidenceScore = calm
  }

  // Landmark-free fallback: relabel from raw expressions alone
  // (confidenceScore is intentionally left as computed above)
  if (!lm || !lm.positions || lm.positions.length === 0) {
    if (happy > 0.5) {
      emotion = 'confident'
    } else if (sad > 0.25 || angry > 0.25 || fearful > 0.25 || disgusted > 0.25) {
      emotion = 'nervous'
    } else {
      emotion = 'calm'
    }
  }

  // Whole-percent split; every emotion shows at least 1%
  const confidentPercent =
    totalScore > 0 ? Math.max(1, Math.round((confident / totalScore) * 100)) : 33
  const nervousPercent =
    totalScore > 0 ? Math.max(1, Math.round((nervous / totalScore) * 100)) : 33
  const calmPercent = totalScore > 0 ? Math.max(1, 100 - confidentPercent - nervousPercent) : 34

  // 5. Result with the per-emotion percentage breakdown
  return {
    emotion,
    confidence: confidenceScore.toFixed(2),
    percentages: {
      confident: confidentPercent,
      nervous: nervousPercent,
      calm: calmPercent,
    },
  }
}

/**
 * Zero every face-action counter without touching the detection state.
 */
const resetCounts = () => {
  console.log('重置FaceDetection中的所有计数器')
  blinkCount = 0
  mouthPuckerCount = 0
  eyeUpLeftCount = 0
  eyeUpRightCount = 0
}

/**
 * @returns the BodyDetection child-component instance (null before mount)
 */
const getBodyRef = () => bodyRef.value

// Expose the control methods to the parent component
defineExpose({
  startFace,
  stopFace,
  runDetection,
  stopDetection,
  resetCounts,
  getBodyRef,
})

// Component initialisation: camera → video ready → emotion models →
// face-action model → warm-up detection
onMounted(async () => {
  try {
    // Initialise the camera (always resolves, even on failure)
    await initCamera()

    // Poll until the video element has current frame data (readyState >= 2)
    const waitForVideoReady = () => {
      return new Promise((resolve) => {
        const checkReady = () => {
          if (videoRef.value && videoRef.value.readyState >= 2) {
            resolve()
          } else {
            setTimeout(checkReady, 200)
          }
        }
        checkReady()
      })
    }

    await waitForVideoReady()

    // Load the face-api.js emotion models first
    await loadModels()

    // Then load the MediaPipe face-action model, after a short delay to
    // avoid the two model initialisations clashing
    await new Promise((resolve) => setTimeout(resolve, 300))
    await createFaceLandmarker()

    // One warm-up emotion detection to confirm the models work
    try {
      if (videoRef.value && videoRef.value.readyState >= 2) {
        const testResult = await faceapi
          .detectAllFaces(
            videoRef.value,
            new faceapi.TinyFaceDetectorOptions({ scoreThreshold: 0.2 })
          )
          .withFaceExpressions()

        // NOTE(review): the success branch is intentionally empty — only the
        // miss case is logged
        if (testResult && testResult.length > 0) {
        } else {
          console.log('微表情模型初始测试未检测到面部')
        }
      }
    } catch (testError) {
      console.warn('微表情模型初始测试失败，但不影响继续:', testError)
    }
  } catch (error) {
    console.error('组件初始化过程中出错:', error)
  }
})

// Teardown: stop the emotion-detection timer and release the camera tracks
onBeforeUnmount(() => {
  if (detectionTimer) {
    clearTimeout(detectionTimer)
  }
  const stream = videoRef.value?.srcObject
  if (stream) {
    stream.getTracks().forEach((track) => track.stop())
  }
})

// Forward body-action events from the BodyDetection child to our parent,
// reusing the face-action-detected channel
const handleBodyActionDetected = (action) => {
  console.log('Body action detected:', action)
  emit('face-action-detected', action)
}
</script>

<style scoped>
.container {
  font-family: Helvetica, Arial, sans-serif;
  color: #3d3d3d;
  height: 100%;
  width: 100%;
  display: flex;
  flex-direction: column;
  justify-content: center;
}

.video-canvas-container {
  position: relative;
  width: 100%;
  height: 100%;
  display: flex;
  justify-content: center;
  align-items: center;
  overflow: hidden;
}

video {
  width: 100%;
  height: 100%;
  object-fit: contain; /* contain keeps the whole video visible at its native aspect ratio */
  transform: rotateY(180deg);
  -webkit-transform: rotateY(180deg);
  -moz-transform: rotateY(180deg);
  max-height: none; /* no max-height cap, so the video fills the parent's height */
  max-width: 100%; /* cap the width */
}

.canvas {
  position: absolute;
  top: 0;
  left: 50%; /* center horizontally */
  transform: translateX(-50%) rotateY(180deg); /* translateX gives exact centering; mirror to match the video */
  -webkit-transform: translateX(-50%) rotateY(180deg);
  -moz-transform: translateX(-50%) rotateY(180deg);
  max-height: none; /* match the video: no height cap */
  width: auto; /* width follows the aspect ratio */
  height: 100%; /* fill the full height */
  pointer-events: none;
}

.invisible {
  opacity: 0.2;
}

.blend-shapes-item {
  display: flex;
  align-items: center;
  height: 20px;
}

.blend-shapes-label {
  width: 120px;
  text-align: right;
  margin-right: 4px;
}

.blend-shapes-value {
  height: 16px;
  background-color: #007f8b;
  color: #fff;
  padding-left: 4px;
}
</style>
