# video_analysis.py
import cv2
import mediapipe as mp
import numpy as np
import logging
from typing import Dict
from mediapipe.python.solutions.pose import PoseLandmark

# Configure the root logger so that INFO-and-above records are emitted
# with a timestamped format.
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s %(message)s')
logger = logging.getLogger(__name__)


def video_analysis(video_path: str) -> Dict:
    """
    Core video-analysis routine.

    Samples frames (every 1 s for clips up to 2 minutes, every 2 s
    otherwise) and runs MediaPipe FaceMesh + Pose on each sampled frame
    to estimate:

      - eye_contact_rate: % of processed frames where the midpoint of the
        outer eye corners (landmarks 33/263) lies horizontally close to
        the nose tip (landmark 4), i.e. the face looks roughly centered.
      - smile_rate: % of processed frames where the vertical gap between
        the inner lips (landmarks 13/14) exceeds a threshold.
        NOTE(review): this measures mouth opening, not strictly a smile.
      - gesture_rate: wrist-above-shoulder events per minute, counted on
        rising edges only (a hand held up counts once).
      - head_stability: standard deviation (degrees) of the tilt of the
        line through the outer eye corners; lower means steadier.

    Args:
        video_path: Path to a video file readable by OpenCV.

    Returns:
        Dict with the metrics above plus total_frames, processed_frames
        and video_duration, or {"error": ...} if no frame was processed.

    Raises:
        ValueError: If the video file cannot be opened.
    """
    print(f"[video_analysis] 开始分析视频: {video_path}")
    logger.info("开始分析视频: %s", video_path)

    # Open the capture BEFORE creating the MediaPipe solutions so that a
    # bad path cannot leak their native (TFLite) resources.
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        cap.release()
        raise ValueError(f"无法打开视频文件: {video_path}")

    # Initialize MediaPipe (loads the TFLite delegates).
    mp_face_mesh = mp.solutions.face_mesh.FaceMesh(
        static_image_mode=False,
        max_num_faces=1,
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5
    )
    mp_pose = mp.solutions.pose.Pose(
        static_image_mode=False,
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5
    )

    fps = cap.get(cv2.CAP_PROP_FPS) or 0
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT) or 0)
    video_duration = total_frames / fps if fps else 0
    print(f"[video_analysis] FPS={fps}, 总帧数={total_frames}, 时长={video_duration:.1f}s")

    # Sampling: process one frame per `sample_rate` seconds; guard against
    # a zero interval when fps metadata is missing or tiny.
    sample_rate = 1 if video_duration <= 120 else 2
    frame_interval = max(int(fps * sample_rate), 1)

    # Metric accumulators.
    eye_contact, smile, gestures = 0, 0, 0
    head_angles = []          # per-frame eye-line tilt, in degrees
    prev_gesture = False      # previous frame's gesture state (edge detector)
    processed = 0             # sampled frames actually analyzed
    frame_idx = 0             # absolute frame index within the video

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                print("[video_analysis] 视频读取完毕")
                break

            # Skip frames outside the sampling grid.
            if frame_idx % frame_interval != 0:
                frame_idx += 1
                continue

            # Progress line every 50 processed (sampled) frames.
            if processed % 50 == 0:
                print(f"[video_analysis] 已处理 {processed}/{total_frames} 帧 (frame_idx={frame_idx})")

            # Optional: downscale to speed up inference.
            # frame = cv2.resize(frame, (640, 360))

            # MediaPipe expects RGB; OpenCV delivers BGR.
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # Face landmarks.
            faces = mp_face_mesh.process(rgb)
            if faces.multi_face_landmarks:
                lm = faces.multi_face_landmarks[0].landmark
                # Eye contact: eye-corner midpoint near the nose tip in x.
                eye_cx = (lm[33].x + lm[263].x) / 2
                if abs(eye_cx - lm[4].x) < 0.05:
                    eye_contact += 1
                # "Smile": inner-lip gap above a threshold (mouth opening).
                if (lm[14].y - lm[13].y) > 0.02:
                    smile += 1
                # Head tilt: angle of the line through the outer eye corners.
                dx = lm[263].x - lm[33].x
                dy = lm[263].y - lm[33].y
                head_angles.append(np.degrees(np.arctan2(dy, dx)))

            # Pose landmarks.
            poses = mp_pose.process(rgb)
            if poses.pose_landmarks:
                z = poses.pose_landmarks.landmark
                # Gesture: either wrist raised above its OWN shoulder.
                # (Bug fix: the right wrist was previously compared
                # against the LEFT shoulder.)
                curr = (z[PoseLandmark.LEFT_WRIST].y < z[PoseLandmark.LEFT_SHOULDER].y) or \
                       (z[PoseLandmark.RIGHT_WRIST].y < z[PoseLandmark.RIGHT_SHOULDER].y)
                # Count rising edges only, so a held gesture counts once.
                if curr and not prev_gesture:
                    gestures += 1
                prev_gesture = curr

            processed += 1
            frame_idx += 1
    finally:
        # Always release native resources, even on an exception mid-loop.
        cap.release()
        mp_face_mesh.close()
        mp_pose.close()
        print("[video_analysis] 已释放所有资源，分析结束")

    if processed == 0:
        print("[video_analysis] 没有处理任何帧，返回错误")
        return {"error": "未处理任何帧"}

    result = {
        "eye_contact_rate": round(eye_contact / processed * 100, 1),
        "smile_rate": round(smile / processed * 100, 1),
        # Gestures per minute of video (0 when duration is unknown).
        "gesture_rate": round(gestures / (video_duration / 60) if video_duration else 0, 1),
        "head_stability": round(np.std(head_angles), 1) if head_angles else 0,
        "total_frames": total_frames,
        "processed_frames": processed,
        "video_duration": round(video_duration, 1)
    }
    print(f"[video_analysis] 分析结果: {result}")
    return result
