"""
MediaPipe Pose Detection Backend
==============================

真实的AI姿态检测模块，使用MediaPipe Pose进行人体关键点检测
"""

import cv2
import numpy as np
import json
from typing import List, Dict, Tuple, Any

try:
    import mediapipe as mp
    MEDIAPIPE_AVAILABLE = True
    print("[PoseDetector] MediaPipe 可用")
except ImportError:
    MEDIAPIPE_AVAILABLE = False
    print("[PoseDetector] MediaPipe 不可用，将使用模拟模式")


class MediaPipePoseDetector:
    """Human pose detector backed by MediaPipe Pose.

    Produces COCO 17-keypoint poses for a single image. When MediaPipe is
    unavailable or fails to initialize, the detector degrades to a simulated
    (random) mode so the rest of the pipeline remains usable.
    """

    def __init__(self):
        # MediaPipe solution handles; populated by _initialize_mediapipe().
        self.mp_pose = None
        self.pose = None
        self.mp_drawing = None
        self.mp_drawing_styles = None
        # True only after MediaPipe initialized without error.
        self.is_initialized = False

        if MEDIAPIPE_AVAILABLE:
            self._initialize_mediapipe()

    def _initialize_mediapipe(self):
        """Create the MediaPipe Pose solution objects.

        Any failure is caught and reported rather than raised, leaving
        ``self.is_initialized`` False so detection falls back to simulation.
        """
        try:
            self.mp_pose = mp.solutions.pose
            self.mp_drawing = mp.solutions.drawing_utils
            self.mp_drawing_styles = mp.solutions.drawing_styles

            self.pose = self.mp_pose.Pose(
                static_image_mode=True,       # independent still images, no tracking
                model_complexity=2,           # most accurate (and slowest) model
                enable_segmentation=False,
                min_detection_confidence=0.5,
                min_tracking_confidence=0.5
            )

            self.is_initialized = True
            print("[PoseDetector] MediaPipe Pose 初始化成功")

        except Exception as e:
            print(f"[PoseDetector] MediaPipe 初始化失败: {e}")
            self.is_initialized = False

    def close(self):
        """Release native resources held by the MediaPipe Pose solution.

        Safe to call multiple times; after closing, detection falls back to
        the simulated mode.
        """
        if self.pose is not None:
            try:
                self.pose.close()
            except Exception:
                # Best-effort cleanup; never propagate shutdown errors.
                pass
            self.pose = None
            self.is_initialized = False

    def detect_poses(self, image: np.ndarray, confidence_threshold: float = 0.5,
                    max_persons: int = 10) -> List[Dict[str, Any]]:
        """Detect human poses in an image.

        Args:
            image: Input image as a numpy array. A 3-channel image is assumed
                to be BGR (OpenCV convention) and is converted to RGB before
                being handed to MediaPipe.
            confidence_threshold: Keypoints with visibility below this value
                have their confidence zeroed; the pose itself is kept only if
                the mean confidence of the surviving keypoints exceeds it.
            max_persons: Upper bound on detected persons. MediaPipe Pose is a
                single-person model, so this currently only limits the
                simulated mode.

        Returns:
            List of pose dicts with keys 'id', 'keypoints' (17 entries of
            {'x', 'y', 'confidence'}) and 'overall_confidence'.
        """
        if not self.is_initialized or not MEDIAPIPE_AVAILABLE:
            return self._simulate_pose_detection(image, confidence_threshold, max_persons)

        try:
            # MediaPipe expects RGB; convert when the input looks like BGR.
            if len(image.shape) == 3 and image.shape[2] == 3:
                rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            else:
                rgb_image = image

            results = self.pose.process(rgb_image)

            detected_poses = []

            if results.pose_landmarks:
                # Map MediaPipe's 33 landmarks onto the COCO 17-point layout.
                coco_keypoints = self._convert_mediapipe_to_coco(
                    results.pose_landmarks.landmark,
                    image.shape[1], image.shape[0]
                )

                # Zero out low-confidence keypoints in place, keeping every
                # slot so indices still line up with the COCO layout.
                for kp in coco_keypoints:
                    if kp['confidence'] < confidence_threshold:
                        kp['confidence'] = 0.0

                # Overall score: mean confidence of the surviving keypoints.
                valid_keypoints = [kp for kp in coco_keypoints if kp['confidence'] > 0]
                overall_confidence = (
                    sum(kp['confidence'] for kp in valid_keypoints) / len(valid_keypoints)
                    if valid_keypoints else 0.0
                )

                if overall_confidence > confidence_threshold:
                    detected_poses.append({
                        'id': 0,
                        'keypoints': coco_keypoints,
                        'overall_confidence': overall_confidence
                    })

            print(f"[PoseDetector] MediaPipe检测完成: 找到 {len(detected_poses)} 个人")
            return detected_poses

        except Exception as e:
            print(f"[PoseDetector] MediaPipe检测失败: {e}")
            return self._simulate_pose_detection(image, confidence_threshold, max_persons)

    def _convert_mediapipe_to_coco(self, mediapipe_landmarks, width: int, height: int) -> List[Dict]:
        """Convert MediaPipe's 33 landmarks to the COCO 17-point format.

        MediaPipe 33 -> COCO 17 mapping:
        0: nose -> 0: nose
        2: left_eye_inner -> 1: left_eye
        5: right_eye_inner -> 2: right_eye
        7: left_ear -> 3: left_ear
        8: right_ear -> 4: right_ear
        11: left_shoulder -> 5: left_shoulder
        12: right_shoulder -> 6: right_shoulder
        13: left_elbow -> 7: left_elbow
        14: right_elbow -> 8: right_elbow
        15: left_wrist -> 9: left_wrist
        16: right_wrist -> 10: right_wrist
        23: left_hip -> 11: left_hip
        24: right_hip -> 12: right_hip
        25: left_knee -> 13: left_knee
        26: right_knee -> 14: right_knee
        27: left_ankle -> 15: left_ankle
        28: right_ankle -> 16: right_ankle

        Args:
            mediapipe_landmarks: Sequence of landmarks with normalized
                ``x``/``y`` in [0, 1] and (usually) a ``visibility`` score.
            width: Image width in pixels, used to denormalize x.
            height: Image height in pixels, used to denormalize y.

        Returns:
            List of 17 dicts {'x', 'y', 'confidence'} in COCO order.
        """
        # MediaPipe landmark index for each COCO keypoint, in COCO order.
        mediapipe_to_coco_map = [
            0,   # nose
            2,   # left_eye (uses left_eye_inner)
            5,   # right_eye (uses right_eye_inner)
            7,   # left_ear
            8,   # right_ear
            11,  # left_shoulder
            12,  # right_shoulder
            13,  # left_elbow
            14,  # right_elbow
            15,  # left_wrist
            16,  # right_wrist
            23,  # left_hip
            24,  # right_hip
            25,  # left_knee
            26,  # right_knee
            27,  # left_ankle
            28   # right_ankle
        ]

        coco_keypoints = []

        for mp_idx in mediapipe_to_coco_map:
            if mp_idx < len(mediapipe_landmarks):
                landmark = mediapipe_landmarks[mp_idx]

                # Denormalize to pixel coordinates.
                x = landmark.x * width
                y = landmark.y * height

                # MediaPipe's visibility doubles as a confidence score;
                # fall back to 0.8 when the attribute is missing.
                confidence = landmark.visibility if hasattr(landmark, 'visibility') else 0.8

                coco_keypoints.append({
                    'x': float(x),
                    'y': float(y),
                    'confidence': float(confidence)
                })
            else:
                # Landmark index out of range: emit an invalid placeholder
                # so the output always has exactly 17 entries.
                coco_keypoints.append({
                    'x': 0.0,
                    'y': 0.0,
                    'confidence': 0.0
                })

        return coco_keypoints

    def _simulate_pose_detection(self, image: np.ndarray, confidence_threshold: float,
                                max_persons: int) -> List[Dict[str, Any]]:
        """Generate random but plausible poses (fallback when MediaPipe is unavailable).

        Returns the same structure as detect_poses(): 1-3 persons (capped by
        ``max_persons``), each with 17 keypoints inside the image bounds and
        confidences drawn from [confidence_threshold, 1.0].
        """
        print("[PoseDetector] 使用模拟检测模式")

        height, width = image.shape[:2]

        # Simulate 1-3 detected persons, respecting the caller's cap.
        import random
        num_persons = min(random.randint(1, 3), max_persons)

        detected_poses = []

        for i in range(num_persons):
            keypoints = []

            # Random body-center position, kept away from the image edges.
            center_x = random.uniform(0.2 * width, 0.8 * width)
            center_y = random.uniform(0.3 * height, 0.7 * height)

            # Offsets of the COCO 17 keypoints relative to the body center,
            # as fractions of image width/height.
            keypoint_offsets = [
                (0, -0.15),      # 0: nose
                (-0.02, -0.12),  # 1: left_eye
                (0.02, -0.12),   # 2: right_eye
                (-0.04, -0.10),  # 3: left_ear
                (0.04, -0.10),   # 4: right_ear
                (-0.08, -0.05),  # 5: left_shoulder
                (0.08, -0.05),   # 6: right_shoulder
                (-0.12, 0.05),   # 7: left_elbow
                (0.12, 0.05),    # 8: right_elbow
                (-0.15, 0.15),   # 9: left_wrist
                (0.15, 0.15),    # 10: right_wrist
                (-0.06, 0.10),   # 11: left_hip
                (0.06, 0.10),    # 12: right_hip
                (-0.08, 0.25),   # 13: left_knee
                (0.08, 0.25),    # 14: right_knee
                (-0.08, 0.40),   # 15: left_ankle
                (0.08, 0.40),    # 16: right_ankle
            ]

            for dx, dy in keypoint_offsets:
                # Jitter each limb slightly so poses are not identical.
                variation = random.uniform(0.8, 1.2)
                x = center_x + dx * width * variation
                y = center_y + dy * height * variation

                # Clamp to the image bounds.
                x = max(0, min(width - 1, x))
                y = max(0, min(height - 1, y))

                # Simulated confidence always passes the threshold.
                confidence = random.uniform(confidence_threshold, 1.0)

                keypoints.append({
                    'x': float(x),
                    'y': float(y),
                    'confidence': float(confidence)
                })

            # Overall score: mean confidence over all 17 keypoints.
            overall_confidence = sum(kp['confidence'] for kp in keypoints) / len(keypoints)

            detected_poses.append({
                'id': i,
                'keypoints': keypoints,
                'overall_confidence': overall_confidence
            })

        return detected_poses

    def render_skeleton_on_image(self, image: np.ndarray, poses: List[Dict],
                                style: str = "classic", show_confidence: bool = True) -> np.ndarray:
        """Draw skeletons for each detected pose on a copy of the image.

        Args:
            image: Image to draw on (left unmodified; a copy is returned).
            poses: Pose dicts as produced by detect_poses().
            style: Color-scheme name ('classic', 'modern', 'minimal');
                unknown names fall back to 'classic'.
            show_confidence: If True, annotate each keypoint with its score.

        Returns:
            A copy of the image with the skeletons rendered.
        """
        result = image.copy()

        # Bone connections between COCO 17-point indices.
        skeleton_connections = [
            # head
            (0, 1), (0, 2),     # nose to eyes
            (1, 3), (2, 4),     # eyes to ears
            # torso and arms
            (5, 6),             # shoulders
            (5, 7), (6, 8),     # shoulders to elbows
            (7, 9), (8, 10),    # elbows to wrists
            (5, 11), (6, 12),   # shoulders to hips
            (11, 12),           # hips
            # legs
            (11, 13), (12, 14), # hips to knees
            (13, 15), (14, 16)  # knees to ankles
        ]

        # Per-style palettes; colors are cycled across persons.
        color_schemes = {
            'classic': [(0, 255, 0), (255, 0, 0), (0, 0, 255)],
            'modern': [(255, 100, 100), (100, 255, 100), (100, 100, 255)],
            'minimal': [(200, 200, 200), (150, 150, 150), (100, 100, 100)]
        }

        colors = color_schemes.get(style, color_schemes['classic'])

        for pose_idx, pose in enumerate(poses):
            color = colors[pose_idx % len(colors)]
            keypoints = pose['keypoints']

            # Bones: drawn only when both endpoints are confidently visible.
            for start_idx, end_idx in skeleton_connections:
                start_kp = keypoints[start_idx]
                end_kp = keypoints[end_idx]

                if start_kp['confidence'] > 0.3 and end_kp['confidence'] > 0.3:
                    start_point = (int(start_kp['x']), int(start_kp['y']))
                    end_point = (int(end_kp['x']), int(end_kp['y']))

                    cv2.line(result, start_point, end_point, color, 2)

            # Joints: dot radius scales with confidence.
            for keypoint in keypoints:
                if keypoint['confidence'] > 0.3:
                    center = (int(keypoint['x']), int(keypoint['y']))

                    radius = max(3, int(5 * keypoint['confidence']))
                    cv2.circle(result, center, radius, color, -1)

                    if show_confidence:
                        conf_text = f"{keypoint['confidence']:.2f}"
                        cv2.putText(result, conf_text,
                                  (center[0] + 5, center[1] - 5),
                                  cv2.FONT_HERSHEY_SIMPLEX, 0.3, color, 1)

            # Person label near the nose; guard against an empty keypoint list.
            if keypoints and keypoints[0]['confidence'] > 0.3:
                nose_pos = (int(keypoints[0]['x']), int(keypoints[0]['y']))
                cv2.putText(result, f"Person {pose['id'] + 1}",
                          (nose_pos[0] - 20, nose_pos[1] - 20),
                          cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)

        return result

    def get_keypoint_names(self) -> List[str]:
        """Return the 17 COCO keypoint names, in index order."""
        return [
            'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear',
            'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',
            'left_wrist', 'right_wrist', 'left_hip', 'right_hip',
            'left_knee', 'right_knee', 'left_ankle', 'right_ankle'
        ]

    def format_pose_data_to_json(self, poses: List[Dict]) -> str:
        """Serialize pose data to a pretty-printed JSON string.

        Args:
            poses: Pose dicts as produced by detect_poses().

        Returns:
            JSON document with version/format metadata, the detector origin
            ('MediaPipe' or 'Simulated'), and named keypoints per person.
        """
        keypoint_names = self.get_keypoint_names()

        formatted_data = {
            'version': '1.0',
            'format': 'COCO_17_keypoints',
            'detector': 'MediaPipe' if self.is_initialized else 'Simulated',
            'persons': []
        }

        for pose in poses:
            person_data = {
                'id': pose['id'],
                'overall_confidence': pose['overall_confidence'],
                'keypoints': []
            }

            for i, kp in enumerate(pose['keypoints']):
                person_data['keypoints'].append({
                    'index': i,
                    'name': keypoint_names[i],
                    'x': kp['x'],
                    'y': kp['y'],
                    'confidence': kp['confidence']
                })

            formatted_data['persons'].append(person_data)

        return json.dumps(formatted_data, indent=2)


# Module-level holder for the shared detector instance (lazy singleton).
_pose_detector_instance = None

def get_pose_detector() -> MediaPipePoseDetector:
    """Return the process-wide pose detector, creating it on first use."""
    global _pose_detector_instance
    if _pose_detector_instance is not None:
        return _pose_detector_instance
    _pose_detector_instance = MediaPipePoseDetector()
    return _pose_detector_instance