"""
姿态检测和行为识别服务
集成MediaPipe/OpenPose等姿态估计模型，实现基于姿态的行为分类
"""
import asyncio
import logging
import cv2
import numpy as np
from datetime import datetime, timedelta
from typing import List, Dict, Optional, Tuple, Any
from uuid import uuid4
import time
from collections import deque, defaultdict
import json

try:
    import mediapipe as mp
    MEDIAPIPE_AVAILABLE = True
except ImportError:
    MEDIAPIPE_AVAILABLE = False
    logging.warning("MediaPipe not available. Pose detection will use mock data.")

try:
    import torch
    import torch.nn as nn
    TORCH_AVAILABLE = True
except ImportError:
    TORCH_AVAILABLE = False
    logging.warning("PyTorch not available. Action recognition will be limited.")

from schemas.pose_detection import (
    PoseEstimation, ActionRecognition, BehaviorPattern, PoseAnalysisResult,
    PoseConfig, ActionConfig, PoseTrackingInfo, BehaviorAlert,
    Keypoint, JointType, ActionType, BehaviorSeverity, PoseModel,
    ModelPerformance
)
from core.config import get_settings

logger = logging.getLogger(__name__)
settings = get_settings()


class MediaPipePoseDetector:
    """MediaPipe-based human pose detector.

    Wraps ``mediapipe.solutions.pose`` for keypoint estimation on single
    frames. If the ``mediapipe`` package is missing or fails to
    initialize, every call falls back to randomly generated mock poses so
    the rest of the pipeline keeps functioning without the native
    dependency.
    """
    
    def __init__(self, config: PoseConfig):
        self.config = config
        self.mp_pose = None          # handle to mediapipe.solutions.pose
        self.pose = None             # the constructed Pose estimator
        self.mp_drawing = None       # mediapipe drawing utils (kept for callers)
        self.is_initialized = False  # True only after successful MediaPipe setup
        
        if MEDIAPIPE_AVAILABLE:
            self._initialize_mediapipe()
    
    def _initialize_mediapipe(self):
        """Create the MediaPipe Pose estimator from the current config.

        On any failure the detector stays in mock mode
        (``is_initialized`` remains False).
        """
        try:
            self.mp_pose = mp.solutions.pose
            self.mp_drawing = mp.solutions.drawing_utils
            
            self.pose = self.mp_pose.Pose(
                static_image_mode=self.config.static_image_mode,
                model_complexity=1,  # 0/1/2 speed-vs-accuracy trade-off; fixed here, not read from config
                smooth_landmarks=self.config.smooth_landmarks,
                enable_segmentation=self.config.enable_segmentation,
                smooth_segmentation=True,
                min_detection_confidence=self.config.min_detection_confidence,
                min_tracking_confidence=self.config.min_tracking_confidence
            )
            
            self.is_initialized = True
            logger.info("MediaPipe pose detector initialized")
            
        except Exception as e:
            logger.error(f"Failed to initialize MediaPipe: {e}")
            self.is_initialized = False
    
    async def detect_poses(self, image: np.ndarray) -> List[PoseEstimation]:
        """Detect poses in a BGR image.

        Returns a list of PoseEstimation objects — at most one when real
        MediaPipe is used (the solutions API is single-person). Falls back
        to mock detections when uninitialized or on any runtime error.
        """
        if not self.is_initialized:
            return await self._mock_pose_detection(image)
        
        try:
            # MediaPipe expects RGB input; OpenCV frames are BGR.
            rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            
            # Run pose estimation on the frame.
            results = self.pose.process(rgb_image)
            
            poses = []
            if results.pose_landmarks:
                pose = await self._convert_mediapipe_pose(results.pose_landmarks, image.shape)
                poses.append(pose)
            
            return poses
            
        except Exception as e:
            logger.error(f"MediaPipe pose detection failed: {e}")
            return await self._mock_pose_detection(image)
    
    async def _convert_mediapipe_pose(self, landmarks, image_shape) -> PoseEstimation:
        """Convert a MediaPipe landmark list into a PoseEstimation.

        Landmark coordinates are normalized [0, 1]; they are scaled to
        pixel coordinates using the frame shape.
        """
        height, width = image_shape[:2]
        keypoints = []
        
        # MediaPipe landmark index -> project JointType (subset of the 33 landmarks).
        landmark_mapping = {
            0: JointType.NOSE,
            2: JointType.LEFT_EYE,
            5: JointType.RIGHT_EYE,
            7: JointType.LEFT_EAR,
            8: JointType.RIGHT_EAR,
            11: JointType.LEFT_SHOULDER,
            12: JointType.RIGHT_SHOULDER,
            13: JointType.LEFT_ELBOW,
            14: JointType.RIGHT_ELBOW,
            15: JointType.LEFT_WRIST,
            16: JointType.RIGHT_WRIST,
            23: JointType.LEFT_HIP,
            24: JointType.RIGHT_HIP,
            25: JointType.LEFT_KNEE,
            26: JointType.RIGHT_KNEE,
            27: JointType.LEFT_ANKLE,
            28: JointType.RIGHT_ANKLE
        }
        
        # Convert the mapped landmarks to pixel-space keypoints.
        # NOTE: MediaPipe `visibility` is used as the confidence proxy.
        for idx, landmark in enumerate(landmarks.landmark):
            if idx in landmark_mapping:
                keypoint = Keypoint(
                    joint_type=landmark_mapping[idx],
                    x=landmark.x * width,
                    y=landmark.y * height,
                    confidence=landmark.visibility,
                    visibility=landmark.visibility
                )
                keypoints.append(keypoint)
        
        # Bounding box from confident keypoints only, padded by a margin
        # and clamped to the frame; falls back to the full frame.
        if keypoints:
            x_coords = [kp.x for kp in keypoints if kp.confidence > 0.5]
            y_coords = [kp.y for kp in keypoints if kp.confidence > 0.5]
            
            if x_coords and y_coords:
                x1, x2 = min(x_coords), max(x_coords)
                y1, y2 = min(y_coords), max(y_coords)
                
                # Expand the box so limbs near the edge are not clipped.
                margin = 20
                bbox = [
                    max(0, x1 - margin),
                    max(0, y1 - margin),
                    min(width, x2 + margin),
                    min(height, y2 + margin)
                ]
            else:
                bbox = [0, 0, width, height]
        else:
            bbox = [0, 0, width, height]
        
        # Overall pose confidence = mean keypoint confidence.
        pose_confidence = np.mean([kp.confidence for kp in keypoints]) if keypoints else 0.0
        
        pose = PoseEstimation(
            pose_id=str(uuid4()),
            # NOTE(review): a fresh random person_id per frame means no
            # cross-frame identity from this detector alone — verify the
            # tracking layer's expectations.
            person_id=f"person_{uuid4().hex[:8]}",
            keypoints=keypoints,
            bbox=bbox,
            pose_confidence=pose_confidence,
            timestamp=datetime.now(),
            model_used=PoseModel.MEDIAPIPE
        )
        
        return pose
    
    async def _mock_pose_detection(self, image: np.ndarray) -> List[PoseEstimation]:
        """Generate random mock poses (fallback when MediaPipe is absent)."""
        height, width = image.shape[:2]
        poses = []
        
        # Generate 1-3 mock poses (bounded by config.max_num_poses).
        num_poses = np.random.randint(1, min(4, self.config.max_num_poses + 1))
        
        for i in range(num_poses):
            # Random skeleton centered somewhere in the middle of the frame.
            keypoints = []
            center_x = np.random.randint(width // 4, 3 * width // 4)
            center_y = np.random.randint(height // 4, 3 * height // 4)
            
            # Canonical upright-person joint offsets (pixels) around the center.
            joint_positions = {
                JointType.NOSE: (center_x, center_y - 80),
                JointType.LEFT_SHOULDER: (center_x - 40, center_y - 40),
                JointType.RIGHT_SHOULDER: (center_x + 40, center_y - 40),
                JointType.LEFT_ELBOW: (center_x - 60, center_y),
                JointType.RIGHT_ELBOW: (center_x + 60, center_y),
                JointType.LEFT_WRIST: (center_x - 80, center_y + 20),
                JointType.RIGHT_WRIST: (center_x + 80, center_y + 20),
                JointType.LEFT_HIP: (center_x - 30, center_y + 40),
                JointType.RIGHT_HIP: (center_x + 30, center_y + 40),
                JointType.LEFT_KNEE: (center_x - 35, center_y + 80),
                JointType.RIGHT_KNEE: (center_x + 35, center_y + 80),
                JointType.LEFT_ANKLE: (center_x - 40, center_y + 120),
                JointType.RIGHT_ANKLE: (center_x + 40, center_y + 120)
            }
            
            for joint_type, (x, y) in joint_positions.items():
                # Add random jitter so successive frames differ.
                x += np.random.randint(-10, 11)
                y += np.random.randint(-10, 11)
                
                # Clamp into the frame.
                x = max(0, min(width, x))
                y = max(0, min(height, y))
                
                keypoint = Keypoint(
                    joint_type=joint_type,
                    x=float(x),
                    y=float(y),
                    confidence=np.random.uniform(0.6, 1.0),
                    visibility=np.random.uniform(0.7, 1.0)
                )
                keypoints.append(keypoint)
            
            # Bounding box with a fixed 20px margin.
            # NOTE(review): unlike the real path, this box is not clamped to
            # the frame and may go slightly negative.
            x_coords = [kp.x for kp in keypoints]
            y_coords = [kp.y for kp in keypoints]
            bbox = [min(x_coords) - 20, min(y_coords) - 20, 
                   max(x_coords) + 20, max(y_coords) + 20]
            
            pose = PoseEstimation(
                pose_id=str(uuid4()),
                person_id=f"mock_person_{i}",
                keypoints=keypoints,
                bbox=bbox,
                pose_confidence=np.random.uniform(0.7, 0.95),
                timestamp=datetime.now(),
                model_used=PoseModel.MEDIAPIPE
            )
            
            poses.append(pose)
        
        return poses
    
    def draw_pose(self, image: np.ndarray, pose: PoseEstimation) -> np.ndarray:
        """Draw keypoints, skeleton, bounding box and confidence on a copy
        of *image* and return the annotated copy (input is not modified)."""
        annotated_image = image.copy()
        
        # Draw confident keypoints as green dots.
        for keypoint in pose.keypoints:
            if keypoint.confidence > self.config.confidence_threshold:
                cv2.circle(annotated_image, 
                          (int(keypoint.x), int(keypoint.y)), 
                          5, (0, 255, 0), -1)
        
        # Skeleton edges to draw between joints.
        connections = [
            (JointType.LEFT_SHOULDER, JointType.RIGHT_SHOULDER),
            (JointType.LEFT_SHOULDER, JointType.LEFT_ELBOW),
            (JointType.LEFT_ELBOW, JointType.LEFT_WRIST),
            (JointType.RIGHT_SHOULDER, JointType.RIGHT_ELBOW),
            (JointType.RIGHT_ELBOW, JointType.RIGHT_WRIST),
            (JointType.LEFT_SHOULDER, JointType.LEFT_HIP),
            (JointType.RIGHT_SHOULDER, JointType.RIGHT_HIP),
            (JointType.LEFT_HIP, JointType.RIGHT_HIP),
            (JointType.LEFT_HIP, JointType.LEFT_KNEE),
            (JointType.LEFT_KNEE, JointType.LEFT_ANKLE),
            (JointType.RIGHT_HIP, JointType.RIGHT_KNEE),
            (JointType.RIGHT_KNEE, JointType.RIGHT_ANKLE)
        ]
        
        # Blue line for every edge whose two endpoints are confident.
        for joint1, joint2 in connections:
            kp1 = pose.get_keypoint(joint1)
            kp2 = pose.get_keypoint(joint2)
            
            if (kp1 and kp2 and 
                kp1.confidence > self.config.confidence_threshold and 
                kp2.confidence > self.config.confidence_threshold):
                cv2.line(annotated_image,
                        (int(kp1.x), int(kp1.y)),
                        (int(kp2.x), int(kp2.y)),
                        (255, 0, 0), 2)
        
        # Yellow bounding box.
        x1, y1, x2, y2 = [int(coord) for coord in pose.bbox]
        cv2.rectangle(annotated_image, (x1, y1), (x2, y2), (0, 255, 255), 2)
        
        # Confidence label above the box.
        cv2.putText(annotated_image, f"Conf: {pose.pose_confidence:.2f}",
                   (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
        
        return annotated_image


class ActionClassifier:
    """Rule-based action classifier over per-person pose sequences.

    Buffers recent ``PoseEstimation`` frames per person, extracts simple
    kinematic features (center velocity, torso tilt, arm positions, knee
    angle) and maps them to an ``ActionType`` via hand-tuned rules. Also
    matches sequences of recognized actions against predefined
    ``BehaviorPattern`` templates.
    """

    # Frames required before attempting a classification (capped by the
    # configured sequence length — see classify_action).
    _MIN_SEQUENCE_FRAMES = 10

    def __init__(self, config: ActionConfig):
        self.config = config
        # person_id -> bounded deque of recent poses, newest last.
        self.pose_sequences: Dict[str, deque] = defaultdict(lambda: deque(maxlen=config.sequence_length))
        # person_id -> recognized actions, oldest first (trimmed to 50).
        self.action_history: Dict[str, List[ActionRecognition]] = defaultdict(list)

        # Predefined behavior templates (fighting, falling, running).
        self.behavior_patterns = self._initialize_behavior_patterns()

    def _initialize_behavior_patterns(self) -> List[BehaviorPattern]:
        """Build the static list of behavior patterns to match against."""
        patterns = [
            BehaviorPattern(
                pattern_id="fighting_pattern",
                pattern_name="打架行为模式",
                action_sequence=[ActionType.PUSHING, ActionType.FIGHTING],
                temporal_constraints={"max_interval": 5.0, "min_duration": 2.0},
                spatial_constraints={"max_distance": 100.0},
                confidence_threshold=0.8,
                severity=BehaviorSeverity.DANGEROUS,
                description="检测推搡后的打架行为"
            ),
            BehaviorPattern(
                pattern_id="falling_pattern",
                pattern_name="跌倒行为模式",
                action_sequence=[ActionType.FALLING],
                temporal_constraints={"min_duration": 1.0},
                spatial_constraints={},
                confidence_threshold=0.7,
                severity=BehaviorSeverity.CONCERNING,
                description="检测跌倒行为"
            ),
            BehaviorPattern(
                pattern_id="running_pattern",
                pattern_name="奔跑行为模式",
                action_sequence=[ActionType.RUNNING],
                temporal_constraints={"min_duration": 3.0},
                spatial_constraints={},
                confidence_threshold=0.6,
                severity=BehaviorSeverity.SUSPICIOUS,
                description="检测持续奔跑行为"
            )
        ]

        return patterns

    async def classify_action(self, pose: PoseEstimation) -> Optional[ActionRecognition]:
        """Classify the current action for the person in *pose*.

        Returns an ``ActionRecognition`` once enough frames are buffered
        and the rule-based confidence clears ``config.confidence_threshold``,
        otherwise ``None``.
        """
        person_id = pose.person_id
        sequence = self.pose_sequences[person_id]
        sequence.append(pose)

        # BUGFIX: the required frame count was a hard-coded 10; if
        # config.sequence_length < 10 the bounded deque could never reach
        # that size and classification silently never ran. Cap the
        # requirement by the configured sequence length.
        min_frames = min(self._MIN_SEQUENCE_FRAMES, self.config.sequence_length)
        if len(sequence) < min_frames:
            return None

        features = self._extract_pose_features(list(sequence))
        action_type, confidence = await self._classify_features(features)

        if confidence <= self.config.confidence_threshold:
            return None

        action = ActionRecognition(
            action_id=str(uuid4()),
            person_id=person_id,
            action_type=action_type,
            confidence=confidence,
            # Approximate start: assumes ~10 FPS over the buffered frames.
            start_time=datetime.now() - timedelta(seconds=len(sequence) * 0.1),
            features=features,
            severity=self._get_action_severity(action_type)
        )

        self.action_history[person_id].append(action)
        # Keep only the 50 most recent actions per person.
        if len(self.action_history[person_id]) > 50:
            self.action_history[person_id] = self.action_history[person_id][-50:]

        return action

    def _extract_pose_features(self, poses: List[PoseEstimation]) -> Dict[str, float]:
        """Extract motion and posture features from a pose sequence.

        All velocities are frame-to-frame pixel displacements of the pose
        center; angles are in radians.
        """
        features: Dict[str, float] = {}

        if len(poses) < 2:
            return features

        # Frame-to-frame center "velocity" and its change ("acceleration").
        velocities = []
        accelerations = []
        for i in range(1, len(poses)):
            prev_center = poses[i - 1].get_center_point()
            curr_center = poses[i].get_center_point()
            velocity = np.hypot(curr_center[0] - prev_center[0],
                                curr_center[1] - prev_center[1])
            velocities.append(velocity)
            # Need two velocities for a difference (simpler than the old
            # always-true index-bound guard).
            if len(velocities) >= 2:
                accelerations.append(abs(velocities[-1] - velocities[-2]))

        # Cast to plain float so the features dict stays JSON-friendly.
        features['avg_velocity'] = float(np.mean(velocities)) if velocities else 0.0
        features['max_velocity'] = float(np.max(velocities)) if velocities else 0.0
        features['velocity_std'] = float(np.std(velocities)) if velocities else 0.0
        features['avg_acceleration'] = float(np.mean(accelerations)) if accelerations else 0.0

        latest_pose = poses[-1]

        # Torso tilt: absolute angle of the shoulder line vs. horizontal.
        left_shoulder = latest_pose.get_keypoint(JointType.LEFT_SHOULDER)
        right_shoulder = latest_pose.get_keypoint(JointType.RIGHT_SHOULDER)
        if left_shoulder and right_shoulder:
            shoulder_angle = np.arctan2(
                right_shoulder.y - left_shoulder.y,
                right_shoulder.x - left_shoulder.x
            )
            features['shoulder_angle'] = float(abs(shoulder_angle))

        # Arm-raised flags: wrist above shoulder in image coordinates
        # (smaller y is higher in the frame).
        left_wrist = latest_pose.get_keypoint(JointType.LEFT_WRIST)
        right_wrist = latest_pose.get_keypoint(JointType.RIGHT_WRIST)
        if left_shoulder and left_wrist:
            features['left_arm_raised'] = 1.0 if left_wrist.y < left_shoulder.y else 0.0
        if right_shoulder and right_wrist:
            features['right_arm_raised'] = 1.0 if right_wrist.y < right_shoulder.y else 0.0

        # Left knee angle (hip-knee-ankle).
        left_hip = latest_pose.get_keypoint(JointType.LEFT_HIP)
        left_knee = latest_pose.get_keypoint(JointType.LEFT_KNEE)
        left_ankle = latest_pose.get_keypoint(JointType.LEFT_ANKLE)
        if left_hip and left_knee and left_ankle:
            v1 = np.array([left_hip.x - left_knee.x, left_hip.y - left_knee.y])
            v2 = np.array([left_ankle.x - left_knee.x, left_ankle.y - left_knee.y])
            norm_product = np.linalg.norm(v1) * np.linalg.norm(v2)
            # BUGFIX: guard against coincident keypoints, which previously
            # produced a divide-by-zero warning and a NaN feature.
            if norm_product > 0:
                cos_angle = np.clip(np.dot(v1, v2) / norm_product, -1.0, 1.0)
                features['left_knee_angle'] = float(np.arccos(cos_angle))

        return features

    async def _classify_features(self, features: Dict[str, float]) -> Tuple[ActionType, float]:
        """Map extracted features to ``(ActionType, confidence)``.

        Simple hand-tuned rule cascade, checked in priority order; speed
        thresholds are in pixels/frame, angles in radians.
        """
        avg_velocity = features.get('avg_velocity', 0)
        max_velocity = features.get('max_velocity', 0)
        left_arm_raised = features.get('left_arm_raised', 0)
        right_arm_raised = features.get('right_arm_raised', 0)
        shoulder_angle = features.get('shoulder_angle', 0)

        # Running: sustained high speed.
        if avg_velocity > 30 and max_velocity > 50:
            return ActionType.RUNNING, 0.8

        # Walking: moderate speed.
        elif avg_velocity > 10 and max_velocity > 20:
            return ActionType.WALKING, 0.7

        # Raised hand: either wrist above its shoulder.
        elif left_arm_raised > 0.5 or right_arm_raised > 0.5:
            return ActionType.RAISING_HAND, 0.75

        # Falling: strong torso tilt (~30 degrees and above).
        elif shoulder_angle > 0.5:
            return ActionType.FALLING, 0.8

        # Fighting: fast movement combined with arm activity.
        elif avg_velocity > 20 and (left_arm_raised > 0.3 or right_arm_raised > 0.3):
            return ActionType.FIGHTING, 0.7

        # Default: standing.
        else:
            return ActionType.STANDING, 0.6

    def _get_action_severity(self, action_type: ActionType) -> BehaviorSeverity:
        """Return the severity level associated with an action type."""
        severity_mapping = {
            ActionType.STANDING: BehaviorSeverity.NORMAL,
            ActionType.WALKING: BehaviorSeverity.NORMAL,
            ActionType.SITTING: BehaviorSeverity.NORMAL,
            ActionType.WAVING: BehaviorSeverity.NORMAL,
            ActionType.RAISING_HAND: BehaviorSeverity.NORMAL,
            ActionType.CLAPPING: BehaviorSeverity.NORMAL,
            ActionType.RUNNING: BehaviorSeverity.SUSPICIOUS,
            ActionType.JUMPING: BehaviorSeverity.SUSPICIOUS,
            ActionType.POINTING: BehaviorSeverity.SUSPICIOUS,
            ActionType.PUSHING: BehaviorSeverity.CONCERNING,
            ActionType.FALLING: BehaviorSeverity.CONCERNING,
            ActionType.FIGHTING: BehaviorSeverity.DANGEROUS,
            ActionType.LYING: BehaviorSeverity.CONCERNING,
            ActionType.UNKNOWN: BehaviorSeverity.NORMAL
        }

        return severity_mapping.get(action_type, BehaviorSeverity.NORMAL)

    async def detect_behavior_patterns(self, person_id: str) -> List[Dict[str, Any]]:
        """Match all known behavior patterns against a person's recent actions."""
        if person_id not in self.action_history:
            return []

        recent_actions = self.action_history[person_id][-10:]  # last 10 actions
        matched_patterns = []

        for pattern in self.behavior_patterns:
            match_result = await self._match_pattern(recent_actions, pattern)
            if match_result:
                matched_patterns.append(match_result)

        return matched_patterns

    async def _match_pattern(self, actions: List[ActionRecognition], pattern: BehaviorPattern) -> Optional[Dict[str, Any]]:
        """Match one behavior pattern against an action list.

        Returns a match-descriptor dict for the first qualifying
        occurrence, or ``None``. Durations are computed from action
        *start* times only.

        BUGFIX: a single-action pattern always spans 0 seconds (start ==
        end), so any ``min_duration > 0`` made it unmatchable; temporal
        constraints are now applied only to multi-action patterns.
        """
        if len(actions) < len(pattern.action_sequence):
            return None

        action_types = [action.action_type for action in actions]
        seq_len = len(pattern.action_sequence)

        # Slide a window over the action-type list looking for the template.
        for i in range(len(action_types) - seq_len + 1):
            if action_types[i:i + seq_len] != pattern.action_sequence:
                continue

            start_time = actions[i].start_time
            end_time = actions[i + seq_len - 1].start_time
            duration = (end_time - start_time).total_seconds()

            min_duration = pattern.temporal_constraints.get('min_duration', 0)
            max_interval = pattern.temporal_constraints.get('max_interval', float('inf'))

            # NOTE(review): max_interval is applied to the whole span, not
            # per consecutive pair — equivalent for 2-action patterns.
            if seq_len > 1 and not (min_duration <= duration <= max_interval):
                continue

            confidences = [actions[j].confidence for j in range(i, i + seq_len)]
            avg_confidence = float(np.mean(confidences))
            if avg_confidence < pattern.confidence_threshold:
                continue

            return {
                'pattern_id': pattern.pattern_id,
                'pattern_name': pattern.pattern_name,
                'confidence': avg_confidence,
                'severity': pattern.severity,
                'start_time': start_time,
                'end_time': end_time,
                'duration': duration,
                'matched_actions': [actions[j].action_id for j in range(i, i + seq_len)]
            }

        return None


class PoseDetectionService:
    """Pose detection and behavior recognition service.

    Orchestrates a ``MediaPipePoseDetector`` and an ``ActionClassifier``:
    per-frame analysis (``analyze_poses``), per-person track bookkeeping,
    a periodic background loop that expires stale tracks and raises
    behavior alerts, and query APIs over the bounded in-memory buffers of
    results and alerts.
    """

    def __init__(self):
        self.pose_config = PoseConfig()
        self.action_config = ActionConfig()

        self.pose_detector = MediaPipePoseDetector(self.pose_config)
        self.action_classifier = ActionClassifier(self.action_config)

        # track_id -> tracking record; results/alerts are bounded buffers.
        self.tracking_info: Dict[str, PoseTrackingInfo] = {}
        self.analysis_results: List[PoseAnalysisResult] = []
        self.behavior_alerts: List[BehaviorAlert] = []

        # Aggregate inference statistics.
        self.performance_metrics = ModelPerformance(
            model_name="MediaPipe Pose + Action Classifier",
            model_type="pose_action_recognition",
            last_updated=datetime.now()
        )

        # Background analysis loop state.
        self._running = False
        self._analysis_task = None
        self.analysis_interval = 1.0  # seconds between periodic analyses

    async def start_service(self):
        """Start the periodic background analysis loop (idempotent)."""
        if self._running:
            return

        self._running = True
        self._analysis_task = asyncio.create_task(self._analysis_loop())
        logger.info("Pose detection service started")

    async def stop_service(self):
        """Stop the background loop and wait for its cancellation."""
        self._running = False
        if self._analysis_task:
            self._analysis_task.cancel()
            try:
                await self._analysis_task
            except asyncio.CancelledError:
                pass
        logger.info("Pose detection service stopped")

    async def _analysis_loop(self):
        """Run ``_periodic_analysis`` every ``analysis_interval`` seconds."""
        while self._running:
            try:
                await self._periodic_analysis()
                await asyncio.sleep(self.analysis_interval)
            except asyncio.CancelledError:
                break
            except Exception as e:
                # Keep the loop alive on unexpected errors; brief back-off.
                logger.error(f"Error in analysis loop: {e}")
                await asyncio.sleep(1.0)

    async def _periodic_analysis(self):
        """Expire stale tracks and raise alerts for dangerous patterns."""
        current_time = datetime.now()

        # Drop tracks idle > 5 min; mark tracks idle > 30 s as inactive.
        expired_tracks = []
        for track_id, track_info in self.tracking_info.items():
            age = (current_time - track_info.last_update).total_seconds()
            if age > 300:
                expired_tracks.append(track_id)
            elif age > 30:
                track_info.is_active = False

        for track_id in expired_tracks:
            del self.tracking_info[track_id]

        # Scan active tracks for matched behavior patterns.
        for track_info in self.tracking_info.values():
            if not track_info.is_active:
                continue
            patterns = await self.action_classifier.detect_behavior_patterns(track_info.person_id)

            # Only dangerous/critical patterns become alerts.
            for pattern in patterns:
                if pattern['severity'] in (BehaviorSeverity.DANGEROUS, BehaviorSeverity.CRITICAL):
                    await self._generate_behavior_alert(track_info, pattern)

    async def _generate_behavior_alert(self, track_info: PoseTrackingInfo, pattern: Dict[str, Any]):
        """Append a ``BehaviorAlert`` for a matched pattern.

        NOTE(review): there is no de-duplication, so the same ongoing
        pattern can generate one alert per analysis cycle — confirm this
        is intended downstream.
        """
        alert = BehaviorAlert(
            alert_id=str(uuid4()),
            camera_id="unknown",  # camera context is not available here yet
            person_ids=[track_info.person_id],
            behavior_type=pattern['pattern_name'],
            severity=pattern['severity'],
            confidence=pattern['confidence'],
            description=f"检测到{pattern['pattern_name']}",
            evidence={
                'pattern_id': pattern['pattern_id'],
                'matched_actions': pattern['matched_actions'],
                'duration': pattern['duration']
            },
            timestamp=datetime.now(),
            duration=pattern['duration']
        )

        self.behavior_alerts.append(alert)

        # Bound the alert buffer to the 1000 most recent entries.
        if len(self.behavior_alerts) > 1000:
            self.behavior_alerts = self.behavior_alerts[-1000:]

        logger.warning(f"Behavior alert generated: {alert.behavior_type} for person {track_info.person_id}")

    async def analyze_poses(self, image: np.ndarray, camera_id: str) -> PoseAnalysisResult:
        """Run full pose/action/behavior analysis on one BGR frame.

        Detects poses, classifies actions per person, updates tracking,
        matches behavior patterns, stores the result in the bounded
        buffer and updates performance metrics.

        Raises: re-raises any exception from the underlying detector or
        classifier after logging it.
        """
        start_time = time.time()

        try:
            # Pose detection.
            poses = await self.pose_detector.detect_poses(image)

            # Per-person action classification + tracking upkeep.
            actions = []
            for pose in poses:
                action = await self.action_classifier.classify_action(pose)
                if action:
                    actions.append(action)

                await self._update_tracking_info(pose, camera_id)

            # Behavior-pattern matching per detected person.
            behavior_patterns = []
            for pose in poses:
                patterns = await self.action_classifier.detect_behavior_patterns(pose.person_id)
                behavior_patterns.extend(patterns)

            processing_time = time.time() - start_time

            result = PoseAnalysisResult(
                analysis_id=str(uuid4()),
                camera_id=camera_id,
                timestamp=datetime.now(),
                poses=poses,
                actions=actions,
                behavior_patterns=behavior_patterns,
                total_persons=len(poses),
                processing_time=processing_time,
                model_info={
                    'pose_model': self.pose_config.model_type.value,
                    'action_classifier': 'rule_based'
                }
            )

            # Store the result, bounding the buffer to 1000 entries.
            self.analysis_results.append(result)
            if len(self.analysis_results) > 1000:
                self.analysis_results = self.analysis_results[-1000:]

            await self._update_performance_metrics(processing_time, len(poses))

            return result

        except Exception as e:
            logger.error(f"Pose analysis failed: {e}")
            raise

    async def _update_tracking_info(self, pose: PoseEstimation, camera_id: str):
        """Create or refresh the tracking record for a pose's person."""
        person_id = pose.person_id

        # Find an existing track for this person, if any.
        track_id = None
        for tid, track_info in self.tracking_info.items():
            if track_info.person_id == person_id:
                track_id = tid
                break

        if track_id is None:
            track_id = f"track_{uuid4().hex[:8]}"
            self.tracking_info[track_id] = PoseTrackingInfo(
                track_id=track_id,
                person_id=person_id,
                start_time=datetime.now(),
                last_update=datetime.now()
            )

        # Refresh the track with this observation.
        track_info = self.tracking_info[track_id]
        track_info.pose_history.append(pose.pose_id)
        track_info.last_update = datetime.now()
        track_info.is_active = True

        # Keep only the 100 most recent pose ids.
        if len(track_info.pose_history) > 100:
            track_info.pose_history = track_info.pose_history[-100:]

    async def _update_performance_metrics(self, processing_time: float, num_poses: int):
        """Fold one inference into the running performance averages.

        ``processing_time`` is in seconds; the stored average is in
        milliseconds. ``num_poses`` is currently unused but kept for
        interface stability.
        """
        self.performance_metrics.total_inferences += 1
        self.performance_metrics.successful_inferences += 1

        # Incremental mean over successful inferences (seconds -> ms).
        total_time = (self.performance_metrics.average_inference_time *
                      (self.performance_metrics.successful_inferences - 1) +
                      processing_time * 1000)
        self.performance_metrics.average_inference_time = total_time / self.performance_metrics.successful_inferences

        self.performance_metrics.last_updated = datetime.now()

    def draw_analysis_result(self, image: np.ndarray, result: PoseAnalysisResult) -> np.ndarray:
        """Render poses and action labels on a copy of *image*."""
        annotated_image = image.copy()

        # Skeletons and bounding boxes for every pose.
        for pose in result.poses:
            annotated_image = self.pose_detector.draw_pose(annotated_image, pose)

        # Action labels and severity-colored boxes.
        for action in result.actions:
            # Locate the pose this action belongs to.
            pose = next((p for p in result.poses if p.person_id == action.person_id), None)
            if pose:
                x1, y1, x2, y2 = [int(coord) for coord in pose.bbox]

                # Action label below the box.
                label = f"{action.action_type.value}: {action.confidence:.2f}"
                cv2.putText(annotated_image, label,
                           (x1, y2 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)

                # Box color by severity (BGR).
                if action.severity == BehaviorSeverity.DANGEROUS:
                    color = (0, 0, 255)  # red
                elif action.severity == BehaviorSeverity.CONCERNING:
                    color = (0, 165, 255)  # orange
                elif action.severity == BehaviorSeverity.SUSPICIOUS:
                    color = (0, 255, 255)  # yellow
                else:
                    color = (0, 255, 0)  # green

                cv2.rectangle(annotated_image, (x1, y1), (x2, y2), color, 3)

        return annotated_image

    async def get_analysis_results(
        self,
        camera_id: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        limit: int = 100
    ) -> List[PoseAnalysisResult]:
        """Query buffered analysis results, newest first.

        BUGFIX: previously sorted ``self.analysis_results`` in place when
        no filter was supplied, silently reordering the service's
        internal buffer; ``sorted()`` now always produces a new list.
        """
        results = self.analysis_results

        # Apply optional filters (each produces a new list).
        if camera_id:
            results = [r for r in results if r.camera_id == camera_id]
        if start_time:
            results = [r for r in results if r.timestamp >= start_time]
        if end_time:
            results = [r for r in results if r.timestamp <= end_time]

        # Newest first, without mutating the shared buffer.
        results = sorted(results, key=lambda x: x.timestamp, reverse=True)
        return results[:limit]

    async def get_behavior_alerts(
        self,
        camera_id: Optional[str] = None,
        severity: Optional[BehaviorSeverity] = None,
        resolved: Optional[bool] = None,
        limit: int = 100
    ) -> List[BehaviorAlert]:
        """Query buffered behavior alerts, newest first.

        BUGFIX: same in-place-sort issue as ``get_analysis_results`` —
        now sorts a copy so the internal alert buffer keeps its order.
        """
        alerts = self.behavior_alerts

        # Apply optional filters (each produces a new list).
        if camera_id:
            alerts = [a for a in alerts if a.camera_id == camera_id]
        if severity:
            alerts = [a for a in alerts if a.severity == severity]
        if resolved is not None:
            alerts = [a for a in alerts if a.resolved == resolved]

        # Newest first, without mutating the shared buffer.
        alerts = sorted(alerts, key=lambda x: x.timestamp, reverse=True)
        return alerts[:limit]

    def get_tracking_info(self) -> List[PoseTrackingInfo]:
        """Return all current tracking records."""
        return list(self.tracking_info.values())

    def get_performance_metrics(self) -> ModelPerformance:
        """Return the aggregate performance metrics object."""
        return self.performance_metrics

    def update_pose_config(self, config: PoseConfig):
        """Replace the pose config and rebuild the detector."""
        self.pose_config = config
        self.pose_detector = MediaPipePoseDetector(config)
        logger.info("Pose configuration updated")

    def update_action_config(self, config: ActionConfig):
        """Replace the action config and rebuild the classifier.

        NOTE(review): this discards all buffered pose sequences and
        action histories — confirm that is acceptable at runtime.
        """
        self.action_config = config
        self.action_classifier = ActionClassifier(config)
        logger.info("Action configuration updated")


# Global singleton instance of the pose detection service.
pose_detection_service = PoseDetectionService()