"""
Template Matching and Scoring Module

Compares patient actions against expert templates using:
1. Soft-DTW feature similarity
2. Geometric metrics (ROM, speed, stability, symmetry)
3. Weighted fusion scoring
"""

import numpy as np
from typing import List, Dict, Tuple, Optional
import logging

try:
    from soft_dtw_cuda import SoftDTW
    SOFTDTW_AVAILABLE = True
except ImportError:
    SOFTDTW_AVAILABLE = False
    logging.warning("soft-dtw-cuda not available, using fallback implementation")

from .skeleton_normalizer import calculate_joint_angles
from .pose_detector import LandmarkIndices

logger = logging.getLogger(__name__)


class SoftDTWMatcher:
    """
    Computes similarity using Soft-DTW (Differentiable Dynamic Time Warping).

    Uses the CUDA-accelerated ``soft_dtw_cuda`` implementation when available,
    otherwise falls back to a pure-Python CPU implementation that supports an
    optional Sakoe-Chiba band constraint.
    """

    def __init__(self, gamma: float = 0.1, bandwidth: Optional[int] = None):
        """
        Initialize Soft-DTW matcher.

        Args:
            gamma: Smoothing parameter (smaller = closer to hard DTW)
            bandwidth: Sakoe-Chiba bandwidth (None = full alignment). A float
                strictly between 0 and 1 is interpreted as a fraction of the
                longer sequence length; any other value is an absolute width.
                Only the CPU fallback honors this constraint.
        """
        self.gamma = gamma
        self.bandwidth = bandwidth

        if SOFTDTW_AVAILABLE:
            self.softdtw = SoftDTW(gamma=gamma, normalize=False)
        else:
            self.softdtw = None

        # Cache for self-distance (used in bias correction).
        # NOTE(review): keyed by a hash of the raw array bytes and never
        # evicted, so it grows without bound over the matcher's lifetime.
        self._self_distance_cache = {}

    def compute_cost_matrix(self,
                           features1: np.ndarray,
                           features2: np.ndarray) -> np.ndarray:
        """
        Compute pairwise cost matrix (1 - cosine similarity).

        Args:
            features1: Feature matrix (n1, dim)
            features2: Feature matrix (n2, dim)

        Returns:
            Cost matrix (n1, n2); values lie in [0, 2].
        """
        # Normalize rows to unit length (epsilon guards all-zero rows)
        norm1 = np.linalg.norm(features1, axis=1, keepdims=True) + 1e-8
        norm2 = np.linalg.norm(features2, axis=1, keepdims=True) + 1e-8

        features1_norm = features1 / norm1
        features2_norm = features2 / norm2

        # Cosine similarity in [-1, 1]
        similarity = features1_norm @ features2_norm.T

        # Convert to cost (distance)
        cost = 1.0 - similarity

        return cost

    def _run_softdtw(self, cost_matrix: np.ndarray) -> float:
        """
        Evaluate Soft-DTW on a precomputed cost matrix.

        Dispatches to the CUDA implementation when available, otherwise to the
        CPU fallback. Shared by align_sequences and _compute_self_distance
        (previously duplicated in both).
        """
        if SOFTDTW_AVAILABLE:
            import torch
            # NOTE(review): soft_dtw_cuda's SoftDTW is normally called with two
            # feature sequences (x, y); passing a single precomputed cost
            # matrix assumes a variant that accepts it — confirm against the
            # installed library. Also assumes a CUDA device is present
            # (.cuda() raises on CPU-only hosts) — TODO confirm.
            cost_tensor = torch.from_numpy(cost_matrix).float().unsqueeze(0).cuda()
            return self.softdtw(cost_tensor).item()
        return self._soft_dtw_fallback(cost_matrix)

    def align_sequences(self,
                       features1: np.ndarray,
                       features2: np.ndarray,
                       use_bias_correction: bool = True) -> float:
        """
        Compute Soft-DTW distance between two feature sequences.

        Args:
            features1: Query features (n1, dim)
            features2: Reference features (n2, dim)
            use_bias_correction: Apply bias correction to ensure distance >= 0

        Returns:
            Soft-DTW distance (guaranteed non-negative if use_bias_correction=True)
        """
        # Compute cost matrix
        cost_matrix = self.compute_cost_matrix(features1, features2)

        logger.debug(f"Cost matrix shape: {cost_matrix.shape}, "
                    f"min: {cost_matrix.min():.4f}, max: {cost_matrix.max():.4f}, "
                    f"mean: {cost_matrix.mean():.4f}")

        raw_distance = self._run_softdtw(cost_matrix)
        # Fixed: the CUDA branch previously logged this at WARNING level;
        # a routine raw-distance value belongs at DEBUG, consistent with the
        # fallback branch.
        logger.debug(f"Soft-DTW raw distance: {raw_distance:.6f}")

        # Apply bias correction (Soft-DTW paper recommendation)
        # Corrected distance: D(X,Y) = DTW(X,Y) - 0.5 * [DTW(X,X) + DTW(Y,Y)]
        if use_bias_correction:
            # Compute or retrieve self-distances
            self_dist1 = self._compute_self_distance(features1)
            self_dist2 = self._compute_self_distance(features2)

            distance = raw_distance - 0.5 * (self_dist1 + self_dist2)

            logger.debug(f"Bias correction: raw={raw_distance:.6f}, "
                        f"self1={self_dist1:.6f}, self2={self_dist2:.6f}, "
                        f"corrected={distance:.6f}")
        else:
            distance = raw_distance

        # Ensure non-negative (numerical precision might still cause tiny negative values)
        distance = max(0.0, distance)

        return distance

    def _compute_self_distance(self, features: np.ndarray) -> float:
        """
        Compute Soft-DTW distance of a sequence with itself (for bias correction).

        Args:
            features: Feature sequence (n, dim)

        Returns:
            Self-distance (non-positive due to the soft-min bias)
        """
        # Use features hash as cache key
        key = hash(features.tobytes())

        if key in self._self_distance_cache:
            return self._self_distance_cache[key]

        # Compute self-cost matrix
        cost_matrix = self.compute_cost_matrix(features, features)
        self_dist = self._run_softdtw(cost_matrix)

        # Cache the result
        self._self_distance_cache[key] = self_dist

        return self_dist

    def _soft_dtw_fallback(self, cost_matrix: np.ndarray) -> float:
        """
        Fallback Soft-DTW implementation (CPU) with Sakoe-Chiba band constraint.

        Args:
            cost_matrix: Cost matrix (n1, n2)

        Returns:
            Soft-DTW distance
        """
        n1, n2 = cost_matrix.shape
        gamma = self.gamma

        # Calculate Sakoe-Chiba band width
        if self.bandwidth is not None:
            if isinstance(self.bandwidth, float) and 0 < self.bandwidth < 1:
                # Percentage of sequence length
                w = int(self.bandwidth * max(n1, n2))
            else:
                # Absolute width
                w = int(self.bandwidth)
            # Widen to at least |n1 - n2| so the diagonal stays reachable
            w = max(abs(n1 - n2), w)
        else:
            w = max(n1, n2)  # No constraint

        # Initialize DP table
        dp = np.full((n1 + 1, n2 + 1), np.inf)
        dp[0, 0] = 0

        # Soft-min via log-sum-exp, shifted by the minimum for numerical
        # stability (inf arguments contribute exp(-inf) = 0)
        def soft_min(*args):
            min_val = min(args)
            exp_sum = sum(np.exp(-(val - min_val) / gamma) for val in args)
            return min_val - gamma * np.log(exp_sum)

        # Fill DP table with Sakoe-Chiba band constraint
        for i in range(1, n1 + 1):
            # Calculate valid j range based on band constraint
            j_start = max(1, i - w)
            j_end = min(n2, i + w)

            for j in range(j_start, j_end + 1):
                cost = cost_matrix[i - 1, j - 1]
                dp[i, j] = cost + soft_min(
                    dp[i - 1, j],      # insertion
                    dp[i, j - 1],      # deletion
                    dp[i - 1, j - 1]   # match
                )

        return dp[n1, n2]


class GeometricScorer:
    """
    Computes geometric metrics: ROM, speed, stability, symmetry, direction.

    All scores are on a 0-100 scale where higher means the patient's motion
    is closer to the expert template. Joint importance is weighted per action
    type via JOINT_WEIGHTS.
    """

    # Joint weights by action type (task-specific). Joints missing from a
    # weight map (or weighted 0.0) are excluded from the overall ROM score.
    JOINT_WEIGHTS = {
        'squat': {
            'left_knee': 0.28, 'right_knee': 0.28,
            'left_hip': 0.22, 'right_hip': 0.22,
            'left_elbow': 0.0, 'right_elbow': 0.0  # Elbow irrelevant for squat
        },
        'stand_up': {
            'left_knee': 0.28, 'right_knee': 0.28,
            'left_hip': 0.22, 'right_hip': 0.22,
            'left_elbow': 0.0, 'right_elbow': 0.0  # Elbow irrelevant for stand_up
        },
        'arms_up': {
            'left_elbow': 0.3, 'right_elbow': 0.3,
            'left_knee': 0.1, 'right_knee': 0.1,
            'left_hip': 0.1, 'right_hip': 0.1  # Legs less important
        },
        'arms_down': {
            'left_elbow': 0.3, 'right_elbow': 0.3,
            'left_knee': 0.1, 'right_knee': 0.1,
            'left_hip': 0.1, 'right_hip': 0.1  # Legs less important
        },
        'default': {  # Uniform weights if action type unknown
            'left_knee': 0.166, 'right_knee': 0.166,
            'left_hip': 0.166, 'right_hip': 0.166,
            'left_elbow': 0.166, 'right_elbow': 0.166
        }
    }

    def __init__(self):
        """Initialize geometric scorer (stateless)."""
        pass

    def normalize_joint_angles(self,
                              angles_sequence: List[Dict[str, float]],
                              reference_min: Dict[str, float],
                              reference_max: Dict[str, float]) -> List[Dict[str, float]]:
        """
        Normalize joint angles using reference ROM (min-max normalization).
        This decouples shape similarity from amplitude difference.

        Args:
            angles_sequence: Sequence of joint angles for each frame
            reference_min: Minimum angles from reference/expert
            reference_max: Maximum angles from reference/expert

        Returns:
            Normalized angle sequence (values in [0, 1] for angles inside the
            reference range; values outside the range fall outside [0, 1])
        """
        normalized_sequence = []

        for angles in angles_sequence:
            normalized_angles = {}
            for joint, angle in angles.items():
                # Joints absent from the reference maps default to [0, 180]
                ref_range = reference_max.get(joint, 180.0) - reference_min.get(joint, 0.0)
                if ref_range > 1e-6:  # Avoid division by zero
                    normalized_angles[joint] = (angle - reference_min.get(joint, 0.0)) / ref_range
                else:
                    normalized_angles[joint] = 0.5  # Neutral if no range
            normalized_sequence.append(normalized_angles)

        return normalized_sequence

    def calculate_rom_score(self,
                           patient_landmarks: List[np.ndarray],
                           template_landmarks: List[np.ndarray],
                           action_type: str = 'default') -> Dict[str, float]:
        """
        Calculate Range of Motion (ROM) score with task-specific joint weights.

        Args:
            patient_landmarks: Patient landmark sequence
            template_landmarks: Template landmark sequence
            action_type: Type of action ('squat', 'stand_up', 'arms_up', 'arms_down')

        Returns:
            Dictionary with ROM scores (0-100) per joint plus an 'overall'
            weighted average.
        """
        # Robustness: an empty sequence previously raised IndexError below
        if not patient_landmarks or not template_landmarks:
            logger.warning("ROM score requested with an empty landmark sequence")
            return {'overall': 0.0}

        patient_angles = [calculate_joint_angles(lm) for lm in patient_landmarks]
        template_angles = [calculate_joint_angles(lm) for lm in template_landmarks]

        # Per-joint angular range (max - min over the sequence)
        patient_ranges = {}
        template_ranges = {}

        for joint in patient_angles[0].keys():
            patient_vals = [angles[joint] for angles in patient_angles]
            template_vals = [angles[joint] for angles in template_angles]

            patient_ranges[joint] = max(patient_vals) - min(patient_vals)
            template_ranges[joint] = max(template_vals) - min(template_vals)

        # Log ROM comparison
        logger.debug(f"ROM comparison (patient_frames={len(patient_landmarks)}, template_frames={len(template_landmarks)}):")
        for joint in patient_ranges.keys():
            logger.debug(f"  {joint}: patient={patient_ranges[joint]:.1f}°, template={template_ranges[joint]:.1f}°, "
                        f"ratio={patient_ranges[joint]/max(template_ranges[joint], 1e-6):.2f}")

        # Get task-specific joint weights
        weights = self.JOINT_WEIGHTS.get(action_type, self.JOINT_WEIGHTS['default'])

        # Piecewise score on the patient/template range ratio:
        # full credit in [0.8, 1.2], progressively steeper penalties outside,
        # saturating at 40 for extreme over-range (ratio > 2.0).
        rom_scores = {}
        for joint in patient_ranges.keys():
            if template_ranges[joint] > 0:
                ratio = patient_ranges[joint] / template_ranges[joint]

                # Balanced scoring formula for rehabilitation patients
                # Philosophy: Accept reasonable variations, penalize large deviations
                if 0.8 <= ratio <= 1.2:
                    score = 100.0
                elif 1.2 < ratio <= 1.5:
                    score = 100 - 60 * (ratio - 1.2)  # 100→82, moderate slope
                elif 1.5 < ratio <= 2.0:
                    score = 82 - 70 * (ratio - 1.5)   # 82→47, steeper slope
                elif ratio > 2.0:
                    score = max(40, 47 - 15 * (ratio - 2.0))  # Saturate at 40
                elif 0.5 <= ratio < 0.8:
                    score = 50 + 50 * (ratio - 0.5) / 0.3
                else:  # ratio < 0.5
                    score = 100 * ratio / 0.5

                rom_scores[joint] = max(0, min(100, float(score)))
            else:
                # Template joint did not move: nothing to compare against
                rom_scores[joint] = 100.0

        # Overall ROM score (weighted average by joint importance)
        weighted_sum = 0.0
        weight_total = 0.0
        for joint, score in rom_scores.items():
            weight = weights.get(joint, 0.0)
            weighted_sum += score * weight
            weight_total += weight

        rom_scores['overall'] = weighted_sum / max(weight_total, 1e-6)

        return rom_scores

    def calculate_speed_score(self,
                             patient_landmarks: List[np.ndarray],
                             template_landmarks: List[np.ndarray],
                             fps: float = 30.0) -> float:
        """
        Calculate speed consistency score.

        Args:
            patient_landmarks: Patient landmark sequence
            template_landmarks: Template landmark sequence
            fps: Frame rate. Currently unused: the score depends only on the
                ratio of mean per-frame displacements, where fps cancels out.
                Kept for interface compatibility.

        Returns:
            Speed score (0-100)
        """
        # Per-frame displacement vectors (frame-to-frame differences)
        patient_velocities = np.diff([lm.flatten() for lm in patient_landmarks], axis=0)
        template_velocities = np.diff([lm.flatten() for lm in template_landmarks], axis=0)

        # Mean per-frame displacement magnitude
        patient_speed = np.mean(np.linalg.norm(patient_velocities, axis=1))
        template_speed = np.mean(np.linalg.norm(template_velocities, axis=1))

        logger.debug(f"Speed: patient={patient_speed:.4f}, template={template_speed:.4f}")

        # Score based on speed ratio
        if template_speed > 0:
            speed_ratio = patient_speed / template_speed
            logger.debug(f"Speed ratio: {speed_ratio:.2f}")

            # Smooth speed scoring for rehabilitation (using Huber-like piecewise function)
            # More tolerant parameters for rehabilitation patients
            alpha = 0.35  # Penalty factor for moderate deviation (was 0.5)
            beta = 0.25   # Penalty factor for large deviation (was 0.8, now much gentler)

            if 0.7 <= speed_ratio <= 1.3:
                # Ideal range - full score
                score = 100.0
            elif (0.5 <= speed_ratio < 0.7) or (1.3 < speed_ratio <= 2.0):
                # Moderate deviation - gentle penalty
                score = 100.0 * (1.0 - alpha * (abs(speed_ratio - 1.0) - 0.3))
            elif 2.0 < speed_ratio <= 6.0:
                # Large deviation - gentler penalty with saturation
                score = 100.0 * max(0.15, 1.0 - beta * (abs(speed_ratio - 1.0) - 1.0))
            elif speed_ratio < 0.5:
                # Very slow - scale linearly
                score = 100.0 * (speed_ratio / 0.5)
            else:  # speed_ratio > 6.0
                # Extremely fast - minimum score
                score = 15.0  # Not 0, give some credit
        else:
            # Template is static: no meaningful reference speed
            score = 50.0

        return max(0, min(100, score))

    def calculate_stability_score(self,
                                  patient_landmarks: List[np.ndarray],
                                  template_landmarks: List[np.ndarray]) -> float:
        """
        Calculate stability score by comparing patient and template jitter.

        Jitter is the RMS of per-frame accelerations (second differences of
        landmark positions); the score decays exponentially with the absolute
        jitter difference between patient and template.

        Args:
            patient_landmarks: Patient landmark sequence
            template_landmarks: Template landmark sequence

        Returns:
            Stability score (0-100, higher means closer to template stability)
        """
        # Calculate patient jitter
        patient_positions = np.array([lm.flatten() for lm in patient_landmarks])
        patient_velocities = np.diff(patient_positions, axis=0)
        patient_accelerations = np.diff(patient_velocities, axis=0)
        patient_jitter = np.sqrt(np.mean(patient_accelerations ** 2))

        # Calculate template jitter
        template_positions = np.array([lm.flatten() for lm in template_landmarks])
        template_velocities = np.diff(template_positions, axis=0)
        template_accelerations = np.diff(template_velocities, axis=0)
        template_jitter = np.sqrt(np.mean(template_accelerations ** 2))

        # Compare jitters - score based on similarity
        jitter_diff = abs(patient_jitter - template_jitter)
        # Use same threshold as before (0.01) for the difference
        score = 100 * np.exp(-jitter_diff / 0.01)

        return max(0, min(100, score))

    @staticmethod
    def _mean_lr_angle_gap(angles_sequence: List[Dict[str, float]], joint: str) -> float:
        """Mean absolute left/right angle difference for one joint over a sequence."""
        left = np.array([a[f'left_{joint}'] for a in angles_sequence])
        right = np.array([a[f'right_{joint}'] for a in angles_sequence])
        return float(np.mean(np.abs(left - right)))

    def calculate_symmetry_score(self,
                                patient_landmarks: List[np.ndarray],
                                template_landmarks: List[np.ndarray]) -> Dict[str, float]:
        """
        Calculate symmetry score by comparing patient and template symmetry.

        For each joint pair (elbow, knee, hip), the mean left/right angle gap
        is computed for both sequences; the score decays exponentially with
        the absolute difference between the two gaps (scale: 30 degrees).

        Args:
            patient_landmarks: Patient landmark sequence
            template_landmarks: Template landmark sequence

        Returns:
            Dictionary with per-joint symmetry scores plus 'overall'
            (0-100, higher means closer to template symmetry)
        """
        patient_angles = [calculate_joint_angles(lm) for lm in patient_landmarks]
        template_angles = [calculate_joint_angles(lm) for lm in template_landmarks]

        symmetry_scores = {}

        # Refactored: the elbow/knee/hip branches were three copies of the
        # same computation; one loop over the joint names replaces them.
        for joint in ('elbow', 'knee', 'hip'):
            patient_gap = self._mean_lr_angle_gap(patient_angles, joint)
            template_gap = self._mean_lr_angle_gap(template_angles, joint)
            symmetry_scores[joint] = 100 * np.exp(-abs(patient_gap - template_gap) / 30.0)

        # Overall symmetry (unweighted mean of the three joint pairs)
        symmetry_scores['overall'] = np.mean([
            symmetry_scores['elbow'],
            symmetry_scores['knee'],
            symmetry_scores['hip']
        ])

        return symmetry_scores

    def calculate_direction_score(self,
                                  patient_landmarks: List[np.ndarray],
                                  template_landmarks: List[np.ndarray],
                                  action_type: str = 'squat') -> float:
        """
        Calculate action direction consistency score.

        Uses different joints based on action type:
        - squat/stand_up: Uses knee angles (leg movement)
        - arms_up/arms_down: Uses elbow angles (arm movement)

        Args:
            patient_landmarks: Patient landmark sequence
            template_landmarks: Template landmark sequence
            action_type: Type of action to determine which joint to check

        Returns:
            Direction score (0-100, higher means same direction). A flat
            progression (zero trend) in either sequence scores 0, since the
            sign product is not strictly positive.
        """
        # Calculate all joint angles for all frames
        patient_angles = [calculate_joint_angles(lm) for lm in patient_landmarks]
        template_angles = [calculate_joint_angles(lm) for lm in template_landmarks]

        # Select joint based on action type
        if action_type in ['arms_up', 'arms_down']:
            # For arm movements, check elbow angle progression
            patient_progression = [
                (angles['left_elbow'] + angles['right_elbow']) / 2
                for angles in patient_angles
            ]
            template_progression = [
                (angles['left_elbow'] + angles['right_elbow']) / 2
                for angles in template_angles
            ]
        else:  # squat, stand_up, or default
            # For leg movements, check knee angle progression
            patient_progression = [
                (angles['left_knee'] + angles['right_knee']) / 2
                for angles in patient_angles
            ]
            template_progression = [
                (angles['left_knee'] + angles['right_knee']) / 2
                for angles in template_angles
            ]

        # Calculate overall trend: positive = increasing, negative = decreasing
        patient_trend = patient_progression[-1] - patient_progression[0]
        template_trend = template_progression[-1] - template_progression[0]

        # Calculate trend correlation (cosine similarity of trends)
        # If both trends have same sign (both increasing or both decreasing), score high
        if patient_trend * template_trend > 0:
            # Same direction
            # Calculate magnitude similarity
            trend_ratio = min(abs(patient_trend), abs(template_trend)) / max(abs(patient_trend), abs(template_trend), 1e-6)
            direction_score = 50 + 50 * trend_ratio  # Score: 50-100
        else:
            # Opposite direction - strong penalty
            direction_score = 0.0

        logger.debug(f"Direction analysis ({action_type}): patient_trend={patient_trend:.1f}°, "
                    f"template_trend={template_trend:.1f}°, score={direction_score:.1f}")

        return float(direction_score)


class ActionMatcher:
    """
    Complete action matching and scoring system.
    """

    def __init__(self,
                 gamma: float = 0.1,
                 sigma: float = 5.0,
                 similarity_weight: float = 0.4,
                 geometric_weight: float = 0.2,
                 attribute_weight: float = 0.4):
        """
        Initialize action matcher.

        Args:
            gamma: Soft-DTW smoothing parameter
            sigma: Exponential decay parameter for similarity score
            similarity_weight: Weight for feature similarity
            geometric_weight: Weight for geometric metrics
            attribute_weight: Weight for attributes (ROM, speed, etc.)
        """
        # Fusion weights and score-shaping parameter
        self.sigma = sigma
        self.similarity_weight = similarity_weight
        self.geometric_weight = geometric_weight
        self.attribute_weight = attribute_weight

        # Sub-scorers: temporal alignment and geometric metrics
        self.softdtw_matcher = SoftDTWMatcher(gamma=gamma)
        self.geometric_scorer = GeometricScorer()

    def compute_similarity_score(self,
                                 patient_features: np.ndarray,
                                 template_features: np.ndarray) -> float:
        """
        Compute feature similarity score using Soft-DTW.

        The bias-corrected Soft-DTW distance is mapped onto a 0-100 scale
        with an exponential decay controlled by ``self.sigma``: identical
        sequences (distance ≈ 0) score ≈ 100, and the score shrinks smoothly
        as the sequences diverge.

        Args:
            patient_features: Patient feature sequence
            template_features: Template feature sequence

        Returns:
            Similarity score (0-100)
        """
        distance = self.softdtw_matcher.align_sequences(
            patient_features, template_features, use_bias_correction=True
        )

        score = 100 * np.exp(-distance / self.sigma)

        logger.debug(f"DTW distance: {distance:.6f}, Similarity: {score:.2f}")

        return float(score)

    def calculate_confidence(self,
                           patient_landmarks: List[np.ndarray],
                           template_landmarks: List[np.ndarray],
                           rom_scores: Dict[str, float],
                           fps: float = 30.0) -> Dict:
        """
        Calculate confidence level for the assessment based on video quality indicators.

        Based on:
        1. Video duration (frame count scaled by fps)
        2. Number of complete action cycles
        3. ROM completeness

        Args:
            patient_landmarks: Patient landmark sequence
            template_landmarks: Template landmark sequence
            rom_scores: ROM scores from geometric scorer
            fps: Frame rate

        Returns:
            Dictionary with confidence metrics: {
                'level': str,  # 'high', 'medium', 'low'
                'score': float,  # 0-100
                'warnings': List[str],
                'factors': Dict  # breakdown of confidence factors
            }
        """
        warnings = []
        factors = {}

        # Factor 1: Video duration
        num_frames = len(patient_landmarks)
        video_duration = num_frames / fps

        # Fixed: the tiers were raw frame-count thresholds (30/50/90) that
        # silently assumed 30 fps even though fps is a parameter; comparing
        # durations keeps the same tiers at 30 fps and scales correctly for
        # any other frame rate.
        if video_duration < 1.0:  # < 1 second
            frame_confidence = 0.0
            warnings.append(f"视频过短 ({video_duration:.1f}秒 < 1秒)")
            factors['frame_count'] = 'very_short'
        elif video_duration < 50.0 / 30.0:  # < ~1.7 seconds
            frame_confidence = 50.0
            warnings.append(f"视频较短 ({video_duration:.1f}秒 < 1.7秒)，置信度较低")
            factors['frame_count'] = 'short'
        elif video_duration < 3.0:  # < 3 seconds
            frame_confidence = 75.0
            factors['frame_count'] = 'medium'
        else:
            frame_confidence = 100.0
            factors['frame_count'] = 'good'

        # Factor 2: Action cycle completeness
        # Estimate cycles by comparing with template duration
        template_duration = len(template_landmarks) / fps
        estimated_cycles = video_duration / max(template_duration, 1.0)

        if estimated_cycles < 0.7:
            cycle_confidence = 0.0
            warnings.append(f"动作周期不完整 (完成度 < 70%)")
            factors['cycle_completeness'] = 'incomplete'
        elif estimated_cycles < 1.0:
            cycle_confidence = 50.0
            warnings.append(f"动作周期部分完成 (完成度 {estimated_cycles*100:.0f}%)")
            factors['cycle_completeness'] = 'partial'
        elif estimated_cycles < 1.3:
            cycle_confidence = 90.0
            factors['cycle_completeness'] = 'complete'
        else:
            cycle_confidence = 100.0
            factors['cycle_completeness'] = 'multiple_cycles'

        # Factor 3: ROM completeness
        # Check if key joints have sufficient ROM
        rom_threshold = 60.0  # Minimum ROM score considered acceptable
        avg_rom = rom_scores.get('overall', 0)

        # Same duration threshold as the 'short' tier above (was num_frames < 50)
        if avg_rom < 30.0 and video_duration < 50.0 / 30.0:
            rom_confidence = 0.0
            warnings.append(f"关节活动范围过小 (ROM {avg_rom:.1f} < 30) 且视频较短，可能是未完成动作")
            factors['rom_completeness'] = 'insufficient'
        elif avg_rom < rom_threshold:
            rom_confidence = 50.0
            factors['rom_completeness'] = 'low'
        else:
            rom_confidence = 100.0
            factors['rom_completeness'] = 'sufficient'

        # Calculate overall confidence score (weighted average)
        confidence_score = (
            frame_confidence * 0.4 +
            cycle_confidence * 0.35 +
            rom_confidence * 0.25
        )

        # Determine confidence level
        if confidence_score >= 80:
            level = 'high'
            level_text = '高'
        elif confidence_score >= 50:
            level = 'medium'
            level_text = '中'
        else:
            level = 'low'
            level_text = '低'

        confidence_result = {
            'level': level,
            'level_text': level_text,
            'score': float(confidence_score),
            'warnings': warnings,
            'factors': factors,
            'details': {
                'video_duration': float(video_duration),
                'num_frames': int(num_frames),
                'estimated_cycles': float(estimated_cycles),
                'avg_rom_score': float(avg_rom)
            }
        }

        # Log confidence assessment
        if warnings:
            logger.warning(f"置信度评估: {level_text} ({confidence_score:.1f}) - {', '.join(warnings)}")
        else:
            logger.info(f"置信度评估: {level_text} ({confidence_score:.1f})")

        return confidence_result

    def compute_comprehensive_score(self,
                                   patient_features: np.ndarray,
                                   template_features: np.ndarray,
                                   patient_landmarks: List[np.ndarray],
                                   template_landmarks: List[np.ndarray],
                                   fps: float = 30.0,
                                   action_type: str = 'default') -> Dict:
        """
        Compute comprehensive scoring combining all metrics.

        Args:
            patient_features: Patient DINO features
            template_features: Template DINO features
            patient_landmarks: Patient normalized landmarks
            template_landmarks: Template normalized landmarks
            fps: Frame rate (for confidence calculation)
            action_type: Action type for task-specific joint weights

        Returns:
            Dictionary with detailed scores including confidence
        """
        # 1. Feature similarity score
        similarity_score = self.compute_similarity_score(
            patient_features, template_features
        )

        # 2. Geometric metrics (with task-specific weights)
        rom_scores = self.geometric_scorer.calculate_rom_score(
            patient_landmarks, template_landmarks, action_type=action_type
        )
        speed_score = self.geometric_scorer.calculate_speed_score(
            patient_landmarks, template_landmarks
        )
        stability_score = self.geometric_scorer.calculate_stability_score(
            patient_landmarks, template_landmarks
        )
        symmetry_scores = self.geometric_scorer.calculate_symmetry_score(
            patient_landmarks, template_landmarks
        )
        direction_score = self.geometric_scorer.calculate_direction_score(
            patient_landmarks, template_landmarks, action_type=action_type
        )

        # 3. Aggregate geometric score (with direction as important factor)
        geometric_score = np.mean([
            rom_scores['overall'],
            speed_score,
            stability_score,
            symmetry_scores['overall'],
            direction_score  # Add direction score
        ])

        logger.debug(f"Geometric components: ROM={rom_scores['overall']:.2f}, "
                    f"speed={speed_score:.2f}, stability={stability_score:.2f}, "
                    f"symmetry={symmetry_scores['overall']:.2f}, direction={direction_score:.2f}")

        # 4. Attribute score (weighted average of ROM, speed, stability, symmetry)
        attribute_score = (
            rom_scores['overall'] * 0.3 +
            speed_score * 0.2 +
            stability_score * 0.25 +
            symmetry_scores['overall'] * 0.25
        )

        # 5. Final weighted score
        base_score = (
            self.similarity_weight * similarity_score +
            self.geometric_weight * geometric_score +
            self.attribute_weight * attribute_score
        )

        # 6. Apply direction penalty (critical for squat vs stand_up)
        # Direction score must be sufficiently high to indicate same action type
        if direction_score < 60:
            # Strong penalty for low direction match (likely different action)
            direction_penalty = direction_score / 100.0  # 0-0.60 multiplier
            final_score = base_score * direction_penalty
            logger.debug(f"Applied direction penalty: {direction_penalty:.2f}x (direction={direction_score:.1f})")
        elif direction_score < 75:
            # Moderate penalty for medium direction match (similar but not identical)
            direction_penalty = 0.6 + (direction_score - 60) / 100.0  # 0.60-0.75 multiplier
            final_score = base_score * direction_penalty
            logger.debug(f"Applied moderate direction penalty: {direction_penalty:.2f}x (direction={direction_score:.1f})")
        else:
            # High direction match - no penalty
            final_score = base_score

        # 7. Calculate confidence
        confidence = self.calculate_confidence(
            patient_landmarks,
            template_landmarks,
            rom_scores,
            fps
        )

        result = {
            'final_score': float(final_score),
            'similarity_score': float(similarity_score),
            'geometric_score': float(geometric_score),
            'attribute_score': float(attribute_score),
            'rom_scores': rom_scores,
            'speed_score': float(speed_score),
            'stability_score': float(stability_score),
            'symmetry_scores': symmetry_scores,
            'direction_score': float(direction_score),
            'confidence': confidence
        }

        logger.info(f"Action score: {final_score:.2f} "
                   f"(sim={similarity_score:.2f}, geo={geometric_score:.2f}, "
                   f"attr={attribute_score:.2f}, confidence={confidence['level_text']})")

        return result

    def compute_phase_scores(self,
                            patient_features: np.ndarray,
                            template_features: np.ndarray,
                            patient_landmarks: List[np.ndarray],
                            template_landmarks: List[np.ndarray],
                            patient_phases: Dict,
                            template_phases: Dict) -> Dict:
        """
        Compute scores for individual phases (squat_down, squat_up).

        Each phase present in BOTH patient_phases and template_phases is
        sliced out of the full sequences and scored independently via
        compute_comprehensive_score. Phases missing from either side are
        omitted from the result entirely; phases whose slice is empty map
        to None.

        Args:
            patient_features: Full patient DINO features sequence
            template_features: Full template DINO features sequence
            patient_landmarks: Full patient landmarks sequence
            template_landmarks: Full template landmarks sequence
            patient_phases: Patient phase boundaries {
                'squat_down': (start, end),
                'squat_up': (start, end),
                'peak': int
            }
            template_phases: Template phase boundaries (same structure)

        Returns:
            Dictionary with phase-specific scores: {
                'squat_down': {...scores...} or None,
                'squat_up': {...scores...} or None
            }
        """
        phase_scores = {}

        # Both phases follow the same slice-and-score procedure; the label
        # is only used to keep the log messages identical to callers' logs.
        for phase_name, label in (('squat_down', 'Squat Down'),
                                  ('squat_up', 'Squat Up')):
            if phase_name in patient_phases and phase_name in template_phases:
                phase_scores[phase_name] = self._score_single_phase(
                    phase_name, label,
                    patient_features, template_features,
                    patient_landmarks, template_landmarks,
                    patient_phases, template_phases
                )

        return phase_scores

    def _score_single_phase(self,
                            phase_name: str,
                            label: str,
                            patient_features: np.ndarray,
                            template_features: np.ndarray,
                            patient_landmarks: List[np.ndarray],
                            template_landmarks: List[np.ndarray],
                            patient_phases: Dict,
                            template_phases: Dict) -> Optional[Dict]:
        """
        Slice out one phase from both sequences and score it.

        Args:
            phase_name: Key into the phase dicts (e.g. 'squat_down')
            label: Human-readable phase name used in log messages
            (remaining args: as in compute_phase_scores)

        Returns:
            Score dict from compute_comprehensive_score, or None when
            either side's phase slice is empty.
        """
        p_start, p_end = patient_phases[phase_name]
        t_start, t_end = template_phases[phase_name]

        p_features = patient_features[p_start:p_end]
        t_features = template_features[t_start:t_end]

        if len(p_features) == 0 or len(t_features) == 0:
            logger.warning(f"{label} phase has insufficient data")
            return None

        score = self.compute_comprehensive_score(
            p_features,
            t_features,
            patient_landmarks[p_start:p_end],
            template_landmarks[t_start:t_end]
        )
        logger.info(f"  {label} phase score: {score['final_score']:.2f}")
        return score
