"""
Action Segmentation Module

Adaptive segmentation that automatically adjusts to different action speeds:
- Estimates action period (T0) via autocorrelation
- Dynamically adjusts all parameters based on T0
- Multi-scale energy fusion (angular velocity + trunk + extremity velocity)
- Handles both slow and fast motions without manual tuning
"""

import numpy as np
from typing import List, Tuple, Optional, Dict
from scipy.signal import find_peaks, savgol_filter, correlate
from scipy.stats import median_abs_deviation
import logging

from .skeleton_normalizer import calculate_joint_angles
from .pose_detector import LandmarkIndices

logger = logging.getLogger(__name__)


class ActionSegmenter:
    """
    Robust action segmenter with hysteresis thresholding and anchor snapping.

    Handles slow motions without flickering by using:
    1. Adaptive hysteresis thresholds (median + MAD)
    2. Duration and silence constraints
    3. Multi-channel energy fusion
    4. Anchor-based phase alignment
    """

    # Maps action type -> pair of joint names whose angle trajectory drives
    # period estimation (see _build_driver_signal); unknown action types
    # fall back to the 'default' entry.
    DRIVER_JOINTS = {
        'squat': ('left_knee', 'right_knee'),
        'stand_up': ('left_knee', 'right_knee'),
        'leg_raise': ('left_hip', 'right_hip'),
        'raise_leg_left': ('left_hip', 'left_knee'),
        'raise_leg_right': ('right_hip', 'right_knee'),
        'arms_up': ('left_elbow', 'right_elbow'),
        'arms_down': ('left_elbow', 'right_elbow'),
        'default': ('left_knee', 'right_knee')
    }

    # Landmark indices fed into the extremity-velocity energy channel for
    # upper-body actions (wrists, elbows, shoulders).
    UPPER_EXTREMITY_INDICES = [
        LandmarkIndices.LEFT_WRIST,
        LandmarkIndices.RIGHT_WRIST,
        LandmarkIndices.LEFT_ELBOW,
        LandmarkIndices.RIGHT_ELBOW,
        LandmarkIndices.LEFT_SHOULDER,
        LandmarkIndices.RIGHT_SHOULDER
    ]

    # Landmark indices used for lower-body actions (ankles, knees, hips).
    LOWER_EXTREMITY_INDICES = [
        LandmarkIndices.LEFT_ANKLE,
        LandmarkIndices.RIGHT_ANKLE,
        LandmarkIndices.LEFT_KNEE,
        LandmarkIndices.RIGHT_KNEE,
        LandmarkIndices.LEFT_HIP,
        LandmarkIndices.RIGHT_HIP
    ]

    def __init__(self,
                 fps: float = 30.0,
                 k_high: float = 2.5,
                 k_low: float = 1.2,
                 L_on: int = 6,
                 L_off: int = 12,
                 min_duration: float = 0.8,
                 min_silence: float = 0.3,
                 smooth_window: float = 0.25,
                 anchor_left_ratio: float = 0.4,
                 anchor_right_ratio: float = 0.6,
                 adaptive: bool = True,
                 min_mad: float = 1e-3,
                 energy_weights: Optional[Dict[str, float]] = None):
        """
        Initialize adaptive action segmenter.

        Args:
            fps: Video frame rate
            k_high: High threshold multiplier (median + k_high * MAD) - will be adapted
            k_low: Low threshold multiplier (median + k_low * MAD) - will be adapted
            L_on: Consecutive frames above high threshold to start segment - will be adapted
            L_off: Consecutive frames below low threshold to end segment - will be adapted
            min_duration: Minimum segment duration in seconds - will be adapted
            min_silence: Minimum silence between segments in seconds - will be adapted
            smooth_window: Smoothing window size in seconds - will be adapted
            anchor_left_ratio: Left proportion from anchor point
            anchor_right_ratio: Right proportion from anchor point
            adaptive: Enable adaptive parameter adjustment based on action frequency
            min_mad: Floor for the MAD spread estimate used by hysteresis
                thresholding (clamped to at least 1e-6)
            energy_weights: Optional weights for the motion-energy channels
                ('angle', 'trunk', 'extremity'); normalized to sum to 1
        """
        self.fps = fps
        self.adaptive = adaptive
        self.min_mad = max(1e-6, min_mad)

        # Default parameters (will be adapted if adaptive=True)
        self.k_high_default = k_high
        self.k_low_default = k_low
        self.L_on_default = L_on
        self.L_off_default = L_off
        self.min_duration_default = min_duration
        self.min_silence_default = min_silence
        self.smooth_window_default = smooth_window

        # Current parameters (updated by adapt_parameters())
        self.k_high = k_high
        self.k_low = k_low
        self.L_on = L_on
        self.L_off = L_off
        self.min_duration_frames = int(min_duration * fps)
        self.min_silence_frames = int(min_silence * fps)
        self.smooth_window = int(smooth_window * fps) | 1  # OR with 1 forces an odd window
        self.velocity_smooth_window = max(5, self.smooth_window)

        self.anchor_left_ratio = anchor_left_ratio
        self.anchor_right_ratio = anchor_right_ratio

        # Normalize channel weights once so downstream fusion can trust them.
        default_weights = {'angle': 0.6, 'trunk': 0.25, 'extremity': 0.15}
        self.energy_weights = self._normalize_weights(
            energy_weights if energy_weights is not None else default_weights
        )

        # Lazy %-style args avoid formatting cost when INFO is disabled.
        logger.info("ActionSegmenter initialized: adaptive=%s, k_high=%s, k_low=%s, "
                    "L_on=%s, L_off=%s, min_duration=%ss",
                    adaptive, k_high, k_low, L_on, L_off, min_duration)

    @staticmethod
    def _normalize_weights(weights: Dict[str, float]) -> Dict[str, float]:
        """Sanitize fusion weights: clip negatives, then scale them to sum to 1."""
        clipped = {name: max(0.0, float(value)) for name, value in weights.items()}
        mass = sum(clipped.values())
        if mass > 0:
            return {name: value / mass for name, value in clipped.items()}
        # Degenerate input: empty dict gets a single default channel,
        # an all-zero dict becomes a uniform distribution.
        if not clipped:
            return {'angle': 1.0}
        uniform = 1.0 / len(clipped)
        return {name: uniform for name in clipped}

    @staticmethod
    def _safe_savgol(signal: np.ndarray, window: int, polyorder: int = 2) -> np.ndarray:
        """Savitzky-Golay smoothing that degrades gracefully on short input.

        Returns the signal untouched whenever a valid filter window cannot
        be formed (non-1D input, fewer than 3 samples, or a window that
        cannot be made odd and >= 3 within the signal length).
        """
        if signal.ndim != 1 or len(signal) < 3:
            return signal

        win = max(polyorder + 2, window)
        win -= (win + 1) % 2  # round even windows down to the next odd value

        # savgol_filter requires window <= len(signal); cap while staying odd.
        n = len(signal)
        longest_odd = n if n % 2 == 1 else n - 1
        win = min(win, longest_odd)
        if win < 3:
            return signal

        try:
            return savgol_filter(signal, win, polyorder)
        except ValueError:
            return signal

    def _get_driver_joint_names(self, action_type: str) -> Tuple[str, str]:
        """Look up the driver-joint pair for this action, falling back to 'default'."""
        try:
            return self.DRIVER_JOINTS[action_type]
        except KeyError:
            return self.DRIVER_JOINTS['default']

    def _build_driver_signal(self,
                             landmarks_sequence: List[np.ndarray],
                             action_type: str) -> np.ndarray:
        """Build the 1D driver signal: mean driver-joint angle per frame.

        Frames without landmarks repeat the most recent value so the signal
        stays continuous; 90 degrees is used before any valid frame appears.
        """
        joint_names = self._get_driver_joint_names(action_type)
        previous = 90.0
        values = []

        for landmarks in landmarks_sequence:
            current = previous
            if landmarks is not None:
                angles = calculate_joint_angles(landmarks)
                picked = [angles.get(name, previous) for name in joint_names]
                if picked:
                    current = float(np.mean(picked))
                previous = current
            values.append(current)

        return np.array(values)

    def _extremity_indices(self, action_type: str) -> List[int]:
        """Select which limb's landmarks feed the extremity-velocity channel."""
        if action_type in ('arms_up', 'arms_down', 'raise_arm', 'arm_circle'):
            return self.UPPER_EXTREMITY_INDICES
        return self.LOWER_EXTREMITY_INDICES

    def _compute_extremity_velocity(self,
                                    landmarks_sequence: List[np.ndarray],
                                    action_type: str) -> np.ndarray:
        """Mean translational speed of the selected extremity joints per frame.

        Missing frames reuse the last observed coordinates; frames before the
        first detection contribute zeros. Returns an empty array when fewer
        than two frames are available.
        """
        indices = self._extremity_indices(action_type)
        fill = np.zeros((len(indices), 3))
        frames = []

        for landmarks in landmarks_sequence:
            if landmarks is not None:
                fill = landmarks[indices, :3]
            frames.append(fill)

        if len(frames) < 2:
            return np.array([])

        stacked = np.stack(frames)                 # (T, J, 3)
        deltas = np.diff(stacked, axis=0) * self.fps
        return np.linalg.norm(deltas, axis=2).mean(axis=1)

    def calculate_angular_velocities(self,
                                    landmarks_sequence: List[np.ndarray]) -> np.ndarray:
        """
        Calculate angular velocities for key joints across frames.

        Frames without landmarks repeat the last valid angle vector (or zeros
        if none has been seen yet). Each joint trajectory is smoothed before
        differentiation to suppress jitter.

        Args:
            landmarks_sequence: List of normalized landmarks (T, 33, 3)

        Returns:
            Angular velocities array (T-1, num_joints) in degrees per second
        """
        joint_order = ('left_elbow', 'right_elbow', 'left_knee',
                       'right_knee', 'left_hip', 'right_hip')

        rows = []
        for landmarks in landmarks_sequence:
            if landmarks is not None:
                angles = calculate_joint_angles(landmarks)
                rows.append(np.array([angles.get(name, 0) for name in joint_order]))
            elif rows:
                # Forward-fill with the previous valid angle vector
                rows.append(rows[-1])
            else:
                rows.append(np.zeros(6))

        angles_array = np.array(rows)  # (T, 6)

        # Smooth each joint channel independently before differencing
        for col in range(angles_array.shape[1]):
            angles_array[:, col] = self._safe_savgol(
                angles_array[:, col], self.velocity_smooth_window
            )

        # Finite differences scaled by fps -> degrees per second
        return np.diff(angles_array, axis=0) * self.fps

    def estimate_T0(self,
                    landmarks_sequence: List[np.ndarray],
                    action_type: str = 'default') -> float:
        """
        Estimate action period T0 using multiple methods for robustness.

        Methods:
        1. Peak detection in knee angle signal
        2. Autocorrelation of knee angle
        3. Fallback based on video length

        Args:
            landmarks_sequence: List of normalized landmarks

        Returns:
            Estimated period T0 in seconds
        """
        # Build action-specific driver signal (angle or surrogate)
        angle = self._build_driver_signal(landmarks_sequence, action_type)
        video_duration = len(angle) / self.fps

        # Method 1: Peak detection
        # Smooth the signal first
        if len(angle) >= 5:
            from scipy.signal import savgol_filter
            try:
                window = min(11, len(angle) if len(angle) % 2 == 1 else len(angle) - 1)
                if window >= 5:
                    angle_smooth = savgol_filter(angle, window, 2)
                else:
                    angle_smooth = angle
            except:
                angle_smooth = angle
        else:
            angle_smooth = angle

        # Find peaks (for squat: valleys; for leg raise: peaks)
        # We look for both valleys and peaks
        peaks, _ = find_peaks(angle_smooth, distance=int(0.3 * self.fps), prominence=10)
        valleys, _ = find_peaks(-angle_smooth, distance=int(0.3 * self.fps), prominence=10)

        # Estimate T0 from peak spacing
        T0_from_peaks = None
        if len(peaks) >= 2:
            peak_distances = np.diff(peaks) / self.fps
            T0_from_peaks = np.median(peak_distances)
            logger.debug(f"T0 from peaks: {T0_from_peaks:.2f}s (found {len(peaks)} peaks)")

        T0_from_valleys = None
        if len(valleys) >= 2:
            valley_distances = np.diff(valleys) / self.fps
            T0_from_valleys = np.median(valley_distances)
            logger.debug(f"T0 from valleys: {T0_from_valleys:.2f}s (found {len(valleys)} valleys)")

        # Method 2: Autocorrelation (as backup)
        x = angle - np.mean(angle)
        if len(x) > 10:
            ac = correlate(x, x, mode='full')
            ac = ac[len(ac) // 2:]  # Keep only positive lags
            ac[0] = 0  # Ignore zero lag

            # Find first peak within reasonable range
            min_lag = int(0.3 * self.fps)  # At least 0.3s
            max_lag = min(int(3.0 * self.fps), len(ac) - 1)  # At most 3s

            T0_from_autocorr = None
            if max_lag > min_lag:
                search_range = ac[min_lag:max_lag]
                if len(search_range) > 0 and np.max(search_range) > 0:
                    peak_idx = np.argmax(search_range) + min_lag
                    T0_from_autocorr = peak_idx / self.fps
                    logger.debug(f"T0 from autocorr: {T0_from_autocorr:.2f}s")
        else:
            T0_from_autocorr = None

        # Choose best estimate
        candidates = []
        if T0_from_peaks is not None and 0.5 <= T0_from_peaks <= 3.0:
            candidates.append(T0_from_peaks)
        if T0_from_valleys is not None and 0.5 <= T0_from_valleys <= 3.0:
            candidates.append(T0_from_valleys)
        if T0_from_autocorr is not None and 0.5 <= T0_from_autocorr <= 3.0:
            candidates.append(T0_from_autocorr)

        if candidates:
            T0 = np.median(candidates)
            logger.info(f"Estimated action period T0 = {T0:.2f}s (from {len(candidates)} methods)")
        else:
            # Fallback: assume video contains 1-3 complete actions
            # For short videos, assume medium speed
            if video_duration < 3.0:
                T0 = video_duration  # Entire video is one action
            elif video_duration < 6.0:
                T0 = video_duration / 2  # Two actions
            else:
                T0 = 1.5  # Default medium speed
            logger.warning(f"Could not reliably estimate T0, using fallback: {T0:.2f}s (video duration: {video_duration:.1f}s)")

        # Clamp to reasonable range
        T0 = max(0.5, min(3.0, T0))

        logger.info(f"Final T0 = {T0:.2f}s")
        return T0

    def adapt_parameters(self, T0: float):
        """
        Dynamically adjust all parameters based on estimated period T0.

        For slow actions (T0 > 1.5s): lower thresholds, wider windows.
        For fast actions (T0 < 0.8s): higher thresholds, tighter windows.

        Args:
            T0: Estimated action period in seconds
        """
        # Classify action speed and pick hysteresis multipliers accordingly
        if T0 > 1.5:
            # Slow action
            self.k_high = 1.2
            self.k_low = 0.5
            logger.info(f"Adapting for SLOW action (T0={T0:.2f}s)")
        elif T0 < 0.8:
            # Fast action
            self.k_high = 2.0
            self.k_low = 1.0
            logger.info(f"Adapting for FAST action (T0={T0:.2f}s)")
        else:
            # Medium action
            self.k_high = 1.5
            self.k_low = 0.8
            logger.info(f"Adapting for MEDIUM action (T0={T0:.2f}s)")

        # Adjust temporal windows based on T0
        self.smooth_window = int(0.3 * T0 * self.fps) | 1  # Ensure odd
        # Keep the velocity-smoothing window consistent with the adapted
        # smooth_window; previously it silently retained its __init__ value.
        self.velocity_smooth_window = max(5, self.smooth_window)
        self.L_on = max(3, int(0.1 * T0 * self.fps))
        self.L_off = max(6, int(0.2 * T0 * self.fps))
        self.min_duration_frames = int(0.7 * T0 * self.fps)
        self.min_silence_frames = int(0.3 * T0 * self.fps)

        logger.info(f"Adapted parameters: k_high={self.k_high}, k_low={self.k_low}, "
                   f"L_on={self.L_on}, L_off={self.L_off}, "
                   f"min_duration={self.min_duration_frames}f, "
                   f"min_silence={self.min_silence_frames}f")

    def compute_motion_energy(self,
                              landmarks_sequence: List[np.ndarray],
                              action_type: str = 'default') -> np.ndarray:
        """
        Compute multi-channel motion energy (ME) for robust segmentation.

        ME = α·||θ̇||₂ + β·|trunk_angular_velocity| + γ·extremity_speed

        where (α, β, γ) come from self.energy_weights.

        Args:
            landmarks_sequence: List of normalized landmarks
            action_type: Chooses which extremity joints contribute

        Returns:
            Motion energy array (T-1,)
        """
        # Channel 1: L2 norm of joint angular velocities across all joints
        joint_velocities = self.calculate_angular_velocities(landmarks_sequence)
        angle_energy = np.linalg.norm(joint_velocities, axis=1)

        # Channel 2: absolute trunk angular velocity (mean of the two hips),
        # forward-filling frames without landmarks
        hip_series = []
        for landmarks in landmarks_sequence:
            if landmarks is not None:
                angles = calculate_joint_angles(landmarks)
                hip_series.append((angles.get('left_hip', 0) + angles.get('right_hip', 0)) / 2)
            else:
                hip_series.append(hip_series[-1] if hip_series else 0)
        trunk_velocity = np.abs(np.diff(np.array(hip_series)) * self.fps)

        # Channel 3: extremity translational speed (captures subtle ROM with
        # low angular change)
        extremity_velocity = self._compute_extremity_velocity(landmarks_sequence, action_type)

        channels = {
            'angle': angle_energy,
            'trunk': trunk_velocity,
            'extremity': extremity_velocity,
        }

        target_len = len(angle_energy)

        def _fit(series: np.ndarray) -> np.ndarray:
            """Edge-pad or trim a channel to exactly target_len samples."""
            if len(series) == target_len:
                return series
            if len(series) == 0:
                return np.zeros(target_len)
            if len(series) < target_len:
                return np.pad(series, (0, target_len - len(series)), mode='edge')
            return series[:target_len]

        # Weighted fusion of the channels
        fused = np.zeros(target_len)
        for channel, weight in self.energy_weights.items():
            fused += weight * _fit(channels.get(channel, np.zeros(target_len)))

        # Final smoothing to suppress isolated spikes
        return self._safe_savgol(fused, self.smooth_window)

    def detect_with_hysteresis(self, motion_energy: np.ndarray) -> List[Tuple[int, int]]:
        """
        Detect segments using hysteresis thresholding.

        Uses robust baseline (median + MAD) with high/low thresholds
        to avoid flickering in slow motions.

        Args:
            motion_energy: Motion energy signal

        Returns:
            List of (start, end) frame tuples (end exclusive)
        """
        # Guard: np.median / np.percentile below raise on an empty array
        if len(motion_energy) == 0:
            logger.warning("Empty motion energy signal, no segments detected")
            return []

        # 1. Compute robust thresholds
        baseline = np.median(motion_energy)
        mad = median_abs_deviation(motion_energy)
        # A near-zero MAD (flat signal) would collapse both thresholds onto
        # the baseline; fall back to a std-based spread with a floor.
        if mad < self.min_mad:
            mad = max(self.min_mad, 0.1 * np.std(motion_energy))

        T_high = baseline + self.k_high * mad
        T_low = baseline + self.k_low * mad

        # Debug: show motion energy distribution
        me_max = np.max(motion_energy)
        me_75 = np.percentile(motion_energy, 75)
        me_90 = np.percentile(motion_energy, 90)

        logger.info(f"Motion energy stats: min={np.min(motion_energy):.2f}, "
                   f"median={baseline:.2f}, 75th={me_75:.2f}, 90th={me_90:.2f}, max={me_max:.2f}")
        logger.info(f"Hysteresis thresholds: T_high={T_high:.2f}, T_low={T_low:.2f} "
                   f"(baseline={baseline:.2f}, MAD={mad:.2f})")

        # Check if thresholds are too high
        if T_high > me_90:
            logger.warning(f"T_high ({T_high:.2f}) exceeds 90th percentile ({me_90:.2f}), "
                          "may miss actions. Consider lowering k_high or using percentile-based thresholds.")

        # 2. State machine with hysteresis: L_on consecutive frames above
        # T_high switch a segment on; L_off consecutive frames below T_low
        # switch it off. Values in the dead zone freeze both counters.
        active = np.zeros(len(motion_energy), dtype=bool)
        on_run = 0
        off_run = 0
        current_state = False

        for t, e in enumerate(motion_energy):
            if e > T_high:
                on_run += 1
                off_run = 0
            elif e < T_low:
                off_run += 1
                on_run = 0
            # else: in dead zone, counters stay

            # State transitions
            if not current_state and on_run >= self.L_on:
                current_state = True  # Start segment
                on_run = 0
            elif current_state and off_run >= self.L_off:
                current_state = False  # End segment
                off_run = 0

            active[t] = current_state

        # 3. Extract segments from active mask
        segments = []
        start = None
        for t, is_active in enumerate(active):
            if is_active and start is None:
                start = t
            elif not is_active and start is not None:
                segments.append((start, t))
                start = None
        if start is not None:
            segments.append((start, len(active)))

        logger.info(f"Hysteresis detected {len(segments)} raw segments")

        # 4. Apply duration and silence constraints
        return self._apply_duration_constraints(segments, motion_energy)

    def _apply_duration_constraints(self,
                                   segments: List[Tuple[int, int]],
                                   motion_energy: np.ndarray) -> List[Tuple[int, int]]:
        """
        Apply minimum-duration and minimum-silence constraints.

        Segments shorter than min_duration_frames are dropped; survivors
        separated by less than min_silence_frames are merged into one.

        Args:
            segments: Raw segments
            motion_energy: Motion energy signal (currently unused; kept for
                interface stability)

        Returns:
            Filtered segments
        """
        if not segments:
            return []

        # Pass 1: drop segments that are too short to be a real action
        long_enough = []
        for start, end in segments:
            if end - start < self.min_duration_frames:
                logger.debug(f"Removed short segment [{start}, {end}) - "
                           f"duration {end-start} < {self.min_duration_frames}")
            else:
                long_enough.append((start, end))

        if not long_enough:
            return []

        # Pass 2: merge neighbours whose gap is below the silence threshold
        merged = [long_enough[0]]
        for start, end in long_enough[1:]:
            prev_start, prev_end = merged[-1]
            silence = start - prev_end
            if silence < self.min_silence_frames:
                merged[-1] = (prev_start, end)
                logger.debug(f"Merged segments: silence {silence} < {self.min_silence_frames}")
            else:
                merged.append((start, end))

        logger.info(f"After constraints: {len(merged)} segments")
        return merged

    def find_anchor_point(self,
                         landmarks_sequence: List[np.ndarray],
                         start: int,
                         end: int,
                         action_type: str = 'squat') -> int:
        """
        Find semantic anchor point within segment.

        For squat/stand_up: knee angle valley (lowest point).
        For other actions (e.g. leg raise): hip angle peak.

        Args:
            landmarks_sequence: Full landmark sequence
            start: Segment start frame
            end: Segment end frame (exclusive)
            action_type: Type of action

        Returns:
            Anchor frame index (absolute)
        """
        # The two branches previously duplicated the angle-extraction loop;
        # they differ only in joints, fill value and extremum direction.
        if action_type in ('squat', 'stand_up'):
            joints, fill, pick = ('left_knee', 'right_knee'), 90.0, np.argmin
        else:
            joints, fill, pick = ('left_hip', 'right_hip'), 0.0, np.argmax

        series = self._mean_joint_series(landmarks_sequence[start:end], joints, fill)

        # Smooth before locating the extremum. _safe_savgol guards degenerate
        # windows (smooth_window <= polyorder) that would make a raw
        # savgol_filter call raise ValueError.
        if len(series) >= self.smooth_window:
            series = self._safe_savgol(series, self.smooth_window)

        return start + int(pick(series))

    def _mean_joint_series(self,
                           segment_landmarks: List[np.ndarray],
                           joints: Tuple[str, str],
                           fill: float) -> np.ndarray:
        """Per-frame mean of two joint angles, forward-filling missing frames."""
        values = []
        for lm in segment_landmarks:
            if lm is not None:
                a = calculate_joint_angles(lm)
                values.append((a.get(joints[0], fill) + a.get(joints[1], fill)) / 2)
            elif values:
                values.append(values[-1])
            else:
                values.append(fill)
        return np.array(values)

    def snap_to_anchor(self,
                      landmarks_sequence: List[np.ndarray],
                      start: int,
                      end: int,
                      action_type: str = 'squat') -> Tuple[int, int]:
        """
        Snap segment boundaries to the semantic anchor point.

        Re-centres the segment around its anchor (e.g. the squat's lowest
        point), allocating anchor_left_ratio of the original length before
        it and anchor_right_ratio after it, clipped to the sequence bounds.

        Args:
            landmarks_sequence: Full landmark sequence
            start: Original start frame
            end: Original end frame
            action_type: Action type

        Returns:
            Adjusted (start, end) tuple
        """
        anchor = self.find_anchor_point(landmarks_sequence, start, end, action_type)

        span = end - start
        left_span = int(self.anchor_left_ratio * span)
        right_span = int(self.anchor_right_ratio * span)

        snapped_start = max(0, anchor - left_span)
        snapped_end = min(len(landmarks_sequence), anchor + right_span)

        logger.debug(f"Anchor snapping: [{start}, {end}) -> [{snapped_start}, {snapped_end}) "
                    f"(anchor at {anchor})")

        return (snapped_start, snapped_end)

    def segment_sequence(self,
                         landmarks_sequence: List[np.ndarray],
                         action_type: str = 'default') -> List[Tuple[int, int]]:
        """
        Main entry point: adaptive segmentation that auto-adjusts to action speed.

        Pipeline: (optionally) estimate the action period and adapt the
        thresholds, compute fused motion energy, then run hysteresis detection.

        Args:
            landmarks_sequence: List of normalized landmarks
            action_type: Semantic label to tune adaptive heuristics

        Returns:
            List of (start, end) frame tuples
        """
        n_frames = len(landmarks_sequence)
        video_duration = n_frames / self.fps
        logger.info(f"Segmenting sequence with {n_frames} frames "
                   f"({video_duration:.1f}s, adaptive={self.adaptive})")

        # Warn early when the clip is unlikely to hold a full action cycle
        if video_duration < 1.5:
            logger.warning(f"⚠️ Video is very short ({video_duration:.1f}s < 1.5s). "
                          "May not contain complete action cycles. Consider recording longer video.")
        elif video_duration < 3.0:
            logger.warning(f"⚠️ Video is short ({video_duration:.1f}s < 3.0s). "
                          "Confidence may be lower for single-cycle actions.")

        # Step 0: Estimate action period and adapt parameters
        if self.adaptive:
            T0 = self.estimate_T0(landmarks_sequence, action_type=action_type)

            if video_duration < 1.2 * T0:
                logger.warning(f"⚠️ Video duration ({video_duration:.1f}s) is less than 1.2 × estimated period ({T0:.2f}s). "
                              "Action may be incomplete. Scoring confidence will be reduced.")

            self.adapt_parameters(T0)

        # Steps 1-2: fused motion energy, then hysteresis detection
        energy = self.compute_motion_energy(landmarks_sequence, action_type=action_type)
        segments = self.detect_with_hysteresis(energy)

        # Step 3: Validate segments
        if not segments:
            logger.error("❌ No action segments detected. Video may be too short or lack sufficient motion.")
        elif len(segments) == 1 and video_duration > 3.0:
            logger.warning(f"⚠️ Only 1 segment detected in {video_duration:.1f}s video. "
                          "Expected multiple actions. Consider adjusting parameters or re-recording.")

        logger.info(f"Final segmentation: {len(segments)} segments")
        for idx, (seg_start, seg_end) in enumerate(segments):
            seg_seconds = (seg_end - seg_start) / self.fps
            logger.info(f"  Segment {idx}: frames [{seg_start}, {seg_end}), length={seg_end-seg_start}, duration={seg_seconds:.2f}s")

        return segments

    def segment_with_phases(self,
                           landmarks_sequence: List[np.ndarray],
                           fps: float = 30.0,
                           action_type: str = 'default') -> List[dict]:
        """
        Segment sequence and detect phases within each segment.

        Args:
            landmarks_sequence: List of normalized landmarks
            fps: Frame rate for phase detection
            action_type: Semantic label forwarded to segment_sequence

        Returns:
            List of dicts containing segment info and phases:
            [
                {
                    'start': int,
                    'end': int,
                    'duration_frames': int,
                    'duration_seconds': float,
                    'phases': {
                        'preparation': (start, end),
                        'execution': (start, end) or None,
                        'peak': int or None,
                        'return': (start, end) or None,
                        'squat_down': (start, end),  # descent phase
                        'squat_up': (start, end)     # rise phase
                    }
                },
                ...
            ]
        """
        # Step 1: Detect action segments
        segments = self.segment_sequence(landmarks_sequence, action_type=action_type)

        # Step 2: Detect phases within each segment
        phase_detector = PhaseDetector()
        segments_with_phases = []

        for start, end in segments:
            # Extract segment landmarks
            segment_landmarks = landmarks_sequence[start:end]

            # Detect phases within this segment
            phases = phase_detector.detect_phases(segment_landmarks, fps)

            # Convert relative indices to absolute indices. PhaseDetector
            # returns None for phases it cannot resolve (segments shorter
            # than 4 frames); previously `start + None` raised a TypeError.
            absolute_phases = {}
            for phase_name, phase_value in phases.items():
                if phase_value is None:
                    absolute_phases[phase_name] = None
                elif isinstance(phase_value, tuple):
                    absolute_phases[phase_name] = (
                        int(start + phase_value[0]),
                        int(start + phase_value[1])
                    )
                else:  # peak is a single index
                    absolute_phases[phase_name] = int(start + phase_value)

            # Squat down: from start to peak; squat up: from peak to end.
            # When no peak was detected, split the segment at its midpoint.
            peak_idx = absolute_phases.get('peak')
            if peak_idx is None:
                peak_idx = start + (end - start) // 2
            absolute_phases['squat_down'] = (int(start), int(peak_idx))
            absolute_phases['squat_up'] = (int(peak_idx), int(end))

            segment_info = {
                'start': start,
                'end': end,
                'duration_frames': end - start,
                'duration_seconds': (end - start) / fps,
                'phases': absolute_phases
            }

            segments_with_phases.append(segment_info)

        logger.info(f"Detected {len(segments_with_phases)} segments with phase information")
        for i, seg in enumerate(segments_with_phases):
            logger.info(f"  Segment {i+1}: frames {seg['start']}-{seg['end']} "
                       f"(squat_down: {seg['phases']['squat_down']}, "
                       f"squat_up: {seg['phases']['squat_up']})")

        return segments_with_phases

    def extract_segments(self,
                        data_sequence: List,
                        segments: List[Tuple[int, int]]) -> List[List]:
        """
        Slice out the data corresponding to each detected segment.

        Args:
            data_sequence: Full sequence of data (landmarks, features, etc.)
            segments: List of (start, end) frame indices (end exclusive)

        Returns:
            List of data slices, one per (start, end) pair
        """
        return [data_sequence[start:end] for start, end in segments]


class PhaseDetector:
    """
    Detects phases within an action (preparation, execution, peak, return).

    Based on velocity profiles and position extrema.
    """

    def __init__(self):
        """Initialize phase detector."""
        pass

    def detect_phases(self,
                     landmarks_sequence: List[np.ndarray],
                     fps: float = 30.0) -> dict:
        """
        Detect action phases from joint-angle velocity profiles.

        Args:
            landmarks_sequence: Sequence of normalized landmarks
            fps: Frame rate

        Returns:
            Dictionary with phase boundaries: {
                'preparation': (start, end),
                'execution': (start, end),
                'peak': frame_idx,
                'return': (start, end)
            }
        """
        n_frames = len(landmarks_sequence)

        # Guard: with fewer than 4 frames there is no meaningful velocity
        # profile, so report everything as preparation.
        if n_frames < 4:
            return {
                'preparation': (0, n_frames),
                'execution': None,
                'peak': None,
                'return': None
            }

        # Build a (T, 6) matrix of joint angles; missing joints default to 0.
        joint_names = ('left_elbow', 'right_elbow',
                       'left_knee', 'right_knee',
                       'left_hip', 'right_hip')
        angles_array = np.array([
            [calculate_joint_angles(lm).get(name, 0) for name in joint_names]
            for lm in landmarks_sequence
        ])

        # Per-frame angular speed in degrees (or radians) per second.
        velocities = np.diff(angles_array, axis=0) * fps
        velocity_magnitude = np.linalg.norm(velocities, axis=1)

        # The execution peak is the frame of maximum angular speed
        # (+1 compensates for the one-frame shift introduced by diff).
        peak_idx = np.argmax(velocity_magnitude) + 1

        # Low-velocity threshold: 30% of the mean speed.
        threshold = np.mean(velocity_magnitude) * 0.3

        # Preparation ends at the last low-velocity frame before the peak
        # (searched backwards); defaults to the peak if none is found.
        prep_start = 0
        prep_end = next(
            (i + 1 for i in range(peak_idx - 1, 0, -1)
             if velocity_magnitude[i] < threshold),
            peak_idx
        )

        # Return starts at the first low-velocity frame after the peak;
        # defaults to the peak if velocity never drops below threshold.
        return_end = n_frames
        return_start = next(
            (i + 1 for i in range(peak_idx, len(velocity_magnitude))
             if velocity_magnitude[i] < threshold),
            peak_idx
        )

        phases = {
            'preparation': (prep_start, prep_end),
            'execution': (prep_end, return_start),
            'peak': peak_idx,
            'return': (return_start, return_end)
        }

        logger.debug(f"Detected phases: {phases}")

        return phases
