"""
Pose Detection Module using MediaPipe

This module handles pose detection from video frames and extracts
33 3D landmark coordinates for skeleton analysis.
"""

import cv2
import numpy as np
import mediapipe as mp
from typing import List, Tuple, Optional
import logging

logger = logging.getLogger(__name__)


class PoseDetector:
    """
    Detects human pose landmarks from video frames using MediaPipe Pose.

    Attributes:
        mp_pose: MediaPipe Pose solution module
        mp_drawing: MediaPipe drawing utilities
        pose: Configured pose detector instance (used for single-frame detection)
        model_complexity: Model complexity (0, 1, or 2)
    """

    def __init__(self,
                 model_complexity: int = 1,
                 min_detection_confidence: float = 0.5,
                 min_tracking_confidence: float = 0.5,
                 smooth_landmarks: bool = True):
        """
        Initialize the pose detector.

        Args:
            model_complexity: Model complexity (0=Lite, 1=Full, 2=Heavy)
            min_detection_confidence: Minimum confidence for detection
            min_tracking_confidence: Minimum confidence for tracking
            smooth_landmarks: Whether to smooth landmarks across frames
        """
        self.mp_pose = mp.solutions.pose
        self.mp_drawing = mp.solutions.drawing_utils

        # Stored so detect_from_video() can build a fresh Pose instance
        # with the same configuration (see _make_pose).
        self.model_complexity = model_complexity
        self.min_detection_confidence = min_detection_confidence
        self.min_tracking_confidence = min_tracking_confidence
        self.smooth_landmarks = smooth_landmarks

        self.pose = self._make_pose()

        logger.info(f"PoseDetector initialized with complexity={model_complexity}")

    def _make_pose(self):
        """Create a MediaPipe Pose instance from the stored configuration."""
        return self.mp_pose.Pose(
            model_complexity=self.model_complexity,
            min_detection_confidence=self.min_detection_confidence,
            min_tracking_confidence=self.min_tracking_confidence,
            smooth_landmarks=self.smooth_landmarks
        )

    @staticmethod
    def _to_rgb(frame: np.ndarray) -> np.ndarray:
        """
        Convert an OpenCV BGR frame to RGB; pass other inputs through.

        The ndim guard makes this safe for 2-D (grayscale) arrays, where
        indexing frame.shape[2] would raise IndexError.
        """
        if frame.ndim == 3 and frame.shape[2] == 3:
            return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return frame

    @staticmethod
    def _extract_landmarks(results) -> Optional[np.ndarray]:
        """
        Extract landmarks from a MediaPipe result object.

        Returns:
            Array of shape (33, 4) with [x, y, z, visibility] per landmark,
            or None if no pose was detected in the frame.
        """
        if results.pose_landmarks is None:
            return None
        return np.array(
            [[lm.x, lm.y, lm.z, lm.visibility]
             for lm in results.pose_landmarks.landmark],
            dtype=np.float32
        )

    def detect_from_frame(self, frame: np.ndarray) -> Optional[np.ndarray]:
        """
        Detect pose landmarks from a single frame.

        Args:
            frame: BGR image array (H, W, 3) as produced by OpenCV
                (3-channel input is converted to RGB before processing)

        Returns:
            Landmarks array of shape (33, 4) containing [x, y, z, visibility]
            or None if no pose detected
        """
        results = self.pose.process(self._to_rgb(frame))
        return self._extract_landmarks(results)

    def detect_from_video(self, video_path: str) -> Tuple[List[np.ndarray], dict]:
        """
        Detect pose landmarks from a video file.

        Args:
            video_path: Path to video file

        Returns:
            Tuple of (landmarks_sequence, metadata)
            - landmarks_sequence: List of landmark arrays, each (33, 4),
              with None entries for frames where no pose was detected
            - metadata: Dict containing fps, frame_count, resolution,
              video_path, valid_frames, detection_rate

        Raises:
            ValueError: If the video file cannot be opened.
        """
        # Create a fresh Pose instance for this video to ensure clean state.
        # This is critical when smooth_landmarks=True to avoid state
        # contamination from previously processed videos.
        logger.debug("Creating fresh Pose instance for video processing")
        video_pose = self._make_pose()

        cap = cv2.VideoCapture(video_path)

        if not cap.isOpened():
            video_pose.close()  # don't leak the pose graph on early exit
            raise ValueError(f"Failed to open video: {video_path}")

        # Get video metadata
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        metadata = {
            'fps': fps,
            'frame_count': frame_count,
            'resolution': (width, height),
            'video_path': video_path
        }

        landmarks_sequence = []
        frames_read = 0
        valid_frames = 0

        logger.info(f"Processing video: {video_path} ({frame_count} frames, {fps} fps)")

        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                results = video_pose.process(self._to_rgb(frame))
                landmarks = self._extract_landmarks(results)
                landmarks_sequence.append(landmarks)
                if landmarks is not None:
                    valid_frames += 1
                frames_read += 1
        finally:
            # Release resources even if MediaPipe raises mid-video.
            cap.release()
            video_pose.close()

        # CAP_PROP_FRAME_COUNT comes from the container header and may be
        # zero or inaccurate; use the number of frames actually decoded.
        metadata['valid_frames'] = valid_frames
        metadata['detection_rate'] = valid_frames / frames_read if frames_read > 0 else 0

        logger.info(f"Detected poses in {valid_frames}/{frames_read} frames "
                   f"({metadata['detection_rate']:.2%})")

        return landmarks_sequence, metadata

    def interpolate_missing_frames(self,
                                   landmarks_sequence: List[Optional[np.ndarray]]) -> List[np.ndarray]:
        """
        Interpolate missing landmark frames using linear interpolation.

        Frames with a valid neighbour on both sides are linearly blended;
        frames with only one valid neighbour copy it; if no frame in the
        sequence is valid, zero landmarks are substituted.

        Args:
            landmarks_sequence: List of landmark arrays with possible None values

        Returns:
            List of landmark arrays with interpolated values
        """
        result = []

        for i, landmarks in enumerate(landmarks_sequence):
            if landmarks is not None:
                result.append(landmarks)
                continue

            # Scan outwards for the nearest valid frames on each side.
            prev_idx = i - 1
            while prev_idx >= 0 and landmarks_sequence[prev_idx] is None:
                prev_idx -= 1

            next_idx = i + 1
            while next_idx < len(landmarks_sequence) and landmarks_sequence[next_idx] is None:
                next_idx += 1

            has_prev = prev_idx >= 0
            has_next = next_idx < len(landmarks_sequence)

            if has_prev and has_next:
                # Linear interpolation weighted by distance to each neighbour.
                prev_landmarks = landmarks_sequence[prev_idx]
                next_landmarks = landmarks_sequence[next_idx]
                alpha = (i - prev_idx) / (next_idx - prev_idx)
                result.append(prev_landmarks * (1 - alpha) + next_landmarks * alpha)
            elif has_prev:
                # Leading edge gap: hold the previous valid frame.
                result.append(landmarks_sequence[prev_idx].copy())
            elif has_next:
                # Trailing edge gap: hold the next valid frame.
                result.append(landmarks_sequence[next_idx].copy())
            else:
                # No valid frames anywhere; substitute zero landmarks.
                result.append(np.zeros((33, 4), dtype=np.float32))

        return result

    def __del__(self):
        """Best-effort release of MediaPipe resources."""
        pose = getattr(self, 'pose', None)
        if pose is not None:
            try:
                pose.close()
            except Exception:
                # __del__ may run during interpreter shutdown when
                # dependencies are already torn down; never propagate.
                pass


class LandmarkIndices:
    """
    MediaPipe Pose landmark indices (33-landmark model) for easy reference.

    Each constant is the row index into a (33, 4) landmarks array as
    produced by PoseDetector, where columns are [x, y, z, visibility].
    """

    # Face (indices 0-10)
    NOSE = 0
    LEFT_EYE_INNER = 1
    LEFT_EYE = 2
    LEFT_EYE_OUTER = 3
    RIGHT_EYE_INNER = 4
    RIGHT_EYE = 5
    RIGHT_EYE_OUTER = 6
    LEFT_EAR = 7
    RIGHT_EAR = 8
    MOUTH_LEFT = 9
    MOUTH_RIGHT = 10

    # Upper body: shoulders, arms, hands, and hips (indices 11-24)
    LEFT_SHOULDER = 11
    RIGHT_SHOULDER = 12
    LEFT_ELBOW = 13
    RIGHT_ELBOW = 14
    LEFT_WRIST = 15
    RIGHT_WRIST = 16
    LEFT_PINKY = 17
    RIGHT_PINKY = 18
    LEFT_INDEX = 19
    RIGHT_INDEX = 20
    LEFT_THUMB = 21
    RIGHT_THUMB = 22
    LEFT_HIP = 23
    RIGHT_HIP = 24

    # Legs and feet (indices 25-32)
    LEFT_KNEE = 25
    RIGHT_KNEE = 26
    LEFT_ANKLE = 27
    RIGHT_ANKLE = 28
    LEFT_HEEL = 29
    RIGHT_HEEL = 30
    LEFT_FOOT_INDEX = 31
    RIGHT_FOOT_INDEX = 32
