# video_processing/opencv_analyzer.py - Video analysis using OpenCV

import os
import cv2
import numpy as np
import logging
from concurrent.futures import ThreadPoolExecutor

logger = logging.getLogger(__name__)


class OpenCVAnalyzer:
    """
    Analyze video files with OpenCV.

    Provides metadata probing, frame sampling, scene-change detection,
    keyframe selection, motion scoring, and dominant-color estimation.
    The class is stateless; every method takes its inputs explicitly.
    """

    def __init__(self):
        # No configuration needed; all methods are self-contained.
        pass

    def get_video_info(self, video_path):
        """
        Get basic information about a video file

        Args:
            video_path: Path to the video file

        Returns:
            Dictionary with keys 'path', 'width', 'height', 'fps',
            'frame_count' and 'duration' (seconds), or None if the file
            is missing, cannot be opened, or a property read fails.
        """
        if not os.path.exists(video_path):
            logger.error(f"Video file not found: {video_path}")
            return None

        cap = None
        try:
            cap = cv2.VideoCapture(video_path)

            if not cap.isOpened():
                logger.error(f"Could not open video: {video_path}")
                return None

            # Get video properties
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS)
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            # Some containers report fps == 0; avoid a ZeroDivisionError.
            duration = frame_count / fps if fps > 0 else 0

            return {
                'path': video_path,
                'width': width,
                'height': height,
                'fps': fps,
                'frame_count': frame_count,
                'duration': duration
            }

        except Exception as e:
            logger.error(f"Error getting video info: {str(e)}")
            return None
        finally:
            # Release the capture even if a property read raised.
            if cap is not None:
                cap.release()

    def extract_frames(self, video_path, sample_rate=1, max_frames=1000):
        """
        Extract frames from a video at a given sampling rate

        Args:
            video_path: Path to the video file
            sample_rate: Number of frames per second to extract
            max_frames: Maximum number of frames to extract

        Returns:
            List of extracted frames (BGR numpy arrays as returned by
            cv2.read); empty list on any error.
        """
        if not os.path.exists(video_path):
            logger.error(f"Video file not found: {video_path}")
            return []

        frames = []
        cap = None

        try:
            cap = cv2.VideoCapture(video_path)

            if not cap.isOpened():
                logger.error(f"Could not open video: {video_path}")
                return []

            # Get video properties
            fps = cap.get(cv2.CAP_PROP_FPS)
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

            # Step between sampled frame indices, derived from the source
            # fps and the requested sample rate; never less than 1.
            if sample_rate <= 0:
                sample_rate = 1

            if fps <= 0:
                frame_step = 1
            else:
                frame_step = int(fps / sample_rate)

            if frame_step < 1:
                frame_step = 1

            # Upper bound on the index range so that at most max_frames
            # samples are requested.
            frames_to_sample = min(frame_count, max_frames * frame_step)

            for i in range(0, frames_to_sample, frame_step):
                # Seek directly to the target index; exact sampling, though
                # seeking can be slow for some codecs.
                cap.set(cv2.CAP_PROP_POS_FRAMES, i)
                ret, frame = cap.read()

                if ret:
                    frames.append(frame)

                    # Stop if we've reached max_frames
                    if len(frames) >= max_frames:
                        break
                else:
                    logger.warning(f"Could not read frame {i} from {video_path}")

            logger.info(f"Extracted {len(frames)} frames from {video_path}")

        except Exception as e:
            logger.error(f"Error extracting frames: {str(e)}")
        finally:
            # Release the capture even on the error path (the original
            # released it only on success, leaking the handle on raise).
            if cap is not None:
                cap.release()

        return frames

    def detect_scene_changes(self, frames, threshold=0.4, min_scene_length=10):
        """
        Detect scene changes in a sequence of frames

        Args:
            frames: List of BGR frames
            threshold: Mean-absolute-difference threshold (0-1 scale) above
                which consecutive frames are treated as a scene boundary
            min_scene_length: Minimum number of frames in a scene; shorter
                segments are merged into the following one

        Returns:
            List of scenes as (start_frame, end_frame) tuples; end_frame is
            exclusive. Falls back to [(0, len(frames))] when no boundaries
            are found or an error occurs.
        """
        if not frames:
            return []

        try:
            # Per-pair mean absolute grayscale difference, normalized to 0-1.
            diffs = []
            prev_frame = cv2.cvtColor(frames[0], cv2.COLOR_BGR2GRAY)

            for i in range(1, len(frames)):
                curr_frame = cv2.cvtColor(frames[i], cv2.COLOR_BGR2GRAY)

                diff = cv2.absdiff(prev_frame, curr_frame)
                mean_diff = np.mean(diff) / 255.0
                diffs.append(mean_diff)

                prev_frame = curr_frame

            # Candidate boundaries: frame 0, every index whose diff exceeds
            # the threshold, and one past the last frame.
            scene_changes = [0]

            for i in range(len(diffs)):
                if diffs[i] > threshold:
                    scene_changes.append(i + 1)

            scene_changes.append(len(frames))

            # Merge short scenes: a segment shorter than min_scene_length is
            # skipped without advancing start_frame, so it is absorbed into
            # the following segment.
            merged_scenes = []
            start_frame = scene_changes[0]

            for i in range(1, len(scene_changes)):
                end_frame = scene_changes[i]

                if end_frame - start_frame < min_scene_length:
                    continue

                merged_scenes.append((start_frame, end_frame))
                start_frame = end_frame

            # If the trailing segment was dropped for being short, re-attach
            # the remaining frames as a final scene.
            if merged_scenes and merged_scenes[-1][1] < len(frames):
                start_frame = merged_scenes[-1][1]
                merged_scenes.append((start_frame, len(frames)))

            # If no scenes were detected, use the entire video as one scene.
            if not merged_scenes:
                merged_scenes.append((0, len(frames)))

            return merged_scenes

        except Exception as e:
            logger.error(f"Error detecting scene changes: {str(e)}")
            return [(0, len(frames))]  # Return entire video as one scene on error

    def extract_keyframes(self, frames, num_keyframes=5):
        """
        Extract representative keyframes from a scene

        Args:
            frames: List of frames in the scene
            num_keyframes: Number of keyframes to extract

        Returns:
            List of up to num_keyframes frames, evenly spaced across the
            input; the input list itself when it is already short enough.
        """
        if not frames:
            return []

        if len(frames) <= num_keyframes:
            return frames

        try:
            # Simple approach: pick evenly spaced frames.
            keyframes = []

            step = len(frames) / num_keyframes

            for i in range(num_keyframes):
                # Clamp in case of float rounding at the upper end.
                idx = min(int(i * step), len(frames) - 1)
                keyframes.append(frames[idx])

            return keyframes

        except Exception as e:
            logger.error(f"Error extracting keyframes: {str(e)}")
            return frames[:min(num_keyframes, len(frames))]

    def detect_motion(self, frames):
        """
        Calculate motion intensity in a sequence of frames

        Args:
            frames: List of BGR frames

        Returns:
            Average motion score in [0, 1]; 0.0 for fewer than two frames
            or on error.
        """
        if len(frames) < 2:
            return 0.0

        try:
            motion_scores = []

            # Convert frames to grayscale once, up front.
            gray_frames = [cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) for frame in frames]

            # Dense optical flow between each consecutive pair.
            for i in range(len(gray_frames) - 1):
                flow = cv2.calcOpticalFlowFarneback(
                    gray_frames[i], gray_frames[i + 1], None, 0.5, 3, 15, 3, 5, 1.2, 0)

                # Flow magnitude per pixel.
                mag, _ = cv2.cartToPolar(flow[..., 0], flow[..., 1])

                avg_mag = np.mean(mag)

                # Clip to [0, 1]; the /10.0 scale is a heuristic normalizer.
                motion_score = min(1.0, avg_mag / 10.0)
                motion_scores.append(motion_score)

            if motion_scores:
                return sum(motion_scores) / len(motion_scores)
            else:
                return 0.0

        except Exception as e:
            logger.error(f"Error calculating motion: {str(e)}")
            return 0.0

    def detect_dominant_color(self, frame):
        """
        Detect dominant color in a frame (useful for identifying green field)

        Args:
            frame: Input frame (H x W x 3 array; channel order is preserved,
                so frames read with OpenCV yield a BGR result)

        Returns:
            Dominant color as a 3-tuple of ints in the frame's own channel
            order (BGR for cv2-read frames); (0, 0, 0) on error.
        """
        try:
            # Flatten to an (N, 3) float32 pixel list for K-means.
            pixels = frame.reshape(-1, 3).astype(np.float32)

            # Stop after 10 iterations or when centers move less than 1.0.
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

            # Cluster pixels into 5 colors, 10 random restarts.
            _, labels, centers = cv2.kmeans(pixels, 5, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

            # The most populated cluster is the dominant color.
            label_counts = np.bincount(labels.flatten())
            dominant_label = np.argmax(label_counts)

            dominant_color = centers[dominant_label].astype(np.uint8)

            # Plain ints (== original np.uint8 values) for easy serialization.
            return tuple(int(c) for c in dominant_color)

        except Exception as e:
            logger.error(f"Error detecting dominant color: {str(e)}")
            return (0, 0, 0)
