"""
Video Processor Module - Handles video file processing for OCR
"""
import cv2
import numpy as np
import tempfile
import logging
from pathlib import Path
from typing import List, Tuple, Dict, Generator
import json

from .ocr_processor import OCRProcessor
from .config import VIDEO_MAX_SIDE, VIDEO_FPS

# Configure logging
# Module-level logger named after this module (e.g. "package.video_processor").
logger = logging.getLogger(__name__)
# NOTE(review): basicConfig() at import time in a library module installs a
# root handler and can override/duplicate the host application's logging
# setup — consider moving this to the application entry point. TODO confirm
# whether anything relies on this module configuring logging.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)


def sanitize_for_json(obj):
    """
    Recursively sanitize objects to be JSON serializable.

    Converts NaN, Inf, and numpy types to JSON-safe values:
      * dicts, lists and tuples are rebuilt recursively (tuples become lists),
      * numpy arrays become nested Python lists,
      * numpy bools become Python bools (``json.dumps`` rejects ``np.bool_``),
      * numpy integer/float scalars become Python scalars,
      * NaN -> 0.0, +Inf -> 1.0, -Inf -> -1.0 (each conversion is logged).

    Args:
        obj: Arbitrary, possibly nested value to sanitize.

    Returns:
        An equivalent structure built only from JSON-serializable values.
    """
    # Fetch by name so the function is self-contained; this is the same
    # instance as the module-level `logger`.
    log = logging.getLogger(__name__)

    if isinstance(obj, dict):
        # Note: keys are passed through unchanged; callers use str keys here.
        return {k: sanitize_for_json(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [sanitize_for_json(item) for item in obj]
    if isinstance(obj, np.ndarray):
        return sanitize_for_json(obj.tolist())
    if isinstance(obj, np.bool_):
        # Bug fix: np.bool_ is neither np.integer nor np.floating, so it
        # previously slipped through and made json.dumps raise TypeError.
        return bool(obj)
    if isinstance(obj, (np.integer, np.floating)):
        # Unwrap numpy scalars; floats then fall through to the NaN/Inf check.
        obj = obj.item()

    # Handle non-finite float values, which json.dumps cannot emit as
    # standards-compliant JSON.
    if isinstance(obj, float):
        if np.isnan(obj):
            log.warning("Converting NaN to 0.0")
            return 0.0
        if np.isinf(obj):
            log.warning(f"Converting Inf to {'1.0' if obj > 0 else '-1.0'}")
            return 1.0 if obj > 0 else -1.0

    return obj


class VideoProcessor:
    """Process video files for OCR.

    Wraps an OCRProcessor and offers four processing modes:
      * ``process_video``             - full per-frame OCR results
      * ``process_video_with_output`` - annotated output video + OCR results
      * ``process_video_text_only``   - recognized text only, score-filtered
      * ``process_video_stream``      - SSE-formatted streaming results
    """

    def __init__(self, ocr_processor: "OCRProcessor"):
        """Initialize video processor with the OCR engine used per frame."""
        self.ocr_processor = ocr_processor

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _validate_sample_rate(sample_rate: int) -> None:
        """Reject sample rates that would break ``frame_count % sample_rate``.

        Previously 0 crashed with ZeroDivisionError mid-processing and
        negative values silently selected almost no frames; fail fast with a
        clear error instead.
        """
        if sample_rate < 1:
            raise ValueError(f"sample_rate must be >= 1, got {sample_rate}")

    @staticmethod
    def _open_capture(video_path: str):
        """Open ``video_path`` with OpenCV, raising ValueError on failure."""
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            logger.error(f"Cannot open video file: {video_path}")
            raise ValueError(f"Cannot open video file: {video_path}")
        return cap

    @staticmethod
    def _video_properties(cap) -> Tuple[int, float, int, int]:
        """Read and log (total_frames, fps, width, height) from a capture."""
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        logger.info(f"Video properties - Frames: {total_frames}, FPS: {fps}, Resolution: {width}x{height}")
        return total_frames, fps, width, height

    @staticmethod
    def _video_info(total_frames: int, fps: float, width: int, height: int) -> Dict:
        """Build the ``video_info`` metadata dictionary shared by the APIs."""
        return {
            "total_frames": total_frames,
            "fps": fps,
            "width": width,
            "height": height,
            # Guard against fps == 0 (some containers report it that way).
            "duration_seconds": total_frames / fps if fps > 0 else 0
        }

    @staticmethod
    def _clamp_score(score, frame_number=None) -> float:
        """Convert one raw OCR confidence to a JSON-safe float.

        NaN becomes 0.0 and +/-Inf becomes 1.0 (note: unlike
        ``sanitize_for_json``, negative Inf also maps to 1.0 here — this
        preserves the historical per-frame behavior). A warning is logged
        only when ``frame_number`` is supplied, matching the original code:
        the full-result methods warned, the text-only ones clamped silently.
        """
        value = float(score)
        if np.isnan(value):
            if frame_number is not None:
                logger.warning(f"Frame {frame_number}: Converting NaN score to 0.0")
            return 0.0
        if np.isinf(value):
            if frame_number is not None:
                logger.warning(f"Frame {frame_number}: Converting Inf score to 1.0")
            return 1.0
        return value

    def _filter_texts(self, txts, scores, min_score: float) -> List[str]:
        """Return only the texts whose clamped score is >= ``min_score``."""
        return [
            txt
            for txt, score in zip(txts, scores)
            if self._clamp_score(score) >= min_score
        ]

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def process_video(self, video_path: str, sample_rate: int = 1) -> Dict:
        """
        Process video file and extract OCR results.

        Args:
            video_path: Path to video file
            sample_rate: Process every Nth frame (1 = all frames)

        Returns:
            Dictionary with OCR results for each frame

        Raises:
            ValueError: If the video cannot be opened or sample_rate < 1.
        """
        logger.info(f"Starting video processing: {video_path}, sample_rate={sample_rate}")
        self._validate_sample_rate(sample_rate)
        cap = self._open_capture(video_path)

        total_frames, fps, width, height = self._video_properties(cap)

        results = {
            "video_info": self._video_info(total_frames, fps, width, height),
            "frames": []
        }

        frame_count = 0
        processed_count = 0
        error_count = 0

        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                # Process every Nth frame
                if frame_count % sample_rate == 0:
                    try:
                        logger.debug(f"Processing frame {frame_count}")
                        dt_boxes, txts, scores = self.ocr_processor.process_frame(frame)

                        results["frames"].append({
                            "frame_number": frame_count,
                            "timestamp": frame_count / fps if fps > 0 else 0,
                            "text_boxes": self._format_boxes(dt_boxes),
                            "texts": txts,
                            # Clamp NaN/Inf confidences, warning per frame.
                            "scores": [self._clamp_score(s, frame_count) for s in scores]
                        })
                        processed_count += 1

                        if len(txts) > 0:
                            logger.debug(f"Frame {frame_count}: Found {len(txts)} texts")
                    except Exception as e:
                        error_count += 1
                        logger.error(f"Error processing frame {frame_count}: {str(e)}", exc_info=True)
                        # Continue processing other frames instead of stopping

                frame_count += 1

        finally:
            cap.release()
            logger.info(f"Video processing completed - Processed: {processed_count}/{frame_count}, Errors: {error_count}")

        results["processing_info"] = {
            "total_frames_processed": processed_count,
            "sample_rate": sample_rate,
            "error_count": error_count
        }

        # Sanitize results for JSON serialization
        return sanitize_for_json(results)

    def process_video_with_output(self, video_path: str, output_path: str, sample_rate: int = 1) -> Dict:
        """
        Process video and save output video with OCR results drawn.

        Args:
            video_path: Path to input video file
            output_path: Path to save output video
            sample_rate: Process every Nth frame

        Returns:
            Dictionary with processing results

        Raises:
            ValueError: If the input cannot be opened, the writer cannot be
                created, or sample_rate < 1.
        """
        logger.info(f"Starting video processing with output: {video_path} -> {output_path}, sample_rate={sample_rate}")
        self._validate_sample_rate(sample_rate)
        cap = self._open_capture(video_path)

        total_frames, fps, width, height = self._video_properties(cap)

        # Create video writer
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

        if not out.isOpened():
            logger.error(f"Cannot create video writer for: {output_path}")
            cap.release()
            raise ValueError(f"Cannot create video writer for: {output_path}")

        frame_count = 0
        processed_count = 0
        error_count = 0
        ocr_results = []

        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                if frame_count % sample_rate == 0:
                    try:
                        logger.debug(f"Processing frame {frame_count}")
                        dt_boxes, txts, scores = self.ocr_processor.process_frame(frame)
                        draw_img = self.ocr_processor.draw_results(frame, dt_boxes, txts, scores)

                        # draw_results yields RGB; convert to BGR for the writer.
                        draw_img_bgr = cv2.cvtColor(draw_img, cv2.COLOR_RGB2BGR)
                        out.write(draw_img_bgr)

                        ocr_results.append({
                            "frame_number": frame_count,
                            "texts": txts,
                            "scores": [self._clamp_score(s, frame_count) for s in scores]
                        })
                        processed_count += 1

                        if len(txts) > 0:
                            logger.debug(f"Frame {frame_count}: Found {len(txts)} texts")
                    except Exception as e:
                        error_count += 1
                        logger.error(f"Error processing frame {frame_count}: {str(e)}", exc_info=True)
                        # Write original frame if processing fails
                        out.write(frame)
                else:
                    # Unsampled frames pass through unannotated.
                    out.write(frame)

                frame_count += 1

        finally:
            cap.release()
            out.release()
            logger.info(f"Video processing completed - Processed: {processed_count}/{frame_count}, Errors: {error_count}")

        result = {
            "output_video": output_path,
            "total_frames": total_frames,
            "processed_frames": processed_count,
            "fps": fps,
            "error_count": error_count,
            "ocr_results": ocr_results
        }

        # Sanitize results for JSON serialization
        return sanitize_for_json(result)

    def process_video_text_only(self, video_path: str, sample_rate: int = 1, min_score: float = 0.0) -> Dict:
        """
        Process video and extract only recognized text segments.

        Returns a clean list of text without coordinates, scores, or other
        metadata.

        Args:
            video_path: Path to video file
            sample_rate: Process every Nth frame (1 = all frames)
            min_score: Minimum confidence score to include text (0.0 to 1.0)

        Returns:
            Dictionary with only recognized text segments organized by frame

        Raises:
            ValueError: If the video cannot be opened or sample_rate < 1.
        """
        logger.info(f"Starting text-only video processing: {video_path}, sample_rate={sample_rate}, min_score={min_score}")
        self._validate_sample_rate(sample_rate)
        cap = self._open_capture(video_path)

        total_frames, fps, width, height = self._video_properties(cap)

        results = {
            "video_info": self._video_info(total_frames, fps, width, height),
            "text_segments": []
        }

        frame_count = 0
        processed_count = 0
        error_count = 0
        text_count = 0

        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                # Process every Nth frame
                if frame_count % sample_rate == 0:
                    try:
                        logger.debug(f"Processing frame {frame_count}")
                        dt_boxes, txts, scores = self.ocr_processor.process_frame(frame)

                        frame_texts = self._filter_texts(txts, scores, min_score)
                        text_count += len(frame_texts)

                        # Only add frame if it has recognized text
                        if frame_texts:
                            results["text_segments"].append({
                                "frame_number": frame_count,
                                "timestamp": frame_count / fps if fps > 0 else 0,
                                "texts": frame_texts
                            })
                            logger.debug(f"Frame {frame_count}: Found {len(frame_texts)} texts")

                        processed_count += 1

                    except Exception as e:
                        error_count += 1
                        logger.error(f"Error processing frame {frame_count}: {str(e)}", exc_info=True)
                        # Continue processing other frames instead of stopping

                frame_count += 1

        finally:
            cap.release()
            logger.info(f"Text-only processing completed - Processed: {processed_count}/{frame_count}, Text segments: {text_count}, Errors: {error_count}")

        results["processing_info"] = {
            "total_frames_processed": processed_count,
            "sample_rate": sample_rate,
            "min_score": min_score,
            "total_text_segments": text_count,
            "error_count": error_count
        }

        # Sanitize results for JSON serialization
        return sanitize_for_json(results)

    def process_video_stream(self, video_path: str, sample_rate: int = 1, min_score: float = 0.0) -> Generator[str, None, None]:
        """
        Process video and stream OCR results in real-time via SSE.

        Yields JSON strings that can be sent as Server-Sent Events: one
        ``metadata`` event, one ``frame`` or ``error`` event per sampled
        frame, then a ``complete`` event.

        Args:
            video_path: Path to video file
            sample_rate: Process every Nth frame (1 = all frames)
            min_score: Minimum confidence score to include text (0.0 to 1.0)

        Yields:
            JSON strings representing frame results or metadata

        Raises:
            ValueError: If the video cannot be opened or sample_rate < 1
                (raised on first iteration, since this is a generator).
        """
        logger.info(f"Starting streaming video processing: {video_path}, sample_rate={sample_rate}, min_score={min_score}")
        self._validate_sample_rate(sample_rate)
        cap = self._open_capture(video_path)

        try:
            total_frames, fps, width, height = self._video_properties(cap)

            # Send video metadata first
            metadata = {
                "type": "metadata",
                "video_info": self._video_info(total_frames, fps, width, height)
            }
            yield f"data: {json.dumps(sanitize_for_json(metadata))}\n\n"

            frame_count = 0
            processed_count = 0
            error_count = 0
            text_count = 0

            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                # Process every Nth frame
                if frame_count % sample_rate == 0:
                    try:
                        logger.debug(f"Processing frame {frame_count}")
                        dt_boxes, txts, scores = self.ocr_processor.process_frame(frame)

                        frame_texts = self._filter_texts(txts, scores, min_score)
                        text_count += len(frame_texts)

                        # Send frame result
                        frame_result = {
                            "type": "frame",
                            "frame_number": frame_count,
                            "timestamp": frame_count / fps if fps > 0 else 0,
                            "texts": frame_texts,
                            "text_count": len(frame_texts)
                        }
                        yield f"data: {json.dumps(sanitize_for_json(frame_result))}\n\n"
                        processed_count += 1

                        if len(frame_texts) > 0:
                            logger.debug(f"Frame {frame_count}: Found {len(frame_texts)} texts")

                    except Exception as e:
                        error_count += 1
                        logger.error(f"Error processing frame {frame_count}: {str(e)}", exc_info=True)
                        # Send error event so the client can react per frame
                        error_event = {
                            "type": "error",
                            "frame_number": frame_count,
                            "error": str(e)
                        }
                        yield f"data: {json.dumps(error_event)}\n\n"

                frame_count += 1

            # Send completion event
            completion = {
                "type": "complete",
                "processing_info": {
                    "total_frames_processed": processed_count,
                    "sample_rate": sample_rate,
                    "min_score": min_score,
                    "total_text_segments": text_count,
                    "error_count": error_count
                }
            }
            yield f"data: {json.dumps(sanitize_for_json(completion))}\n\n"
            logger.info(f"Streaming processing completed - Processed: {processed_count}/{frame_count}, Text segments: {text_count}, Errors: {error_count}")

        finally:
            cap.release()

    @staticmethod
    def _format_boxes(dt_boxes) -> List[List[List[float]]]:
        """Convert detection boxes to plain nested lists of floats."""
        if len(dt_boxes) == 0:
            return []
        return [[[float(x), float(y)] for x, y in box] for box in dt_boxes]

