# video_processing/processing_worker.py - Worker thread for video processing

import os
import time
import logging
import traceback
from PyQt5.QtCore import QRunnable, QObject, pyqtSignal, pyqtSlot

from ai_detection.yolo_detector import YoloDetector
from ai_detection.scene_classifier import SceneClassifier
from video_processing.ffmpeg_cutter import FFmpegCutter
from video_processing.opencv_analyzer import OpenCVAnalyzer

logger = logging.getLogger(__name__)


class WorkerSignals(QObject):
    """
    Qt signals emitted by VideoProcessingWorker.

    QRunnable does not inherit QObject and therefore cannot declare signals
    itself, so they live on this dedicated QObject instead.

    Signals:
        progress(int, str): percent complete (0-100) plus a status message.
        result(list): final list of detected event dicts, emitted once at end.
        error(str): human-readable description of a processing failure.
        event_detected(dict): one detected event, emitted as soon as found.
    """
    progress = pyqtSignal(int, str)
    result = pyqtSignal(list)
    error = pyqtSignal(str)
    event_detected = pyqtSignal(dict)


class VideoProcessingWorker(QRunnable):
    """
    Background worker that detects notable scenes in a batch of videos and
    cuts the matching segments to disk with ffmpeg.

    Intended to run on a QThreadPool thread (QRunnable); it communicates
    with the GUI thread exclusively through ``self.signals`` (see
    WorkerSignals). Stopping is cooperative: call :meth:`stop` and the
    worker exits between files / between scenes.
    """

    # Maps a classified scene type to the settings key that enables it.
    # Unknown scene types are not filtered (matches the original behavior
    # of the explicit if-chain).
    _SCENE_TYPE_SETTING = {
        "stadium": "detect_stadium",
        "shot": "detect_shots",
        "celebration": "detect_celebration",
    }

    def __init__(self, video_files, settings, database):
        """
        Args:
            video_files: sequence of paths to the videos to process.
            settings: dict with boolean keys "detect_stadium",
                "detect_shots", "detect_celebration" and an int/float
                "sensitivity" (0-10). Optional "output_folder" overrides
                the default "output" destination root.
            database: object exposing insert_event(dict) -> id and
                update_event(dict).
        """
        super().__init__()
        self.video_files = video_files
        self.settings = settings
        self.db = database
        self.signals = WorkerSignals()

        # AI models: object detection and scene classification.
        self.yolo_detector = YoloDetector()
        self.scene_classifier = SceneClassifier()

        # External tools: ffmpeg does the cutting, OpenCV the frame
        # extraction and scene-change analysis.
        self.ffmpeg_cutter = FFmpegCutter()
        self.opencv_analyzer = OpenCVAnalyzer()

        # Progress tracking. stop_requested is a plain bool polled from
        # the worker loop; a single attribute write is atomic under the
        # GIL, so no lock is needed for this flag.
        self.total_files = len(video_files)
        self.processed_files = 0
        self.stop_requested = False

    @pyqtSlot()
    def run(self):
        """Process every queued video, emitting progress/result signals."""
        try:
            results = []

            for index, video_path in enumerate(self.video_files):
                if self.stop_requested:
                    break

                # Overall progress based on files started so far.
                percent = int((index / self.total_files) * 100)
                video_name = os.path.basename(video_path)
                self.signals.progress.emit(percent, f"Processing {video_name}")

                results.extend(self.process_video(video_path))

                # Bug fix: previously this was set to the 0-based index at
                # loop start, so it never reached total_files; count files
                # actually completed instead.
                self.processed_files = index + 1

            # Final progress update (also emitted after a requested stop,
            # preserving the original behavior).
            self.signals.progress.emit(100, "Processing complete")
            self.signals.result.emit(results)

        except Exception as e:
            # Top-level boundary of the worker thread: log with traceback
            # and surface the failure to the GUI via the error signal.
            logger.error(f"Error in processing: {str(e)}")
            logger.error(traceback.format_exc())
            self.signals.error.emit(f"Error: {str(e)}")

    def process_video(self, video_path):
        """
        Analyze a single video: detect scenes, classify them, and cut the
        segments that pass the configured filters.

        Returns:
            list of event dicts for every accepted scene. On error an
            empty (or partial) list is returned and the failure is
            reported via the ``error`` signal rather than raised.
        """
        logger.info(f"Processing video: {video_path}")

        video_results = []

        try:
            # 1. Basic metadata — 'fps' is needed to convert frame indices
            #    into timestamps.
            video_info = self.opencv_analyzer.get_video_info(video_path)

            # 2. Sample frames for analysis; capped to bound memory use.
            logger.info("Extracting frames for analysis")
            frames = self.opencv_analyzer.extract_frames(
                video_path,
                sample_rate=1,   # 1 frame per second
                max_frames=1000  # limit total frames held in memory
            )

            # 3. Split the sampled frames into scenes.
            logger.info("Performing scene detection")
            scenes = self.opencv_analyzer.detect_scene_changes(frames,
                                                               threshold=0.3)

            # 4. Classify, filter, cut and persist each detected scene.
            for scene_idx, scene in enumerate(scenes):
                if self.stop_requested:
                    break

                # NOTE(review): this emits scene-relative progress on the
                # same signal run() uses for file-level progress, so the
                # progress bar jumps backwards between files — consider
                # scaling scene progress into the current file's slice.
                scene_progress = int((scene_idx / len(scenes)) * 100)
                self.signals.progress.emit(
                    scene_progress,
                    f"Processing scene {scene_idx + 1}/{len(scenes)}"
                )

                event = self._process_scene(
                    video_path, video_info, frames, scene_idx, scene
                )
                if event is not None:
                    video_results.append(event)

        except Exception as e:
            logger.error(f"Error processing video {video_path}: {str(e)}")
            logger.error(traceback.format_exc())
            self.signals.error.emit(f"Error processing {os.path.basename(video_path)}: {str(e)}")

        return video_results

    def _process_scene(self, video_path, video_info, frames, scene_idx, scene):
        """
        Classify one scene and, if it passes the type and confidence
        filters, persist it, cut the segment, and emit ``event_detected``.

        Args:
            video_path: path of the source video.
            video_info: dict from get_video_info; must contain 'fps'.
            frames: full list of sampled frames for the video.
            scene_idx: 0-based index of the scene (used in the file name).
            scene: (start_frame, end_frame) tuple.

        Returns:
            The event dict, or None when the scene was filtered out.
        """
        start_frame, end_frame = scene
        start_time = start_frame / video_info['fps']
        end_time = end_frame / video_info['fps']

        # Classify using only this scene's frames.
        scene_frames = frames[start_frame:end_frame]
        scene_type, confidence = self.scene_classifier.classify_scene(scene_frames)

        # Skip scene types the user disabled in settings.
        setting_key = self._SCENE_TYPE_SETTING.get(scene_type)
        if setting_key is not None and not self.settings[setting_key]:
            return None

        # Sensitivity 0-10 maps linearly to a confidence floor 1.0-0.0.
        min_confidence = 1.0 - (self.settings["sensitivity"] / 10.0)
        if confidence < min_confidence:
            return None

        event = {
            'type': scene_type,
            'start_time': start_time,
            'end_time': end_time,
            'duration': end_time - start_time,
            'confidence': confidence,
            'video_path': video_path,
            'frames': {
                'start': start_frame,
                'end': end_frame
            }
        }

        # Persist first so the event has an id even if cutting fails.
        event['id'] = self.db.insert_event(event)

        # Destination: <output_folder>/<scene_type>/<video>_<idx>.mp4.
        # The root was previously hard-coded to "output"; it is now
        # overridable via settings while keeping the same default.
        output_folder = os.path.join(
            self.settings.get("output_folder", "output"), scene_type
        )
        output_file = os.path.join(
            output_folder,
            f"{os.path.splitext(os.path.basename(video_path))[0]}_{scene_idx}.mp4"
        )

        try:
            # Bug fix: the destination folder was never created, which
            # made ffmpeg fail on a fresh install.
            os.makedirs(output_folder, exist_ok=True)
            self.ffmpeg_cutter.cut_segment(
                video_path, output_file, start_time, end_time
            )
            event['output_file'] = output_file

            # Record the output file on the stored event.
            self.db.update_event(event)

        except Exception as e:
            # Best-effort: a failed cut still leaves a valid DB event,
            # matching the original behavior.
            logger.error(f"Error cutting segment: {str(e)}")
            event['output_file'] = ""

        # Notify listeners immediately so the GUI can update live.
        self.signals.event_detected.emit(event)
        return event

    def stop(self):
        """Request a cooperative stop; honored between files and scenes."""
        self.stop_requested = True