"""
Optimized video stream pipeline - integrates cone x-center alignment filtering and cone ROI optimization
Combines alignment-based cone filtering after stage-1 with intelligent cone ROI building
"""

import os
import cv2
import numpy as np
from typing import Optional, Dict, Any, List, Tuple
from yolo_pipeline_optimized_cone_filtered import YOLOTrafficLightPipelineOptimized


class VideoStreamPipelineConeFiltered:
    """
    Video stream pipeline using the cone-alignment-filtered YOLO pipeline

    Features:
    - Filters out side cones using x-center alignment with dashboard/number
    - Optimizes cone ROI building based on dashboard width
    - Real-time visualization and performance info
    - Option to create a fresh pipeline instance per frame for stability
    """

    def __init__(self,
                 model_path: str,
                 device: Optional[str] = None,
                 display_width: int = 1920,
                 display_height: int = 1080,
                 use_fresh_pipeline_per_frame: bool = True):
        """
        Initialize cone-filtered video stream pipeline

        Args:
            model_path: YOLO model weights path
            device: Inference device (passed through to the YOLO pipeline;
                None lets the pipeline choose)
            display_width: Display window width
            display_height: Display window height
            use_fresh_pipeline_per_frame: Whether to create a new pipeline per
                frame (more stable across frames, but pays model-reload cost)
        """
        self.model_path = model_path
        self.device = device
        self.display_width = display_width
        self.display_height = display_height
        self.use_fresh_pipeline_per_frame = use_fresh_pipeline_per_frame

        # Smoke-test the model once up front so a bad weights path fails fast,
        # before we start consuming the video source.
        test_pipeline = YOLOTrafficLightPipelineOptimized(model_path, device=device, debug=False)
        print(f"✅ Cone-filtered optimized model loaded successfully: {model_path}")
        del test_pipeline

        # Keep a persistent pipeline only when per-frame recreation is disabled.
        if not self.use_fresh_pipeline_per_frame:
            self.pipeline = YOLOTrafficLightPipelineOptimized(model_path, device=device, debug=False)
        else:
            self.pipeline = None
            print("🔧 Fresh pipeline per frame mode enabled (more stable but slightly slower)")

        # BGR colors for UI overlays.
        self.colors = {
            'cone': (255, 165, 0),      # orange
            'number': (0, 255, 0),      # green
            'dashboard': (255, 0, 0),   # blue
            'text': (255, 255, 255),    # white
            'background': (50, 50, 50), # dark gray
            'success': (0, 255, 0),     # green
            'warning': (0, 255, 255),   # yellow
            'error': (0, 0, 255)        # red
        }

        # Font settings
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.font_scale = 0.8
        self.font_thickness = 2

    def _draw_text_with_background(self, image: np.ndarray, text: str,
                                   position: Tuple[int, int],
                                   font_color: Tuple[int, int, int] = (255, 255, 255),
                                   bg_color: Tuple[int, int, int] = (0, 0, 0),
                                   font_scale: float = 0.8,
                                   thickness: int = 2) -> np.ndarray:
        """
        Draw text with a filled background rectangle (in-place on `image`).

        Args:
            image: BGR image to draw on (modified in place)
            text: Text to render
            position: (x, y) of the text baseline origin
            font_color: BGR text color
            bg_color: BGR background rectangle color
            font_scale: cv2 font scale
            thickness: Stroke thickness in pixels

        Returns:
            The same image object, for call chaining.
        """
        x, y = position
        (text_width, text_height), baseline = cv2.getTextSize(text, self.font, font_scale, thickness)
        # Pad the background box by 5 px on every side so the glyphs never
        # touch the rectangle border.
        cv2.rectangle(image, (x - 5, y - text_height - 5), (x + text_width + 5, y + baseline + 5), bg_color, -1)
        cv2.putText(image, text, (x, y), self.font, font_scale, font_color, thickness)
        return image

    def process_single_frame(self, frame: np.ndarray, frame_idx: int) -> Dict[str, Any]:
        """
        Process a single frame using the cone-filtered optimized pipeline.

        Args:
            frame: BGR frame to process
            frame_idx: Frame index (used only for error reporting)

        Returns:
            The pipeline result dict; on failure, a dict with the same keys
            ('final_results', 'cone_color', 'number', 'dashboard_state',
            'processing_info') plus an 'error' message, so callers never
            need a special error path.
        """
        try:
            if self.use_fresh_pipeline_per_frame:
                # Build a throwaway pipeline: isolates per-frame state at the
                # cost of a model reload.
                pipeline = YOLOTrafficLightPipelineOptimized(self.model_path, device=self.device, debug=False)
            else:
                pipeline = self.pipeline

            results = pipeline.process_image(frame)

            if self.use_fresh_pipeline_per_frame:
                del pipeline

            return results
        except Exception as e:
            print(f"❌ Error processing frame {frame_idx}: {e}")
            return {
                'final_results': [],
                'cone_color': "None",
                'number': "None",
                'dashboard_state': "None",
                'error': str(e),
                'processing_info': {}
            }

    def create_enhanced_display(self, frame: np.ndarray, results: Dict[str, Any],
                                frame_idx: int, fps: float = 0.0,
                                success_rate: float = 0.0) -> np.ndarray:
        """
        Create enhanced display interface with more debug information.

        Args:
            frame: Source BGR frame (copied; not modified)
            results: Result dict from process_single_frame
            frame_idx: Current frame index
            fps: Measured FPS to display (0 hides the readout)
            success_rate: Detection success rate in percent (0 hides it)

        Returns:
            A new annotated BGR frame.
        """
        display = frame.copy()

        # Extract results (missing keys render as 'None').
        cone_color = results.get('cone_color', 'None')
        number = results.get('number', 'None')
        dashboard_state = results.get('dashboard_state', 'None')
        processing_info = results.get('processing_info', {})

        # Text layout
        y_start = 50
        line_height = 40

        # Main results: render the cone color label in its own color when known.
        cone_color_bgr = {
            'red': (0, 0, 255),
            'orange': (0, 165, 255),
            'yellow': (0, 255, 255),
            'green': (0, 255, 0),
            'blue': (255, 0, 0)
        }.get(str(cone_color).lower(), self.colors['text'])

        self._draw_text_with_background(
            display, f"Cone color: {cone_color}", (30, y_start),
            font_color=cone_color_bgr, font_scale=1.2, thickness=3
        )

        y_start += line_height
        number_color = self.colors['success'] if number != 'None' else self.colors['error']
        self._draw_text_with_background(
            display, f"Number: {number}", (30, y_start),
            font_color=number_color, font_scale=1.2, thickness=3
        )

        y_start += line_height
        dashboard_color = self.colors['success'] if dashboard_state != 'None' else self.colors['error']
        self._draw_text_with_background(
            display, f"Dashboard: {dashboard_state}", (30, y_start),
            font_color=dashboard_color, font_scale=1.2, thickness=3
        )

        # Info banner
        y_start += line_height + 20
        self._draw_text_with_background(
            display, "CONE ALIGNMENT FILTERED + ROI OPTIMIZED", (30, y_start),
            font_color=self.colors['warning'], font_scale=1.0, thickness=2
        )

        # Processing info (stage-1 detection and ROI breakdown, when provided).
        if processing_info:
            y_start += line_height
            info_lines = [
                f"Stage1 detections: {processing_info.get('num_detections_stage1', 0)}",
                f"ROIs generated: {processing_info.get('num_rois', 0)}",
                f"ROI types: {', '.join(processing_info.get('roi_types', []))}"
            ]
            for line in info_lines:
                self._draw_text_with_background(
                    display, line, (30, y_start),
                    font_color=self.colors['text'], font_scale=0.8, thickness=2
                )
                y_start += 30

        # Right-top metrics (FPS, success rate, frame index, pipeline mode).
        right_x = display.shape[1] - 250
        right_y = 40

        if fps > 0:
            fps_color = self.colors['success'] if fps > 10 else self.colors['warning']
            self._draw_text_with_background(
                display, f"FPS: {fps:.1f}", (right_x, right_y),
                font_color=fps_color, font_scale=1.0, thickness=2
            )
            right_y += 35

        if success_rate > 0:
            success_color = self.colors['success'] if success_rate > 70 else (
                self.colors['warning'] if success_rate > 40 else self.colors['error']
            )
            self._draw_text_with_background(
                display, f"Success: {success_rate:.1f}%", (right_x, right_y),
                font_color=success_color, font_scale=1.0, thickness=2
            )
            right_y += 35

        self._draw_text_with_background(
            display, f"Frame: {frame_idx}", (right_x, right_y),
            font_color=self.colors['text'], font_scale=0.9, thickness=2
        )

        mode_text = "Fresh Pipeline" if self.use_fresh_pipeline_per_frame else "Persistent Pipeline"
        mode_color = self.colors['success'] if self.use_fresh_pipeline_per_frame else self.colors['warning']
        self._draw_text_with_background(
            display, mode_text, (right_x, right_y + 35),
            font_color=mode_color, font_scale=0.7, thickness=1
        )

        return display

    def process_video_stream(self,
                             video_source: str | int = 0,
                             output_path: Optional[str] = None,
                             max_frames: Optional[int] = None,
                             fps_limit: Optional[float] = None,
                             show_roi_debug: bool = False) -> None:
        """
        Process video stream with cone-alignment-filtered optimized pipeline.

        Args:
            video_source: File path (str) or camera index (int, default 0)
            output_path: If given, annotated frames are written here (XVID)
            max_frames: Stop after this many processed frames (None = no limit)
            fps_limit: Cap the processing rate by dropping early frames
            show_roi_debug: Start with the ROI statistics panel visible
                (toggleable at runtime with the 'd' key)

        Raises:
            ValueError: If the video source cannot be opened.
        """
        cap = cv2.VideoCapture(video_source)
        if not cap.isOpened():
            raise ValueError(f"Cannot open video source: {video_source}")

        # Video props (camera sources don't report FPS/frame count reliably,
        # so assume 30 FPS and an unknown length).
        if isinstance(video_source, str):
            fps = cap.get(cv2.CAP_PROP_FPS)
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            print(f"📹 Video: {video_source}")
            print(f"📊 FPS: {fps}, Total frames: {total_frames}")
        else:
            fps = 30.0
            total_frames = -1
            print(f"📷 Camera: {video_source}")

        # Writer
        out_writer = None
        if output_path:
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            out_writer = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
            print(f"💾 Output will be saved to: {output_path}")

        # Stats
        frame_count = 0
        successful_detections = 0
        success_rate = 0.0
        fps_counter = 0
        # fps_timer is a rolling 10-frame window used only for the on-screen
        # FPS readout; stream_start is a fixed reference used for fps_limit
        # pacing (the two must not share one timer: fps_timer gets reset).
        fps_timer = cv2.getTickCount()
        stream_start = fps_timer
        current_fps = 0.0

        roi_stats = {
            'total_rois': 0,
            'number_rois': 0,
            'cone_rois': 0,
            'dashboard_rois': 0
        }

        print("🚀 Starting cone-filtered optimized video processing. Press 'q' to quit, 's' to save frame, 'd' to toggle ROI debug mode.")
        print("🔧 Using cone x-center alignment filtering + dashboard-width-based cone ROI optimization")

        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    if isinstance(video_source, str):
                        print("📹 Video file ended")
                    else:
                        print("📷 Camera read failed")
                    break

                if max_frames and frame_count >= max_frames:
                    print(f"🔢 Reached maximum frame limit: {max_frames}")
                    break

                # Optional FPS cap: drop frames that arrive ahead of schedule.
                # Elapsed time is measured from the fixed stream start, not
                # from fps_timer (which is reset every 10 frames and would
                # make the cap drift).
                if fps_limit:
                    expected_time = frame_count / fps_limit
                    elapsed_time = (cv2.getTickCount() - stream_start) / cv2.getTickFrequency()
                    if elapsed_time < expected_time:
                        continue

                try:
                    results = self.process_single_frame(frame, frame_count)

                    cone_color = results.get('cone_color', 'None')
                    number = results.get('number', 'None')
                    dashboard_state = results.get('dashboard_state', 'None')

                    # Any of the three detections counts as a success.
                    has_detections = (
                        cone_color != 'None' or number != 'None' or dashboard_state != 'None'
                    )
                    if has_detections:
                        successful_detections += 1

                    if 'processing_info' in results:
                        info = results['processing_info']
                        roi_types = info.get('roi_types', [])
                        roi_stats['total_rois'] += len(roi_types)
                        roi_stats['number_rois'] += roi_types.count('number')
                        roi_stats['cone_rois'] += roi_types.count('cone')
                        roi_stats['dashboard_rois'] += roi_types.count('dashboard')

                    # frame_count hasn't been incremented yet for this frame,
                    # hence the +1.
                    success_rate = (successful_detections / (frame_count + 1)) * 100
                    display_frame = self.create_enhanced_display(frame, results, frame_count, current_fps, success_rate)

                    # ROI debug panel (bottom-left overlay with running stats).
                    if show_roi_debug and frame_count > 0:
                        debug_y = display_frame.shape[0] - 120
                        avg_rois = roi_stats['total_rois'] / (frame_count + 1)
                        roi_debug_lines = [
                            f"Avg ROIs/frame: {avg_rois:.1f}",
                            f"Number ROIs: {roi_stats['number_rois']}",
                            f"Cone ROIs: {roi_stats['cone_rois']}",
                            f"Dashboard ROIs: {roi_stats['dashboard_rois']}"
                        ]
                        for line in roi_debug_lines:
                            self._draw_text_with_background(
                                display_frame, line, (30, debug_y),
                                font_color=self.colors['warning'], font_scale=0.7, thickness=1
                            )
                            debug_y += 25

                    # Recompute the displayed FPS over every 10-frame window.
                    fps_counter += 1
                    if fps_counter % 10 == 0:
                        current_time = cv2.getTickCount()
                        elapsed = (current_time - fps_timer) / cv2.getTickFrequency()
                        if elapsed > 0:
                            current_fps = fps_counter / elapsed
                        fps_counter = 0
                        fps_timer = current_time

                    cv2.imshow('YOLO Traffic Light - CONE ALIGNMENT FILTERED + ROI OPTIMIZED', display_frame)
                    if out_writer:
                        out_writer.write(display_frame)

                    # Keyboard controls: q = quit, s = save frame, d = toggle
                    # the ROI debug panel.
                    key = cv2.waitKey(1) & 0xFF
                    if key == ord('q'):
                        print("👋 User requested exit")
                        break
                    elif key == ord('s'):
                        save_path = f"cone_filtered_frame_{frame_count:06d}.jpg"
                        cv2.imwrite(save_path, display_frame)
                        print(f"💾 Frame saved: {save_path}")
                    elif key == ord('d'):
                        show_roi_debug = not show_roi_debug
                        print(f"🔧 ROI debug mode: {'enabled' if show_roi_debug else 'disabled'}")

                    frame_count += 1

                    if isinstance(video_source, str) and total_frames > 0 and frame_count % 50 == 0:
                        progress = (frame_count / total_frames) * 100
                        print(f"📊 Progress: {frame_count}/{total_frames} ({progress:.1f}%) - Success rate: {success_rate:.1f}%")
                except Exception as e:
                    # Best-effort streaming: report and keep going on a bad frame.
                    print(f"❌ Error processing frame {frame_count}: {e}")
                    continue
        except KeyboardInterrupt:
            print("⚠️ User interrupted")
        finally:
            cap.release()
            if out_writer:
                out_writer.release()
            cv2.destroyAllWindows()

            # Drop the persistent pipeline reference, but keep the attribute
            # (del would leave the object broken with AttributeError on any
            # later access).
            if not self.use_fresh_pipeline_per_frame and self.pipeline:
                self.pipeline = None

            print(f"\n🏁 Cone-filtered optimized video processing completed")
            print(f"📊 Total frames processed: {frame_count}")
            print(f"✅ Successful detections: {successful_detections}")
            if frame_count > 0:
                final_success_rate = (successful_detections / frame_count) * 100
                print(f"📈 Overall success rate: {final_success_rate:.1f}%")
                if roi_stats['total_rois'] > 0:
                    print(f"\n🔧 ROI generation statistics:")
                    print(f"   Total ROIs: {roi_stats['total_rois']}")
                    print(f"   Average per frame: {roi_stats['total_rois'] / frame_count:.1f}")
                    print(f"   Number ROIs: {roi_stats['number_rois']} ({roi_stats['number_rois']/roi_stats['total_rois']*100:.1f}%)")
                    print(f"   Cone ROIs: {roi_stats['cone_rois']} ({roi_stats['cone_rois']/roi_stats['total_rois']*100:.1f}%)")
                    print(f"   Dashboard ROIs: {roi_stats['dashboard_rois']} ({roi_stats['dashboard_rois']/roi_stats['total_rois']*100:.1f}%)")


def main():
    """Main function to run cone-filtered optimized video stream.

    Returns:
        0 on normal completion, 1 when no model weights are found or an
        error occurs during processing.
    """
    # Find model weights: use the first candidate that exists on disk.
    candidate_models = [
        r".\weights\all_yolo_all_epochs_0811\best.pt",
        r".\weights\all_yolo_all_epochs_0808\best.pt",
        r".\weights\all_yolo_all_epochs_0805\best.pt",
        r".\weights\all_yolo_all_epochs_0804\best.pt",
    ]
    model_path = None
    for path in candidate_models:
        if os.path.exists(path):
            model_path = path
            break
    if model_path is None:
        print("❌ Model weights not found")
        return 1

    # Find a video source: first existing candidate file, otherwise fall
    # back to camera index 0.  (The previous unconditional override of this
    # selection with candidate_videos[1] was a debug leftover and is removed:
    # it defeated both the existence check and the camera fallback.)
    candidate_videos = [
        r".\video_2025-08-10_13-17-30.avi",
        r".\video_2025-08-10_13-16-08.avi",
        r".\video_2025-08-03_22-31-55.avi",
    ]
    video_source = 0
    for video_path in candidate_videos:
        if os.path.exists(video_path):
            video_source = video_path
            break

    try:
        print(f"🔧 Creating cone-alignment-filtered + ROI optimized video stream pipeline")
        print(f"📹 Model: {model_path}")
        print(f"📹 Source: {video_source}")

        video_pipeline = VideoStreamPipelineConeFiltered(
            model_path,
            use_fresh_pipeline_per_frame=True
        )

        video_pipeline.process_video_stream(
            video_source=video_source,
            # output_path="cone_filtered_output.avi",
            max_frames=None,
            fps_limit=None,
            show_roi_debug=True
        )
        return 0
    except Exception as e:
        print(f"❌ Error: {e}")
        import traceback
        traceback.print_exc()
        return 1


if __name__ == "__main__":
    exit(main())


