"""
Optimized video stream pipeline - combines cone ROI optimization and video stream processing
Uses optimized cone ROI building logic to improve cone color recognition accuracy
"""

import os
import cv2
import numpy as np
from typing import Optional, Dict, Any, List, Tuple
from yolo_pipeline_optimized import YOLOTrafficLightPipelineOptimized


class VideoStreamPipelineOptimized:
    """
    Optimized video stream pipeline, combines cone ROI optimization and video stream processing.

    Main features:
    - Only optimizes cone ROI building, maintains original stability of number recognition
    - Intelligent cone area cropping based on dashboard width
    - Real-time performance monitoring and debug information
    - Enhanced visualization interface
    """

    def __init__(self,
                 model_path: str,
                 device: Optional[str] = None,
                 display_width: int = 1920,
                 display_height: int = 1080,
                 use_fresh_pipeline_per_frame: bool = True):
        """
        Initialize optimized video stream pipeline.

        Args:
            model_path: YOLO model weights path
            device: Inference device
            display_width: Display window width
            display_height: Display window height
            use_fresh_pipeline_per_frame: Whether to create a new pipeline instance per
                frame (works around state issues in the underlying pipeline, at a small
                per-frame construction cost)
        """
        self.model_path = model_path
        self.device = device
        self.display_width = display_width
        self.display_height = display_height
        self.use_fresh_pipeline_per_frame = use_fresh_pipeline_per_frame

        # Probe-load the model once so a bad weights path fails fast, before
        # any video processing starts.
        test_pipeline = YOLOTrafficLightPipelineOptimized(model_path, device=device, debug=False)
        print(f"✅ Optimized model loaded successfully: {model_path}")
        del test_pipeline  # Release the probe instance immediately.

        # Keep one persistent instance unless per-frame instantiation was requested.
        if not self.use_fresh_pipeline_per_frame:
            self.pipeline = YOLOTrafficLightPipelineOptimized(model_path, device=device, debug=False)
        else:
            self.pipeline = None
            print("🔧 Fresh pipeline per frame mode enabled (more stable but slightly slower)")

        # BGR colors used by the visualization overlay.
        self.colors = {
            'cone': (255, 165, 0),      # orange
            'number': (0, 255, 0),      # green
            'dashboard': (255, 0, 0),   # blue
            'text': (255, 255, 255),    # white
            'background': (50, 50, 50), # dark gray
            'success': (0, 255, 0),     # green - success
            'warning': (0, 255, 255),   # yellow - warning
            'error': (0, 0, 255)        # red - error
        }

        # Font settings for overlay text.
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.font_scale = 0.8
        self.font_thickness = 2

    def _draw_text_with_background(self, image: np.ndarray, text: str,
                                   position: Tuple[int, int],
                                   font_color: Tuple[int, int, int] = (255, 255, 255),
                                   bg_color: Tuple[int, int, int] = (0, 0, 0),
                                   font_scale: float = 0.8,
                                   thickness: int = 2) -> np.ndarray:
        """
        Draw text with a filled background rectangle (in place) and return the image.

        Args:
            image: BGR image to draw on (modified in place)
            text: Text string to render
            position: (x, y) of the text baseline origin
            font_color: BGR text color
            bg_color: BGR background rectangle color
            font_scale: cv2.putText font scale
            thickness: Stroke thickness

        Returns:
            The same image object, for call chaining.
        """
        x, y = position

        # Measure the text so the background rectangle fits with a 5px margin.
        (text_width, text_height), baseline = cv2.getTextSize(text, self.font, font_scale, thickness)

        # Background rectangle first (filled), then the text on top.
        cv2.rectangle(image, (x - 5, y - text_height - 5),
                      (x + text_width + 5, y + baseline + 5), bg_color, -1)
        cv2.putText(image, text, (x, y), self.font, font_scale, font_color, thickness)

        return image

    def process_single_frame_optimized(self, frame: np.ndarray, frame_idx: int) -> Dict[str, Any]:
        """
        Process a single frame using the optimized pipeline.

        Args:
            frame: Input BGR frame
            frame_idx: Frame index, used only for error reporting

        Returns:
            Processing result dictionary. On failure, a placeholder dictionary with
            "None" values and an 'error' key is returned instead of raising.
        """
        try:
            if self.use_fresh_pipeline_per_frame:
                # Create a fresh pipeline instance per frame (works around
                # state-corruption issues in the underlying pipeline).
                pipeline = YOLOTrafficLightPipelineOptimized(
                    self.model_path,
                    device=self.device,
                    debug=False
                )
            else:
                # Reuse the persistent pipeline instance.
                pipeline = self.pipeline

            results = pipeline.process_image(frame)

            # Release the per-frame instance promptly.
            if self.use_fresh_pipeline_per_frame:
                del pipeline

            return results

        except Exception as e:
            print(f"❌ Error processing frame {frame_idx}: {e}")
            return {
                'final_results': [],
                'cone_color': "None",
                'number': "None",
                'dashboard_state': "None",
                'error': str(e),
                'processing_info': {}
            }

    def create_enhanced_display(self, frame: np.ndarray, results: Dict[str, Any],
                                frame_idx: int, fps: float = 0.0,
                                success_rate: float = 0.0) -> np.ndarray:
        """
        Create an annotated copy of the frame with detection results and debug info.

        Args:
            frame: Input BGR frame (not modified; a copy is annotated)
            results: Result dictionary from the pipeline
            frame_idx: Current frame index (shown in the top-right corner)
            fps: Current processing FPS (hidden when <= 0)
            success_rate: Running detection success rate in percent (hidden when <= 0)

        Returns:
            Annotated BGR image of the same size as the input frame.
        """
        display = frame.copy()

        # Pull results with safe defaults.
        cone_color = results.get('cone_color', 'None')
        number = results.get('number', 'None')
        dashboard_state = results.get('dashboard_state', 'None')
        processing_info = results.get('processing_info', {})

        # Layout for the left-hand result column.
        y_start = 50
        line_height = 40

        # 🎯 Main results.
        # Cone color label is rendered in the detected color itself (BGR lookup).
        cone_color_bgr = {
            'red': (0, 0, 255),
            'orange': (0, 165, 255),
            'yellow': (0, 255, 255),
            'green': (0, 255, 0),
            'blue': (255, 0, 0)
        }.get(cone_color.lower(), self.colors['text'])

        self._draw_text_with_background(
            display, f"Cone color: {cone_color}", (30, y_start),
            font_color=cone_color_bgr, font_scale=1.2, thickness=3
        )

        y_start += line_height
        number_color = self.colors['success'] if number != 'None' else self.colors['error']
        self._draw_text_with_background(
            display, f"Number: {number}", (30, y_start),
            font_color=number_color, font_scale=1.2, thickness=3
        )

        y_start += line_height
        dashboard_color = self.colors['success'] if dashboard_state != 'None' else self.colors['error']
        self._draw_text_with_background(
            display, f"Dashboard: {dashboard_state}", (30, y_start),
            font_color=dashboard_color, font_scale=1.2, thickness=3
        )

        # Optimized version banner.
        y_start += line_height + 20
        self._draw_text_with_background(
            display, "CONE ROI OPTIMIZED", (30, y_start),
            font_color=self.colors['warning'], font_scale=1.0, thickness=2
        )

        # Per-frame processing info, when the pipeline provided it.
        if processing_info:
            y_start += line_height

            info_lines = [
                f"Stage1 detections: {processing_info.get('num_detections_stage1', 0)}",
                f"ROIs generated: {processing_info.get('num_rois', 0)}",
                f"ROI types: {', '.join(processing_info.get('roi_types', []))}"
            ]

            for line in info_lines:
                self._draw_text_with_background(
                    display, line, (30, y_start),
                    font_color=self.colors['text'], font_scale=0.8, thickness=2
                )
                y_start += 30

        # Performance info column, top-right.
        right_x = display.shape[1] - 250
        right_y = 40

        # FPS (green above 10 FPS, yellow otherwise).
        if fps > 0:
            fps_color = self.colors['success'] if fps > 10 else self.colors['warning']
            self._draw_text_with_background(
                display, f"FPS: {fps:.1f}", (right_x, right_y),
                font_color=fps_color, font_scale=1.0, thickness=2
            )
            right_y += 35

        # Success rate (green > 70%, yellow > 40%, red otherwise).
        if success_rate > 0:
            success_color = self.colors['success'] if success_rate > 70 else (
                self.colors['warning'] if success_rate > 40 else self.colors['error']
            )
            self._draw_text_with_background(
                display, f"Success: {success_rate:.1f}%", (right_x, right_y),
                font_color=success_color, font_scale=1.0, thickness=2
            )
            right_y += 35

        # Frame counter.
        self._draw_text_with_background(
            display, f"Frame: {frame_idx}", (right_x, right_y),
            font_color=self.colors['text'], font_scale=0.9, thickness=2
        )

        # Pipeline mode indicator.
        mode_text = "Fresh Pipeline" if self.use_fresh_pipeline_per_frame else "Persistent Pipeline"
        mode_color = self.colors['success'] if self.use_fresh_pipeline_per_frame else self.colors['warning']
        self._draw_text_with_background(
            display, mode_text, (right_x, right_y + 35),
            font_color=mode_color, font_scale=0.7, thickness=1
        )

        return display

    def process_video_stream(self,
                             video_source=0,
                             output_path: Optional[str] = None,
                             max_frames: Optional[int] = None,
                             fps_limit: Optional[float] = None,
                             show_roi_debug: bool = False) -> None:
        """
        Process an optimized video stream until it ends or the user quits.

        Args:
            video_source: Video file path (str) or camera index (int)
            output_path: Optional output video path for the annotated stream
            max_frames: Maximum number of frames to process (None = unlimited)
            fps_limit: Drop frames so processing does not exceed this FPS
            show_roi_debug: Whether to show ROI debug overlay (toggle with 'd')

        Raises:
            ValueError: If the video source cannot be opened.
        """
        cap = cv2.VideoCapture(video_source)
        if not cap.isOpened():
            raise ValueError(f"Cannot open video source: {video_source}")

        # Query source properties; files report real FPS, cameras get a default.
        if isinstance(video_source, str):
            fps = cap.get(cv2.CAP_PROP_FPS)
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            print(f"📹 Video: {video_source}")
            print(f"📊 FPS: {fps}, Total frames: {total_frames}")
        else:
            fps = 30.0
            total_frames = -1
            print(f"📷 Camera: {video_source}")

        # Optional writer for the annotated output (same size as the input).
        out_writer = None
        if output_path:
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            out_writer = cv2.VideoWriter(
                output_path, fourcc, fps, (frame_width, frame_height)
            )
            print(f"💾 Output will be saved to: {output_path}")

        # Processing state.
        frame_count = 0
        successful_detections = 0
        fps_counter = 0
        # BUGFIX: keep two independent tick baselines. `stream_start_tick` anchors
        # the fps_limit schedule for the whole run, while `fps_timer` is reset
        # every 10 frames to compute the displayed FPS. The original code shared
        # one variable, so the periodic FPS reset silently broke the fps_limit
        # schedule.
        stream_start_tick = cv2.getTickCount()
        fps_timer = stream_start_tick
        current_fps = 0.0

        # ROI debug statistics accumulated over the whole run.
        roi_stats = {
            'total_rois': 0,
            'number_rois': 0,
            'cone_rois': 0,
            'dashboard_rois': 0
        }

        print("🚀 Starting optimized video processing. Press 'q' to quit, 's' to save frame, 'd' to toggle ROI debug mode.")
        print("🔧 Using optimized cone ROI building logic - intelligent cropping based on dashboard width, number recognition keeps original logic")

        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    if isinstance(video_source, str):
                        print("📹 Video file ended")
                    else:
                        print("📷 Camera read failed")
                    break

                # Frame-count limit.
                if max_frames and frame_count >= max_frames:
                    print(f"🔢 Reached maximum frame limit: {max_frames}")
                    break

                # FPS limit: drop frames that arrive ahead of the schedule
                # anchored at stream start.
                if fps_limit:
                    expected_time = frame_count / fps_limit
                    elapsed_time = (cv2.getTickCount() - stream_start_tick) / cv2.getTickFrequency()
                    if elapsed_time < expected_time:
                        continue

                try:
                    # Run the optimized pipeline on this frame.
                    results = self.process_single_frame_optimized(frame, frame_count)

                    # A frame counts as successful if any of the three outputs
                    # produced a detection.
                    cone_color = results.get('cone_color', 'None')
                    number = results.get('number', 'None')
                    dashboard_state = results.get('dashboard_state', 'None')

                    has_detections = (
                        cone_color != 'None' or
                        number != 'None' or
                        dashboard_state != 'None'
                    )

                    if has_detections:
                        successful_detections += 1

                    # Accumulate ROI statistics.
                    if 'processing_info' in results:
                        info = results['processing_info']
                        roi_types = info.get('roi_types', [])
                        roi_stats['total_rois'] += len(roi_types)
                        roi_stats['number_rois'] += roi_types.count('number')
                        roi_stats['cone_rois'] += roi_types.count('cone')
                        roi_stats['dashboard_rois'] += roi_types.count('dashboard')

                    # Build the annotated display frame.
                    # (frame_count is 0-based and not yet incremented, hence +1.)
                    success_rate = (successful_detections / (frame_count + 1)) * 100
                    display_frame = self.create_enhanced_display(
                        frame, results, frame_count, current_fps, success_rate
                    )

                    # ROI debug overlay (bottom-left).
                    if show_roi_debug and frame_count > 0:
                        debug_y = display_frame.shape[0] - 120
                        avg_rois = roi_stats['total_rois'] / (frame_count + 1)
                        roi_debug_lines = [
                            f"Avg ROIs/frame: {avg_rois:.1f}",
                            f"Number ROIs: {roi_stats['number_rois']}",
                            f"Cone ROIs: {roi_stats['cone_rois']}",
                            f"Dashboard ROIs: {roi_stats['dashboard_rois']}"
                        ]

                        for line in roi_debug_lines:
                            self._draw_text_with_background(
                                display_frame, line, (30, debug_y),
                                font_color=self.colors['warning'], font_scale=0.7, thickness=1
                            )
                            debug_y += 25

                    # FPS measurement over a sliding window of 10 frames.
                    fps_counter += 1
                    if fps_counter % 10 == 0:
                        current_time = cv2.getTickCount()
                        elapsed = (current_time - fps_timer) / cv2.getTickFrequency()
                        if elapsed > 0:
                            current_fps = fps_counter / elapsed
                        fps_counter = 0
                        fps_timer = current_time

                    # Show the annotated frame.
                    cv2.imshow('YOLO Traffic Light Detection - CONE ROI OPTIMIZED', display_frame)

                    # Write to the output file, if requested.
                    if out_writer:
                        out_writer.write(display_frame)

                    # Keyboard handling: q = quit, s = save frame, d = toggle debug.
                    key = cv2.waitKey(1) & 0xFF
                    if key == ord('q'):
                        print("👋 User requested exit")
                        break
                    elif key == ord('s'):
                        save_path = f"optimized_frame_{frame_count:06d}.jpg"
                        cv2.imwrite(save_path, display_frame)
                        print(f"💾 Frame saved: {save_path}")
                    elif key == ord('d'):
                        show_roi_debug = not show_roi_debug
                        debug_status = "enabled" if show_roi_debug else "disabled"
                        print(f"🔧 ROI debug mode: {debug_status}")

                    frame_count += 1

                    # Progress reporting for file sources every 50 frames.
                    if isinstance(video_source, str) and total_frames > 0:
                        if frame_count % 50 == 0:
                            progress = (frame_count / total_frames) * 100
                            print(f"📊 Progress: {frame_count}/{total_frames} ({progress:.1f}%) - Success rate: {success_rate:.1f}%")

                except Exception as e:
                    print(f"❌ Error processing frame {frame_count}: {e}")
                    continue

        except KeyboardInterrupt:
            print("⚠️ User interrupted")

        finally:
            # Release capture/writer/window resources.
            cap.release()
            if out_writer:
                out_writer.release()
            cv2.destroyAllWindows()

            # Drop the persistent pipeline.
            # BUGFIX: assign None instead of `del self.pipeline` so the attribute
            # still exists if process_video_stream is called again on this object.
            if not self.use_fresh_pipeline_per_frame and self.pipeline:
                self.pipeline = None

            # Final run statistics.
            print(f"\n🏁 Optimized video processing completed")
            print(f"📊 Total frames processed: {frame_count}")
            print(f"✅ Successful detections: {successful_detections}")
            if frame_count > 0:
                final_success_rate = (successful_detections / frame_count) * 100
                print(f"📈 Overall success rate: {final_success_rate:.1f}%")

                # ROI statistics.
                if roi_stats['total_rois'] > 0:
                    print(f"\n🔧 ROI generation statistics:")
                    print(f"   Total ROIs: {roi_stats['total_rois']}")
                    print(f"   Average per frame: {roi_stats['total_rois'] / frame_count:.1f}")
                    print(f"   Number ROIs: {roi_stats['number_rois']} ({roi_stats['number_rois']/roi_stats['total_rois']*100:.1f}%)")
                    print(f"   Cone ROIs: {roi_stats['cone_rois']} ({roi_stats['cone_rois']/roi_stats['total_rois']*100:.1f}%)")
                    print(f"   Dashboard ROIs: {roi_stats['dashboard_rois']} ({roi_stats['dashboard_rois']/roi_stats['total_rois']*100:.1f}%)")


def main():
    """
    Run the optimized video stream pipeline as a script.

    Returns:
        0 on success, 1 if no model weights were found or processing failed.
    """
    # Look for model weights, newest training run first.
    candidate_models = [
        r".\weights\all_yolo_all_epochs_0811\best.pt",
        r".\weights\all_yolo_all_epochs_0808\best.pt",
        r".\weights\all_yolo_all_epochs_0805\best.pt",
        r".\weights\all_yolo_all_epochs_0804\best.pt",
    ]

    model_path = None
    for path in candidate_models:
        if os.path.exists(path):
            model_path = path
            break

    if model_path is None:
        print("❌ Model weights not found")
        return 1

    # Look for a video file; fall back to the default camera if none exists.
    candidate_videos = [
        r".\video_2025-08-10_13-17-30.avi",
        r".\video_2025-08-10_13-16-08.avi",
        r".\video_2025-08-03_22-31-55.avi",
    ]

    video_source = 0  # Default: camera index 0
    for video_path in candidate_videos:
        if os.path.exists(video_path):
            video_source = video_path
            break
    # BUGFIX: removed `video_source = candidate_videos[0]` which unconditionally
    # overrode the existence check above, defeating the camera fallback and
    # possibly pointing at a nonexistent file.

    try:
        # Create the optimized video stream pipeline.
        print(f"🔧 Creating cone ROI optimized video stream pipeline")
        print(f"📹 Model: {model_path}")
        print(f"📹 Source: {video_source}")

        video_pipeline = VideoStreamPipelineOptimized(
            model_path,
            use_fresh_pipeline_per_frame=True  # Per-frame instances for stability
        )

        # Process the video stream.
        video_pipeline.process_video_stream(
            video_source=video_source,
            # output_path="optimized_output.avi",  # Uncomment to save output
            max_frames=None,
            fps_limit=None,
            show_roi_debug=True  # Enable ROI debug overlay
        )

        return 0

    except Exception as e:
        print(f"❌ Error: {e}")
        import traceback
        traceback.print_exc()
        return 1


if __name__ == "__main__":
    exit(main())
