"""
Enhanced video stream pipeline - improves number ROI extraction with doubled height
Based on video_stream_optimized.py, but with enhanced number ROI cropping logic
"""

import os
import sys
from typing import Optional, Dict, Any, List, Tuple

import cv2
import numpy as np

from yolo_pipeline_optimized import YOLOTrafficLightPipelineOptimized


class YOLOTrafficLightPipelineNumberEnhanced(YOLOTrafficLightPipelineOptimized):
    """
    Enhanced YOLO pipeline with improved number ROI extraction
    
    Main improvements:
    - Doubles the height of number ROI extraction (expand up and down by half each)
    - Keeps cone ROI optimization intact
    - Dashboard ROI remains unchanged
    """
    
    def __init__(self, 
                 model_path: str,
                 device: Optional[str] = None,
                 debug: bool = True):
        """
        Initialize enhanced pipeline with improved number ROI extraction
        
        Args:
            model_path: YOLO model weights path
            device: Inference device
            debug: Enable debug visualization and logging
        """
        super().__init__(model_path, device, debug)
        
        if self.debug:
            print(f"🔧 Enhanced Number ROI Pipeline initialized")
            print(f"   📊 Number ROI height will be doubled (expand up/down by half each)")
    
    def build_optimized_adaptive_rois(self, detections: List[Tuple[str, Tuple[float, List[float]], bool]],
                                     base_image_bgr: np.ndarray,
                                     debug_title_prefix: str = "number-enhanced-roi") -> List[Tuple[str, np.ndarray]]:
        """
        Build optimized adaptive ROIs with enhanced number ROI height
        
        Enhanced strategy:
        1. If only dashboard detected: Number area uses DOUBLED HEIGHT (expand up and down by half each), cone area uses dashboard width optimization
        2. If number and dashboard detected: Cone area uses number height + dashboard width optimization
        3. If only number detected: Keep original logic
        4. NUMBER ROI HEIGHT: Original height * 2 (expand up and down by half each)
        5. Cone ROI optimization remains unchanged
        
        Args:
            detections: Stage-1 results as
                (class_name, (confidence, [x1, y1, x2, y2]), is_confident) tuples.
            base_image_bgr: Full BGR frame that the detection boxes refer to.
            debug_title_prefix: Prefix for debug log lines and preview windows.
        
        Returns:
            List of (roi_class_name, roi_image) pairs ("number" / "cone" /
            "dashboard"); empty when neither a number nor a dashboard anchor
            was detected.
        
        Raises:
            ValueError: If base_image_bgr is None or has zero size.
        """
        if base_image_bgr is None or base_image_bgr.size == 0:
            raise ValueError("Base image is empty")
        
        H, W = base_image_bgr.shape[:2]
        
        def _pick_best(detections, allowed_names: set):
            # Return the highest-confidence (prob, box, name) whose class name
            # (case-insensitive) is in allowed_names, or None.
            best = None
            for name, (prob, box), _is_conf in detections:
                if name.lower() in allowed_names:
                    if best is None or prob > best[0]:
                        best = (prob, box, name)
            return best
        
        def _clip(v, lo, hi):
            # Clamp v (cast to int) into [lo, hi].
            return max(lo, min(int(v), hi))
        
        def _box_h(box):
            """Height of an [x1, y1, x2, y2] box (never negative)."""
            return max(0, int(box[3] - box[1]))
        
        def _box_w(box):
            """Width of an [x1, y1, x2, y2] box (never negative)."""
            return max(0, int(box[2] - box[0]))
        
        def slice_band_with_width(y0, y1, x_center, width_ratio=1.0):
            """Slice rows [y0, y1) centred on x_center with width = width_ratio * W."""
            y0c = _clip(y0, 0, H)
            y1c = _clip(y1, 0, H)
            if y1c <= y0c:
                return None
            
            # Compute the x range around the requested centre.
            half_width = int(W * width_ratio / 2)
            x0c = _clip(x_center - half_width, 0, W)
            x1c = _clip(x_center + half_width, 0, W)
            
            if x1c <= x0c:
                # Degenerate x range (e.g. tiny width_ratio or centre clipped
                # off-frame): fall back to a full-width band.
                return base_image_bgr[y0c:y1c, :]
            
            return base_image_bgr[y0c:y1c, x0c:x1c]
        
        def slice_band(y0, y1):
            """Original full-width band slice over rows [y0, y1); None when empty."""
            y0c = _clip(y0, 0, H)
            y1c = _clip(y1, 0, H)
            if y1c <= y0c:
                return None
            return base_image_bgr[y0c:y1c, :]
        
        def slice_band_with_enhanced_number_height(y_center, original_height):
            """
            Enhanced number ROI extraction with doubled height
            Expand up and down by half each
            """
            enhanced_height = original_height * 2
            half_enhanced = enhanced_height // 2
            
            y_start = y_center - half_enhanced
            y_end = y_center + half_enhanced
            
            return slice_band(y_start, y_end)
        
        # Pick the best (highest-confidence) number and dashboard anchors.
        best_number = _pick_best(detections, self.NUMBER_CLASSES)
        best_dash = _pick_best(detections, self.STATE_CLASSES)
        
        rois = []
        
        if best_number is None and best_dash is None:
            if self.debug:
                self._debug_print(f"🔧 {debug_title_prefix}: 没有数字/仪表盘锚点; 返回空列表")
            return rois
        
        if self.debug:
            self._debug_print(f"🔧 {debug_title_prefix}: 开始增强数字ROI构建")
            if best_number:
                prob_n, box_n, name_n = best_number
                self._debug_print(f"   📍 最佳数字: {name_n} ({prob_n:.3f}) at {[int(x) for x in box_n]}")
            if best_dash:
                prob_d, box_d, name_d = best_dash
                self._debug_print(f"   📍 最佳仪表盘: {name_d} ({prob_d:.3f}) at {[int(x) for x in box_d]}")
        
        if best_number is None and best_dash is not None:
            # 🎯 Enhanced case 1: only the dashboard was detected.
            # Number area uses the enhanced (doubled) height at full width;
            # cone area uses the dashboard-width optimization.
            prob_d, box_d, name_d = best_dash
            x1_d, y1_d, x2_d, y2_d = [int(x) for x in box_d]
            y_top_d = y1_d
            h_d = _box_h(box_d)
            w_d = _box_w(box_d)
            x_center_d = (x1_d + x2_d) // 2
            
            if self.debug:
                self._debug_print(f"🔧 {debug_title_prefix}: 情况1 - 只有仪表盘 (增强数字ROI)")
                self._debug_print(f"   仪表盘: 中心x={x_center_d}, 高度={h_d}, 宽度={w_d}")
            
            # Number area: above the dashboard, using the enhanced height
            # (2x the original band height).
            original_num_height = int(0.5 * h_d)  # original number-band height
            num_y_center = y_top_d - original_num_height // 2  # centre of the number band
            num_roi = slice_band_with_enhanced_number_height(num_y_center, original_num_height)
            if num_roi is not None:
                rois.append(("number", num_roi))
                if self.debug:
                    enhanced_height = original_num_height * 2
                    self._debug_print(f"   ✅ 数字ROI: 中心y={num_y_center}, 增强高度={enhanced_height} (原高度={original_num_height}), 全宽度")
            
            # Cone area: a further 0.5*h_d above the number band, using the
            # dashboard-width optimization (original logic kept).
            dashboard_width_ratio = min(1.0, w_d / W * 2.0)  # 2x dashboard width, capped at full frame width
            cone_y1 = y_top_d - int(1.0 * h_d)
            cone_y2 = y_top_d - int(0.5 * h_d)
            cone_roi = slice_band_with_width(cone_y1, cone_y2, x_center_d, dashboard_width_ratio)
            if cone_roi is not None:
                rois.append(("cone", cone_roi))
                if self.debug:
                    self._debug_print(f"   ✅ 锥筒ROI: y({cone_y1}-{cone_y2}), 宽度比例={dashboard_width_ratio:.2f} (优化)")
        
        elif best_number is not None and best_dash is None:
            # Case 2: only the number was detected (original logic kept).
            prob_n, box_n, name_n = best_number
            x1_n, y1_n, x2_n, y2_n = [int(x) for x in box_n]
            y_top_n = y1_n
            y_bot_n = y2_n
            h_n = _box_h(box_n)
            
            if self.debug:
                self._debug_print(f"🔧 {debug_title_prefix}: 情况2 - 只有数字 (保持原逻辑)")
                self._debug_print(f"   数字: 高度={h_n}")
            
            # Dashboard area: 2*h_n below the bottom of the number box.
            dash_roi = slice_band(y_bot_n, y_bot_n + 2.0 * h_n)
            if dash_roi is not None:
                rois.append(("dashboard", dash_roi))
                if self.debug:
                    self._debug_print(f"   ✅ 仪表盘ROI: y({y_bot_n}-{y_bot_n + int(2.0 * h_n)})")
            
            # Cone area: 1*h_n above the top of the number box.
            cone_roi = slice_band(y_top_n - 1.0 * h_n, y_top_n)
            if cone_roi is not None:
                rois.append(("cone", cone_roi))
                if self.debug:
                    self._debug_print(f"   ✅ 锥筒ROI: y({y_top_n - int(1.0 * h_n)}-{y_top_n})")
        
        else:
            # 🎯 Case 3: both number and dashboard were detected.
            # Use the number-box height and the dashboard-box width to crop the
            # cone area (cone-only optimization; original logic kept otherwise).
            prob_n, box_n, name_n = best_number
            prob_d, box_d, name_d = best_dash
            
            x1_n, y1_n, x2_n, y2_n = [int(x) for x in box_n]
            x1_d, y1_d, x2_d, y2_d = [int(x) for x in box_d]
            
            y_top_n = y1_n
            h_n = _box_h(box_n)
            w_d = _box_w(box_d)
            x_center_d = (x1_d + x2_d) // 2
            
            if self.debug:
                self._debug_print(f"🔧 {debug_title_prefix}: 情况3 - 数字和仪表盘都存在")
                self._debug_print(f"   数字: 高度={h_n}")
                self._debug_print(f"   仪表盘: 中心x={x_center_d}, 宽度={w_d}")
            
            # width_ratio derived from the dashboard width.
            dashboard_width_ratio = min(1.0, w_d / W * 1.5)  # 1.5x dashboard width, capped at full frame width
            
            # Cone area: 1*h_n above the top of the number box, using the
            # dashboard-width optimization.
            cone_y1 = y_top_n - int(1.0 * h_n)
            cone_y2 = y_top_n
            cone_roi = slice_band_with_width(cone_y1, cone_y2, x_center_d, dashboard_width_ratio)
            if cone_roi is not None:
                rois.append(("cone", cone_roi))
                if self.debug:
                    self._debug_print(f"   ✅ 锥筒ROI: y({cone_y1}-{cone_y2}), 宽度比例={dashboard_width_ratio:.2f} (优化)")
            
            # Note: in this case the number and dashboard ROIs are provided
            # directly by the first-stage detection results, so no extra ROIs
            # are generated here — this mirrors the original design.
        
        if self.debug:
            for cname, roi in rois:
                self._show_image(roi, title=f"{debug_title_prefix} | {cname} ROI ({roi.shape[1]}x{roi.shape[0]})", size=6)
            self._debug_print(f"🔧 {debug_title_prefix} 返回 {[(c, (r.shape[1], r.shape[0])) for c, r in rois]}")
        
        return rois


class VideoStreamPipelineNumberEnhanced:
    """
    Enhanced video stream pipeline with improved number ROI extraction
    
    Main features:
    - Enhanced number ROI building with doubled height (expand up/down by half each)
    - Maintains original cone ROI optimization logic
    - Real-time performance monitoring and debug information
    - Enhanced visualization interface
    """
    
    def __init__(self, 
                 model_path: str,
                 device: Optional[str] = None,
                 display_width: int = 1920,
                 display_height: int = 1080,
                 use_fresh_pipeline_per_frame: bool = True):
        """
        Initialize enhanced video stream pipeline
        
        Args:
            model_path: YOLO model weights path
            device: Inference device
            display_width: Display window width
            display_height: Display window height
            use_fresh_pipeline_per_frame: Whether to create new pipeline instance per frame (solves state issues)
        """
        self.model_path = model_path
        self.device = device
        self.display_width = display_width
        self.display_height = display_height
        self.use_fresh_pipeline_per_frame = use_fresh_pipeline_per_frame
        
        # Smoke-test that the model loads before entering the stream loop.
        test_pipeline = YOLOTrafficLightPipelineNumberEnhanced(model_path, device=device, debug=False)
        print(f"✅ Enhanced Number ROI model loaded successfully: {model_path}")
        del test_pipeline  # release the test instance immediately
        
        # Keep one persistent instance unless a fresh one is built per frame.
        if not self.use_fresh_pipeline_per_frame:
            self.pipeline = YOLOTrafficLightPipelineNumberEnhanced(model_path, device=device, debug=False)
        else:
            self.pipeline = None
            print("🔧 Fresh pipeline per frame mode enabled (more stable but slightly slower)")
        
        # Visualization colors (BGR tuples).
        self.colors = {
            'cone': (255, 165, 0),      # orange
            'number': (0, 255, 0),      # green
            'dashboard': (255, 0, 0),   # blue
            'text': (255, 255, 255),    # white
            'background': (50, 50, 50), # dark gray
            'success': (0, 255, 0),     # green - success
            'warning': (0, 255, 255),   # yellow - warning
            'error': (0, 0, 255),       # red - error
            'enhanced': (255, 0, 255)   # magenta - enhanced feature
        }
        
        # Font settings for overlay text.
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.font_scale = 0.8
        self.font_thickness = 2
    
    def _draw_text_with_background(self, image: np.ndarray, text: str, 
                                  position: Tuple[int, int], 
                                  font_color: Tuple[int, int, int] = (255, 255, 255),
                                  bg_color: Tuple[int, int, int] = (0, 0, 0),
                                  font_scale: float = 0.8,
                                  thickness: int = 2) -> np.ndarray:
        """Draw text with background.
        
        Args:
            image: BGR image; drawn on in place.
            text: Text to render.
            position: (x, y) baseline origin of the text.
            font_color: Text color (BGR).
            bg_color: Background rectangle color (BGR).
            font_scale: cv2 font scale.
            thickness: Stroke thickness.
        
        Returns:
            The same image object (mutated in place).
        """
        x, y = position
        
        # Measure the text so the background rectangle fits with 5 px padding.
        (text_width, text_height), baseline = cv2.getTextSize(text, self.font, font_scale, thickness)
        
        # Draw the background rectangle first (filled).
        cv2.rectangle(image, (x - 5, y - text_height - 5), 
                     (x + text_width + 5, y + baseline + 5), bg_color, -1)
        
        # Then draw the text on top of it.
        cv2.putText(image, text, (x, y), self.font, font_scale, font_color, thickness)
        
        return image
    
    def process_single_frame_enhanced(self, frame: np.ndarray, frame_idx: int) -> Dict[str, Any]:
        """
        Process single frame using enhanced number ROI pipeline
        
        Args:
            frame: Input BGR frame
            frame_idx: Frame index for debugging
            
        Returns:
            Processing result dictionary
        """
        try:
            if self.use_fresh_pipeline_per_frame:
                # Create a fresh pipeline instance per frame (works around
                # state-corruption issues in long-running instances).
                pipeline = YOLOTrafficLightPipelineNumberEnhanced(
                    self.model_path, 
                    device=self.device, 
                    debug=False
                )
            else:
                # Reuse the persistent pipeline instance.
                pipeline = self.pipeline
            
            # Run the full detection pipeline on this frame.
            results = pipeline.process_image(frame)
            
            # Drop the per-frame pipeline when fresh-instance mode is on.
            if self.use_fresh_pipeline_per_frame:
                del pipeline
            
            return results
            
        except Exception as e:
            print(f"❌ Error processing frame {frame_idx}: {e}")
            return {
                'final_results': [],
                'cone_color': "None",
                'number': "None",
                'dashboard_state': "None",
                'error': str(e),
                'processing_info': {}
            }
    
    def create_enhanced_display(self, frame: np.ndarray, results: Dict[str, Any], 
                               frame_idx: int, fps: float = 0.0, 
                               success_rate: float = 0.0) -> np.ndarray:
        """Create enhanced display interface with number ROI enhancement indicator.
        
        Args:
            frame: Source BGR frame (copied, not mutated).
            results: Result dict from process_single_frame_enhanced.
            frame_idx: Frame counter shown in the overlay.
            fps: Current processing FPS (0 hides the FPS line).
            success_rate: Running detection success rate in percent (0 hides it).
        
        Returns:
            New BGR image with the overlay drawn.
        """
        display = frame.copy()
        
        # Extract results (missing keys default to 'None').
        cone_color = results.get('cone_color', 'None')
        number = results.get('number', 'None')
        dashboard_state = results.get('dashboard_state', 'None')
        processing_info = results.get('processing_info', {})
        
        # Overlay layout parameters.
        y_start = 50
        line_height = 40
        
        # 🎯 Main result display.
        # Cone color — text is rendered in the detected color itself.
        cone_color_bgr = {
            'red': (0, 0, 255),
            'orange': (0, 165, 255),
            'yellow': (0, 255, 255),
            'green': (0, 255, 0),
            'blue': (255, 0, 0)
                }.get(cone_color.lower(), self.colors['text'])
        
        self._draw_text_with_background(
            display, f"Cone color: {cone_color}", (30, y_start),
            font_color=cone_color_bgr, font_scale=1.2, thickness=3
        )
        
        y_start += line_height
        number_color = self.colors['success'] if number != 'None' else self.colors['error']
        self._draw_text_with_background(
            display, f"Number: {number}", (30, y_start),
            font_color=number_color, font_scale=1.2, thickness=3
        )
        
        y_start += line_height
        dashboard_color = self.colors['success'] if dashboard_state != 'None' else self.colors['error']
        self._draw_text_with_background(
             display, f"Dashboard: {dashboard_state}", (30, y_start),
             font_color=dashboard_color, font_scale=1.2, thickness=3
         )
        
        # Enhanced Number ROI version info
        y_start += line_height + 20
        self._draw_text_with_background(
            display, "NUMBER ROI ENHANCED (2x HEIGHT)", (30, y_start),
            font_color=self.colors['enhanced'], font_scale=1.0, thickness=2
        )
        
        y_start += 30
        self._draw_text_with_background(
            display, "Cone ROI Optimized + Number Height Doubled", (30, y_start),
            font_color=self.colors['warning'], font_scale=0.8, thickness=2
        )
        
        # Processing info
        if processing_info:
            y_start += line_height
            
            info_lines = [
                f"Stage1 detections: {processing_info.get('num_detections_stage1', 0)}",
                f"ROIs generated: {processing_info.get('num_rois', 0)}",
                f"ROI types: {', '.join(processing_info.get('roi_types', []))}"
            ]
            
            for line in info_lines:
                self._draw_text_with_background(
                    display, line, (30, y_start),
                    font_color=self.colors['text'], font_scale=0.8, thickness=2
                )
                y_start += 30
        
        # Performance info in top right
        right_x = display.shape[1] - 250
        right_y = 40
        
        # FPS
        if fps > 0:
            fps_color = self.colors['success'] if fps > 10 else self.colors['warning']
            self._draw_text_with_background(
                display, f"FPS: {fps:.1f}", (right_x, right_y),
                font_color=fps_color, font_scale=1.0, thickness=2
            )
            right_y += 35
        
        # Success rate
        if success_rate > 0:
            success_color = self.colors['success'] if success_rate > 70 else (
                self.colors['warning'] if success_rate > 40 else self.colors['error']
            )
            self._draw_text_with_background(
                display, f"Success: {success_rate:.1f}%", (right_x, right_y),
                font_color=success_color, font_scale=1.0, thickness=2
            )
            right_y += 35
        
        # Frame counter
        self._draw_text_with_background(
            display, f"Frame: {frame_idx}", (right_x, right_y),
            font_color=self.colors['text'], font_scale=0.9, thickness=2
        )
        
        # Mode indicator
        mode_text = "Fresh Pipeline" if self.use_fresh_pipeline_per_frame else "Persistent Pipeline"
        mode_color = self.colors['success'] if self.use_fresh_pipeline_per_frame else self.colors['warning']
        self._draw_text_with_background(
            display, mode_text, (right_x, right_y + 35),
            font_color=mode_color, font_scale=0.7, thickness=1
        )
        
        return display
    
    def process_video_stream(self, 
                           video_source: str | int = 0,
                           output_path: Optional[str] = None,
                           max_frames: Optional[int] = None,
                           fps_limit: Optional[float] = None,
                           show_roi_debug: bool = False) -> None:
        """
        Process enhanced video stream with improved number ROI extraction
        
        Args:
            video_source: Video file path or camera index
            output_path: Optional output video path
            max_frames: Maximum frames to process
            fps_limit: Limit processing FPS
            show_roi_debug: Whether to show ROI debug info
        """
        # Open the video source (file path or camera index).
        cap = cv2.VideoCapture(video_source)
        if not cap.isOpened():
            raise ValueError(f"Cannot open video source: {video_source}")
        
        # Query video properties; cameras get assumed defaults.
        if isinstance(video_source, str):
            fps = cap.get(cv2.CAP_PROP_FPS)
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            print(f"📹 Video: {video_source}")
            print(f"📊 FPS: {fps}, Total frames: {total_frames}")
        else:
            fps = 30.0
            total_frames = -1
            print(f"📷 Camera: {video_source}")
        
        # Set up the output video writer, if requested.
        out_writer = None
        if output_path:
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            out_writer = cv2.VideoWriter(
                output_path, fourcc, fps, (frame_width, frame_height)
            )
            print(f"💾 Output will be saved to: {output_path}")
        
        # Processing state.
        frame_count = 0
        successful_detections = 0
        fps_counter = 0
        fps_timer = cv2.getTickCount()
        current_fps = 0.0
        
        # ROI debug statistics.
        roi_stats = {
            'total_rois': 0,
            'number_rois': 0,
            'cone_rois': 0,
            'dashboard_rois': 0
        }
        
        print("🚀 Starting enhanced number ROI video processing. Press 'q' to quit, 's' to save frame, 'd' to toggle ROI debug mode.")
        print("🔧 Using enhanced number ROI logic - number height doubled (expand up/down by half each), cone ROI optimization intact")
        
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    if isinstance(video_source, str):
                        print("📹 Video file ended")
                    else:
                        print("📷 Camera read failed")
                    break
                
                # Stop at the configured frame-count limit.
                if max_frames and frame_count >= max_frames:
                    print(f"🔢 Reached maximum frame limit: {max_frames}")
                    break
                
                # FPS limiting.
                # NOTE(review): fps_timer is also reset by the FPS counter
                # below every 10 processed frames, so this expected/elapsed
                # comparison drifts over time; the skipped frame has already
                # been consumed from the capture. Confirm intended behavior.
                if fps_limit:
                    expected_time = frame_count / fps_limit
                    elapsed_time = (cv2.getTickCount() - fps_timer) / cv2.getTickFrequency()
                    if elapsed_time < expected_time:
                        continue
                
                try:
                    # Process the frame with the enhanced pipeline.
                    results = self.process_single_frame_enhanced(frame, frame_count)
                    
                    # Check whether any of the three targets was detected.
                    cone_color = results.get('cone_color', 'None')
                    number = results.get('number', 'None')
                    dashboard_state = results.get('dashboard_state', 'None')
                    
                    has_detections = (
                        cone_color != 'None' or 
                        number != 'None' or 
                        dashboard_state != 'None'
                    )
                    
                    if has_detections:
                        successful_detections += 1
                    
                    # Accumulate per-frame ROI statistics.
                    if 'processing_info' in results:
                        info = results['processing_info']
                        roi_types = info.get('roi_types', [])
                        roi_stats['total_rois'] += len(roi_types)
                        roi_stats['number_rois'] += roi_types.count('number')
                        roi_stats['cone_rois'] += roi_types.count('cone')
                        roi_stats['dashboard_rois'] += roi_types.count('dashboard')
                    
                    # Create the enhanced display overlay.
                    # NOTE(review): frame_count is always >= 0 here, so the
                    # else-branch of this conditional is unreachable.
                    success_rate = (successful_detections / (frame_count + 1)) * 100 if frame_count >= 0 else 0
                    display_frame = self.create_enhanced_display(
                        frame, results, frame_count, current_fps, success_rate
                    )
                    
                    # Draw the ROI debug overlay in the lower-left corner.
                    if show_roi_debug and frame_count > 0:
                        debug_y = display_frame.shape[0] - 120
                        avg_rois = roi_stats['total_rois'] / (frame_count + 1)
                        roi_debug_lines = [
                            f"Avg ROIs/frame: {avg_rois:.1f}",
                            f"Number ROIs: {roi_stats['number_rois']} (Enhanced Height)",
                            f"Cone ROIs: {roi_stats['cone_rois']} (Width Optimized)",
                            f"Dashboard ROIs: {roi_stats['dashboard_rois']}"
                        ]
                        
                        for line in roi_debug_lines:
                            self._draw_text_with_background(
                                display_frame, line, (30, debug_y),
                                font_color=self.colors['enhanced'], font_scale=0.7, thickness=1
                            )
                            debug_y += 25
                    
                    # FPS calculation, refreshed every 10 processed frames.
                    fps_counter += 1
                    if fps_counter % 10 == 0:
                        current_time = cv2.getTickCount()
                        elapsed = (current_time - fps_timer) / cv2.getTickFrequency()
                        if elapsed > 0:
                            current_fps = fps_counter / elapsed
                        fps_counter = 0
                        fps_timer = current_time
                    
                    # Display frame
                    cv2.imshow('YOLO Traffic Light Detection - NUMBER ROI ENHANCED', display_frame)
                    
                    # Write the annotated frame to the output video.
                    if out_writer:
                        out_writer.write(display_frame)
                    
                    # Handle keyboard input: q=quit, s=save frame, d=toggle debug.
                    key = cv2.waitKey(1) & 0xFF
                    if key == ord('q'):
                        print("👋 User requested exit")
                        break
                    elif key == ord('s'):
                        save_path = f"enhanced_frame_{frame_count:06d}.jpg"
                        cv2.imwrite(save_path, display_frame)
                        print(f"💾 Frame saved: {save_path}")
                    elif key == ord('d'):
                        show_roi_debug = not show_roi_debug
                        debug_status = "enabled" if show_roi_debug else "disabled"
                        print(f"🔧 ROI debug mode: {debug_status}")
                    
                    frame_count += 1
                    
                    # Print progress every 50 frames (file sources only).
                    if isinstance(video_source, str) and total_frames > 0:
                        if frame_count % 50 == 0:
                            progress = (frame_count / total_frames) * 100
                            print(f"📊 Progress: {frame_count}/{total_frames} ({progress:.1f}%) - Success rate: {success_rate:.1f}%")
                
                except Exception as e:
                    print(f"❌ Error processing frame {frame_count}: {e}")
                    continue
        
        except KeyboardInterrupt:
            print("⚠️ User interrupted")
        
        finally:
            # Release capture/writer resources and close windows.
            cap.release()
            if out_writer:
                out_writer.release()
            cv2.destroyAllWindows()
            
            # Drop the persistent pipeline.
            # NOTE(review): `del self.pipeline` removes the attribute entirely;
            # reusing this object afterwards would raise AttributeError.
            if not self.use_fresh_pipeline_per_frame and self.pipeline:
                del self.pipeline
            
            # Print the final statistics summary.
            print(f"\n🏁 Enhanced number ROI video processing completed")
            print(f"📊 Total frames processed: {frame_count}")
            print(f"✅ Successful detections: {successful_detections}")
            if frame_count > 0:
                final_success_rate = (successful_detections / frame_count) * 100
                print(f"📈 Overall success rate: {final_success_rate:.1f}%")
                
                # ROI generation statistics.
                if roi_stats['total_rois'] > 0:
                    print(f"\n🔧 ROI generation statistics (Enhanced):")
                    print(f"   Total ROIs: {roi_stats['total_rois']}")
                    print(f"   Average per frame: {roi_stats['total_rois'] / frame_count:.1f}")
                    print(f"   Number ROIs: {roi_stats['number_rois']} ({roi_stats['number_rois']/roi_stats['total_rois']*100:.1f}%) - Enhanced Height")
                    print(f"   Cone ROIs: {roi_stats['cone_rois']} ({roi_stats['cone_rois']/roi_stats['total_rois']*100:.1f}%) - Width Optimized")
                    print(f"   Dashboard ROIs: {roi_stats['dashboard_rois']} ({roi_stats['dashboard_rois']/roi_stats['total_rois']*100:.1f}%)")


def main():
    """
    Run the enhanced number ROI video stream demo.

    Locates the first available model weights and video source (falling back
    to camera 0 when no video file exists), then starts the enhanced video
    stream pipeline.

    Returns:
        Process exit code: 0 on success, 1 on failure.
    """
    # Candidate model weights, newest training run first.
    candidate_models = [
        r".\weights\all_yolo_all_epochs_0811\best.pt",
        r".\weights\all_yolo_all_epochs_0808\best.pt", 
        r".\weights\all_yolo_all_epochs_0805\best.pt",
        r".\weights\all_yolo_all_epochs_0804\best.pt",
    ]
    
    model_path = None
    for path in candidate_models:
        if os.path.exists(path):
            model_path = path
            break
    
    if model_path is None:
        print("❌ Model weights not found")
        return 1
    
    # Candidate video files; fall back to the default camera if none exist.
    candidate_videos = [
        r".\video_2025-08-10_13-17-30.avi",
        r".\video_2025-08-10_13-16-08.avi",
        r".\video_2025-08-03_22-31-55.avi",
    ]
    
    video_source = 0  # default: camera index 0
    for video_path in candidate_videos:
        if os.path.exists(video_path):
            video_source = video_path
            break
    # BUG FIX: removed the unconditional `video_source = candidate_videos[2]`
    # override, which discarded the existence check above, disabled the camera
    # fallback, and could select a file that does not exist.
    
    try:
        # Create the enhanced video stream pipeline.
        print(f"🔧 Creating enhanced number ROI video stream pipeline")
        print(f"📹 Model: {model_path}")
        print(f"📹 Source: {video_source}")
        print(f"🎯 Enhancement: Number ROI height doubled (expand up/down by half each)")
        
        video_pipeline = VideoStreamPipelineNumberEnhanced(
            model_path,
            use_fresh_pipeline_per_frame=True  # fresh instance per frame for stability
        )
        
        # Process the video stream.
        video_pipeline.process_video_stream(
            video_source=video_source,
            # output_path="enhanced_number_output.avi",  # uncomment to save output
            max_frames=None,
            fps_limit=None,
            show_roi_debug=True  # enable the ROI debug overlay
        )
        
        return 0
        
    except Exception as e:
        print(f"❌ Error: {e}")
        import traceback
        traceback.print_exc()
        return 1


if __name__ == "__main__":
    # Use sys.exit() rather than the site-provided exit() builtin, which is
    # intended for interactive use and is absent when Python runs with -S.
    sys.exit(main())
