"""
优化版YOLO检测管道 - 改进的ROI构建逻辑 + 锥形桶x轴对齐过滤
基于仪表盘宽度和数字高度的智能ROI截取，并在第一阶段结果后过滤干扰锥形桶
"""

import os
import sys
import cv2
import numpy as np
import torch
from ultralytics import YOLO
import matplotlib.pyplot as plt
from collections.abc import Iterable
from typing import List, Tuple, Optional, Union, Dict, Any
import gc

from cone_alignment_filter import filter_cones_by_x_alignment, ConeAlignmentFilterConfig


class YOLOTrafficLightPipelineOptimized:
    """
    优化版交通灯检测管道，改进了锥筒ROI构建逻辑
    
    主要改进：
    - 仅优化锥筒ROI的构建，基于仪表盘宽度进行智能截取
    - 数字和仪表盘ROI保持原有逻辑，确保数字识别稳定性
    - 锥筒颜色识别更精确，减少背景干扰
    - 新增：在第一阶段结果后，按x轴中心对齐规则过滤干扰锥形桶
    """
    
    def __init__(self, 
                 model_path: str,
                 device: Optional[str] = None,
                 debug: bool = True):
        """
        Initialize the optimized pipeline.

        Args:
            model_path: Path to the YOLO model weights (.pt or TensorRT .engine).
            device: Inference device ('cuda', 'cpu', or None for auto-selection).
            debug: Enable debug visualization and logging.

        Raises:
            FileNotFoundError: If model_path does not exist on disk.
        """
        self.debug = debug
        self.device = device if device is not None else ('cuda' if torch.cuda.is_available() else 'cpu')
        
        # Load the YOLO model
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model weights not found: {model_path}")
        
        # When loading a TensorRT engine, pass task explicitly to silence the
        # Ultralytics "Unable to automatically guess model task" warning.
        try:
            if isinstance(model_path, str) and model_path.lower().endswith('.engine'):
                self.model = YOLO(model_path, task='detect')
            else:
                self.model = YOLO(model_path)
        except TypeError:
            # Older Ultralytics versions do not accept the task parameter
            self.model = YOLO(model_path)
        
        # Force the model into a properly initialized state
        self._initialize_model()
        
        # Class-name groups used to route detections to the three targets
        self.COLOR_CLASSES = {"red", "orange", "yellow", "green", "blue"}
        self.NUMBER_CLASSES = {"one", "two", "three", "four", "five", "six"}
        self.STATE_CLASSES = {"down", "normal", "up"}
        
        # HSV color ranges - precise ranges derived from real image data.
        # Each tuple is (h_lo, s_lo, v_lo, h_hi, s_hi, v_hi) for cv2.inRange.
        self.COLOR_RANGES = {
            "red":    [(0, 20, 225, 4, 188, 255)],
            "orange": [(0, 97, 222, 11, 155, 255), (18, 97, 222, 179, 155, 255)],
            "yellow": [(23, 122, 219, 32, 193, 255)],
            "green":  [(37, 46, 207, 47, 185, 255)],
            "blue":   [(92, 171, 222, 108, 239, 255)],
        }
        
        if self.debug:
            print(f"🔧 优化版管道已初始化 - 设备: {self.device}, 模型: {model_path}")
    
    def _initialize_model(self):
        """Initialize the model correctly to avoid state problems.

        Puts a PyTorch backend into eval mode on the target device with
        gradients disabled, then runs a dummy prediction to warm up internal
        state. Failures are printed but never raised, so construction proceeds
        on a best-effort basis.
        """
        try:
            # Only call eval/to/freeze parameters when the backend is a PyTorch
            # module (the TensorRT backend lacks these attributes)
            if hasattr(self.model, 'model'):
                base_model = getattr(self.model, 'model')
                if hasattr(base_model, 'eval'):
                    base_model.eval()
                    if hasattr(base_model, 'to'):
                        base_model.to(self.device)
                    params = getattr(base_model, 'parameters', None)
                    if callable(params):
                        for param in params():
                            param.requires_grad = False
            
            # Run a dummy prediction to initialize internal state
            dummy_input = np.zeros((640, 640, 3), dtype=np.uint8)
            dummy_input.fill(128)  # uniform gray image
            
            with torch.no_grad():
                _ = self.model.predict(
                    dummy_input, 
                    verbose=False, 
                    conf=0.1, 
                    device=self.device,
                    save=False,
                    show=False
                )
            
            # Clear any cached GPU state
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            
            if self.debug:
                print("✅ 模型初始化成功完成")
                
        except Exception as e:
            print(f"⚠️ 模型初始化问题: {e}")
    
    def _debug_print(self, *args, **kwargs):
        """如果启用调试模式则打印调试信息"""
        if self.debug:
            print(*args, **kwargs)
    
    def _show_image(self, img_bgr: np.ndarray, title: str = "", size: int = 8):
        """如果启用调试模式则显示图像"""
        if not self.debug:
            return
        h, w = img_bgr.shape[:2]
        plt.figure(figsize=(size, size * (h / max(1, w))))
        plt.imshow(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB))
        plt.title(title)
        plt.axis('off')
        plt.show()
    
    def crop_image(self, image: np.ndarray, 
                   top_cut_ratio: float = 0.25,
                   width_center_ratio: float = 0.5) -> np.ndarray:
        """
        通过移除顶部部分并提取中心宽度来裁剪图像
        """
        if image is None or image.size == 0:
            raise ValueError("Input image is empty")
        
        frame_height, frame_width = image.shape[:2]
        
        # 移除顶部部分
        top_cut = int(frame_height * top_cut_ratio)
        image = image[top_cut:, :]
        frame_height = image.shape[0]
        
        # 提取中心宽度
        center_width = int(frame_width * width_center_ratio)
        w_x1 = frame_width // 2 - center_width // 2
        w_x2 = frame_width // 2 + center_width // 2
        
        cropped_image = image[:, w_x1:w_x2]
        
        if self.debug:
            self._debug_print(f"📐 裁剪: {frame_width}x{frame_height} -> {cropped_image.shape[1]}x{cropped_image.shape[0]}")
            vis = image.copy()
            cv2.rectangle(vis, (w_x1, 0), (w_x2 - 1, frame_height - 1), (0, 255, 255), 3)
            self._show_image(vis, f"原始图像与裁剪区域 [{w_x1}:{w_x2}]")
            self._show_image(cropped_image, f"裁剪后图像")
        
        return cropped_image
    
    def yolo_first_stage(self, image_bgr: np.ndarray,
                        conf: float = 0.6,
                        imgsz: Optional[Union[int, Tuple[int, int]]] = None,
                        classes: Optional[List[int]] = None,
                        iou: Optional[float] = None,
                        agnostic_nms: Optional[bool] = None,
                        max_det: Optional[int] = None,
                        debug_title_prefix: str = "first-stage") -> List[Any]:
        """
        Run first-stage YOLO detection on a BGR image with proper state management.

        Args:
            image_bgr: Input image in BGR channel order (OpenCV convention).
            conf: Confidence threshold passed to the predictor.
            imgsz: Optional inference size (square int or (h, w)).
            classes: Optional class-id whitelist.
            iou: Optional NMS IoU threshold.
            agnostic_nms: Optional class-agnostic NMS flag.
            max_det: Optional cap on the number of detections.
            debug_title_prefix: Prefix used in debug output.

        Returns:
            The raw Ultralytics results list.

        Raises:
            ValueError: If the input image is None or empty.
        """
        if image_bgr is None or image_bgr.size == 0:
            raise ValueError("Input image is empty")
        
        # Make sure the image buffer is contiguous and correctly formatted
        if not image_bgr.flags['C_CONTIGUOUS']:
            image_bgr = np.ascontiguousarray(image_bgr)
        
        # Ensure the expected dtype
        if image_bgr.dtype != np.uint8:
            image_bgr = image_bgr.astype(np.uint8)
        
        predict_kwargs = {
            "verbose": False,
            "conf": conf,
            "device": self.device,
            "save": False,
            "show": False,
            "stream": False,  # disable streaming to avoid state issues
        }
        
        if imgsz is not None:
            predict_kwargs["imgsz"] = imgsz
        if classes is not None:
            predict_kwargs["classes"] = classes
        if iou is not None:
            predict_kwargs["iou"] = iou
        if agnostic_nms is not None:
            predict_kwargs["agnostic_nms"] = agnostic_nms
        if max_det is not None:
            predict_kwargs["max_det"] = max_det
        
        # Clear any previous GPU state
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        
        # Run prediction under no_grad for proper context management
        with torch.no_grad():
            results = self.model.predict(image_bgr, **predict_kwargs)
        
        # Force garbage collection to prevent memory problems
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        
        if self.debug:
            try:
                if results and len(results) > 0:
                    annotated_bgr = results[0].plot()
                    boxes = getattr(results[0], "boxes", None)
                    summary = []
                    if boxes is not None and len(boxes) > 0:
                        id2name = {k: v for k, v in results[0].names.items()}
                        for i in range(len(boxes)):
                            cls_id = int(boxes.cls[i].item())
                            score = float(boxes.conf[i].item())
                            name = id2name.get(cls_id, str(cls_id))
                            summary.append(f"{name}:{score:.2f}")
                    self._debug_print(f"🎯 {debug_title_prefix} 检测: {', '.join(summary) if summary else 'none'}")
                    self._show_image(annotated_bgr, title=f"{debug_title_prefix} 标注结果")
                else:
                    self._debug_print(f"🎯 {debug_title_prefix} 检测: none (空结果)")
            except Exception as e:
                self._debug_print(f"DEBUG 可视化失败: {e}")
        
        return results
    
    def analyze_first_stage_results(self, results: List[Any], 
                                   conf: float,
                                   debug_title_prefix: str = "first-stage analysis") -> List[Tuple[str, Tuple[float, List[float]], bool]]:
        """
        分析YOLO第一阶段结果，使用置信度阈值
        """
        detections = []
        if results is None or len(results) == 0:
            if self.debug:
                self._debug_print(f"📊 {debug_title_prefix}: 结果为None或空")
            return detections
        
        res_list = results if isinstance(results, Iterable) else [results]
        
        for res in res_list:
            boxes = getattr(res, "boxes", None)
            if boxes is None or len(boxes) == 0:
                continue
            id2name = {k: v.lower() for k, v in getattr(res, "names", {}).items()}
            for i in range(len(boxes)):
                try:
                    cls_id = int(boxes.cls[i].item())
                    name = id2name.get(cls_id, str(cls_id))
                    prob = float(boxes.conf[i].item())
                    box = boxes.xyxy[i].tolist()
                    is_confident = prob >= conf
                    detections.append((name, (prob, box), is_confident))
                except Exception as e:
                    if self.debug:
                        self._debug_print(f"📊 {debug_title_prefix}: 跳过一个框，错误: {e}")
                    continue
        
        if self.debug:
            self._debug_print(f"📊 {debug_title_prefix} 列表: {detections}")
        
        return detections
    
    def build_optimized_adaptive_rois(self, detections: List[Tuple[str, Tuple[float, List[float]], bool]],
                                     base_image_bgr: np.ndarray,
                                     debug_title_prefix: str = "optimized-roi") -> List[Tuple[str, np.ndarray]]:
        """
        Build optimized adaptive ROIs (cone / number / dashboard) anchored on detections.

        Optimization strategy (only the cone ROI is optimized; the number ROI
        keeps the original logic):
        1. Dashboard only: the number band keeps full width; the cone band is
           narrowed using the dashboard width.
        2. Number and dashboard both detected: the cone band is cut using the
           number box height and the dashboard box width.
        3. Number only: the original logic is kept unchanged.
        4. Height handling stays consistent with the original code.

        Args:
            detections: (name, (prob, box_xyxy), is_confident) tuples from
                analyze_first_stage_results.
            base_image_bgr: The image the detection boxes refer to.
            debug_title_prefix: Prefix used in debug output.

        Returns:
            List of (roi_class, roi_image) pairs with roi_class in
            {"cone", "number", "dashboard"}.

        Raises:
            ValueError: If base_image_bgr is None or empty.
        """
        if base_image_bgr is None or base_image_bgr.size == 0:
            raise ValueError("Base image is empty")
        
        H, W = base_image_bgr.shape[:2]
        
        def _pick_best(detections, allowed_names: set):
            # Highest-probability detection whose class name is in allowed_names
            best = None
            for name, (prob, box), _is_conf in detections:
                if name.lower() in allowed_names:
                    if best is None or prob > best[0]:
                        best = (prob, box, name)
            return best
        
        def _clip(v, lo, hi):
            # Clamp v (cast to int) into [lo, hi]
            return max(lo, min(int(v), hi))
        
        def _box_h(box):
            """Height of an xyxy box (never negative)."""
            return max(0, int(box[3] - box[1]))
        
        def _box_w(box):
            """Width of an xyxy box (never negative)."""
            return max(0, int(box[2] - box[0]))
        
        def slice_band_with_width(y0, y1, x_center, width_ratio=1.0):
            """Slice a horizontal band restricted to a width around x_center."""
            y0c = _clip(y0, 0, H)
            y1c = _clip(y1, 0, H)
            if y1c <= y0c:
                return None
            
            # Compute the x range
            half_width = int(W * width_ratio / 2)
            x0c = _clip(x_center - half_width, 0, W)
            x1c = _clip(x_center + half_width, 0, W)
            
            # Degenerate x range: fall back to the full-width band
            if x1c <= x0c:
                return base_image_bgr[y0c:y1c, :]
            
            return base_image_bgr[y0c:y1c, x0c:x1c]
        
        def slice_band(y0, y1):
            """Original full-width band slice."""
            y0c = _clip(y0, 0, H)
            y1c = _clip(y1, 0, H)
            if y1c <= y0c:
                return None
            return base_image_bgr[y0c:y1c, :]
        
        # Pick the best detection for each anchor group
        best_number = _pick_best(detections, self.NUMBER_CLASSES)
        best_dash = _pick_best(detections, self.STATE_CLASSES)
        
        rois = []
        
        if best_number is None and best_dash is None:
            if self.debug:
                self._debug_print(f"🔧 {debug_title_prefix}: 没有数字/仪表盘锚点; 返回空列表")
            return rois
        
        if self.debug:
            self._debug_print(f"🔧 {debug_title_prefix}: 开始优化ROI构建")
            if best_number:
                prob_n, box_n, name_n = best_number
                self._debug_print(f"   📍 最佳数字: {name_n} ({prob_n:.3f}) at {[int(x) for x in box_n]}")
            if best_dash:
                prob_d, box_d, name_d = best_dash
                self._debug_print(f"   📍 最佳仪表盘: {name_d} ({prob_d:.3f}) at {[int(x) for x in box_d]}")
        
        if best_number is None and best_dash is not None:
            # 🎯 Optimized case 1: only the dashboard was detected.
            # The number band keeps the original full-width logic; the cone
            # band is narrowed using the dashboard width.
            prob_d, box_d, name_d = best_dash
            x1_d, y1_d, x2_d, y2_d = [int(x) for x in box_d]
            y_top_d = y1_d
            h_d = _box_h(box_d)
            w_d = _box_w(box_d)
            x_center_d = (x1_d + x2_d) // 2
            
            if self.debug:
                self._debug_print(f"🔧 {debug_title_prefix}: 情况1 - 只有仪表盘")
                self._debug_print(f"   仪表盘: 中心x={x_center_d}, 高度={h_d}, 宽度={w_d}")
            
            # Number band: 0.5*h_d above the dashboard, full width (original logic)
            num_y1 = y_top_d - int(0.5 * h_d)
            num_y2 = y_top_d
            num_roi = slice_band(num_y1, num_y2)
            if num_roi is not None:
                rois.append(("number", num_roi))
                if self.debug:
                    self._debug_print(f"   ✅ 数字ROI: y({num_y1}-{num_y2}), 全宽度（保持原逻辑）")
            
            # Cone band: a further 0.5*h_d above the number band, narrowed by dashboard width
            dashboard_width_ratio = min(1.0, w_d / W * 2.0)  # twice the dashboard width, capped at full width
            cone_y1 = y_top_d - int(1.0 * h_d)
            cone_y2 = y_top_d - int(0.5 * h_d)
            cone_roi = slice_band_with_width(cone_y1, cone_y2, x_center_d, dashboard_width_ratio)
            if cone_roi is not None:
                rois.append(("cone", cone_roi))
                if self.debug:
                    self._debug_print(f"   ✅ 锥筒ROI: y({cone_y1}-{cone_y2}), 宽度比例={dashboard_width_ratio:.2f} (优化)")
        
        elif best_number is not None and best_dash is None:
            # Case 2: only a number was detected (original logic kept)
            prob_n, box_n, name_n = best_number
            x1_n, y1_n, x2_n, y2_n = [int(x) for x in box_n]
            y_top_n = y1_n
            y_bot_n = y2_n
            h_n = _box_h(box_n)
            
            if self.debug:
                self._debug_print(f"🔧 {debug_title_prefix}: 情况2 - 只有数字")
                self._debug_print(f"   数字: 高度={h_n}")
            
            # Dashboard band: 2*h_n below the bottom of the number box
            dash_roi = slice_band(y_bot_n, y_bot_n + 2.0 * h_n)
            if dash_roi is not None:
                rois.append(("dashboard", dash_roi))
                if self.debug:
                    self._debug_print(f"   ✅ 仪表盘ROI: y({y_bot_n}-{y_bot_n + int(2.0 * h_n)})")
            
            # Cone band: 1*h_n above the top of the number box
            cone_roi = slice_band(y_top_n - 1.0 * h_n, y_top_n)
            if cone_roi is not None:
                rois.append(("cone", cone_roi))
                if self.debug:
                    self._debug_print(f"   ✅ 锥筒ROI: y({y_top_n - int(1.0 * h_n)}-{y_top_n})")
        
        else:
            # 🎯 Optimized case 3: both number and dashboard exist.
            # Use the number box height and the dashboard box width to cut the
            # cone band (only the cone ROI is optimized).
            prob_n, box_n, name_n = best_number
            prob_d, box_d, name_d = best_dash
            
            x1_n, y1_n, x2_n, y2_n = [int(x) for x in box_n]
            x1_d, y1_d, x2_d, y2_d = [int(x) for x in box_d]
            
            y_top_n = y1_n
            h_n = _box_h(box_n)
            w_d = _box_w(box_d)
            x_center_d = (x1_d + x2_d) // 2
            
            if self.debug:
                self._debug_print(f"🔧 {debug_title_prefix}: 情况3 - 数字和仪表盘都存在")
                self._debug_print(f"   数字: 高度={h_n}")
                self._debug_print(f"   仪表盘: 中心x={x_center_d}, 宽度={w_d}")
            
            # width_ratio derived from the dashboard width
            dashboard_width_ratio = min(1.0, w_d / W * 1.5)  # 1.5x the dashboard width
            
            # Cone band: 1*h_n above the number top, narrowed by the dashboard width
            cone_y1 = y_top_n - int(1.0 * h_n)
            cone_y2 = y_top_n
            cone_roi = slice_band_with_width(cone_y1, cone_y2, x_center_d, dashboard_width_ratio)
            if cone_roi is not None:
                rois.append(("cone", cone_roi))
                if self.debug:
                    self._debug_print(f"   ✅ 锥筒ROI: y({cone_y1}-{cone_y2}), 宽度比例={dashboard_width_ratio:.2f} (优化)")
            
            # NOTE: in this case the number and dashboard ROIs come directly
            # from the first-stage detections, so no extra bands are generated
            # (this mirrors the original design).
        
        if self.debug:
            for cname, roi in rois:
                self._show_image(roi, title=f"{debug_title_prefix} | {cname} ROI ({roi.shape[1]}x{roi.shape[0]})", size=6)
            self._debug_print(f"🔧 {debug_title_prefix} 返回 {[(c, (r.shape[1], r.shape[0])) for c, r in rois]}")
        
        return rois
    
    def yolo_second_stage(self, roi_list: List[Tuple[str, np.ndarray]],
                         conf: float = 0.5,
                         imgsz: Optional[Union[int, Tuple[int, int]]] = None,
                         scale_factor: float = 2.0,
                         debug_title_prefix: str = "second-stage") -> List[Tuple[str, List[Any]]]:
        """
        Run YOLO on each ROI, restricting predictions to the class group that
        matches the ROI type (cone -> colors, number -> digits, dashboard -> states).

        Args:
            roi_list: (roi_class, roi_image) pairs to process.
            conf: Confidence threshold for the predictor.
            imgsz: Optional inference size.
            scale_factor: Upscaling factor applied to each ROI before inference
                (1.0 or falsy disables scaling).
            debug_title_prefix: Prefix used in debug output.

        Returns:
            List of (roi_class, results) pairs, one per non-empty ROI.
        """
        outputs = []
        if not roi_list:
            if self.debug:
                self._debug_print(f"🔍 {debug_title_prefix}: 空的roi_list")
            return outputs
        
        # Build name<->id mappings
        name_to_id = {name.lower(): idx for idx, name in self.model.names.items()}
        id_to_name = {idx: name.lower() for idx, name in self.model.names.items()}
        
        def _allowed_ids_for(roi_class: str) -> Optional[List[int]]:
            # Class-id whitelist matching the ROI type; None means "no filter"
            if roi_class == "cone":
                allowed_names = self.COLOR_CLASSES
            elif roi_class == "number":
                allowed_names = self.NUMBER_CLASSES
            elif roi_class == "dashboard":
                allowed_names = self.STATE_CLASSES
            else:
                return None
            return [name_to_id[n] for n in allowed_names if n in name_to_id]
        
        for roi_class, roi_img in roi_list:
            if roi_img is None or roi_img.size == 0:
                continue
            
            # Ensure a proper image buffer
            if not roi_img.flags['C_CONTIGUOUS']:
                roi_img = np.ascontiguousarray(roi_img)
            
            if roi_img.dtype != np.uint8:
                roi_img = roi_img.astype(np.uint8)
            
            # Optional upscaling to help detection on small ROIs
            h, w = roi_img.shape[:2]
            if scale_factor and scale_factor != 1.0:
                new_w, new_h = int(w * scale_factor), int(h * scale_factor)
                roi_infer = cv2.resize(roi_img, (new_w, new_h), interpolation=cv2.INTER_LANCZOS4)
            else:
                roi_infer = roi_img
            
            allowed_ids = _allowed_ids_for(roi_class)
            predict_kwargs = {
                "verbose": False,
                "conf": conf,
                "device": self.device,
                "save": False,
                "show": False,
                "stream": False,
            }
            if imgsz is not None:
                predict_kwargs["imgsz"] = imgsz
            if allowed_ids:
                predict_kwargs["classes"] = allowed_ids
            
            # Clear the GPU cache before each prediction
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            
            with torch.no_grad():
                res = self.model.predict(roi_infer, **predict_kwargs)
            
            outputs.append((roi_class, res))
            
            # Cleanup
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            
            if self.debug:
                try:
                    if res and len(res) > 0:
                        annotated = res[0].plot()
                        boxes = getattr(res[0], "boxes", None)
                        summary = []
                        if boxes is not None and len(boxes) > 0:
                            for i in range(len(boxes)):
                                cls_id = int(boxes.cls[i].item())
                                name = id_to_name.get(cls_id, str(cls_id))
                                score = float(boxes.conf[i].item())
                                summary.append(f"{name}:{score:.2f}")
                        self._debug_print(f"🔍 {debug_title_prefix} [{roi_class}] 检测: {', '.join(summary) if summary else 'none'}")
                        self._show_image(annotated, title=f"{debug_title_prefix} [{roi_class}] 标注结果", size=6)
                except Exception as e:
                    self._debug_print(f"🔍 {debug_title_prefix} [{roi_class}] 可视化失败: {e}")
        
        return outputs
    
    def _make_color_masks(self, hsv_image: np.ndarray, color_name: str) -> np.ndarray:
        """Build a binary mask for one cone color from an HSV image.

        Each range tuple in COLOR_RANGES is (h_lo, s_lo, v_lo, h_hi, s_hi, v_hi);
        per-range masks are OR-combined, then cleaned with open/close morphology.
        """
        combined = None
        for (h_lo, s_lo, v_lo, h_hi, s_hi, v_hi) in self.COLOR_RANGES[color_name]:
            lower_bound = np.array([h_lo, s_lo, v_lo], dtype=np.uint8)
            upper_bound = np.array([h_hi, s_hi, v_hi], dtype=np.uint8)
            partial = cv2.inRange(hsv_image, lower_bound, upper_bound)
            combined = partial if combined is None else cv2.bitwise_or(combined, partial)
        
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        combined = cv2.morphologyEx(combined, cv2.MORPH_OPEN, kernel, iterations=1)
        combined = cv2.morphologyEx(combined, cv2.MORPH_CLOSE, kernel, iterations=1)
        return combined
    
    def _predict_color_by_hsv(self, image_bgr: np.ndarray) -> Tuple[str, Dict[str, int]]:
        """Estimate the cone color via HSV pixel-count voting on a 640x640 resize.

        Returns:
            (winning_color, per_color_pixel_counts).
        """
        resized = cv2.resize(image_bgr, (640, 640))
        hsv = cv2.cvtColor(resized, cv2.COLOR_BGR2HSV)
        counts = {
            cname: int(cv2.countNonZero(self._make_color_masks(hsv, cname)))
            for cname in ["red", "orange", "yellow", "green", "blue"]
        }
        best_color = max(counts.items(), key=lambda kv: kv[1])[0]
        return best_color, counts
    
    def hsv_cone_color_vote(self, roi_list: List[Tuple[str, np.ndarray]],
                           debug_title_prefix: str = "hsv-cone") -> Tuple[str, str]:
        """
        Vote on the cone color across all cone ROIs using HSV pixel counts.

        Args:
            roi_list: (roi_class, roi_image) pairs; only "cone" entries are used.
            debug_title_prefix: Prefix used in debug output.

        Returns:
            ("cone", color_name); color_name is "None" when no cone ROI exists
            or no colored pixels were counted at all.
        """
        cone_images = [img for cname, img in roi_list if cname == "cone"]
        if not cone_images:
            return ("cone", "None")
        
        total_counts = {"red": 0, "orange": 0, "yellow": 0, "green": 0, "blue": 0}
        per_roi_best = []
        
        for idx, cone_img in enumerate(cone_images):
            color, counts = self._predict_color_by_hsv(cone_img)
            for key in total_counts:
                total_counts[key] += counts.get(key, 0)
            per_roi_best.append((idx, color))
            if self.debug:
                self._debug_print(f"🎨 {debug_title_prefix} ROI#{idx} 最佳={color} 计数={counts}")
                self._show_image(cone_img, title=f"{debug_title_prefix} ROI#{idx} | 输入", size=5)
        
        # Winner is the color with the highest accumulated pixel count
        final_color = max(total_counts, key=total_counts.get)
        if total_counts[final_color] <= 0:
            final_color = "None"
        
        if self.debug:
            self._debug_print(f"🎨 {debug_title_prefix} 总计数={total_counts} -> 最终={final_color}")
        
        return ("cone", final_color)
    
    def finalize_results(self, first_stage_results: List[Any],
                        second_stage_outputs: List[Tuple[str, List[Any]]],
                        hsv_cone_result: Tuple[str, str],
                        conf: float = 0.6) -> List[Tuple[str, Union[str, float], Union[str, float]]]:
        """
        Fuse stage-one, stage-two and HSV results into the three final targets:
        cone color, number, and dashboard state.

        Second-stage results take priority over first-stage ones; the HSV vote
        is the last-resort fallback for the cone color (its confidence slot
        carries the literal string "HSV"). Number/dashboard fall back to
        ("None", 0.0). The final result dict is always printed, regardless of
        the debug flag.
        """
        def _top_in_result(res_obj, allowed_names: set):
            # Best (name, score) among boxes of res_obj whose class is allowed
            boxes = getattr(res_obj, "boxes", None)
            if boxes is None or len(boxes) == 0:
                return None
            label_map = {k: v.lower() for k, v in getattr(res_obj, "names", {}).items()}
            winner = None
            for i in range(len(boxes)):
                cls_id = int(boxes.cls[i].item())
                label = label_map.get(cls_id, str(cls_id))
                if label not in allowed_names:
                    continue
                score = float(boxes.conf[i].item())
                if winner is None or score > winner[1]:
                    winner = (label, score)
            return winner
        
        def _top_first_stage(results, allowed_names: set):
            # Best allowed detection over all first-stage result objects
            if results is None or len(results) == 0:
                return None
            candidates = results if isinstance(results, Iterable) else [results]
            winner = None
            for res in candidates:
                cand = _top_in_result(res, allowed_names)
                if cand is not None and (winner is None or cand[1] > winner[1]):
                    winner = cand
            return winner
        
        def _top_second_stage(outputs, roi_class: str):
            # Best detection across second-stage outputs of the given ROI class
            if not outputs:
                return None
            best_label, best_score = None, -1.0
            for cls_name, res_list in outputs:
                if cls_name != roi_class or not res_list or len(res_list) == 0:
                    continue
                first_res = res_list[0]
                boxes = getattr(first_res, "boxes", None)
                label_map = {k: v.lower() for k, v in getattr(first_res, "names", {}).items()}
                if boxes is None or len(boxes) == 0:
                    continue
                for i in range(len(boxes)):
                    cls_id = int(boxes.cls[i].item())
                    label = label_map.get(cls_id, str(cls_id))
                    score = float(boxes.conf[i].item())
                    if score > best_score:
                        best_score = score
                        best_label = label
            return None if best_label is None else (best_label, best_score)
        
        final_list = []
        
        # Cone (color): second stage > first stage > HSV fallback
        cone2 = _top_second_stage(second_stage_outputs, "cone")
        cone1 = _top_first_stage(first_stage_results, self.COLOR_CLASSES)
        if cone2 is not None and cone2[1] >= conf:
            final_list.append(("cone", cone2[0], cone2[1]))
        elif cone1 is not None and cone1[1] >= conf:
            final_list.append(("cone", cone1[0], cone1[1]))
        else:
            # HSV fallback: the confidence slot carries the literal string "HSV"
            cone_color = None
            try:
                if isinstance(hsv_cone_result, tuple) and len(hsv_cone_result) == 2:
                    cone_color = hsv_cone_result[1]
            except Exception:
                cone_color = None
            final_list.append(("cone", cone_color or "None", "HSV"))
        
        # Number and dashboard share the same two-stage fallback pattern
        for target, allowed in (("number", self.NUMBER_CLASSES), ("dashboard", self.STATE_CLASSES)):
            stage2 = _top_second_stage(second_stage_outputs, target)
            stage1 = _top_first_stage(first_stage_results, allowed)
            if stage2 is not None and stage2[1] >= conf:
                final_list.append((target, stage2[0], stage2[1]))
            elif stage1 is not None and stage1[1] >= conf:
                final_list.append((target, stage1[0], stage1[1]))
            else:
                final_list.append((target, "None", 0.0))
        
        # Always print the final results (regardless of the debug flag)
        print({"final_results": final_list})
        return final_list
    
    def process_image(self, image: np.ndarray,
                     top_cut_ratio: float = 0.25,
                     width_center_ratio: float = 0.5,
                     first_stage_conf: float = 0.6,
                     second_stage_conf: float = 0.5,
                     final_conf: float = 0.6,
                     scale_factor: float = 2.0) -> Dict[str, Any]:
        """
        Full workflow: run an input image through every stage and return results.

        Stages: crop -> first-stage YOLO -> detection analysis -> cone
        x-alignment filtering -> adaptive ROI construction -> second-stage YOLO
        -> HSV cone color vote -> result fusion.

        Args:
            image: Input BGR image.
            top_cut_ratio: Fraction of the height removed from the top.
            width_center_ratio: Fraction of the width kept, centered.
            first_stage_conf: Confidence threshold for the first YOLO pass.
            second_stage_conf: Confidence threshold for the per-ROI pass.
            final_conf: Confidence threshold used during result fusion.
            scale_factor: ROI upscaling factor for the second stage.

        Returns:
            Dict with 'final_results', 'cone_color', 'number',
            'dashboard_state' and 'processing_info'; on internal failure, an
            'error' key is added and the targets are all "None".

        Raises:
            ValueError: If the input image is None or empty.
        """
        if image is None or image.size == 0:
            raise ValueError("Input image is empty")
        
        try:
            # Step 1: crop the image
            cropped_image = self.crop_image(image, top_cut_ratio, width_center_ratio)
            
            # Step 2: first-stage YOLO detection
            first_stage_results = self.yolo_first_stage(
                cropped_image,
                conf=first_stage_conf,
                imgsz=640,
                debug_title_prefix="first-stage"
            )
            
            # Step 3: analyze the first-stage results
            detections = self.analyze_first_stage_results(
                first_stage_results,
                conf=first_stage_conf,
                debug_title_prefix="first-stage analysis"
            )
            
            # Step 3.5: drop distractor cones based on x-axis center alignment
            try:
                before_cnt = len(detections)
                cfg = ConeAlignmentFilterConfig(
                    cone_class_names=set(self.COLOR_CLASSES) | {"锥形桶", "路锥", "cone", "traffic cone"},
                    target_class_names=set(self.NUMBER_CLASSES)
                    | set(self.STATE_CLASSES)
                    | {"仪表盘", "dashboard", "数字", "number", "digit", "meter", "gauge", "clock"},
                    max_center_dx_ratio=0.35,
                    max_center_dx_pixels=None,
                    use_target_width_for_ratio=True,
                )
                detections = filter_cones_by_x_alignment(detections, config=cfg, debug=self.debug)
                if self.debug:
                    self._debug_print(f"🧹 cone-filter: {before_cnt} -> {len(detections)}")
            except Exception as e:
                # Best-effort filter: never let it break the pipeline
                if self.debug:
                    self._debug_print(f"🧹 cone-filter 失败: {e}")
            
            # Step 4: build the optimized adaptive ROIs
            roi_list = self.build_optimized_adaptive_rois(
                detections,
                cropped_image,
                debug_title_prefix="optimized-roi"
            )
            
            # Step 5: second-stage YOLO detection on each ROI
            second_stage_outputs = self.yolo_second_stage(
                roi_list,
                conf=second_stage_conf,
                imgsz=640,
                scale_factor=scale_factor,
                debug_title_prefix="second-stage"
            )
            
            # Step 6: HSV-based cone color analysis
            hsv_cone_result = self.hsv_cone_color_vote(
                roi_list,
                debug_title_prefix="hsv-cone"
            )
            
            # Step 7: finalize and fuse the results
            final_results = self.finalize_results(
                first_stage_results,
                second_stage_outputs,
                hsv_cone_result,
                conf=final_conf
            )
            
            # Extract the individual targets for easier access
            cone_color = "None"
            number = "None"
            dashboard_state = "None"
            
            for class_name, detected_name, confidence in final_results:
                if class_name == "cone":
                    cone_color = detected_name
                elif class_name == "number":
                    number = detected_name
                elif class_name == "dashboard":
                    dashboard_state = detected_name
            
            return {
                'final_results': final_results,
                'cone_color': cone_color,
                'number': number,
                'dashboard_state': dashboard_state,
                'processing_info': {
                    'image_shape': image.shape,
                    'cropped_shape': cropped_image.shape,
                    'num_detections_stage1': len(detections),
                    'num_rois': len(roi_list),
                    'roi_types': [roi_type for roi_type, _ in roi_list]
                }
            }
            
        except Exception as e:
            # Catch-all boundary: report the error instead of crashing the caller
            error_msg = f"管道处理中出错: {str(e)}"
            self._debug_print(error_msg)
            return {
                'final_results': [],
                'cone_color': "None",
                'number': "None", 
                'dashboard_state': "None",
                'error': error_msg,
                'processing_info': {}
            }


def create_optimized_pipeline_from_config(model_path: str, 
                                         device: Optional[str] = None,
                                         debug: bool = True) -> YOLOTrafficLightPipelineOptimized:
    """
    Factory that builds an optimized pipeline instance with the standard configuration.

    Args:
        model_path: Path to the YOLO weights file.
        device: Inference device; None selects automatically.
        debug: Whether to enable debug output.

    Returns:
        A configured YOLOTrafficLightPipelineOptimized instance.
    """
    return YOLOTrafficLightPipelineOptimized(
        model_path=model_path,
        device=device,
        debug=debug,
    )


# Example usage
if __name__ == "__main__":
    # Candidate model weight paths (adjust as needed)
    candidate_models = [
        r".\weights\all_yolo_all_epochs_0811\best.pt",
        r".\weights\all_yolo_all_epochs_0808\best.pt",
        r".\weights\all_yolo_all_epochs_0805\best.pt",
        r".\weights\all_yolo_all_epochs_0804\best.pt",
    ]
    
    # Pick the first weights file that exists on disk
    model_path = next((p for p in candidate_models if os.path.exists(p)), None)
    
    if model_path is None:
        print("❌ 在候选路径中未找到模型权重")
        sys.exit(1)
    
    # Build the optimized pipeline
    pipeline = create_optimized_pipeline_from_config(model_path=model_path, debug=True)
    
    # Sample test image (replace with a real path)
    test_image_path = r"D:\Pictures\Screenshots\屏幕截图 2025-08-12 142256.png"
    if os.path.exists(test_image_path):
        image = cv2.imread(test_image_path)
        results = pipeline.process_image(image)
        print("🎉 优化版处理完成!")
        print(f"锥筒颜色: {results['cone_color']}")
        print(f"数字: {results['number']}")
        print(f"仪表盘状态: {results['dashboard_state']}")
    else:
        print(f"❌ 测试图像未找到: {test_image_path}")
        print("✅ 优化管道创建成功。使用 pipeline.process_image(your_image) 来处理图像。")

