"""
双阶段 YOLO 管道：

阶段1：在裁剪后的帧上进行第一阶段锚点检测，用于自适应构建 ROI（数字、锥桶）。
阶段2：对数字 ROI 使用 YOLO 进行二次检测（替代 OCR），输出数字 one~six。

其他：
- 锥形桶 ROI 的宽度与数字 ROI 一致；位置取“数字 ROI 上方一个数字高度的区域”。
- 终端始终输出最终结果；若 debug=True，保存并/或显示中间可视化与最终可视化。
"""

import os
import sys
import gc
import time
from typing import List, Tuple, Optional, Union, Dict, Any

import cv2
import numpy as np
import torch
from ultralytics import YOLO


class DoubleYoloPipeline:
    """Lightweight pipeline: first-stage YOLO + adaptive ROI + second-stage YOLO (digits)."""

    # Detector class names, grouped by role (lowercase, matched case-insensitively).
    COLOR_CLASSES = {"red", "orange", "yellow", "green", "blue"}
    NUMBER_CLASSES = {"one", "two", "three", "four", "five", "six"}
    STATE_CLASSES = {"down", "normal", "up"}

    def __init__(self,
                 model_path: str,
                 device: Optional[str] = None,
                 debug: bool = True,
                 save_dir: Optional[str] = None) -> None:
        """Load the YOLO weights, pick a device, and prepare debug output.

        Args:
            model_path: Path to the YOLO weights file; must exist.
            device: Torch device string; defaults to 'cuda' when available, else 'cpu'.
            debug: When True, prints diagnostics and saves intermediate images.
            save_dir: Directory for debug artifacts; a timestamped default is used when None.

        Raises:
            FileNotFoundError: If ``model_path`` does not exist.
        """
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model weights not found: {model_path}")

        self.debug = debug
        self.device = device if device is not None else ('cuda' if torch.cuda.is_available() else 'cpu')
        self.model = YOLO(model_path)
        self._initialize_model()
        # Offsets applied by the last crop_image call; used to map crop-frame coords back to the full frame.
        self._last_crop_offsets: Dict[str, int] = {"top": 0, "left": 0}

        # User-provided color thresholds (HSV, OpenCV convention: H in [0,179], S in [0,255], V in [0,255])
        # Source values (hue in degrees, S/V in percent):
        # orange min: hsv(15, 79%, 100%)  max: hsv(17, 89%, 98%)  -> low/high swap, rounding and clamping applied automatically
        # blue   min: hsv(191, 75%, 99%)  max: hsv(210, 86%, 100%) -> H in [191,210], S in [75%,86%], V in [99%,100%]
        # yellow min: hsv(59, 68%, 99%)   max: hsv(53, 56%, 98%)  -> H in [53,59], S in [56%,68%], V in [98%,99%]
        # red    min: hsv(5, 100%, 88%)   max: hsv(2, 60%, 100%)  -> low/high swap, rounding and clamping applied automatically
        # To keep every range valid, low/high swapping, rounding and clamping are applied automatically.
        self.USER_COLOR_RANGES: Dict[str, List[Tuple[int, int, int, int, int, int]]] = self._build_user_hsv_ranges()

        # Directory for saved visualizations (timestamped per run).
        ts = time.strftime("%Y%m%d_%H%M%S")
        self.save_dir = save_dir or os.path.join("0816_double_yolo", f"debug_{ts}")
        if self.debug:
            os.makedirs(self.save_dir, exist_ok=True)
            print(f"🔧 管道初始化 - 设备: {self.device}, 模型: {model_path}")
            print(f"🗂️ Debug 输出目录: {self.save_dir}")

    def _initialize_model(self) -> None:
        """Put the model in eval mode on the target device and run a warm-up inference."""
        try:
            if hasattr(self.model, 'model'):
                self.model.model.eval()
                if hasattr(self.model.model, 'to'):
                    self.model.model.to(self.device)
                # Inference-only usage: freeze all parameters.
                for p in self.model.model.parameters():
                    p.requires_grad = False

            # Warm up on a gray dummy frame so the first real frame avoids startup latency.
            dummy = np.full((640, 640, 3), 128, dtype=np.uint8)
            with torch.no_grad():
                _ = self.model.predict(dummy, verbose=False, conf=0.1, device=self.device, save=False, show=False)
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            if self.debug:
                print("✅ 模型初始化成功")
        except Exception as e:
            # Best-effort: initialization problems are reported but not fatal.
            print(f"⚠️ 模型初始化问题: {e}")

    def _debug_print(self, *args, **kwargs) -> None:
        if self.debug:
            print(*args, **kwargs)

    def _save_debug_image(self, name: str, img: np.ndarray) -> None:
        if not self.debug:
            return
        try:
            path = os.path.join(self.save_dir, name)
            cv2.imwrite(path, img)
        except Exception:
            pass

    def crop_image(self, image: np.ndarray,
                   top_cut_ratio: float = 0.25,
                   width_center_ratio: float = 0.5) -> np.ndarray:
        if image is None or image.size == 0:
            raise ValueError("Input image is empty")
        frame_height, frame_width = image.shape[:2]
        top_cut = int(frame_height * top_cut_ratio)
        image2 = image[top_cut:, :]
        center_width = int(frame_width * width_center_ratio)
        w_x1 = frame_width // 2 - center_width // 2
        w_x2 = frame_width // 2 + center_width // 2
        cropped = image2[:, w_x1:w_x2]
        self._last_crop_offsets = {"top": top_cut, "left": w_x1}
        if self.debug:
            self._debug_print(f"📐 裁剪: {frame_width}x{frame_height} -> {cropped.shape[1]}x{cropped.shape[0]}")
        return cropped

    def yolo_infer(self, image_bgr: np.ndarray,
                   conf: float = 0.6,
                   imgsz: Optional[Union[int, Tuple[int, int]]] = 640,
                   debug_title_prefix: str = "yolo") -> List[Any]:
        """Run one YOLO inference on a BGR image and return the raw results list.

        Args:
            image_bgr: Input image; normalized to contiguous uint8 if needed.
            conf: Confidence threshold forwarded to ``model.predict``.
            imgsz: Inference size forwarded to ``model.predict``.
            debug_title_prefix: Label used in debug log lines.

        Raises:
            ValueError: If the image is None or empty.
        """
        if image_bgr is None or image_bgr.size == 0:
            raise ValueError("Input image is empty")
        # Normalize memory layout and dtype for the predictor.
        if not image_bgr.flags['C_CONTIGUOUS']:
            image_bgr = np.ascontiguousarray(image_bgr)
        if image_bgr.dtype != np.uint8:
            image_bgr = image_bgr.astype(np.uint8)

        # Release cached GPU memory around inference to keep the footprint small.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        with torch.no_grad():
            results = self.model.predict(
                image_bgr,
                verbose=False,
                conf=conf,
                device=self.device,
                save=False,
                show=False,
                stream=False,
                imgsz=imgsz,
            )
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        if self.debug:
            # Summarize detections as "name:score" pairs for the debug log.
            try:
                if results and len(results) > 0:
                    boxes = getattr(results[0], "boxes", None)
                    summary = []
                    if boxes is not None and len(boxes) > 0:
                        id2name = {k: v for k, v in results[0].names.items()}
                        for i in range(len(boxes)):
                            cls_id = int(boxes.cls[i].item())
                            score = float(boxes.conf[i].item())
                            name = id2name.get(cls_id, str(cls_id))
                            summary.append(f"{name}:{score:.2f}")
                    self._debug_print(f"🎯 {debug_title_prefix} 检测: {', '.join(summary) if summary else 'none'}")
                else:
                    self._debug_print(f"🎯 {debug_title_prefix} 检测: none (空结果)")
            except Exception as e:
                # Logging must never break inference.
                self._debug_print(f"DEBUG 可视化失败: {e}")

        return results

    def _clip(self, v: int, lo: int, hi: int) -> int:
        return max(lo, min(int(v), hi))

    def _box_h(self, box: List[float]) -> int:
        return max(0, int(box[3] - box[1]))

    def _box_w(self, box: List[float]) -> int:
        return max(0, int(box[2] - box[0]))

    def _expand_and_clip_box(self, box: List[float], pad_ratio: float, H: int, W: int) -> Tuple[int, int, int, int]:
        x1, y1, x2, y2 = box
        w = max(1, x2 - x1)
        h = max(1, y2 - y1)
        pad_x = w * pad_ratio
        pad_y = h * pad_ratio
        nx1 = self._clip(int(x1 - pad_x), 0, W)
        ny1 = self._clip(int(y1 - pad_y), 0, H)
        nx2 = self._clip(int(x2 + pad_x), 0, W)
        ny2 = self._clip(int(y2 + pad_y), 0, H)
        return nx1, ny1, nx2, ny2

    def _pick_best(self, detections, allowed_names: set):
        best = None
        for name, (prob, box), _ in detections:
            if name.lower() in allowed_names:
                if best is None or prob > best[0]:
                    best = (prob, box, name)
        return best

    # ========================= HSV color voting (cones) =========================
    @staticmethod
    def _deg_to_h_opencv(deg: float) -> int:
        return max(0, min(int(round(deg / 2.0)), 179))

    @staticmethod
    def _perc_to_sv(value_percent: float) -> int:
        return max(0, min(int(round(value_percent / 100.0 * 255.0)), 255))

    def _build_user_hsv_ranges(self) -> Dict[str, List[Tuple[int, int, int, int, int, int]]]:
        def make_range(h_min_deg: float, s_min_p: float, v_min_p: float,
                       h_max_deg: float, s_max_p: float, v_max_p: float) -> Tuple[int, int, int, int, int, int]:
            h1 = self._deg_to_h_opencv(min(h_min_deg, h_max_deg))
            h2 = self._deg_to_h_opencv(max(h_min_deg, h_max_deg))
            s1 = self._perc_to_sv(min(s_min_p, s_max_p))
            s2 = self._perc_to_sv(max(s_min_p, s_max_p))
            v1 = self._perc_to_sv(min(v_min_p, v_max_p))
            v2 = self._perc_to_sv(max(v_min_p, v_max_p))
            return (h1, s1, v1, h2, s2, v2)

        ranges: Dict[str, List[Tuple[int, int, int, int, int, int]]] = {
            # 注意：OpenCV H = 度数/2；S/V 以百分比给出，由 _perc_to_sv 换算回 0~255
            # 来自 check_hsv 的滑块值（已换算）：
            # - red:    H 0~4 → 0°~8°，S 147~255 → 58%~100%，V 165~255 → 65%~100%
            # - orange: H 7~13 → 14°~26°，S 119~255 → 47%~100%，V 163~255 → 64%~100%
            # - yellow: H 14~35 → 28°~70°，S 104~218 → 41%~85%， V 176~255 → 69%~100%
            # - blue:   H 100~113 → 200°~226°，S 98~255 → 38%~100%， V 143~235 → 56%~92%
            "orange": [make_range(14, 47, 64, 40, 100, 100)],
            "blue":   [make_range(200, 38, 56, 226, 100, 92)],
            "yellow": [make_range(28, 41, 69, 70, 85, 100)],
            # 红色跨色相环边界，使用两段范围覆盖 0° 和 360°
            "red":    [
                make_range(0,   58, 65, 8,   100, 100),
                make_range(344, 58, 65, 360, 100, 100),
            ],
        }
        return ranges

    @staticmethod
    def _morph_cleanup(mask: np.ndarray) -> np.ndarray:
        """Remove speckle noise (open) then fill small holes (close) with a 5x5 ellipse."""
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        opened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
        return cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel, iterations=1)

    def _count_hsv_by_ranges(self, image_bgr: np.ndarray) -> Tuple[str, Dict[str, int]]:
        """Count, per color, how many pixels fall inside that color's HSV ranges.

        Returns:
            (winning_color_name, per_color_counts); ("None", {}) for an empty image.
        """
        if image_bgr is None or image_bgr.size == 0:
            return "None", {}
        hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)
        counts: Dict[str, int] = {}
        for color_name, specs in self.USER_COLOR_RANGES.items():
            combined = None
            for (h1, s1, v1, h2, s2, v2) in specs:
                part = cv2.inRange(hsv,
                                   np.array([h1, s1, v1], dtype=np.uint8),
                                   np.array([h2, s2, v2], dtype=np.uint8))
                # OR the segments together (red uses two hue segments).
                combined = part if combined is None else cv2.bitwise_or(combined, part)
            if combined is None:
                counts[color_name] = 0
                continue
            counts[color_name] = int(cv2.countNonZero(self._morph_cleanup(combined)))
        winner = max(counts.items(), key=lambda kv: kv[1])[0] if counts else "None"
        return winner, counts

    # More robust color vote: center crop + dynamic saturation threshold + circular-distance-weighted hue histogram
    @staticmethod
    def _circular_hue_distance(h1: np.ndarray, h2: int) -> np.ndarray:
        d = np.abs(h1.astype(np.int16) - int(h2))
        return np.minimum(d, 180 - d).astype(np.float32)

    def _center_crop_fraction(self, image_bgr: np.ndarray,
                               crop_w_ratio: float = 0.7,
                               crop_h_ratio: float = 0.85) -> np.ndarray:
        if image_bgr is None or image_bgr.size == 0:
            return image_bgr
        h, w = image_bgr.shape[:2]
        cw = int(max(1, w * crop_w_ratio))
        ch = int(max(1, h * crop_h_ratio))
        x1 = (w - cw) // 2
        y1 = (h - ch) // 2
        x2 = x1 + cw
        y2 = y1 + ch
        return image_bgr[y1:y2, x1:x2]

    def _vote_color_robust(self, image_bgr: np.ndarray) -> Tuple[str, float, Dict[str, float]]:
        """Vote for the dominant cone color in ``image_bgr``.

        Strategy: center-crop, keep well-saturated mid-brightness pixels (dynamic
        percentile threshold), build a 180-bin hue histogram, and score each color
        with a Gaussian kernel around its hue center(s), gated to that color's
        user-defined HSV window.  Falls back to the simple per-range pixel count
        when too few pixels pass the gate or the histogram is empty.

        Returns:
            (best_color_name, normalized_best_score, all_normalized_scores);
            ("None", 0.0, {}) for an empty input image.
        """
        if image_bgr is None or image_bgr.size == 0:
            return "None", 0.0, {}

        # Center crop to suppress background clutter.
        roi = self._center_crop_fraction(image_bgr, crop_w_ratio=0.7, crop_h_ratio=0.8)
        hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        h = hsv[:, :, 0]
        s = hsv[:, :, 1]
        v = hsv[:, :, 2]

        # Dynamic threshold: a percentile as the lower bound, dropping
        # low-saturation and blown-highlight pixels.
        s_thresh = int(np.clip(np.percentile(s, 60), 80, 160))
        v_low = 40
        v_high = 245
        mask = (s >= s_thresh) & (v >= v_low) & (v <= v_high)
        if mask.sum() < 50:
            # Too few pixels: fall back to the legacy counting method (avoids a zero denominator).
            name, counts = self._count_hsv_by_ranges(roi)
            total = float(max(1, sum(counts.values())))
            score = float(counts.get(name, 0)) / total
            norm = {k: float(vv) / total for k, vv in counts.items()} if counts else {}
            return name, score, norm

        h_sel = h[mask]
        # Hue histogram over the 180 OpenCV hue bins.
        hist, _ = np.histogram(h_sel, bins=180, range=(0, 180))
        hist = hist.astype(np.float32)
        # All-zero histogram: fall back as above.
        if float(hist.sum()) <= 1e-6:
            name, counts = self._count_hsv_by_ranges(roi)
            total = float(max(1, sum(counts.values())))
            score = float(counts.get(name, 0)) / total
            norm = {k: float(vv) / total for k, vv in counts.items()} if counts else {}
            return name, score, norm

        # Hue centers per class (OpenCV H 0-179); red sits at both ends of the circle.
        centers = {
            "red": [0, 179],
            "orange": [18],
            "yellow": [30],
            "blue": [110],
        }
        sigma = 10.0  # Gaussian kernel scale (in hue-distance units).

        # Per-color gating windows generated from the user HSV ranges (OpenCV H: 0~179),
        # keeping the vote consistent with the slider thresholds.
        bin_indices = np.arange(180, dtype=np.int16)
        margin = 2  # small slack at both window ends
        windows: Dict[str, np.ndarray] = {}
        for cname, specs in self.USER_COLOR_RANGES.items():
            allowed = np.zeros(180, dtype=np.bool_)
            for (h1, _s1, _v1, h2, _s2, _v2) in specs:
                lo = max(0, int(h1) - margin)
                hi = min(179, int(h2) + margin)
                allowed[lo:hi + 1] = True
            windows[cname] = allowed

        def kernel(dist: np.ndarray) -> np.ndarray:
            return np.exp(-(dist * dist) / (2.0 * sigma * sigma))

        # Score each color: histogram mass near its center(s), inside its window.
        scores: Dict[str, float] = {}
        for cname, c_list in centers.items():
            s_sum = 0.0
            window_mask = windows.get(cname, None)
            valid_mask = window_mask.astype(np.float32) if window_mask is not None else np.ones_like(bin_indices, dtype=np.float32)
            for c in c_list:
                d = np.minimum(np.abs(bin_indices - c), 180 - np.abs(bin_indices - c)).astype(np.float32)
                s_sum += float(np.sum((hist * valid_mask) * kernel(d)))
            scores[cname] = s_sum

        total_score = sum(scores.values())
        if total_score <= 1e-6:
            return "None", 0.0, {k: 0.0 for k in scores.keys()}

        # Normalize and pick the maximum.
        norm_scores = {k: float(v) / float(total_score) for k, v in scores.items()}
        best_name = max(norm_scores.items(), key=lambda kv: kv[1])[0]
        best_score = norm_scores[best_name]
        return best_name, float(best_score), norm_scores

    def analyze_results(self, results: List[Any], conf: float,
                         debug_title_prefix: str = "analyze") -> List[Tuple[str, Tuple[float, List[float]], bool]]:
        """Flatten raw YOLO results into (name, (prob, xyxy_box), is_confident) tuples.

        ``is_confident`` marks whether the score reaches ``conf``; lower-score
        boxes are still included so callers can decide for themselves.
        """
        detections: List[Tuple[str, Tuple[float, List[float]], bool]] = []
        if results is None or len(results) == 0:
            if self.debug:
                self._debug_print(f"📊 {debug_title_prefix}: 结果为None或空")
            return detections
        from collections.abc import Iterable
        # model.predict may return a list of results or a single result object.
        res_list = results if isinstance(results, Iterable) else [results]
        for res in res_list:
            boxes = getattr(res, "boxes", None)
            if boxes is None or len(boxes) == 0:
                continue
            # Map class ids to lowercase names for case-insensitive matching.
            id2name = {k: v.lower() for k, v in getattr(res, "names", {}).items()}
            for i in range(len(boxes)):
                try:
                    cls_id = int(boxes.cls[i].item())
                    name = id2name.get(cls_id, str(cls_id))
                    prob = float(boxes.conf[i].item())
                    box = boxes.xyxy[i].tolist()
                    is_confident = prob >= conf
                    detections.append((name, (prob, box), is_confident))
                except Exception as e:
                    # Skip a malformed box rather than aborting the whole frame.
                    if self.debug:
                        self._debug_print(f"📊 {debug_title_prefix}: 跳过一个框，错误: {e}")
                    continue
        if self.debug:
            self._debug_print(f"📊 {debug_title_prefix} 列表: {detections}")
        return detections

    def build_adaptive_rois_with_boxes(self,
                                        detections: List[Tuple[str, Tuple[float, List[float]], bool]],
                                        base_image_bgr: np.ndarray,
                                        debug_title_prefix: str = "adaptive-roi") -> List[Tuple[str, np.ndarray, Tuple[int, int, int, int]]]:
        """Derive number/cone/dashboard ROIs from the first-stage anchor detections.

        Returns:
            List of (class_name, roi_image, (x1, y1, x2, y2)) in the coordinate
            frame of ``base_image_bgr``; empty when no usable anchor exists.

        Raises:
            ValueError: If the base image is None or empty.
        """
        if base_image_bgr is None or base_image_bgr.size == 0:
            raise ValueError("Base image is empty")
        H, W = base_image_bgr.shape[:2]

        best_number = self._pick_best(detections, self.NUMBER_CLASSES)
        best_dash = self._pick_best(detections, self.STATE_CLASSES)

        outputs: List[Tuple[str, np.ndarray, Tuple[int, int, int, int]]] = []

        # First determine the number ROI.
        number_bbox = None  # type: Optional[Tuple[int, int, int, int]]
        number_bbox_from_dash_only = False  # set when the number box is inferred from the dashboard alone
        if best_number is None and best_dash is None:
            if self.debug:
                self._debug_print(f"🔧 {debug_title_prefix}: 没有数字/仪表盘锚点; 返回空列表")
            return outputs

        if best_number is None and best_dash is not None:
            # Only the dashboard was found: infer the number box one box-height above it.
            _prob_d, box_d, _name_d = best_dash
            x1_d, y1_d, x2_d, y2_d = [int(x) for x in box_d]
            h_d = self._box_h(box_d)
            w_d = self._box_w(box_d)
            x_center_d = (x1_d + x2_d) // 2

            num_y1 = y1_d - int(1.0 * h_d)
            num_y2 = y1_d
            number_width_ratio = min(1.0, max(0.0, w_d / W))
            half_width = int(W * number_width_ratio / 2)
            nx1 = self._clip(x_center_d - half_width, 0, W)
            nx2 = self._clip(x_center_d + half_width, 0, W)
            ny1 = self._clip(num_y1, 0, H)
            ny2 = self._clip(num_y2, 0, H)
            if ny2 > ny1 and nx2 > nx1:
                number_bbox = (nx1, ny1, nx2, ny2)
                number_bbox_from_dash_only = True

        elif best_number is not None and best_dash is None:
            # Only the number was found: pad its box slightly.
            _prob_n, box_n, _name_n = best_number
            nx1, ny1, nx2, ny2 = self._expand_and_clip_box(box_n, pad_ratio=0.15, H=H, W=W)
            if ny2 > ny1 and nx2 > nx1:
                number_bbox = (nx1, ny1, nx2, ny2)

        else:
            # Both detected: use the first-stage YOLO number box directly.
            _prob_n, box_n, _name_n = best_number
            nx1, ny1, nx2, ny2 = self._expand_and_clip_box(box_n, pad_ratio=0.15, H=H, W=W)
            if ny2 > ny1 and nx2 > nx1:
                number_bbox = (nx1, ny1, nx2, ny2)

        if number_bbox is not None:
            nx1, ny1, nx2, ny2 = number_bbox
            number_roi = base_image_bgr[ny1:ny2, nx1:nx2]
            if number_roi is not None and number_roi.size > 0:
                outputs.append(("number", number_roi, (nx1, ny1, nx2, ny2)))

            # Rules:
            # - If stage one saw the number (whether or not it also saw the dashboard),
            #   cone ROI = area above the number ROI: 1x number height, 2x number width, centered.
            # - If only the dashboard was seen (number inferred from it),
            #   cone ROI = the top 0.5x of the number ROI, same width as the number ROI.
            h_n = max(1, ny2 - ny1)
            w_n = max(1, nx2 - nx1)
            center_x_n = (nx1 + nx2) // 2

            if number_bbox_from_dash_only:
                # Dashboard-only: use the upper half of the (inferred) number ROI.
                cone_h = max(1, int(0.5 * h_n))
                cone_w = w_n
            else:
                cone_h = h_n
                cone_w = min(W, int(2.0 * w_n))

            # Vertical placement:
            # - number inferred from dashboard: upper half inside the number ROI
            # - real number detection: the band of 1x number height directly above it
            if number_bbox_from_dash_only:
                cone_y1 = ny1
                cone_y2 = self._clip(ny1 + cone_h, 0, H)
                cx1 = nx1
                cx2 = nx2
            else:
                cone_y2 = ny1
                cone_y1 = self._clip(ny1 - cone_h, 0, H)
                cx1 = self._clip(center_x_n - cone_w // 2, 0, W)
                cx2 = self._clip(center_x_n + cone_w // 2, 0, W)
            cy1, cy2 = cone_y1, cone_y2
            if cy2 > cy1 and cx2 > cx1:
                cone_roi = base_image_bgr[cy1:cy2, cx1:cx2]
                if cone_roi is not None and cone_roi.size > 0:
                    outputs.append(("cone", cone_roi, (cx1, cy1, cx2, cy2)))

        # When only the number was recognized, add an explicit dashboard ROI.
        if number_bbox is not None and best_dash is None:
            nx1, ny1, nx2, ny2 = number_bbox
            # Rule: directly below the number ROI, 3x its width and height, horizontally centered.
            h_n = max(1, ny2 - ny1)
            w_n = max(1, nx2 - nx1)
            dash_h = int(3.0 * h_n)
            dash_w = int(3.0 * w_n)
            center_x = (nx1 + nx2) // 2
            dx1 = self._clip(center_x - dash_w // 2, 0, W)
            dx2 = self._clip(center_x + dash_w // 2, 0, W)
            dy1 = self._clip(ny2, 0, H)
            dy2 = self._clip(ny2 + dash_h, 0, H)
            if dy2 > dy1 and dx2 > dx1:
                dash_roi = base_image_bgr[dy1:dy2, dx1:dx2]
                if dash_roi is not None and dash_roi.size > 0:
                    outputs.append(("dashboard", dash_roi, (dx1, dy1, dx2, dy2)))

        if self.debug:
            self._debug_print(f"🔧 {debug_title_prefix} 返回带坐标的 ROI 共 {len(outputs)} 个")
        return outputs

    def draw_boxes(self, img: np.ndarray, detections: List[Tuple[str, Tuple[float, List[float]], bool]]) -> np.ndarray:
        """Return a copy of ``img`` with each detection drawn as a green box plus label."""
        if img is None or img.size == 0:
            return img
        canvas = img.copy()
        green = (0, 255, 0)
        for label, (score, xyxy), _ in detections:
            x1, y1, x2, y2 = (int(c) for c in xyxy)
            cv2.rectangle(canvas, (x1, y1), (x2, y2), green, 2)
            cv2.putText(canvas, f"{label}:{score:.2f}", (x1, max(0, y1 - 5)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, green, 2)
        return canvas

    def second_stage_number(self, number_roi: np.ndarray, conf: float = 0.5) -> Tuple[str, float, Optional[List[float]]]:
        """Run YOLO again on the digit ROI and keep only the best one~six detection.

        Returns:
            (name, score, xyxy_box in ROI coordinates); ("None", 0.0, None) when
            the ROI is empty or no digit class is detected.
        """
        if number_roi is None or number_roi.size == 0:
            return ("None", 0.0, None)
        results = self.yolo_infer(number_roi, conf=conf, imgsz=640, debug_title_prefix="second-stage-number")
        best = ("None", 0.0, None)  # (name, score, box)
        if results and len(results) > 0:
            r = results[0]
            boxes = getattr(r, "boxes", None)
            if boxes is not None and len(boxes) > 0:
                id2name = {k: v.lower() for k, v in getattr(r, "names", {}).items()}
                for i in range(len(boxes)):
                    cls_id = int(boxes.cls[i].item())
                    name = id2name.get(cls_id, str(cls_id))
                    # Ignore any class that is not a digit label.
                    if name not in self.NUMBER_CLASSES:
                        continue
                    score = float(boxes.conf[i].item())
                    box = boxes.xyxy[i].tolist()
                    if score > best[1]:
                        best = (name, score, box)
        return best

    def finalize_results(self,
                         first_stage_results: List[Any],
                         detections: List[Tuple[str, Tuple[float, List[float]], bool]],
                         second_stage_number_res: Tuple[str, float, Optional[List[float]]],
                         hsv_cone_vote: Optional[Tuple[str, float]] = None,
                         conf: float = 0.6) -> List[Tuple[str, Union[str, float], Union[str, float]]]:
        from collections.abc import Iterable

        def _best_by_group_from_result(res_obj, allowed_names: set):
            boxes = getattr(res_obj, "boxes", None)
            if boxes is None or len(boxes) == 0:
                return None
            id2name = {k: v.lower() for k, v in getattr(res_obj, "names", {}).items()}
            best = None
            for i in range(len(boxes)):
                cls_id = int(boxes.cls[i].item())
                name = id2name.get(cls_id, str(cls_id))
                if name in allowed_names:
                    score = float(boxes.conf[i].item())
                    if best is None or score > best[1]:
                        best = (name, score)
            return best

        def _best_from_first_stage(results, allowed_names: set):
            best = None
            if results is None or len(results) == 0:
                return None
            res_list = results if isinstance(results, Iterable) else [results]
            for r in res_list:
                cand = _best_by_group_from_result(r, allowed_names)
                if cand is not None and (best is None or cand[1] > best[1]):
                    best = cand
            return best

        final_list: List[Tuple[str, Union[str, float], Union[str, float]]] = []

        # number: 由第二阶段结果主导
        n_name, n_score, _ = second_stage_number_res
        if n_name in self.NUMBER_CLASSES:
            final_list.append(("number", n_name, float(n_score)))
        else:
            # 兜底使用第一阶段
            best_num_1 = _best_from_first_stage(first_stage_results, self.NUMBER_CLASSES)
            if best_num_1 is not None and best_num_1[1] >= conf:
                final_list.append(("number", best_num_1[0], best_num_1[1]))
            else:
                final_list.append(("number", "None", 0.0))

        # cone: 综合 YOLO 第一阶段与 HSV 投票
        best_cone_1 = _best_from_first_stage(first_stage_results, self.COLOR_CLASSES)
        hsv_name, hsv_score = (None, 0.0)
        if hsv_cone_vote is not None:
            hsv_name, hsv_score = hsv_cone_vote
        yolo_name, yolo_score = (None, 0.0)
        if best_cone_1 is not None:
            yolo_name, yolo_score = best_cone_1

        # 决策：
        # 1) 若二者均缺失 → None
        # 2) 若只有其一 → 取其一
        # 3) 若名称一致 → 取该名称，置信度取二者较大
        # 4) 若名称不一致 → 比较 yolo_score 与 hsv_score（二者都是 0~1），取较大者
        decided_name = "None"
        decided_score: float = 0.0
        if yolo_name is None and hsv_name is None:
            decided_name, decided_score = "None", 0.0
        elif yolo_name is None:
            decided_name, decided_score = hsv_name or "None", float(hsv_score)
        elif hsv_name is None:
            if yolo_score >= conf:
                decided_name, decided_score = yolo_name, float(yolo_score)
            else:
                decided_name, decided_score = yolo_name, float(yolo_score)
        else:
            if yolo_name == hsv_name:
                decided_name = yolo_name
                decided_score = float(max(yolo_score, hsv_score))
            else:
                # 不一致时，选更强的信号
                if float(hsv_score) > float(yolo_score):
                    decided_name, decided_score = hsv_name, float(hsv_score)
                else:
                    decided_name, decided_score = yolo_name, float(yolo_score)

        final_list.append(("cone", decided_name, decided_score))

        # dashboard: 第一阶段
        best_dash_1 = _best_from_first_stage(first_stage_results, self.STATE_CLASSES)
        if best_dash_1 is not None and best_dash_1[1] >= conf:
            final_list.append(("dashboard", best_dash_1[0], best_dash_1[1]))
        else:
            final_list.append(("dashboard", "None", 0.0))

        print({"final_results": final_list})
        return final_list

    def visualize_pipeline(self,
                           frame_idx: int,
                           base_image: np.ndarray,
                           cropped: np.ndarray,
                           dets: List[Tuple[str, Tuple[float, List[float]], bool]],
                           rois_with_boxes: List[Tuple[str, np.ndarray, Tuple[int, int, int, int]]],
                           second_stage_number_res: Tuple[str, float, Optional[List[float]]],
                           final_results: List[Tuple[str, Union[str, float], Union[str, float]]]) -> None:
        """Save staged debug visualizations for one frame (no-op unless debug)."""
        if not self.debug:
            return
        # Stage-one detection boxes.
        vis1 = self.draw_boxes(cropped, dets)

        # Overlay the ROI rectangles.
        vis2 = vis1.copy()
        colors = {"number": (0, 255, 255), "cone": (255, 0, 0)}
        number_bbox_in_cropped: Optional[Tuple[int, int, int, int]] = None
        for cls_name, _roi_img, (x1, y1, x2, y2) in rois_with_boxes:
            c = colors.get(cls_name, (200, 200, 200))
            cv2.rectangle(vis2, (x1, y1), (x2, y2), c, 2)
            cv2.putText(vis2, cls_name, (x1, max(0, y1-5)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, c, 2)
            # Save each ROI image on its own.
            try:
                self._save_debug_image(f"frame_{frame_idx:06d}_roi_{cls_name}.jpg", _roi_img)
            except Exception:
                pass
            if cls_name == "number":
                number_bbox_in_cropped = (x1, y1, x2, y2)

        # Overlay the second-stage digit result.
        n_name, n_score, n_box = second_stage_number_res
        vis3 = vis2.copy()
        if n_box is not None and n_name in self.NUMBER_CLASSES and number_bbox_in_cropped is not None:
            # Map the second-stage box from number_roi coordinates back to the cropped frame.
            rb_x1, rb_y1, rb_x2, rb_y2 = number_bbox_in_cropped
            roi_w = max(1, rb_x2 - rb_x1)
            roi_h = max(1, rb_y2 - rb_y1)
            nbx1, nby1, nbx2, nby2 = [int(v) for v in n_box]
            # n_box is already at number_roi scale, so a pure translation suffices.
            bx1 = rb_x1 + nbx1
            by1 = rb_y1 + nby1
            bx2 = rb_x1 + nbx2
            by2 = rb_y1 + nby2
            # Clamp to the cropped-frame bounds.
            h_c, w_c = vis3.shape[:2]
            bx1 = max(0, min(bx1, w_c - 1))
            bx2 = max(0, min(bx2, w_c - 1))
            by1 = max(0, min(by1, h_c - 1))
            by2 = max(0, min(by2, h_c - 1))
            cv2.rectangle(vis3, (bx1, by1), (bx2, by2), (0, 128, 255), 2)
            cv2.putText(vis3, f"2nd:{n_name}:{n_score:.2f}", (bx1, max(0, by1-5)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 128, 255), 2)

        # Banner strip with the final results.
        vis4 = vis3.copy()
        h, w = vis4.shape[:2]
        panel_h = 40
        cv2.rectangle(vis4, (0, 0), (w, panel_h), (0, 0, 0), -1)
        txt = " | ".join([f"{c}:{v}" for c, v, _ in final_results])
        cv2.putText(vis4, txt, (10, 28), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        # Persist every stage.
        self._save_debug_image(f"frame_{frame_idx:06d}_cropped.jpg", cropped)
        self._save_debug_image(f"frame_{frame_idx:06d}_stage1.jpg", vis1)
        self._save_debug_image(f"frame_{frame_idx:06d}_rois.jpg", vis2)
        self._save_debug_image(f"frame_{frame_idx:06d}_second_and_final.jpg", vis4)

    def process_image(self,
                      image: np.ndarray,
                      top_cut_ratio: float = 0.25,
                      width_center_ratio: float = 0.5,
                      first_stage_conf: float = 0.6,
                      second_stage_conf: float = 0.5,
                      final_conf: float = 0.6,
                      frame_idx: int = 0) -> Dict[str, Any]:
        """Run the full two-stage pipeline on a single frame.

        Stage 1 detects anchors on a cropped frame and builds adaptive ROIs;
        stage 2 re-detects the digit inside the number ROI; the cone color is
        decided by an HSV vote over the cone ROI (with a fallback region
        derived from the number ROI when no cone ROI was built).

        Args:
            image: Input BGR frame (e.g. from ``cv2.VideoCapture``).
            top_cut_ratio: Fraction of the frame height cut from the top
                before stage-1 inference.
            width_center_ratio: Fraction of the frame width kept, centered.
            first_stage_conf: Confidence threshold for stage-1 YOLO.
            second_stage_conf: Confidence threshold for stage-2 number YOLO.
            final_conf: Confidence threshold used when finalizing results.
            frame_idx: Frame index, used only for debug-image filenames.

        Returns:
            Dict with ``final_results``, ``cone_color``, ``number``,
            ``dashboard_state`` and ``processing_info``; on internal failure
            an ``error`` key is added and the result fields are "None"/empty.

        Raises:
            ValueError: If *image* is ``None`` or has zero size.
        """
        if image is None or image.size == 0:
            raise ValueError("Input image is empty")
        try:
            cropped = self.crop_image(image, top_cut_ratio, width_center_ratio)
            first_stage_results = self.yolo_infer(
                cropped, conf=first_stage_conf, imgsz=640, debug_title_prefix="first-stage")
            detections = self.analyze_results(first_stage_results, conf=first_stage_conf)
            rois_with_boxes = self.build_adaptive_rois_with_boxes(detections, cropped)

            # Hand the number ROI (if any) to the stage-2 digit detector.
            number_roi = next(
                (roi_img for (cls_name, roi_img, _bbox) in rois_with_boxes
                 if cls_name == "number"),
                None)
            second_stage_number_res: Tuple[str, float, Optional[List[float]]] = ("None", 0.0, None)
            if number_roi is not None:
                second_stage_number_res = self.second_stage_number(number_roi, conf=second_stage_conf)

            # HSV color vote on the cone ROI (robust vote, raw-count fallback).
            hsv_vote_name: Optional[str] = None
            hsv_vote_conf: float = 0.0
            for (cls_name, roi_img, _bbox) in rois_with_boxes:
                if cls_name == "cone":
                    hsv_vote_name, hsv_vote_conf = self._hsv_cone_vote(roi_img)
                    break

            # Fallback: no cone ROI but a number ROI exists — take the region
            # one number-height directly above the number box and vote there.
            if hsv_vote_name is None:
                for (cls_name, _roi_img, (bx1, by1, bx2, by2)) in rois_with_boxes:
                    if cls_name == "number":
                        h_n = max(1, by2 - by1)
                        cy2 = by1
                        cy1 = self._clip(by1 - h_n, 0, cropped.shape[0])
                        cx1, cx2 = bx1, bx2
                        if cy2 > cy1 and cx2 > cx1:
                            cone_roi_retry = cropped[cy1:cy2, cx1:cx2]
                            hsv_vote_name, hsv_vote_conf = self._hsv_cone_vote(cone_roi_retry)
                        break

            final_results = self.finalize_results(
                first_stage_results,
                detections,
                second_stage_number_res,
                hsv_cone_vote=(hsv_vote_name, hsv_vote_conf) if hsv_vote_name is not None else None,
                conf=final_conf,
            )

            # Flatten per-class results into the three named output fields.
            cone_color = "None"
            number = "None"
            dashboard_state = "None"
            for class_name, detected_name, _confidence in final_results:
                if class_name == "cone":
                    cone_color = str(detected_name)
                elif class_name == "number":
                    number = str(detected_name)
                elif class_name == "dashboard":
                    dashboard_state = str(detected_name)

            # Intermediate/final visualizations (saved/shown per debug config).
            self.visualize_pipeline(frame_idx, image, cropped, detections,
                                    rois_with_boxes, second_stage_number_res, final_results)

            return {
                'final_results': final_results,
                'cone_color': cone_color,
                'number': number,
                'dashboard_state': dashboard_state,
                'processing_info': {
                    'image_shape': image.shape,
                    'cropped_shape': cropped.shape,
                    'num_detections_stage1': len(detections),
                    'num_rois': len(rois_with_boxes),
                    'roi_types': [cls_name for (cls_name, _roi_img, _bbox) in rois_with_boxes],
                    'roi_boxes': [
                        {'type': cls_name, 'bbox': [int(bx1), int(by1), int(bx2), int(by2)]}
                        for (cls_name, _roi_img, (bx1, by1, bx2, by2)) in rois_with_boxes
                    ],
                    'crop_offsets': {
                        'top': int(self._last_crop_offsets.get('top', 0)),
                        'left': int(self._last_crop_offsets.get('left', 0)),
                    },
                }
            }
        except Exception as e:  # boundary: report failure, never crash the video loop
            err = f"管道处理中出错: {e}"
            self._debug_print(err)
            return {
                'final_results': [],
                'cone_color': "None",
                'number': "None",
                'dashboard_state': "None",
                'error': err,
                'processing_info': {}
            }

    def _hsv_cone_vote(self, roi_img: np.ndarray) -> Tuple[Optional[str], float]:
        """Vote a cone color for *roi_img* via HSV analysis.

        Tries the robust vote first; when it does not return one of the
        accepted cone colors, falls back to raw per-range pixel counting and
        reports the dominant color's pixel fraction as the confidence.

        Returns:
            ``(color_name, confidence)`` where ``color_name`` is one of
            ``red/orange/yellow/blue``, or ``None`` when no accepted color
            wins (confidence is still the fallback's dominant-color fraction).
        """
        accepted = {"red", "orange", "yellow", "blue"}
        name_rb, score_rb, _ = self._vote_color_robust(roi_img)
        if name_rb in accepted:
            return name_rb, float(score_rb)
        best_c, counts = self._count_hsv_by_ranges(roi_img)
        total = max(1, sum(counts.values()))
        name = best_c if best_c in accepted else None
        return name, float(counts.get(best_c, 0)) / float(total)


def create_double_yolo_pipeline(model_path: str,
                                device: Optional[str] = None,
                                debug: bool = True,
                                save_dir: Optional[str] = None) -> DoubleYoloPipeline:
    """Factory for :class:`DoubleYoloPipeline`.

    Args:
        model_path: Path to the stage-1 YOLO weights file.
        device: Inference device; auto-selected when ``None``.
        debug: Whether to save/show intermediate visualizations.
        save_dir: Directory for debug output; a timestamped default is used
            when ``None``.

    Returns:
        A fully initialized pipeline instance.
    """
    pipeline = DoubleYoloPipeline(
        model_path=model_path,
        device=device,
        debug=debug,
        save_dir=save_dir,
    )
    return pipeline


if __name__ == "__main__":
    # 权重
    candidate_models = [
        r".\weights\all_yolo_all_epochs_0811\best.pt",
        r".\weights\all_yolo_all_epochs_0808\best.pt",
        r".\weights\all_yolo_all_epochs_0805\best.pt",
        r".\weights\all_yolo_all_epochs_0804\best.pt",
    ]
    model_path = None
    for path in candidate_models:
        if os.path.exists(path):
            model_path = path
            break
    if model_path is None:
        print("❌ Model weights not found")
        sys.exit(1)
    model_path = candidate_models[1]
    # 测试视频
    candidate_videos = [
        r".\video_2025-08-10_13-16-08.avi",
        r".\video_2025-08-03_22-31-55.avi",
        r".\video_2025-08-16_00-10-05.avi",
    ]
    video_source = None
    for video_path in candidate_videos:
        if os.path.exists(video_path):
            video_source = video_path
            break
    if video_source is None:
        print("❌ Video source not found")
        sys.exit(1)
    video_source =candidate_videos[2]
    pipeline = create_double_yolo_pipeline(model_path=model_path, debug=True)

    cap = cv2.VideoCapture(video_source)
    if not cap.isOpened():
        print(f"❌ 无法打开视频源: {video_source}")
        sys.exit(1)

    frame_count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        frame_count += 1
        print(f"--- 正在处理第 {frame_count} 帧 ---")

        res = pipeline.process_image(frame, frame_idx=frame_count)

        print("🎉 处理完成!")
        print(f"锥筒颜色: {res.get('cone_color', 'None')}")
        print(f"数字: {res.get('number', 'None')}")
        print(f"仪表盘状态: {res.get('dashboard_state', 'None')}")
        print("\n")

        # 为示例，限制前 10 帧
        # if frame_count >= 10:
        #     break

    cap.release()
    print("视频处理结束。")


