"""
YOLO + OCR + HSV 管道（无第二阶段YOLO）

特性：
- 使用第一阶段 YOLO 进行目标锚点检测
- 自适应裁剪数字与锥筒 ROI
- 使用 CnOCR 的 number-densenet_lite_136-fc 模型对数字进行识别（限制输出为 1-6 的数字名）
- 数字ROI预处理屏蔽二值化，仅使用灰度图像增强
- 使用 HSV 投票法对锥筒颜色进行识别（red/orange/yellow/green/blue）
- 仪表盘状态（down/normal/up）直接取第一阶段 YOLO 最佳结果
"""

import os
import sys
import gc
from typing import List, Tuple, Optional, Union, Dict, Any

import cv2
import numpy as np
import torch
from ultralytics import YOLO

# pytesseract is optional: digits are recognized with CnOCR, so a missing
# Tesseract install must not break importing this module.
try:
    import pytesseract
    _HAS_TESSERACT = True
except Exception:
    pytesseract = None  # type: ignore
    _HAS_TESSERACT = False

if pytesseract is not None:
    # NOTE(review): hard-coded Windows install path — confirm it matches the
    # deployment machine before relying on pytesseract.
    pytesseract.pytesseract.tesseract_cmd = r"D:\tesseract\tesseract.exe"

class YoloOcrHsvPipeline:
    """Lightweight pipeline using only first-stage YOLO + OCR (digits) + HSV (cones)."""

    # First-stage YOLO class names, grouped by what they describe.
    COLOR_CLASSES = {"red", "orange", "yellow", "green", "blue"}
    NUMBER_CLASSES = {"one", "two", "three", "four", "five", "six"}
    STATE_CLASSES = {"down", "normal", "up"}

    # Per-color HSV thresholds as (h_lo, s_lo, v_lo, h_hi, s_hi, v_hi) tuples.
    # A color may list several ranges; they are OR-ed together in
    # _make_color_masks (lower bound = first three values, upper = last three).
    COLOR_RANGES = {
        "red":    [(0, 20, 225, 4, 188, 255)],
        "orange": [(0, 97, 222, 11, 155, 255), (18, 97, 222, 179, 155, 255)],
        "yellow": [(23, 122, 219, 32, 193, 255)],
        "green":  [(37, 46, 207, 47, 185, 255)],
        "blue":   [(92, 171, 222, 108, 239, 255)],
    }

    # Maps OCR digit characters to the YOLO class names used elsewhere.
    DIGIT_TO_NAME = {"1": "one", "2": "two", "3": "three", "4": "four", "5": "five", "6": "six"}

    def __init__(self,
                 model_path: str,
                 device: Optional[str] = None,
                 debug: bool = True,
                 tesseract_cmd: Optional[str] = None,
                 template_dir: Optional[str] = None):
        """Load the first-stage YOLO model and the CnOCR digit recognizer.

        Args:
            model_path: Path to the YOLO weights file (must exist).
            device: Inference device; auto-selects 'cuda'/'cpu' when None.
            debug: When True, verbose diagnostics are printed.
            tesseract_cmd: Optional path to the Tesseract binary; applied to
                pytesseract when the package is available.
            template_dir: Reserved for template matching; currently unused.

        Raises:
            FileNotFoundError: If model_path does not exist.
        """
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model weights not found: {model_path}")

        self.debug = debug
        self.device = device if device is not None else ('cuda' if torch.cuda.is_available() else 'cpu')
        self.model = YOLO(model_path)
        self._initialize_model()
        # Offsets of the most recent crop (maps crop coordinates back to the original frame).
        self._last_crop_offsets: Dict[str, int] = {"top": 0, "left": 0}

        # Fix: tesseract_cmd used to be accepted but ignored; honor it when
        # provided so callers can override the module-level hard-coded path.
        if tesseract_cmd and _HAS_TESSERACT and pytesseract is not None:
            pytesseract.pytesseract.tesseract_cmd = tesseract_cmd

        # CnOCR initialization (replaces Tesseract OCR for digit reading).
        try:
            from cnocr import CnOcr
            # Digit recognition model restricted to candidate characters '1'-'6'.
            self.cnocr = CnOcr(model_name='number-densenet_lite_136-fc', det_model_name='en_PP-OCRv3_det', cand_alphabet=['1', '2', '3', '4', '5', '6'])
            self.cnocr_available = True
        except Exception:
            self.cnocr = None
            self.cnocr_available = False
        # Keep the legacy field name for backward compatibility.
        self.ocr_available = self.cnocr_available

        if self.debug:
            print(f"🔧 管道初始化 - 设备: {self.device}, 模型: {model_path}, CnOCR: {'OK' if self.ocr_available else 'Unavailable'}")

    def _initialize_model(self):
        """Switch the YOLO network to inference mode and run one warm-up pass."""
        try:
            if hasattr(self.model, 'model'):
                net = self.model.model
                # Eval mode, move to the target device, and freeze all weights.
                net.eval()
                if hasattr(net, 'to'):
                    net.to(self.device)
                for param in net.parameters():
                    param.requires_grad = False

            # A dummy gray frame triggers lazy initialization so the first
            # real prediction does not pay the warm-up cost.
            warmup_img = np.full((640, 640, 3), 128, dtype=np.uint8)
            with torch.no_grad():
                _ = self.model.predict(warmup_img, verbose=False, conf=0.1, device=self.device, save=False, show=False)
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            if self.debug:
                print("✅ 模型初始化成功")
        except Exception as e:
            print(f"⚠️ 模型初始化问题: {e}")

    def _debug_print(self, *args, **kwargs):
        """Forward to print() only while debug mode is enabled."""
        if not self.debug:
            return
        print(*args, **kwargs)

    def crop_image(self, image: np.ndarray,
                   top_cut_ratio: float = 0.25,
                   width_center_ratio: float = 0.5) -> np.ndarray:
        if image is None or image.size == 0:
            raise ValueError("Input image is empty")
        frame_height, frame_width = image.shape[:2]
        top_cut = int(frame_height * top_cut_ratio)
        image = image[top_cut:, :]
        center_width = int(frame_width * width_center_ratio)
        w_x1 = frame_width // 2 - center_width // 2
        w_x2 = frame_width // 2 + center_width // 2
        cropped = image[:, w_x1:w_x2]
        # 记录偏移，供外部将裁剪坐标映射回原图
        self._last_crop_offsets = {"top": top_cut, "left": w_x1}
        if self.debug:
            self._debug_print(f"📐 裁剪: {frame_width}x{frame_height} -> {cropped.shape[1]}x{cropped.shape[0]}")
        return cropped

    def yolo_first_stage(self, image_bgr: np.ndarray,
                          conf: float = 0.6,
                          imgsz: Optional[Union[int, Tuple[int, int]]] = 640,
                          debug_title_prefix: str = "first-stage") -> List[Any]:
        """Run first-stage YOLO detection on a BGR image.

        Args:
            image_bgr: Input image; normalized to a contiguous uint8 array.
            conf: Confidence threshold forwarded to model.predict.
            imgsz: Inference image size forwarded to model.predict.
            debug_title_prefix: Label used in debug output.

        Returns:
            The raw ultralytics results list from model.predict.

        Raises:
            ValueError: If the input image is None or empty.
        """
        if image_bgr is None or image_bgr.size == 0:
            raise ValueError("Input image is empty")
        # Ultralytics expects a contiguous uint8 array; normalize layout/dtype here.
        if not image_bgr.flags['C_CONTIGUOUS']:
            image_bgr = np.ascontiguousarray(image_bgr)
        if image_bgr.dtype != np.uint8:
            image_bgr = image_bgr.astype(np.uint8)

        # Release cached GPU memory before and after inference to keep VRAM low.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        with torch.no_grad():
            results = self.model.predict(
                image_bgr,
                verbose=False,
                conf=conf,
                device=self.device,
                save=False,
                show=False,
                stream=False,
                imgsz=imgsz,
            )
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        if self.debug:
            # Best-effort debug summary; logging failures never break inference.
            try:
                if results and len(results) > 0:
                    boxes = getattr(results[0], "boxes", None)
                    summary = []
                    if boxes is not None and len(boxes) > 0:
                        id2name = {k: v for k, v in results[0].names.items()}
                        for i in range(len(boxes)):
                            cls_id = int(boxes.cls[i].item())
                            score = float(boxes.conf[i].item())
                            name = id2name.get(cls_id, str(cls_id))
                            summary.append(f"{name}:{score:.2f}")
                    self._debug_print(f"🎯 {debug_title_prefix} 检测: {', '.join(summary) if summary else 'none'}")
                else:
                    self._debug_print(f"🎯 {debug_title_prefix} 检测: none (空结果)")
            except Exception as e:
                self._debug_print(f"DEBUG 可视化失败: {e}")

        return results

    def analyze_first_stage_results(self, results: List[Any],
                                    conf: float,
                                    debug_title_prefix: str = "first-stage analysis") -> List[Tuple[str, Tuple[float, List[float]], bool]]:
        """Flatten raw YOLO results into (name, (prob, xyxy_box), is_confident) tuples."""
        parsed: List[Tuple[str, Tuple[float, List[float]], bool]] = []
        if results is None or len(results) == 0:
            if self.debug:
                self._debug_print(f"📊 {debug_title_prefix}: 结果为None或空")
            return parsed
        from collections.abc import Iterable
        # Accept either a list of result objects or a single result object.
        result_items = results if isinstance(results, Iterable) else [results]
        for result in result_items:
            boxes = getattr(result, "boxes", None)
            if boxes is None or len(boxes) == 0:
                continue
            # Lowercase the class-name table so matching is case-insensitive.
            names_by_id = {cls_id: label.lower() for cls_id, label in getattr(result, "names", {}).items()}
            for idx in range(len(boxes)):
                try:
                    class_id = int(boxes.cls[idx].item())
                    label = names_by_id.get(class_id, str(class_id))
                    score = float(boxes.conf[idx].item())
                    xyxy = boxes.xyxy[idx].tolist()
                    parsed.append((label, (score, xyxy), score >= conf))
                except Exception as e:
                    if self.debug:
                        self._debug_print(f"📊 {debug_title_prefix}: 跳过一个框，错误: {e}")
                    continue
        if self.debug:
            self._debug_print(f"📊 {debug_title_prefix} 列表: {parsed}")
        return parsed

    def _clip(self, v: int, lo: int, hi: int) -> int:
        """Clamp v into [lo, hi] after truncating it to int."""
        value = int(v)
        if value > hi:
            value = hi
        if value < lo:
            value = lo
        return value

    def _box_h(self, box: List[float]) -> int:
        """Height of an xyxy box, floored at zero."""
        height = int(box[3] - box[1])
        return height if height > 0 else 0

    def _box_w(self, box: List[float]) -> int:
        """Width of an xyxy box, floored at zero."""
        width = int(box[2] - box[0])
        return width if width > 0 else 0

    def _expand_and_clip_box(self, box: List[float], pad_ratio: float, H: int, W: int) -> Tuple[int, int, int, int]:
        """Grow an xyxy box by pad_ratio on each side and clamp it to the image."""
        x1, y1, x2, y2 = box
        # Padding is proportional to the (at least 1px) box extent.
        pad_x = max(1, x2 - x1) * pad_ratio
        pad_y = max(1, y2 - y1) * pad_ratio
        return (
            self._clip(int(x1 - pad_x), 0, W),
            self._clip(int(y1 - pad_y), 0, H),
            self._clip(int(x2 + pad_x), 0, W),
            self._clip(int(y2 + pad_y), 0, H),
        )

    def build_adaptive_rois(self,
                            detections: List[Tuple[str, Tuple[float, List[float]], bool]],
                            base_image_bgr: np.ndarray,
                            debug_title_prefix: str = "adaptive-roi") -> List[Tuple[str, np.ndarray]]:
        if base_image_bgr is None or base_image_bgr.size == 0:
            raise ValueError("Base image is empty")
        H, W = base_image_bgr.shape[:2]

        def _pick_best(dets, allowed_names: set):
            best = None
            for name, (prob, box), _ in dets:
                if name.lower() in allowed_names:
                    if best is None or prob > best[0]:
                        best = (prob, box, name)
            return best

        def slice_band(y0, y1):
            y0c = self._clip(y0, 0, H)
            y1c = self._clip(y1, 0, H)
            if y1c <= y0c:
                return None
            return base_image_bgr[y0c:y1c, :]

        def slice_band_with_width(y0, y1, x_center, width_ratio=1.0):
            y0c = self._clip(y0, 0, H)
            y1c = self._clip(y1, 0, H)
            if y1c <= y0c:
                return None
            half_width = int(W * width_ratio / 2)
            x0c = self._clip(x_center - half_width, 0, W)
            x1c = self._clip(x_center + half_width, 0, W)
            if x1c <= x0c:
                return base_image_bgr[y0c:y1c, :]
            return base_image_bgr[y0c:y1c, x0c:x1c]

        best_number = _pick_best(detections, self.NUMBER_CLASSES)
        best_dash = _pick_best(detections, self.STATE_CLASSES)

        rois: List[Tuple[str, np.ndarray]] = []

        if best_number is None and best_dash is None:
            if self.debug:
                self._debug_print(f"🔧 {debug_title_prefix}: 没有数字/仪表盘锚点; 返回空列表")
            return rois

        if best_number is None and best_dash is not None:
            prob_d, box_d, _name_d = best_dash
            x1_d, y1_d, x2_d, y2_d = [int(x) for x in box_d]
            h_d = self._box_h(box_d)
            w_d = self._box_w(box_d)
            x_center_d = (x1_d + x2_d) // 2

            num_y1 = y1_d - int(1.0 * h_d)
            num_y2 = y1_d
            number_width_ratio = min(1.0, max(0.0, w_d / W))
            num_roi = slice_band_with_width(num_y1, num_y2, x_center_d, number_width_ratio)
            if num_roi is not None:
                rois.append(("number", num_roi))

                h_nr = num_roi.shape[0]
                half_h = max(1, h_nr // 2)
                cone_roi = num_roi[:half_h, :]
                if cone_roi is not None and cone_roi.size > 0:
                    rois.append(("cone", cone_roi))

        elif best_number is not None and best_dash is None:
            prob_n, box_n, _name_n = best_number
            nx1, ny1, nx2, ny2 = self._expand_and_clip_box(box_n, pad_ratio=0.15, H=H, W=W)
            number_roi = base_image_bgr[ny1:ny2, nx1:nx2]
            if number_roi is not None and number_roi.size > 0:
                rois.append(("number", number_roi))

                half_h = max(1, number_roi.shape[0] // 2)
                cone_roi = number_roi[:half_h, :]
                if cone_roi is not None and cone_roi.size > 0:
                    rois.append(("cone", cone_roi))

        else:
            prob_n, box_n, _name_n = best_number
            prob_d, box_d, _name_d = best_dash

            x1_d, y1_d, x2_d, y2_d = [int(x) for x in box_d]
            h_d = self._box_h(box_d)
            w_d = self._box_w(box_d)
            x_center_d = (x1_d + x2_d) // 2
            num_y1 = y1_d - int(1.0 * h_d)
            num_y2 = y1_d
            number_width_ratio = min(1.0, max(0.0, w_d / W))
            num_roi = slice_band_with_width(num_y1, num_y2, x_center_d, number_width_ratio)
            if num_roi is not None and num_roi.size > 0:
                rois.append(("number", num_roi))

                half_h = max(1, num_roi.shape[0] // 2)
                cone_roi = num_roi[:half_h, :]
                if cone_roi is not None and cone_roi.size > 0:
                    rois.append(("cone", cone_roi))

        if self.debug:
            self._debug_print(f"🔧 {debug_title_prefix} 返回 {[(c, (r.shape[1], r.shape[0])) for c, r in rois]}")

        return rois

    def build_adaptive_rois_with_boxes(self,
                                       detections: List[Tuple[str, Tuple[float, List[float]], bool]],
                                       base_image_bgr: np.ndarray,
                                       debug_title_prefix: str = "adaptive-roi") -> List[Tuple[str, np.ndarray, Tuple[int, int, int, int]]]:
        """与 build_adaptive_rois 相同，但同时返回 ROI 的裁剪坐标 (x1,y1,x2,y2) 基于 base_image_bgr。"""
        if base_image_bgr is None or base_image_bgr.size == 0:
            raise ValueError("Base image is empty")
        H, W = base_image_bgr.shape[:2]

        def _pick_best(dets, allowed_names: set):
            best = None
            for name, (prob, box), _ in dets:
                if name.lower() in allowed_names:
                    if best is None or prob > best[0]:
                        best = (prob, box, name)
            return best

        def _band_coords(y0, y1):
            y0c = self._clip(y0, 0, H)
            y1c = self._clip(y1, 0, H)
            if y1c <= y0c:
                return None
            return (0, y0c, W, y1c)

        def _band_with_width_coords(y0, y1, x_center, width_ratio=1.0):
            y0c = self._clip(y0, 0, H)
            y1c = self._clip(y1, 0, H)
            if y1c <= y0c:
                return None
            half_width = int(W * max(0.0, min(1.0, width_ratio)) / 2)
            x0c = self._clip(x_center - half_width, 0, W)
            x1c = self._clip(x_center + half_width, 0, W)
            if x1c <= x0c:
                return (0, y0c, W, y1c)
            return (x0c, y0c, x1c, y1c)

        best_number = _pick_best(detections, self.NUMBER_CLASSES)
        best_dash = _pick_best(detections, self.STATE_CLASSES)

        outputs: List[Tuple[str, np.ndarray, Tuple[int, int, int, int]]] = []

        if best_number is None and best_dash is None:
            if self.debug:
                self._debug_print(f"🔧 {debug_title_prefix}: 没有数字/仪表盘锚点; 返回空列表")
            return outputs

        if best_number is None and best_dash is not None:
            prob_d, box_d, _name_d = best_dash
            x1_d, y1_d, x2_d, y2_d = [int(x) for x in box_d]
            h_d = self._box_h(box_d)
            w_d = self._box_w(box_d)
            x_center_d = (x1_d + x2_d) // 2

            num_y1 = y1_d - int(1.0 * h_d)
            num_y2 = y1_d
            number_width_ratio = min(1.0, max(0.0, w_d / W))
            nb = _band_with_width_coords(num_y1, num_y2, x_center_d, number_width_ratio)
            if nb is not None:
                nx1, ny1, nx2, ny2 = nb
                roi = base_image_bgr[ny1:ny2, nx1:nx2]
                if roi is not None and roi.size > 0:
                    outputs.append(("number", roi, (nx1, ny1, nx2, ny2)))

            dashboard_width_ratio = min(1.0, w_d / W * 2.0)
            cone_y1 = y1_d - int(1.0 * h_d)
            cone_y2 = y1_d - int(0.5 * h_d)
            cb = _band_with_width_coords(cone_y1, cone_y2, x_center_d, dashboard_width_ratio)
            if cb is not None:
                cx1, cy1, cx2, cy2 = cb
                roi = base_image_bgr[cy1:cy2, cx1:cx2]
                if roi is not None and roi.size > 0:
                    outputs.append(("cone", roi, (cx1, cy1, cx2, cy2)))

        elif best_number is not None and best_dash is None:
            prob_n, box_n, _name_n = best_number
            nx1, ny1, nx2, ny2 = self._expand_and_clip_box(box_n, pad_ratio=0.15, H=H, W=W)
            number_roi = base_image_bgr[ny1:ny2, nx1:nx2]
            if number_roi is not None and number_roi.size > 0:
                outputs.append(("number", number_roi, (nx1, ny1, nx2, ny2)))

            h_n = self._box_h(box_n)
            cb = _band_coords(int(ny1 - 1.0 * h_n), ny1)
            if cb is not None:
                cx1, cy1, cx2, cy2 = cb
                roi = base_image_bgr[cy1:cy2, cx1:cx2]
                if roi is not None and roi.size > 0:
                    outputs.append(("cone", roi, (cx1, cy1, cx2, cy2)))

        else:
            prob_n, box_n, _name_n = best_number
            prob_d, box_d, _name_d = best_dash

            x1_d, y1_d, x2_d, y2_d = [int(x) for x in box_d]
            h_d = self._box_h(box_d)
            w_d = self._box_w(box_d)
            x_center_d = (x1_d + x2_d) // 2
            num_y1 = y1_d - int(1.0 * h_d)
            num_y2 = y1_d
            number_width_ratio = min(1.0, max(0.0, w_d / W))
            nb = _band_with_width_coords(num_y1, num_y2, x_center_d, number_width_ratio)
            if nb is not None:
                nx1, ny1, nx2, ny2 = nb
                roi = base_image_bgr[ny1:ny2, nx1:nx2]
                if roi is not None and roi.size > 0:
                    outputs.append(("number", roi, (nx1, ny1, nx2, ny2)))

            h_n = self._box_h(box_n)
            dashboard_width_ratio = min(1.0, w_d / W * 1.5)
            cb = _band_with_width_coords(int(ny1 - 1.0 * h_n), ny1, x_center_d, dashboard_width_ratio)
            if cb is not None:
                cx1, cy1, cx2, cy2 = cb
                roi = base_image_bgr[cy1:cy2, cx1:cx2]
                if roi is not None and roi.size > 0:
                    outputs.append(("cone", roi, (cx1, cy1, cx2, cy2)))

        if self.debug:
            self._debug_print(f"🔧 {debug_title_prefix} 返回带坐标的 ROI 共 {len(outputs)} 个")

        return outputs

    def _make_color_masks(self, hsv_image: np.ndarray, color_name: str) -> np.ndarray:
        """Binary mask of pixels matching any HSV range registered for
        color_name, cleaned with a small open/close morphology pass."""
        combined = None
        for (h_lo, s_lo, v_lo, h_hi, s_hi, v_hi) in self.COLOR_RANGES[color_name]:
            lower = np.array([h_lo, s_lo, v_lo], dtype=np.uint8)
            upper = np.array([h_hi, s_hi, v_hi], dtype=np.uint8)
            part = cv2.inRange(hsv_image, lower, upper)
            # OR the ranges together as we go (every color has >= 1 range).
            combined = part if combined is None else cv2.bitwise_or(combined, part)
        # Remove speckle noise, then fill small holes.
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        combined = cv2.morphologyEx(combined, cv2.MORPH_OPEN, kernel, iterations=1)
        return cv2.morphologyEx(combined, cv2.MORPH_CLOSE, kernel, iterations=1)

    def _predict_color_by_hsv(self, image_bgr: np.ndarray) -> Tuple[str, Dict[str, int]]:
        """Count matching mask pixels per color on a normalized 640x640 patch
        and return (winning_color, all_counts)."""
        patch = cv2.resize(image_bgr, (640, 640))
        # Cap lightness in LAB space before thresholding.
        lab = cv2.cvtColor(patch, cv2.COLOR_BGR2LAB)
        l_ch, a_ch, b_ch = cv2.split(lab)
        l_ch = cv2.min(l_ch, 230)
        patch = cv2.cvtColor(cv2.merge([l_ch, a_ch, b_ch]), cv2.COLOR_LAB2BGR)
        # Boost saturation slightly and cap value in HSV space.
        hsv = cv2.cvtColor(patch, cv2.COLOR_BGR2HSV)
        h_ch, s_ch, v_ch = cv2.split(hsv)
        s_ch = cv2.convertScaleAbs(s_ch, alpha=1.2, beta=0)
        v_ch = cv2.min(v_ch, 245)
        hsv = cv2.merge([h_ch, s_ch, v_ch])
        counts: Dict[str, int] = {
            cname: int(cv2.countNonZero(self._make_color_masks(hsv, cname)))
            for cname in ("red", "orange", "yellow", "green", "blue")
        }
        # max over keys preserves the original first-wins tie-breaking order.
        return max(counts, key=counts.get), counts

    def hsv_cone_color_vote(self, roi_list: List[Tuple[str, np.ndarray]],
                            debug_title_prefix: str = "hsv-cone") -> Tuple[str, str]:
        """Aggregate HSV pixel counts over all cone ROIs and vote for a color.

        Returns:
            ("cone", color) where color is one of the five known colors, or
            ("cone", "None") when there are no cone ROIs or no colored pixels.
        """
        cone_rois = [img for label, img in roi_list if label == "cone"]
        if not cone_rois:
            return ("cone", "None")
        totals = {"red": 0, "orange": 0, "yellow": 0, "green": 0, "blue": 0}
        for idx, roi in enumerate(cone_rois):
            roi_best, roi_counts = self._predict_color_by_hsv(roi)
            for color in totals:
                totals[color] += roi_counts.get(color, 0)
            if self.debug:
                self._debug_print(f"🎨 {debug_title_prefix} ROI#{idx} 最佳={roi_best} 计数={roi_counts}")
        winner = max(totals.items(), key=lambda kv: kv[1])[0]
        if totals[winner] <= 0:
            winner = "None"
        if self.debug:
            self._debug_print(f"🎨 {debug_title_prefix} 总计数={totals} -> 最终={winner}")
        return ("cone", winner)

    def _preprocess_for_ocr(self, img_bgr: np.ndarray) -> List[np.ndarray]:
        if img_bgr is None or img_bgr.size == 0:
            return []
        # 转换为灰度图
        gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
        
        # 基础图像增强（不使用二值化）
        h, w = gray.shape[:2]
        scale = 3.0 if max(h, w) < 120 else 2.0
        gray_up = cv2.resize(gray, (int(w * scale), int(h * scale)), interpolation=cv2.INTER_CUBIC)
        
        # 轻微的高斯模糊和锐化
        blur = cv2.GaussianBlur(gray_up, (0, 0), sigmaX=0.5)
        sharp = cv2.addWeighted(gray_up, 1.2, blur, -0.2, 0)
        
        # 直方图均衡化
        eq = cv2.equalizeHist(gray_up)
        
        # 只返回灰度图像变体，不包含二值化结果
        variants: List[np.ndarray] = [gray_up, sharp, eq]
        
        return variants

    def _extract_digit_candidate_bbox(self, img_bgr: np.ndarray) -> np.ndarray:
        """Extract the bounding box of the most plausible digit blob from a
        number ROI so OCR can focus on a single digit region.

        Returns the cropped sub-image, or the original image when the ROI is
        too small or no suitable contour is found.
        """
        if img_bgr is None or img_bgr.size == 0:
            return img_bgr
        gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
        h, w = gray.shape[:2]
        if h < 10 or w < 10:
            return img_bgr

        # Otsu binarization in both polarities (used only for contour
        # detection here, never fed to the OCR itself).
        _, bin1 = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        _, bin2 = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

        best = None
        for b in (bin1, bin2):
            cnts, _ = cv2.findContours(b, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            if not cnts:
                continue
            # Largest contours first; take the first one passing all filters.
            cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
            for c in cnts:
                area = cv2.contourArea(c)
                if area <= 0:
                    continue
                # Reject blobs that are tiny or dominate the whole ROI.
                ratio = area / float(h * w)
                if ratio < 0.01 or ratio > 0.6:
                    continue
                x, y, ww, hh = cv2.boundingRect(c)
                # Keep roughly digit-shaped aspect ratios in [0.2, 1.2].
                ar = ww / float(max(1, hh))
                if ar < 0.2 or ar > 1.2:
                    continue
                # Crop with a 10% margin around the blob, clamped to the ROI.
                pad_x = int(0.1 * ww)
                pad_y = int(0.1 * hh)
                x1 = max(0, x - pad_x)
                y1 = max(0, y - pad_y)
                x2 = min(w, x + ww + pad_x)
                y2 = min(h, y + hh + pad_y)
                best = img_bgr[y1:y2, x1:x2]
                break
            if best is not None:
                break
        return best if best is not None and best.size > 0 else img_bgr

    def recognize_number_with_ocr(self, roi_list: List[Tuple[str, np.ndarray]],
                                  debug_title_prefix: str = "ocr-number") -> Tuple[str, str]:
        """Recognize the digit (1-6) shown in the "number" ROIs using CnOCR.

        For each number ROI, a contour-refined crop plus the raw ROI are run
        through several grayscale preprocessing variants. Reads are filtered
        down to exactly one digit in 1-6. A read scoring >= 85 wins
        immediately; otherwise a (count, summed-score) vote decides after the
        loop, with the single highest-confidence read as the last resort.

        Returns:
            ("number", <one..six>) or ("number", "None") when nothing is read.
        """
        if not self.ocr_available:
            if self.debug:
                self._debug_print("⚠️ OCR 不可用，跳过数字OCR")
            return ("number", "None")

        number_rois = [img for cname, img in roi_list if cname == "number"]
        if len(number_rois) == 0:
            return ("number", "None")

        # Best single read seen so far (fallback when the vote is empty).
        best_digit = None
        best_conf = -1.0
        best_raw = ""
        # Vote tallies across all ROIs/variants: occurrence count + summed score.
        vote_counts: Dict[str, int] = {str(i): 0 for i in range(1, 7)}
        vote_conf_sum: Dict[str, float] = {str(i): 0.0 for i in range(1, 7)}

        allowed_digits = set(list("123456"))

        for idx, num_img in enumerate(number_rois):
            # Try a digit-focused crop first, then the raw ROI.
            refined = self._extract_digit_candidate_bbox(num_img)
            candidates = [refined, num_img] if refined is not None else [num_img]
            preps = []
            for cand in candidates:
                preps.extend(self._preprocess_for_ocr(cand))
            for v_i, prep in enumerate(preps):
                try:
                    ocr_res = self.cnocr.ocr(prep) if self.cnocr is not None else []
                except Exception as e:
                    ocr_res = []
                    if self.debug:
                        self._debug_print(f"CnOCR 失败 V{v_i}: {e}")
                if not isinstance(ocr_res, list):
                    ocr_res = []
                for item in ocr_res:
                    # CnOCR items may be dicts or (text, score) sequences;
                    # scores are rescaled to 0-100 here.
                    text = ''
                    score = 0.0
                    if isinstance(item, dict):
                        text = str(item.get('text', '')).strip()
                        score = float(item.get('score', 0.0)) * 100.0
                    elif isinstance(item, (list, tuple)) and len(item) >= 2:
                        text = str(item[0]).strip()
                        try:
                            score = float(item[1]) * 100.0
                        except Exception:
                            score = 0.0
                    if not text:
                        continue
                    # Keep only reads that reduce to exactly one digit in 1-6.
                    text_digits = ''.join(ch for ch in text if ch in allowed_digits)
                    if len(text_digits) != 1:
                        continue
                    d = text_digits
                    vote_counts[d] += 1
                    vote_conf_sum[d] += max(0.0, score)
                    # High-confidence early exit.
                    if score >= 85.0:
                        return ("number", self.DIGIT_TO_NAME[d])
                    if score > best_conf:
                        best_conf = score
                        best_digit = d
                        best_raw = text
                if self.debug:
                    self._debug_print(f"🔎 {debug_title_prefix} ROI#{idx}/V{v_i} -> best_digit={best_digit} conf={best_conf:.1f}")
        # Majority vote, tie-broken by summed confidence.
        voted_digit = None
        if sum(vote_counts.values()) > 0:
            voted_digit = max(vote_counts.items(), key=lambda kv: (kv[1], vote_conf_sum.get(kv[0], 0.0)))[0]
        if voted_digit is not None and vote_counts[voted_digit] > 0:
            return ("number", self.DIGIT_TO_NAME[voted_digit])
        if best_digit is None:
            return ("number", "None")
        name = self.DIGIT_TO_NAME[best_digit]
        if self.debug:
            self._debug_print(f"✅ {debug_title_prefix} 结果: {name} (digit={best_digit}, conf={best_conf:.1f}, raw='{best_raw}')")
        return ("number", name)

    def finalize_results(self,
                         first_stage_results: List[Any],
                         detections: List[Tuple[str, Tuple[float, List[float]], bool]],
                         ocr_number_result: Tuple[str, str],
                         hsv_cone_result: Tuple[str, str],
                         conf: float = 0.6) -> List[Tuple[str, Union[str, float], Union[str, float]]]:
        """Fuse OCR, HSV and first-stage YOLO outputs into final verdicts.

        Priority: the specialized recognizers win (HSV for the cone color, OCR
        for the digit); the first-stage YOLO result is only a fallback when
        they report "None". The dashboard state always comes from YOLO.

        Args:
            first_stage_results: Raw ultralytics results (may be None/empty).
            detections: Parsed detections; currently unused, kept for API
                compatibility.
            ocr_number_result: ("number", name) from recognize_number_with_ocr.
            hsv_cone_result: ("cone", color) from hsv_cone_color_vote.
            conf: Minimum YOLO confidence for fallback values.

        Returns:
            [("cone", color, src), ("number", name, src), ("dashboard", state, src)]
            where src is "HSV"/"OCR" or the YOLO confidence / 0.0.
        """
        from collections.abc import Iterable

        def _best_by_group_from_result(res_obj, allowed_names: set):
            # Highest-scoring box in res_obj whose class name is allowed.
            boxes = getattr(res_obj, "boxes", None)
            if boxes is None or len(boxes) == 0:
                return None
            id2name = {k: v.lower() for k, v in getattr(res_obj, "names", {}).items()}
            best = None
            for i in range(len(boxes)):
                cls_id = int(boxes.cls[i].item())
                name = id2name.get(cls_id, str(cls_id))
                if name in allowed_names:
                    score = float(boxes.conf[i].item())
                    if best is None or score > best[1]:
                        best = (name, score)
            return best

        def _best_from_first_stage(results, allowed_names: set):
            # Best (name, score) across all result objects, or None.
            if results is None or len(results) == 0:
                return None
            best = None
            res_list = results if isinstance(results, Iterable) else [results]
            for r in res_list:
                cand = _best_by_group_from_result(r, allowed_names)
                if cand is not None and (best is None or cand[1] > best[1]):
                    best = cand
            return best

        final_list: List[Tuple[str, Union[str, float], Union[str, float]]] = []

        # Cone color: HSV vote first, YOLO color class as fallback.
        cone_color = hsv_cone_result[1] if isinstance(hsv_cone_result, tuple) else "None"
        if cone_color == "None":
            best_cone_1 = _best_from_first_stage(first_stage_results, self.COLOR_CLASSES)
            if best_cone_1 is not None and best_cone_1[1] >= conf:
                final_list.append(("cone", best_cone_1[0], best_cone_1[1]))
            else:
                final_list.append(("cone", "None", "HSV"))
        else:
            final_list.append(("cone", cone_color, "HSV"))

        # Digit: OCR first, YOLO number class as fallback.
        number_name = ocr_number_result[1] if isinstance(ocr_number_result, tuple) else "None"
        if number_name == "None":
            best_num_1 = _best_from_first_stage(first_stage_results, self.NUMBER_CLASSES)
            if best_num_1 is not None and best_num_1[1] >= conf:
                final_list.append(("number", best_num_1[0], best_num_1[1]))
            else:
                final_list.append(("number", "None", 0.0))
        else:
            final_list.append(("number", number_name, "OCR"))

        # Dashboard state: first-stage YOLO only.
        best_dash_1 = _best_from_first_stage(first_stage_results, self.STATE_CLASSES)
        if best_dash_1 is not None and best_dash_1[1] >= conf:
            final_list.append(("dashboard", best_dash_1[0], best_dash_1[1]))
        else:
            final_list.append(("dashboard", "None", 0.0))

        # Consistency fix: route diagnostics through _debug_print instead of
        # an unconditional print(), matching the rest of the class.
        self._debug_print({"final_results": final_list})
        return final_list

    def process_image(self,
                      image: np.ndarray,
                      top_cut_ratio: float = 0.25,
                      width_center_ratio: float = 0.5,
                      first_stage_conf: float = 0.6,
                      final_conf: float = 0.6) -> Dict[str, Any]:
        """Run the full pipeline on one frame.

        Steps: crop -> first-stage YOLO -> parse detections -> adaptive ROIs ->
        digit OCR + HSV cone vote -> fuse into final verdicts.

        Args:
            image: Input BGR frame.
            top_cut_ratio: Fraction of the frame height cut from the top.
            width_center_ratio: Fraction of the width kept around the center.
            first_stage_conf: Confidence threshold for YOLO detection/parsing.
            final_conf: Confidence threshold for YOLO fallbacks during fusion.

        Returns:
            Dict with 'final_results', 'cone_color', 'number',
            'dashboard_state' and a 'processing_info' sub-dict; on internal
            failure, a dict of "None" results plus an 'error' message instead.

        Raises:
            ValueError: Only for an empty input image (other errors are caught).
        """
        if image is None or image.size == 0:
            raise ValueError("Input image is empty")
        try:
            cropped = self.crop_image(image, top_cut_ratio, width_center_ratio)
            first_stage_results = self.yolo_first_stage(cropped, conf=first_stage_conf, imgsz=640)
            detections = self.analyze_first_stage_results(first_stage_results, conf=first_stage_conf)
            rois_with_boxes = self.build_adaptive_rois_with_boxes(detections, cropped)
            # The recognizers only need the images, not the bbox coordinates.
            roi_list = [(cls_name, roi_img) for (cls_name, roi_img, _bbox) in rois_with_boxes]
            ocr_number_result = self.recognize_number_with_ocr(roi_list)
            hsv_cone_result = self.hsv_cone_color_vote(roi_list)
            final_results = self.finalize_results(
                first_stage_results,
                detections,
                ocr_number_result,
                hsv_cone_result,
                conf=final_conf,
            )
            # Unpack the fused triples into convenience fields.
            cone_color = "None"
            number = "None"
            dashboard_state = "None"
            for class_name, detected_name, _confidence in final_results:
                if class_name == "cone":
                    cone_color = detected_name
                elif class_name == "number":
                    number = detected_name
                elif class_name == "dashboard":
                    dashboard_state = detected_name
            return {
                'final_results': final_results,
                'cone_color': cone_color,
                'number': number,
                'dashboard_state': dashboard_state,
                'processing_info': {
                    'image_shape': image.shape,
                    'cropped_shape': cropped.shape,
                    'num_detections_stage1': len(detections),
                    'num_rois': len(roi_list),
                    'roi_types': [roi_type for roi_type, _ in roi_list],
                    'roi_boxes': [
                        {'type': cls_name, 'bbox': [int(bx1), int(by1), int(bx2), int(by2)]}
                        for (cls_name, _roi_img, (bx1, by1, bx2, by2)) in rois_with_boxes
                    ],
                    # Offsets to map crop-space coordinates back onto the frame.
                    'crop_offsets': {
                        'top': int(self._last_crop_offsets.get('top', 0)),
                        'left': int(self._last_crop_offsets.get('left', 0)),
                    },
                    'ocr_available': self.ocr_available,
                }
            }
        except Exception as e:
            # Any internal failure degrades to an all-"None" result with a note.
            err = f"管道处理中出错: {e}"
            self._debug_print(err)
            return {
                'final_results': [],
                'cone_color': "None",
                'number': "None",
                'dashboard_state': "None",
                'error': err,
                'processing_info': {}
            }

def create_ocr_hsv_pipeline(model_path: str,
                            device: Optional[str] = None,
                            debug: bool = True,
                            tesseract_cmd: Optional[str] = None) -> YoloOcrHsvPipeline:
    """Factory helper: build a YoloOcrHsvPipeline with the given settings."""
    pipeline = YoloOcrHsvPipeline(
        model_path=model_path,
        device=device,
        debug=debug,
        tesseract_cmd=tesseract_cmd,
    )
    return pipeline

if __name__ == "__main__":
    # Pick the newest available weights file among the known training runs.
    candidate_models = [
        r".\weights\all_yolo_all_epochs_0811\best.pt",
        r".\weights\all_yolo_all_epochs_0808\best.pt",
        r".\weights\all_yolo_all_epochs_0805\best.pt",
        r".\weights\all_yolo_all_epochs_0804\best.pt",
    ]
    model_path = next((p for p in candidate_models if os.path.exists(p)), None)
    if model_path is None:
        print("❌ Model weights not found")
        sys.exit(1)

    # Pick the first available test video.
    candidate_videos = [
        r".\video_2025-08-10_13-17-30.avi",
        r".\video_2025-08-10_13-16-08.avi",
        r".\video_2025-08-03_22-31-55.avi",
    ]
    video_source = next((v for v in candidate_videos if os.path.exists(v)), None)
    if video_source is None:
        print("❌ Video source not found")
        sys.exit(1)

    tesseract_path = None

    pipeline = create_ocr_hsv_pipeline(model_path=model_path, debug=True, tesseract_cmd=tesseract_path)

    cap = cv2.VideoCapture(video_source)
    if not cap.isOpened():
        print(f"❌ 无法打开视频源: {video_source}")
        sys.exit(1)

    # Smoke test: process at most the first 10 frames of the video.
    frame_count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        frame_count += 1
        print(f"--- 正在处理第 {frame_count} 帧 ---")

        res = pipeline.process_image(frame)

        print("🎉 处理完成!")
        print(f"锥筒颜色: {res['cone_color']}")
        print(f"数字: {res['number']}")
        print(f"仪表盘状态: {res['dashboard_state']}")
        print("\n")

        if frame_count >= 10:
            break

    cap.release()
    print("视频处理结束。")