"""
YOLO + OCR + HSV 管道（优化版本）

主要优化：
1. 减少不必要的内存分配和复制
2. 优化图像预处理流程
3. 减少OCR变体数量和预处理步骤
4. 缓存和复用计算结果
5. 优化HSV颜色检测
6. 减少调试输出的开销
"""

import os
import sys
import gc
from typing import List, Tuple, Optional, Union, Dict, Any
import functools
import warnings

import cv2
import numpy as np
import torch
from ultralytics import YOLO

# 禁用不必要的警告
warnings.filterwarnings("ignore")

class OptimizedYoloOcrHsvPipeline:
    """YOLO + OCR + HSV pipeline (optimized variant).

    Runs a YOLO detector over a cropped frame, then refines the result with
    CnOCR digit recognition and HSV-based cone-color voting.
    """

    # Class-name groups emitted by the YOLO model, split by what they describe
    COLOR_CLASSES = {"red", "orange", "yellow", "green", "blue"}
    NUMBER_CLASSES = {"one", "two", "three", "four", "five", "six"}
    STATE_CLASSES = {"down", "normal", "up"}

    # Precompiled HSV bounds per color; each entry stacks [lower, upper] rows
    # (uint8).  Colors spanning the hue wrap-around carry two ranges.
    COLOR_RANGES = {
        "red": [np.array([[0, 20, 225], [4, 188, 255]], dtype=np.uint8)],
        "orange": [
            np.array([[0, 97, 222], [11, 155, 255]], dtype=np.uint8),
            np.array([[18, 97, 222], [179, 155, 255]], dtype=np.uint8)
        ],
        "yellow": [np.array([[23, 122, 219], [32, 193, 255]], dtype=np.uint8)],
        "green": [np.array([[37, 46, 207], [47, 185, 255]], dtype=np.uint8)],
        "blue": [np.array([[92, 171, 222], [108, 239, 255]], dtype=np.uint8)],
    }

    # Maps a recognized digit character to its spoken class name
    DIGIT_TO_NAME = {"1": "one", "2": "two", "3": "three", "4": "four", "5": "five", "6": "six"}

    def __init__(self,
                 model_path: str,
                 device: Optional[str] = None,
                 debug: bool = False,  # debug output disabled by default
                 enable_warmup: bool = True):
        """Initialize the pipeline: load YOLO weights, optionally warm up the
        model, and set up the CnOCR digit recognizer.

        Args:
            model_path: Path to the YOLO weight file (.pt); must exist.
            device: Inference device ('cuda'/'cpu'); auto-detected when None.
            debug: When True, prints diagnostic information.
            enable_warmup: Run one dummy inference to cut first-call latency.

        Raises:
            FileNotFoundError: If ``model_path`` does not exist.
        """
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model weights not found: {model_path}")

        self.debug = debug
        self.device = device if device is not None else ('cuda' if torch.cuda.is_available() else 'cpu')

        # Put the underlying torch module in eval mode right after loading
        self.model = YOLO(model_path)
        if hasattr(self.model, 'model'):
            self.model.model.eval()

        if enable_warmup:
            self._warmup_model()

        # Initialize CnOCR once; sets self.cnocr / self.ocr_available
        self._init_cnocr()

        # Pre-allocated morphology kernel reused by every HSV mask
        self._morph_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

        # Offsets of the most recent crop (updated by crop_image)
        self._last_crop_offsets = {"top": 0, "left": 0}

        if self.debug:
            print(f"🔧 优化管道初始化 - 设备: {self.device}, CnOCR: {'OK' if self.ocr_available else 'Unavailable'}")

    def _warmup_model(self):
        """模型预热，减少首次推理延迟"""
        try:
            dummy = np.full((640, 640, 3), 128, dtype=np.uint8)
            with torch.no_grad():
                _ = self.model.predict(dummy, verbose=False, conf=0.1, device=self.device, save=False, show=False)
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            if self.debug:
                print("✅ 模型预热完成")
        except Exception as e:
            if self.debug:
                print(f"⚠️ 模型预热失败: {e}")

    def _init_cnocr(self):
        """Create the CnOCR digit recognizer restricted to the digits 1-6;
        mark OCR as unavailable when the package or model cannot be loaded.
        """
        self.cnocr = None
        self.ocr_available = False
        try:
            from cnocr import CnOcr
            self.cnocr = CnOcr(
                model_name='number-densenet_lite_136-fc',
                det_model_name='en_PP-OCRv3_det',
                cand_alphabet=['1', '2', '3', '4', '5', '6']
            )
            self.ocr_available = True
        except Exception:
            # Leave the "unavailable" defaults in place
            self.cnocr = None
            self.ocr_available = False

    @staticmethod
    @functools.lru_cache(maxsize=8)
    def _get_resize_dimensions(original_height: int, original_width: int, scale: float) -> Tuple[int, int]:
        """Return the cached (width, height) target size for scaling by ``scale``.

        Declared as a staticmethod so ``lru_cache`` keys on the dimensions
        alone: caching a bound method would include ``self`` in the key and
        keep every pipeline instance alive for the lifetime of the cache
        (the classic lru_cache-on-method leak).  Call sites using
        ``self._get_resize_dimensions(h, w, scale)`` are unaffected.
        """
        return int(original_width * scale), int(original_height * scale)

    def crop_image(self, image: np.ndarray,
                   top_cut_ratio: float = 0.25,
                   width_center_ratio: float = 0.5) -> np.ndarray:
        """优化的图像裁剪"""
        if image is None or image.size == 0:
            raise ValueError("Input image is empty")
            
        h, w = image.shape[:2]
        
        # 直接计算裁剪区域
        top_cut = int(h * top_cut_ratio)
        center_width = int(w * width_center_ratio)
        w_x1 = (w - center_width) // 2
        w_x2 = w_x1 + center_width
        
        # 一次性裁剪
        cropped = image[top_cut:, w_x1:w_x2]
        
        # 更新偏移缓存
        self._last_crop_offsets = {"top": top_cut, "left": w_x1}
        
        return cropped

    def yolo_first_stage(self, image_bgr: np.ndarray,
                          conf: float = 0.6,
                          imgsz: int = 640) -> List[Any]:
        """优化的YOLO推理"""
        if image_bgr is None or image_size == 0:
            raise ValueError("Input image is empty")
            
        # 确保图像连续性（只在需要时转换）
        if not image_bgr.flags['C_CONTIGUOUS']:
            image_bgr = np.ascontiguousarray(image_bgr)
            
        # 直接推理，减少上下文切换
        with torch.no_grad():
            results = self.model.predict(
                image_bgr,
                verbose=False,
                conf=conf,
                device=self.device,
                save=False,
                show=False,
                imgsz=imgsz,
            )
        
        return results

    def analyze_first_stage_results(self, results: List[Any], conf: float) -> List[Tuple[str, Tuple[float, List[float]], bool]]:
        """优化的结果分析"""
        if not results or len(results) == 0:
            return []
            
        detections = []
        res = results[0]  # 通常只有一个结果
        
        boxes = getattr(res, "boxes", None)
        if boxes is None or len(boxes) == 0:
            return detections
            
        # 预计算名称映射
        id2name = {k: v.lower() for k, v in getattr(res, "names", {}).items()}
        
        # 批量处理所有检测框
        cls_ids = boxes.cls.cpu().numpy()
        confs = boxes.conf.cpu().numpy()
        boxes_xyxy = boxes.xyxy.cpu().numpy()
        
        for i, (cls_id, prob, box) in enumerate(zip(cls_ids, confs, boxes_xyxy)):
            name = id2name.get(int(cls_id), str(int(cls_id)))
            is_confident = prob >= conf
            detections.append((name, (float(prob), box.tolist()), is_confident))
            
        return detections

    def _expand_and_clip_box(self, box: List[float], pad_ratio: float, H: int, W: int) -> Tuple[int, int, int, int]:
        """Grow a box by ``pad_ratio`` of its size on every side, then clip it
        to the image bounds and truncate to ints.

        Returns:
            (x1, y1, x2, y2), each clamped into [0, W] / [0, H].
        """
        x1, y1, x2, y2 = box
        pad_x = max(1, x2 - x1) * pad_ratio
        pad_y = max(1, y2 - y1) * pad_ratio

        def clamp(value, upper):
            # int() truncation matches the original behavior
            return max(0, min(int(value), upper))

        return (
            clamp(x1 - pad_x, W),
            clamp(y1 - pad_y, H),
            clamp(x2 + pad_x, W),
            clamp(y2 + pad_y, H),
        )

    def build_adaptive_rois(self, detections: List[Tuple[str, Tuple[float, List[float]], bool]],
                           base_image_bgr: np.ndarray) -> List[Tuple[str, np.ndarray]]:
        """优化的自适应ROI构建"""
        if base_image_bgr is None or base_image_bgr.size == 0:
            return []
            
        H, W = base_image_bgr.shape[:2]
        
        # 快速查找最佳检测
        best_number = self._find_best_detection(detections, self.NUMBER_CLASSES)
        best_dash = self._find_best_detection(detections, self.STATE_CLASSES)
        
        if best_number is None and best_dash is None:
            return []
            
        rois = []
        
        if best_dash is not None:
            # 基于仪表盘构建ROI
            _, box_d, _ = best_dash
            x1_d, y1_d, x2_d, y2_d = [int(x) for x in box_d]
            h_d = max(1, y2_d - y1_d)
            w_d = max(1, x2_d - x1_d)
            x_center_d = (x1_d + x2_d) // 2
            
            # 数字区域
            num_y1 = max(0, y1_d - h_d)
            num_y2 = y1_d
            
            # 宽度比例
            width_ratio = min(1.0, max(0.3, w_d / W))
            half_width = int(W * width_ratio / 2)
            num_x1 = max(0, x_center_d - half_width)
            num_x2 = min(W, x_center_d + half_width)
            
            if num_y2 > num_y1 and num_x2 > num_x1:
                num_roi = base_image_bgr[num_y1:num_y2, num_x1:num_x2]
                if num_roi.size > 0:
                    rois.append(("number", num_roi))
                    
                    # 锥筒区域（数字区域的上半部分）
                    cone_h = max(1, num_roi.shape[0] // 2)
                    cone_roi = num_roi[:cone_h, :]
                    if cone_roi.size > 0:
                        rois.append(("cone", cone_roi))
        
        elif best_number is not None:
            # 基于数字检测构建ROI
            _, box_n, _ = best_number
            nx1, ny1, nx2, ny2 = self._expand_and_clip_box(box_n, 0.15, H, W)
            
            number_roi = base_image_bgr[ny1:ny2, nx1:nx2]
            if number_roi.size > 0:
                rois.append(("number", number_roi))
                
                # 锥筒区域
                cone_h = max(1, number_roi.shape[0] // 2)
                cone_roi = number_roi[:cone_h, :]
                if cone_roi.size > 0:
                    rois.append(("cone", cone_roi))
        
        return rois

    def _find_best_detection(self, detections: List[Tuple[str, Tuple[float, List[float]], bool]], 
                           allowed_names: set) -> Optional[Tuple[float, List[float], str]]:
        """Return the highest-probability detection whose name is in
        ``allowed_names``, as (prob, box, name); None when there is no match.
        Ties keep the earliest entry (max() keeps the first maximum).
        """
        candidates = [
            (prob, box, name)
            for name, (prob, box), _flag in detections
            if name in allowed_names
        ]
        if not candidates:
            return None
        return max(candidates, key=lambda entry: entry[0])

    def _make_color_masks_optimized(self, hsv_image: np.ndarray, color_name: str) -> np.ndarray:
        """Build a binary mask of pixels falling inside the HSV range(s) of
        ``color_name``, then denoise it with an open + close pass using the
        pre-allocated elliptical kernel.
        """
        mask = None
        for bounds in self.COLOR_RANGES[color_name]:
            # bounds stacks [lower, upper]; OR together multi-range colors
            part = cv2.inRange(hsv_image, bounds[0], bounds[1])
            mask = part if mask is None else cv2.bitwise_or(mask, part)

        # Open removes speckle, close fills small holes
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self._morph_kernel, iterations=1)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, self._morph_kernel, iterations=1)

        return mask

    def _predict_color_by_hsv_optimized(self, image_bgr: np.ndarray) -> Tuple[str, Dict[str, int]]:
        """Estimate the dominant cone color of an image via HSV masks.

        The image is downscaled to 320x320, highlights are tamed in LAB
        (L capped at 230), saturation is boosted and value capped in HSV,
        and each color's mask pixel count is tallied.

        Returns:
            (winning color name, per-color pixel counts).
        """
        small = cv2.resize(image_bgr, (320, 320))

        # Cap lightness in LAB to suppress blown-out highlights
        lab = cv2.cvtColor(small, cv2.COLOR_BGR2LAB)
        l_ch, a_ch, b_ch = cv2.split(lab)
        l_ch = np.minimum(l_ch, 230)
        small = cv2.cvtColor(cv2.merge([l_ch, a_ch, b_ch]), cv2.COLOR_LAB2BGR)

        # Boost saturation slightly and cap value before masking
        hsv = cv2.cvtColor(small, cv2.COLOR_BGR2HSV)
        h_ch, s_ch, v_ch = cv2.split(hsv)
        s_ch = cv2.convertScaleAbs(s_ch, alpha=1.2, beta=0)
        v_ch = np.minimum(v_ch, 245)
        hsv = cv2.merge([h_ch, s_ch, v_ch])

        counts = {
            name: int(cv2.countNonZero(self._make_color_masks_optimized(hsv, name)))
            for name in ("red", "orange", "yellow", "green", "blue")
        }

        winner = max(counts.items(), key=lambda kv: kv[1])[0]
        return winner, counts

    def hsv_cone_color_vote(self, roi_list: List[Tuple[str, np.ndarray]]) -> Tuple[str, str]:
        """Vote the cone color across every "cone" ROI by summing HSV mask
        pixel counts per color.

        Returns:
            ("cone", <color name>), or ("cone", "None") when there are no
            cone ROIs or no color pixels at all.
        """
        cone_images = [img for label, img in roi_list if label == "cone"]
        if not cone_images:
            return ("cone", "None")

        tallies = dict.fromkeys(["red", "orange", "yellow", "green", "blue"], 0)

        for img in cone_images:
            _, counts = self._predict_color_by_hsv_optimized(img)
            for color in tallies:
                tallies[color] += counts.get(color, 0)

        winner = max(tallies.items(), key=lambda kv: kv[1])[0]
        return ("cone", winner if tallies[winner] > 0 else "None")

    def _preprocess_for_ocr_optimized(self, img_bgr: np.ndarray) -> List[np.ndarray]:
        """Produce two grayscale variants of a ROI for OCR: a cubic upscale
        and a lightly sharpened copy of it.  Returns [] for empty input.
        """
        if img_bgr is None or img_bgr.size == 0:
            return []

        gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
        rows, cols = gray.shape[:2]

        # Small crops get a stronger upscale before recognition
        factor = 2.5 if max(rows, cols) < 100 else 2.0
        target_w, target_h = self._get_resize_dimensions(rows, cols, factor)
        upscaled = cv2.resize(gray, (target_w, target_h), interpolation=cv2.INTER_CUBIC)

        # Mild unsharp mask: subtract a fraction of a Gaussian-blurred copy
        softened = cv2.GaussianBlur(upscaled, (0, 0), 0.5)
        sharpened = cv2.addWeighted(upscaled, 1.1, softened, -0.1, 0)

        return [upscaled, sharpened]

    def recognize_number_with_ocr_optimized(self, roi_list: List[Tuple[str, np.ndarray]]) -> Tuple[str, str]:
        """Recognize the digit (1-6) shown in the "number" ROIs via CnOCR.

        For each number ROI, a tightened digit crop is attempted first and the
        full ROI used as fallback; every candidate is preprocessed into two
        variants, OCR'd, and single-digit hits vote by average confidence.
        A hit scoring >= 90 short-circuits and is returned immediately.

        Args:
            roi_list: (label, image) pairs; only "number" entries are used.

        Returns:
            ("number", <class name such as "three">), or ("number", "None")
            when OCR is unavailable or nothing was recognized.
        """
        if not self.ocr_available:
            return ("number", "None")

        number_rois = [img for cname, img in roi_list if cname == "number"]
        if not number_rois:
            return ("number", "None")

        # Per-digit confidence samples for the final averaged vote
        digit_scores = {str(i): [] for i in range(1, 7)}
        allowed_digits = set("123456")

        for num_img in number_rois:
            # Prefer the refined digit bounding box; fall back to the full ROI
            refined = self._extract_digit_candidate_bbox_optimized(num_img)
            candidates = [refined] if refined is not None else [num_img]
            
            for cand in candidates:
                preps = self._preprocess_for_ocr_optimized(cand)
                
                for prep in preps:
                    try:
                        ocr_res = self.cnocr.ocr(prep) if self.cnocr is not None else []
                    except Exception:
                        # OCR failure on a single variant is non-fatal
                        continue
                    
                    if not isinstance(ocr_res, list):
                        continue
                    
                    for item in ocr_res:
                        text, score = self._extract_ocr_result(item)
                        if not text:
                            continue
                            
                        # Keep only the digits 1-6; vote only on unambiguous
                        # single-digit reads
                        text_digits = ''.join(ch for ch in text if ch in allowed_digits)
                        if len(text_digits) == 1:
                            digit = text_digits
                            digit_scores[digit].append(max(0.0, score))
                            
                            # Early exit on a high-confidence hit
                            if score >= 90.0:
                                return ("number", self.DIGIT_TO_NAME[digit])

        # Pick the digit with the best average confidence
        best_digit = None
        best_avg_score = -1
        
        for digit, scores in digit_scores.items():
            if scores:
                avg_score = sum(scores) / len(scores)
                if avg_score > best_avg_score:
                    best_avg_score = avg_score
                    best_digit = digit

        if best_digit is not None:
            return ("number", self.DIGIT_TO_NAME[best_digit])
        
        return ("number", "None")

    def _extract_ocr_result(self, item) -> Tuple[str, float]:
        """Normalize one CnOCR result item into (text, score-in-percent).

        Accepts either a dict with 'text'/'score' keys or a (text, score)
        sequence; anything else yields ('', 0.0).  A non-numeric score in the
        sequence form degrades to 0.0 (the dict form propagates the error,
        matching the original behavior).
        """
        if isinstance(item, dict):
            text = str(item.get('text', '')).strip()
            return text, float(item.get('score', 0.0)) * 100.0

        if isinstance(item, (list, tuple)) and len(item) >= 2:
            text = str(item[0]).strip()
            try:
                return text, float(item[1]) * 100.0
            except Exception:
                return text, 0.0

        return '', 0.0

    def _extract_digit_candidate_bbox_optimized(self, img_bgr: np.ndarray) -> Optional[np.ndarray]:
        """Tighten a number ROI to its most digit-like contour, with padding.

        Otsu-binarizes the grayscale ROI, scans external contours, and keeps
        the largest one whose area covers 1%-60% of the ROI and whose
        width/height ratio lies in 0.2-1.2 (digit-shaped, taller than wide or
        roughly square).  Returns the 10%-padded sub-image around that
        contour; returns the input unchanged when the ROI is tiny or no
        plausible contour exists, and None only for a None/empty input.
        """
        if img_bgr is None or img_bgr.size == 0:
            return None
            
        gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
        h, w = gray.shape[:2]
        
        # Too small to segment reliably; let OCR see the ROI as-is
        if h < 10 or w < 10:
            return img_bgr
        
        # Single Otsu threshold instead of multiple binarization attempts
        _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        
        contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            return img_bgr
        
        # Largest contour passing the area-ratio and aspect-ratio filters
        best_contour = None
        max_area = 0
        
        for contour in contours:
            area = cv2.contourArea(contour)
            if area <= 0:
                continue
                
            ratio = area / (h * w)
            if 0.01 <= ratio <= 0.6:
                x, y, ww, hh = cv2.boundingRect(contour)
                aspect_ratio = ww / max(1, hh)
                if 0.2 <= aspect_ratio <= 1.2 and area > max_area:
                    max_area = area
                    best_contour = contour
        
        if best_contour is not None:
            x, y, ww, hh = cv2.boundingRect(best_contour)
            # Pad the box by 10% per side, clipped to the ROI bounds
            pad_x = max(1, int(0.1 * ww))
            pad_y = max(1, int(0.1 * hh))
            x1 = max(0, x - pad_x)
            y1 = max(0, y - pad_y)
            x2 = min(w, x + ww + pad_x)
            y2 = min(h, y + hh + pad_y)
            
            result = img_bgr[y1:y2, x1:x2]
            return result if result.size > 0 else img_bgr
        
        return img_bgr

    def finalize_results(self, first_stage_results: List[Any], detections: List[Tuple[str, Tuple[float, List[float]], bool]],
                        ocr_number_result: Tuple[str, str], hsv_cone_result: Tuple[str, str], conf: float = 0.6) -> List[Tuple[str, Union[str, float], Union[str, float]]]:
        """Merge OCR/HSV refinements with raw YOLO detections.

        HSV wins for the cone color and OCR wins for the number when they
        produced a result; otherwise the best raw YOLO detection above
        ``conf`` is used.  The dashboard state always comes from YOLO.

        Returns:
            [("cone", value, source), ("number", value, source),
             ("dashboard", value, source)] where value may be "None".
        """
        merged = []

        # Cone: HSV result first, YOLO color class as fallback
        cone_color = hsv_cone_result[1] if isinstance(hsv_cone_result, tuple) else "None"
        if cone_color != "None":
            merged.append(("cone", cone_color, "HSV"))
        else:
            fallback = self._find_best_from_results(first_stage_results, self.COLOR_CLASSES, conf)
            merged.append(("cone", fallback[0], fallback[1]) if fallback else ("cone", "None", "HSV"))

        # Number: OCR result first, YOLO number class as fallback
        number_name = ocr_number_result[1] if isinstance(ocr_number_result, tuple) else "None"
        if number_name != "None":
            merged.append(("number", number_name, "OCR"))
        else:
            fallback = self._find_best_from_results(first_stage_results, self.NUMBER_CLASSES, conf)
            merged.append(("number", fallback[0], fallback[1]) if fallback else ("number", "None", 0.0))

        # Dashboard state: YOLO only
        dash = self._find_best_from_results(first_stage_results, self.STATE_CLASSES, conf)
        merged.append(("dashboard", dash[0], dash[1]) if dash else ("dashboard", "None", 0.0))

        return merged

    def _find_best_from_results(self, results: List[Any], allowed_names: set, conf: float) -> Optional[Tuple[str, float]]:
        """从结果中找到最佳检测"""
        if not results:
            return None
            
        best = None
        for res in results:
            boxes = getattr(res, "boxes", None)
            if boxes is None or len(boxes) == 0:
                continue
                
            id2name = {k: v.lower() for k, v in getattr(res, "names", {}).items()}
            cls_ids = boxes.cls.cpu().numpy()
            confs = boxes.conf.cpu().numpy()
            
            for cls_id, score in zip(cls_ids, confs):
                name = id2name.get(int(cls_id), str(int(cls_id)))
                if name in allowed_names and score >= conf:
                    if best is None or score > best[1]:
                        best = (name, float(score))
        
        return best

    def process_image(self, image: np.ndarray,
                      top_cut_ratio: float = 0.25,
                      width_center_ratio: float = 0.5,
                      first_stage_conf: float = 0.6,
                      final_conf: float = 0.6) -> Dict[str, Any]:
        """优化的图像处理主函数"""
        if image is None or image.size == 0:
            raise ValueError("Input image is empty")
            
        try:
            # 图像预处理
            cropped = self.crop_image(image, top_cut_ratio, width_center_ratio)
            
            # YOLO检测
            first_stage_results = self.yolo_first_stage(cropped, conf=first_stage_conf, imgsz=640)
            detections = self.analyze_first_stage_results(first_stage_results, conf=first_stage_conf)
            
            # 构建ROI
            roi_list = self.build_adaptive_rois(detections, cropped)
            
            # 并行处理OCR和HSV（如果有多个线程）
            ocr_result = self.recognize_number_with_ocr_optimized(roi_list)
            hsv_result = self.hsv_cone_color_vote(roi_list)
            
            # 合并结果
            final_results = self.finalize_results(
                first_stage_results, detections, ocr_result, hsv_result, conf=final_conf
            )
            
            # 提取最终结果
            cone_color = "None"
            number = "None"  
            dashboard_state = "None"
            
            for class_name, detected_name, _confidence in final_results:
                if class_name == "cone":
                    cone_color = detected_name
                elif class_name == "number":
                    number = detected_name
                elif class_name == "dashboard":
                    dashboard_state = detected_name
            
            return {
                'final_results': final_results,
                'cone_color': cone_color,
                'number': number,
                'dashboard_state': dashboard_state,
                'processing_info': {
                    'image_shape': image.shape,
                    'cropped_shape': cropped.shape,
                    'num_detections': len(detections),
                    'num_rois': len(roi_list),
                    'crop_offsets': dict(self._last_crop_offsets),
                    'ocr_available': self.ocr_available,
                } if self.debug else {}
            }
            
        except Exception as e:
            return {
                'final_results': [],
                'cone_color': "None",
                'number': "None", 
                'dashboard_state': "None",
                'error': str(e),
                'processing_info': {}
            }


def create_optimized_pipeline(model_path: str, device: Optional[str] = None, debug: bool = False) -> OptimizedYoloOcrHsvPipeline:
    """Factory helper: build a fully initialized OptimizedYoloOcrHsvPipeline.

    Args:
        model_path: Path to the YOLO weight file; must exist.
        device: Inference device ('cuda'/'cpu'); auto-detected when None.
        debug: Enable diagnostic printing.

    Returns:
        A ready-to-use pipeline instance.
    """
    return OptimizedYoloOcrHsvPipeline(model_path=model_path, device=device, debug=debug)


if __name__ == "__main__":
    # Demo / benchmark entry point: pick the newest available weights and
    # video, then time the pipeline over a short run of frames.
    candidate_models = [
        r".\weights\all_yolo_all_epochs_0811\best.pt",
        r".\weights\all_yolo_all_epochs_0808\best.pt", 
        r".\weights\all_yolo_all_epochs_0805\best.pt",
        r".\weights\all_yolo_all_epochs_0804\best.pt",
    ]
    
    model_path = None
    for path in candidate_models:
        if os.path.exists(path):
            model_path = path
            break
    
    if model_path is None:
        print("❌ Model weights not found")
        sys.exit(1)

    candidate_videos = [
        r".\video_2025-08-10_13-17-30.avi",
        r".\video_2025-08-10_13-16-08.avi", 
        r".\video_2025-08-03_22-31-55.avi",
    ]
    
    video_source = None
    for video_path in candidate_videos:
        if os.path.exists(video_path):
            video_source = video_path
            break
            
    if video_source is None:
        print("❌ Video source not found")
        sys.exit(1)

    # Build the pipeline with debug disabled for accurate timing
    pipeline = create_optimized_pipeline(model_path=model_path, debug=False)  # debug off for speed
    
    cap = cv2.VideoCapture(video_source)
    if not cap.isOpened():
        print(f"❌ 无法打开视频源: {video_source}")
        sys.exit(1)

    print("🚀 开始处理视频（优化版本）...")
    
    import time
    frame_count = 0
    total_time = 0
    
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        
        frame_count += 1
        start_time = time.time()
        
        # Run the full pipeline on this frame
        result = pipeline.process_image(frame)
        
        end_time = time.time()
        frame_time = end_time - start_time
        total_time += frame_time
        
        print(f"帧 {frame_count}: {result['cone_color']}, {result['number']}, {result['dashboard_state']} ({frame_time:.3f}s)")
        
        if frame_count >= 20:  # stop after a short benchmark run
            break

    cap.release()
    
    # Summarize throughput (guarded against an empty video)
    if frame_count > 0:
        avg_fps = frame_count / total_time
        avg_time_per_frame = total_time / frame_count
        print(f"\n📊 性能统计:")
        print(f"总帧数: {frame_count}")
        print(f"总时间: {total_time:.2f}s")
        print(f"平均FPS: {avg_fps:.2f}")
        print(f"平均每帧时间: {avg_time_per_frame:.3f}s")
    
    print("✅ 优化版本处理完成！")