import cv2
from ultralytics import YOLO
import torch
import time
from collections import Counter
import math
import numpy as np


class YoloInspector:
    """
    A YOLO inspection-task processor using a two-step detection strategy
    to improve accuracy.

    Pipeline:
    - Center-crop each frame to compensate for a wide-angle lens.
    - Step 1: on the cropped image, detect the most prominent gauge
      (the "anchor") and the number label closest to it.
    - Step 2: dynamically crop the region above the gauge, upscale it,
      and run a color-only prediction to find the (small) colored cones.
    - Collect and stabilize all detections over a 2-second window.
    - Format and print the final result by majority vote.
    """

    def __init__(self, model_path, conf_threshold=0.5):
        """
        Initialize the inspector.

        :param model_path: Path to the YOLO model file (.pt).
        :param conf_threshold: Confidence threshold used to filter detections.
        """
        # Prefer GPU when available.
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print(f"正在使用设备: {self.device}")

        # Load the model.
        self.model = YOLO(model_path)

        # State-machine variables for the 2-second stabilization window.
        self.is_inspecting = False
        self.inspection_start_time = None
        self.INSPECTION_WINDOW_S = 2.0
        self.CONF_THRESHOLD = conf_threshold

        # Detections accumulated inside the current window.
        self.collected_colors = []
        self.collected_numbers = []
        self.collected_states = []

        # Class-name groups the model is expected to produce.
        self.COLOR_CLASSES = ["red", "orange", "blue", "green", "yellow"]
        self.NUMBER_CLASSES = ["one", "two", "three", "four", "five", "six"]
        self.STATE_CLASSES = ["down", "normal", "up"]

        # FIX: resolve the model's actual class IDs for the color classes.
        # The original code passed indices 0..4 to `classes=`, silently
        # assuming the color classes occupy IDs 0..4 in the trained model.
        model_names = self.model.names  # usually {class_id: class_name}
        name_items = model_names.items() if isinstance(model_names, dict) else enumerate(model_names)
        self.color_class_ids = [cid for cid, cname in name_items if cname in self.COLOR_CLASSES]

        # English label -> Chinese display text.
        self.color_map = {"red": "红色", "orange": "橙色", "blue": "蓝色", "green": "绿色", "yellow": "黄色"}
        self.number_map = {"one": "1号", "two": "2号", "three": "3号", "four": "4号", "five": "5号", "six": "6号"}
        self.state_map = {"down": "偏低", "normal": "正常", "up": "偏高"}

    def _get_center(self, box):
        """Return the (x, y) center of an xyxy bounding box."""
        x1, y1, x2, y2 = box
        return (x1 + x2) / 2, (y1 + y2) / 2

    def _get_dominant_object_info(self, boxes, class_names, target_classes):
        """
        Find the most prominent detection (largest bounding-box area) among
        ``target_classes``.

        :return: {'class_name': str, 'box': [x1, y1, x2, y2]} or None.
        """
        if not boxes:
            return None

        dominant_info = None
        max_area = -1.0
        for i in range(len(boxes)):
            class_name = class_names.get(int(boxes.cls[i].item()))
            if class_name not in target_classes:
                continue
            box = boxes.xyxy[i].tolist()
            area = (box[2] - box[0]) * (box[3] - box[1])
            if area > max_area:
                max_area = area
                dominant_info = {'class_name': class_name, 'box': box}
        return dominant_info

    def _find_closest_object(self, anchor_box, all_boxes, class_names, target_classes):
        """
        Among detections restricted to ``target_classes``, return the class
        name of the one whose center is closest to the anchor box's center,
        or None when nothing matches.
        """
        if not all_boxes:
            return None

        anchor_center = self._get_center(anchor_box)
        closest_object_name = None
        min_dist = float('inf')
        for i in range(len(all_boxes)):
            class_name = class_names.get(int(all_boxes.cls[i].item()))
            if class_name not in target_classes:
                continue
            center = self._get_center(all_boxes.xyxy[i].tolist())
            dist = math.dist(anchor_center, center)
            if dist < min_dist:
                min_dist = dist
                closest_object_name = class_name
        return closest_object_name

    def _maybe_finalize(self):
        """Finalize the current window if it is active and has expired."""
        if self.is_inspecting and (time.time() - self.inspection_start_time > self.INSPECTION_WINDOW_S):
            self._finalize_and_print_results()

    def _detect_color_above(self, cropped_frame, anchor_box):
        """
        Step 2 helper: crop the region above the anchor, upscale it 2x, and
        run a color-only prediction.

        :return: dominant color class name, or None.
        """
        y_top_of_anchor = int(anchor_box[1])
        color_roi = cropped_frame[0:y_top_of_anchor, :]

        # Skip degenerate ROIs (anchor near the top of the frame).
        if color_roi.shape[0] <= 10 or color_roi.shape[1] <= 10:
            return None

        # Upscale the ROI to improve detection of small targets.
        h_roi, w_roi, _ = color_roi.shape
        scale_factor = 2.0
        new_size = (int(w_roi * scale_factor), int(h_roi * scale_factor))
        resized_color_roi = cv2.resize(color_roi, new_size, interpolation=cv2.INTER_LANCZOS4)

        # Restrict prediction to the model's actual color class IDs
        # (resolved in __init__ from self.model.names); fall back to no
        # filter if none were found, relying on the name filter below.
        results_color = self.model.predict(
            resized_color_roi, verbose=False, conf=self.CONF_THRESHOLD,
            classes=self.color_class_ids or None,
        )
        if not results_color or not results_color[0].boxes:
            return None

        dominant_color_info = self._get_dominant_object_info(
            results_color[0].boxes, results_color[0].names, self.COLOR_CLASSES
        )
        return dominant_color_info['class_name'] if dominant_color_info else None

    def process_frame(self, frame):
        """
        Process a single frame: center-crop, then run the two-step detection.
        """
        # 1. Center crop: keep only the middle half of the frame width
        # (wide-angle lens compensation).
        h, w, _ = frame.shape
        cropped_frame = frame[:, w // 4: w * 3 // 4]

        # --- Step 1: detect gauge state and its associated number ---
        results_full = self.model.predict(cropped_frame, verbose=False, conf=self.CONF_THRESHOLD)
        if not results_full or not results_full[0].boxes:
            self._maybe_finalize()
            return

        boxes_full = results_full[0].boxes
        class_names_full = results_full[0].names

        # The most prominent gauge serves as the "anchor".
        dominant_state_info = self._get_dominant_object_info(boxes_full, class_names_full, self.STATE_CLASSES)
        if dominant_state_info is None:
            self._maybe_finalize()
            return

        # Anchor seen: start a new window or keep the current one running.
        if not self.is_inspecting:
            print("检测到目标，开始2秒钟的稳定识别...")
            self.is_inspecting = True
            self.inspection_start_time = time.time()
            self.collected_colors.clear()
            self.collected_numbers.clear()
            self.collected_states.clear()

        anchor_box = dominant_state_info['box']
        state_name = dominant_state_info['class_name']

        # Number label closest to the anchor.
        closest_number = self._find_closest_object(anchor_box, boxes_full, class_names_full, self.NUMBER_CLASSES)

        # --- Step 2: crop + upscale the region above the gauge for cones ---
        closest_color = self._detect_color_above(cropped_frame, anchor_box)

        # Accumulate this frame's observations.
        self.collected_states.append(state_name)
        if closest_number:
            self.collected_numbers.append(closest_number)
        if closest_color:
            self.collected_colors.append(closest_color)

        # End the window once it has run its full duration.
        self._maybe_finalize()

    def _finalize_and_print_results(self):
        """
        Aggregate the window's detections by majority vote, print the final
        report, and reset the window state.
        """
        print("2秒识别结束，正在生成最终结果...")

        try:
            if not self.collected_states:
                print("在检测窗口内未能稳定识别出任何仪表盘状态。\n")
                return

            final_state_en = Counter(self.collected_states).most_common(1)[0][0]
            final_color_en = Counter(self.collected_colors).most_common(1)[0][0] if self.collected_colors else None
            final_number_en = Counter(self.collected_numbers).most_common(1)[0][0] if self.collected_numbers else None

            final_color_zh = self.color_map.get(final_color_en, "未知颜色")
            final_number_zh = self.number_map.get(final_number_en, "未知编号")
            # FIX: fall back to a placeholder instead of printing "仪表盘None"
            # should the state label ever be missing from the map.
            final_state_zh = self.state_map.get(final_state_en, "未知状态")

            output_parts = [f"{final_color_zh}区域", final_number_zh, f"仪表盘{final_state_zh}"]
            final_output = " ".join(filter(None, output_parts))

            print("\n--- 最终巡检结果 ---")
            print(final_output)
            print("-" * 22 + "\n")

        except Exception as e:
            print(f"处理结果时发生错误: {e}\n")

        finally:
            # Always reset the window, even when an error occurred.
            self.is_inspecting = False
            self.inspection_start_time = None
            self.collected_colors.clear()
            self.collected_numbers.clear()
            self.collected_states.clear()

def main():
    """Entry point: open the default camera and run live inspection."""
    # --- Configuration ---
    # IMPORTANT: point this at your own trained YOLOv8 weights file.
    model_path = r"D:\python.learn\python.project\yolo_dataset\250730\runs\0_epochs100\weights\best.pt"

    try:
        # Confidence threshold may be tuned as needed.
        inspector = YoloInspector(model_path, conf_threshold=0.65)
    except Exception as e:
        print(f"错误：无法加载模型，请检查路径 '{model_path}' 是否正确。错误信息: {e}")
        return

    capture = cv2.VideoCapture(0)
    if not capture.isOpened():
        print("错误：无法打开摄像头。")
        return

    print("\n摄像头已启动。")
    print(">>> 启动两步识别模式：优先识别仪表盘和数字，再裁剪并放大识别锥形桶颜色。<<<")
    print("正在等待巡检目标... 按 Ctrl+C 退出。")

    try:
        while True:
            ok, frame = capture.read()
            if not ok:
                print("错误：无法从摄像头捕获帧。")
                break

            inspector.process_frame(frame)

            # Tiny sleep to keep CPU usage reasonable.
            time.sleep(0.01)

    except KeyboardInterrupt:
        print("\n检测已由用户停止。")
    finally:
        capture.release()
        cv2.destroyAllWindows()
        print("资源已释放。")


if __name__ == "__main__":
    main()