# -*- coding: utf-8 -*-
# vis_video.py
"""
可视化模块：在视频帧上叠加中心带、检测框、当前区域状态（可清/禁清）等 HUD 信息。
使用示例见文件末尾 CLI。

依赖：opencv-python（必须），若要在线推理需安装 ultralytics 并提供 YOLOv8 权重。
"""
from __future__ import annotations
import argparse
import time
from typing import Dict, List, Tuple, Optional
from collections import deque, Counter

import cv2
import numpy as np


class CenterBandVoter:
    """Sliding-window voter for the class occupying the center band.

    Per-frame detections are accumulated over the last ``window_size`` frames
    and each class is scored by ``confidence * overlap_ratio``, so a single
    noisy frame cannot flip the decision (five-frame voting mechanism).
    """

    def __init__(self, window_size: int = 5, min_confidence: float = 0.5,
                 vote_threshold: float = 1.5):
        # window_size:    number of recent frames kept in the voting window.
        # min_confidence: detections below this confidence are ignored.
        # vote_threshold: minimum accumulated score for a class to win the
        #                 vote (was a hard-coded 1.5; now tunable).
        self.window_size = window_size
        self.min_confidence = min_confidence
        self.vote_threshold = vote_threshold
        self.detections_history = deque(maxlen=window_size)

    def add_detections(self, dets: List[Tuple[int, float, float, float, float, float]], x_band: Tuple[int, int]):
        """Record the detections of the current frame.

        dets:   [(cls, conf, x1, y1, x2, y2), ...] in pixel coordinates.
        x_band: (y_top, y_bottom) of the horizontal center band.
        """
        band_y1, band_y2 = x_band

        # Classes of this frame whose boxes overlap the center band,
        # stored as (cls, conf, overlap_weight).
        current_frame_classes = []

        for cls, conf, _bx1, by1, _bx2, by2 in dets:
            # Skip low-confidence detections outright.
            if conf < self.min_confidence:
                continue

            # A box that fully contains the band gets the maximum weight.
            if by1 <= band_y1 and band_y2 <= by2:
                current_frame_classes.append((cls, conf, 1.0))
                continue

            # Otherwise weight by the fraction of the box covered by the band.
            overlap = min(band_y2, by2) - max(band_y1, by1)
            box_height = by2 - by1
            if overlap > 0 and box_height > 0:
                overlap_ratio = overlap / box_height
                # Ignore boxes that barely touch the band (<= 30% overlap).
                if overlap_ratio > 0.3:
                    current_frame_classes.append((cls, conf, overlap_ratio))

        self.detections_history.append(current_frame_classes)

    def vote(self) -> Optional[int]:
        """Return the winning class id, or None if no class is confident enough."""
        if not self.detections_history:
            return None

        # Accumulate a weighted score per class across the whole window.
        class_scores = Counter()
        for frame_classes in self.detections_history:
            for cls, conf, overlap_ratio in frame_classes:
                class_scores[cls] += conf * overlap_ratio

        if not class_scores:
            return None

        best_class = max(class_scores, key=class_scores.get)

        # Only accept the winner when its accumulated score is high enough.
        if class_scores[best_class] >= self.vote_threshold:
            return best_class
        return None

    def get_detailed_info(self) -> str:
        """Debug helper: summarize total detections and per-class scores."""
        if not self.detections_history:
            return "No history"

        class_scores = Counter()
        total_detections = 0
        for frame_classes in self.detections_history:
            for cls, conf, overlap_ratio in frame_classes:
                class_scores[cls] += conf * overlap_ratio
                total_detections += 1

        info = f"Total detections in window: {total_detections}, Scores: "
        for cls, score in class_scores.items():
            class_name = CLASS_NAMES.get(cls, f"Unknown({cls})")
            info += f"{class_name}: {score:.2f} "
        return info

    def get_history_info(self) -> str:
        """Debug helper: dump the raw per-frame entries in the window."""
        info = []
        for i, frame_classes in enumerate(self.detections_history):
            info.append(f"Frame {i}: {frame_classes}")
        return "; ".join(info)


# =============== Color & class-id mapping (four classes: tip=0, disk=1, flange=2, base=3) ===============
CLASS_NAMES = {0: "tip", 1: "disk", 2: "flange", 3: "base"}
# BGR colors used when drawing each class's boxes/labels.
CLASS_COLORS = {
    0: (0, 170, 255),   # tip: orange
    1: (40, 220, 40),   # disk: green
    2: (255, 80, 80),   # flange: blue
    3: (200, 100, 255), # base: purple
}

# =============== 绘制函数 ===============
def draw_center_band(img: np.ndarray, band_px: int = 20, alpha: float = 0.25) -> Tuple[int, int]:
    """Overlay a semi-transparent horizontal band (height ``band_px``, default
    20 px) across the vertical center of *img*, plus a solid center line.

    Returns (y_top, y_bottom) of the band in pixel coordinates.
    """
    height, width = img.shape[:2]
    mid = height // 2
    half_height = max(1, band_px // 2)
    y_top = max(0, mid - half_height)
    y_bottom = min(height - 1, mid + half_height)

    # Blend a filled green rectangle onto the frame at the given alpha.
    tinted = img.copy()
    cv2.rectangle(tinted, (0, y_top), (width - 1, y_bottom), (0, 255, 0), thickness=-1)
    img[:] = cv2.addWeighted(tinted, alpha, img, 1 - alpha, 0)
    # Solid line marking the exact vertical center.
    cv2.line(img, (0, mid), (width - 1, mid), (0, 255, 0), 1)
    return y_top, y_bottom


# (A superseded commented-out draft of draw_detections was removed here;
# the live implementation follows.)
def draw_detections(
    img: np.ndarray,
    dets: List[Tuple[int, float, float, float, float, float]],
    conf_thr: Optional[Dict[int, float]] = None,
) -> None:
    """
    Draw detection boxes on *img* in place.

    dets: [(cls, conf, x1, y1, x2, y2), ...] in pixel coordinates.
    conf_thr: optional per-class confidence thresholds; a box is skipped when
        its confidence is below its class threshold (classes missing from the
        dict, or conf_thr=None, default to 0.0, i.e. always drawn).
    """
    for cls, conf, x1, y1, x2, y2 in dets:
        # Default threshold is 0.0 when none is configured for this class.
        threshold = conf_thr.get(cls, 0.0) if conf_thr else 0.0
        if conf < threshold:
            continue
        name = CLASS_NAMES.get(cls, str(cls))
        color = CLASS_COLORS.get(cls, (255, 255, 255))
        p1, p2 = (int(x1), int(y1)), (int(x2), int(y2))
        cv2.rectangle(img, p1, p2, color, 2)
        label = f"{name}:{conf:.2f}"
        _put_label(img, label, p1, color)
def _put_label(img: np.ndarray, text: str, topleft: Tuple[int, int], color=(0, 255, 0)):
    """Draw a filled label box with *text* just above the given top-left corner."""
    left, top = topleft
    (text_w, text_h), base = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    # Background rectangle sized to the rendered text, then the text itself.
    cv2.rectangle(img, (left, top - text_h - base - 6), (left + text_w + 6, top), color, -1)
    cv2.putText(img, text, (left + 3, top - 4), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)


def _band_box_overlap_ratio(band_y1: float, band_y2: float, by1: float, by2: float) -> float:
    """Fraction of the box's height [by1, by2] covered by the band [band_y1, band_y2].

    Returns 0.0 when there is no strict overlap or the box height is not positive.
    """
    overlap = min(band_y2, by2) - max(band_y1, by1)
    box_height = by2 - by1
    if overlap <= 0 or box_height <= 0:
        return 0.0
    return overlap / box_height


def judge_center_band_flag(
        dets: List[Tuple[int, float, float, float, float, float]],
        x_band: Tuple[int, int],
) -> int:
    """
    Per-frame flag from center-band coverage: 1 = cleanable, 0 = forbidden
    (forbidden takes priority).

    Rules:
      1. If the band is fully contained in some box, decide by that box's
         class immediately (first such box in detection order wins).
      2. Otherwise, any forbidden-class box (tip=0, flange=2, base=3) whose
         height overlaps the band by more than 50% -> 0.
      3. Otherwise, any disk box (1) overlapping by more than 50% -> 1.
      4. Anything else -> 0.
    """
    band_y1, band_y2 = x_band

    # Pass 1: a box that fully contains the band decides immediately.
    for cls, conf, bx1, by1, bx2, by2 in dets:
        if by1 <= band_y1 and band_y2 <= by2:
            if int(cls) in (0, 2, 3):   # forbidden classes
                return 0
            elif int(cls) == 1:         # cleanable class (disk)
                return 1

    # Pass 2: forbidden classes with significant overlap take priority.
    for cls, conf, bx1, by1, bx2, by2 in dets:
        if int(cls) in (0, 2, 3) and _band_box_overlap_ratio(band_y1, band_y2, by1, by2) > 0.5:
            return 0

    # Pass 3: cleanable disk with significant overlap.
    for cls, conf, bx1, by1, bx2, by2 in dets:
        if int(cls) == 1 and _band_box_overlap_ratio(band_y1, band_y2, by1, by2) > 0.5:
            return 1

    return 0


def draw_status_hud(img: np.ndarray, z_mm: float, flag: int):
    """Show the current height Z and the region status (cleanable/forbidden)
    in the top-left corner. flag: 1 = cleanable (green), else forbidden (red)."""
    text1 = f"Z = {z_mm:.1f} mm"
    # NOTE(review): cv2.putText's Hershey fonts presumably cover ASCII only,
    # so the Chinese characters below may render as '?' — confirm on screen.
    text2 = "Region: 可清洗" if flag == 1 else "Region: 禁清"
    color = (0, 200, 0) if flag == 1 else (0, 0, 200)

    _draw_tag(img, text1, (8, 12))
    _draw_tag(img, text2, (8, 36), color=color)

def _draw_tag(img: np.ndarray, text: str, origin: Tuple[int, int], color=(50, 180, 255)):
    """Render *text* over a small filled background box anchored at *origin*."""
    ox, oy = origin
    (text_w, text_h), base = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
    box_tl = (ox - 4, oy - text_h - base - 4)
    box_br = (ox + text_w + 4, oy + 4)
    cv2.rectangle(img, box_tl, box_br, color, -1)
    cv2.putText(img, text, (ox, oy), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2, cv2.LINE_AA)

# =============== 简易在线推理（可选） ===============
def yolo_infer(frame: np.ndarray, weight_path: Optional[str]) -> List[Tuple[int, float, float, float, float, float]]:
    """Run YOLOv8 on *frame* when a weight path is given and ultralytics is
    installed; otherwise return [] (visualization-only mode).

    Returns [(cls, conf, x1, y1, x2, y2), ...] in pixel coordinates.
    """
    if not weight_path:
        return []
    try:
        from ultralytics import YOLO
    except Exception:
        # ultralytics missing/broken -> degrade gracefully to no detections.
        return []
    # Cache one model per weight path. The previous single-slot cache ignored
    # weight_path, silently reusing a stale model when the path changed.
    cache = getattr(yolo_infer, "_models", None)
    if cache is None:
        cache = yolo_infer._models = {}
    model = cache.get(weight_path)
    if model is None:
        model = cache[weight_path] = YOLO(weight_path)
    res = model.predict(source=frame, verbose=False)[0]
    dets = []
    for b in res.boxes:
        cls = int(b.cls.item())
        conf = float(b.conf.item())
        x1, y1, x2, y2 = map(float, b.xyxy[0].tolist())
        dets.append((cls, conf, x1, y1, x2, y2))
    return dets

# =============== CLI：离线视频回放/在线推理演示 ===============
# =============== CLI: offline video playback / online inference demo ===============
# NOTE(review): a commented-out legacy copy of main() previously lived here.
# One of its lines (the `--z0` ap.add_argument call) had lost its leading `#`,
# leaving an indented statement at module level that made the whole module
# fail to import with an IndentationError. The dead draft has been removed;
# the live CLI entry point is main() below.


def main():
    """CLI demo: play a video (or camera), overlay the center band, detection
    boxes and a cleanable/forbidden HUD, and print the voted center-band class.

    Keys: ESC/q quit; '=' / '+' widen the band; '-' narrow it.
    """
    ap = argparse.ArgumentParser(description="中心带 + 检测框 + 区域状态 可视化")
    ap.add_argument("--video", default="video/demo.mp4", help="视频路径或摄像头索引(整数)")
    ap.add_argument("--weights", default="models/best2.pt", help="YOLOv8 权重路径（留空则不做在线推理，仅显示中心带/HUD）")
    ap.add_argument("--band_px", type=int, default=20, help="中心带像素宽度，默认20")
    ap.add_argument("--fps", type=float, default=25.0, help="期望显示帧率（控制播放速度）")
    ap.add_argument("--z0", type=float, default=0.0, help="模拟Z起点（mm），仅演示用")
    ap.add_argument("--zv", type=float, default=80.0, help="模拟Z上升速度（mm/s），仅演示用")
    args = ap.parse_args()

    # Per-class confidence thresholds used only for display filtering.
    CONF_THRESHOLDS = {
        0: 0.4,  # tip
        1: 0.8,  # disk
        2: 0.8,  # flange
        3: 0.4,  # base
    }

    # Sliding-window voter for the class occupying the center band.
    voter = CenterBandVoter(window_size=5, min_confidence=0.5)

    cap = cv2.VideoCapture(0 if args.video.isdigit() else args.video)
    if not cap.isOpened():
        raise RuntimeError(f"无法打开视频源：{args.video}")

    t0 = time.time()
    frame_count = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # 1) Run inference on the clean frame BEFORE any drawing, so the
        #    overlaid band/boxes do not contaminate the detector's input.
        dets = yolo_infer(frame, args.weights)

        # 2) Draw the center band; returns its (y_top, y_bottom).
        x_band = draw_center_band(frame, band_px=args.band_px)

        # 3) Draw detection boxes filtered by per-class thresholds.
        draw_detections(frame, dets, CONF_THRESHOLDS)

        # 4) Feed the voter and ask for the band's consensus class.
        voter.add_detections(dets, x_band)
        center_class = voter.vote()

        # 5) Print the voted class every 5 frames to avoid log spam.
        if frame_count % 5 == 0:
            if center_class is not None:
                class_name = CLASS_NAMES.get(center_class, f"Unknown({center_class})")
                print(f"Frame {frame_count}: Center band contains {class_name} (class {center_class})")
            else:
                print(f"Frame {frame_count}: Center band contains no consistent object")

        # 6) Per-frame cleanable/forbidden flag + HUD with simulated Z height.
        t = time.time() - t0
        z = args.z0 + args.zv * t
        flag = judge_center_band_flag(dets, x_band)
        draw_status_hud(frame, z_mm=z, flag=flag)

        cv2.imshow("CenterBand HUD", frame)
        key = cv2.waitKey(int(1000.0 / max(1.0, args.fps))) & 0xFF
        if key in (27, ord('q')):  # ESC/q quit
            break
        elif key in (ord('='), ord('+')):
            args.band_px += 2      # widen band
        elif key == ord('-') and args.band_px > 4:
            args.band_px -= 2      # narrow band

        frame_count += 1

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
