# video_inference_4class_complete.py
import cv2
import numpy as np
from openvino.runtime import Core
import os
import time
import warnings
import logging
import subprocess
import tempfile
import shutil
import threading
from queue import Queue, Empty
from ultralytics import YOLO

# -----------------------------
# Path configuration (adjust to your actual environment!)
# -----------------------------
YOLOV8_MODEL_PATH = r"models/yolov8n-seg.pt"  # YOLOv8n-seg instance-segmentation weights
ROAD_MODEL_PATH = r"D:\CodeCNN\intel\road-segmentation-adas-0001\FP32\road-segmentation-adas-0001.xml"
INPUT_FOLDER = r"H:\xiaomi\test"  # input image folder
OUTPUT_FOLDER = r"detect"  # annotated result folder
VIOLATION_FOLDER = r"violations"  # lane-violation snapshot folder
MASK_FOLDER = r"masks"  # vehicle mask visualization folder
DEBUG_FOLDER = r"debug"  # debug overlay folder
ORIGINAL_FOLDER = r"original"  # raw (unannotated) frame folder
VIDEO_INPUT = r"H:\xiaomi\test\video\192.168.1.64_01_20251027093805446.MP4"  # video file path

# RTSP camera configuration
RTSP_URLS = [
    "rtsp://admin:wkkj1234@192.168.200.74:554/Steaming/Channels/1",  # example RTSP URL -- NOTE(review): "Steaming" looks like a typo for "Streaming"; verify against the camera docs
]

# Create output directories (idempotent)
os.makedirs(OUTPUT_FOLDER, exist_ok=True)
os.makedirs(VIOLATION_FOLDER, exist_ok=True)
os.makedirs(MASK_FOLDER, exist_ok=True)
os.makedirs(DEBUG_FOLDER, exist_ok=True)
os.makedirs(ORIGINAL_FOLDER, exist_ok=True)  # raw-frame folder

# -----------------------------
# Tunable parameters
# -----------------------------
DETECTION_INTERVAL = 3  # process every Nth video frame
PREPROCESS_METHOD = 'adaptive'  # preprocessing method: 'adaptive' / 'clahe' / 'histeq' / 'gamma'
CONFIDENCE_THRESHOLD = 0.01  # lane-violation overlap-ratio threshold
SIDE_MARGIN_RATIO = 0.15  # fraction of vehicle width trimmed from each side
YOLO_CONFIDENCE = 0.3  # YOLO detection confidence threshold
YOLO_IOU = 0.5  # YOLO NMS IoU threshold
SAVE_MASKS = True  # save per-vehicle mask images
SAVE_DEBUG = True  # save debug overlay images
SAVE_ORIGINAL = True  # save raw frames (only when vehicles are detected)
MASK_ALPHA = 0.5  # mask overlay opacity (higher = more visible mask)
BOTTOM_HEIGHT_RATIO = 0.3  # fraction of vehicle height used as the bottom "contact" zone
RTSP_TIMEOUT = 10  # RTSP connection timeout (seconds)
RTSP_RECONNECT_INTERVAL = 5  # RTSP reconnect interval (seconds)

# Quiet down native-library logging
os.environ['GLOG_minloglevel'] = '2'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
warnings.filterwarnings('ignore')

# -----------------------------
# Initialize OpenVINO
# -----------------------------
core = Core()
device = "CPU"  # "GPU" is also possible if supported

# -----------------------------
# 1. Load the YOLOv8n-seg instance-segmentation model
# -----------------------------
print("Loading YOLOv8n-seg instance segmentation model...")
try:
    yolo_model = YOLO(YOLOV8_MODEL_PATH)
    print("✅ YOLOv8n-seg模型加载成功")
except Exception as e:
    print(f"❌ YOLOv8n-seg模型加载失败: {e}")
    print("请确保已安装ultralytics: pip install ultralytics")
    exit(1)

# -----------------------------
# 2. Load the road-segmentation model (4 classes)
# -----------------------------
print("Loading 4-class road segmentation model...")
try:
    road_model = core.read_model(model=ROAD_MODEL_PATH)
    road_compiled = core.compile_model(model=road_model, device_name=device)
    road_input_layer = road_compiled.input(0)
    road_output_layer = road_compiled.output(0)

    # Output shape should be [1, 4, H, W]
    _, num_classes, h_seg, w_seg = road_output_layer.shape
    print(f"Road segmentation output classes: {num_classes} (should be 4)")
    if num_classes != 4:
        print("⚠️ 模型输出不是4类！请确认是否使用了多分类版本")
        exit(1)

    print(f"Road segmentation input shape: {h_seg}x{w_seg}")
except Exception as e:
    print(f"❌ 道路分割模型加载失败: {e}")
    exit(1)

# -----------------------------
# 3. Color map for the 4 segmentation classes (BGR)
# -----------------------------
COLOR_MAP = {
    0: None,  # background - transparent
    1: (0, 255, 0),  # green - road
    2: (255, 0, 0),  # blue - curb
    3: (0, 0, 255),  # red - road markings
}

# -----------------------------
# 4. Vehicle classes (COCO ids) and display colors
# -----------------------------
VEHICLE_CLASSES = {
    2: 'car',
    3: 'motorcycle',
    5: 'bus',
    7: 'truck',
}

VEHICLE_COLORS = {
    2: (0, 255, 0),  # green - car
    3: (255, 255, 0),  # cyan - motorcycle
    5: (255, 0, 255),  # magenta - bus
    7: (0, 165, 255),  # orange - truck
}


# -----------------------------
# 5. 图像预处理增强函数
# -----------------------------
def _gamma_correct(image, gamma):
    """Apply gamma correction to a BGR image via a precomputed 256-entry LUT."""
    inv_gamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]).astype("uint8")
    return cv2.LUT(image, table)


def _apply_clahe(image, clip_limit):
    """Apply CLAHE to the L channel of a BGR image (converted through LAB)."""
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    lab_planes = list(cv2.split(lab))
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(8, 8))
    lab_planes[0] = clahe.apply(lab_planes[0])
    lab = cv2.merge(lab_planes)
    return cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)


def preprocess_image_for_segmentation(image, method='adaptive'):
    """
    Preprocess the input image to enhance contrast before segmentation.

    Args:
        image: BGR frame (numpy array). Never modified in place.
        method: 'adaptive' (or 'all') picks an enhancement based on mean
            brightness; 'clahe', 'histeq' and 'gamma' force that technique.
            Any other value returns an unmodified copy.

    Returns:
        The enhanced BGR image (a new array).
    """
    processed = image.copy()

    if method == 'adaptive' or method == 'all':
        # Adaptive: choose the enhancement from the image's mean brightness.
        gray = cv2.cvtColor(processed, cv2.COLOR_BGR2GRAY)
        brightness = np.mean(gray)

        if brightness < 80:  # dark image: brighten (gamma) then boost contrast
            print(f"    🌙 检测到暗图像 (亮度: {brightness:.1f})，使用组合增强")
            processed = _gamma_correct(processed, 1.8)
            processed = _apply_clahe(processed, 3.0)
        elif brightness > 200:  # over-bright image: contrast enhancement only
            print(f"    ☀️ 检测到过亮图像 (亮度: {brightness:.1f})，使用对比度增强")
            processed = _apply_clahe(processed, 2.0)
        else:  # normal brightness: standard contrast enhancement
            print(f"    ⛅ 正常亮度图像 (亮度: {brightness:.1f})，使用标准增强")
            processed = _apply_clahe(processed, 2.0)

    elif method == 'clahe':
        # Contrast-limited adaptive histogram equalization
        print("    🔧 使用CLAHE增强")
        processed = _apply_clahe(processed, 2.0)

    elif method == 'histeq':
        # Plain histogram equalization on the luma channel
        print("    🔧 使用直方图均衡化")
        ycrcb = cv2.cvtColor(processed, cv2.COLOR_BGR2YCrCb)
        ycrcb[:, :, 0] = cv2.equalizeHist(ycrcb[:, :, 0])
        processed = cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2BGR)

    elif method == 'gamma':
        # Fixed gamma correction
        gamma = 1.5
        print(f"    🔧 使用Gamma校正 (gamma={gamma})")
        processed = _gamma_correct(processed, gamma)

    return processed


# -----------------------------
# 6. 增强版道路分割推理
# -----------------------------
def enhanced_road_segmentation(image, preprocess_method='adaptive'):
    """
    Run the 4-class road segmentation network on ``image`` after contrast
    preprocessing.

    Returns a per-pixel class-index map (uint8) at the original image size.
    """
    # Contrast/brightness preprocessing before inference.
    print(f"    应用图像预处理: {preprocess_method}")
    enhanced = preprocess_image_for_segmentation(image, preprocess_method)

    # Build an NCHW float32 blob at the network's expected spatial size.
    blob = cv2.resize(enhanced, (w_seg, h_seg)).transpose(2, 0, 1)
    blob = blob[np.newaxis, ...].astype(np.float32)

    # Forward pass; output is [1, num_classes, H, W] scores -> argmax per pixel.
    probs = np.array(road_compiled([blob])[road_output_layer])
    class_map = np.argmax(probs[0], axis=0).astype(np.uint8)

    # Nearest-neighbour upscale back to the source resolution (preserves labels).
    src_h, src_w = image.shape[:2]
    return cv2.resize(class_map, (src_w, src_h), interpolation=cv2.INTER_NEAREST)


# -----------------------------
# 7. 使用YOLOv8进行车辆检测和实例分割（修复版）
# -----------------------------
def detect_vehicles_yolov8(image):
    """
    Run YOLOv8 vehicle detection and instance segmentation on a BGR frame.

    Returns: a list where each element is
        [x_min, y_min, x_max, y_max, confidence, class_id, mask]
    with ``mask`` a per-pixel array at the original resolution, or None when
    no segmentation output is available. Returns [] on any failure.
    """
    try:
        results = yolo_model(image, conf=YOLO_CONFIDENCE, iou=YOLO_IOU, classes=list(VEHICLE_CLASSES.keys()))
        found = []
        img_h, img_w = image.shape[:2]

        for res in results:
            boxes = getattr(res, 'boxes', None)
            if boxes is None:
                continue
            seg = res.masks if getattr(res, 'masks', None) is not None else None

            for idx, det_box in enumerate(boxes):
                coords = det_box.xyxy[0].cpu().numpy()
                score = det_box.conf[0].cpu().numpy()
                cls = int(det_box.cls[0].cpu().numpy())

                # Pull the matching instance mask, normalized to frame size.
                seg_mask = None
                if seg is not None and idx < len(seg.data):
                    raw = seg.data[idx]
                    if raw is not None:
                        seg_mask = raw.cpu().numpy()
                        # Collapse a leading channel axis if present.
                        if len(seg_mask.shape) == 3:
                            seg_mask = seg_mask[0]
                        # Match the original frame resolution.
                        if seg_mask.shape[0] != img_h or seg_mask.shape[1] != img_w:
                            seg_mask = cv2.resize(seg_mask, (img_w, img_h), interpolation=cv2.INTER_NEAREST)

                x0, y0, x1, y1 = (int(v) for v in coords)
                found.append([x0, y0, x1, y1, float(score), cls, seg_mask])

        print(f"    🔍 检测到 {len(found)} 个车辆目标")
        return found

    except Exception as e:
        print(f"    ❌ YOLOv8检测出错: {e}")
        return []


# -----------------------------
# 8. 保存原始图片函数（仅在检测到车辆时调用）
# -----------------------------
def save_original_image(image, frame_info="", vehicle_count=0, has_violation=False):
    """
    Save the raw frame (no overlays) to ORIGINAL_FOLDER.

    Only writes when SAVE_ORIGINAL is on and at least one vehicle was
    detected. Returns the saved path, or None when nothing was written.
    """
    if vehicle_count == 0 or not SAVE_ORIGINAL:
        return None

    stamp = time.strftime("%Y%m%d_%H%M%S")
    suffix = "_violation" if has_violation else ""

    # Filename depends on the frame's origin (RTSP stream / video / image file).
    if "RTSP" in frame_info:
        tail = frame_info.split("_")[-1] if "_" in frame_info else "unknown"
        name = f"original_rtsp_frame_{tail}_vehicles{vehicle_count}{suffix}_{stamp}.jpg"
    elif "Frame" in frame_info:
        tail = frame_info.split(" ")[-1] if " " in frame_info else "unknown"
        name = f"original_video_frame_{tail}_vehicles{vehicle_count}{suffix}_{stamp}.jpg"
    else:
        stem = os.path.splitext(frame_info)[0] if frame_info else "frame"
        name = f"original_{stem}_vehicles{vehicle_count}{suffix}_{stamp}.jpg"

    dest = os.path.join(ORIGINAL_FOLDER, name)
    cv2.imwrite(dest, image)
    print(f"📷 原始图片已保存 (车辆数: {vehicle_count}, 违规: {'是' if has_violation else '否'}): {dest}")

    return dest


# -----------------------------
# 9. 修复版保存车辆掩码图片函数
# -----------------------------
def save_vehicle_mask(image, vehicle_bbox, mask, class_id, confidence, vehicle_index, frame_info=""):
    """
    Render and save a visualization of one vehicle's segmentation mask.

    The frame is overlaid with the colorized mask, the bounding box, a
    class/confidence label and a statistics panel, then written to MASK_FOLDER.

    Args:
        image: original BGR frame.
        vehicle_bbox: [x_min, y_min, x_max, y_max] in image pixels.
        mask: per-pixel mask (float in [0,1] or 0-255 values) or None.
        class_id: COCO class id (key of VEHICLE_CLASSES).
        confidence: detection confidence.
        vehicle_index: 1-based index used in labels and the filename.
        frame_info: source name used to build the output filename.

    Returns:
        Path of the saved image, or None when disabled, mask is None, or
        saving failed.
    """
    if not SAVE_MASKS or mask is None:
        return None

    try:
        mask_img = image.copy()
        orig_h, orig_w = image.shape[:2]

        # Normalize dtype so the thresholding below is well-defined.
        if mask.dtype != np.bool_ and mask.dtype != np.uint8:
            mask = mask.astype(np.float32)

        # Binarize: float masks threshold at 0.5, 0-255 masks at 127.
        if mask.max() <= 1.0:
            mask_binary = (mask > 0.5).astype(np.uint8)
        else:
            mask_binary = (mask > 127).astype(np.uint8)

        # Resize the mask to the frame size if it does not match.
        if mask_binary.shape[0] != orig_h or mask_binary.shape[1] != orig_w:
            print(f"    ⚠️ 掩码尺寸不匹配: {mask_binary.shape} -> 调整到: ({orig_h}, {orig_w})")
            mask_binary = cv2.resize(mask_binary, (orig_w, orig_h), interpolation=cv2.INTER_NEAREST)

        # Colorize the mask region with the per-class color.
        vehicle_color = VEHICLE_COLORS.get(class_id, (0, 255, 0))  # default green
        color_mask = np.zeros_like(mask_img)
        for c in range(3):
            color_mask[:, :, c] = np.where(mask_binary > 0, vehicle_color[c], 0)

        # Blend the colorized mask over the frame.
        mask_img = cv2.addWeighted(mask_img, 1 - MASK_ALPHA, color_mask, MASK_ALPHA, 0)

        # Draw the bounding box once. (The original drew the same rectangle
        # twice -- thickness 3 then thickness 2 -- and the second draw was a
        # strict no-op since its pixels were already covered.)
        x_min, y_min, x_max, y_max = vehicle_bbox
        cv2.rectangle(mask_img, (x_min, y_min), (x_max, y_max), vehicle_color, 3)

        # Class/confidence label with a filled background.
        vehicle_type = VEHICLE_CLASSES.get(class_id, 'unknown')
        label = f"{vehicle_type} {confidence:.2f}"
        label_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)[0]

        cv2.rectangle(mask_img, (x_min, y_min - label_size[1] - 10),
                      (x_min + label_size[0] + 10, y_min), vehicle_color, -1)
        cv2.rectangle(mask_img, (x_min, y_min - label_size[1] - 10),
                      (x_min + label_size[0] + 10, y_min), vehicle_color, 2)

        cv2.putText(mask_img, label, (x_min + 5, y_min - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

        # Statistics panel (coverage is relative to the whole frame).
        mask_pixels = np.sum(mask_binary)
        total_pixels = orig_h * orig_w
        mask_coverage = (mask_pixels / total_pixels) * 100

        info_lines = [
            f"Vehicle {vehicle_index} - {vehicle_type}",
            f"Confidence: {confidence:.3f}",
            f"Mask Coverage: {mask_coverage:.2f}%",
            f"Mask Pixels: {mask_pixels}",
            f"BBox: [{x_min}, {y_min}, {x_max}, {y_max}]"
        ]

        text_bg_height = len(info_lines) * 25 + 20
        cv2.rectangle(mask_img, (10, 10), (400, text_bg_height), (0, 0, 0), -1)
        cv2.rectangle(mask_img, (10, 10), (400, text_bg_height), (255, 255, 255), 1)

        for i, line in enumerate(info_lines):
            y_pos = 35 + i * 25
            cv2.putText(mask_img, line, (20, y_pos),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

        # Save the visualization.
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        base_name = os.path.splitext(frame_info)[0] if frame_info else "frame"
        mask_filename = f"mask_{base_name}_vehicle{vehicle_index}_{timestamp}.jpg"
        mask_path = os.path.join(MASK_FOLDER, mask_filename)

        cv2.imwrite(mask_path, mask_img)
        print(f"📸 车辆掩码已保存 (覆盖率: {mask_coverage:.2f}%): {mask_path}")

        return mask_path

    except Exception as e:
        print(f"❌ 保存车辆掩码时出错: {e}")
        return None


# -----------------------------
# 10. 优化版车辆压线检测函数
# -----------------------------
def check_lane_violation_optimized(vehicle_bbox, vehicle_mask, lane_marking_mask,
                                   confidence_threshold=0.01, side_margin_ratio=0.15):
    """
    Check whether one vehicle overlaps the lane markings ("pressing the line").

    Builds a "contact" region from the bottom slice of the vehicle (using the
    instance mask when available, falling back to a bbox-derived region
    otherwise), trims the side margins, and measures its overlap with
    ``lane_marking_mask``.

    Args:
        vehicle_bbox: [x_min, y_min, x_max, y_max] in image pixels.
        vehicle_mask: per-pixel vehicle mask (values in [0,1]) or None.
        lane_marking_mask: 2D mask of lane-marking pixels (nonzero = marking).
        confidence_threshold: overlap ratio above which the vehicle is flagged.
        side_margin_ratio: fraction of vehicle width excluded from each side.

    Returns:
        (is_violation, overlap_ratio, overlap_pixels, bottom_bbox,
         overlap_mask, detection_mask)
    """
    x_min, y_min, x_max, y_max = vehicle_bbox

    # Clamp coordinates to the image bounds
    x_min = max(0, x_min)
    y_min = max(0, y_min)
    x_max = min(lane_marking_mask.shape[1] - 1, x_max)
    y_max = min(lane_marking_mask.shape[0] - 1, y_max)

    # Vehicle extent in pixels
    vehicle_height = y_max - y_min
    vehicle_width = x_max - x_min

    # Skip vehicles that are too small to give a meaningful result
    if vehicle_height < 10 or vehicle_width < 10:
        return False, 0, 0, [x_min, y_min, x_max, y_max], np.zeros_like(lane_marking_mask), np.zeros_like(
            lane_marking_mask)

    # When an instance mask is available, use it for precise detection
    if vehicle_mask is not None:
        try:
            # Resize the mask to the full-image resolution if needed
            orig_h, orig_w = lane_marking_mask.shape
            mask_h, mask_w = vehicle_mask.shape

            if mask_h != orig_h or mask_w != orig_w:
                # Resize then binarize
                vehicle_mask_resized = cv2.resize(vehicle_mask, (orig_w, orig_h), interpolation=cv2.INTER_NEAREST)
                vehicle_mask_binary = (vehicle_mask_resized > 0.5).astype(np.uint8)
            else:
                vehicle_mask_binary = (vehicle_mask > 0.5).astype(np.uint8)

            # Bottom slice of the vehicle box (ground-contact zone)
            bottom_height = int(vehicle_height * BOTTOM_HEIGHT_RATIO)
            bottom_start = max(y_min, y_max - bottom_height)

            # Rectangular mask covering only the bottom slice
            bottom_mask = np.zeros_like(vehicle_mask_binary)
            bottom_mask[bottom_start:y_max, x_min:x_max] = 1

            # Vehicle pixels that fall within the bottom slice
            vehicle_bottom_mask = vehicle_mask_binary & bottom_mask

            # If that region is too small, widen the bottom slice to 40%
            bottom_pixels = np.sum(vehicle_bottom_mask)
            if bottom_pixels < 10:
                print(f"    ⚠️ 底部掩码像素过少 ({bottom_pixels})，使用扩展区域")
                extended_bottom_start = max(y_min, y_max - int(vehicle_height * 0.4))
                bottom_mask_extended = np.zeros_like(vehicle_mask_binary)
                bottom_mask_extended[extended_bottom_start:y_max, x_min:x_max] = 1
                vehicle_bottom_mask = vehicle_mask_binary & bottom_mask_extended

            # Exclude the side margins; keep only the central strip
            side_margin = int(vehicle_width * side_margin_ratio)
            center_mask = np.zeros_like(vehicle_bottom_mask)
            center_x_min = max(0, x_min + side_margin)
            center_x_max = min(orig_w, x_max - side_margin)

            if center_x_max > center_x_min:
                center_mask[:, center_x_min:center_x_max] = 1
            else:
                center_mask[:, x_min:x_max] = 1

            # Final detection region: bottom slice ∩ central strip
            detection_mask = vehicle_bottom_mask & center_mask

            # If still too small, fall back to the full bottom region
            detection_pixels = np.sum(detection_mask)
            if detection_pixels < 5:
                print(f"    ⚠️ 检测区域过小 ({detection_pixels})，使用完整底部区域")
                detection_mask = vehicle_bottom_mask

        except Exception as e:
            print(f"    ⚠️ 掩码处理出错: {e}，回退到边界框检测")
            detection_mask = None
    else:
        detection_mask = None

    # No usable mask region: derive the region from the bounding box alone
    if detection_mask is None or np.sum(detection_mask) == 0:
        bottom_height = int(vehicle_height * BOTTOM_HEIGHT_RATIO)
        bottom_y_min = max(y_min, y_max - bottom_height)
        side_margin = int(vehicle_width * side_margin_ratio)
        center_x_min = max(0, x_min + side_margin)
        center_x_max = min(lane_marking_mask.shape[1], x_max - side_margin)

        if center_x_max <= center_x_min or (center_x_max - center_x_min) < 5:
            center_x_min = x_min
            center_x_max = x_max

        detection_mask = np.zeros_like(lane_marking_mask)
        if bottom_y_min < y_max and center_x_max > center_x_min:
            detection_mask[bottom_y_min:y_max, center_x_min:center_x_max] = 1

    # Overlap between the detection region and the lane markings
    overlap_mask = detection_mask & (lane_marking_mask > 0)
    overlap_pixels = np.sum(overlap_mask)

    # Overlap ratio relative to the detection region's area
    detection_area = np.sum(detection_mask)
    if detection_area > 0:
        overlap_ratio = overlap_pixels / detection_area
    else:
        overlap_ratio = 0

    # Diagnostics
    print(f"    📊 检测统计: 区域={detection_area}, 重叠={overlap_pixels}, 比例={overlap_ratio:.3f}")

    # Flag a violation when the ratio exceeds the threshold
    is_violation = overlap_ratio > confidence_threshold

    # Tight bounding box of the detection region (for visualization)
    if np.any(detection_mask):
        y_coords, x_coords = np.where(detection_mask)
        if len(x_coords) > 0 and len(y_coords) > 0:
            bottom_bbox = [np.min(x_coords), np.min(y_coords), np.max(x_coords), np.max(y_coords)]
        else:
            bottom_bbox = [x_min, y_min, x_max, y_max]
    else:
        bottom_bbox = [x_min, y_min, x_max, y_max]

    return is_violation, overlap_ratio, overlap_pixels, bottom_bbox, overlap_mask, detection_mask


# -----------------------------
# 11. 保存调试信息图片
# -----------------------------
def save_debug_image(image, vehicle_bbox, detection_mask, overlap_mask, lane_marking_mask,
                     class_id, confidence, vehicle_index, frame_info=""):
    """
    Save a debug overlay for one vehicle: lane markings (blue), the bottom
    detection region (yellow), the overlap (red), the bbox and a stats panel.

    Returns the saved path, or None when SAVE_DEBUG is off.
    """
    if not SAVE_DEBUG:
        return None

    canvas = image.copy()
    h, w = canvas.shape[:2]

    # Build one BGR overlay; later layers overwrite earlier ones so the
    # overlap (red) wins over detection (yellow) which wins over lane (blue).
    overlay = np.zeros((h, w, 3), dtype=np.uint8)
    layers = (
        (lane_marking_mask, [255, 0, 0]),   # blue: lane markings
        (detection_mask, [0, 255, 255]),    # yellow: detection region
        (overlap_mask, [0, 0, 255]),        # red: overlap
    )
    for layer, bgr in layers:
        if layer is not None:
            overlay[layer > 0] = bgr

    canvas = cv2.addWeighted(canvas, 0.7, overlay, 0.3, 0)

    # Vehicle bounding box in the class color
    x_min, y_min, x_max, y_max = vehicle_bbox
    box_color = VEHICLE_COLORS.get(class_id, (0, 255, 0))
    cv2.rectangle(canvas, (x_min, y_min), (x_max, y_max), box_color, 2)

    # Region statistics
    detection_area = np.sum(detection_mask) if detection_mask is not None else 0
    overlap_area = np.sum(overlap_mask) if overlap_mask is not None else 0
    overlap_ratio = overlap_area / max(detection_area, 1)

    info_lines = [
        f"Vehicle {vehicle_index}",
        f"Class: {VEHICLE_CLASSES.get(class_id, 'unknown')}",
        f"Confidence: {confidence:.3f}",
        f"Detection Area: {detection_area}",
        f"Overlap Area: {overlap_area}",
        f"Overlap Ratio: {overlap_ratio:.3f}",
        f"BBox: [{x_min}, {y_min}, {x_max}, {y_max}]"
    ]

    # Text panel
    panel_h = len(info_lines) * 20 + 20
    cv2.rectangle(canvas, (10, 10), (300, panel_h), (0, 0, 0), -1)
    cv2.rectangle(canvas, (10, 10), (300, panel_h), (255, 255, 255), 1)
    for row, text in enumerate(info_lines):
        cv2.putText(canvas, text, (20, 30 + row * 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)

    # Write the debug image
    stamp = time.strftime("%Y%m%d_%H%M%S")
    stem = os.path.splitext(frame_info)[0] if frame_info else "frame"
    out_path = os.path.join(DEBUG_FOLDER, f"debug_{stem}_vehicle{vehicle_index}_{stamp}.jpg")

    cv2.imwrite(out_path, canvas)
    print(f"🔍 调试图片已保存: {out_path}")

    return out_path


# -----------------------------
# 12. 保存压线违规图片函数
# -----------------------------
def save_violation_image(image, filename, vehicle_bbox, bottom_bbox, overlap_mask,
                         vehicle_mask, class_id, confidence, vehicle_index,
                         violation_count, total_vehicles, overlap_ratio, is_video=False, frame_num=0):
    """
    Save an annotated lane-violation snapshot to VIOLATION_FOLDER.

    Overlays the vehicle mask (if any), the vehicle bbox (red), the bottom
    detection region (yellow), a red highlight of the overlap pixels, a
    class label and a detailed info panel.

    Args:
        image: original BGR frame.
        filename: source name used to build the output filename.
        vehicle_bbox / bottom_bbox: pixel boxes [x_min, y_min, x_max, y_max].
        overlap_mask: 2D mask of the vehicle/lane overlap (or None).
        vehicle_mask: per-pixel vehicle mask in [0,1] (or None).
        class_id, confidence, vehicle_index: detection metadata.
        violation_count, total_vehicles, overlap_ratio: frame statistics.
        is_video: chooses the video-style info panel and filename.
        frame_num: frame number shown in the video-style panel.

    Returns:
        Path of the saved image.
    """
    violation_img = image.copy()

    # Overlay the instance mask if provided
    if vehicle_mask is not None:
        orig_h, orig_w = image.shape[:2]
        mask_h, mask_w = vehicle_mask.shape

        if mask_h != orig_h or mask_w != orig_w:
            mask_resized = cv2.resize(vehicle_mask, (orig_w, orig_h), interpolation=cv2.INTER_NEAREST)
        else:
            mask_resized = vehicle_mask

        mask_binary = mask_resized > 0.5
        color_mask = np.zeros_like(violation_img)
        vehicle_color = VEHICLE_COLORS.get(class_id, (0, 255, 0))  # default green

        for c in range(3):
            color_mask[:, :, c] = np.where(mask_binary, vehicle_color[c], 0)

        violation_img = cv2.addWeighted(violation_img, 1 - MASK_ALPHA, color_mask, MASK_ALPHA, 0)

    # Vehicle bounding box (red)
    x_min, y_min, x_max, y_max = vehicle_bbox
    cv2.rectangle(violation_img, (x_min, y_min), (x_max, y_max), (0, 0, 255), 3)

    # Bottom detection region (yellow)
    bx_min, by_min, bx_max, by_max = bottom_bbox
    cv2.rectangle(violation_img, (bx_min, by_min), (bx_max, by_max), (0, 255, 255), 2)

    # Highlight the overlap pixels in red. The original drew a filled
    # radius-2 circle per overlapping pixel in a Python loop, which is
    # O(pixels) and very slow for large overlaps; dilating the mask by a
    # radius-2 disk and painting the result is the vectorized equivalent.
    if overlap_mask is not None and np.any(overlap_mask):
        disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        halo = cv2.dilate(overlap_mask.astype(np.uint8), disk)
        violation_img[halo > 0] = (0, 0, 255)

    # Class/confidence label with a filled background
    vehicle_type = VEHICLE_CLASSES.get(class_id, 'unknown')
    vehicle_label = f"{vehicle_type} {confidence:.2f}"
    label_size = cv2.getTextSize(vehicle_label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)[0]

    cv2.rectangle(violation_img, (x_min, y_min - label_size[1] - 10),
                  (x_min + label_size[0] + 10, y_min), (0, 0, 255), -1)
    cv2.rectangle(violation_img, (x_min, y_min - label_size[1] - 10),
                  (x_min + label_size[0] + 10, y_min), (0, 0, 255), 2)

    cv2.putText(violation_img, vehicle_label, (x_min + 5, y_min - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

    # Info panel contents (video frames include the frame number and time)
    if is_video:
        info_lines = [
            "🚨 VIOLATION DETECTED!",
            f"Vehicle: {vehicle_type} (ID: {vehicle_index})",
            f"Confidence: {confidence:.3f}",
            f"Frame: {frame_num}",
            f"Overlap Ratio: {overlap_ratio:.3f}",
            f"BBox: [{x_min}, {y_min}, {x_max}, {y_max}]",
            f"Time: {time.strftime('%H:%M:%S')}",
            f"Violations: {violation_count}/{total_vehicles}"
        ]
    else:
        info_lines = [
            "🚨 VIOLATION DETECTED!",
            f"Vehicle: {vehicle_type} (ID: {vehicle_index})",
            f"Confidence: {confidence:.3f}",
            f"Overlap Ratio: {overlap_ratio:.3f}",
            f"BBox: [{x_min}, {y_min}, {x_max}, {y_max}]",
            f"Total Vehicles: {total_vehicles}",
            f"Violations: {violation_count}"
        ]

    # Panel background (first line drawn larger and in red)
    text_bg_height = len(info_lines) * 25 + 20
    cv2.rectangle(violation_img, (10, 10), (450, text_bg_height), (0, 0, 0), -1)
    cv2.rectangle(violation_img, (10, 10), (450, text_bg_height), (0, 0, 255), 2)

    for i, line in enumerate(info_lines):
        y_pos = 40 + i * 25
        color = (0, 0, 255) if i == 0 else (255, 255, 255)
        font_scale = 0.7 if i == 0 else 0.5
        thickness = 2 if i == 0 else 1
        cv2.putText(violation_img, line, (20, y_pos),
                    cv2.FONT_HERSHEY_SIMPLEX, font_scale, color, thickness)

    # Build the output filename and save
    base_name = os.path.splitext(filename)[0]
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    vehicle_type_name = VEHICLE_CLASSES.get(class_id, 'vehicle')

    if is_video:
        violation_filename = f"violation_{base_name}_{vehicle_type_name}{vehicle_index}_frame{frame_num}_{timestamp}.jpg"
    else:
        violation_filename = f"violation_{base_name}_{vehicle_type_name}{vehicle_index}_{timestamp}.jpg"

    violation_path = os.path.join(VIOLATION_FOLDER, violation_filename)
    cv2.imwrite(violation_path, violation_img)
    print(f"🚨 压线违规图片已保存: {violation_path}")

    return violation_path


# -----------------------------
# 13. 单帧处理函数
# -----------------------------
def process_single_frame(frame, frame_info=""):
    """
    Process one frame end-to-end: detect vehicles, segment the road, run the
    lane-violation check per vehicle, and save the various artifact images
    (masks, debug overlays, violation snapshots, raw frame).

    Args:
        frame: BGR image (numpy array), or None.
        frame_info: label used in log lines and output filenames
            (e.g. an image filename, "Frame 12", or an "RTSP_..." tag).

    Returns:
        (frame, stats) where stats has keys 'total_vehicles', 'violations',
        'has_violation' and 'violation_ratio'. Returns (None, {}) when
        ``frame`` is None.
    """
    if frame is None:
        return None, {}

    orig_h, orig_w = frame.shape[:2]
    print(f"处理帧: {frame_info}, 尺寸: {orig_w}x{orig_h}")

    # Vehicle detection + instance segmentation via YOLOv8
    detections = detect_vehicles_yolov8(frame)
    vehicle_count = len(detections)

    if vehicle_count == 0:
        print("    ⚠️ 未检测到车辆，跳过此帧")
        stats = {
            'total_vehicles': 0,
            'violations': 0,
            'has_violation': False,
            'violation_ratio': 0
        }
        return frame, stats

    print(f"    ✅ 检测到 {vehicle_count} 辆车，开始详细处理...")

    # Road segmentation (with contrast preprocessing)
    seg_class_resized = enhanced_road_segmentation(frame, PREPROCESS_METHOD)

    # Lane-marking mask: class 3 of the 4-class segmentation
    lane_marking_mask = (seg_class_resized == 3)
    print(f"    📊 车道线像素数: {np.sum(lane_marking_mask)}")

    # Per-frame statistics
    car_count = 0
    violation_count = 0
    has_violation_in_frame = False

    # Handle each detected vehicle
    for det_idx, det in enumerate(detections):
        x_min, y_min, x_max, y_max, conf, class_id, mask = det

        # Only process vehicle classes (YOLO is already class-filtered,
        # so this is a defensive re-check)
        if class_id in VEHICLE_CLASSES:
            vehicle_bbox = [x_min, y_min, x_max, y_max]
            vehicle_type = VEHICLE_CLASSES.get(class_id, 'unknown')

            print(f"    🚗 车辆 {det_idx + 1}: {vehicle_type} (置信度: {conf:.2f})")

            # Lane-violation check
            is_violation, overlap_ratio, overlap_pixels, bottom_bbox, overlap_mask, detection_mask = check_lane_violation_optimized(
                vehicle_bbox, mask, lane_marking_mask, CONFIDENCE_THRESHOLD, SIDE_MARGIN_RATIO
            )

            # Save the vehicle mask visualization
            if SAVE_MASKS and mask is not None:
                mask_path = save_vehicle_mask(
                    frame, vehicle_bbox, mask, class_id, conf, det_idx + 1, frame_info
                )

            # Save the debug overlay
            if SAVE_DEBUG:
                debug_path = save_debug_image(
                    frame, vehicle_bbox, detection_mask, overlap_mask, lane_marking_mask,
                    class_id, conf, det_idx + 1, frame_info
                )

            # Record the result for this vehicle
            if is_violation:
                violation_count += 1
                has_violation_in_frame = True
                print(f"    🚨 车辆 {det_idx + 1} ({vehicle_type}) 压线! 重叠比例: {overlap_ratio:.3f}")

                # Save the violation snapshot
                # NOTE(review): frame_num is given the vehicle index here,
                # not an actual frame number -- confirm intent
                violation_path = save_violation_image(
                    frame, frame_info, vehicle_bbox, bottom_bbox,
                    overlap_mask, mask, class_id, conf, det_idx + 1,
                    violation_count, len(detections), overlap_ratio,
                    is_video="RTSP" in frame_info or "Frame" in frame_info,
                    frame_num=det_idx + 1
                )
            else:
                print(f"    ✅ 车辆 {det_idx + 1} ({vehicle_type}) 未压线")

            car_count += 1

    # The raw frame is only saved when vehicles were detected
    if vehicle_count > 0:
        original_path = save_original_image(frame, frame_info, vehicle_count, has_violation_in_frame)

    # Per-frame statistics
    stats = {
        'total_vehicles': car_count,
        'violations': violation_count,
        'has_violation': has_violation_in_frame,
        'violation_ratio': violation_count / max(car_count, 1)
    }

    print(f"    📈 本帧统计: 车辆数={car_count}, 违规数={violation_count}, 违规比例={stats['violation_ratio']:.3f}")

    return frame, stats


# -----------------------------
# 14. 稳健的视频读取器
# -----------------------------
class RobustVideoReader:
    """
    Robust video reader that copes with corrupted video files.

    Strategy: try to open the file directly with OpenCV; if that fails,
    attempt an FFmpeg stream-copy repair into a temp directory, and as a
    last resort a full re-encode. Frame reads are retried a few times
    before giving up. Call release() to free the capture handle and
    delete the temp directory.
    """

    def __init__(self, video_path):
        # Path of the (possibly corrupted) source video.
        self.video_path = video_path
        # cv2.VideoCapture handle; None until open() succeeds.
        self.cap = None
        # Temp directory holding the repaired copy (removed in release()).
        self.temp_dir = None
        # Path of the repaired file inside temp_dir, if repair ran.
        self.repaired_path = None
        # Count of frames successfully returned by read().
        self.current_frame = 0
        # How many times read() retries a failed frame before giving up.
        self.max_retries = 3

    def open(self):
        """Open the video, repairing it with FFmpeg if the direct open fails.

        Returns:
            bool: True if a usable capture handle was obtained.
        """
        print(f"🎬 尝试打开视频: {self.video_path}")

        # First try opening the original file directly.
        self.cap = cv2.VideoCapture(self.video_path)
        if self.cap.isOpened():
            print("✅ 直接打开视频成功")
            return True

        # Direct open failed — attempt an FFmpeg repair.
        print("❌ 直接打开失败，尝试修复视频...")
        self.repaired_path, self.temp_dir = self.repair_video()

        if self.repaired_path and os.path.exists(self.repaired_path):
            self.cap = cv2.VideoCapture(self.repaired_path)
            if self.cap.isOpened():
                print("✅ 使用修复后的视频成功")
                return True

        print("❌ 所有打开视频的尝试都失败了")
        return False

    def repair_video(self):
        """Repair the video with an FFmpeg stream copy (no re-encoding).

        Requires the `ffmpeg` binary on PATH — TODO confirm for deployment.
        Falls back to repair_video_reencode() on failure.

        Returns:
            tuple: (repaired_file_path or None, temp_dir). The temp_dir is
            always created and must be cleaned up by release().
        """
        print("🔧 尝试修复视频文件...")

        # Create a temp directory to hold the repaired output.
        temp_dir = tempfile.mkdtemp()
        repaired_video_path = os.path.join(temp_dir, "repaired_video.mp4")

        try:
            # Stream-copy repair: fast, lossless, ignores decode errors.
            ffmpeg_cmd = [
                'ffmpeg',
                '-y',  # overwrite output file
                '-err_detect', 'ignore_err',  # tolerate corrupted input
                '-i', self.video_path,
                '-c', 'copy',  # stream copy, no re-encoding
                '-f', 'mp4',
                repaired_video_path
            ]

            # Run FFmpeg silently; discard all of its console output.
            result = subprocess.run(
                ffmpeg_cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                timeout=30  # 30-second timeout for the stream copy
            )

            if result.returncode == 0 and os.path.exists(repaired_video_path):
                print("✅ 视频修复成功")
                return repaired_video_path, temp_dir
            else:
                # Stream copy failed; fall back to a full re-encode.
                print("❌ 视频修复失败，尝试重新编码...")
                return self.repair_video_reencode(temp_dir)

        except Exception as e:
            # Covers missing ffmpeg binary and subprocess timeouts alike.
            print(f"❌ 视频修复出错: {e}")
            return self.repair_video_reencode(temp_dir)

    def repair_video_reencode(self, temp_dir):
        """Fully re-encode the video (slower last-resort repair).

        Args:
            temp_dir: existing temp directory to write the output into.

        Returns:
            tuple: (reencoded_file_path or None, temp_dir).
        """
        try:
            repaired_video_path = os.path.join(temp_dir, "reencoded_video.mp4")

            ffmpeg_cmd = [
                'ffmpeg',
                '-y',
                '-err_detect', 'ignore_err',
                '-i', self.video_path,
                '-c:v', 'libx264',  # re-encode video stream
                '-c:a', 'aac',  # re-encode audio stream
                '-crf', '23',  # medium quality
                '-preset', 'fast',
                repaired_video_path
            ]

            result = subprocess.run(
                ffmpeg_cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                timeout=60  # re-encoding is slower; allow 60 seconds
            )

            if result.returncode == 0 and os.path.exists(repaired_video_path):
                print("✅ 视频重新编码成功")
                return repaired_video_path, temp_dir
            else:
                print("❌ 视频重新编码失败")
                return None, temp_dir

        except Exception as e:
            print(f"❌ 视频重新编码出错: {e}")
            return None, temp_dir

    def read(self):
        """Read the next frame with retry-on-failure.

        Returns:
            tuple: (True, frame) on success, (False, None) on failure/EOF.

        NOTE(review): despite the original comment, a failed read is only
        re-attempted on the same handle — the video is never actually
        reopened, so at normal end-of-stream this loop retries
        max_retries times (with short sleeps) before returning False.
        """
        for attempt in range(self.max_retries):
            try:
                if self.cap is None:
                    return False, None

                ret, frame = self.cap.read()

                # Accept the frame only if it is non-empty.
                if ret and frame is not None and frame.size > 0:
                    self.current_frame += 1
                    return True, frame
                else:
                    # Read failed; retry on the same capture handle.
                    print(f"⚠️ 读取帧失败 (尝试 {attempt + 1}/{self.max_retries})")
                    if attempt < self.max_retries - 1:
                        time.sleep(0.1)
                        # retry the read
                        continue
                    else:
                        return False, None

            except Exception as e:
                print(f"⚠️ 读取帧时出错 (尝试 {attempt + 1}/{self.max_retries}): {e}")
                if attempt < self.max_retries - 1:
                    time.sleep(0.1)  # brief pause before retrying
                    continue
                else:
                    return False, None

        return False, None

    def get_properties(self):
        """Return (fps, total_frames, width, height); zeros if not opened.

        NOTE(review): fps/frame count come straight from the container
        metadata and may be 0 or unreliable for damaged files.
        """
        if self.cap is None:
            return 0, 0, 0, 0

        fps = self.cap.get(cv2.CAP_PROP_FPS)
        total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        return fps, total_frames, width, height

    def release(self):
        """Release the capture handle and delete the repair temp directory."""
        if self.cap is not None:
            self.cap.release()

        # Remove the temp directory created by repair_video(), if any.
        if self.temp_dir and os.path.exists(self.temp_dir):
            try:
                shutil.rmtree(self.temp_dir)
                print("✅ 临时文件已清理")
            except Exception as e:
                print(f"⚠️ 清理临时文件时出错: {e}")


# -----------------------------
# 15. RTSP流处理器
# -----------------------------
class RTSPProcessor:
    """
    RTSP stream processor with automatic reconnection.

    Reads frames from one RTSP URL, shows a live preview window, and runs
    lane-violation detection every `detection_interval` frames. Transient
    read errors are tolerated; after `max_consecutive_errors` failures in
    a row the connection is re-established, up to `max_reconnect_attempts`
    times.
    """

    def __init__(self, rtsp_url, detection_interval=3):
        # Source stream address.
        self.rtsp_url = rtsp_url
        # Run detection once every N frames.
        self.detection_interval = detection_interval
        # cv2.VideoCapture handle; None until connect() succeeds.
        self.cap = None
        self.frame_count = 0
        self.processed_frame_count = 0
        # Per-frame statistics dicts collected from process_single_frame().
        self.violation_stats = []
        self.start_time = None
        self.running = False
        self.reconnect_attempts = 0
        self.max_reconnect_attempts = 10
        self.consecutive_error_count = 0
        self.max_consecutive_errors = 50

    def connect(self):
        """Connect to the RTSP stream. Returns True on success."""
        print(f"📡 尝试连接到RTSP流: {self.rtsp_url}")

        try:
            self.cap = cv2.VideoCapture(self.rtsp_url)

            # Keep the internal buffer small to reduce latency.
            self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

            # Timeout properties only exist on newer OpenCV builds with the
            # FFmpeg backend; guard so older versions don't raise.
            if hasattr(cv2, 'CAP_PROP_OPEN_TIMEOUT_MSEC'):
                self.cap.set(cv2.CAP_PROP_OPEN_TIMEOUT_MSEC, RTSP_TIMEOUT * 1000)
            if hasattr(cv2, 'CAP_PROP_READ_TIMEOUT_MSEC'):
                self.cap.set(cv2.CAP_PROP_READ_TIMEOUT_MSEC, RTSP_TIMEOUT * 1000)

            # Verify that the connection actually opened.
            if not self.cap.isOpened():
                print(f"❌ 无法连接到RTSP流: {self.rtsp_url}")
                return False

            # Report the negotiated stream properties.
            width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = self.cap.get(cv2.CAP_PROP_FPS)
            print(f"✅ 成功连接到RTSP流: {self.rtsp_url}")
            print(f"视频属性: {width}x{height}, FPS: {fps:.2f}")

            return True

        except Exception as e:
            print(f"❌ 连接RTSP流时出错: {e}")
            return False

    def reconnect(self):
        """Tear down and re-establish the stream connection.

        Returns:
            bool: True if reconnection succeeded; False once the attempt
            budget (max_reconnect_attempts) is exhausted or connect() fails.
        """
        print(f"🔄 尝试重新连接RTSP流: {self.rtsp_url}")

        if self.cap:
            self.cap.release()

        # Enforce the reconnection budget.
        self.reconnect_attempts += 1
        if self.reconnect_attempts > self.max_reconnect_attempts:
            print(f"❌ 重连尝试次数超过限制: {self.max_reconnect_attempts}")
            return False

        # Back off briefly before reconnecting.
        time.sleep(RTSP_RECONNECT_INTERVAL)

        return self.connect()

    def process_stream(self, max_frames=None):
        """Main loop: read, preview, and periodically detect on the stream.

        Args:
            max_frames: optional hard cap on the number of frames consumed.
        """
        if not self.cap:
            print("❌ RTSP流未连接")
            return

        print(f"🎬 开始处理RTSP流: {self.rtsp_url}")
        print(f"⚠️ 每{self.detection_interval}帧检测一次")
        print("⚠️ 按 'q' 键退出实时显示窗口")
        print("⚠️ 按 Ctrl+C 退出程序")

        self.start_time = time.time()
        self.running = True

        # Create the live preview window.
        cv2.namedWindow('RTSP Stream', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('RTSP Stream', 800, 600)

        while self.running:
            try:
                ret, frame = self.cap.read()

                if not ret or frame is None or frame.size == 0:
                    print("⚠️ 无法读取帧，可能流已断开...")
                    self.consecutive_error_count += 1

                    if self.consecutive_error_count >= self.max_consecutive_errors:
                        print("⚠️ 连续错误次数过多，尝试重新连接...")
                        if not self.reconnect():
                            print("❌ 重新连接失败，退出处理")
                            break
                        self.consecutive_error_count = 0  # reset error count
                    else:
                        time.sleep(0.1)  # brief pause before the next read
                    # BUG FIX: always restart the loop on an invalid frame.
                    # Previously, after a successful reconnect, execution
                    # fell through and called frame.copy() on a None frame.
                    continue

                # Healthy frame: reset the failure counters.
                self.consecutive_error_count = 0
                self.reconnect_attempts = 0

                self.frame_count += 1

                # Show the raw frame (without detection overlays).
                display_frame = frame.copy()
                cv2.putText(display_frame, f"Frame: {self.frame_count}", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                cv2.putText(display_frame, f"RTSP: {os.path.basename(self.rtsp_url)}", (10, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                cv2.imshow('RTSP Stream', display_frame)

                # Run detection every `detection_interval` frames.
                if self.frame_count % self.detection_interval == 0:
                    self.processed_frame_count += 1
                    print(f"--- 处理RTSP帧 {self.frame_count} (检测 {self.processed_frame_count}) ---")

                    result, stats = process_single_frame(frame, f"RTSP_Frame_{self.frame_count}")

                    if result is not None:
                        stats['frame_num'] = self.frame_count
                        stats['source'] = 'RTSP'
                        self.violation_stats.append(stats)

                        # Show the annotated result in the preview window.
                        cv2.putText(result, f"Processed: {self.processed_frame_count}", (10, 90),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                        cv2.imshow('RTSP Stream', result)

                # Allow the user to quit with the 'q' key.
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    print("⏹️ 用户中断处理")
                    break

                # Honor the optional frame budget.
                if max_frames and self.frame_count >= max_frames:
                    print(f"✅ 达到最大帧数限制: {max_frames}")
                    break

            except KeyboardInterrupt:
                print("⏹️ 用户中断处理")
                break
            except Exception as e:
                print(f"⚠️ 处理RTSP帧时出错: {e}")
                self.consecutive_error_count += 1
                if self.consecutive_error_count >= self.max_consecutive_errors:
                    print("❌ 错误次数过多，尝试重新连接")
                    if not self.reconnect():
                        print("❌ 重新连接失败，退出处理")
                        break
                    self.consecutive_error_count = 0
                time.sleep(1)  # brief pause before continuing
                continue

        # Report throughput for the whole session.
        if self.start_time:
            total_time = time.time() - self.start_time
            processing_fps = self.processed_frame_count / total_time if total_time > 0 else 0
            print(f"\n✅ RTSP流处理完成!")
            print(f"总帧数: {self.frame_count}, 处理帧数: {self.processed_frame_count}")
            print(f"总耗时: {total_time:.2f}秒, 处理速度: {processing_fps:.2f} FPS")

        # Close the preview window.
        cv2.destroyWindow('RTSP Stream')

    def release(self):
        """Stop the loop, release the capture handle, close all windows."""
        self.running = False
        if self.cap:
            self.cap.release()
        cv2.destroyAllWindows()


# -----------------------------
# 16. 图片批量处理函数
# -----------------------------
def process_images():
    """
    Batch-process every image found under INPUT_FOLDER.

    Recursively scans for common image extensions, runs lane-violation
    detection on each image, writes the annotated result into
    OUTPUT_FOLDER (adding a timestamp suffix on name collisions), and
    finally prints an aggregate statistics report.
    """
    valid_exts = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif'}
    # Recursively collect all image paths under the input folder.
    found = [
        os.path.join(dirpath, fname)
        for dirpath, _, filenames in os.walk(INPUT_FOLDER)
        for fname in filenames
        if os.path.splitext(fname)[1].lower() in valid_exts
    ]

    if not found:
        print(f"❌ 没有在 '{INPUT_FOLDER}' 中找到图片！")
        return

    print(f"✅ 发现 {len(found)} 张图片，开始批量处理...")

    all_stats = []
    total = len(found)

    for idx, img_path in enumerate(found, 1):
        print(f"\n--- 处理第 {idx}/{total} 张: {os.path.basename(img_path)} ---")

        img = cv2.imread(img_path)
        if img is None:
            print(f"⚠️ 无法加载图像: {img_path}")
            continue

        # Run detection on this image.
        annotated, stats = process_single_frame(img, os.path.basename(img_path))
        if annotated is None:
            continue

        # Build the output path, avoiding overwrites via a timestamp suffix.
        stem = os.path.splitext(os.path.basename(img_path))[0]
        out_path = os.path.join(OUTPUT_FOLDER, f"{stem}_result.jpg")
        if os.path.exists(out_path):
            stamp = time.strftime("%Y%m%d_%H%M%S")
            out_path = os.path.join(OUTPUT_FOLDER, f"{stem}_result_{stamp}.jpg")

        cv2.imwrite(out_path, annotated)
        print(f"✅ 结果已保存至: {out_path}")

        stats['filename'] = os.path.basename(img_path)
        all_stats.append(stats)

    # Aggregate report for the whole batch.
    print_statistics(all_stats, "图片")


# -----------------------------
# 17. 视频处理函数
# -----------------------------
def process_video():
    """
    Process the video file at VIDEO_INPUT, detecting every DETECTION_INTERVAL
    frames via RobustVideoReader. Skips corrupted frames, tolerates up to 100
    errors, and prints statistics. No output video is written — only stats
    and violation images.
    """
    if not os.path.exists(VIDEO_INPUT):
        print(f"❌ 视频文件不存在: {VIDEO_INPUT}")
        return

    print(f"🎬 开始处理视频: {VIDEO_INPUT}")
    print("⚠️ 使用稳健的视频读取器，跳过损坏的帧...")
    print("⚠️ 注意: 此模式不保存检测视频，仅输出统计结果和违规图片")

    # Robust reader handles corrupted files (FFmpeg repair fallback).
    reader = RobustVideoReader(VIDEO_INPUT)
    if not reader.open():
        print("❌ 无法打开视频文件")
        return

    fps, total_frames, width, height = reader.get_properties()
    print(f"视频信息: {width}x{height}, FPS: {fps:.2f}, 总帧数: {total_frames}")

    frame_idx = 0
    detections_done = 0
    all_stats = []
    t0 = time.time()
    bad_frames = 0
    error_budget = 100  # tolerate this many broken frames before aborting

    print("开始视频处理（每3帧检测一次）...")

    # Main processing loop.
    while True:
        try:
            ok, frame = reader.read()
            if not ok:
                print("✅ 视频读取完成")
                break

            frame_idx += 1

            # Defensive re-check of frame validity.
            if frame is None or frame.size == 0:
                print(f"⚠️ 第 {frame_idx} 帧无效，跳过")
                bad_frames += 1
                continue

            # Run detection every DETECTION_INTERVAL frames.
            if frame_idx % DETECTION_INTERVAL == 0:
                detections_done += 1
                print(f"--- 处理第 {frame_idx}/{total_frames} 帧 ({detections_done}次检测) ---")

                annotated, stats = process_single_frame(frame, f"Frame {frame_idx}")
                if annotated is not None:
                    stats['frame_num'] = frame_idx
                    all_stats.append(stats)

            # Periodic progress report.
            if frame_idx % 30 == 0:
                elapsed = time.time() - t0
                progress = (frame_idx / total_frames) * 100 if total_frames > 0 else 0
                print(f"进度: {progress:.1f}% | 已处理: {elapsed:.1f}s")

            # Abort once the error budget is exceeded.
            if bad_frames > error_budget:
                print(f"❌ 错误次数超过限制 ({error_budget})，停止处理")
                break

        except KeyboardInterrupt:
            print("⏹️ 用户中断处理")
            break
        except Exception as exc:
            bad_frames += 1
            print(f"⚠️ 处理第 {frame_idx} 帧时发生错误: {exc}")
            print(f"    错误类型: {type(exc).__name__}")

            if bad_frames > error_budget:
                print(f"❌ 错误次数超过限制，停止处理")
                break

            continue

    # Clean up reader and any GUI windows.
    reader.release()
    cv2.destroyAllWindows()

    # Throughput summary.
    wall = time.time() - t0
    speed = frame_idx / wall if wall > 0 else 0

    print(f"\n✅ 视频处理完成!")
    print(f"总帧数: {frame_idx}, 处理帧数: {detections_done}")
    print(f"总耗时: {wall:.2f}秒, 处理速度: {speed:.2f} FPS")
    print(f"错误帧数: {bad_frames}")

    print_statistics(all_stats, "视频")


# -----------------------------
# 18. RTSP流处理函数
# -----------------------------
def process_rtsp_streams():
    """
    Connect to each RTSP stream configured in RTSP_URLS, run detection on
    it, print its statistics report, and release its resources.
    """
    if not RTSP_URLS:
        print("❌ 没有配置RTSP URL！请在RTSP_URLS列表中添加RTSP流地址")
        return

    print(f"📡 检测到 {len(RTSP_URLS)} 个RTSP流")

    total = len(RTSP_URLS)
    for idx, url in enumerate(RTSP_URLS, 1):
        print(f"\n--- 处理RTSP流 {idx}/{total}: {url} ---")

        processor = RTSPProcessor(url, DETECTION_INTERVAL)

        if processor.connect():
            processor.process_stream()
            # Per-stream statistics report.
            print_statistics(processor.violation_stats, f"RTSP流 {idx}")
        else:
            print(f"❌ 无法连接到RTSP流 {idx}: {url}")

        # Always release, whether or not the connection succeeded.
        processor.release()


# -----------------------------
# 19. 统计报告函数
# -----------------------------
def print_statistics(stats, data_type):
    """
    Print an aggregate lane-violation report for a batch of frames.

    Args:
        stats: list of per-frame dicts containing 'total_vehicles',
            'violations' and 'has_violation' keys.
        data_type: human-readable label for the source (e.g. "图片").
    """
    if not stats:
        print(f"❌ 没有{data_type}处理数据！")
        return

    separator = '=' * 50
    print(f"\n{separator}")
    print(f"🚗 {data_type}压线检测统计报告")
    print(f"{separator}")

    # Aggregate counters across all processed frames.
    vehicles = sum(s['total_vehicles'] for s in stats)
    violations = sum(s['violations'] for s in stats)
    frames_with_violation = sum(1 for s in stats if s['has_violation'])

    print(f"总处理帧数: {len(stats)}")
    print(f"总检测车辆数: {vehicles}")
    print(f"总压线违规数: {violations}")
    print(f"总体违规比例: {violations / max(vehicles, 1) * 100:.1f}%")
    print(f"检测到违规的帧数: {frames_with_violation}")

    # Extra details only make sense for streaming/video sources.
    if 'RTSP' in data_type or '视频' in data_type:
        print(f"检测间隔: 每{DETECTION_INTERVAL}帧检测一次")
        print(f"预处理方法: {PREPROCESS_METHOD}")

    # Report which optional image-saving features are active.
    if SAVE_MASKS:
        print(f"车辆掩码保存: 已启用 (保存在 {MASK_FOLDER} 文件夹)")

    if SAVE_DEBUG:
        print(f"调试图片保存: 已启用 (保存在 {DEBUG_FOLDER} 文件夹)")

    if SAVE_ORIGINAL:
        print(f"原始图片保存: 已启用 (仅在检测到车辆时保存，保存在 {ORIGINAL_FOLDER} 文件夹)")


# -----------------------------
# 20. 主函数
# -----------------------------
def main():
    """
    Entry point: print the current configuration, prompt the user for a
    processing mode, dispatch to the chosen pipeline(s), then print where
    the results were written.
    """
    banner = "=" * 60
    print(banner)
    print("🚗 车辆压线检测系统（YOLOv8n-seg + 实例分割）")
    print(banner)
    print("配置信息:")
    print(f"  - 车辆检测模型: YOLOv8n-seg")
    print(f"  - 道路分割模型: 4类道路分割")
    print(f"  - 检测间隔: 每{DETECTION_INTERVAL}帧检测一次")
    print(f"  - 掩码保存: {'启用' if SAVE_MASKS else '禁用'}")
    print(f"  - 调试图片: {'启用' if SAVE_DEBUG else '禁用'}")
    print(f"  - 原始图片: {'启用' if SAVE_ORIGINAL else '禁用'} (仅在检测到车辆时)")
    print(f"  - 掩码透明度: {MASK_ALPHA}")
    print(f"  - 底部检测区域比例: {BOTTOM_HEIGHT_RATIO}")
    print()
    print("请选择处理模式:")
    print("1. 处理图片文件夹")
    print("2. 处理视频文件")
    print("3. 处理RTSP流")
    print("4. 同时处理图片和视频")
    print("5. 同时处理图片、视频和RTSP流")
    print("6. 退出")

    choice = input("请输入选择 (1/2/3/4/5/6): ").strip()

    # Map each menu choice to its label and the pipelines to run, in order.
    menu = {
        "1": ("处理图片文件夹", (process_images,)),
        "2": ("处理视频文件", (process_video,)),
        "3": ("处理RTSP流", (process_rtsp_streams,)),
        "4": ("同时处理图片和视频", (process_images, process_video)),
        "5": ("同时处理图片、视频和RTSP流",
              (process_images, process_video, process_rtsp_streams)),
    }

    if choice == "6":
        print("👋 退出程序")
        return

    if choice not in menu:
        print("❌ 无效选择，请重新运行程序并输入1、2、3、4、5或6")
        return

    label, pipelines = menu[choice]
    print(f"\n🎯 选择模式: {label}")
    for run in pipelines:
        run()

    # Summarize where every kind of output was written.
    print(f"\n🎉 所有处理完成！")
    print(f"📁 结果目录:")
    print(f"   - 检测结果: {OUTPUT_FOLDER}")
    print(f"   - 违规图片: {VIOLATION_FOLDER}")
    if SAVE_MASKS:
        print(f"   - 车辆掩码: {MASK_FOLDER}")
    if SAVE_DEBUG:
        print(f"   - 调试图片: {DEBUG_FOLDER}")
    if SAVE_ORIGINAL:
        print(f"   - 原始图片: {ORIGINAL_FOLDER} (仅在检测到车辆时保存)")


# -----------------------------
# 21. 运行主函数
# -----------------------------
# Run the interactive entry point only when executed as a script.
if __name__ == "__main__":
    main()