import time
import cv2
import torch
import numpy as np
from pathlib import Path

# Import the required utility functions from the utils package
from utils.utils import (
    time_synchronized, select_device, increment_path,
    scale_coords, xyxy2xywh, non_max_suppression,
    driving_area_mask, lane_line_mask, plot_one_box, show_seg_result,
    AverageMeter
)

# Import the YOLOv8 model (Ultralytics wrapper)
from ultralytics import YOLO

# =================== Configuration ====================
# YOLOP (segmentation) model parameters
yolop_weights = "data/weights/yolopv2.pt"  # YOLOP model file (TorchScript format)
img_size = 640                            # inference image size (square)
device = '0'                              # device: '0' selects the CUDA device, or 'cpu'

# YOLOv8 (object detection) model parameters
yolov8_model_path = "data/weights/best.pt"  # path to the YOLOv8 model file
conf_thres = 0.3                            # detection confidence threshold  # NOTE(review): declared but never passed to the YOLO call below — confirm intent
iou_thres = 0.45                            # NMS IoU threshold  # NOTE(review): likewise unused by the detection call — verify

# Video input / output
video_source = "C:/Users/永生理想/Desktop/2.mp4"  # input video path (segmentation runs first, detection runs on its output)
# Result save path (a runs/detect/exp* directory is created automatically)
project = 'runs/detect'
exp_name = 'exp'

# =================== 主函数 ====================
def _load_yolop(device_str):
    """Load the TorchScript YOLOP segmentation model.

    Returns:
        (model, selected_device, half): the eval-mode model, the torch
        device it lives on, and whether FP16 inference is enabled
        (enabled on any non-CPU device).
    """
    selected_device = select_device(device_str)
    print("Loading YOLOP segmentation model...")
    # map_location places the weights directly on the target device.
    model = torch.jit.load(yolop_weights, map_location=selected_device)
    half = selected_device.type != 'cpu'  # FP16 only makes sense on GPU
    if half:
        model.half()
    model.eval()
    # Warm-up: one dummy forward pass so the first real frame isn't slow.
    dummy = torch.zeros(1, 3, img_size, img_size).to(selected_device).type_as(next(model.parameters()))
    model(dummy)
    return model, selected_device, half


def _segment_frame(frame, model, selected_device, half):
    """Run YOLOP on one BGR frame.

    Returns (da_mask, ll_mask): drivable-area and lane-line masks,
    resized back to the frame's original resolution.
    """
    # Preprocess: resize to model input, BGR->RGB, HWC->CHW, add batch dim.
    resized = cv2.resize(frame, (img_size, img_size))
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    tensor = np.ascontiguousarray(np.expand_dims(np.transpose(rgb, (2, 0, 1)), axis=0))
    tensor = torch.from_numpy(tensor).to(selected_device)
    tensor = tensor.half() if half else tensor.float()
    tensor /= 255.0  # normalize to [0, 1]

    # Inference. Output format: [pred, anchor_grid], seg, ll — the
    # detection branch (index 0) is ignored; only segmentation is used.
    t1 = time_synchronized()
    outputs = model(tensor)
    t2 = time_synchronized()
    del t1, t2  # timing hooks kept for device synchronization; values unused

    da_mask = driving_area_mask(outputs[1])
    ll_mask = lane_line_mask(outputs[2])

    # Resize masks back to the original frame size; nearest-neighbour
    # interpolation keeps the mask values discrete.
    h, w = frame.shape[:2]
    da_mask = cv2.resize(da_mask, (w, h), interpolation=cv2.INTER_NEAREST)
    ll_mask = cv2.resize(ll_mask, (w, h), interpolation=cv2.INTER_NEAREST)
    return da_mask, ll_mask


def _draw_detections(yolo_model, frame):
    """Run YOLOv8 on *frame* and draw labelled boxes onto it in place."""
    for result in yolo_model(frame):
        for box in result.boxes:
            xyxy = box.xyxy[0].cpu().numpy().astype(int)
            conf = box.conf[0].cpu().item()
            cls = int(box.cls[0].cpu().item())
            label = f'{yolo_model.names[cls]} {conf:.2f}'
            # Same drawing style as OLDtest.py (green boxes, thickness 3).
            plot_one_box(xyxy, frame, label=label, color=(0, 255, 0), line_thickness=3)


def main():
    """Segment drivable area / lane lines with YOLOP, then run YOLOv8
    detection on the overlaid frame, writing the result to a video
    under runs/detect/exp*.
    """
    yolop_model, selected_device, half = _load_yolop(device)

    print("Loading YOLOv8 detection model...")
    yolo_model = YOLO(yolov8_model_path)

    cap = cv2.VideoCapture(video_source)
    if not cap.isOpened():
        print("Error opening video file.")
        return

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers/webcams report 0 FPS; fall back to 30 so the
    # VideoWriter still produces a playable file.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0

    # Create an incrementing runs/detect/exp* output directory.
    save_dir = Path(increment_path(Path(project) / exp_name, exist_ok=False))
    (save_dir / 'labels').mkdir(parents=True, exist_ok=True)
    output_path = str(save_dir / "output.mp4")
    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))

    print("Processing video...")
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Keep the untouched frame as a fallback for drawing.
            orig_frame = frame.copy()

            da_mask, ll_mask = _segment_frame(frame, yolop_model, selected_device, half)

            # Overlay the segmentation masks on a copy of the frame.
            seg_frame = show_seg_result(orig_frame.copy(), (da_mask, ll_mask), is_demo=True)
            if seg_frame is None or seg_frame.size == 0:
                print("Warning: seg_frame is empty, using original frame instead.")
                seg_frame = orig_frame.copy()

            # Detect on the segmentation overlay so boxes render on top of it.
            _draw_detections(yolo_model, seg_frame)

            out.write(seg_frame)
            if seg_frame.shape[0] > 0 and seg_frame.shape[1] > 0:
                cv2.imshow("Final Output", seg_frame)
            else:
                print("Error: Invalid seg_frame dimensions, skipping cv2.imshow.")

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Release resources even if a frame raises mid-loop.
        cap.release()
        out.release()
        cv2.destroyAllWindows()
    print(f"Processing complete. Output saved to {output_path}")

if __name__ == '__main__':
    # Inference only: disable gradient tracking for the whole pipeline.
    with torch.no_grad():
        main()
