import time
import cv2
import torch
import numpy as np
from pathlib import Path

# Import the required utility functions from the utils package
from utils.utils import (
    time_synchronized, select_device, increment_path,
    scale_coords, xyxy2xywh, non_max_suppression,
    driving_area_mask, lane_line_mask, plot_one_box,
    AverageMeter
)

# Import the YOLOv8 model (Ultralytics wrapper)
from ultralytics import YOLO

# =================== Configuration ====================
yolop_weights = "data/weights/yolopv2.pt"  # YOLOPv2 model file (TorchScript format)
img_size = 640                            # Inference image size (YOLOPv2 input side length)
device = '0'                              # Device: '0' selects the CUDA device, or 'cpu'
yolov8_model_path = "data/weights/best.pt"  # Path to the YOLOv8 detection model file
conf_thres = 0.3                            # Detection confidence threshold
iou_thres = 0.45                            # NMS IoU threshold
# NOTE(review): conf_thres / iou_thres are not passed to the YOLOv8 call in
# main() below — confirm whether they were meant to be applied.
video_source = "C:/Users/永生理想/Desktop/2.mp4"  # Input video path
project = 'runs/detect'  # Root directory for run outputs
exp_name = 'exp'         # Experiment subdirectory name (auto-incremented)

# =================== Helper functions ====================
def resize_with_padding(image, target_size):
    """Letterbox *image* into a square canvas of side *target_size*.

    The image is scaled to fit while preserving its aspect ratio, then
    centered on a black square; the leftover border stays zero-filled.
    """
    height, width = image.shape[:2]
    ratio = min(target_size / height, target_size / width)
    scaled_w, scaled_h = int(width * ratio), int(height * ratio)
    scaled = cv2.resize(image, (scaled_w, scaled_h), interpolation=cv2.INTER_LINEAR)
    canvas = np.zeros((target_size, target_size, 3), dtype=np.uint8)
    y0 = (target_size - scaled_h) // 2
    x0 = (target_size - scaled_w) // 2
    canvas[y0:y0 + scaled_h, x0:x0 + scaled_w] = scaled
    return canvas

def adjust_mask_to_original(mask, orig_width, orig_height):
    """Map a letterboxed square segmentation mask back to the original frame size.

    Frames are letterboxed by `resize_with_padding` before inference, so the
    model's square mask contains padding bands above/below or left/right of
    the real content.  The previous implementation resized the whole square
    mask — padding included — straight to the original resolution, which
    stretched the mask and misaligned it with the frame.  Here the padding is
    cropped off first, using the same scale/offset math as
    `resize_with_padding`, and only the valid region is resized back.

    Args:
        mask: 2-D mask at model resolution (assumed square, e.g. 640x640 —
            matching the `resize_with_padding` letterbox; TODO confirm the
            mask helpers return input-sized masks).
        orig_width: Width of the original video frame in pixels.
        orig_height: Height of the original video frame in pixels.

    Returns:
        uint8 mask of shape (orig_height, orig_width), aligned with the frame.
    """
    mask_h, mask_w = mask.shape[:2]
    # Reproduce the letterbox geometry: how the original frame was scaled and
    # where it was placed inside the square model input.
    scale = min(mask_h / orig_height, mask_w / orig_width)
    content_h, content_w = int(orig_height * scale), int(orig_width * scale)
    top = (mask_h - content_h) // 2
    left = (mask_w - content_w) // 2
    # Crop away the padding bands, then resize only the real content.
    content = mask[top:top + content_h, left:left + content_w]
    mask_adjusted = cv2.resize(content, (orig_width, orig_height),
                               interpolation=cv2.INTER_NEAREST)
    return mask_adjusted.astype(np.uint8)

def draw_segmentation(frame, da_seg_mask, ll_seg_mask):
    """Overlay drivable-area and lane-line masks onto a copy of *frame*.

    The drivable area is blended in as translucent green; lane-line pixels
    are painted solid white.  The input frame is left untouched.
    """
    output = frame.copy()
    overlay = np.zeros_like(output)
    overlay[da_seg_mask > 0] = [0, 255, 0]  # green drivable area
    output = cv2.addWeighted(output, 0.8, overlay, 0.2, 0)
    output[ll_seg_mask > 0] = [255, 255, 255]  # white lane lines
    return output

# =================== Main function ====================
def main():
    """Run YOLOv8 detection + YOLOPv2 segmentation over a video file.

    Loads both models, processes the input video frame by frame, overlays
    detection boxes plus drivable-area / lane-line segmentation, and writes
    the annotated video into an auto-incremented `runs/detect/exp*` folder.
    Press 'q' in the preview window to stop early.
    """
    # Select the device and load the TorchScript YOLOPv2 segmentation model.
    selected_device = select_device(device)
    print("Loading YOLOP segmentation model...")
    yolop_model = torch.jit.load(yolop_weights)
    yolop_model = yolop_model.to(selected_device)
    half = selected_device.type != 'cpu'  # FP16 only on CUDA
    if half:
        yolop_model.half()
    yolop_model.eval()

    # Warm up YOLOP with a dummy tensor so the first real frame doesn't pay
    # for lazy CUDA initialization; type_as matches the model's dtype (half/float).
    dummy = torch.zeros(1, 3, img_size, img_size).to(selected_device).type_as(next(yolop_model.parameters()))
    yolop_model(dummy)

    print("Loading YOLOv8 detection model...")
    yolo_model = YOLO(yolov8_model_path)

    # Open the input video.
    cap = cv2.VideoCapture(video_source)
    if not cap.isOpened():
        print("Error opening video file.")
        return

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    if fps <= 0:
        # Some containers report 0 FPS; fall back so the writer stays valid.
        fps = 30.0
    print(f"Input video resolution: {width}x{height}, FPS: {fps}")

    # Prepare the output directory and video writer.
    save_dir = Path(increment_path(Path(project) / exp_name, exist_ok=False))
    (save_dir / 'labels').mkdir(parents=True, exist_ok=True)
    output_path = str(save_dir / "output.mp4")
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    print("Processing video...")
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            orig_frame = frame.copy()

            # --- YOLOv8 object detection ---
            # Apply the configured thresholds (previously declared but unused).
            t1 = time_synchronized()
            results = yolo_model(orig_frame, conf=conf_thres, iou=iou_thres)
            t2 = time_synchronized()

            det_frame = orig_frame.copy()
            for result in results:
                for box in result.boxes:
                    xyxy = box.xyxy[0].cpu().numpy().astype(int)
                    conf = box.conf[0].cpu().item()
                    cls = int(box.cls[0].cpu().item())
                    label = f'{yolo_model.names[cls]} {conf:.2f}'
                    plot_one_box(xyxy, det_frame, label=label, color=(0, 255, 0), line_thickness=3)

            # --- YOLOPv2 segmentation ---
            # Feed the CLEAN frame to the segmentation model: previously
            # det_frame (with detection rectangles already painted on it)
            # was used, letting the drawn boxes corrupt the model input.
            resized = resize_with_padding(orig_frame, img_size)
            rgb_img = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
            img_tensor = torch.from_numpy(np.transpose(rgb_img, (2, 0, 1))).to(selected_device)
            img_tensor = img_tensor.half() if half else img_tensor.float()
            img_tensor = img_tensor.unsqueeze(0) / 255.0

            t3 = time_synchronized()
            outputs = yolop_model(img_tensor)
            seg, ll = outputs[1], outputs[2]
            t4 = time_synchronized()

            # Map the masks back to the original video resolution.
            da_seg_mask = driving_area_mask(seg)
            ll_seg_mask = lane_line_mask(ll)
            da_seg_mask_orig = adjust_mask_to_original(da_seg_mask, width, height)
            ll_seg_mask_orig = adjust_mask_to_original(ll_seg_mask, width, height)

            # Draw segmentation on top of the detection-annotated frame.
            final_frame = draw_segmentation(det_frame, da_seg_mask_orig, ll_seg_mask_orig)

            # Save, preview, and allow early exit with 'q'.
            out.write(final_frame)
            cv2.imshow("Final Output", final_frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            print(f"YOLOv8: {t2 - t1:.3f}s, YOLOP: {t4 - t3:.3f}s")

    finally:
        # Always release resources, even if processing raised.
        cap.release()
        out.release()
        cv2.destroyAllWindows()
        print(f"Processing complete. Output saved to {output_path}")

if __name__ == '__main__':
    # Inference only: disable gradient tracking for the whole run to save
    # memory and avoid building autograd graphs.
    with torch.no_grad():
        main()