# Author : ZZH
# Date : 2025/5/26
import cv2
from ultralytics import YOLO
from pathlib import Path
import argparse


def _str2bool(value):
    """Parse a command-line boolean token.

    argparse's ``type=bool`` is broken for CLI use: ``bool("False")`` is True
    because any non-empty string is truthy, so ``--save False`` would silently
    enable saving.  This converter accepts the common spellings instead.

    :param value: raw CLI token (or an actual bool when used as a default)
    :return: the parsed boolean
    :raises argparse.ArgumentTypeError: if the token is not a recognized boolean
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError(f"Boolean value expected, got: {value!r}")


def args_parser():
    """Build and parse the CLI arguments for YOLO helmet inference.

    :return: argparse.Namespace holding all inference options
    """
    parser = argparse.ArgumentParser(description="基于YOLO的安全帽推理脚本")

    parser.add_argument("--weights", type=str, default="train6-20250523-175204-yolo11m-best.pt", help="模型权重路径")
    parser.add_argument("--source", type=str, default="0", help="输入源（图像/文件夹/视频/摄像头ID，如‘0’）")
    parser.add_argument("--imgsz", type=int, default=640, help="输入图像尺寸")
    parser.add_argument("--conf", type=float, default=0.25, help="置信度阈值")
    parser.add_argument("--iou", type=float, default=0.45, help="IOU阈值")
    parser.add_argument("--save", type=_str2bool, default=True, help="保存预测图像")
    parser.add_argument("--save_dir", type=str, default="runs/val", help="保存预测图像的目录")
    parser.add_argument("--save_txt", type=_str2bool, default=True, help="保存预测结果为TXT")
    parser.add_argument("--save_conf", type=_str2bool, default=True, help="在TXT中包含置信度值")
    parser.add_argument("--save_frames", type=_str2bool, default=True, help="保存摄像头/视频每帧图像")
    parser.add_argument("--save_crop", type=_str2bool, default=True, help="保存检测框裁剪图像")
    parser.add_argument("--show", type=_str2bool, default=True, help="显示结果")
    parser.add_argument("--display-size",
                        type=str,
                        default="720p",
                        choices=["360p", "720p", "1280p", "2K", "4K"],
                        help="显示窗口大小")
    return parser.parse_args()


def main():
    """
    Run YOLO safety-helmet inference on an image/folder/video or a live camera.

    Camera sources (numeric ``--source``) are processed frame-by-frame with a
    live display window; file sources are handed to a single batch predict call.

    :return: None
    :raises FileNotFoundError: if the input source or model weights are missing
    :raises RuntimeError: if the camera cannot be opened
    """
    args = args_parser()
    # Map the display-size choice to a (width, height) pixel resolution.
    resolution_map = {
        "360p": (640, 360),
        "720p": (1280, 720),
        "1280p": (1920, 1080),  # NOTE: this resolution is conventionally called 1080p
        "2K": (2560, 1440),
        "4K": (3840, 2160)
    }
    display_width, display_height = resolution_map[args.display_size]

    # Normalize the weights path: bare filenames resolve under the checkpoint dir.
    model_path = Path(args.weights)
    if not model_path.is_absolute():
        model_path = Path(r"../models/checkpoints") / args.weights

    # Validate the input source; numeric values are camera IDs and skip the
    # filesystem check.
    source = args.source
    if not source.isdigit():
        source_path = Path(source)
        if not source_path.exists():
            raise FileNotFoundError(f"输入源不存在：{source_path}")
        source = str(source_path)
    # Fail fast if the weights file is missing before loading the model.
    if not model_path.exists():
        raise FileNotFoundError(f"模型文件不存在：{model_path}")
    model = YOLO(str(model_path))

    # Camera inference (streaming, frame by frame).
    if source.isdigit():
        # Probe the camera once only to read its FPS, then release it
        # immediately: holding the handle open while model.predict() re-opens
        # the same device can fail on some platforms.
        cap = cv2.VideoCapture(int(source))
        if not cap.isOpened():
            raise RuntimeError(f"无法打开摄像头: {source}")
        fps = cap.get(cv2.CAP_PROP_FPS) or 30  # fall back to 30 when the driver reports 0
        cap.release()

        # Set up a resizable display window at the requested resolution.
        window_name = "YOLO Safety Helmet Detection"
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(window_name, display_width, display_height)

        video_writer = None
        frames_dir = None

        # NOTE(review): args.save_dir is currently unused — streaming results
        # land under runs/predict/exp*; confirm whether it should be wired in.
        for idx, result in enumerate(model.predict(
                source=source,
                imgsz=args.imgsz,
                conf=args.conf,
                iou=args.iou,
                save=False,
                save_txt=args.save_txt,
                save_conf=args.save_conf,
                save_crop=args.save_crop,
                show=False,  # we drive the display ourselves below
                project="runs/predict",
                name="exp",
                stream=True
                )):
            if idx == 0:
                # Ultralytics picks the run directory (exp, exp2, exp3, ...);
                # it is only known once the first result arrives.
                save_dir = Path(result.save_dir)

                # Directory for per-frame snapshots, if requested.
                if args.save_frames:
                    frames_dir = save_dir / "0_frames"
                    frames_dir.mkdir(parents=True, exist_ok=True)
                # Video writer for the annotated stream, if requested.
                if args.save:
                    video_path = save_dir / "output.mp4"
                    video_writer = cv2.VideoWriter(
                        str(video_path),
                        cv2.VideoWriter_fourcc(*"mp4v"),
                        fps,
                        (display_width, display_height)
                    )

            # Render the annotated frame and scale it to the display size.
            annotated_frame = result.plot()
            annotated_frame = cv2.resize(annotated_frame, (display_width, display_height))

            if video_writer:
                video_writer.write(annotated_frame)
            cv2.imshow(window_name, annotated_frame)
            if frames_dir:
                cv2.imwrite(str(frames_dir / f"{idx}.jpg"), annotated_frame)

            # Quit on 'q' or ESC.
            key = cv2.waitKey(10) & 0xFF
            if key == ord('q') or key == 27:
                break

        if video_writer:
            video_writer.release()
        cv2.destroyAllWindows()

    # File/folder/video inference: a single batch predict call, results saved
    # by ultralytics according to the save_* flags.
    else:
        model.predict(
            source=source,
            imgsz=args.imgsz,
            conf=args.conf,
            iou=args.iou,
            save=args.save,
            save_txt=args.save_txt,
            save_conf=args.save_conf,
            save_crop=args.save_crop,
            show=False,
            )

# Run inference only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
