# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

"""
Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
"""

import argparse
import csv
import os
import platform
import sys
from pathlib import Path

import torch
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont  # 添加PIL库用于中文显示

# Resolve the repository root and make it importable, then relativize for nicer log paths.
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH so sibling packages (models/, utils/) can be imported
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative path from the current working directory

from ultralytics.utils.plotting import Annotator, colors, save_one_box

from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (
    LOGGER,
    Profile,
    check_file,
    check_img_size,
    check_imshow,
    check_requirements,
    colorstr,
    increment_path,
    non_max_suppression,
    print_args,
    scale_boxes,
    strip_optimizer,
    xyxy2xywh,
)
from utils.torch_utils import select_device, smart_inference_mode


# Cache of loaded PIL fonts keyed by (font_path, font_size) so the TTF file is
# not re-read from disk for every single label drawn (this function is called
# once per detection box).
_FONT_CACHE = {}


def _resolve_cjk_font(font_size):
    """Return a PIL font able to render CJK text at ``font_size``.

    Tries the platform's common CJK font path (SimHei on Windows, Noto Sans CJK
    on Linux, PingFang on macOS) and falls back to PIL's built-in default font,
    logging a warning since the default font cannot render Chinese glyphs.
    """
    system = platform.system()
    if system == "Windows":
        font_path = "C:/Windows/Fonts/simhei.ttf"
    elif system == "Linux":
        font_path = "/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc"
    elif system == "Darwin":  # macOS
        font_path = "/System/Library/Fonts/PingFang.ttc"
    else:
        font_path = None

    key = (font_path, font_size)
    font = _FONT_CACHE.get(key)
    if font is None:
        try:
            if font_path and os.path.exists(font_path):
                font = ImageFont.truetype(font_path, font_size)
            else:
                font = ImageFont.load_default()
                LOGGER.warning("未找到中文字体，将使用默认字体，可能无法正确显示中文")
        except Exception as e:
            LOGGER.error(f"加载字体失败: {e}，将使用默认字体")
            font = ImageFont.load_default()
        _FONT_CACHE[key] = font
    return font


def draw_chinese_text(image, text, position, font_size=36, color=(255, 255, 255)):
    """
    Draw (possibly Chinese) text onto an OpenCV image by round-tripping through PIL.

    :param image: image in OpenCV BGR format (numpy array)
    :param text: text to draw; if None or empty the image is returned unchanged
                 (callers pass None when labels are hidden)
    :param position: (x, y) top-left position of the text
    :param font_size: font size in points
    :param color: text color in BGR order (matching OpenCV conventions)
    :return: a new image in BGR format with the text rendered
    """
    if not text:  # guard: avoids PIL raising on None/empty text
        return image

    # OpenCV stores BGR; PIL expects RGB.
    image_pil = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(image_pil)
    font = _resolve_cjk_font(font_size)

    # Reverse the BGR color tuple into RGB for PIL.
    draw.text(position, text, font=font, fill=tuple(reversed(color)))

    # Convert back to OpenCV's BGR format.
    return cv2.cvtColor(np.array(image_pil), cv2.COLOR_RGB2BGR)


@smart_inference_mode()
def run(
        weights=ROOT / "exp21/weights/best.pt",  # model path or triton URL
        source=ROOT / "1/my_dates/images/test",  # file/dir/URL/glob/screen/0(webcam)
        data=ROOT / "data/person_classes.yaml",  # dataset.yaml path
        imgsz=(640, 640),  # inference size (height, width)
        conf_thres=0.25,  # confidence threshold
        iou_thres=0.45,  # NMS IOU threshold
        max_det=1000,  # maximum detections per image
        device="",  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img=False,  # show results
        save_txt=False,  # save results to *.txt
        save_format=0,  # save boxes coordinates in YOLO format or Pascal-VOC format (0 for YOLO and 1 for Pascal-VOC)
        save_csv=False,  # save results in CSV format
        save_conf=False,  # save confidences in --save-txt labels
        save_crop=False,  # save cropped prediction boxes
        nosave=False,  # do not save images/videos
        classes=None,  # filter by class: --class 0, or --class 0 2 3
        agnostic_nms=False,  # class-agnostic NMS
        augment=False,  # augmented inference
        visualize=False,  # visualize features
        update=False,  # update all models
        project=ROOT / "runs/detect",  # save results to project/name
        name="exp",  # save results to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        line_thickness=3,  # bounding box thickness (pixels)
        hide_labels=False,  # hide labels
        hide_conf=False,  # hide confidences
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        vid_stride=1,  # video frame-rate stride
):
    """
    Run YOLOv5 detection inference on images/videos/streams, counting "professional"
    vs "general" persons per image and overlaying the counts (rendered in Chinese via
    PIL) on the annotated output.

    NOTE(review): `save_txt`/`save_csv`/`save_conf`/`save_format` are accepted for CLI
    compatibility but the label/CSV writing code paths are not implemented in this
    variant — only image/video saving is performed. Confirm against callers before
    relying on those flags.
    """
    source = str(source)
    save_img = not nosave and not source.endswith(".txt")  # save inference images
    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
    is_url = source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://"))
    webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file)
    screenshot = source.lower().startswith("screen")
    if is_url and is_file:
        source = check_file(source)  # download

    # Directories
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
    (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Load model
    device = select_device(device)
    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    stride, names, pt = model.stride, model.names, model.pt
    imgsz = check_img_size(imgsz, s=stride)  # check image size

    # Chinese display labels for the two person categories.
    PROFESSIONAL_LABEL = "职业人员"
    GENERAL_LABEL = "普通人员"

    # Class-id mapping, resolved lazily from `names` on the first batch.
    professional_id = None
    general_id = None

    # Dataloader
    bs = 1  # batch_size
    if webcam:
        view_img = check_imshow(warn=True)
        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
        bs = len(dataset)
    elif screenshot:
        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
    vid_path, vid_writer = [None] * bs, [None] * bs

    # Run inference
    model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))  # warmup
    seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device))
    for path, im, im0s, vid_cap, s in dataset:
        with dt[0]:
            im = torch.from_numpy(im).to(model.device)
            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            if len(im.shape) == 3:
                im = im[None]  # expand for batch dim
            if model.xml and im.shape[0] > 1:
                ims = torch.chunk(im, im.shape[0], 0)

        # Inference
        with dt[1]:
            visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
            if model.xml and im.shape[0] > 1:
                # OpenVINO does not support batched inference; run images one at a time.
                pred = None
                for image in ims:
                    if pred is None:
                        pred = model(image, augment=augment, visualize=visualize).unsqueeze(0)
                    else:
                        pred = torch.cat((pred, model(image, augment=augment, visualize=visualize).unsqueeze(0)), dim=0)
                pred = [pred, None]
            else:
                pred = model(im, augment=augment, visualize=visualize)
        # NMS
        with dt[2]:
            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)

        # Resolve class-id -> label mapping once, on the first batch.
        if professional_id is None or general_id is None:
            if names and len(names) >= 2:
                # `names` may be a list or an index->name dict depending on the backend.
                name_items = names.items() if isinstance(names, dict) else enumerate(names)
                for idx, class_name in name_items:
                    if class_name == PROFESSIONAL_LABEL or class_name == "Professional":  # accept CN or EN labels
                        professional_id = idx
                    elif class_name == GENERAL_LABEL or class_name == "General":  # accept CN or EN labels
                        general_id = idx
                if professional_id is None or general_id is None:
                    LOGGER.warning(f"未找到标签 '{PROFESSIONAL_LABEL}' 或 '{GENERAL_LABEL}' 的对应类别ID")
                    # Fallback assumption: 0 = Professional, 1 = General.
                    professional_id = 0
                    general_id = 1
            else:
                LOGGER.error("类别名称配置不正确，请检查data/person_classes.yaml")
                return

        # Process predictions
        for i, det in enumerate(pred):  # per image
            seen += 1
            # Reset the counters PER IMAGE (previously reset once per batch, which
            # over-counted when webcam/stream batches contained multiple frames).
            professional_count = 0
            general_count = 0

            if webcam:  # batch_size >= 1
                p, im0, frame = path[i], im0s[i].copy(), dataset.count
                s += f"{i}: "
            else:
                p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # im.jpg
            txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}")  # im.txt
            s += "{:g}x{:g} ".format(*im.shape[2:])  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            imc = im0.copy() if save_crop else im0  # for save_crop
            annotator = Annotator(im0, line_width=line_thickness, example=str(names))

            # Chinese labels must be drawn AFTER annotator.result(): the Annotator
            # keeps (and returns) its own image buffer, so anything PIL draws onto a
            # separate copy of im0 beforehand would be silently discarded.
            pending_labels = []  # list of (text, (x, y)) to render after result()

            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()

                for *xyxy, conf, cls in reversed(det):
                    c = int(cls)
                    if c == professional_id:
                        professional_count += 1
                    elif c == general_id:
                        general_count += 1

                    label_text = PROFESSIONAL_LABEL if c == professional_id else GENERAL_LABEL
                    label = None if hide_labels else (label_text if hide_conf else f"{label_text} {conf:.2f}")

                    # Draw the box only; the CJK-capable label is rendered via PIL below.
                    annotator.box_label(xyxy, "", color=colors(c, True))

                    if label:  # skip when labels are hidden (label is None)
                        x1, y1 = int(xyxy[0]), int(xyxy[1])
                        # Place the label above the box, offset to avoid overlapping it.
                        pending_labels.append((label, (x1, y1 - 40)))

                    if save_crop:
                        save_one_box(xyxy, imc, file=save_dir / "crops" / label_text / f"{p.stem}.jpg", BGR=True)

            # Materialize the annotated image, then render the deferred Chinese labels.
            im0 = annotator.result()
            for label_str, label_pos in pending_labels:
                im0 = draw_chinese_text(im0, label_str, label_pos, font_size=28)

            # Overlay the per-image statistics banner (Chinese text via PIL).
            if view_img or save_img:
                padding = 15  # inner padding around the banner text

                text = f"{PROFESSIONAL_LABEL}: {professional_count} | {GENERAL_LABEL}: {general_count}"

                img_height, img_width = im0.shape[:2]

                # Measure the text with PIL so the background box fits CJK glyphs.
                temp_img = Image.fromarray(cv2.cvtColor(im0, cv2.COLOR_BGR2RGB))
                draw = ImageDraw.Draw(temp_img)
                try:
                    # Load a CJK font at size 40 for the banner.
                    if platform.system() == "Windows":
                        font = ImageFont.truetype("C:/Windows/Fonts/simhei.ttf", 40)
                    elif platform.system() == "Linux":
                        font = ImageFont.truetype("/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc", 40)
                    elif platform.system() == "Darwin":  # macOS
                        font = ImageFont.truetype("/System/Library/Fonts/PingFang.ttc", 40)
                    else:
                        font = ImageFont.load_default()
                except Exception:
                    font = ImageFont.load_default()

                try:
                    # textbbox returns (left, top, right, bottom); requires Pillow >= 8.0.
                    bbox = draw.textbbox((0, 0), text, font=font)
                    text_width = bbox[2] - bbox[0]
                    text_height = bbox[3] - bbox[1]
                except AttributeError:
                    # Older Pillow: fall back to a generous fixed size.
                    LOGGER.warning("使用的Pillow版本较旧，可能无法正确计算文本尺寸")
                    text_width, text_height = 400, 50

                # Center the banner horizontally, keep it away from the top edge.
                text_x = (img_width - text_width) // 2
                text_y = 50

                # Draw the background rectangle and the text with PIL.
                # NOTE(review): the image has no alpha channel, so the 180 alpha in the
                # fill is ignored and the rectangle is drawn fully opaque.
                overlay = Image.fromarray(cv2.cvtColor(im0, cv2.COLOR_BGR2RGB))
                draw = ImageDraw.Draw(overlay)
                draw.rectangle(
                    [text_x - padding, text_y - text_height - padding,
                     text_x + text_width + padding, text_y + padding],
                    fill=(0, 0, 0, 180)
                )
                draw.text((text_x, text_y - text_height), text, font=font, fill=(255, 255, 255))

                # Convert back to OpenCV BGR.
                im0 = cv2.cvtColor(np.array(overlay), cv2.COLOR_RGB2BGR)

            # Stream results
            if view_img:
                if platform.system() == "Linux" and p not in windows:
                    windows.append(p)
                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond

            # Save results (image with detections)
            if save_img:
                if dataset.mode == "image":
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                        save_path = str(Path(save_path).with_suffix(".mp4"))  # force *.mp4 suffix on results videos
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
                    vid_writer[i].write(im0)

        # Print time (inference-only)
        LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1e3:.1f}ms")

    # Print results
    t = tuple(x.t / seen * 1e3 for x in dt)  # speeds per image
    LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}" % t)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    if update:
        # `weights` may be a str/Path (the default) or a list (argparse nargs="+").
        strip_optimizer(weights[0] if isinstance(weights, (list, tuple)) else weights)


def parse_opt():
    """Build the CLI for this script, parse sys.argv, and return the options namespace.

    A single-valued ``--imgsz`` is expanded to ``(size, size)`` before returning,
    and the parsed options are echoed via ``print_args``.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    # Model / input selection
    add("--weights", nargs="+", type=str, default=ROOT / "exp21/exp21/weights/best.pt",
        help="model path or triton URL")
    add("--source", type=str, default=ROOT / "1/my_dates/images/test",
        help="file/dir/URL/glob/screen/0(webcam)")
    add("--data", type=str, default=ROOT / "1/my_dates/my_date.yaml", help="数据集配置文件")

    # Inference parameters
    add("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640], help="inference size h,w")
    add("--conf-thres", type=float, default=0.25, help="confidence threshold")
    add("--iou-thres", type=float, default=0.45, help="NMS IoU threshold")
    add("--max-det", type=int, default=1000, help="maximum detections per image")
    add("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")

    # Output / saving behavior
    add("--view-img", action="store_true", help="show results")
    add("--save-txt", action="store_true", help="save results to *.txt")
    add("--save-format", type=int, default=0,
        help="whether to save boxes coordinates in YOLO format or Pascal-VOC format when save-txt is True, 0 for YOLO and 1 for Pascal-VOC")
    add("--save-csv", action="store_true", help="save results in CSV format")
    add("--save-conf", action="store_true", help="save confidences in --save-txt labels")
    add("--save-crop", action="store_true", help="save cropped prediction boxes")
    add("--nosave", action="store_true", help="do not save images/videos")

    # Filtering / inference modes
    add("--classes", nargs="+", type=int, help="filter by class: --classes 0, or --classes 0 2 3")
    add("--agnostic-nms", action="store_true", help="class-agnostic NMS")
    add("--augment", action="store_true", help="augmented inference")
    add("--visualize", action="store_true", help="visualize features")
    add("--update", action="store_true", help="update all models")

    # Run directories and rendering
    add("--project", default=ROOT / "runs/detect", help="save results to project/name")
    add("--name", default="exp", help="save results to project/name")
    add("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
    add("--line-thickness", default=3, type=int, help="bounding box thickness (pixels)")
    add("--hide-labels", default=False, action="store_true", help="hide labels")
    add("--hide-conf", default=False, action="store_true", help="hide confidences")
    add("--half", action="store_true", help="use FP16 half-precision inference")
    add("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
    add("--vid-stride", type=int, default=1, help="video frame-rate stride")

    opt = parser.parse_args()
    if len(opt.imgsz) == 1:
        opt.imgsz = opt.imgsz * 2  # expand a single size into (h, w)
    print_args(vars(opt))
    return opt


def main(opt):
    """Entry point: sanity-check the Pillow environment, verify requirements, run inference.

    :param opt: argparse.Namespace produced by parse_opt(); forwarded to run() as kwargs.
    """
    # Pillow is already imported at module top, so reaching this point means it is
    # installed; here we only warn about versions older than 8.0.0 (which lack
    # ImageDraw.textbbox, used for the statistics banner).
    # NOTE: the version is parsed with the stdlib instead of `packaging` — the
    # previous `from packaging import version` was an undeclared third-party
    # dependency whose absence raised ImportError and triggered the misleading
    # "install pillow" error message.
    try:
        import PIL

        parts = []
        for piece in PIL.__version__.split(".")[:3]:
            digits = "".join(ch for ch in piece if ch.isdigit())
            parts.append(int(digits) if digits else 0)
        while len(parts) < 3:  # pad short versions like "8" or "8.0"
            parts.append(0)
        if tuple(parts) < (8, 0, 0):
            LOGGER.warning("建议升级Pillow库到8.0.0或更高版本: pip install --upgrade pillow")
    except ImportError:
        LOGGER.error("请安装PIL库: pip install pillow")
        return

    check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
    run(**vars(opt))


if __name__ == "__main__":
    # Parse CLI options and hand them straight to the entry point.
    main(parse_opt())