'''
https://blog.csdn.net/CharmsLUO/article/details/123422822?spm=1001.2101.3001.6661.1&utm_medium=distribute.pc_relevant_t0.none-task-blog-2%7Edefault%7ECTRLIST%7ERate-1-123422822-blog-124290927.pc_relevant_aa_2&depth_1-utm_source=distribute.pc_relevant_t0.none-task-blog-2%7Edefault%7ECTRLIST%7ERate-1-123422822-blog-124290927.pc_relevant_aa_2&utm_relevant_index=1

1. det 每张图的检测结果，中心点xy,x,y,置信度，类别
2. 需要重新加载函数，这里是对图片做检测，dataset = MyLoadImages(source, img_size=self.imgsz, stride=self.stride)
    详情见MyLoadImages函数
3. cpu检测的时候，无法用half精度, cuda才支持FP16  half = True
    cuda: device=0, half=False,
          device=0, half=True,
    cpu:  device='cpu', half=False,

4. 运行
    修改main函数中权重weights路径和图像源source路径
    python detect_api.py
'''

import random
import numpy as np
import torch
import time
from models.common import DetectMultiBackend
from utils.dataloaders import MyLoadImages
from utils.general import check_img_size, cv2, non_max_suppression, scale_coords
from utils.plots import Annotator, colors

# 初始化参数设置
# Configuration container mirroring YOLOv5's command-line options for API use.
class simulation_opt:
    """Inference settings (thresholds, device, NMS options, ...).

    Note: FP16 (half=True) is only supported on CUDA devices; keep
    half=False when device='cpu'.
    """
    def __init__(self, weights, img_size=640, conf_thres=0.25, iou_thres=0.45,
                 device=0, half=False,             # half=True requires CUDA
                 classes=None, agnostic_nms=False, augment=False, visualize=False,
                 max_det=1000, line_thickness=2, dnn=False):
        # Every option is stored verbatim as an attribute of the same name.
        options = dict(
            weights=weights, img_size=img_size,
            conf_thres=conf_thres, iou_thres=iou_thres,
            device=device, half=half,
            classes=classes, agnostic_nms=agnostic_nms,
            augment=augment, visualize=visualize,
            max_det=max_det, line_thickness=line_thickness, dnn=dnn,
        )
        vars(self).update(options)

# detectapi类，对detect函数修改
# API wrapper around YOLOv5: load the model once in __init__, call detect() per frame.
class detectapi:
    def __init__(self, weights, img_size=640):
        """Load the model and prepare everything needed for inference.

        Args:
            weights: path to a YOLOv5 checkpoint (e.g. 'weights/yolov5n.pt').
            img_size: requested inference size; rounded to a multiple of the
                model stride by check_img_size().
        """
        self.opt = simulation_opt(weights=weights, img_size=img_size)
        weights, imgsz = self.opt.weights, self.opt.img_size

        # Select the device: fall back to CPU when CUDA is unavailable so the
        # default device=0 setting does not crash on a CPU-only machine.
        use_cuda = self.opt.device != 'cpu' and torch.cuda.is_available()
        self.device = torch.device("cuda") if use_cuda else torch.device("cpu")
        # FP16 is only supported on CUDA (see module docstring), so the
        # effective half flag is gated on the device actually being CUDA.
        self.half = self.opt.half and use_cuda

        # Load the model.
        self.model = DetectMultiBackend(weights, device=self.device, dnn=self.opt.dnn, fp16=self.half)
        self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
        self.model = self.model.to(self.device)

        self.model = self.model.half() if self.half else self.model.float()  # FP16 only on CUDA

        self.imgsz = check_img_size(imgsz, s=self.stride)  # enforce stride multiple
        # One random BGR color per class (kept for external use; Annotator uses colors()).
        self.color = [[random.randint(0, 255) for _ in range(3)] for _ in self.names]

    def detect(self, source):
        """Run detection on a list of BGR images (np.ndarray, HWC).

        Only the FIRST image in `source` is processed — the loop returns after
        one iteration — so pass a single-image list (as main() does).

        Returns:
            The annotated image (BGR np.ndarray), or None if `source` is empty.

        Side effects:
            self.result_text holds the detections for the processed image as
            tuples (class_id, [x1, y1, x2, y2], confidence).
        """
        # MyLoadImages letterboxes each image to self.imgsz and yields
        # (CHW array, original image) pairs.
        dataset = MyLoadImages(source, img_size=self.imgsz, stride=self.stride)

        self.result_text = []
        for im, im0s in dataset:
            im = torch.from_numpy(im).to(self.device)     # zero-copy ndarray -> tensor
            im = im.half() if self.half else im.float()   # uint8 -> fp16/32
            im /= 255                                     # 0-255 -> 0.0-1.0
            if len(im.shape) == 3:
                im = im[None]                             # add batch dimension

            t1 = time.time()
            # Forward pass (visualize now honors the configured option).
            pred = self.model(im, augment=self.opt.augment, visualize=self.opt.visualize)

            # NMS: pred becomes a list with one (n, 6) tensor per image,
            # rows are (x1, y1, x2, y2, confidence, class).
            pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres,
                                       self.opt.classes, self.opt.agnostic_nms,
                                       max_det=self.opt.max_det)
            t2 = time.time()

            im0 = im0s.copy()
            # Annotator draws labeled boxes; result image comes from .result().
            annotator = Annotator(im0, line_width=self.opt.line_thickness, example=str(self.names))
            result_text = []
            for det in pred:
                if len(det):
                    # Rescale boxes from the letterboxed size back to im0's size.
                    det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
                    for *xyxy, conf, cls in reversed(det):
                        c = int(cls)  # integer class id
                        label = f'{self.names[c]} {conf:.2f}'
                        result_text.append((c, [int(v.item()) for v in xyxy], conf.item()))
                        annotator.box_label(xyxy, label, color=colors(c, True))

            # Expose detections to callers instead of silently discarding them.
            self.result_text = result_text
            im0 = annotator.result()
            print(f'Done. ({t2 - t1:.3f}s)')
            return im0

def main():
    """Open a video, run detection frame by frame, display until 'q' is pressed."""
    cap = cv2.VideoCapture(r'D:/code/yolov5_det/1.MP4')   # source
    det = detectapi(weights="weights/yolov5n.pt")         # weights
    try:
        while True:
            rec, img = cap.read()
            if not rec:
                # End of stream or read failure: img is None here, so stop
                # instead of crashing inside cv2.resize.
                break
            img = cv2.resize(img, (640, 480))
            img_out = det.detect([img])
            cv2.imshow('src', img_out)
            if cv2.waitKey(1) == ord('q'):
                break
    finally:
        cap.release()            # free the capture device
        cv2.destroyAllWindows()  # close the display window


# Script entry point: run the demo loop only when executed directly,
# not when this module is imported as an API.
if __name__ == "__main__":
    main()


