#该程序加载 yolov5 的 ONNX 模型进行目标检测，并将识别结果在图像上进行标注
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2

# Class labels for the author's model (21 classes: s1 .. s21).
# NOTE: must be a list, not one comma-joined string — otherwise len(LABELS)
# counts characters and LABELS[classID] returns a single character.
LABELS = "s1,s2,s3,s4,s5,s6,s7,s8,s9,s10,s11,s12,s13,s14,s15,s16,s17,s18,s19,s20,s21".split(",")
np.random.seed(666)
# One random BGR color per class, used when drawing boxes/labels.
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
# Load the YOLOv5 network exported to ONNX.
net = cv2.dnn.readNetFromONNX("train_best.onnx")
image = cv2.imread('test.png')
print(image.shape)
(H, W) = image.shape[:2]
# Shrink to a quarter of the original size before inference.
# cv2.resize takes (width, height); the interpolation flag must be passed by
# keyword — positionally it would be interpreted as the `dst` argument.
image_copy = cv2.resize(image, (int(W / 4), int(H / 4)), interpolation=cv2.INTER_AREA)
layer = net.getUnconnectedOutLayersNames()

# Build a blob from the (downscaled) image: scale pixel values to [0, 1],
# resize to the network's 640x640 input, and swap BGR -> RGB.
blob = cv2.dnn.blobFromImage(image_copy, 1 / 255.0, (640, 640), swapRB=True, crop=False)
# Feed the blob to the network exactly once (the original called setInput twice).
net.setInput(blob)

# Names of the unconnected output layers; forwarding to them yields the
# per-layer detection tensors.
outInfo = net.getUnconnectedOutLayersNames()
start = time.time()
detections = net.forward(outInfo)
# Report how long the forward pass (detection) took.
print("detection took {:.6f} seconds".format(time.time() - start))

# Accumulators for every candidate detection across all output layers.
boxes = []
confidences = []
classIDs = []

# A YOLOv5 output row is laid out as:
#   [center_x, center_y, width, height, objectness, class_score_0, ..., class_score_N]
# Loop over each output layer, then over every candidate box in that layer.
for output in detections:
    # output has shape (batch, num_boxes, 5 + num_classes); flatten the batch
    # axis so we actually visit every box, not just the first one.
    for detection in output.reshape(-1, output.shape[-1]):
        scores = detection[5:]
        # Class id is the argmax over the class scores (index 4 is objectness,
        # not a class id).
        classID = int(np.argmax(scores))
        # Combined confidence = objectness * best class score (a scalar,
        # so the threshold comparison below is well defined).
        confidence = float(detection[4] * scores[classID])
        # Filter out weak predictions.
        if confidence > 0.25:
            (centerX, centerY, width, height) = detection[0:4]
            # Convert center coords to the top-left corner, then scale back up
            # by 4 because the image was shrunk to quarter size before inference.
            # NOTE(review): this assumes the quarter-size image matches the
            # 640x640 network input; otherwise boxes should be rescaled by
            # (W/640, H/640) instead — confirm against the input image size.
            x = int((centerX - width / 2) * 4)
            y = int((centerY - height / 2) * 4)
            boxes.append([x, y, int(width * 4), int(height * 4)])
            confidences.append(confidence)
            classIDs.append(classID)

# Non-maximum suppression ONCE over all collected candidates (running it per
# detection, as before, repeatedly suppressed and re-drew partial results).
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
print(idxs)
# Draw only the boxes that survived suppression (idxs may be empty).
if len(idxs) > 0:
    for i in np.array(idxs).flatten():
        # Box position and size in original-image coordinates.
        (x, y) = (boxes[i][0], boxes[i][1])
        (w, h) = (boxes[i][2], boxes[i][3])
        # Draw the bounding box and the "label: confidence" text.
        color = [int(c) for c in COLORS[classIDs[i]]]
        cv2.rectangle(image, (x, y), (x + w, y + h), color, 1, lineType=cv2.LINE_AA)
        text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
        cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1, lineType=cv2.LINE_AA)
cv2.imshow("Tag", image)
cv2.waitKey(0)