import os
import onnxruntime
import numpy as np
import cv2


class yolo_onnx():
    """YOLOv8-style detector backed by an ONNX Runtime session.

    Pipeline: letterbox resize -> normalize -> ONNX inference ->
    confidence filter + NMS -> rescale boxes back onto the original image.
    """

    def __init__(self, onnx_path, clas_names, conf_thres=0.25):
        """
        onnx_path:  path to the .onnx model file.
        clas_names: list of class-name strings, indexed by class id.
        conf_thres: minimum class confidence for a box to be kept.
        """
        # Create an inference session (the ONNX analogue of building a PyTorch model).
        # providers = ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider']
        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
        # providers = ['CPUExecutionProvider']
        self.ort_session = onnxruntime.InferenceSession(onnx_path, providers=providers)
        # print(self.ort_session.get_outputs()[0])  # output shape, e.g. [1, 84, 8400]
        # print(self.ort_session.get_inputs()[0])  # input shape, e.g. [1, 3, 640, 640]
        print(self.ort_session.get_providers())  # active providers, e.g. ['CUDAExecutionProvider', 'CPUExecutionProvider']
        # print(self.ort_session.get_inputs()[0])

        self.new_shape = self.ort_session.get_inputs()[0].shape[2:]  # model input size [height, width]
        self.clas_names = clas_names  # class names, indexed by class id
        self.conf_thres = conf_thres  # box confidence threshold
        self.color = (114, 114, 114)  # gray color used to fill the letterbox padding
        self.color_list = self.SetColor(len(self.clas_names))  # per-class BGR drawing colors

    def detect(self, image):
        """Run detection on a BGR image (numpy HWC).

        Returns (result, draw_img):
          result   -- list of dicts {'cls_id', 'conf', 'box': [x1, y1, w, h]}
          draw_img -- *image* with boxes and labels drawn on it (drawn in place).
        """
        img = image.copy()  # work on a copy so the letterboxing does not touch the caller's pixels

        # Pre-processing
        img, pad, scale = self.resize(img)  # letterbox to the model input size, e.g. (640, 640, 3)
        # BGR->RGB, HWC->CHW->NCHW, scaled to [0, 1]
        float_img = self.normalize(img)

        # Inference
        ort_inputs = {self.ort_session.get_inputs()[0].name: float_img}
        # ort_inputs = {"input": float_img}
        pred = self.ort_session.run(None, ort_inputs)[0]  # (1, 84, 8400)  cx,cy,w,h,class scores

        # Move the per-box axis before the channel axis
        # print(pred.shape)  # (1, 84, 8400)
        pred = pred.transpose(0, 2, 1)
        # print(pred.shape)  # (1, 8400, 84)

        # Post-processing
        # Returns one array per batch item; classes selects which class ids to keep (None = all)
        pred = self.non_max_suppression(pred, conf_thres=self.conf_thres, iou_thres=0.45, classes=None)[0]
        # print(pred.shape)  # (n, 6)  [x1, y1, x2, y2, confidence, class id]
        # [{'clas_id':0,'conf':0.5,'box':[x1, y1, w, h]},{...}]
        result = self.pred2result(pred, pad, scale, image.shape)

        # Draw the detections on the original image
        draw_img = self.draw_img(image, result)

        return result, draw_img

    def resize(self, image):
        """Letterbox *image* to the model input size without distorting it.

        Returns (img, [left, top], scale): the padded image, the left/top
        padding in pixels, and the uniform resize scale.
        """
        # print(self.new_shape)  # [640, 640]  ONNX model input size
        shape = image.shape[:2]  # source size [height, width]

        # Uniform scale so the image fits inside new_shape on both axes
        scale = min(self.new_shape[0] / shape[0], self.new_shape[1] / shape[1])
        img_new = cv2.resize(image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)  # (640, 640, 3)

        dh, dw = self.new_shape[0] - img_new.shape[0], self.new_shape[1] - img_new.shape[1]  # pixels still to pad

        left, top = dw // 2, dh // 2  # split the padding (roughly) evenly on each side
        right, bottom = dw - left, dh - top

        img = cv2.copyMakeBorder(img_new, top, bottom, left, right, cv2.BORDER_CONSTANT, value=self.color)  # pad borders

        return img, [left, top], scale  # padded image, [left, top] padding, scale factor

    def normalize(self, resize_img):
        """BGR->RGB, HWC->CHW, cast to float32 in [0, 1], add a batch dimension."""
        img = resize_img[:, :, ::-1].transpose(2, 0, 1).astype(np.float32)  # (3, 640, 640)

        img /= 255.0
        if img.ndim == 3:
            img = img[None]  # add batch dimension -> (1, 3, 640, 640)

        return img

    def pred2result(self, pred, pad, scale, img_shape):
        """Convert NMS output (n, 6) into a list of result dicts.

        pad/scale come from resize(); img_shape is the original image's shape,
        used to clip boxes. Output box format is [x1, y1, w, h] in original
        image coordinates.
        """
        # result_lists = []
        # for i, det in enumerate(pred):  # loop over each image's detections
        det = pred
        result_list = []
        if len(det):  # map boxes from the letterboxed image back onto the original image
            det[:, :4] = self.scale_coords(det[:, :4], pad, scale, img_shape)
            for *xyxy, conf, cls in reversed(det):  # one detection at a time
                result_dict = {}
                cls = int(cls)
                x1, y1 = [int(i) for i in xyxy[0:2]]
                w, h = int(xyxy[2] - xyxy[0]), int(xyxy[3] - xyxy[1])
                result_dict["cls_id"] = cls
                result_dict["conf"] = round(conf, 3)
                result_dict["box"] = [x1, y1, w, h]
                result_list.append(result_dict)

        return result_list

    def draw_img(self, img, result_box):
        """Draw each detection's rectangle and 'label_conf' text on *img* (in place)."""
        # Loop over every detected box
        for box_dict in result_box:
            clas_id = box_dict["cls_id"]
            label = self.clas_names[clas_id]
            conf = round(box_dict["conf"], 3)
            box = box_dict["box"]

            x, y, w, h = box

            cv2.rectangle(img, (x, y), (x + w, y + h), color=self.color_list[clas_id], thickness=3)

            # Class label + confidence just above the box
            # cv2.putText(img, label + f"_{area}", (x, y - 7), cv2.FONT_HERSHEY_SIMPLEX, 1, self.color_list[clas_id],3)
            cv2.putText(img, label + f"_{str(conf)}", (x, y - 7), cv2.FONT_HERSHEY_SIMPLEX, 1, self.color_list[clas_id],
                        3)

        return img

    def SetColor(self, num_classes):
        """Build a fixed palette of 125 (5*5*5) BGR colors for class drawing.

        NOTE(review): num_classes is accepted but the full 125-color palette is
        always returned; class ids index into it directly.
        """
        arr_b = [0, 64, 255, 192, 128]
        arr_g = [255, 64, 128, 192, 0]
        arr_r = [128, 192, 0, 64, 255]
        color_list = []
        for a in arr_b:
            for b in arr_g:
                for c in arr_r:
                    color_list.append([a,b,c])
        return color_list

    def nms(self, dets, iou_thresh):  # dets: (n, 5) [x1, y1, x2, y2, score]; iou_thresh: IoU threshold
        """Plain greedy NMS; returns the indices of the kept boxes."""
        boxes_area = (dets[:, 2] - dets[:, 0]) * (dets[:, 3] - dets[:, 1])  # area of every box (precomputed once)
        index = (-dets[:, -1]).argsort()  # box indices sorted by descending score
        keep = []  # indices of boxes surviving NMS

        def iou(box, boxes, box_area, boxes_area):  # one box vs. many boxes (areas passed in to avoid recomputation)
            xx1 = np.maximum(box[0], boxes[:, 0])  # numpy broadcasting
            yy1 = np.maximum(box[1], boxes[:, 1])
            xx2 = np.minimum(box[2], boxes[:, 2])
            yy2 = np.minimum(box[3], boxes[:, 3])

            w = np.maximum(0, xx2 - xx1)
            h = np.maximum(0, yy2 - yy1)

            inter = w * h  # intersection area
            ovr = inter / (box_area + boxes_area - inter)  # IoU = intersection / union

            return ovr

        while index.size > 0:  # while candidates remain
            i = index[0]  # highest-scoring remaining box
            keep.append(i)  # keep it
            idx = np.where(iou(dets[index[0]], dets[index[1:]],
                               boxes_area[index[0]], boxes_area[index[1:]]) <= iou_thresh)[0]  # boxes overlapping it no more than the threshold
            index = index[idx + 1]  # +1 because idx is relative to index[1:]

        keep = np.array(keep)

        return keep

    # prediction: (1, 84, 8400)  cx,cy,w,h,class scores
    def non_max_suppression(self, prediction, conf_thres=0.25, iou_thres=0.45, classes=None):
        """Filter raw predictions by confidence and run class-aware NMS.

        Returns a list (length = batch size) of (n, 6) arrays:
        [x1, y1, x2, y2, confidence, class id].
        """
        min_wh, max_wh = 2, 4096  # max_wh offsets boxes per class so a single plain NMS pass separates classes

        output = [np.zeros((0, 6))] * prediction.shape[0]  # one entry per batch item

        for xi, x in enumerate(prediction):  # image index, image inference — iterate over the batch
            if not x.shape[0]:  # no candidate boxes, skip
                continue

            box = self.xywh2xyxy(x[:, :4])  # cx,cy,w,h ---> x1,y1,x2,y2  (74, 4)

            conf = np.expand_dims(x[:, 4:].max(1), 1)  # best class score per box (np.expand_dims keeps the column axis)
            j = np.expand_dims(x[:, 4:].argmax(1), 1)  # index of that best class, i.e. the class id

            # [x1, y1, x2, y2, confidence, class id], confidence-filtered
            x = np.concatenate((box, conf, j.astype(conf.dtype)), 1)[conf.reshape(-1) > conf_thres]  # (71, 6)

            if classes is not None:
                x = x[(x[:, 5:6] == np.array(classes)).any(1)]  # keep only the requested class ids

            if not x.shape[0]:  # nothing left after filtering, skip
                continue

            c = x[:, 5:6] * max_wh  # class id * large offset added to coords: different classes can never overlap
            boxes, scores = x[:, :4] + c, x[:, 4:5]  # offset coordinates, scores
            dets = np.concatenate((boxes, scores), 1)  # (71, 5)

            i = self.nms(dets, iou_thres)  # indices surviving NMS, e.g. (5,)

            output[xi] = x[i]

        return output

    def xywh2xyxy(self, x):
        # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
        y = np.copy(x)
        y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
        y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
        y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
        y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
        return y

    def scale_coords(self, boxes, pad, scale, image_shape):  # pad: [left, top], as returned by resize()
        """Map xyxy boxes from the letterboxed image back onto the original image.

        Mutates *boxes* in place and also returns it.
        """
        boxes[:, [0, 2]] -= pad[0]  # remove left padding from x coordinates
        boxes[:, [1, 3]] -= pad[1]  # remove top padding from y coordinates

        boxes[:, :4] /= scale  # undo the uniform resize

        def clip_coords(boxes, img_shape):  # clamp coordinates to the image bounds
            # Clip bounding xyxy bounding boxes to image shape (height, width)
            boxes[:, 0] = boxes[:, 0].clip(0, img_shape[1])  # x1
            boxes[:, 1] = boxes[:, 1].clip(0, img_shape[0])  # y1
            boxes[:, 2] = boxes[:, 2].clip(0, img_shape[1])  # x2
            boxes[:, 3] = boxes[:, 3].clip(0, img_shape[0])  # y2

        clip_coords(boxes, image_shape)

        return boxes


if __name__ == '__main__':
    import time
    import matplotlib.pyplot as plt
    import yaml

    onnx_path = r"./weights/yolov8s.onnx"
    images_path = r"data\coco128\images\train2017"
    label_path = r"./coco_labels.yaml"

    # Load the class-name list from the YAML label file.
    # Fix: use a context manager (the original left the file handle open on
    # exception) and an explicit encoding.
    with open(label_path, "r", encoding="utf-8") as f:
        clas_names = list(yaml.safe_load(f)["names"].values())

    # clas_names = ["mt"]

    yolo_detect = yolo_onnx(onnx_path, clas_names, conf_thres=0.30)

    for img_name in os.listdir(images_path):
        # Only process JPEG files (case-insensitive, unlike the old split('.') check).
        if not img_name.lower().endswith(".jpg"):
            continue
        img_path = os.path.join(images_path, img_name)

        # imdecode + np.fromfile handles non-ASCII paths; flag 1 forces 3-channel BGR.
        img = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), 1)

        t0 = time.time()
        # result: [{'cls_id': 0, 'conf': 0.409, 'box': [x1, y1, w, h]}, ...]
        result, out_img = yolo_detect.detect(img.copy())

        t1 = time.time()
        print("time: ", (t1 - t0))  # e.g. 0.45675015449523926

        print(result)
        print("==============")
        # Show source image and annotated result side by side.
        plt.subplot(1, 2, 1), plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        plt.subplot(1, 2, 2), plt.imshow(cv2.cvtColor(out_img, cv2.COLOR_BGR2RGB))
        plt.show()