#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/1/17 9:42
# @Author  : Zangzihan
# @File    : predict_trt.py
# @Description : TensorRT inference for a rotated-box YOLO detector.
import colorsys
import numpy as np
import time
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import cv2
from PIL import Image, ImageDraw, ImageFont
import os
from utils.utils import resize_image
from utils.utils_rbox import *


def resize_image_(image, size, letterbox_image):
    """Resize an (H, W, 3) uint8 image to ``size`` = (h, w).

    When ``letterbox_image`` is true the aspect ratio is preserved and the
    remainder is padded with gray (value 128); otherwise the image is
    stretched to fit exactly.
    """
    src_h, src_w = image.shape[:2]
    dst_h, dst_w = size
    if not letterbox_image:
        return cv2.resize(image, (dst_w, dst_h), interpolation=cv2.INTER_CUBIC)
    ratio = min(dst_w / src_w, dst_h / src_h)
    new_w = int(src_w * ratio)
    new_h = int(src_h * ratio)
    resized = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
    # Gray canvas, content centered.
    canvas = np.full((dst_h, dst_w, 3), 128, dtype=np.uint8)
    top = (dst_h - new_h) // 2
    left = (dst_w - new_w) // 2
    canvas[top:top + new_h, left:left + new_w, :] = resized
    return canvas


def preprocess_input(image):
    """Scale pixel values from [0, 255] into [0, 1], modifying ``image`` in place."""
    image /= 255.0
    return image


def get_classes(classes_path):
    """Read one class name per line from ``classes_path``.

    Returns a tuple ``(class_names, num_classes)``.
    """
    with open(classes_path, encoding='utf-8') as handle:
        names = [line.strip() for line in handle]
    return names, len(names)


def get_anchors(anchors_path):
    """Load YOLO anchor sizes from a one-line, comma-separated text file.

    Returns ``(anchors, num_anchors)`` where ``anchors`` is an (N, 2) float
    array of (width, height) pairs.
    """
    with open(anchors_path, encoding='utf-8') as handle:
        raw = handle.readline()
    values = np.array([float(v) for v in raw.split(',')]).reshape(-1, 2)
    return values, len(values)


class DecodeBox():
    """Decode raw rotated-box YOLO head outputs and apply rotated NMS.

    Each anchor's channel layout is [x, y, w, h, angle, obj_conf, class
    scores...], hence bbox_attrs = 6 + num_classes.  All math is numpy-only
    (no torch).
    """

    def __init__(self, anchors, num_classes, input_shape, anchors_mask=[[6, 7, 8], [3, 4, 5], [0, 1, 2]]):
        # anchors:      (num_anchors, 2) anchor (w, h) sizes in input-image pixels
        # num_classes:  number of object classes
        # input_shape:  network input size as (height, width)
        # anchors_mask: anchor indices used by each output scale
        #               (NOTE(review): mutable default argument -- harmless
        #               here because it is only read, never mutated)
        super(DecodeBox, self).__init__()
        self.anchors = anchors
        self.num_classes = num_classes
        self.bbox_attrs = 6 + num_classes
        self.input_shape = input_shape
        # -----------------------------------------------------------#
        #   The 13x13 feature map corresponds to anchors [142, 110], [192, 243], [459, 401]
        #   The 26x26 feature map corresponds to anchors [36, 75], [76, 55], [72, 146]
        #   The 52x52 feature map corresponds to anchors [12, 16], [19, 36], [40, 28]
        # -----------------------------------------------------------#
        self.anchors_mask = anchors_mask

    def decode_box(self, inputs):
        """Map raw head tensors into rows of [x, y, w, h, theta, conf, cls...].

        inputs: one array per scale, each shaped
        (batch, len(mask) * bbox_attrs, grid_h, grid_w).  Returns a list of
        arrays shaped (batch, grid_h * grid_w * len(mask), 6 + num_classes);
        box coordinates are normalized by the grid size, theta is in radians.
        """
        outputs = []
        for i, input in enumerate(inputs):
            # -----------------------------------------------#
            #   There are three inputs; e.g. with batch_size = 1
            #   and 80 classes their shapes would be
            #   batch_size, 3 * (5 + 1 + 80), 20, 20
            #   batch_size, 255, 40, 40
            #   batch_size, 255, 80, 80
            # -----------------------------------------------#
            batch_size = input.shape[0]
            input_height = input.shape[2]
            input_width = input.shape[3]

            # -----------------------------------------------#
            #   For a 640x640 input:
            #   stride_h = stride_w = 32, 16, 8
            # -----------------------------------------------#
            stride_h = self.input_shape[0] / input_height
            stride_w = self.input_shape[1] / input_width
            # -------------------------------------------------#
            #   scaled_anchors are expressed in feature-map units
            # -------------------------------------------------#
            scaled_anchors = [(anchor_width / stride_w, anchor_height / stride_h) for anchor_width, anchor_height in
                              self.anchors[self.anchors_mask[i]]]

            # -----------------------------------------------#
            #   Reshape to (batch, anchors, grid_h, grid_w, bbox_attrs)
            # -----------------------------------------------#
            prediction = input.reshape(batch_size, len(self.anchors_mask[i]), self.bbox_attrs, input_height,
                                       input_width)
            prediction = np.transpose(prediction, (0, 1, 3, 4, 2))
            # -----------------------------------------------#
            #   Sigmoid of the box-center offsets
            # -----------------------------------------------#
            x = 1 / (1 + np.exp(-prediction[..., 0]))
            y = 1 / (1 + np.exp(-prediction[..., 1]))
            # -----------------------------------------------#
            #   Sigmoid of the width/height adjustments
            # -----------------------------------------------#
            w = 1 / (1 + np.exp(-prediction[..., 2]))
            h = 1 / (1 + np.exp(-prediction[..., 3]))
            # -----------------------------------------------#
            #   Rotation angle (sigmoid; remapped to (-pi/2, pi/2) below)
            # -----------------------------------------------#
            angle = 1 / (1 + np.exp(-prediction[..., 4]))
            # -----------------------------------------------#
            #   Objectness confidence
            # -----------------------------------------------#
            conf = 1 / (1 + np.exp(-prediction[..., 5]))
            # -----------------------------------------------#
            #   Per-class confidences
            # -----------------------------------------------#
            pred_cls = 1 / (1 + np.exp(-prediction[..., 6:]))

            # ----------------------------------------------------------#
            #   Build the grid of cell top-left corners (prior centers),
            #   broadcast to the (batch, 3, grid_h, grid_w) shape of x/y
            # ----------------------------------------------------------#
            grid_x = np.linspace(0, input_width - 1, input_width)
            grid_x = np.tile(grid_x, (input_height, 1))
            grid_x = np.tile(grid_x, (batch_size * len(self.anchors_mask[i]), 1, 1)).reshape(x.shape)

            grid_y = np.linspace(0, input_height - 1, input_height)
            grid_y = np.tile(grid_y, (input_width, 1)).T
            grid_y = np.tile(grid_y, (batch_size * len(self.anchors_mask[i]), 1, 1)).reshape(y.shape)

            # Broadcast anchor widths/heights to the same shape as w/h.
            scaled_anchors = np.array(scaled_anchors)
            anchor_w = scaled_anchors[:, 0:1]
            anchor_h = scaled_anchors[:, 1:2]
            anchor_w = np.tile(anchor_w, (batch_size, 1)).reshape(1, -1, 1)
            anchor_w = np.tile(anchor_w, (1, 1, input_height * input_width)).reshape(w.shape)
            anchor_h = np.tile(anchor_h, (batch_size, 1)).reshape(1, -1, 1)
            anchor_h = np.tile(anchor_h, (1, 1, input_height * input_width)).reshape(h.shape)

            # ----------------------------------------------------------#
            #   Adjust the priors using the predictions: first shift the
            #   prior center, then scale the prior width/height.
            #   x 0 ~ 1 => 0 ~ 2 => -0.5 ~ 1.5  (each cell predicts within
            #   y 0 ~ 1 => 0 ~ 2 => -0.5 ~ 1.5   a limited range around it)
            #   w 0 ~ 1 => 0 ~ 2 => 0 ~ 4       (anchor w/h can scale 0-4x)
            #   h 0 ~ 1 => 0 ~ 2 => 0 ~ 4
            # ----------------------------------------------------------#
            pred_boxes = np.zeros(prediction[..., :4].shape, dtype='float32')
            pred_boxes[..., 0] = x * 2. - 0.5 + grid_x
            pred_boxes[..., 1] = y * 2. - 0.5 + grid_y
            pred_boxes[..., 2] = (w * 2) ** 2 * anchor_w
            pred_boxes[..., 3] = (h * 2) ** 2 * anchor_h
            # Angle: sigmoid output mapped from (0, 1) to (-pi/2, pi/2) radians.
            pred_theta = (angle - 0.5) * np.pi

            # ----------------------------------------------------------#
            #   Normalize box coordinates to 0..1 by the grid size
            # ----------------------------------------------------------#
            _scale = np.array([input_width, input_height, input_width, input_height]).astype('float32')
            output = np.concatenate(
                (pred_boxes.reshape(batch_size, -1, 4) / _scale, pred_theta.reshape(batch_size, -1, 1),
                 conf.reshape(batch_size, -1, 1), pred_cls.reshape(batch_size, -1, self.num_classes)), -1)
            outputs.append(output)
        return outputs

    def non_max_suppression(self, prediction, num_classes, input_shape, image_shape, letterbox_image, conf_thres=0.5,
                            nms_thres=0.4):
        """Confidence-filter the decoded predictions and run rotated NMS.

        prediction: (batch_size, num_anchors, 6 + num_classes).  Returns a
        per-image list; each entry is None or an (n, 8) array of
        [x, y, w, h, angle, obj_conf, class_conf, class_pred] with the box
        rescaled to original-image pixels by yolo_correct_boxes.
        """

        output = [None for _ in range(len(prediction))]
        for i, image_pred in enumerate(prediction):
            # ----------------------------------------------------------#
            #   Max over the class scores.
            #   class_conf  [num_anchors, 1]    best class confidence
            #   class_pred  [num_anchors, 1]    best class index
            # ----------------------------------------------------------#
            class_conf = np.max(image_pred[:, 6:6 + num_classes], axis=1, keepdims=True)
            class_pred = np.argmax(image_pred[:, 6:6 + num_classes], axis=1)
            class_pred = np.expand_dims(class_pred, axis=1)

            # ----------------------------------------------------------#
            #   First-round filtering on obj_conf * class_conf
            # ----------------------------------------------------------#
            conf_mask = (image_pred[:, 5] * class_conf[:, 0] >= conf_thres).squeeze()
            image_pred = image_pred[conf_mask]
            class_conf = class_conf[conf_mask]
            class_pred = class_pred[conf_mask]
            if not image_pred.shape[0]:
                continue
            # -------------------------------------------------------------------------#
            #   detections  [num_anchors, 8]
            #   columns: x, y, w, h, angle, obj_conf, class_conf, class_pred
            # -------------------------------------------------------------------------#
            detections = np.concatenate((image_pred[:, :6], class_conf, class_pred), 1)

            # ------------------------------------------#
            #   All classes present among the detections
            # ------------------------------------------#
            unique_labels = np.unique(detections[:, -1])

            for c in unique_labels:
                # ------------------------------------------#
                #   Detections of this class only
                # ------------------------------------------#
                detections_class = detections[detections[:, -1] == c]

                # ------------------------------------------#
                #   Rotated NMS via cv2.dnn.NMSBoxesRotated; the angle is
                #   converted from radians to degrees for OpenCV.
                #   NOTE(review): the 3rd argument is OpenCV's
                #   score_threshold, re-using conf_thres here.
                # ------------------------------------------#
                bboxes = [[[bbox[0], bbox[1]], [bbox[2], bbox[3]], bbox[4] * 180 / np.pi] for bbox in
                          detections_class[:, :5]]
                scores = [float(score) for score in detections_class[:, 5] * detections_class[:, 6]]
                indices = cv2.dnn.NMSBoxesRotated(bboxes, scores, conf_thres, nms_thres)
                max_detections = detections_class[indices.flatten()]
                # Accumulate survivors across classes for this image.
                output[i] = max_detections if output[i] is None else np.concatenate((output[i], max_detections))

            if output[i] is not None:
                output[i][:, :5] = self.yolo_correct_boxes(output[i], input_shape, image_shape, letterbox_image)
        return output

    def yolo_correct_boxes(self, output, input_shape, image_shape, letterbox_image):
        """Map normalized (letterboxed) boxes back to original-image pixels.

        output: (n, >=5) rows of [x, y, w, h, angle, ...] normalized to 0..1;
        input_shape/image_shape are (height, width).  Returns an (n, 5) array
        of [x, y, w, h, angle] in image pixels.
        """
        # -----------------------------------------------------------------#
        #   y/x (h/w) are put first so the boxes can be multiplied directly
        #   by the (height, width) shape arrays.
        # -----------------------------------------------------------------#
        box_xy = output[..., 0:2]
        box_wh = output[..., 2:4]
        angle = output[..., 4:5]
        box_yx = box_xy[..., ::-1]
        box_hw = box_wh[..., ::-1]
        input_shape = np.array(input_shape)
        image_shape = np.array(image_shape)

        if letterbox_image:
            # -----------------------------------------------------------------#
            #   offset is the shift of the valid image area relative to the
            #   top-left corner; new_shape is the letterboxed content size.
            # -----------------------------------------------------------------#
            new_shape = np.round(image_shape * np.min(input_shape / image_shape))
            offset = (input_shape - new_shape) / 2. / input_shape
            scale = input_shape / new_shape

            box_yx = (box_yx - offset) * scale
            # NOTE(review): box_hw is a reversed numpy *view* of box_wh, so
            # this in-place multiply also rescales box_wh (and `output`).
            box_hw *= scale

        box_xy = box_yx[:, ::-1]
        # NOTE(review): this reassignment of box_hw is never used afterwards;
        # the concatenation below uses box_wh, already scaled in place above.
        box_hw = box_wh[:, ::-1]

        # Scale normalized coordinates to image pixels (w by width, h by height).
        rboxes = np.concatenate([box_xy, box_wh, angle], axis=-1)
        rboxes[:, [0, 2]] *= image_shape[1]
        rboxes[:, [1, 3]] *= image_shape[0]
        return rboxes


class YOLO(object):
    """Rotated-box YOLO detector backed by a serialized TensorRT engine.

    Pre/post-processing is numpy-based; inference runs through pycuda using
    the binding-based TensorRT API (binding_is_input / get_binding_shape).
    """
    _defaults = {
        # --------------------------------------------------------------------------#
        #   To run your own trained model you MUST set model_path and classes_path!
        #   model_path points at the weight file under logs/; classes_path points
        #   at the class-list txt under model_data/.
        #
        #   After training, logs/ holds several weight files; pick one with a low
        #   validation loss.  A low validation loss does not guarantee a high mAP,
        #   only good generalization on the validation set.
        #   On shape mismatches, also check the model_path and classes_path used
        #   during training.
        # --------------------------------------------------------------------------#
        "model_path": '',
        # ---------------------------------------------------------------------#
        #   Input image size; must be a multiple of 32.
        # ---------------------------------------------------------------------#
        "input_shape": [640, 640],
        # ---------------------------------------------------------------------#
        #   Only predictions scoring above this confidence are kept.
        # ---------------------------------------------------------------------#
        "confidence": 0.5,
        # ---------------------------------------------------------------------#
        #   IoU threshold used by non-max suppression.
        # ---------------------------------------------------------------------#
        "nms_iou": 0.3,
        "anchors_path": 'model_data/yolo_anchors.txt',
        "anchors_mask": [[6, 7, 8], [3, 4, 5], [0, 1, 2]],
    }

    @classmethod
    def get_defaults(cls, n):
        # Returns the default value for attribute n; for an unknown name it
        # returns an error *string* rather than raising.
        if n in cls._defaults:
            return cls._defaults[n]
        else:
            return "Unrecognized attribute name '" + n + "'"

    # ---------------------------------------------------#
    #   Initialize YOLO
    # ---------------------------------------------------#
    def __init__(self, config_dict):
        # config_dict overrides _defaults entries; it must supply at least
        # classes_path (read below) in addition to any default overrides.
        self.__dict__.update(self._defaults)
        for name, value in config_dict.items():
            setattr(self, name, value)
            # NOTE(review): this also mutates the shared class-level _defaults
            # dict, so overrides leak into later instances -- confirm intended.
            self._defaults[name] = value

        # ---------------------------------------------------#
        #   Load class names and anchor boxes
        # ---------------------------------------------------#
        self.class_names, self.num_classes = get_classes(self.classes_path)
        self.anchors, self.num_anchors = get_anchors(self.anchors_path)
        self.bbox_util = DecodeBox(self.anchors, self.num_classes, (self.input_shape[0], self.input_shape[1]),
                                   self.anchors_mask)
        # ---------------------------------------------------#
        #   One distinct drawing color per class (evenly spaced HSV -> RGB)
        # ---------------------------------------------------#
        hsv_tuples = [(x / self.num_classes, 1., 1.) for x in range(self.num_classes)]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), self.colors))
        self.generate()

    # ---------------------------------------------------#
    #   Build the TensorRT execution context and I/O buffers
    # ---------------------------------------------------#
    def generate(self):
        # NOTE(review): uses the binding-based TensorRT API, which is
        # deprecated in newer TRT releases -- confirm the installed version.
        engine = self.load_engine(self.model_path)
        self.context = engine.create_execution_context()
        self.inputs, self.outputs, self.bindings = [], [], []
        self.stream = cuda.Stream()
        for binding in engine:
            # Allocate a host/device buffer pair for every engine binding.
            size = engine.get_binding_shape(binding)
            dtype = trt.nptype(engine.get_binding_dtype(binding))
            host_mem = np.empty(size, dtype=dtype)
            host_mem = np.ascontiguousarray(host_mem)
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            self.bindings.append(int(device_mem))
            if engine.binding_is_input(binding):
                self.inputs.append({'host': host_mem, 'device': device_mem})
            else:
                self.outputs.append({'host': host_mem, 'device': device_mem})

    def load_engine(self, engine_path):
        """Deserialize a TensorRT engine file from ``engine_path``."""
        TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
        with open(engine_path, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())

    def forward(self, img):
        """Run one inference pass; returns the list of host output arrays.

        NOTE(review): only inputs[0] is refreshed, so this assumes a
        single-input engine whose binding size matches img -- verify.
        """
        self.inputs[0]['host'] = np.ravel(img)
        # transfer data to the gpu
        for inp in self.inputs:
            cuda.memcpy_htod_async(inp['device'], inp['host'], self.stream)
        # run inference
        self.context.execute_async_v2(
            bindings=self.bindings,
            stream_handle=self.stream.handle)
        # fetch outputs from gpu
        for out in self.outputs:
            cuda.memcpy_dtoh_async(out['host'], out['device'], self.stream)
        # synchronize stream
        self.stream.synchronize()
        return [out['host'] for out in self.outputs]

    # ---------------------------------------------------#
    #   Detect a single image
    # ---------------------------------------------------#
    def detect_image(self, image):
        """Detect on one BGR (cv2-style) numpy image and draw the results.

        Returns the image with rotated boxes and labels drawn, converted
        back to BGR.
        """
        # ---------------------------------------------------#
        #   Height and width of the input image
        # ---------------------------------------------------#
        image_shape = np.array(np.shape(image)[0:2])
        # ---------------------------------------------------------#
        #   Convert to RGB here so grayscale/other inputs do not break
        #   prediction; the network only supports RGB input.
        # ---------------------------------------------------------#
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image_data = resize_image_(image, (self.input_shape[1], self.input_shape[0]), True)
        # ---------------------------------------------------------#
        #   Add the batch dimension:
        #   h, w, 3 => 3, h, w => 1, 3, h, w
        # ---------------------------------------------------------#
        image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0)
        # ---------------------------------------------------------#
        #   Run the network.  NOTE(review): outputs are reversed here --
        #   presumably the engine emits scales in the opposite order from
        #   anchors_mask; verify against the engine's binding order.
        # ---------------------------------------------------------#
        outputs = self.forward(image_data)[::-1]
        outputs = self.bbox_util.decode_box(outputs)
        # ---------------------------------------------------------#
        #   Stack boxes from all scales, then run rotated NMS
        # ---------------------------------------------------------#
        results = self.bbox_util.non_max_suppression(np.concatenate(outputs, axis=1), self.num_classes,
                                                     self.input_shape,
                                                     image_shape, True, conf_thres=self.confidence,
                                                     nms_thres=self.nms_iou)

        if results[0] is None:
            # NOTE(review): returns the RGB-converted image here, unlike the
            # BGR conversion at the end -- confirm intended.
            return image

        # Columns of results[0]: x, y, w, h, angle, obj_conf, class_conf, class_id
        top_label = np.array(results[0][:, 7], dtype='int32')
        top_conf = results[0][:, 5] * results[0][:, 6]
        top_rboxes = results[0][:, :5]

        # ---------------------------------------------------------#
        #   Draw the rotated boxes and labels
        # ---------------------------------------------------------#
        for i, c in list(enumerate(top_label)):
            predicted_class = self.class_names[int(c)]
            rbox = top_rboxes[i]
            score = top_conf[i]
            # cv2 RotatedRect form: ((cx, cy), (w, h), angle in degrees)
            rbox = ((rbox[0], rbox[1]), (rbox[2], rbox[3]), rbox[4] * 180 / np.pi)
            poly = cv2.boxPoints(rbox).astype(np.int32)
            # Label position: slightly above the polygon's top-left extreme.
            x, y = np.min(poly[:, 0]), np.min(poly[:, 1]) - 20
            cv2.polylines(image, [poly.reshape((-1, 1, 2))], True, self.colors[c], thickness=2)
            label = '{} {:.2f}'.format(predicted_class, score)
            cv2.putText(image, label, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), thickness=1)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        return image

    def detect_images(self, images, crop=False, count=False):
        """Batch detection; draws the results onto ``images`` and returns them.

        NOTE(review): unlike detect_image, this path appears to expect PIL
        Images (uses .size and ImageDraw) and assumes every image shares the
        shape of images[0] -- verify with the caller.  ``crop`` is unused.
        """
        image_shape = np.array(np.shape(images[0])[0:2])
        # ---------------------------------------------------#
        #   Preprocess each image into a batch tensor
        # ---------------------------------------------------#
        images_data = []
        for image in images:
            # ---------------------------------------------------------#
            #   Letterbox-resize (gray padding, aspect-preserving); a plain
            #   resize would also work but distorts the image.
            # ---------------------------------------------------------#
            image_data = resize_image(image, (self.input_shape[1], self.input_shape[0]), True)
            image_data = np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1))
            images_data.append(image_data)
        # batch_size, 3, h, w
        images_data = np.stack(images_data, 0)

        # NOTE(review): outputs are NOT reversed here, unlike detect_image --
        # confirm the expected scale order.
        outputs = self.forward(images_data)
        outputs = self.bbox_util.decode_box(outputs)

        results = self.bbox_util.non_max_suppression(np.concatenate(outputs, axis=1), self.num_classes,
                                                     self.input_shape,
                                                     image_shape, True, conf_thres=self.confidence,
                                                     nms_thres=self.nms_iou)
        # ---------------------------------------------------------#
        #   Font and border thickness scaled to the image size
        #   (NOTE(review): thickness is computed but never used)
        # ---------------------------------------------------------#
        font = ImageFont.truetype(font='model_data/simhei.ttf',
                                  size=np.floor(3e-2 * images[0].size[1] + 0.5).astype('int32'))
        thickness = int(max((images[0].size[0] + images[0].size[1]) // np.mean(self.input_shape), 1))
        for index, result in enumerate(results):
            if result is None:
                continue

            # Columns: x, y, w, h, angle, obj_conf, class_conf, class_id
            top_label = np.array(result[:, 7], dtype='int32')
            top_conf = result[:, 5] * result[:, 6]
            top_rboxes = result[:, :5]
            # rbox2poly comes from utils.utils_rbox (wildcard import);
            # presumably converts rotated boxes to corner polygons.
            top_polys = rbox2poly(top_rboxes)

            # ---------------------------------------------------------#
            #   Optional per-class counting
            # ---------------------------------------------------------#
            if count:
                print("top_label:", top_label)
                classes_nums = np.zeros([self.num_classes])
                for i in range(self.num_classes):
                    num = np.sum(top_label == i)
                    if num > 0:
                        print(self.class_names[i], " : ", num)
                    classes_nums[i] = num
                print("classes_nums:", classes_nums)
            # ---------------------------------------------------------#
            #   Draw the polygons and labels
            # ---------------------------------------------------------#
            for i, c in list(enumerate(top_label)):
                predicted_class = self.class_names[int(c)]
                poly = top_polys[i].astype(np.int32)
                score = top_conf[i]

                polygon_list = list(poly)
                label = '{} {:.2f}'.format(predicted_class, score)
                draw = ImageDraw.Draw(images[index])
                label = label.encode('utf-8')
                print(label, polygon_list)

                # Text anchored at the polygon's first vertex.
                text_origin = np.array([poly[0], poly[1]], np.int32)

                draw.polygon(xy=polygon_list, outline=self.colors[c])
                draw.text(text_origin, str(label, 'UTF-8'), fill=self.colors[c], font=font)
                del draw

        return images


def process(mode, model_path, classes_path, input_shape, confidence, nms_iou, video_path, video_save_path,
            videos_path_txt, dir_origin_path, dir_save_path, index=0):
    """Run TensorRT YOLO rotated-box inference in one of several modes.

    Args:
        mode:            'predict'     - interactive single-image prediction from stdin,
                         'video'       - single video file / camera stream detection,
                         'videos'      - multi-stream detection, sources listed in a txt file,
                         'fps'         - inference-speed benchmark on one image,
                         'dir_predict' - batch detection over a folder of images.
        model_path:      path to the serialized TensorRT engine.
        classes_path:    path to the class-names file.
        input_shape:     network input size, e.g. [256, 256].
        confidence:      detection confidence threshold.
        nms_iou:         IoU threshold used by NMS.
        video_path:      video source for mode='video' (file path, or 0 for camera).
        video_save_path: output video path for mode='video'; '' means do not save.
        videos_path_txt: txt file listing one video source per line (mode='videos').
        dir_origin_path: input image folder (mode='dir_predict').
        dir_save_path:   output folder for mode='dir_predict'; '' means display only.
        index:           worker index, used in log lines and window titles. Default 0.

    Returns:
        True on normal completion, False if any exception occurred (so a
        supervising process can detect failure and restart the worker).
    """
    try:
        yolo_config = {
            "model_path": model_path,
            "classes_path": classes_path,
            "input_shape": input_shape,
            "confidence": confidence,
            "nms_iou": nms_iou
        }
        yolo = YOLO(yolo_config)

        video_fps = 25.0            # fps written into the output video (mode='video')
        test_interval = 100         # number of timed inferences for mode='fps'
        fps_image_path = "img/test.jpg"

        if mode == "predict":
            # Interactive loop: read an image path from stdin, show detections.
            while True:
                img = input('Input image filename:')
                # cv2.imread returns None on failure; it never raises, so a
                # try/except around it cannot catch a bad path.
                image = cv2.imread(img)
                if image is None:
                    print('Open Error! Try again!')
                    continue
                r_image = yolo.detect_image(image)
                cv2.imshow('result', r_image)
                cv2.waitKey(0)

        elif mode == "video":
            # The outer loop re-opens the source whenever the stream ends
            # (reconnect behaviour for live feeds). ESC exits completely.
            stop = False
            while not stop:
                capture = cv2.VideoCapture(video_path)
                out = None
                if video_save_path != "":
                    fourcc = cv2.VideoWriter_fourcc(*'XVID')
                    size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                            int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
                    out = cv2.VideoWriter(video_save_path, fourcc, video_fps, size)

                # Probe one frame so a bad source fails fast with a clear error.
                ref, frame = capture.read()
                if not ref:
                    raise ValueError("未能正确读取摄像头（视频），请注意是否正确安装摄像头（是否正确填写视频路径）。")

                fps = 0.0
                t1 = time.time()
                count = 0
                while True:
                    ref, frame = capture.read()
                    if not ref:
                        break
                    frame = yolo.detect_image(frame)
                    count += 1
                    # Refresh the fps estimate every 2 seconds of wall time.
                    if time.time() - t1 > 2:
                        fps = count / 2.0
                        print("进程%d ：fps= %.2f" % (index, fps))
                        t1 = time.time()
                        count = 0
                    frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                    cv2.imshow(f"video_{index}", cv2.resize(frame, (640, 480)))
                    if out is not None:
                        out.write(frame)
                    if (cv2.waitKey(1) & 0xff) == 27:
                        # ESC: leave both loops instead of reconnecting forever.
                        stop = True
                        break

                capture.release()
                if out is not None:
                    print("Save processed video to the path :" + video_save_path)
                    out.release()
            cv2.destroyAllWindows()

        elif mode == "videos":
            sources = []
            if os.path.isfile(videos_path_txt):
                with open(videos_path_txt, 'r') as f:
                    sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
            # sources is always a list here; empty means the txt was missing/blank.
            if not sources:
                raise ValueError("视频文件路径读取错误")
            n = len(sources)
            captures = []
            for src in sources:
                capture = cv2.VideoCapture(src)
                ref, frame = capture.read()
                if not ref:
                    raise ValueError("未能正确读取视频，请注意是否正确填写视频路径 {}".format(src))
                captures.append(capture)
            # One finished-flag per stream. Must be 1-D: the former 2-D
            # np.zeros([1, n]) made run_over[i] = 1 set the whole row for
            # i == 0 and raise IndexError for i >= 1.
            run_over = np.zeros(n, dtype=np.uint8)
            fps = 0.0
            while True:
                t1 = time.time()
                frames = []
                ids = []
                if run_over.sum() == n:
                    break
                for cap_idx, capture in enumerate(captures):
                    ref, frame = capture.read()
                    if not ref:
                        run_over[cap_idx] = 1
                        continue
                    # BGR -> RGB, then to PIL Image for batched detection.
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    frames.append(Image.fromarray(np.uint8(frame)))
                    ids.append([cap_idx])
                frames = yolo.detect_images(frames)
                for frm_idx, frame in enumerate(frames):
                    # RGB -> BGR so OpenCV displays correct colours.
                    frame = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)
                    frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                    cv2.imshow("video{}".format(ids[frm_idx]), cv2.resize(frame, (640, 480)))
                # Exponential-moving-average fps over whole batches.
                fps = (fps + (1. / (time.time() - t1))) / 2
                print("fps= %.2f" % (fps))
                if (cv2.waitKey(1) & 0xff) == 27:
                    break

            print("Videos Detection Done!")
            for capture in captures:
                capture.release()
            cv2.destroyAllWindows()

        elif mode == "fps":
            img = cv2.imread(fps_image_path)
            tact_time = yolo.get_FPS(img, test_interval)
            print(str(tact_time) + ' seconds, ' + str(1 / tact_time) + 'FPS, @batch_size 1')

        elif mode == "dir_predict":
            from tqdm import tqdm

            cv2.namedWindow("show", cv2.WINDOW_NORMAL)
            image_exts = ('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff')
            for img_name in tqdm(os.listdir(dir_origin_path)):
                if not img_name.lower().endswith(image_exts):
                    continue
                image = cv2.imread(os.path.join(dir_origin_path, img_name))
                r_image = yolo.detect_image(image)
                if dir_save_path != "":
                    os.makedirs(dir_save_path, exist_ok=True)
                    # NOTE(review): only lowercase '.jpg' names are renamed to
                    # '.png'; other extensions keep their original name — confirm
                    # this is intended.
                    cv2.imwrite(os.path.join(dir_save_path, img_name.replace(".jpg", ".png")), r_image)
                cv2.imshow("show", r_image)
                if cv2.waitKey(30) == 27:   # ESC aborts the batch
                    break
            cv2.destroyAllWindows()
        else:
            raise AssertionError(
                "Please specify the correct mode: 'predict', 'video', 'videos', 'fps', 'dir_predict'.")
        return True
    except Exception as e:
        # Boundary handler: report and signal failure via the return value.
        # Exception (not BaseException) so Ctrl+C / SystemExit still propagate.
        print("predict_trt process failed: {}".format(e.args))
        return False


if __name__ == '__main__':
    # Batch-detect every image under dir_origin_path with the TensorRT engine;
    # empty video settings are placeholders required by process()'s signature.
    run_config = dict(
        mode="dir_predict",
        model_path="logs/20240116-shougang-cicada/shougang-cicada-240117.trt",
        classes_path="model_data/obb_classes_shougang_cicada_ir.txt",
        input_shape=[256, 256],
        confidence=0.7,
        nms_iou=0.2,
        video_path="",
        video_save_path="",
        videos_path_txt="",
        dir_origin_path=r"F:\0shougang\ir\Images_d",
        dir_save_path="",
    )
    process(**run_config)
