import yaml
from easydict import EasyDict as edict
import os
import time
import math
import torch
from PIL import ImageDraw
import struct  # get_image_size
import imghdr  # get_image_size
import configuration as cfg
import cv2
import numpy as np

def sigmoid(x):
    """Element-wise logistic sigmoid: maps any real value into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))

def softmax(x):
    """Row-wise softmax over axis 1, shifted by the row max for numerical stability."""
    shifted = x - np.max(x, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=1, keepdims=True)

def bbox_iou(box1, box2, x1y1x2y2=True):
    """
    IoU (intersection over union) between two single boxes.

    box1, box2: sequences of at least 4 numbers, in corner format
        (x1, y1, x2, y2) when x1y1x2y2 is True, or in center format
        (cx, cy, w, h) otherwise.
    return: IoU in [0, 1]; 0.0 when the boxes do not overlap.

    Fixed: removed the dead `carea = 0` assignment that was immediately
    shadowed or never used.
    """
    if x1y1x2y2:
        # corner format: (x1, y1, x2, y2)
        mx = min(box1[0], box2[0])
        Mx = max(box1[2], box2[2])
        my = min(box1[1], box2[1])
        My = max(box1[3], box2[3])
        w1 = box1[2] - box1[0]
        h1 = box1[3] - box1[1]
        w2 = box2[2] - box2[0]
        h2 = box2[3] - box2[1]
    else:
        # center format: (cx, cy, w, h)
        mx = min(box1[0] - box1[2] / 2.0, box2[0] - box2[2] / 2.0)
        Mx = max(box1[0] + box1[2] / 2.0, box2[0] + box2[2] / 2.0)
        my = min(box1[1] - box1[3] / 2.0, box2[1] - box2[3] / 2.0)
        My = max(box1[1] + box1[3] / 2.0, box2[1] + box2[3] / 2.0)
        w1 = box1[2]
        h1 = box1[3]
        w2 = box2[2]
        h2 = box2[3]
    uw = Mx - mx          # width of the union's bounding span
    uh = My - my
    cw = w1 + w2 - uw     # intersection width (<= 0 means no horizontal overlap)
    ch = h1 + h2 - uh     # intersection height
    if cw <= 0 or ch <= 0:
        return 0.0        # disjoint boxes

    area1 = w1 * h1
    area2 = w2 * h2
    carea = cw * ch
    uarea = area1 + area2 - carea
    return carea / uarea

def bbox_ious(boxes1, boxes2, x1y1x2y2=True):
    '''
    两种box的位置坐标格式
    '''
    if x1y1x2y2:#(左上点x，左上点y，右下点x，右下点y)
        mx = torch.min(boxes1[0], boxes2[0])
        Mx = torch.max(boxes1[2], boxes2[2])
        my = torch.min(boxes1[1], boxes2[1])
        My = torch.max(boxes1[3], boxes2[3])
        w1 = boxes1[2] - boxes1[0]
        h1 = boxes1[3] - boxes1[1]
        w2 = boxes2[2] - boxes2[0]
        h2 = boxes2[3] - boxes2[1]
    else:#(左上点x，左上点y，box的宽，box的高)
        mx = torch.min(boxes1[0] - boxes1[2] / 2.0, boxes2[0] - boxes2[2] / 2.0)
        Mx = torch.max(boxes1[0] + boxes1[2] / 2.0, boxes2[0] + boxes2[2] / 2.0)
        my = torch.min(boxes1[1] - boxes1[3] / 2.0, boxes2[1] - boxes2[3] / 2.0)
        My = torch.max(boxes1[1] + boxes1[3] / 2.0, boxes2[1] + boxes2[3] / 2.0)
        w1 = boxes1[2]
        h1 = boxes1[3]
        w2 = boxes2[2]
        h2 = boxes2[3]
    uw = Mx - mx
    uh = My - my
    cw = w1 + w2 - uw
    ch = h1 + h2 - uh
    mask = ((cw <= 0) + (ch <= 0) > 0)
    area1 = w1 * h1
    area2 = w2 * h2
    carea = cw * ch
    carea[mask] = 0
    uarea = area1 + area2 - carea
    return carea / uarea

def nms(boxes, nms_thresh):
    '''
    Non-maximum suppression for detection boxes.

    boxes: list of n boxes, each with 7 entries:
        4 coords (cx, cy, w, h — center plus size), 2 confidences, 1 class id.
    nms_thresh: IoU threshold; lower is stricter.
    return: list of the m surviving boxes.

    NOTE: suppression works by mutating box[4] (objectness) to 0 in place,
    so the input boxes are modified.
    '''
    if len(boxes) == 0:
        return boxes

    # Sort by (1 - objectness) ascending, i.e. highest confidence first.
    inverse_confs = torch.asarray([1 - det[4] for det in boxes])
    _, order = torch.sort(inverse_confs)

    kept = []
    for pos in range(len(boxes)):
        candidate = boxes[order[pos]]
        if candidate[4] <= 0:
            # Already suppressed by a higher-confidence box.
            continue
        kept.append(candidate)
        # Suppress every lower-ranked box overlapping too much with this one.
        for later in range(pos + 1, len(boxes)):
            other = boxes[order[later]]
            if bbox_iou(candidate, other, x1y1x2y2=False) > nms_thresh:
                other[4] = 0
    return kept

def convert2cpu(gpu_matrix):
    """Copy a tensor into a freshly allocated CPU float32 tensor."""
    result = torch.FloatTensor(gpu_matrix.size())
    result.copy_(gpu_matrix)
    return result

def convert2cpu_long(gpu_matrix):
    """Copy a tensor into a freshly allocated CPU int64 (long) tensor."""
    result = torch.LongTensor(gpu_matrix.size())
    result.copy_(gpu_matrix)
    return result

def get_region_boxes1(output, conf_thresh, num_classes, anchors, num_anchors,anchor_step,
                      only_objectness=1, validation=False):
    '''
    Decode one raw YOLO head output tensor into candidate detection boxes.
    ------------------------
    output: ndarray (batch, (5+num_classes)*num_anchors, h, w), e.g. (1,51,19,19),
            one of the three YOLOv4 output tensors
    conf_thresh: confidence threshold; boxes scoring below it are discarded
    num_classes: number of predicted classes
    num_anchors: number of anchor boxes used by this head
    anchors: flat list of anchor sizes for this head
             (assumed already scaled to grid units by the caller — TODO confirm)
    anchor_step: number of values per anchor in `anchors` (normally 2: w, h)
    ------------------------
    return: list with one entry per batch image; each entry is a list of boxes
            [cx, cy, w, h, det_conf, cls_conf, cls_id], coords normalized by
            the grid size (i.e. in [0, 1] relative to the feature map).
    '''

    batch = output.shape[0] # batch size, usually 1
    assert (output.shape[1] == (5 + num_classes) * num_anchors),"预测的输出张量第一维度错误" # channel count must equal num_anchors * (5 + num_classes)

    h = output.shape[2]
    w = output.shape[3]

    t0 = time.time()  # NOTE(review): timing stamp is never used
    all_boxes = []
    # ====== reshape the output tensor ===========
    output = output.reshape(batch*num_anchors,5+num_classes,h*w)    # (1,51,19,19) -> (3,17,361)
    output = output.transpose((1,0,2))                              # (3,17,361) -> (17,3,361)
    output = output.reshape(5+num_classes,batch*num_anchors*h*w)    # (17,3,361) -> (17,1083)
    # ===== grid-cell top-left coordinates ===============
    # ==================== build the grid-x vector ==================
    gridx = np.linspace(0,w-1,w)    # w evenly spaced values in [0, w-1]    (19)
    gridx = np.expand_dims(gridx,axis=0)    # add a leading dim             (1,19)
    gridx = gridx.repeat(h,axis=0)          # tile along dim 0              (19,19)
    gridx = np.expand_dims(gridx,axis=0)    # (19,19) -> (1,19,19)
    gridx = gridx.repeat(batch*num_anchors,axis=0)  # (1,19,19) -> (3,19,19)
    grid_x = gridx.reshape(batch*num_anchors*h*w)    # (3,19,19) -> (1083)
    # ================ build the grid-y vector ======================
    gridy = np.linspace(0,h-1,h)    # (19)       h evenly spaced values in [0, h-1]
    gridy = np.expand_dims(gridy,axis=0)    # (1,19)     add a leading dim
    gridy = gridy.repeat(w,axis=0)          # (19,19)    tile
    gridy = gridy.T                         # (19,19)    transpose so rows vary in y
    gridy = np.expand_dims(gridy,axis=0)    # (1,19,19)
    gridy = gridy.repeat(batch*num_anchors,axis=0)  # (3,19,19)  tile per anchor
    grid_y = gridy.reshape(batch*num_anchors*h*w)    # (1083)

    # box centers in grid units: sigmoid offset + cell index
    xs = sigmoid(output[0]) + grid_x
    ys = sigmoid(output[1]) + grid_y
    # ============ broadcast anchor widths/heights over the grid =============
    anchor_w = np.array(anchors).reshape((num_anchors, anchor_step))[:, 0]
    anchor_h = np.array(anchors).reshape((num_anchors, anchor_step))[:, 1]
    anchor_w = np.expand_dims(np.expand_dims(anchor_w, axis=1).repeat(batch, 1), axis=2).repeat(h * w, axis=1).reshape(
        batch * num_anchors * h * w)  # cuda()
    anchor_h = np.expand_dims(np.expand_dims(anchor_h, axis=1).repeat(batch, 1), axis=2).repeat(h * w, axis=1).reshape(
        batch * num_anchors * h * w)  # cuda()
    # box sizes in grid units: exponential scale times the anchor prior
    ws = np.exp(output[2]) * anchor_w
    hs = np.exp(output[3]) * anchor_h

    # objectness score per candidate
    det_confs = sigmoid(output[4])

    # per-class probabilities; keep only the best class per candidate
    cls_confs = softmax(output[5:5 + num_classes].transpose(1, 0))
    cls_max_confs = np.max(cls_confs, 1)
    cls_max_ids = np.argmax(cls_confs, 1)

    sz_hw = h * w
    sz_hwa = sz_hw * num_anchors
    for b in range(batch):
        boxes = []
        for cy in range(h):
            for cx in range(w):
                for i in range(num_anchors):
                    # flat index into the (17, batch*anchors*h*w) arrays above
                    ind = b * sz_hwa + i * sz_hw + cy * w + cx
                    det_conf = det_confs[ind]
                    if only_objectness:
                        conf = det_confs[ind]
                    else:
                        conf = det_confs[ind] * cls_max_confs[ind]

                    if conf > conf_thresh:
                        bcx = xs[ind]
                        bcy = ys[ind]
                        bw = ws[ind]
                        bh = hs[ind]
                        cls_max_conf = cls_max_confs[ind]
                        cls_max_id = cls_max_ids[ind]
                        # normalize coords by the grid size
                        box = [bcx / w, bcy / h, bw / w, bh / h, det_conf, cls_max_conf, cls_max_id]
                        if (not only_objectness) and validation:
                            # validation mode: also append every other class
                            # whose combined score clears the threshold
                            for c in range(num_classes):
                                tmp_conf = cls_confs[ind][c]
                                if c != cls_max_id and det_confs[ind] * tmp_conf > conf_thresh:
                                    box.append(tmp_conf)
                                    box.append(c)
                        boxes.append(box)
        all_boxes.append(boxes)

    return all_boxes

def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):
    """
    Draw detection boxes (and optional class labels) on a cv2 image.

    img: ndarray (H, W, 3)
    boxes: list of boxes [cx, cy, w, h, det_conf, cls_conf, cls_id] with
           coords normalized to [0, 1] relative to the image size
    savename: optional path; when given, the annotated image is written there
    class_names: optional list of class names; enables labels and the
                 per-class color palette
    color: optional fixed color tuple that overrides the per-class palette
    return: the annotated image

    Fixed: removed the redundant function-local `import cv2` (cv2 is already
    imported at module level).
    """
    colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]])

    def get_color(c, x, max_val):
        # Interpolate channel c between two neighboring palette entries.
        ratio = float(x) / max_val * 5
        i = int(math.floor(ratio))
        j = int(math.ceil(ratio))
        ratio = ratio - i
        r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
        return int(r * 255)

    width = img.shape[1]
    height = img.shape[0]
    for i in range(len(boxes)):
        box = boxes[i]
        # Convert normalized (cx, cy, w, h) to pixel corner coords.
        x1 = int((box[0] - box[2] / 2.0) * width)
        y1 = int((box[1] - box[3] / 2.0) * height)
        x2 = int((box[0] + box[2] / 2.0) * width)
        y2 = int((box[1] + box[3] / 2.0) * height)

        if color:
            rgb = color
        else:
            rgb = (255, 0, 0)
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            print('%s: %f' % (class_names[cls_id], cls_conf))
            classes = len(class_names)
            # Spread class ids over the palette deterministically.
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes)
            green = get_color(1, offset, classes)
            blue = get_color(0, offset, classes)
            if color is None:
                rgb = (red, green, blue)
            img = cv2.putText(img, class_names[cls_id], (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)
        img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, 1)
    if savename:
        print("save plot results to %s" % savename)
        cv2.imwrite(savename, img)
    return img

def plot_boxes(img, boxes, savename=None, class_names=None):
    """Draw detection boxes (and optional class labels) on a PIL image in place."""
    colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]])

    def get_color(c, x, max_val):
        # Interpolate channel c between two neighboring palette entries.
        ratio = float(x) / max_val * 5
        lo = int(math.floor(ratio))
        hi = int(math.ceil(ratio))
        frac = ratio - lo
        mixed = (1 - frac) * colors[lo][c] + frac * colors[hi][c]
        return int(mixed * 255)

    width, height = img.width, img.height
    draw = ImageDraw.Draw(img)
    for box in boxes:
        # Normalized (cx, cy, w, h) -> pixel corner coordinates.
        x1 = (box[0] - box[2] / 2.0) * width
        y1 = (box[1] - box[3] / 2.0) * height
        x2 = (box[0] + box[2] / 2.0) * width
        y2 = (box[1] + box[3] / 2.0) * height

        rgb = (255, 0, 0)
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            print('%s: %f' % (class_names[cls_id], cls_conf))
            classes = len(class_names)
            offset = cls_id * 123457 % classes
            rgb = (get_color(2, offset, classes),
                   get_color(1, offset, classes),
                   get_color(0, offset, classes))
            draw.text((x1, y1), class_names[cls_id], fill=rgb)
        draw.rectangle([x1, y1, x2, y2], outline=rgb)
    if savename:
        print("save plot results to %s" % savename)
        img.save(savename)
    return img

def read_truths(lab_path):
    """
    Load ground-truth boxes from a label file.

    lab_path: path to a whitespace-separated label file with 5 values per row
    return: ndarray of shape (n, 5); an empty array when the file is
            missing or empty
    """
    if not os.path.exists(lab_path):
        return np.array([])
    if os.path.getsize(lab_path):
        truths = np.loadtxt(lab_path)
        # fixed: `truths.size / 5` is a float in Python 3 and makes reshape
        # raise; integer division also keeps the single-row case working
        truths = truths.reshape(truths.size // 5, 5)
        return truths
    else:
        return np.array([])

def read_truths_args(lab_path, min_box_scale):
    """Load truths from a label file, dropping rows whose width (col 3) is below min_box_scale."""
    truths = read_truths(lab_path)
    kept = [[row[0], row[1], row[2], row[3], row[4]]
            for row in truths
            if row[3] >= min_box_scale]
    return np.array(kept)

def load_class_names(namesfile):
    """Read one class name per line from namesfile, stripping trailing whitespace."""
    with open(namesfile, 'r') as fp:
        return [line.rstrip() for line in fp]

def image2torch(img):
    """Convert a PIL RGB image into a normalized (1, 3, H, W) float tensor."""
    width, height = img.width, img.height
    tensor = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
    # (H*W*3,) -> (H, W, 3) -> (3, H, W), then add the batch dimension.
    tensor = tensor.view(height, width, 3).transpose(0, 1).transpose(0, 2).contiguous()
    tensor = tensor.view(1, 3, height, width)
    return tensor.float().div(255.0)

def read_data_cfg(datacfg):
    """
    Parse a darknet-style data config file into a dict of strings.

    Lines have the form `key = value`. Blank lines, `#` comment lines, and
    lines without '=' are skipped (the original crashed on comments and on
    values containing '=').

    datacfg: path to the config file
    return: dict of option strings, with defaults for 'gpus' and 'num_workers'
    """
    options = dict()
    options['gpus'] = '0,1,2,3'
    options['num_workers'] = '10'
    with open(datacfg, 'r') as fp:
        lines = fp.readlines()

    for line in lines:
        line = line.strip()
        if line == '' or line.startswith('#') or '=' not in line:
            continue
        # Split on the first '=' only so values may themselves contain '='.
        key, value = line.split('=', 1)
        options[key.strip()] = value.strip()
    return options

def scale_bboxes(bboxes, width, height):
    """Scale normalized box coordinates to pixels on a deep copy; the input is untouched."""
    import copy
    scaled = copy.deepcopy(bboxes)
    for det in scaled:
        det[0] *= width
        det[1] *= height
        det[2] *= width
        det[3] *= height
    return scaled

def file_lines(thefilepath):
    """
    Count newline characters in a file, reading in large chunks.

    thefilepath: path to the file
    return: number of '\\n' bytes (i.e. number of complete lines)

    Fixed: the file is opened in binary mode, so the needle must be the
    bytes literal b'\\n' — counting the str '\\n' raises TypeError on
    Python 3. Also uses `with` so the handle is closed on error.
    """
    count = 0
    with open(thefilepath, 'rb') as thefile:
        while True:
            buffer = thefile.read(8192 * 1024)
            if not buffer:
                break
            count += buffer.count(b'\n')
    return count

def get_classes(classes_path):
    '''
    Load class names (one per line) and return them with their count.
    '''
    with open(classes_path, encoding='utf-8') as f:
        class_names = [c.strip() for c in f.readlines()]
    print(f"类别数为{len(class_names)}")
    return class_names, len(class_names)

def get_image_size(fname):
    '''Determine the image type of fhandle and return its size.
    from draco

    Reads only the file header (PNG / GIF / JPEG), without decoding the
    image. Returns (width, height), or None when the format is not
    recognized or the header is malformed.
    NOTE(review): relies on the `imghdr` module, which is deprecated and
    removed in Python 3.13 — confirm the target Python version.
    '''
    with open(fname, 'rb') as fhandle:
        head = fhandle.read(24)
        if len(head) != 24:
            # Too short to hold any of the headers we know.
            return
        if imghdr.what(fname) == 'png':
            # Bytes 4-8 of a PNG hold the tail of the signature (\r\n\x1a\n).
            check = struct.unpack('>i', head[4:8])[0]
            if check != 0x0d0a1a0a:
                return
            # IHDR chunk: width and height as big-endian 32-bit ints.
            width, height = struct.unpack('>ii', head[16:24])
        elif imghdr.what(fname) == 'gif':
            # GIF logical screen size: little-endian 16-bit ints at offset 6.
            width, height = struct.unpack('<HH', head[6:10])
        elif imghdr.what(fname) == 'jpeg' or imghdr.what(fname) == 'jpg':
            try:
                fhandle.seek(0)  # Read 0xff next
                size = 2
                ftype = 0
                # Walk the marker segments until a SOFn (0xc0-0xcf) frame header.
                while not 0xc0 <= ftype <= 0xcf:
                    fhandle.seek(size, 1)
                    byte = fhandle.read(1)
                    while ord(byte) == 0xff:
                        byte = fhandle.read(1)
                    ftype = ord(byte)
                    size = struct.unpack('>H', fhandle.read(2))[0] - 2
                    # We are at a SOFn block
                fhandle.seek(1, 1)  # Skip `precision' byte.
                # SOFn stores height before width.
                height, width = struct.unpack('>HH', fhandle.read(4))
            except Exception:  # IGNORE:W0703
                # Truncated or corrupt JPEG stream: treat as unreadable.
                return
        else:
            return
        return width, height

def logging(message):
    """Print message prefixed with a local timestamp. (NOTE: shadows the stdlib `logging` module name.)"""
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print('%s %s' % (timestamp, message))

class YamlParser(edict):
    """
    Attribute-style configuration container backed by YAML files.

    This code comes from https://github.com/ZQPei/deep_sort_pytorch
    """
    def __init__(self, cfg_dict=None, config_file=None):
        """
        cfg_dict: optional initial dict of settings
        config_file: optional YAML file whose contents are merged on top
        """
        if cfg_dict is None:
            cfg_dict = {}

        if config_file is not None:
            assert (os.path.isfile(config_file))
            with open(config_file, 'r') as fo:
                # fixed: yaml.load without a Loader is removed in PyYAML 6
                # and unsafe on untrusted input; safe_load is the supported
                # equivalent for plain config data
                loaded = yaml.safe_load(fo.read())
                if loaded:  # an empty YAML file parses to None
                    cfg_dict.update(loaded)

        super(YamlParser, self).__init__(cfg_dict)

    def merge_from_file(self, config_file):
        '''
        Merge settings from a YAML file into this parser in place.
        config_file: path to a YAML file
        '''
        with open(config_file, 'r') as fo:
            file_yaml = yaml.safe_load(fo.read())  # parse into nested dicts
            if file_yaml:
                self.update(file_yaml)

def get_config(config_file=None):
    """Build a YamlParser, optionally pre-loaded from a YAML config file."""
    parser = YamlParser(config_file=config_file)
    return parser


def resize_image(image, size):
    '''
    Resize a cv2 image to `size`, optionally preserving the aspect ratio.

    image: original cv2 image, shape (H, W, 3)
    size: target (h, w)
    When cfg.letterbox_image is set, the image is scaled without distortion
    and pasted centered onto a gray canvas; otherwise it is stretched to the
    target size directly.
    return: resized image (float canvas in the letterbox branch)
    '''
    ih, iw = image.shape[:2]
    h, w = size[:]
    if cfg.letterbox_image:
        scale = min(w / iw, h / ih)
        nw = int(iw * scale)
        nh = int(ih * scale)

        # Top-left corner of the pasted region (centered).
        paste_left_x = (w - nw) // 2
        paste_left_y = (h - nh) // 2
        # fixed: cv2.resize takes dsize as (width, height); the original
        # (nh, nw) order only produced the right shape for square targets
        image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_CUBIC)
        paste_img = np.ones((h, w, 3)) * 128  # gray canvas
        # Paste the scaled image into the center of the canvas.
        paste_img[paste_left_y:paste_left_y + nh, paste_left_x:paste_left_x + nw, :] = image[...]
    else:
        # fixed: same dsize-order bug — (w, h) is (width, height)
        paste_img = cv2.resize(image, (w, h), interpolation=cv2.INTER_CUBIC)
    return paste_img

class DetecteOneImage():
    def __init__(self):
        '''
        A detector object that runs object detection on a single cv2 image.

        Builds the YOLOv4 body, loads the checkpoint from cfg.pr_yolo_pth,
        and (when cfg.display is set) prepares one drawing color per class.
        '''
        super(DetecteOneImage, self).__init__()
        # ============ build the detector ================
        # fixed: get_classes was called twice (once for num_classes, once
        # for self.class_names); one call provides both
        self.class_names, num_classes = get_classes(cfg.classes_path)
        from pr_yolo.Models.yolo_body import YoloBody
        yolov4 = YoloBody(anchors_mask=cfg.anchors_mask,
                          num_classes=num_classes,
                          backbone=cfg.backbone,
                          pretrained=cfg.use_pre_backbone)
        if cfg.cuda:  # move to GPU, half precision
            import torch.backends.cudnn as cudnn
            yolov4 = yolov4.cuda().half()
            cudnn.benchmark = True
        # ============ load weights ======================
        checkpoint = torch.load(cfg.pr_yolo_pth)
        yolov4.load_state_dict(checkpoint['net'])
        # fixed: report the checkpoint actually loaded (was cfg.yolov4_weight)
        print(f"Load pth as pretrain:{cfg.pr_yolo_pth}")
        self.model = yolov4
        self.model.eval()
        self.strides = []  # computed on each forward pass from the output shapes
        # ============ visualization setup ================
        if cfg.display:
            # Assign a distinct color per class via evenly spaced hues.
            import colorsys
            hsv_tuples = [(x / cfg.num_classes, 1., 1.) for x in range(cfg.num_classes)]
            self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
            self.colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), self.colors))

    def detect_one_image(self, image):
        '''
        image: a raw cv2 (BGR) image
        -------------
        return:
            ndarray (m, 7); the first 4 values are (cx, cy, w, h) already
            scaled back to the original image size (cx, cy = box center),
            followed by 2 confidences and 1 class id.
        '''
        height, width = image.shape[:2]  # original image size
        img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; model expects RGB
        img_resized = resize_image(img, cfg.input_img_size)
        # (h, w, c) -> (1, 3, H, W), pixel values normalized to [0, 1]
        input_img = torch.from_numpy(img_resized.transpose(2, 0, 1))
        input_img = input_img.float().div(255.0).unsqueeze(0)

        img = input_img.cuda().half() if cfg.cuda else input_img
        outputs = self.model(img)  # inference
        # ==== compute strides ====
        # fixed: reset first — the original appended on every call, so the
        # list grew without bound across repeated detections
        self.strides = []
        for ele in outputs:
            f_h, f_w = ele.shape[2], ele.shape[3]
            self.strides.append(min(cfg.input_img_size[0] / f_h, cfg.input_img_size[1] / f_w))
        # ==== decode the feature maps into boxes, then NMS ====
        nms_outputs = self.decode_outputs(outputs)
        # ==== rescale coords back to the original image ====
        nms_outputs = np.asarray(nms_outputs)
        if len(nms_outputs) != 0:
            nms_outputs[:, 0] *= width
            nms_outputs[:, 1] *= height
            nms_outputs[:, 2] *= width
            nms_outputs[:, 3] *= height
        return nms_outputs

    def decode_outputs(self, outputs):
        '''
        Decode every output feature map into candidate boxes and run NMS
        on the merged result.
        outputs: iterable of raw head tensors from the model
        return: list of surviving boxes [cx, cy, w, h, det_conf, cls_conf, cls_id]
        '''
        boxes = []  # one decoded list per output head
        for i in range(len(outputs)):
            masked_anchors = []
            for m in cfg.anchors_mask[i]:
                masked_anchors.extend(cfg.anchors[m])
            # Express anchors in grid units of this feature map.
            masked_anchors = [anchor / self.strides[i] for anchor in masked_anchors]
            one_boxes = get_region_boxes1(output=outputs[i].data.cpu().numpy(),
                                          conf_thresh=cfg.min_confidence,
                                          num_classes=cfg.num_classes,
                                          anchors=masked_anchors,
                                          num_anchors=len(cfg.anchors_mask[i]),
                                          anchor_step=cfg.anchor_step)
            boxes.append(one_boxes)
        # Merge the per-head lists (batch index 0) into one flat list.
        # Generalized: works for any number of heads, not just three.
        merged = []
        for head_boxes in boxes:
            merged += head_boxes[0]
        merged = nms(merged, cfg.nms_max_overlap)
        return merged

    def visualize(self, image, labels, confidence, boxed_cord):
        '''
        Draw the detected boxes and their labels onto the image.
        image: the image used for detection, e.g. ndarray (1080, 1920, 3) in RGB
        labels: class ids, ndarray (m,)
        confidence: per-box confidences, ndarray (m,)
        boxed_cord: box coords, ndarray (m, 4) as (cx, cy, w, h) in pixels
        -----------------
        return: the annotated image (drawn in place)
        '''
        for i, c in enumerate(labels):
            c = int(c)
            predicted_class = self.class_names[c]
            box = boxed_cord[i]
            score = confidence[i]

            # Center + size -> integer corner coordinates.
            c_x, c_y, b_w, b_h = box
            lt_x = int(c_x - b_w / 2)
            lt_y = int(c_y - b_h / 2)
            rb_x = int(c_x + b_w / 2)
            rb_y = int(c_y + b_h / 2)

            # Draw the label inside the box to keep the drawing code short.
            label = '{} {:.2f}'.format(predicted_class, score)
            cv2.rectangle(image, (lt_x, lt_y), (rb_x, rb_y), self.colors[c], 2)
            # fixed: OpenCV requires an integer thickness; the original 1.5
            # raised TypeError at draw time
            cv2.putText(image, label, (lt_x + 2, lt_y + 15),
                        cv2.FONT_HERSHEY_PLAIN, 1, color=self.colors[c], thickness=2)
        return image
if __name__ == '__main__':
    img = cv2.imread("/media/jiji/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/private个人数据集/BDD10K/demo/影院20220428135535.jpg")
    # Columns 0-3 of `boxes` are pixel coords already rescaled to the original image.
    detecteOneImage = DetecteOneImage()
    boxes = detecteOneImage.detect_one_image(img)
    image = img
    # fixed: guard against an empty result — indexing boxes[:, 6] on an
    # empty array raised IndexError when nothing was detected
    if len(boxes) != 0:
        image = detecteOneImage.visualize(img, boxes[:, 6], boxes[:, 5] * boxes[:, 4], boxes[:, :4])
    if cfg.display:
        cv2.imshow("test", image)
        cv2.waitKey(0)
