'''
YOLOv5 instance-segmentation experiment.
'''
import copy
import time
import os
import sys

# import onnxruntime as ort
import torch  # for torch.tensor
import torchvision  # for nms

import cv2
import numpy as np
import glob

import sys

# sys.path.append('Yolov5_insSeg/yolov5/')  # v7.0
sys.path.append(f'{os.path.dirname(__file__)}/yolov5')
# from models.experimental import attempt_load
from algorithom.Yolov5_insSeg.yolov5.models.experimental import attempt_load
# sys.path.pop()
class Base:
    """Shared pre- and post-processing utilities for YOLOv5 instance segmentation."""

    def letterbox(self, img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True,
                  stride=32):
        '''
        Resize and pad an image while meeting stride-multiple constraints.
        :param img: HWC ndarray (BGR, as read by cv2)
        :param new_shape: target size, int or (h, w)
        :param color: padding color
        :param auto: True -> minimum rectangle: pad only up to the nearest stride multiple
        :param scaleFill: True -> stretch to new_shape exactly (no padding)
        :param scaleup: False -> only scale down, never up (better test mAP)
        :param stride: stride the padded shape must be a multiple of when auto=True
        :return: (padded image, (w_ratio, h_ratio), (dw, dh) half padding per side)
        '''

        shape = img.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not scaleup:  # only scale down, do not scale up (for better test mAP)
            r = min(r, 1.0)

        # Compute padding
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))

        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        if auto:  # minimum rectangle
            dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
        elif scaleFill:  # stretch
            dw, dh = 0.0, 0.0
            new_unpad = (new_shape[1], new_shape[0])
            ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

        dw /= 2  # divide padding into 2 sides
        dh /= 2

        # resize and pad
        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
        return img, ratio, (dw, dh)

    def xywh2xyxy(self, x):
        # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right.
        # Accepts a torch.Tensor or an ndarray and returns a new object of the same type.
        y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
        y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
        y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
        y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
        y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
        return y

    def box_iou(self, box1, box2):
        # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
        """
        Return intersection-over-union (Jaccard index) of boxes.
        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
        Arguments:
            box1 (Tensor[N, 4])
            box2 (Tensor[M, 4])
        Returns:
            iou (Tensor[N, M]): the NxM matrix containing the pairwise
                IoU values for every element in boxes1 and boxes2
        """

        def box_area(box):
            # box = 4xn
            return (box[2] - box[0]) * (box[3] - box[1])

        area1 = box_area(box1.T)
        area2 = box_area(box2.T)

        # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
        inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
        return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)

    def non_max_suppression(
            self,
            prediction,
            conf_thres=0.25,
            iou_thres=0.45,
            classes=None,
            agnostic=False,
            multi_label=False,
            labels=(),
            max_det=300,
            nm=0,  # number of masks
    ):
        """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections (instance segmentation).

        Returns:
             list of detections, one (n, 6 + nm) tensor per image [xyxy, conf, cls, mask coefficients]
        """

        if isinstance(prediction,
                      (list, tuple)):  # YOLOv5 model in validation model, output = (inference_out, loss_out)
            prediction = prediction[0]  # select only inference output

        device = prediction.device
        mps = 'mps' in device.type  # Apple MPS
        if mps:  # MPS not fully supported yet, convert tensors to CPU before NMS
            prediction = prediction.cpu()
        bs = prediction.shape[0]  # batch size
        nc = prediction.shape[2] - nm - 5  # number of classes
        xc = prediction[..., 4] > conf_thres  # candidates

        # Checks
        assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
        assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'

        # Settings
        # min_wh = 2  # (pixels) minimum box width and height
        max_wh = 7680  # (pixels) maximum box width and height
        max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
        time_limit = 0.5 + 0.05 * bs  # seconds to quit after
        redundant = True  # require redundant detections
        multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
        merge = False  # use merge-NMS

        t = time.time()
        mi = 5 + nc  # mask start index
        output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs
        for xi, x in enumerate(prediction):  # image index, image inference
            # Apply constraints
            # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
            x = x[xc[xi]]  # confidence

            # Cat apriori labels if autolabelling
            if labels and len(labels[xi]):
                lb = labels[xi]
                v = torch.zeros((len(lb), nc + nm + 5), device=x.device)
                v[:, :4] = lb[:, 1:5]  # box
                v[:, 4] = 1.0  # conf
                v[range(len(lb)), lb[:, 0].long() + 5] = 1.0  # cls
                x = torch.cat((x, v), 0)

            # If none remain process next image
            if not x.shape[0]:
                continue

            # Compute conf
            x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

            # Box/Mask
            box = self.xywh2xyxy(x[:, :4])  # center_x, center_y, width, height) to (x1, y1, x2, y2)
            mask = x[:, mi:]  # zero columns if no masks

            # Detections matrix nx6 (xyxy, conf, cls)
            if multi_label:
                i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T
                x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)
            else:  # best class only
                conf, j = x[:, 5:mi].max(1, keepdim=True)
                x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]

            # Filter by class
            if classes is not None:
                x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

            # Apply finite constraint
            # if not torch.isfinite(x).all():
            #     x = x[torch.isfinite(x).all(1)]

            # Check shape
            n = x.shape[0]  # number of boxes
            if not n:  # no boxes
                continue
            elif n > max_nms:  # excess boxes
                x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence
            else:
                x = x[x[:, 4].argsort(descending=True)]  # sort by confidence

            # Batched NMS
            c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
            boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
            i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
            if i.shape[0] > max_det:  # limit detections
                i = i[:max_det]
            if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
                # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
                iou = self.box_iou(boxes[i], boxes) > iou_thres  # iou matrix
                weights = iou * scores[None]  # box weights
                x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
                if redundant:
                    i = i[iou.sum(1) > 1]  # require redundancy

            output[xi] = x[i]
            if mps:
                output[xi] = output[xi].to(device)
            if (time.time() - t) > time_limit:
                # LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded')
                break  # time limit exceeded

        return output

    def make_grid(self, nx=20, ny=20):
        # Build a (1, 1, ny, nx, 2) grid of (x, y) cell coordinates for anchor decoding.
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()

    def clip_coords(self, boxes, img_shape):
        # Clip xyxy bounding boxes to image shape (height, width), in place.
        # FIX: the original used `.clip(...)`, which is NOT in-place and whose
        # result was discarded, so boxes were never actually clipped.
        boxes[:, 0].clamp_(0, img_shape[1])  # x1
        boxes[:, 1].clamp_(0, img_shape[0])  # y1
        boxes[:, 2].clamp_(0, img_shape[1])  # x2
        boxes[:, 3].clamp_(0, img_shape[0])  # y2

    def scale_coords(self, img1_shape, coords, img0_shape, ratio_pad=None):
        # Rescale coords (xyxy) in place from img1_shape (model input) to img0_shape (original image).
        if ratio_pad is None:  # calculate from img0_shape
            gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain  = old / new
            pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
        else:
            gain = ratio_pad[0][0]
            pad = ratio_pad[1]

        coords[:, [0, 2]] -= pad[0]  # x padding
        coords[:, [1, 3]] -= pad[1]  # y padding
        coords[:, :4] /= gain
        self.clip_coords(coords, img0_shape)
        return coords

    ## Instance-segmentation helpers
    def process_mask(self, protos, masks_in, bboxes, shape, upsample=False):
        """
        Combine mask prototypes with per-detection coefficients, crop to the
        boxes, optionally upsample to the model input size, then binarize.
        (Crop happens before upsample, at prototype resolution.)

        :param protos: [mask_dim, mask_h, mask_w] prototype masks
        :param masks_in: [n, mask_dim] mask coefficients, n = detections after NMS
        :param bboxes: [n, 4] boxes in model-input pixels
        :param shape: model input size (h, w)
        :return: [n, h, w] boolean masks (prototype resolution unless upsample=True)
        """

        c, mh, mw = protos.shape  # CHW
        ih, iw = shape  # model input size
        masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)  # NHW

        # scale boxes from model-input pixels down to prototype-mask pixels
        downsampled_bboxes = bboxes.clone()
        downsampled_bboxes[:, 0] *= mw / iw
        downsampled_bboxes[:, 2] *= mw / iw
        downsampled_bboxes[:, 3] *= mh / ih
        downsampled_bboxes[:, 1] *= mh / ih

        masks = self.crop_mask(masks, downsampled_bboxes)  # zero mask values outside each box
        if upsample:
            if masks.shape[0] != 0:  # non-empty
                masks = torch.nn.functional.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[
                    0]  # NHW, scaled to the model input size
            else:
                masks = masks.reshape(0, shape[0], shape[1])
        # binarize; threshold raised from the YOLOv5 default of 0.5 to 0.95 (tuning knob)
        return masks.gt_(0.95)

    def crop_mask(self, masks, boxes):
        """
        "Crop" predicted masks by zeroing out everything not in the predicted bbox.
        Vectorized by Chong (thanks Chong).

        Args:
            - masks: [n, h, w] tensor of masks
            - boxes: [n, 4] tensor of bbox coords (x1, y1, x2, y2) in mask pixels
        """

        n, h, w = masks.shape
        x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # each shape (n,1,1)
        r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # column indices, shape (1,1,w)
        c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # row indices, shape (1,h,1)

        return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))

    def scale_image(self, im1_shape, masks, im0_shape, ratio_pad=None):
        """
        Rescale masks from the model-input size to the original image size by
        stripping the letterbox padding and resizing.
        :param im1_shape: model input shape, [h, w]
        :param masks: [h, w, num] tensor
        :param im0_shape: original image shape, [h, w, 3]
        :return: [h0, w0, num] tensor at original-image resolution
        """
        # Rescale coordinates (xyxy) from im1_shape to im0_shape
        if ratio_pad is None:  # calculate from im0_shape
            gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1])  # gain  = old / new
            pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2  # wh padding
        else:
            pad = ratio_pad[1]
        top, left = int(pad[1]), int(pad[0])  # y, x
        bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])

        if len(masks.shape) < 2:
            raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
        masks = masks[top:bottom, left:right]  # strip the letterbox padding
        # NOTE(review): the permute below requires a 3-D [h, w, num] input even
        # though the check above admits 2-D — confirm callers always pass 3-D.
        masks = masks.permute(2, 0, 1).contiguous()  # to chw
        masks = torch.nn.functional.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] if masks.shape[0] != 0 else masks.reshape(0, im0_shape[0], im0_shape[1])  # masks[None]: nchw
        masks = masks.permute(1, 2, 0).contiguous()  # to hwc
        return masks


class Inference(Base):
    # PyTorch inference wrapper for a YOLOv5-seg checkpoint: preprocessing,
    # forward pass, NMS + mask post-processing, and drawing helpers.

    def __init__(self, model_address='/home/xiancai/stu/yolov5/yolov5s-seg.pt', input_h=640, input_w=640, conf = 0.25, iou = 0.45,
                 device=torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu') ):
        # NOTE(review): the default `device` expression is evaluated once at import
        # time; pass device= explicitly if CUDA availability may change.
        self.model_address = model_address
        self.input_h, self.input_w = input_h, input_w
        self.conf = conf  # confidence threshold for NMS
        self.iou = iou  # IoU threshold for NMS
        self.device = device

        # pt load
        # Load model
        print(f'loading seg model: {model_address}')
        self.model = attempt_load(self.model_address, device=self.device)
        self.cls_name = self.model.names  # class-id -> class-name mapping
        print('done')


    def infer(self, img_address):
        '''
        Run yolov5s_seg.pt model inference (the model already includes the anchor/grid decode).
        :param img_address: single image path or ndarray (h*w*c, BGR)
        :return: pre: n*(x1,y1,x2,y2,conf,cls) numpy  masks: h*w*n numpy (original-image scale)
        '''

        # Preprocess
        t1 = time.time()
        img0 = cv2.imread(img_address) if isinstance(img_address, str) else img_address

        img = self.letterbox(img0, (self.input_h, self.input_w), auto=False, stride=32)[0]  # Padded resize
        # cv2.imwrite('/data1/xiancai/BABY_DATA/other/debug.jpg', img)

        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x640x640
        img = np.ascontiguousarray(img).astype(np.float32)
        img /= 255.0
        img = img[None, ...]

        # pt inference
        img = torch.from_numpy(img).to(self.device)
        # t1 = time.time()
        (out0, proto, out2) = self.model(img)  # (detections 1*~*(5+nc+nm), mask prototypes, raw per-scale outputs)
        # print(f'yolov5seg model inference {time.time() - t1}')

        # NMS, mask
        # z = torch.tensor(outs[0])  # TODO: run NMS with numpy instead
        pre = self.non_max_suppression(out0, conf_thres=self.conf, iou_thres=self.iou, nm=32)  # empty (0 x ~) when no detections
        pre = pre[0]  # single image (batch size 1)
        masks = self.process_mask(proto[0], pre[:, 6:], pre[:, :4], img.shape[2:], upsample=True)  # ndet*h*w at model-input resolution
        masks = masks.permute(1, 2, 0).contiguous()  # to h*w*c

        # boxes back to original-image pixels
        # masks back to original-image pixels
        pre[:, :4] = self.scale_coords(img.shape[2:], pre[:, :4],
                                       img0.shape).round()  # img 1*3*640*640 img0.shape 1080*810*3
        masks = self.scale_image(masks.shape[:2], masks, img0.shape)
        pre = pre.cpu().numpy()
        masks = masks.cpu().numpy()

        # for i in range(masks.shape[-1]):
        #     mask = masks[...,i].astype(np.uint8)*255
        #     cv2.imwrite(f'/home/xiancai/stu/yolov5/data/images/{i}.jpg',mask)
        print(f'yolov5seg model inference {time.time() - t1}')
        return pre, masks

    def draw_box(self, img_address, pre):
        '''
        Draw detection boxes and class labels on a copy of the image.
        :param img_address: single image path or ndarray (h*w*c, BGR)
        :param pre: n*(x1,y1,x2,y2,conf,cls) numpy
        :return: annotated image copy (ndarray)
        '''

        img0 = cv2.imread(img_address) if isinstance(img_address, str) else img_address
        img0=copy.deepcopy(img0)  # never draw on the caller's array

        for i in range(pre.shape[0]):
            # draw box
            pt1 = [int(pre[i, 0]), int(pre[i, 1])]
            pt2 = [int(pre[i, 2]), int(pre[i, 3])]
            cv2.rectangle(img0, pt1, pt2, (0, 255, 0), 2)
            # draw label: "conf:cls-id_cls-name" just inside the box's top-left corner
            pt3 = [int(pre[i, 0]) + 16, int(pre[i, 1]) + 16]
            lab = f'{str(np.round(pre[i, 4], 2))}:{int(pre[i, 5])}_{self.cls_name[int(pre[i, 5])]}'
            # lab = f'{str(np.round(pre[i, 4], 4))}:{int(pre[i, 5])}'
            cv2.putText(img0, lab, pt3, fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=2.0, color=[225, 40, 168],
                        thickness=2)
        return img0

    def draw_box_mask(self, img_address, pre, mask):
        '''
        Draw detection boxes, class labels and a translucent mask overlay on a copy of the image.
        :param img_address: single image path or ndarray (h*w*c, BGR)
        :param pre: n*(x1,y1,x2,y2,conf,cls) numpy
        :param mask: hwn numpy float 0-1, one channel per detection
        :return: annotated image copy (ndarray)
        '''

        img0 = cv2.imread(img_address) if isinstance(img_address, str) else img_address
        img0 = copy.deepcopy(img0)  # never draw on the caller's array

        # draw mask: merge all instance channels, then alpha-blend onto the image
        mask = mask * 255
        mask = np.sum(mask, axis=2).clip(0, 255)
        # mask = mask[...,0].clip(0, 255)
        mask = mask[..., None].repeat(3, axis=2)
        img0 = (img0 + mask * 0.4).clip(0, 255).astype(np.uint8)
        # cv2.imwrite('/home/xiancai/stu/yolov5/data/images/mask.jpg',mask)

        for i in range(pre.shape[0]):
            # draw box
            pt1 = [int(pre[i, 0]), int(pre[i, 1])]
            pt2 = [int(pre[i, 2]), int(pre[i, 3])]
            cv2.rectangle(img0, pt1, pt2, (0, 255, 0), 2)
            # draw label: "conf:cls-name" just inside the box's top-left corner
            pt3 = [int(pre[i, 0]) + 16, int(pre[i, 1]) + 16]
            lab = f'{str(np.round(pre[i, 4], 2))}:{self.cls_name[int(pre[i, 5])]}'
            # lab = f'{str(np.round(pre[i, 4], 4))}:{int(pre[i, 5])}'
            cv2.putText(img0, lab, pt3, fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=5.0, color=[225, 40, 168],
                        thickness=10)

        return img0


def test_cam():
    '''
    Live camera demo: run instance segmentation on each frame and display the
    result. Press ESC to quit.
    '''
    # Hikvision camera is hard-wired here; a realsense variant
    # (camera_realsense.Carmera) could be selected via the CAMERA_TYPE env var.
    from camera_haikang import Carmera

    camera = Carmera(width=640, height=480, fps=15)
    # infer = Inference(model_address='yolov5/yolov5s-seg.pt',device=torch.device('cuda:0'))
    detector = Inference(
        model_address='/home/nvidia/platform/机器视觉综合实验/22.Yolov5实例分割/yolov5/runs/train-seg/exp/weights/best.pt',
        device=torch.device('cuda:0'))

    while True:
        # grab a camera frame
        frame, _ = camera.get_frame()
        image = np.asanyarray(frame.get_data())
        # detect
        dets, seg_masks = detector.infer(image)
        # draw and show
        vis = detector.draw_box_mask(image, dets, seg_masks)
        cv2.imshow('image', vis)
        if cv2.waitKey(1) == 27:  # ESC
            cv2.destroyAllWindows()
            camera.stop_pipline()
            sys.exit()
def test_imgs():
    '''
    Batch test: run segmentation inference on every image matching `imgs_glob`
    and save the visualized results into `save_dir`.
    '''
    imgs_glob = r'D:\data\231207霍尼\test_data\拍照测试\*.jpg'
    save_dir = r'D:\data\231207霍尼\test_data\拍照测试\out0/'
    model_path = r'D:\code\git\zxc\huoni\train\Yolov5_insSeg\yolov5\runs\train-seg\exp7\weights\best.pt'

    # FIX: cv2.imwrite fails silently (returns False) if the directory is missing
    os.makedirs(save_dir, exist_ok=True)
    infer = Inference( model_address=model_path, device=torch.device('cuda:0'), input_h=416, input_w=416)
    ls = glob.glob(imgs_glob)
    for i in ls:
        img = cv2.imread(i)
        # detect
        pre, mask = infer.infer(img)
        # draw
        res_img = infer.draw_box_mask(img, pre, mask)
        # FIX: os.path.basename instead of i.split('\\')[-1] — handles both
        # '\\' and '/' separators on Windows
        cv2.imwrite(save_dir + os.path.basename(i), res_img)


if __name__ == '__main__':
    # Default entry point: batch-image test (use test_cam() for the live camera demo).
    test_imgs()
