import easydict
from detectron2.structures import Boxes

import cjc.yolo_path

import cv2
import numpy as np
import torch
import yaml

import matplotlib

# matplotlib.use('TkAgg')  # backend name is case-insensitive: 'tkaGg' and 'TkAgg' both work
from IPython import get_ipython
from detectron2.layers import paste_masks_in_image
from detectron2.modeling.poolers import ROIPooler
# from detectron2.structures import Boxes
from detectron2.utils.memory import retry_if_cuda_oom
import os.path
import sys

from yolo.utils.datasets import letterbox
from yolo.utils.general import non_max_suppression_mask_conf

from matplotlib import pyplot as plt

from torchvision.transforms import transforms

def create_instance_opt():
    """Build the default options namespace for :class:`InstanceDetector`.

    Returns:
        easydict.EasyDict with checkpoint path, hyperparameter YAML path,
        display-window name and the image-preview flag.
    """
    return easydict.EasyDict({
        'mask_weight': './cjc/weights/yolov7-mask.pt',
        'mask_yaml': './yolo/data/hyp.scratch.mask.yaml',
        'name': 'instance',
        'view_img': False,
    })

class InstanceDetector:
    """YOLOv7-mask based instance-segmentation detector.

    Loads a mask-capable YOLOv7 checkpoint plus its hyperparameter YAML and
    exposes :meth:`detect`, which returns one ``[cls, x1, y1, x2, y2, conf]``
    row per detection above the confidence threshold (or ``None`` when the
    model produced no predictions).
    """

    def __init__(self, opt):
        """Load model weights and hyperparameters.

        Args:
            opt: namespace-like object with attributes ``mask_weight``
                (checkpoint path), ``mask_yaml`` (hyperparameter YAML path),
                ``name`` (window title used when displaying results) and
                ``view_img`` (bool, show annotated image via OpenCV).
        """
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.opt = opt
        with open(opt.mask_yaml, encoding="utf-8") as f:
            self.hyp = yaml.load(f, Loader=yaml.FullLoader)
        # map_location lets CPU-only machines load checkpoints saved on GPU.
        weights = torch.load(opt.mask_weight, map_location=self.device)
        self.model = weights['model']
        self.names = self.model.names
        self.name = opt.name
        # half() would be faster but is not supported on this path; use float32.
        self.model = self.model.float().to(self.device)
        self.model.eval()

    @staticmethod
    def _random_color():
        """Return a random BGR color as three ints in [0, 255]."""
        # np.random.randint's upper bound is exclusive: 256 makes 255 reachable.
        return [int(v) for v in np.random.randint(0, 256, size=3)]

    def detect(self, image, identity=None):
        """Run instance segmentation on a BGR image.

        Args:
            image: HxWx3 uint8 image in OpenCV BGR layout.
            identity: unused; kept for interface compatibility.

        Returns:
            List of ``[cls, x1, y1, x2, y2, conf]`` rows, or ``None`` when
            the model produced no predictions.
        """
        # Resize/pad to the network input size while preserving aspect ratio.
        image = letterbox(image, 640, stride=64, auto=True)[0]
        image = transforms.ToTensor()(image)
        image = image.unsqueeze(0).to(self.device)
        # Inference only: no_grad avoids building the autograd graph.
        with torch.no_grad():
            output = self.model(image)
        return self._parse_detect_result(output, image)

    def get_name_from_result(self, r: list):
        """Return the class name of the first detection line, or None."""
        cls = self.get_cls_from_result(r)
        return None if cls is None else self.names[cls]

    @staticmethod
    def get_cls_from_result(r: list):
        """Return the class index of the first detection line, or None.

        Args:
            r: list of whitespace-joined detection strings whose first
               field is the class index (as produced by ``detect`` rows
               joined with spaces), or None/empty.
        """
        if not r:
            return None
        return int(r[0].split(' ')[0])

    def _parse_detect_result(self, output, image):
        """Post-process raw model output into annotated per-detection rows."""
        inf_out = output['test']
        attn = output['attn']
        # Concatenate mask bases with the semantic output along channels.
        bases = torch.cat([output['bases'], output['sem']], dim=1)
        _, _, height, width = image.shape

        pooler = ROIPooler(output_size=self.hyp['mask_resolution'],
                           scales=(self.model.pooler_scale,),
                           sampling_ratio=1,
                           pooler_type='ROIAlignV2',
                           canonical_level=2)
        # Only the box and mask outputs are used; the rest are discarded.
        pred_list, pred_mask_list, *_ = non_max_suppression_mask_conf(
            inf_out, attn, bases, pooler, self.hyp,
            conf_thres=0.25, iou_thres=0.65, merge=False, mask_iou=None)

        pred, pred_masks = pred_list[0], pred_mask_list[0]
        if pred is None:
            return None
        bboxes = Boxes(pred[:, :4])
        masks = pred_masks.view(-1, self.hyp['mask_resolution'],
                                self.hyp['mask_resolution'])
        # Paste ROI-sized masks back into full-image coordinates.
        pred_masks = retry_if_cuda_oom(paste_masks_in_image)(
            masks, bboxes, (height, width), threshold=0.5)
        pred_masks_np = pred_masks.detach().cpu().numpy()
        pred_cls = pred[:, 5].detach().cpu().numpy()
        pred_conf = pred[:, 4].detach().cpu().numpy()
        # Convert the normalized tensor back to a BGR uint8 image for drawing.
        nimg = (image[0].permute(1, 2, 0) * 255).cpu().numpy().astype(np.uint8)
        nimg = cv2.cvtColor(nimg, cv2.COLOR_RGB2BGR)
        nbboxes = bboxes.tensor.detach().cpu().numpy().astype(int)
        return self._deal_result(pred_masks_np, nbboxes, pred_cls, pred_conf,
                                 nimg.copy())

    def _deal_result(self, pred_masks_np, nbboxes, pred_cls, pred_conf, pnimg):
        """Overlay masks/boxes on ``pnimg`` and collect detection rows.

        Returns:
            List of ``[cls, x1, y1, x2, y2, conf]`` for detections with
            confidence >= 0.25.
        """
        lines = []
        for one_mask, bbox, cls, conf in zip(pred_masks_np, nbboxes,
                                             pred_cls, pred_conf):
            if conf < 0.25:
                continue
            color = self._random_color()
            # Blend the masked pixels 50/50 with a random color, then draw the box.
            pnimg[one_mask] = pnimg[one_mask] * 0.5 + np.array(color, dtype=np.uint8) * 0.5
            pnimg = cv2.rectangle(pnimg, (bbox[0], bbox[1]),
                                  (bbox[2], bbox[3]), color, 2)
            lines.append([cls, *bbox, conf])
        if self.opt.view_img:
            cv2.imshow(self.opt.name, pnimg)
            cv2.waitKey(3000)
        return lines


if __name__ == '__main__':
    opt = create_instance_opt()
    opt.view_img = True
    detector = InstanceDetector(opt)
    img_path = './cjc/imgs/heisi.jpg'
    img = cv2.imread(img_path)
    # cv2.imread returns None (instead of raising) on a missing/unreadable
    # file; fail early with a clear message rather than crashing in detect().
    if img is None:
        sys.exit(f'image not found or unreadable: {img_path}')
    result = detector.detect(img)
    print(result)
