from PIL import Image
from ctypes import resize
from yolact import Yolact
from utils.augmentations import FastBaseTransform
from utils.functions import SavePath
from layers.output_utils import postprocess
from data import cfg, set_cfg
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import argparse
import json
import cv2


def str2bool(v):
    """Parse a command-line string into a bool for argparse ``type=``.

    Accepts common spellings ('yes'/'no', 'true'/'false', 't'/'f',
    'y'/'n', '1'/'0'), case-insensitively.

    Args:
        v: The raw argument string.

    Returns:
        The parsed boolean.

    Raises:
        argparse.ArgumentTypeError: If *v* is not a recognized spelling.
    """
    # Lowercase once instead of once per membership test.
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')


def parse_args(argv=None):
    """Parse command-line options into the module-level ``args`` global.

    Args:
        argv: Optional list of argument strings; ``None`` means
            ``sys.argv[1:]`` (argparse's default).

    Side effects:
        Sets the module-level global ``args`` used throughout this file.
    """
    parser = argparse.ArgumentParser(
        description='YOLACT COCO Evaluation')
    parser.add_argument('--trained_model',
                        default='/home/u20/c2/yolact/weights/yolact_resnet50_156_1250.pth', type=str,
                        help='Trained state_dict file path to open. If "interrupt", this will open the interrupt file.')
    parser.add_argument('--top_k', default=5, type=int,
                        help='Further restrict the number of predictions to parse')
    # Fixed typo in the help text: "evaulate" -> "evaluate".
    parser.add_argument('--cuda', default=True, type=str2bool,
                        help='Use cuda to evaluate model')
    parser.add_argument('--display', dest='display', action='store_true',
                        help='Display qualitative results instead of quantitative ones.')

    parser.add_argument('--config', default=None,
                        help='The config object to use.')

    parser.add_argument('--display_lincomb', default=False, type=str2bool,
                        help='If the config uses lincomb masks, output a visualization of how those masks are created.')

    parser.add_argument('--score_threshold', default=0.20, type=float,
                        help='Detections with a score under this threshold will not be considered. This currently only works in display mode.')

    # Defaults for options the rest of the code reads but that are not
    # exposed as flags here.
    parser.set_defaults(no_bar=False, display=False, resume=False, output_coco_json=False, output_web_json=False, shuffle=False,
                        benchmark=False, no_sort=False, no_hash=False, mask_proto_debug=False, crop=True, detect=False, display_fps=False,
                        emulate_playback=False)
    global args
    args = parser.parse_args(argv)


def prep_display(dets_out, img, h, w):
    """Convert raw network detections into a per-pixel class-id mask.

    Args:
        dets_out: Raw output of the YOLACT network for one image.
        img: The (H, W, C) input tensor the detections refer to; its shape
            overrides the *h*/*w* arguments (which callers pass as None).
        h, w: Unused; immediately overwritten from ``img.shape``.

    Returns:
        A uint8 numpy array of shape (H, W) where background pixels are 0
        and detected pixels hold ``class_id + 1``.
    """
    h, w, _ = img.shape
    # Bug fix: the canvas was allocated as (w, h); masks come back as
    # (h, w), which only happened to work because inputs were square.
    canvas = np.zeros((h, w), dtype=np.uint8)

    # Temporarily force rescore_bbox for postprocessing, restoring the
    # previous value even if postprocess raises.
    save = cfg.rescore_bbox
    cfg.rescore_bbox = True
    try:
        t = postprocess(dets_out, w, h, visualize_lincomb=False,
                        crop_masks=False,
                        score_threshold=0.2)
    finally:
        cfg.rescore_bbox = save

    # Keep at most the 10 highest-scoring detections.
    idx = t[1].argsort(0, descending=True)[:10]
    classes, scores, boxes = [x[idx].cpu().numpy() for x in t[:3]]
    # Bug fix: also bail out when the mask branch is disabled — the
    # original referenced an unbound `masks` in that case (NameError).
    if len(classes) == 0 or not cfg.eval_mask_branch:
        return canvas

    # Detections are sorted by score, so cut at the first one below 0.2.
    num_dets_to_consider = min(10, classes.shape[0])
    for j in range(num_dets_to_consider):
        if scores[j] < 0.2:
            num_dets_to_consider = j
            break
    # Masks are drawn on the GPU, so don't copy until needed.
    masks = t[3][idx][:num_dets_to_consider, :, :].cpu().numpy()

    # Bug fix: iterate only the kept detections — the original looped over
    # all of `classes` and indexed past the truncated `masks` array.
    for i in range(num_dets_to_consider):
        # 0 stays background, so stored ids are shifted by +1.
        canvas[masks[i, :, :] == 1] = (classes[i] + 1)
    return canvas
import time

class Mymodel():
    """Wraps a YOLACT network for single-image mask inference.

    Construction parses CLI args, loads the trained weights, exports the
    network to ONNX once as a side effect, and moves it to the GPU.
    Calling the instance with a BGR image returns a uint8 per-pixel
    class-id mask at the input image's original resolution.
    """

    def __init__(self) -> None:
        parse_args()

        if args.config is not None:
            set_cfg(args.config)
        if args.trained_model == 'interrupt':
            args.trained_model = SavePath.get_interrupt('weights/')
        elif args.trained_model == 'latest':
            args.trained_model = SavePath.get_latest('weights/', cfg.name)

        if args.config is None:
            # Derive the config name from the weight-file naming
            # convention. TODO: Bad practice? Probably want a name lookup.
            model_path = SavePath.from_str(args.trained_model)
            args.config = model_path.model_name + '_config'
            print('Config not specified. Parsed %s from the file name.\n' %
                  args.config)
            set_cfg(args.config)

        if args.detect:
            cfg.eval_mask_branch = False

        cudnn.fastest = True
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        args.image = "demo.jpg"
        print('Loading model...', end='')
        net = Yolact()
        net.load_weights(args.trained_model)
        net.eval()
        print(' Done.')

        # NOTE(review): exporting to ONNX on every construction is kept
        # from the original; it is a side effect, not needed for inference.
        output_onnx = 'aaa.onnx'
        inputs = torch.randn(1, 3, 550, 550).cuda()
        print('convert', output_onnx, 'begin')
        torch.onnx.export(net, inputs, output_onnx, verbose=False,
                          opset_version=12, input_names=['image'],
                          output_names=['loc', 'conf', 'mask', 'proto'])
        print('convert', output_onnx, 'to onnx finish!!!')

        self.net = net.cuda()

    @torch.no_grad()
    def __call__(self, img):
        """Run inference on one (H, W, 3) BGR image.

        Args:
            img: numpy uint8 BGR image — assumed (H, W, 3); TODO confirm.

        Returns:
            uint8 (H, W) class-id mask resized back to the input size.
        """
        orig_h, orig_w = img.shape[:2]
        img = cv2.resize(img, (960, 960))
        self.net.detect.use_fast_nms = True
        self.net.detect.use_cross_class_nms = False
        cfg.mask_proto_debug = False
        frame = torch.from_numpy(img.copy()).cuda().float()
        batch = FastBaseTransform()(frame.unsqueeze(0))
        torch.cuda.synchronize()
        t1 = time.perf_counter()
        preds = self.net(batch)
        torch.cuda.synchronize()
        print(time.perf_counter() - t1)
        img_numpy = prep_display(preds, frame, None, None)
        # Bug fix: cv2.resize returns a new array — the original discarded
        # its result; dsize must be (width, height) but the original passed
        # (channels, width); and INTER_NEAREST must go in the
        # `interpolation` keyword, not the positional `dst` slot.
        img_numpy = cv2.resize(img_numpy, (orig_w, orig_h),
                               interpolation=cv2.INTER_NEAREST)
        return img_numpy


def init():
    """Construct and return the YOLACT inference handle."""
    return Mymodel()
# Model factory


def process_image(handle=None, input_image=None, args=None, ** kwargs):
    """Segment one image and save the resulting class-id mask to disk.

    Args:
        handle: Callable model (e.g. ``Mymodel``) mapping an image to a
            uint8 class-id mask.
        input_image: BGR image array passed straight to *handle*.
        args: JSON string; must contain key ``"mask_output_path"`` naming
            the file to write the mask image to. (Shadows the module-level
            ``args`` global on purpose — local use only.)
        **kwargs: Ignored; kept for interface compatibility.

    Returns:
        JSON string of the form
        ``{"model_data": {"objects": [], "mask": <mask_output_path>}}``.
    """
    mask = handle(input_image)
    # No per-object metadata is produced by this pipeline; the objects
    # list is intentionally empty. (Removed a large commented-out draft
    # that populated it from a detection tuple.)
    objs = []
    args = json.loads(args)
    mask_output_path = args['mask_output_path']
    pred_mask_per_frame = Image.fromarray(mask)
    pred_mask_per_frame.save(mask_output_path)
    pred = {'model_data': {"objects": objs, "mask": mask_output_path}}
    return json.dumps(pred)


# def process_image(handle=None, input_image=None, args=None, ** kwargs):

#     obj_dict = handle(input_image)
#     target_info = []
#     fake_result = {}
#     fake_result["algorithm_data"] = {
#         "is_alert": len(target_info) > 0,
#         "target_count": len(target_info),
#         "target_info": target_info
#     }
#     # cv2.imwrite("a.png",obj_dict)
#     fake_result["model_data"] = {"objects": "a.png"}
#     return json.dumps(fake_result, indent=4)
if __name__ == '__main__':
    import glob

    model = init()
    # The options string is loop-invariant, so serialize it once.
    call_args = json.dumps({"mask_output_path": "mask.png"})
    for image_path in glob.glob("/home/data/1441/*.jpg"):
        frame = cv2.imread(image_path)
        print(process_image(model, frame, call_args))
        # cv2.waitKey(0)
