import os
import argparse

os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'

import sanic
# grpc

from cvtools import cvio

import torch
import numpy as np
import base64
import cv2

import mmcv
from mmcv.runner import load_checkpoint

from mmdet.apis import inference_detector
from mmdet.core.mask.structures import bitmap_to_polygon
from mmdet.models import build_detector

import pdb

# Built-in detector registry used when no --config file is supplied.
# Each entry holds: type (registry key), config (mmcv config path),
# checkpoint (weights path), and device (torch device string).
# Entries below are kept commented out as a history of previously
# deployed models; exactly one entry is currently active.
_detectors_configs = [
#    dict(
#        type='cascade_mask_swin',
#        config='/data/hjzhen/projects/configs/cascade_mask_rcnn_bottle/cascade_mask_swin_tiny_bottle.py',
#        checkpoint='/data/hjzhen/projects/configs/cascade_mask_rcnn_bottle/cascade_mask_swin_tiny_bottle_20220621_epoch_48-8d78358d.pth',
#        device='cuda:3'
#    ),
#    dict(
#        type='cascade_mask_x101_fpn_dcn_0.6_bottle',
#        config='/data/hjzhen/data/generalObjectDataset/drink/work_dirs/cascade_mask_x101_fpn_dcn_0.6_bottle/cascade_mask_drink_bottle2.x.py',
#        checkpoint='/data/hjzhen/data/generalObjectDataset/drink/work_dirs/cascade_mask_x101_fpn_dcn_0.6_bottle/cascade_mask_drink_bottle2.x_epoch_36.pth',
#        device='cuda:3',
#    ),
#    dict(
#        type='maskrcnn-swin-tiny-bottle',
#        config='/data/hjzhen/data/generalObjectDataset/drink/work_dirs/maskrcnn-swin-tiny-bottle/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_bottle.py',
#        checkpoint='/data/hjzhen/data/generalObjectDataset/drink/work_dirs/maskrcnn-swin-tiny-bottle/mask_rcnn_swin-tiny_bottle_epoch_36.pth',
#        device='cuda:0',
#    ),
#    dict(
#        type='queryinst_swin_tiny_bottle',
#        config='/data/hjzhen/data/generalObjectDataset/drink/work_dirs/queryinst_swin_tiny_bottle_mask/queryinst_swin_tiny_bottle_mask.py',
#        checkpoint='/data/hjzhen/data/generalObjectDataset/drink/work_dirs/queryinst_swin_tiny_bottle_mask/queryinst_swin_tiny_bottle_mask_epoch_60.pth',
#        device='cuda:3',
#    ),
#    dict(
#        type='cascade_mask_aiot_bottle',
#        config='/data/hjzhen/data/xxw/check_dir1cls/cascade_mask_aiot_bottle_v2.23.py',
#        checkpoint='/data/hjzhen/data/xxw/check_dir1cls/cascade_mask_aiot_bottle_20220505_epoch36_v2.23.pth',
#        device='cuda:0'
#    ),
#     dict(
#         type='cjfc_ty',
#         config='/data/hjzhen/xretail_workspace/deploy_plans/torch_models/cjfc/ty_cjfc_16sku_0119_28.py',
#         checkpoint='/data/hjzhen/xretail_workspace/deploy_plans/torch_models/cjfc/ty_cjfc_16sku_0119_28.pth',
#         device='cuda:2'
#     ),
    dict(
        type='cascade_mask_bottle',
        config='/data/hjzhen/projects/configs/cascade_rcnn_daiding_101/cascade_mask_rcnn_x101_dcn_fl_ohem_bottle_2.x.py',
        checkpoint='/data/hjzhen/projects/configs/cascade_rcnn_daiding_101/cascade_mask_rcnn_x101_dcn_fl_ohem_bottle_epoch_16_2.x.pth',
        device='cuda:2'
    ),
#    dict(
#        type='cascade_mask_x101_fpn_dcn_fl',
#        config='/data/hjzhen/data/generalObjectDataset/drink/work_dirs/cascade_mask_x101_fpn_dcn_0.6_bottle_0520/cascade_mask_drink_bottle2.x.py',
#        #checkpoint='/data/hjzhen/data/generalObjectDataset/drink/work_dirs/cascade_mask_x101_fpn_dcn_0.6_bottle_0520/cascade_mask_x101_fpn_dcn_2.x_bottle_0520_epoch_36-23061613.pth',
#        checkpoint='/data/hjzhen/data/generalObjectDataset/drink/work_dirs/cascade_mask_x101_fpn_dcn_0.6_bottle_0522/cascade_mask_x101_fpn_dcn_2.x_bottle_epoch_35.pth',
#        device='cuda:3'
#    ),
#   dict(
#       type='queryinst_tiny',
#       config='/data/hjzhen/data/generalObjectDataset/drink/work_dirs/queryinst_swin_tiny_bottle_mask/queryinst_swin_tiny_bottle_mask.py',
#       checkpoint='/data/hjzhen/data/generalObjectDataset/drink/work_dirs/queryinst_swin_tiny_bottle_mask/queryinst_swin_tiny_bottle_epoch_48_20220613.pth',
#       device='cuda:3'
#   ),
#   dict(
#       type='cascade_0.6',
#       config='/data/hjzhen/projects/configs/cascade_rcnn_daiding_101/cascade_mask_drink_bottle2.x.py',
#       checkpoint='/data/hjzhen/projects/configs/cascade_rcnn_daiding_101/daiding_101_sku_1_20201218_epoch_10_2.x.pth',
#       device='cuda:3'
#   ),
#    dict(
#        type='xxw',
#        config='/data/hjzhen/xretail_workspace/deploy_plans/torch_models/xxw/cascade_mask_aiot_xxw.py',
#        checkpoint='/data/hjzhen/xretail_workspace/deploy_plans/torch_models/xxw/cascade_mask_aiot_xxw_20220504_epoch_32.pth',
#        device='cuda:2'
#    )
]
#
# Registry of loaded models, keyed by the 'type' string; filled by init_detectors().
detectors = {}

# Sanic application serving the /object_detect endpoint.
app = sanic.Sanic('Object_detector')

def decodeBase64(base64_data):
    """Decode a base64-encoded image string into an OpenCV BGR matrix.

    Returns None when cv2 cannot decode the byte buffer.
    """
    raw = base64.b64decode(base64_data)
    buf = np.frombuffer(raw, dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)

def init_detector(config, checkpoint, device='cuda:0'):
    """Build an mmdet model from a config file and load its checkpoint.

    Args:
        config: path to the mmcv config file describing the model.
        checkpoint: path to the checkpoint (.pth) file.
        device: torch device string the model is moved to.

    Returns:
        The model in eval mode, with ``CLASSES`` and ``cfg`` attached.
    """
    cfg = mmcv.Config.fromfile(config)
    # The checkpoint supplies all weights, so no pretrained backbone is needed.
    cfg.model.pretrained = None

    model = build_detector(cfg.model)
    ckpt = load_checkpoint(model, checkpoint, map_location=device)

    # Prefer class names recorded in the checkpoint metadata; fall back to
    # the training dataset's classes from the config.
    meta = ckpt['meta']
    model.CLASSES = meta['CLASSES'] if 'CLASSES' in meta else cfg.data.train['classes']

    # inference_detector() reads the config from the model instance.
    model.cfg = cfg

    model.to(device)
    model.eval()
    return model

def format_result(result, classes, score_thr=0.5, mode='bbox'):
    """Convert a raw mmdet detection result into labelme-style shape dicts.

    Args:
        result: mmdet inference output — either ``bbox_result`` alone or a
            ``(bbox_result, segm_result)`` tuple (ms-rcnn wraps segms in a
            further tuple).
        classes: sequence of class names indexed by label id.
        score_thr: detections with score <= ``score_thr`` are dropped.
        mode: 'segm' keeps the raw mask contour; 'bbox' collapses each
            contour to its minimum-area rotated box.

    Returns:
        list of dicts with keys ``label``, ``points``, ``shape_type``
        ('polygon') and ``score`` (plain float, JSON-serializable).
    """
    assert mode in ('bbox', 'segm')
    if isinstance(result, tuple):
        bbox_result, segm_result = result
        if isinstance(segm_result, tuple):
            segm_result = segm_result[0]  # ms rcnn
    else:
        bbox_result, segm_result = result, None

    # One label entry per detected box, in the same order np.vstack uses below.
    labels = []
    for i, bbox in enumerate(bbox_result):
        if not len(bbox):
            continue
        labels.extend([classes[i]] * len(bbox))
    labels = np.array(labels, dtype=str)
    # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float dtype.
    bboxes = np.vstack(bbox_result).astype(float)

    segms = []
    if segm_result is not None and len(labels) > 0:  # non empty
        segms = mmcv.concat_list(segm_result)
        if isinstance(segms[0], torch.Tensor):
            segms = torch.stack(segms, dim=0).detach().cpu().numpy()
        else:
            segms = np.stack(segms, axis=0)

    if score_thr > 0:
        assert bboxes is not None and bboxes.shape[1] == 5
        scores = bboxes[:, -1]
        inds = scores > score_thr
        bboxes = bboxes[inds, :]
        labels = labels[inds]
        if segm_result is not None and len(segms):
            segms = segms[inds, ...]

    shapes = []
    if len(segms):
        # Turn each mask into a polygon, falling back to the axis-aligned
        # box when the contour is degenerate or implausibly small.
        for i, segm in enumerate(segms):
            contour = segm_to_polygon(segm)
            if mode == 'bbox':
                contour = contour2minAreaBbox(contour)
            area1 = compute_polygon_area(contour)
            x1, y1, x2, y2, score = bboxes[i]
            area2 = (y2 - y1) * (x2 - x1)
            # A valid contour needs >= 3 points and > 20% of the bbox area.
            if not len(contour) or len(contour) < 3 or area1 <= 0.2 * area2:
                contour = [[x1, y1], [x2, y1], [x2, y2], [x1, y2]]
                print('Found illegal contour', i, labels[i])
            shapes.append(dict(label=labels[i], points=contour,
                               shape_type='polygon', score=float(score)))
    else:
        # No masks: emit each bbox as a 4-point rectangle polygon.
        for i, (x1, y1, x2, y2, score) in enumerate(bboxes):
            contour = [[x1, y1], [x2, y1], [x2, y2], [x1, y2]]
            shapes.append(dict(label=labels[i], points=contour,
                               shape_type='polygon', score=float(score)))
    return shapes

def compute_polygon_area(points):
    """Return the absolute area of a polygon via the shoelace formula.

    Args:
        points: sequence of [x, y] vertices in order (either winding).

    Returns:
        float area; 0.0 when fewer than three vertices are given.
    """
    n = len(points)
    if n < 3:
        # Fewer than 3 vertices cannot enclose an area.
        return 0.0
    twice_area = 0.0
    for i in range(n):
        x0, y0 = points[i][0], points[i][1]
        nxt = points[(i + 1) % n]
        twice_area += x0 * nxt[1] - nxt[0] * y0
    return abs(twice_area) / 2.0

def contour2minAreaBbox(contour):
    """Collapse a polygon contour to the 4 corners of its minimum-area rotated box."""
    pts = np.asarray(contour, dtype=np.float32)
    rect = cv2.minAreaRect(pts)
    return cv2.boxPoints(rect).tolist()

def segm_to_polygon(segm):
    """Turn a binary mask into a single convex-hull polygon.

    Returns a list of [x, y] float pairs, or [] when the mask yields no
    contours.
    """
    contours, with_hole = bitmap_to_polygon(segm)
    if not len(contours):
        return []
    if with_hole:
        # Masks with holes produce several contours; keep the one whose
        # bounding box covers the largest area.
        def _bbox_area(c):
            return ((c[:, 0].max() - c[:, 0].min()) *
                    (c[:, 1].max() - c[:, 1].min()))
        contour = max(contours, key=_bbox_area)
    else:
        contour = contours[0]
    hull = cv2.convexHull(contour)[:, 0, :]
    return hull.astype(float).tolist()

def infer_detector(model_name, imgs, score=0.5, mode='segm'):
    """Run a registered detector over one or more images.

    Args:
        model_name: key into the module-level ``detectors`` registry.
        imgs: a single image (path str / ndarray) or a list/tuple of them.
        score: score threshold forwarded to :func:`format_result`.
        mode: 'bbox' or 'segm', forwarded to :func:`format_result`.

    Returns:
        list with one formatted result (list of shape dicts) per input image.

    Raises:
        TypeError: if ``imgs`` is not a str/ndarray/list/tuple.
    """
    model = detectors[model_name]
    # BUGFIX: isinstance instead of exact type() comparison, so subclasses
    # of str/ndarray/list/tuple are accepted as well.
    if isinstance(imgs, (str, np.ndarray)):
        results = [inference_detector(model, imgs)]
    elif isinstance(imgs, (list, tuple)):
        results = inference_detector(model, imgs)
    else:
        # BUGFIX: carry the diagnostic in the exception instead of printing
        # and raising a bare TypeError.
        raise TypeError(
            'Type of input imgs error, expected (str/ndarray/list/tuple) '
            'but got %s' % type(imgs))
    return [format_result(result, model.CLASSES, score, mode)
            for result in results]

def init_detectors(args):
    """Populate the module-level ``detectors`` registry.

    Configs come from ``args.config`` (loaded via ``cvio.load_ann``) when the
    path exists, otherwise from the built-in ``_detectors_configs`` list.
    Each config dict needs 'type' plus the init_detector() kwargs.
    """
    if args.config in ('', None) or not os.path.exists(args.config):
        detectors_configs = _detectors_configs
    else:
        detectors_configs = cvio.load_ann(args.config)
    for config in detectors_configs:
        detype = config['type']
        print(detype)
        # BUGFIX: build kwargs from a copy instead of `del config['type']`,
        # which mutated the shared config dicts and made a second call to
        # init_detectors() fail with KeyError.
        kwargs = {k: v for k, v in config.items() if k != 'type'}
        detectors[detype] = init_detector(**kwargs)

@app.route('/object_detect', methods=['POST'])
async def skuDet(request):
    """Detection endpoint.

    Expects form fields ``images`` (a list literal of base64 image strings)
    and ``score_thr``, plus headers ``model_name`` and optional ``mode``
    ('bbox' / 'segm', default 'segm').

    Returns:
        JSON ``{'status': 1, 'results': [...]}`` on success, or
        ``{'status': 0, 'results': [[], ...]}`` (one empty list per decoded
        image) on any failure.
    """
    import ast
    imgs = []  # defined up-front so the except branch can always len() it
    try:
        model_name = request.headers.get('model_name')
        mode = 'segm'
        if 'mode' in request.headers:
            mode = request.headers.get('mode')
        # SECURITY FIX: request bodies are untrusted input — parse them with
        # ast.literal_eval / float() instead of eval().
        imgs = [decodeBase64(img)
                for img in ast.literal_eval(request.form.get('images'))]
        score_thr = float(request.form.get('score_thr'))
        results = infer_detector(model_name, imgs, score_thr, mode)
        status = 1
    except Exception:
        # Best-effort response: report failure without crashing the server.
        results = [[] for _ in range(len(imgs))]
        status = 0
    return sanic.response.json(dict(status=status, results=results))

def parse_args():
    """Parse command-line options for the auto-label sanic server.

    Returns:
        argparse.Namespace with ``config``, ``device`` and ``port``.
    """
    cli = argparse.ArgumentParser(description='Auto Label Sanic Server')
    cli.add_argument('--config', '-c', type=str, default='')
    cli.add_argument('--device', type=str, default='cuda:2')
    cli.add_argument('--port', '-p', type=int, default=9997)
    return cli.parse_args()

# Script entry point: parse CLI options, load every configured detector,
# then serve HTTP on all interfaces.
if __name__ == '__main__':
    args = parse_args()
    init_detectors(args)
    # NOTE(review): workers=1 — presumably because each loaded model holds
    # GPU state that cannot be shared across worker processes; confirm.
    app.run(host='0.0.0.0', port=args.port, workers=1, access_log=True, debug=False)