from torchvision.ops.boxes import batched_nms
from jjzhk.device import device
from .model_zoo import MODEL_ZOO
import torch
from .segbasemodel import SegBaseModel
from collections import OrderedDict
from ELib.utils.block import conv_bn, sepconv_bn, conv_bias, ASFF
import cv2
import numpy as np
from ELib.utils.utils import iou_calc3


@MODEL_ZOO.register()
class StrongerV3(SegBaseModel):
    """Stronger-YOLO v3 detection model built on a SegBaseModel backbone.

    Three backbone feature maps (small/mid/large stride) are processed by
    per-level conv heads with top-down fusion (1x1 channel reduction +
    2x upsample + channel concat); optional ASFF modules re-fuse the three
    levels before the final detection convolutions.
    """

    def __init__(self, cfg):
        super(StrongerV3, self).__init__(cfg, need_backbone=True)

        self.cfg = cfg
        self.numclass = self.cfg.BASE.NUM_CLASSES
        # boxes predicted per grid cell (anchor-free formulation)
        self.gt_per_grid = self.cfg.BASE.GT_PER_GRID
        # backbone output channels, indexed [large-level, mid-level, small-level]
        self.outC = self.cfg.MODEL.BACKBONE_OUTCHANNELS

        self.heads = []
        self.activate_type = 'relu6'
        # head for the coarsest feature map (stride 32)
        self.headslarge = torch.nn.Sequential(OrderedDict([
            ('conv0', conv_bn(self.outC[0], 512,
                              kernel=1, stride=1, padding=0)),
            ('conv1', sepconv_bn(512, 1024, kernel=3,
                                 stride=1, padding=1, seprelu=True)),
            ('conv2', conv_bn(1024, 512, kernel=1, stride=1, padding=0)),
            ('conv3', sepconv_bn(512, 1024, kernel=3,
                                 stride=1, padding=1, seprelu=True)),
            ('conv4', conv_bn(1024, 512, kernel=1, stride=1, padding=0)),
        ]))
        # raw prediction conv: gt_per_grid * (numclass + 5) output channels
        # (4 box coords + 1 objectness + numclass class logits per box)
        self.detlarge = torch.nn.Sequential(OrderedDict([
            ('conv5', sepconv_bn(512, 1024, kernel=3,
                                 stride=1, padding=1, seprelu=True)),
            ('conv6', conv_bias(1024, self.gt_per_grid *
                                (self.numclass+5), kernel=1, stride=1, padding=0))
        ]))

        # 1x1 channel reduction before upsampling large -> mid
        self.mergelarge_conv7 = conv_bn(512, 256, kernel=1, stride=1, padding=0)

        # head for the mid feature map (stride 16); input is mid backbone
        # channels plus the 256 upsampled channels
        self.headsmid = torch.nn.Sequential(OrderedDict([
            ('conv8', conv_bn(self.outC[1]+256,
                              256, kernel=1, stride=1, padding=0)),
            ('conv9', sepconv_bn(256, 512, kernel=3,
                                 stride=1, padding=1, seprelu=True)),
            ('conv10', conv_bn(512, 256, kernel=1, stride=1, padding=0)),
            ('conv11', sepconv_bn(256, 512, kernel=3,
                                  stride=1, padding=1, seprelu=True)),
            ('conv12', conv_bn(512, 256, kernel=1, stride=1, padding=0)),
        ]))
        self.detmid = torch.nn.Sequential(OrderedDict([
            ('conv13', sepconv_bn(256, 512, kernel=3,
                                  stride=1, padding=1, seprelu=True)),
            ('conv14', conv_bias(512, self.gt_per_grid *
                                 (self.numclass+5), kernel=1, stride=1, padding=0))
        ]))

        # 1x1 channel reduction before upsampling mid -> small
        self.mergemid_conv15 = conv_bn(256, 128, kernel=1, stride=1, padding=0)

        # head for the finest feature map (stride 8)
        self.headsmall = torch.nn.Sequential(OrderedDict([
            ('conv16', conv_bn(self.outC[2]+128,
                               128, kernel=1, stride=1, padding=0)),
            ('conv17', sepconv_bn(128, 256, kernel=3,
                                  stride=1, padding=1, seprelu=True)),
            ('conv18', conv_bn(256, 128, kernel=1, stride=1, padding=0)),
            ('conv19', sepconv_bn(128, 256, kernel=3,
                                  stride=1, padding=1, seprelu=True)),
            ('conv20', conv_bn(256, 128, kernel=1, stride=1, padding=0)),
        ]))
        self.detsmall = torch.nn.Sequential(OrderedDict([
            ('conv21', sepconv_bn(128, 256, kernel=3,
                                  stride=1, padding=1, seprelu=True)),
            ('conv22', conv_bias(256, self.gt_per_grid *
                                 (self.numclass+5), kernel=1, stride=1, padding=0))
        ]))

        # optional Adaptively Spatial Feature Fusion across the three levels
        if self.cfg.MODEL.ASFF:
            self.asff0 = ASFF(0, activate=self.activate_type)
            self.asff1 = ASFF(1, activate=self.activate_type)
            self.asff2 = ASFF(2, activate=self.activate_type)

    def decode(self, output, stride):
        """Decode a raw head map into absolute boxes (training path).

        Args:
            output: (bz, gt_per_grid*(5+numclass), g, g) raw head output;
                assumes a square feature map (g taken from the last dim).
            stride: pixel stride of this feature level.

        Returns:
            (bz, g, g, gt_per_grid, 5+numclass) tensor of
            [x1, y1, x2, y2, sigmoid(conf), sigmoid(prob...)] in input pixels.
        """
        bz = output.shape[0]
        gridsize = output.shape[-1]

        output = output.permute(0, 2, 3, 1)
        output = output.view(bz, gridsize, gridsize,
                             self.gt_per_grid, 5+self.numclass)
        x1y1, x2y2, conf, prob = torch.split(
            output, [2, 2, 1, self.numclass], dim=4)
        shiftx = torch.arange(0, gridsize, dtype=torch.float32)
        shifty = torch.arange(0, gridsize, dtype=torch.float32)
        shifty, shiftx = torch.meshgrid([shiftx, shifty])
        shiftx = shiftx.unsqueeze(-1).repeat(bz, 1, 1, self.gt_per_grid)
        shifty = shifty.unsqueeze(-1).repeat(bz, 1, 1, self.gt_per_grid)

        xy_grid = torch.stack([shiftx, shifty], dim=4).to(device)
        # corners are positive exp() offsets from the cell center (grid + 0.5)
        x1y1 = (xy_grid+0.5-torch.exp(x1y1))*stride
        x2y2 = (xy_grid+0.5+torch.exp(x2y2))*stride

        xyxy = torch.cat((x1y1, x2y2), dim=4)
        conf = torch.sigmoid(conf)
        prob = torch.sigmoid(prob)
        output = torch.cat((xyxy, conf, prob), 4)
        return output

    def decode_infer(self, output, stride):
        """Decode a raw head map and flatten boxes (inference path).

        Same math as `decode`, but the shape is read via torch.tensor —
        presumably to keep shapes traceable for export (TODO confirm) — and
        the result is flattened to (bz, g*g*gt_per_grid, 5+numclass).
        """
        sh = torch.tensor(output.shape)
        bz = sh[0]
        gridsize = sh[-1]

        output = output.permute(0, 2, 3, 1)
        output = output.view(bz, gridsize, gridsize, self.gt_per_grid, 5+self.numclass)
        x1y1, x2y2, conf, prob = torch.split(
            output, [2, 2, 1, self.numclass], dim=4)

        shiftx = torch.arange(0, gridsize, dtype=torch.float32)
        shifty = torch.arange(0, gridsize, dtype=torch.float32)
        shifty, shiftx = torch.meshgrid([shiftx, shifty])
        shiftx = shiftx.unsqueeze(-1).repeat(bz, 1, 1, self.gt_per_grid)
        shifty = shifty.unsqueeze(-1).repeat(bz, 1, 1, self.gt_per_grid)

        xy_grid = torch.stack([shiftx, shifty], dim=4).to(device)
        x1y1 = (xy_grid+0.5-torch.exp(x1y1))*stride
        x2y2 = (xy_grid+0.5+torch.exp(x2y2))*stride

        xyxy = torch.cat((x1y1, x2y2), dim=4)
        conf = torch.sigmoid(conf)
        prob = torch.sigmoid(prob)
        output = torch.cat((xyxy, conf, prob), 4)
        output = output.view(bz, -1, 5+self.numclass)
        return output

    def forward(self, input, **kwargs):
        """Run the three-level detection pipeline.

        Returns:
            training: (outsmall, outmid, outlarge, predsmall, predmid,
                predlarge) — raw head maps plus decoded predictions.
            eval: single (bz, N, 5+numclass) tensor of decoded predictions
                concatenated across levels (small, mid, large).
        """
        feat_small, feat_mid, feat_large = self.backbone.forward(input)
        conv = self.headslarge(feat_large)
        convlarge = conv

        # top-down fusion: reduce channels, 2x upsample, concat with mid feature
        conv = self.mergelarge_conv7(convlarge)
        sh = torch.tensor(conv.shape[-2:])*2
        conv = torch.nn.functional.interpolate(conv, size=tuple(sh))
        conv = self.headsmid(torch.cat((conv, feat_mid), dim=1))
        convmid = conv

        conv = self.mergemid_conv15(convmid)
        sh = torch.tensor(conv.shape[-2:])*2
        conv = torch.nn.functional.interpolate(conv, size=tuple(sh))

        conv = self.headsmall(torch.cat((conv, feat_small), dim=1))
        convsmall = conv
        if self.cfg.MODEL.ASFF:
            # NOTE(review): asff1/asff2 receive the already-fused convlarge /
            # convmid instead of the pre-fusion maps — confirm this cascading
            # is intentional rather than a copy-paste slip.
            convlarge = self.asff0(convlarge, convmid, convsmall)
            convmid = self.asff1(convlarge, convmid, convsmall)
            convsmall = self.asff2(convlarge, convmid, convsmall)
        outlarge = self.detlarge(convlarge)
        outmid = self.detmid(convmid)
        outsmall = self.detsmall(convsmall)
        if self.training:
            predlarge = self.decode(outlarge, 32)
            predmid = self.decode(outmid, 16)
            predsmall = self.decode(outsmall, 8)
        else:
            predlarge = self.decode_infer(outlarge, 32)
            predmid = self.decode_infer(outmid, 16)
            predsmall = self.decode_infer(outsmall, 8)
            pred = torch.cat([predsmall, predmid, predlarge], dim=1)

            return pred
        return outsmall, outmid, outlarge, predsmall, predmid, predlarge

    @torch.no_grad()
    def get_predict(self, image, info, **kwargs):
        """Run inference on one image and return thresholded detections.

        Args:
            image: numpy NHWC batch array (converted to NCHW on `device`).
            info: per-image metadata; uses info[0]['height'/'width'/'img_id'].

        Returns:
            [[(x1, y1), (x2, y2), classname, img_id, prob], ...] wrapped in a
            single-element list (batch size 1); [[]] when nothing is detected.
        """
        orishape = (info[0]['height'], info[0]['width'])
        ii = torch.from_numpy(image).to(device).float().permute(0, 3, 1, 2)

        output = self(ii)
        bbox, bboxvari = _postprocess(
            self.cfg, output[0], self.cfg.TEST.IMAGE_SIZE[0], orishape)
        nms_boxes, nms_scores, nms_labels = torch_nms(self.cfg, bbox, variance=bboxvari)
        # torch_nms returns (None, None, None) when nothing survives the score
        # threshold; previously this crashed on nms_boxes.cpu() below.
        if nms_boxes is None:
            return [[]]
        boxes = nms_boxes.cpu().numpy()
        labels = nms_labels.cpu().numpy()
        probs = nms_scores.cpu().numpy()

        result = []
        for box, label, prob in zip(boxes, labels, probs):
            if prob > self.cfg.BASE.CONF_THRESHOLD:
                x1, y1, x2, y2 = box[0], box[1], box[2], box[3]

                result.append(
                    [
                        (x1, y1),
                        (x2, y2),
                        self.cfg.classname(label),
                        info[0]['img_id'],
                        prob
                    ]
                )

        return [result]


def _postprocess(cfg, pred_bbox, test_input_size, org_img_shape):
    if cfg.MODEL.BOXLOSS == 'KL':
        pred_coor = pred_bbox[:, 0:4]
        pred_vari = pred_bbox[:, 4:8]
        pred_vari = torch.exp(pred_vari)
        pred_conf = pred_bbox[:, 8]
        pred_prob = pred_bbox[:, 9:]
    else:
        pred_coor = pred_bbox[:, 0:4]
        pred_conf = pred_bbox[:, 4]
        pred_prob = pred_bbox[:, 5:]
    org_h, org_w = org_img_shape

    resize_ratio = min(1.0 * test_input_size / org_w,
                       1.0 * test_input_size / org_h)
    dw = (test_input_size - resize_ratio * org_w) / 2
    dh = (test_input_size - resize_ratio * org_h) / 2
    pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
    pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio
    x1, y1, x2, y2 = torch.split(pred_coor, [1, 1, 1, 1], dim=1)
    x1, y1 = torch.max(x1, torch.zeros_like(
        x1)), torch.max(y1, torch.zeros_like(y1))
    x2, y2 = torch.min(x2, torch.ones_like(x2)*(org_w-1)
                       ), torch.min(y2, torch.ones_like(y2)*(org_h-1))
    pred_coor = torch.cat([x1, y1, x2, y2], dim=-1)

    if pred_prob.shape[-1] == 0:
        pred_prob = torch.ones((pred_prob.shape[0], 1)).cuda()
    scores = pred_conf.unsqueeze(-1) * pred_prob
    bboxes = torch.cat([pred_coor, scores], dim=-1)
    if cfg.MODEL.BOXLOSS == 'KL' and False:
        return bboxes, pred_vari
    else:
        return bboxes, None


def torch_nms(cfg, boxes, variance=None):
    """Per-class NMS (hard or soft) with optional KL variance voting.

    Args:
        cfg: config providing BASE.SCORE_THRESHOLD, BASE.IOU_THRESHOLD,
            BASE.SOFT, plus softsigma (soft-NMS) / vvsigma (KL voting).
        boxes: (N, 4 + numcls) tensor of [x1, y1, x2, y2, score_per_class...].
        variance: optional (N, 4) per-coordinate variances enabling KL-style
            box voting on the kept boxes.

    Returns:
        (boxes, scores, labels) tensors, or (None, None, None) when no box
        passes the score threshold for any class.
    """
    def nms_class(clsboxes):
        # clsboxes rows: box(4) + score(1), or box + score + variance(4)
        assert clsboxes.shape[1] == 5 or clsboxes.shape[1] == 9
        keep = []
        while clsboxes.shape[0] > 0:
            # greedily keep the current highest-scoring box
            maxidx = torch.argmax(clsboxes[:, 4])
            maxbox = clsboxes[maxidx].unsqueeze(0)
            clsboxes = torch.cat((clsboxes[:maxidx], clsboxes[maxidx + 1:]), 0)
            iou = iou_calc3(maxbox[:, :4], clsboxes[:, :4])
            # KL VOTE: refine the kept box as a variance-weighted average of
            # all boxes overlapping it (plus itself)
            if variance is not None:
                ioumask = iou > 0
                klbox = clsboxes[ioumask]
                klbox = torch.cat((klbox, maxbox), 0)
                kliou = iou[ioumask]
                klvar = klbox[:, -4:]
                pi = torch.exp(-1 * torch.pow((1 - kliou), 2) / cfg.vvsigma)
                # weight 1.0 for the kept box itself; fixed: was
                # torch.ones(1).cuda(), which hard-coded the GPU and broke
                # CPU-only inference — allocate on the boxes' device instead
                pi = torch.cat((pi, torch.ones(1, device=clsboxes.device)), 0).unsqueeze(1)
                pi = pi / klvar
                pi = pi / pi.sum(0)
                maxbox[0, :4] = (pi * klbox[:, :4]).sum(0)
            keep.append(maxbox)

            weight = torch.ones_like(iou)
            if not cfg.BASE.SOFT:
                # hard NMS: kill every box overlapping above the threshold
                weight[iou > cfg.BASE.IOU_THRESHOLD] = 0
            else:
                # soft NMS: decay scores with a Gaussian of the overlap
                weight = torch.exp(-1.0 * (iou ** 2 / cfg.softsigma))
            clsboxes[:, 4] = clsboxes[:, 4] * weight
            filter_idx = (clsboxes[:, 4] >= cfg.BASE.SCORE_THRESHOLD).nonzero().squeeze(-1)
            clsboxes = clsboxes[filter_idx]
        return torch.cat(keep, 0).to(clsboxes.device)

    bbox = boxes[:, :4].view(-1, 4)
    numcls = boxes.shape[1] - 4
    scores = boxes[:, 4:].view(-1, numcls)
    # Picked bounding boxes, gathered per class
    picked_boxes, picked_score, picked_label = [], [], []
    for i in range(numcls):
        filter_idx = (scores[:, i] >= cfg.BASE.SCORE_THRESHOLD).nonzero().squeeze(-1)
        if len(filter_idx) == 0:
            continue
        filter_boxes = bbox[filter_idx]
        filter_scores = scores[:, i][filter_idx].unsqueeze(1)
        if variance is not None:
            filter_variance = variance[filter_idx]
            clsbox = nms_class(torch.cat((filter_boxes, filter_scores, filter_variance), 1))
        else:
            clsbox = nms_class(torch.cat((filter_boxes, filter_scores), 1))
        if clsbox.shape[0] > 0:
            picked_boxes.append(clsbox[:, :4])
            picked_score.append(clsbox[:, 4])
            # NOTE(review): ByteTensor caps class ids at 255 — fine for
            # typical detection configs; revisit if numcls can exceed 255.
            picked_label.extend([torch.ByteTensor([i]) for _ in range(len(clsbox))])
    if len(picked_boxes) == 0:
        return None, None, None
    else:
        return torch.cat(picked_boxes), torch.cat(picked_score), torch.cat(picked_label)
