"""
This file contains specific functions for computing losses of FCOS
file
"""

# from numpy.core.numeric import isfortran
# from numpy.lib.ufunclike import isneginf, isposinf
import torch
from torch.functional import unique
from torch.nn import functional as F
import torch.distributed as dist
from torch import ge, nn
import os

from ..utils import permute_to_N_HWA_K
from taa_core.layers import IOULoss
from taa_core.layers import SigmoidFocalLoss
from taa_core.modeling.matcher import Matcher
from taa_core.modeling.utils import cat
from taa_core.structures.bounding_box import BoxList
from taa_core.structures.boxlist_ops import boxlist_iou
from taa_core.structures.boxlist_ops import cat_boxlist
from sklearn.mixture import GaussianMixture
from taa_core.modeling.rpn.mla.match_gt import AnchorMatcher


# Sentinel loss assigned to (anchor, gt) pairs that are not allowed to match
# (anchor center outside the gt box); large enough to lose every argmin/topk
# comparison against real loss values.
INF = 100000000


def get_num_gpus():
    """
    Return the number of distributed workers in the current job.

    Reads the ``WORLD_SIZE`` environment variable (set by the usual
    distributed launchers) and defaults to 1 when it is absent.
    """
    # dict.get avoids the membership-test + lookup double access.
    return int(os.environ.get("WORLD_SIZE", "1"))


def reduce_sum(tensor):
    """
    Sum ``tensor`` element-wise across all distributed workers.

    In single-process runs the input is returned unchanged; otherwise a
    clone holding the all-reduced (SUM) result is returned, so the
    caller's tensor is never mutated by the in-place ``all_reduce``.
    """
    if get_num_gpus() <= 1:
        return tensor
    # NOTE: the module-level `import torch.distributed as dist` (top of
    # file) is used here; the original redundant function-local re-import
    # that shadowed it has been removed.
    tensor = tensor.clone()
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    return tensor


class RetinaMLSLossComputation(object):
    """
    This class computes the FCOS losses.

    Classification uses a sigmoid focal loss and box regression a GIoU
    loss.  Anchors are assigned to ground truths dynamically: for each
    ground truth the lowest-loss candidate anchors are split into
    positives and negatives by fitting a 2-component Gaussian Mixture
    Model on their losses (see ``compute_mla``).
    """

    def __init__(self, cfg, box_coder):
        # Focal loss for classification, configured from the FCOS section.
        self.cls_loss_func = SigmoidFocalLoss(
            cfg.MODEL.FCOS.LOSS_GAMMA,
            cfg.MODEL.FCOS.LOSS_ALPHA
        )
        self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
        self.center_sampling_radius = cfg.MODEL.FCOS.CENTER_SAMPLING_RADIUS
        self.iou_loss_type = cfg.MODEL.FCOS.IOU_LOSS_TYPE
        self.norm_reg_targets = cfg.MODEL.FCOS.NORM_REG_TARGETS
        # Max number of candidate anchors considered per ground truth.
        self.topk = cfg.MODEL.MLA.TOPK
        # Forwarded to match_pred_gt; values <= 0.5 skip the AnchorMatcher
        # and keep plain argmin matching.
        self.rand_match_ratio = cfg.MODEL.MLA.RANDOM_MATCH_RATIO
        # When True (and the matcher agrees), compute_mla picks the GMM
        # component with the smaller mean loss as foreground.
        self.minimal_loss = cfg.MODEL.MLA.MINIMAL_LOSS

        # we make use of IOU Loss for bounding boxes regression,
        # but we found that L1 in log scale can yield a similar performance
        # self.box_reg_loss_func = IOULoss(self.iou_loss_type)
        # NOTE(review): centerness_loss_func is never used in the visible
        # code of this file — possibly kept for parity with FCOS; confirm.
        self.centerness_loss_func = nn.BCEWithLogitsLoss(reduction="sum")
        self.anchor_matcher = AnchorMatcher(cfg.MODEL.MLA.ANCHOR_MATCH_MODE)
        self.box_coder = box_coder

    def get_offsets(self, anchors, reg_targets):
        """
        Distances from each anchor center to the four sides of each box.

        Args:
            anchors: (N, 4) anchor boxes; the center computation below
                implies corner (x1, y1, x2, y2) format.
            reg_targets: (M, 4) ground-truth boxes in the same format.

        Returns:
            (N, M, 4) tensor of (left, top, right, bottom) offsets; all
            four entries are positive exactly when the anchor center lies
            strictly inside the corresponding box.
        """
        # Anchor center coordinates.
        xs = (anchors[:, 2] + anchors[:, 0]) / 2
        ys = (anchors[:, 3] + anchors[:, 1]) / 2
        # Broadcast anchors (N) against targets (M) -> (N, M) each.
        l = xs[:, None] - reg_targets[:, 0][None]
        t = ys[:, None] - reg_targets[:, 1][None]
        r = reg_targets[:, 2][None] - xs[:, None]
        b = reg_targets[:, 3][None] - ys[:, None]
        offsets = torch.stack([l, t, r, b], dim=2)
        return offsets

    def GIoULoss(self, pred, target, anchor, weight=None):
        """
        Summed Generalized-IoU loss between decoded preds and targets.

        Args:
            pred: regression deltas for the predictions, viewed as (-1, 4).
            target: regression deltas for the ground truths, same shape.
            anchor: anchors used to decode both sets of deltas.
            weight: optional per-box weights; when given with a positive
                sum, per-box losses are weighted before summation.

        Returns:
            Scalar tensor: sum of (1 - GIoU) over all boxes.
        """
        pred_boxes = self.box_coder.decode(pred.view(-1, 4), anchor.view(-1, 4))
        pred_x1 = pred_boxes[:, 0]
        pred_y1 = pred_boxes[:, 1]
        pred_x2 = pred_boxes[:, 2]
        pred_y2 = pred_boxes[:, 3]
        # Guard against degenerate decoded boxes (x2 < x1 or y2 < y1).
        pred_x2 = torch.max(pred_x1, pred_x2)
        pred_y2 = torch.max(pred_y1, pred_y2)
        pred_area = (pred_x2 - pred_x1) * (pred_y2 - pred_y1)

        gt_boxes = self.box_coder.decode(target.view(-1, 4), anchor.view(-1, 4))
        target_x1 = gt_boxes[:, 0]
        target_y1 = gt_boxes[:, 1]
        target_x2 = gt_boxes[:, 2]
        target_y2 = gt_boxes[:, 3]
        target_area = (target_x2 - target_x1) * (target_y2 - target_y1)

        # Intersection rectangle; area stays zero where boxes don't overlap.
        x1_intersect = torch.max(pred_x1, target_x1)
        y1_intersect = torch.max(pred_y1, target_y1)
        x2_intersect = torch.min(pred_x2, target_x2)
        y2_intersect = torch.min(pred_y2, target_y2)
        area_intersect = torch.zeros(pred_x1.size()).to(pred)
        mask = (y2_intersect > y1_intersect) * (x2_intersect > x1_intersect)
        area_intersect[mask] = (x2_intersect[mask] - x1_intersect[mask]) * (y2_intersect[mask] - y1_intersect[mask])

        # Smallest axis-aligned box enclosing both (the GIoU "C" box).
        x1_enclosing = torch.min(pred_x1, target_x1)
        y1_enclosing = torch.min(pred_y1, target_y1)
        x2_enclosing = torch.max(pred_x2, target_x2)
        y2_enclosing = torch.max(pred_y2, target_y2)
        area_enclosing = (x2_enclosing - x1_enclosing) * (y2_enclosing - y1_enclosing) + 1e-7

        # GIoU = IoU - (|C| - |union|) / |C|; 1e-7 avoids division by zero.
        area_union = pred_area + target_area - area_intersect + 1e-7
        ious = area_intersect / area_union
        gious = ious - (area_enclosing - area_union) / area_enclosing

        losses = 1 - gious

        if weight is not None and weight.sum() > 0:
            return (losses * weight).sum()
        else:
            assert losses.numel() != 0
            return losses.sum()

    def match_pred_gt(self, pred_gt_loss, is_foreground, mask, ratio=0.0):
        """
        Pick one ground truth per anchor from the (anchor, gt) loss matrix.

        Args:
            pred_gt_loss: (num_anchors, num_gt) per-pair loss.
            is_foreground: (num_anchors,) bool mask of anchors whose center
                falls inside at least one ground-truth box.
            mask: (num_anchors, num_gt) bool mask of pairs with the anchor
                center inside that particular gt box.
            ratio: when <= 0.5 matching is the plain per-anchor argmin;
                otherwise the AnchorMatcher refines the foreground rows.

        Returns:
            (per-anchor matched gt indices, minimal-loss flag consumed by
            ``compute_mla``).
        """
        assert torch.isnan(pred_gt_loss[is_foreground]).sum() == 0, "has nan"
        assert torch.isinf(pred_gt_loss[is_foreground]).sum() == 0, "has inf"
        # Default assignment: the gt with the smallest loss per anchor.
        _, min_gt_inds = pred_gt_loss.min(dim=-1)

        if ratio <= 0.5:
            return min_gt_inds, False

        # Let the matcher (re)assign only the foreground anchors.
        pos_gt_inds, minimal_loss = self.anchor_matcher(pred_gt_loss[is_foreground], mask[is_foreground], ratio)
        minimal_loss = self.minimal_loss and minimal_loss
        # pos_gt_inds = pos_gt_inds[:-1]
        # if (pos_gt_inds.float() == -1).any().item():
        # pos_min_gt_inds = min_gt_inds[is_foreground]
        # pos_gt_inds[pos_gt_inds == -1] = pos_min_gt_inds[pos_gt_inds == -1]

        # Overwrite the argmin result in place for foreground anchors.
        min_gt_inds[is_foreground] = pos_gt_inds
        return min_gt_inds, minimal_loss

    def compute_mla(self, loss_per_img, min_gt_inds, labels, target_offsets, num_gt, topk=35,
        mini_loss=False
    ):
        """
        Split candidate anchors into positives/negatives per ground truth.

        For each gt, the ``topk`` matched anchors with the lowest loss are
        modeled with a 2-component GMM over their loss values; anchors up
        to the highest-likelihood sample of the foreground component become
        positives (this resembles probabilistic anchor assignment — TODO
        confirm against the AnchorMatcher's intent).

        Args:
            loss_per_img: (num_anchors,) loss of each anchor w.r.t. its
                matched ground truth.
            min_gt_inds: tuple (is_foreground mask, matched gt index per
                anchor) as produced by ``match_pred_gt``.
            labels: (num_gt,) class labels of the ground truths.
            target_offsets: (num_anchors, 4) regression targets already
                gathered for each anchor's matched gt.
            num_gt: number of ground-truth boxes.
            topk: max number of candidates kept per ground truth.
            mini_loss: when True, the GMM component with the smaller mean
                loss is taken as foreground instead of component 0.

        Returns:
            (labels_per_img, reg_target_per_img): per-anchor class labels
            (0 = background) and regression targets.
        """
        # labels = torch.zeros_like(loss_per_img)
        device = loss_per_img.device
        is_foreground, min_gt_inds = min_gt_inds
        labels_per_img = torch.zeros_like(loss_per_img).long()
        reg_target_per_img = torch.zeros_like(target_offsets)

        for gt in range(num_gt):
            # Foreground anchors currently matched to this ground truth.
            matched_idx = torch.nonzero(is_foreground & (min_gt_inds == gt), as_tuple=False)[:, 0]
            if matched_idx.numel() > 0:
                # Keep only the topk lowest-loss candidates.
                _, topk_idxs = loss_per_img[matched_idx].topk(
                    min(matched_idx.numel(), topk), largest=False)
                topk_idxs_per_gt = matched_idx[topk_idxs]

                if topk_idxs_per_gt.numel() > 1:
                    candidate_loss = loss_per_img[topk_idxs_per_gt]
                    candidate_loss, inds = candidate_loss.sort()
                    candidate_loss = candidate_loss.view(-1, 1).cpu().numpy()
                    # Seed the two components at the loss extremes so they
                    # converge toward a low-loss (fg) / high-loss (bg) split.
                    min_loss, max_loss = candidate_loss.min(), candidate_loss.max()
                    means_init = [[min_loss], [max_loss]]
                    weights_init = [0.5, 0.5]
                    precisions_init = [[[1.0]], [[1.0]]]
                    gmm = GaussianMixture(2,
                                    weights_init=weights_init,
                                    means_init=means_init,
                                    precisions_init=precisions_init)
                    gmm.fit(candidate_loss)
                    components = gmm.predict(candidate_loss)
                    scores = gmm.score_samples(candidate_loss)

                    components = torch.from_numpy(components).to(device)
                    scores = torch.from_numpy(scores).to(device)

                    if mini_loss:
                        # After fitting, component order is not guaranteed
                        # to follow the init order: explicitly pick the
                        # smaller-mean-loss component as foreground.
                        candidate_loss = loss_per_img[topk_idxs_per_gt]
                        if candidate_loss[components==0].mean() <= candidate_loss[components==1].mean():
                            fgs = components == 0
                            bgs = components == 1
                        else:
                            bgs = components == 0
                            fgs = components == 1
                    else:
                        # Trust the initialization: component 0 was seeded
                        # at the minimum loss.
                        fgs = components == 0
                        bgs = components == 1

                    if torch.nonzero(fgs, as_tuple=False).numel() > 0:
                        # Positives: prefix of the sorted-loss candidates up
                        # to the highest-likelihood foreground sample.
                        fg_max_score = scores[fgs].max().item()
                        fg_max_idx = torch.nonzero(fgs & (scores == fg_max_score), as_tuple=False).min()
                        # All candidates are first reset as negatives below;
                        # the positive prefix then overwrites its share.
                        is_neg = inds[fgs | bgs]
                        is_pos = inds[:fg_max_idx+1]
                    else:
                        # No foreground component: take every candidate.
                        is_pos = inds
                        is_neg = None
                else:
                    # Single candidate: it is the positive.
                    is_pos = [0]
                    is_neg = None

                # Order matters: negatives are cleared first so the positive
                # assignment can overwrite overlapping entries.
                if is_neg is not None:
                    neg_idx = topk_idxs_per_gt[is_neg]
                    labels_per_img[neg_idx] = 0
                pos_idx = topk_idxs_per_gt[is_pos]
                labels_per_img[pos_idx] = labels[gt].view(-1, 1)
                reg_target_per_img[pos_idx, :] = target_offsets[pos_idx, :]

        return labels_per_img, reg_target_per_img

    def __call__(self, anchors, box_cls, box_regression, targets):
        """
        Compute classification and regression losses for a batch.

        Args:
            anchors: per-image lists of BoxLists (one per FPN level).
            box_cls: per-level classification logits; the channel dimension
                of the first level is taken as the number of classes.
            box_regression: per-level box regression tensors (4 channels
                per location after flattening).
            targets: per-image BoxLists of ground truths carrying a
                'labels' field.

        Returns:
            (cls_loss, reg_loss), each normalized by the average number of
            positive anchors per GPU.
        """
        num_classes = box_cls[0].size(1)
        device = box_cls[0].device
        # Flatten each level to (N, HWA, K) and concatenate the levels.
        box_cls_flatten = [permute_to_N_HWA_K(x, num_classes) for x in box_cls]
        box_regr_flatten = [permute_to_N_HWA_K(x, 4) for x in box_regression]

        box_cls_flatten = cat(box_cls_flatten, dim=1)
        box_regr_flatten = cat(box_regr_flatten, dim=1)
        anchors = [cat_boxlist(x).bbox for x in anchors]

        labels_all = []
        reg_targets_all = []

        for box_cls_per_img, box_regr_per_img, target_per_img, anchor_per_img in zip(
            box_cls_flatten, box_regr_flatten, targets, anchors
        ):
            labels = target_per_img.get_field('labels')
            gt_boxes = target_per_img.bbox

            # (num_anchors, num_gt, 4) center-to-side offsets; all four
            # positive means the anchor center is inside that gt box.
            foreground_offsets = self.get_offsets(anchor_per_img, gt_boxes)
            is_in_boxes = foreground_offsets.min(dim=-1).values > 0
            pred_is_foreground = is_in_boxes.sum(dim=1) > 0
            # Regression deltas; indexed below as [anchor, matched_gt], so
            # encode presumably yields per-(anchor, gt) deltas — confirm
            # against the box_coder implementation.
            target_offsets = self.box_coder.encode(gt_boxes, anchor_per_img)

            with torch.no_grad():
                # Score every (prediction, gt) pair: (1 - IoU) regression
                # term plus the classification loss against each gt label.
                pred_box = self.box_coder.decode(box_regr_per_img, anchor_per_img)
                pred_gt_ious = boxlist_iou(
                                BoxList(pred_box, target_per_img.size, mode='xyxy'),
                                target_per_img)
                pred_gt_rloss = 1 - pred_gt_ious
                pred_gt_closs = positive_cls_loss(box_cls_per_img.detach(), labels, self.cls_loss_func)
                pred_gt_loss = pred_gt_closs + pred_gt_rloss
                # Pairs whose anchor center is outside the gt box can never
                # win a matching comparison.
                pred_gt_loss[~is_in_boxes] = INF
                min_gt_inds, mini_loss = self.match_pred_gt(pred_gt_loss, pred_is_foreground,
                                                            is_in_boxes, self.rand_match_ratio)

                # Gather the matched-gt column for every anchor.
                hw_idx = torch.arange(target_offsets.size(0), device=device)
                pred_gt_loss = pred_gt_loss[hw_idx, min_gt_inds]
                target_offsets = target_offsets[hw_idx, min_gt_inds]

                labels_per_img, reg_target_per_img = self.compute_mla(pred_gt_loss,
                                                        (pred_is_foreground, min_gt_inds),
                                                        labels,
                                                        target_offsets,
                                                        len(gt_boxes),
                                                        topk=self.topk,
                                                        mini_loss=mini_loss)
                labels_all.append(labels_per_img)
                reg_targets_all.append(reg_target_per_img)


        labels_all = cat(labels_all, dim=0).int()
        reg_targets_all = cat(reg_targets_all, dim=0)
        anchors = cat(anchors, dim=0)

        # Positives are anchors assigned a non-background (> 0) label.
        pos_inds = torch.nonzero(labels_all > 0).squeeze(1)
        box_cls_flatten = box_cls_flatten.reshape(-1, num_classes)
        box_regr_flatten = box_regr_flatten.reshape(-1, 4)
        box_regr_flatten = box_regr_flatten[pos_inds]
        reg_targets_all = reg_targets_all[pos_inds]
        anchors = anchors[pos_inds]

        # Normalize by the average positive count across workers so every
        # GPU contributes consistently scaled gradients.
        num_gpus = get_num_gpus()
        total_num_pos = reduce_sum(pos_inds.new_tensor([pos_inds.numel()])).item()
        num_pos_avg_per_gpu = max(total_num_pos / float(num_gpus), 1.0)

        cls_loss = self.cls_loss_func(
            box_cls_flatten,
            labels_all,
        ) / num_pos_avg_per_gpu

        if pos_inds.numel() > 0:
            reg_loss = self.GIoULoss(
                box_regr_flatten,
                reg_targets_all,
                anchors
            ) / num_pos_avg_per_gpu

        else:
            # No positives: a sum over the empty selection yields a zero
            # loss that still participates in autograd.
            reg_loss = box_regr_flatten.sum()
        return cls_loss, reg_loss


def positive_cls_loss(logits, labels, cls_loss_func):
    """
    Evaluate the classification loss of every prediction against every
    ground-truth label.

    Args:
        logits: (A, C) classification logits for A anchors.
        labels: iterable of per-ground-truth class labels.
        cls_loss_func: callable ``(logits, labels, sum=False)`` returning
            a per-anchor, per-class loss tensor.

    Returns:
        (A, num_gt) tensor: column ``j`` holds each anchor's loss when
        scored against ground-truth label ``j``.
    """
    num_anchors = logits.size(0)
    # Score all anchors against each gt label, one column per gt.
    per_label_losses = [
        cls_loss_func(logits, gt_label.repeat(num_anchors).int(), sum=False).sum(dim=1)
        for gt_label in labels
    ]
    return torch.stack(per_label_losses, dim=1)


def make_retinamls_loss_evaluator(cfg, box_coder):
    """Build a :class:`RetinaMLSLossComputation` from the given config."""
    return RetinaMLSLossComputation(cfg, box_coder)
