"""
This file contains specific functions for computing losses of FCOS
file
"""

# from numpy.core.numeric import isfortran
# from numpy.lib.ufunclike import isneginf, isposinf
import torch
from torch.nn import functional as F
from torch import ge, nn
import os
from ..utils import permute_to_N_HWA_K
from taa_core.layers import IOULoss
from taa_core.layers import SigmoidFocalLoss
from taa_core.modeling.matcher import Matcher
from taa_core.modeling.utils import cat
from taa_core.structures.bounding_box import BoxList
from taa_core.structures.boxlist_ops import boxlist_iou
# from taa_core.structures.boxlist_ops import cat_boxlist
from sklearn.mixture import GaussianMixture
from .match_gt import AnchorMatcher


INF = 100000000


def get_num_gpus():
    """Return the number of distributed workers from ``WORLD_SIZE``, defaulting to 1.

    Uses a single ``os.environ.get`` lookup instead of the membership-test-
    then-index double lookup.
    """
    return int(os.environ.get("WORLD_SIZE", 1))


def reduce_sum(tensor):
    """Sum ``tensor`` across all distributed workers.

    On a single-process run the input is returned untouched; otherwise a
    clone is all-reduced (SUM) so the caller's tensor is never mutated.
    """
    if get_num_gpus() <= 1:
        return tensor
    import torch.distributed as dist
    reduced = tensor.clone()
    dist.all_reduce(reduced, op=dist.ReduceOp.SUM)
    return reduced


class MLALossComputation(object):
    """
    This class computes the FCOS losses.

    NOTE(review): this version carries debugging instrumentation — a
    ``reg_targets.txt`` dump opened in ``__init__``, hard ``exit()`` calls in
    ``compute_mla``, and a ``__call__`` whose final loss accumulation/return
    is commented out (it currently returns ``None``). Confirm intent before
    using in training.
    """

    def __init__(self, cfg):
        # Focal loss for classification, parameterized by config gamma/alpha.
        self.cls_loss_func = SigmoidFocalLoss(
            cfg.MODEL.FCOS.LOSS_GAMMA,
            cfg.MODEL.FCOS.LOSS_ALPHA
        )
        self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
        self.center_sampling_radius = cfg.MODEL.FCOS.CENTER_SAMPLING_RADIUS
        self.iou_loss_type = cfg.MODEL.FCOS.IOU_LOSS_TYPE
        self.norm_reg_targets = cfg.MODEL.FCOS.NORM_REG_TARGETS
        self.topk = cfg.MODEL.MLA.TOPK
        self.rand_match_ratio = cfg.MODEL.MLA.RANDOM_MATCH_RATIO

        # we make use of IOU Loss for bounding boxes regression,
        # but we found that L1 in log scale can yield a similar performance
        self.box_reg_loss_func = IOULoss(self.iou_loss_type)
        self.centerness_loss_func = nn.BCEWithLogitsLoss(reduction="sum")
        self.anchor_matcher = AnchorMatcher()
        # Debug dump of matched regression targets; recreated on every run.
        # NOTE(review): the handle is never closed — acceptable for a debug
        # artifact, but should be removed for production training.
        if os.path.exists("reg_targets.txt"):
            os.unlink("reg_targets.txt")
        self.fp = open("reg_targets.txt", "w+")

    def get_offsets(self, location, gt):
        """Compute (l, t, r, b) offsets from each location to each GT box.

        Args:
            location: (A, 2) tensor of (x, y) anchor-point coordinates.
            gt: (G, 4) tensor of boxes in (x1, y1, x2, y2) form.

        Returns:
            (A, G, 4) tensor of left/top/right/bottom distances.
        """
        xs, ys = location[:, 0], location[:, 1]
        l = xs[:, None] - gt[:, 0][None]
        t = ys[:, None] - gt[:, 1][None]
        r = gt[:, 2][None] - xs[:, None]
        b = gt[:, 3][None] - ys[:, None]
        offsets = torch.stack([l, t, r, b], dim=2)
        return offsets

    def encode(self, locations, preds):
        """Convert predicted (l, t, r, b) offsets back to (x1, y1, x2, y2) boxes.

        Args:
            locations: (A, 2) anchor-point coordinates.
            preds: (..., 4*K) predicted offsets; must be finite.

        Returns:
            Boxes with the same leading shape as ``preds``.
        """
        assert torch.isfinite(preds).all().item()
        locations = locations.to(preds.device)

        if preds.numel() == 0:
            return torch.empty_like(preds)

        preds = preds.view(preds.size()[:-1] + (-1, 4))
        # x1y1 = location - (l, t); x2y2 = location + (r, b)
        boxes = torch.cat((locations.unsqueeze(-2) - preds[..., :2],
                           locations.unsqueeze(-2) + preds[..., 2:]),
                        dim=-1).view(preds.size()[:-2] + (-1, ))

        return boxes

    def decode(self, locations, gt):
        """Compute (l, t, r, b) offsets from each location to a single GT box.

        Args:
            locations: (A, 2) anchor-point coordinates.
            gt: a single box as a length-4 sequence (x1, y1, x2, y2).

        Returns:
            (A, 4) tensor of offsets.
        """
        xs, ys = locations[:, 0], locations[:, 1]
        l = xs - gt[0]
        t = ys - gt[1]
        r = gt[2] - xs
        b = gt[3] - ys
        return torch.stack([l,t,r,b], dim=1)

    def match_pred_gt(self, pred_gt_loss, is_foreground, mask, ratio=0.0):
        """Assign each prediction to the GT with the lowest matching loss.

        When ``ratio > 0`` a fraction of the foreground predictions is
        re-matched by ``self.anchor_matcher``; entries it leaves unmatched
        (-1) fall back to the min-loss assignment.

        Args:
            pred_gt_loss: (A, G) per-prediction-per-GT matching cost.
            is_foreground: (A,) bool mask of predictions inside any GT box.
            mask: (A, G) bool validity mask passed to the anchor matcher.
            ratio: random-match ratio; 0 disables the matcher entirely.

        Returns:
            (A,) long tensor of matched GT indices.
        """
        assert torch.isnan(pred_gt_loss[is_foreground]).sum() == 0, "has nan"
        assert torch.isinf(pred_gt_loss[is_foreground]).sum() == 0, "has inf"
        _, min_gt_inds = pred_gt_loss.min(dim=-1)

        if ratio == 0.0:
            return min_gt_inds

        pos_gt_inds = self.anchor_matcher(pred_gt_loss[is_foreground], mask[is_foreground], ratio)
        # Fall back to the min-loss GT wherever the matcher declined (-1).
        pos_min_gt_inds = min_gt_inds[is_foreground]
        pos_gt_inds[pos_gt_inds == -1] = pos_min_gt_inds[pos_gt_inds == -1]

        min_gt_inds[is_foreground] = pos_gt_inds
        return min_gt_inds

    def compute_mla(self, loss_per_img, min_gt_inds, labels, target_offsets, num_gt, topk=35, gt_boxes=None, locations=None, preds=None,pred=None):
        """Select positive/negative samples per GT via a 2-component GMM on losses.

        For each GT, the top-k lowest-loss candidates are fit with a
        two-component Gaussian mixture; candidates up to the best-scoring
        member of the low-loss component become positives.

        Args:
            loss_per_img: (A,) per-prediction matching loss.
            min_gt_inds: tuple ``(is_foreground, matched_gt_index)``.
            labels: (G,) GT class labels.
            target_offsets: (A, 4) regression targets for the matched GT.
            num_gt: number of ground-truth boxes G.
            topk: candidate pool size per GT.
            gt_boxes, locations: optional; enable the offset sanity check.
            preds, pred: optional debug tensors; enable the file dump.

        Returns:
            ``(labels_per_img, reg_target_per_img)`` — (A,) labels and
            (A, 4) regression targets (zeros for non-positives).
        """
        device = loss_per_img.device
        is_foreground, min_gt_inds = min_gt_inds
        labels_per_img = torch.zeros_like(loss_per_img).long()
        reg_target_per_img = torch.zeros_like(target_offsets)

        for gt in range(num_gt):
            matched_idx = torch.nonzero(is_foreground & (min_gt_inds == gt), as_tuple=False)[:, 0]
            if matched_idx.numel() > 0:
                _, topk_idxs = loss_per_img[matched_idx].topk(
                    min(matched_idx.numel(), topk), largest=False)
                topk_idxs_per_gt = matched_idx[topk_idxs]

                if topk_idxs_per_gt.numel() > 1:
                    candidate_loss = loss_per_img[topk_idxs_per_gt]
                    candidate_loss, inds = candidate_loss.sort()
                    candidate_loss = candidate_loss.view(-1, 1).cpu().numpy()
                    min_loss, max_loss = candidate_loss.min(), candidate_loss.max()
                    # Initialize one component at the loss minimum (the
                    # "positive" cluster) and one at the maximum.
                    means_init = [[min_loss], [max_loss]]
                    weights_init = [0.5, 0.5]
                    precisions_init = [[[1.0]], [[1.0]]]
                    gmm = GaussianMixture(2,
                                    weights_init=weights_init,
                                    means_init=means_init,
                                    precisions_init=precisions_init)
                    gmm.fit(candidate_loss)
                    components = gmm.predict(candidate_loss)
                    scores = gmm.score_samples(candidate_loss)

                    components = torch.from_numpy(components).to(device)
                    scores = torch.from_numpy(scores).to(device)
                    fgs = components == 0
                    bgs = components == 1
                    if torch.nonzero(fgs, as_tuple=False).numel() > 0:
                        fg_max_score = scores[fgs].max().item()
                        # Positives: everything up to (and including) the
                        # highest-likelihood sample of the low-loss component.
                        fg_max_idx = torch.nonzero(fgs & (scores == fg_max_score), as_tuple=False).min()
                        is_neg = inds[fgs | bgs]
                        is_pos = inds[:fg_max_idx+1]
                    else:
                        is_pos = inds
                        is_neg = None
                else:
                    is_pos = [0]
                    is_neg = None

                if is_neg is not None:
                    neg_idx = topk_idxs_per_gt[is_neg]
                    labels_per_img[neg_idx] = 0
                pos_idx = topk_idxs_per_gt[is_pos]
                # NOTE(review): the value has shape (1, 1) while the
                # destination slice is 1-D — verify broadcasting is intended.
                labels_per_img[pos_idx] = labels[gt].view(-1, 1)
                reg_target_per_img[pos_idx, :] = target_offsets[pos_idx, :]

                # Debug instrumentation: dump the top/bottom scored matches
                # per GT. Only runs when the optional score tensors are given.
                # BUGFIX: the original referenced the undefined name ``fp``
                # (NameError, should be ``self.fp``), indexed ``pred``/``preds``
                # even though the call site passes them as None (TypeError),
                # and indexed ``neg_idx`` when ``is_neg`` was None (NameError).
                if pred is not None and preds is not None:
                    k = 15
                    _, topk_pos_idx = pred[pos_idx].topk(min(k, len(pos_idx)))
                    dump_pos_idx = pos_idx[topk_pos_idx]
                    self.fp.write(f"pos {gt}: {preds[dump_pos_idx, :].cpu().numpy().tolist()}\n")
                    if is_neg is not None:
                        _, topk_neg_idx = pred[neg_idx].topk(min(k, len(neg_idx)), largest=False)
                        dump_neg_idx = neg_idx[topk_neg_idx]
                        self.fp.write(f"neg {gt}: {preds[dump_neg_idx, :].cpu().numpy().tolist()}\n")
                if gt_boxes is not None and locations is not None:
                    # Sanity check: matched offsets must agree with the GT box
                    # when re-decoded at the positive locations.
                    if not (self.decode(locations[pos_idx], gt_boxes[gt]) == target_offsets[pos_idx]).all():
                        print("target_offsets are not align")
                        exit(1)
        return labels_per_img, reg_target_per_img

    def calc_iou(self, pred, target):
        """IoU between boxes given in (l, t, r, b) offset form.

        Both tensors are (N, 4) distances from a shared anchor point, so
        width = l + r and height = t + b. The +1.0 smoothing keeps the ratio
        finite for degenerate boxes.
        """
        pred_left = pred[:, 0]
        pred_top = pred[:, 1]
        pred_right = pred[:, 2]
        pred_bottom = pred[:, 3]

        target_left = target[:, 0]
        target_top = target[:, 1]
        target_right = target[:, 2]
        target_bottom = target[:, 3]

        target_area = (target_left + target_right) * \
                      (target_top + target_bottom)
        pred_area = (pred_left + pred_right) * \
                    (pred_top + pred_bottom)

        w_intersect = torch.min(pred_left, target_left) + torch.min(pred_right, target_right)
        h_intersect = torch.min(pred_bottom, target_bottom) + torch.min(pred_top, target_top)
        area_intersect = w_intersect * h_intersect
        area_union = target_area + pred_area - area_intersect
        ious = (area_intersect + 1.0) / (area_union + 1.0)

        return ious

    def __call__(self, locations, box_cls, box_regression, box_regression_refine, centerness, targets):
        """Run label assignment for a batch.

        NOTE(review): currently returns ``None`` — the per-image results are
        computed but the accumulation and final ``self.loss`` call are
        commented out (debugging state).
        """
        # Intent appears to be "raw and refined regressions must not be the
        # same tensor"; `.all()` is 0-dim so `.sum() == 0` asserts not-all-equal.
        assert (box_regression[0] == box_regression_refine[0]).all().sum() == 0
        num_classes = box_cls[0].size(1)
        device = box_cls[0].device

        # Flatten every FPN level to (N, HWA, K) and concatenate over levels.
        box_cls_flatten = [permute_to_N_HWA_K(x, num_classes) for x in box_cls]
        box_regr_flatten = [permute_to_N_HWA_K(x, 4) for x in box_regression]
        box_regr_refine_flatten = [permute_to_N_HWA_K(x, 4) for x in box_regression_refine]
        centerness_flatten = [permute_to_N_HWA_K(x, 1) for x in centerness]

        box_cls_flatten = cat(box_cls_flatten, dim=1)
        box_regr_flatten = cat(box_regr_flatten, dim=1)
        box_regr_refine_flatten = cat(box_regr_refine_flatten, dim=1)
        centerness_flatten = cat(centerness_flatten, dim=1)
        locations = cat(locations, dim=0)

        labels_all = []
        reg_targets_all = []

        for box_cls_per_img, box_regr_per_img, target_per_img in zip(
            box_cls_flatten, box_regr_flatten, targets
        ):
            labels = target_per_img.get_field('labels')
            gt_boxes = target_per_img.bbox

            target_offsets = self.get_offsets(locations, gt_boxes)
            # A location is a candidate only if it lies strictly inside a box.
            is_in_boxes = target_offsets.min(dim=-1).values > 0
            pred_is_foreground = is_in_boxes.sum(dim=1) > 0

            with torch.no_grad():
                hw_idx = torch.arange(target_offsets.size(0), device=device)
                pred_box = self.encode(locations, box_regr_per_img.detach())
                # Matching cost = (1 - GIoU) + positive classification loss.
                pred_gt_ious = boxlist_iou(
                                BoxList(pred_box, target_per_img.size, mode='xyxy'),
                                target_per_img,
                                box_type="giou")
                pred_gt_rloss = 1 - pred_gt_ious
                pred_gt_closs = positive_cls_loss(box_cls_per_img.detach(), labels, self.cls_loss_func)
                pred_gt_loss = pred_gt_closs + pred_gt_rloss

                # [A, G] -> [A, min(G)]; out-of-box pairs are excluded via INF.
                pred_gt_loss[~is_in_boxes] = INF
                min_gt_inds = self.match_pred_gt(pred_gt_loss, pred_is_foreground, is_in_boxes, ratio=self.rand_match_ratio)
                pred_gt_loss = pred_gt_loss[hw_idx, min_gt_inds]

                target_offsets = target_offsets[hw_idx, min_gt_inds]

                labels_per_img, reg_target_per_img = self.compute_mla(pred_gt_loss,
                                                        (pred_is_foreground, min_gt_inds),
                                                        labels,
                                                        target_offsets,
                                                        len(gt_boxes),
                                                        topk=self.topk,
                                                        gt_boxes=gt_boxes,
                                                        locations=locations)
                                                        # preds=pred_box,
                                                        # pred=pred_gt_ious.clamp_min(0.0)[hw_idx, min_gt_inds])
        #         labels_all.append(labels_per_img)
        #         reg_targets_all.append(reg_target_per_img)

        # labels_all = cat(labels_all, dim=0).int()
        # reg_targets_all = cat(reg_targets_all, dim=0)

        # box_cls_flatten = box_cls_flatten.reshape(-1, num_classes)
        # box_regr_flatten = box_regr_flatten.reshape(-1, 4)
        # box_regr_refine_flatten = box_regr_refine_flatten.reshape(-1, 4)
        # centerness_flatten = centerness_flatten.reshape(-1)

        # return self.loss(
        #     box_cls=box_cls_flatten,
        #     box_regression=box_regr_flatten,
        #     box_regression_refine=box_regr_refine_flatten,
        #     centerness=centerness_flatten,
        #     labels=labels_all,
        #     reg_targets=reg_targets_all
        # )

    def loss(self, box_cls, box_regression, box_regression_refine, centerness, labels, reg_targets):
        """Compute the final loss dict from flattened predictions and targets.

        Args:
            box_cls: (A, C) classification logits.
            box_regression / box_regression_refine: (A, 4) offsets.
            centerness: (A,) centerness logits.
            labels: (A,) assigned class labels (0 = background).
            reg_targets: (A, 4) regression targets.

        Returns:
            dict with ``loss_cls``, ``loss_box``, ``loss_box_refine`` and
            ``loss_ious``; positive counts/IoU sums are averaged across GPUs.
        """
        pos_inds = torch.nonzero(labels > 0, as_tuple=False).squeeze(1)

        num_gpus = get_num_gpus()
        total_num_pos = reduce_sum(pos_inds.new_tensor([pos_inds.numel()])).item()
        num_pos_avg_per_gpu = max(total_num_pos / float(num_gpus), 1.0)

        box_regression = box_regression[pos_inds]
        box_regression_refine = box_regression_refine[pos_inds]
        reg_targets = reg_targets[pos_inds]
        centerness = centerness[pos_inds]

        cls_loss = self.cls_loss_func(
            box_cls,
            labels.int()
        ) / num_pos_avg_per_gpu

        if pos_inds.numel() > 0:
            # IoU-weighted regression loss, normalized by the (GPU-averaged)
            # sum of IoU weights so gradients are scale-consistent.
            box_iou = self.calc_iou(
                box_regression,
                reg_targets
            ).clamp(min=1e-6)
            box_iou = box_iou.clone().detach()
            num_avg_box = reduce_sum(box_iou.sum()).item() / float(num_gpus)
            box_loss = self.box_reg_loss_func(
                box_regression,
                reg_targets,
                weight=box_iou
            ) / num_avg_box

            box_iou_rf = self.calc_iou(
                box_regression_refine,
                reg_targets
            ).clamp(min=1e-6)
            box_iou_rf = box_iou_rf.clone().detach()
            num_avg_box_rf = reduce_sum(box_iou_rf.sum()).item() / float(num_gpus)
            box_refine_loss = self.box_reg_loss_func(
                box_regression_refine,
                reg_targets,
                weight=box_iou_rf
            ) / num_avg_box_rf

            # Centerness branch regresses the refined-box IoU as a soft target.
            ious_loss = self.centerness_loss_func(
                centerness,
                box_iou_rf
            ) / num_pos_avg_per_gpu
        else:
            # No positives: zero-valued losses that keep the graph connected.
            box_loss = box_regression.sum()
            box_refine_loss = box_regression_refine.sum()
            ious_loss = centerness.sum()

        return {
            "loss_cls": cls_loss,
            "loss_box": box_loss,
            "loss_box_refine": box_refine_loss,
            "loss_ious": ious_loss
        }


def positive_cls_loss(logits, labels, cls_loss_func):
    """Classification loss of every prediction against every GT label.

    For each ground-truth label, the label is broadcast to all A predictions
    and the per-class loss is summed over the class dimension.

    Args:
        logits: (A, C) classification logits.
        labels: iterable of G scalar label tensors.
        cls_loss_func: callable ``(logits, labels_int, sum=False) -> (A, C)``.

    Returns:
        (A, G) tensor of per-prediction, per-GT classification losses.
    """
    num_preds = logits.size(0)
    per_gt_losses = [
        cls_loss_func(logits, gt_label.repeat(num_preds).int(), sum=False).sum(dim=1)
        for gt_label in labels
    ]
    return torch.stack(per_gt_losses, dim=1)


def make_mla_loss_evaluator(cfg):
    """Factory: build an ``MLALossComputation`` from the given config."""
    return MLALossComputation(cfg)
