"""
This file contains specific functions for computing losses of FCOS
file
"""

# from numpy.core.numeric import isfortran
# from numpy.lib.ufunclike import isneginf, isposinf
import torch
from torch.nn import functional as F
from torch import nn
import os
from ..utils import permute_to_N_HWA_K
from taa_core.layers import IOULoss
from taa_core.layers import SigmoidFocalLoss
from taa_core.modeling.utils import cat
from taa_core.structures.bounding_box import BoxList
from taa_core.structures.boxlist_ops import boxlist_iou
from sklearn.mixture import GaussianMixture
from .match_gt import AnchorMatcher


INF = 100000000


def get_num_gpus():
    """Return the distributed world size (``WORLD_SIZE``), defaulting to 1."""
    world_size = os.environ.get("WORLD_SIZE")
    return 1 if world_size is None else int(world_size)


def reduce_sum(tensor):
    """Sum ``tensor`` across all GPUs; identity in single-process runs."""
    if get_num_gpus() > 1:
        import torch.distributed as dist
        summed = tensor.clone()
        dist.all_reduce(summed, op=dist.ReduceOp.SUM)
        return summed
    return tensor


class MLALossComputation(object):
    """
    Computes the FCOS losses with Mixture-based Label Assignment (MLA).

    For each image, a joint assignment cost (focal classification loss +
    1 - IoU) is computed for every anchor/gt pair; for each gt, the top-k
    lowest-cost in-box candidates are split into positives and negatives by
    fitting a 2-component Gaussian mixture to their costs (``compute_mla``).
    The selected positives drive bag-style classification, IoU box
    regression (plain and refined) and an IoU-prediction (centerness) loss.
    """

    def __init__(self, cfg):
        # Focal loss used both to score anchor/gt pairs during assignment
        # and for the final classification loss.
        self.cls_loss_func = SigmoidFocalLoss(
            cfg.MODEL.FCOS.LOSS_GAMMA,
            cfg.MODEL.FCOS.LOSS_ALPHA
        )
        self.focal_gamma = cfg.MODEL.FCOS.LOSS_GAMMA
        self.focal_alpha = cfg.MODEL.FCOS.LOSS_ALPHA
        self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
        self.center_sampling_radius = cfg.MODEL.FCOS.CENTER_SAMPLING_RADIUS
        self.iou_loss_type = cfg.MODEL.FCOS.IOU_LOSS_TYPE
        # Number of lowest-cost in-box candidates per gt fed to the GMM.
        self.topk = cfg.MODEL.MLA.TOPK
        self.norm_reg_targets = cfg.MODEL.FCOS.NORM_REG_TARGETS

        # we make use of IOU Loss for bounding boxes regression,
        # but we found that L1 in log scale can yield a similar performance
        self.box_reg_loss_func = IOULoss(self.iou_loss_type)
        self.centerness_loss_func = nn.BCEWithLogitsLoss(reduction="none")
        self.anchor_matcher = AnchorMatcher()

    def get_offsets(self, location, gt):
        """Return [A, G, 4] (l, t, r, b) offsets from each location to each gt box.

        Args:
            location: [A, 2] anchor point (x, y) coordinates.
            gt: [G, 4] ground-truth boxes in xyxy format.
        """
        xs, ys = location[:, 0], location[:, 1]
        l = xs[:, None] - gt[:, 0][None]
        t = ys[:, None] - gt[:, 1][None]
        r = gt[:, 2][None] - xs[:, None]
        b = gt[:, 3][None] - ys[:, None]
        offsets = torch.stack([l, t, r, b], dim=2)
        return offsets

    def encode(self, locations, preds):
        """Convert predicted (l, t, r, b) distances at ``locations`` to xyxy boxes.

        ``preds``'s last dimension must be a multiple of 4; the result keeps
        the leading dimensions and flattens the per-box coordinates back into
        the last one.
        """
        assert torch.isfinite(preds).all().item()
        locations = locations.to(preds.device)

        if preds.numel() == 0:
            return torch.empty_like(preds)

        preds = preds.view(preds.size()[:-1] + (-1, 4))
        boxes = torch.cat((locations.unsqueeze(-2) - preds[..., :2],
                           locations.unsqueeze(-2) + preds[..., 2:]),
                        dim=-1).view(preds.size()[:-2] + (-1, ))

        return boxes

    def decode(self, locations, gt):
        """Convert one xyxy gt box into per-location (l, t, r, b) regression targets."""
        xs, ys = locations[:, 0], locations[:, 1]
        l = xs - gt[0]
        t = ys - gt[1]
        r = gt[2] - xs
        b = gt[3] - ys
        return torch.stack([l, t, r, b], dim=1)

    def match_pred_gt(self, pred_gt_loss, is_foreground, mask):
        """Assign each anchor one gt index: foreground anchors via
        ``AnchorMatcher`` (threshold 0.75), the rest to their min-loss gt.

        NOTE(review): currently unused by ``__call__`` (the GMM path in
        ``compute_mla`` is used instead); kept for experimentation.
        """
        _, min_gt_inds = pred_gt_loss.min(dim=-1)
        pos_gt_inds = self.anchor_matcher(pred_gt_loss[is_foreground], mask[is_foreground], 0.75)
        assert (pos_gt_inds == -1).sum() == 0
        min_gt_inds[is_foreground] = pos_gt_inds
        return min_gt_inds

    def compute_mla(self, pred_gt_losses, is_in_boxes, num_gt):
        """Select positive anchors per gt by fitting a 2-component GMM.

        Args:
            pred_gt_losses: [A, G] joint cls+reg cost of every anchor/gt pair.
            is_in_boxes: [A, G] bool, anchor center strictly inside the gt box.
            num_gt: number of gt boxes (kept for interface compatibility; unused).

        Returns:
            [G, A] bool tensor marking the anchors assigned to each gt.
        """
        device = pred_gt_losses.device
        gt_pred_losses = pred_gt_losses.permute(1, 0).contiguous()
        is_in_boxes = is_in_boxes.permute(1, 0).contiguous()
        gt_pred_flags = torch.zeros_like(gt_pred_losses).bool()

        for gt_idx, (gt_pred_loss, is_in_box) in enumerate(zip(gt_pred_losses, is_in_boxes)):
            matched_idx = torch.nonzero(is_in_box).flatten()
            if matched_idx.numel() > 0:
                # Keep at most self.topk lowest-cost candidates inside the box.
                _, topk_idxs = gt_pred_loss[matched_idx].topk(
                    min(matched_idx.numel(), self.topk), largest=False)
                topk_idxs_per_gt = matched_idx[topk_idxs]

                if topk_idxs_per_gt.numel() > 1:
                    candidate_loss = gt_pred_loss[topk_idxs_per_gt]
                    candidate_loss, inds = candidate_loss.sort()
                    candidate_loss = candidate_loss.view(-1, 1).cpu().numpy()
                    min_loss, max_loss = candidate_loss.min(), candidate_loss.max()
                    # Initialize the two components at the loss extremes so that
                    # component 0 models positives (low cost) and 1 negatives.
                    means_init = [[min_loss], [max_loss]]
                    weights_init = [0.5, 0.5]
                    precisions_init = [[[1.0]], [[1.0]]]
                    gmm = GaussianMixture(2,
                                    weights_init=weights_init,
                                    means_init=means_init,
                                    precisions_init=precisions_init)
                    gmm.fit(candidate_loss)
                    components = gmm.predict(candidate_loss)
                    scores = gmm.score_samples(candidate_loss)

                    components = torch.from_numpy(components).to(device)
                    scores = torch.from_numpy(scores).to(device)
                    fgs = components == 0
                    bgs = components == 1
                    if torch.nonzero(fgs, as_tuple=False).numel() > 0:
                        # Positives: sorted candidates up to (and including)
                        # the foreground sample with the highest GMM score.
                        fg_max_score = scores[fgs].max().item()
                        fg_max_idx = torch.nonzero(fgs & (scores == fg_max_score), as_tuple=False).min()
                        is_neg = inds[fgs | bgs]
                        is_pos = inds[:fg_max_idx + 1]
                    else:
                        # No foreground component predicted: take every candidate.
                        is_pos = inds
                        is_neg = None
                else:
                    # A single candidate is taken as positive directly.
                    is_pos = [0]
                    is_neg = None

                if is_neg is not None:
                    # NOTE(review): flags start out False, so clearing the
                    # negatives is a no-op; positives below take precedence.
                    neg_idx = topk_idxs_per_gt[is_neg]
                    gt_pred_flags[gt_idx, neg_idx] = False
                pos_idx = topk_idxs_per_gt[is_pos]
                gt_pred_flags[gt_idx, pos_idx] = True
        return gt_pred_flags

    def __call__(self, locations, box_cls, box_regression, box_regression_refine, centerness, targets):
        """Compute the MLA training losses.

        Args:
            locations: list of per-FPN-level anchor point coordinates.
            box_cls, box_regression, box_regression_refine, centerness:
                lists of per-level head outputs (N, C*K, H, W layout).
            targets: per-image BoxList ground truth carrying a 'labels' field
                (labels are presumably 1-based; class c maps to logit column
                c-1 — confirm against the dataset mapping).

        Returns:
            dict with 'loss_cls', 'loss_box', 'loss_box_refine', 'loss_ious'.
        """
        num_classes = box_cls[0].size(1)
        box_cls_flatten = [permute_to_N_HWA_K(x, num_classes) for x in box_cls]
        box_regr_flatten = [permute_to_N_HWA_K(x, 4) for x in box_regression]
        box_regr_refine_flatten = [permute_to_N_HWA_K(x, 4) for x in box_regression_refine]
        centerness_flatten = [permute_to_N_HWA_K(x, 1) for x in centerness]

        box_cls_flatten = cat(box_cls_flatten, dim=1)
        box_regr_flatten = cat(box_regr_flatten, dim=1)
        box_regr_refine_flatten = cat(box_regr_refine_flatten, dim=1)
        centerness_flatten = cat(centerness_flatten, dim=1)
        locations = cat(locations, dim=0)

        num_pos = 0
        pos_cls_losses = []
        pos_reg_losses = []
        pos_reg_rf_losses = []
        neg_losses = []
        centerness_losses = []

        for box_cls_per_img, box_regr_per_img, box_regr_rf_per_img, centerness_per_img, target_per_img in zip(
            box_cls_flatten, box_regr_flatten, box_regr_refine_flatten, centerness_flatten, targets
        ):
            labels = target_per_img.get_field('labels')
            gt_boxes = target_per_img.bbox
            # Per-anchor/per-class mask of assigned positives for this image,
            # consumed by the negative bag loss below.
            mask = torch.zeros_like(box_cls_per_img).detach().bool()

            target_offsets = self.get_offsets(locations, gt_boxes)
            is_in_boxes = target_offsets.min(dim=-1).values > 0

            with torch.no_grad():
                # Assignment cost = focal cls loss + (1 - IoU) between the
                # current (unrefined) box predictions and each gt.
                pred_box = self.encode(locations, box_regr_per_img.detach())
                pred_gt_ious = boxlist_iou(
                                BoxList(pred_box, target_per_img.size, mode='xyxy'),
                                target_per_img)
                pred_gt_rloss = 1 - pred_gt_ious
                pred_gt_closs = positive_cls_loss(box_cls_per_img.detach(), labels, self.cls_loss_func)
                pred_gt_loss = pred_gt_closs + pred_gt_rloss
                # Anchors outside a gt box can never be assigned to it.
                pred_gt_loss[~is_in_boxes] = INF

                gt_pred_flags = self.compute_mla(pred_gt_loss,
                                                 is_in_boxes,
                                                 len(gt_boxes))

            box_cls_per_img = box_cls_per_img.sigmoid()
            gt_pred_ious = pred_gt_ious.permute(1, 0).clamp(min=1e-6)

            for idx, gt_pred_flag in enumerate(gt_pred_flags):
                if gt_pred_flag.sum() > 0:
                    num_pos += gt_pred_flag.sum().item()
                    box_cls_per_gt = box_cls_per_img[gt_pred_flag, labels[idx] - 1]
                    pos_cls_losses.append(positive_bag_loss(box_cls_per_gt, gamma=self.focal_gamma))
                    box_reg_per_gt = box_regr_per_img[gt_pred_flag, :]
                    reg_target_per_gt = target_offsets[gt_pred_flag, idx]
                    pos_reg_losses.append(self.box_reg_loss_func(
                                            box_reg_per_gt,
                                            reg_target_per_gt,
                                            sum=False))
                    box_reg_rf_per_gt = box_regr_rf_per_img[gt_pred_flag, :]
                    pos_reg_rf_losses.append(self.box_reg_loss_func(
                        box_reg_rf_per_gt,
                        reg_target_per_gt,
                        sum=False))
                    # The IoU branch regresses the IoU of the unrefined
                    # prediction with its assigned gt.
                    centerness_losses.append(self.centerness_loss_func(
                        centerness_per_img.reshape(-1)[gt_pred_flag],
                        gt_pred_ious[idx, gt_pred_flag]))

                    mask[gt_pred_flag, labels[idx] - 1] = True
                else:
                    # BUGFIX: pad every per-gt loss list with a zero entry.
                    # Previously only the cls/reg lists were padded, so the
                    # refine/centerness lists fell out of sync with them and
                    # torch.cat below could fail (or aggregate inconsistently)
                    # when a gt received no positive anchors.
                    pos_cls_losses.append(box_cls_per_img.new_zeros(1))
                    pos_reg_losses.append(box_cls_per_img.new_zeros(1))
                    pos_reg_rf_losses.append(box_cls_per_img.new_zeros(1))
                    centerness_losses.append(box_cls_per_img.new_zeros(1))

            neg_losses.append(negative_bag_loss(box_cls_per_img, mask, gamma=self.focal_gamma))

        num_gpus = get_num_gpus()
        # Normalize by the GPU-averaged positive count so per-GPU losses match.
        total_num_pos = reduce_sum(box_cls_flatten.new_tensor([num_pos])).item()
        num_pos_avg_per_gpu = max(total_num_pos / float(num_gpus), 1.0)

        neg_cls_loss = torch.cat(neg_losses).sum()
        pos_cls_loss = torch.cat(pos_cls_losses).sum()
        reg_loss = torch.cat(pos_reg_losses).sum() / num_pos_avg_per_gpu
        reg_rf_loss = torch.cat(pos_reg_rf_losses).sum() / num_pos_avg_per_gpu
        cen_loss = torch.cat(centerness_losses).sum() / num_pos_avg_per_gpu

        cls_loss = (pos_cls_loss * self.focal_alpha + neg_cls_loss * (1 - self.focal_alpha)) / num_pos_avg_per_gpu

        return {
            "loss_cls": cls_loss,
            "loss_box": reg_loss,
            "loss_box_refine": reg_rf_loss,
            "loss_ious": cen_loss
        }


def positive_bag_loss(logits, gamma):
    """Focal-weighted BCE of ``logits`` (probabilities) against an all-ones target."""
    bce = F.binary_cross_entropy(logits,
                                 torch.ones_like(logits),
                                 reduction='none')
    modulator = (1 - logits) ** gamma
    return modulator * bce

def negative_bag_loss(logits, mask, gamma):
    """Focal-weighted BCE against an all-zeros target, zeroed where ``mask`` is True."""
    bce = F.binary_cross_entropy(
        logits, torch.zeros_like(logits), reduction='none')
    keep = (~mask).float()
    return (logits ** gamma) * bce * keep


def positive_cls_loss(logits, labels, cls_loss_func):
    """Score every anchor against every gt label with ``cls_loss_func``.

    Returns an [A, G] tensor whose (a, g) entry is the classification loss of
    anchor a's logits against gt g's label, summed over classes.
    """
    num_anchors = logits.size(0)
    per_label = [
        cls_loss_func(logits, lbl.repeat(num_anchors).int(), sum=False).sum(dim=1)
        for lbl in labels
    ]
    return torch.stack(per_label, dim=1)


def make_mla_loss_evaluator(cfg):
    """Factory: build the MLA loss computation object from the config."""
    return MLALossComputation(cfg)
