"""
This file contains specific functions for computing losses of FCOS
file
"""

from math import log
import torch
from torch import mode, nn
from torch import distributed as dist
from torch._C import device, dtype
from torch.nn import functional as F
import os
from ..utils import concat_box_prediction_layers
from taa_core.layers import IOULoss, smooth_l1_loss
from taa_core.layers import SigmoidFocalLoss
from taa_core.modeling.matcher import Matcher
from taa_core.modeling.utils import cat
from taa_core.structures.bounding_box import BoxList
from taa_core.structures.boxlist_ops import boxlist_iou
from taa_core.structures.boxlist_ops import cat_boxlist


INF = 100000000  # sentinel "infinity": marks location/gt pairs that must never be matched


def get_num_gpus():
    """Return the number of workers in the job (WORLD_SIZE env var, 1 if unset)."""
    world_size = os.environ.get("WORLD_SIZE")
    return 1 if world_size is None else int(world_size)


def reduce_sum(tensor):
    """All-reduce (sum) *tensor* across workers; returns it untouched on a single GPU."""
    if get_num_gpus() > 1:
        # clone so the caller's tensor is not mutated by the in-place reduce
        summed = tensor.clone()
        dist.all_reduce(summed, op=dist.ReduceOp.SUM)
        return summed
    return tensor


class FCOSLossComputation(object):
    """
    This class computes the FCOS losses.

    Each feature-map location is assigned to at most one ground-truth box
    (the smallest-area box that contains it, optionally restricted to a
    square region around the box center).  The losses are:

      * a sigmoid focal loss on the classification logits,
      * IoU-weighted regression losses on the initial and the refined
        (l, t, r, b) box predictions,
      * a BCE loss training the "centerness" branch to predict the IoU of
        the refined boxes with their targets.
    """

    def __init__(self, cfg):
        # Focal-loss hyper-parameters for the classification branch.
        self.focal_gamma = cfg.MODEL.FCOS.LOSS_GAMMA
        self.focal_alpha = cfg.MODEL.FCOS.LOSS_ALPHA
        # One stride per FPN level; sizes the center-sampling region.
        self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
        # Radius (in stride units) of the positive region around each gt
        # center; 0 disables center sampling (the whole box is positive).
        self.center_sampling_radius = cfg.MODEL.FCOS.CENTER_SAMPLING_RADIUS
        self.iou_loss_type = cfg.MODEL.FCOS.IOU_LOSS_TYPE
        self.norm_reg_targets = cfg.MODEL.FCOS.NORM_REG_TARGETS

        self.box_reg_loss_func = IOULoss(self.iou_loss_type)
        self.cls_loss_func = SigmoidFocalLoss(
            gamma=self.focal_gamma,
            alpha=self.focal_alpha
        )
        # reduction="sum" so loss() can normalize by the global number of
        # positive locations instead of the per-GPU element count.
        self.centerness_loss_func = nn.BCEWithLogitsLoss(reduction="sum")

    def get_sample_region(self, gt, strides, num_points_per, gt_xs, gt_ys, radius=1.0):
        '''
        Return a (num_locations, num_gts) boolean mask that is True where a
        location lies inside the center region of a gt box: a square of
        half-size ``stride * radius`` around the box center, clipped to the
        box itself.

        This code is from
        https://github.com/yqyao/FCOS_PLUS/blob/0d20ba34ccc316650d8c30febb2eb40cb6eaae37/
        maskrcnn_benchmark/modeling/rpn/fcos/loss.py#L42
        '''
        num_gts = gt.shape[0]
        K = len(gt_xs)
        gt = gt[None].expand(K, num_gts, 4)
        center_x = (gt[..., 0] + gt[..., 2]) / 2
        center_y = (gt[..., 1] + gt[..., 3]) / 2
        center_gt = gt.new_zeros(gt.shape)
        # no gt (all box centers at x == 0 is treated as "no boxes");
        # NOTE(review): this early exit returns a 1-D mask while the normal
        # path returns (K, num_gts) — callers handle both via comparison.
        if center_x[..., 0].sum() == 0:
            # bool instead of uint8 so both return paths carry the same
            # dtype as the `> 0` mask computed at the end of this method
            return gt_xs.new_zeros(gt_xs.shape, dtype=torch.bool)
        beg = 0
        for level, n_p in enumerate(num_points_per):
            end = beg + n_p
            stride = strides[level] * radius
            xmin = center_x[beg:end] - stride
            ymin = center_y[beg:end] - stride
            xmax = center_x[beg:end] + stride
            ymax = center_y[beg:end] + stride
            # limit sample region in gt (intersect the center square with
            # the gt box so the region never leaves the box)
            center_gt[beg:end, :, 0] = torch.where(
                xmin > gt[beg:end, :, 0], xmin, gt[beg:end, :, 0]
            )
            center_gt[beg:end, :, 1] = torch.where(
                ymin > gt[beg:end, :, 1], ymin, gt[beg:end, :, 1]
            )
            center_gt[beg:end, :, 2] = torch.where(
                xmax > gt[beg:end, :, 2],
                gt[beg:end, :, 2], xmax
            )
            center_gt[beg:end, :, 3] = torch.where(
                ymax > gt[beg:end, :, 3],
                gt[beg:end, :, 3], ymax
            )
            beg = end
        # (l, t, r, b) offsets of every location w.r.t. the clipped region;
        # strictly positive in all four directions <=> inside the region
        left = gt_xs[:, None] - center_gt[..., 0]
        right = center_gt[..., 2] - gt_xs[:, None]
        top = gt_ys[:, None] - center_gt[..., 1]
        bottom = center_gt[..., 3] - gt_ys[:, None]
        center_bbox = torch.stack((left, top, right, bottom), -1)
        inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
        return inside_gt_bbox_mask

    def prepare_targets(self, points, targets):
        """
        Build training targets for every location, regrouped per FPN level.

        Arguments:
            points (list[Tensor]): per-level locations, each (H_l * W_l, 2).
            targets (list[BoxList]): ground-truth boxes per image.

        Returns:
            Three lists (labels, reg_targets, gt_inds) with one tensor per
            level (all images concatenated along dim 0), plus the total
            number of gt boxes in the batch.
        """
        # Regression range each FPN level is responsible for; assumes the
        # standard 5-level FPN — more levels would index out of range here.
        object_sizes_of_interest = [
            [-1, 64],
            [64, 128],
            [128, 256],
            [256, 512],
            [512, INF],
        ]
        expanded_object_sizes_of_interest = []
        for l, points_per_level in enumerate(points):
            object_sizes_of_interest_per_level = \
                points_per_level.new_tensor(object_sizes_of_interest[l])
            expanded_object_sizes_of_interest.append(
                object_sizes_of_interest_per_level[None].expand(len(points_per_level), -1)
            )

        expanded_object_sizes_of_interest = torch.cat(expanded_object_sizes_of_interest, dim=0)
        num_points_per_level = [len(points_per_level) for points_per_level in points]
        # cached for get_sample_region(), which walks the levels in order
        self.num_points_per_level = num_points_per_level
        points_all_level = torch.cat(points, dim=0)
        labels, reg_targets, gt_inds, total_targets = self.compute_targets_for_locations(
            points_all_level, targets, expanded_object_sizes_of_interest
        )

        # split each per-image tensor back into per-level chunks ...
        for i in range(len(labels)):
            labels[i] = torch.split(labels[i], num_points_per_level, dim=0)
            reg_targets[i] = torch.split(reg_targets[i], num_points_per_level, dim=0)
            gt_inds[i] = torch.split(gt_inds[i], num_points_per_level, dim=0)

        # ... and regroup: one tensor per level with images concatenated
        labels_level_first = []
        reg_targets_level_first = []
        gt_inds_level_first = []
        for level in range(len(points)):
            labels_level_first.append(
                torch.cat([labels_per_im[level] for labels_per_im in labels], dim=0)
            )

            reg_targets_per_level = torch.cat([
                reg_targets_per_im[level]
                for reg_targets_per_im in reg_targets
            ], dim=0)

            gt_inds_level_first.append(
                torch.cat([gt_inds_per_im[level] for gt_inds_per_im in gt_inds], dim=0)
            )

            reg_targets_level_first.append(reg_targets_per_level)

        return labels_level_first, reg_targets_level_first, gt_inds_level_first, total_targets

    def compute_targets_for_locations(self, locations, targets, object_sizes_of_interest):
        """
        Assign every location of every image to one gt box or background.

        Returns per-image lists of labels (0 = background), (l, t, r, b)
        regression targets, 1-based batch-wide gt indices (0 = background),
        and the total gt count over the batch.
        """
        labels = []
        reg_targets = []
        gt_inds = []
        total_target = 0
        xs, ys = locations[:, 0], locations[:, 1]

        for im_i in range(len(targets)):
            targets_per_im = targets[im_i]
            assert targets_per_im.mode == "xyxy"
            bboxes = targets_per_im.bbox
            labels_per_im = targets_per_im.get_field("labels")
            area = targets_per_im.area()
            labels_length = len(labels_per_im)

            # (num_locations, num_gts, 4) offsets from each location to the
            # four sides of each gt box
            l = xs[:, None] - bboxes[:, 0][None]
            t = ys[:, None] - bboxes[:, 1][None]
            r = bboxes[:, 2][None] - xs[:, None]
            b = bboxes[:, 3][None] - ys[:, None]
            reg_targets_per_im = torch.stack([l, t, r, b], dim=2)

            if self.center_sampling_radius > 0:
                is_in_boxes = self.get_sample_region(
                    bboxes,
                    self.fpn_strides,
                    self.num_points_per_level,
                    xs, ys,
                    radius=self.center_sampling_radius
                )
            else:
                # no center sampling, it will use all the locations within a ground-truth box
                is_in_boxes = reg_targets_per_im.min(dim=2)[0] > 0

            max_reg_targets_per_im = reg_targets_per_im.max(dim=2)[0]
            # limit the regression range for each location
            is_cared_in_the_level = \
                (max_reg_targets_per_im >= object_sizes_of_interest[:, [0]]) & \
                (max_reg_targets_per_im <= object_sizes_of_interest[:, [1]])

            locations_to_gt_area = area[None].repeat(len(locations), 1)
            locations_to_gt_area[is_in_boxes == 0] = INF
            locations_to_gt_area[is_cared_in_the_level == 0] = INF

            # if there are still more than one objects for a location,
            # we choose the one with minimal area
            locations_to_min_area, locations_to_gt_inds = locations_to_gt_area.min(dim=1)

            reg_targets_per_im = reg_targets_per_im[range(len(locations)), locations_to_gt_inds]
            labels_per_im = labels_per_im[locations_to_gt_inds]
            labels_per_im[locations_to_min_area == INF] = 0

            labels.append(labels_per_im)
            reg_targets.append(reg_targets_per_im)

            # shift so indices are unique across all images of the batch;
            # unmatched locations get index 0
            locations_to_gt_inds += (1 + total_target)  # 1-based
            locations_to_gt_inds[locations_to_min_area == INF] = 0
            gt_inds.append(locations_to_gt_inds)
            total_target += labels_length

        return labels, reg_targets, gt_inds, total_target

    def calc_iou(self, preds, targets):
        """
        Element-wise IoU between boxes encoded as (l, t, r, b) offsets from
        a shared location — widths/heights are therefore *sums* of the two
        opposite offsets rather than coordinate differences.
        """
        # fixed: was `eps = eps = torch.finfo(...)` (duplicated assignment)
        eps = torch.finfo(torch.float32).eps

        pred_areas = (preds[..., 2] + preds[..., 0]).clamp_(min=0) \
            * (preds[..., 3] + preds[..., 1]).clamp_(min=0)

        target_areas = (targets[..., 2] + targets[..., 0]).clamp_(min=0) \
            * (targets[..., 3] + targets[..., 1]).clamp_(min=0)

        w_intersect = (torch.min(preds[..., 2], targets[..., 2]) \
            + torch.min(preds[..., 0], targets[..., 0])).clamp_(min=0)
        h_intersect = (torch.min(preds[..., 3], targets[..., 3]) \
            + torch.min(preds[..., 1], targets[..., 1])).clamp_(min=0)

        area_intersect = w_intersect * h_intersect
        area_union = pred_areas + target_areas - area_intersect
        # clamp avoids 0/0 for degenerate (empty) boxes
        ious = area_intersect / area_union.clamp(min=eps)

        return ious

    def __call__(self, locations, box_cls, box_regression, box_regression_refine, centerness, targets):
        """
        Flatten all per-level predictions and targets, then compute losses.

        Arguments (all prediction lists have one entry per FPN level):
            locations (list[Tensor]): per-level locations, each (H*W, 2).
            box_cls (list[Tensor]): (N, C, H, W) classification logits.
            box_regression (list[Tensor]): (N, 4, H, W) initial regression.
            box_regression_refine (list[Tensor]): (N, 4, H, W) refined regression.
            centerness (list[Tensor]): IoU-prediction ("centerness") logits.
            targets (list[BoxList]): ground truth per image.

        Returns:
            dict of scalar losses (see loss()).
        """
        num_classes = box_cls[0].size(1)
        labels, reg_targets, gt_inds, num_target = self.prepare_targets(locations, targets)

        box_cls_flatten_ = []
        box_regression_flatten = []
        box_regression_refine_flatten = []
        labels_flatten = []
        reg_targets_flatten_ = []
        centerness_flatten = []
        for l in range(len(labels)):
            # (N, C, H, W) -> (N*H*W, C) so all levels can be concatenated
            box_cls_flatten_.append(box_cls[l].permute(0, 2, 3, 1).reshape(-1, num_classes))
            box_regression_flatten.append(box_regression[l].permute(0, 2, 3, 1).reshape(-1, 4))
            box_regression_refine_flatten.append(box_regression_refine[l].permute(0, 2, 3, 1).reshape(-1, 4))
            labels_flatten.append(labels[l].reshape(-1))
            reg_targets_flatten_.append(reg_targets[l].reshape(-1, 4))
            centerness_flatten.append(centerness[l].reshape(-1))

        box_cls_flatten = torch.cat(box_cls_flatten_, dim=0)
        box_regression_flatten = torch.cat(box_regression_flatten, dim=0)
        box_regression_refine_flatten = cat(box_regression_refine_flatten, dim=0)
        centerness_flatten = cat(centerness_flatten, dim=0)
        labels_flatten = torch.cat(labels_flatten, dim=0)
        reg_targets_flatten = torch.cat(reg_targets_flatten_, dim=0)

        return self.loss(
            box_cls=box_cls_flatten,
            box_regression=box_regression_flatten,
            box_regression_refine=box_regression_refine_flatten,
            centerness=centerness_flatten,
            labels=labels_flatten,
            reg_targets=reg_targets_flatten,
        )

    def loss(self, box_cls, box_regression, box_regression_refine, centerness, labels, reg_targets):
        """
        Compute the four FCOS losses from flattened predictions/targets.

        All normalizers are reduced across GPUs so gradients do not depend
        on how the batch is sharded.

        Returns:
            dict with "loss_cls", "loss_box", "loss_box_refine", "loss_ious".
        """
        pos_inds = torch.nonzero(labels > 0).squeeze(1)

        num_gpus = get_num_gpus()
        # global number of positive locations, averaged over GPUs
        total_num_pos = reduce_sum(pos_inds.new_tensor([pos_inds.numel()])).item()
        num_pos_avg_per_gpu = max(total_num_pos / float(num_gpus), 1.0)

        box_regression = box_regression[pos_inds]
        box_regression_refine = box_regression_refine[pos_inds]
        reg_targets = reg_targets[pos_inds]
        centerness = centerness[pos_inds]

        # classification sees every location (negatives included)
        cls_loss = self.cls_loss_func(
            box_cls,
            labels.int()
        ) / num_pos_avg_per_gpu

        if pos_inds.numel() > 0:
            # IoU of the current predictions, used (detached) both as a
            # per-sample weight for the regression loss and, for the
            # refined branch, as the soft target of the centerness head
            box_iou = self.calc_iou(
                box_regression,
                reg_targets
            ).clamp(min=1e-6)
            box_iou = box_iou.clone().detach()
            num_avg_box = reduce_sum(box_iou.sum()).item() / float(num_gpus)
            box_loss = self.box_reg_loss_func(
                box_regression,
                reg_targets,
                weight=box_iou
            ) / num_avg_box

            box_iou_rf = self.calc_iou(
                box_regression_refine,
                reg_targets
            ).clamp(min=1e-6)
            box_iou_rf = box_iou_rf.clone().detach()
            num_avg_box_rf = reduce_sum(box_iou_rf.sum()).item() / float(num_gpus)
            box_refine_loss = self.box_reg_loss_func(
                box_regression_refine,
                reg_targets,
                weight=box_iou_rf
            ) / num_avg_box_rf

            ious_loss = self.centerness_loss_func(
                centerness,
                box_iou_rf
            ) / num_pos_avg_per_gpu
        else:
            # no positives on this GPU: emit zero-valued losses that are
            # still attached to the graph (sums over empty selections)
            box_loss = box_regression.sum()
            box_refine_loss = box_regression_refine.sum()
            ious_loss = centerness.sum()

        return {
            "loss_cls": cls_loss,
            "loss_box": box_loss,
            "loss_box_refine": box_refine_loss,
            "loss_ious": ious_loss
        }


def make_align_loss_evaluator(cfg):
    """Factory: build the FCOS loss evaluator for the given config."""
    return FCOSLossComputation(cfg)
