from scipy.optimize import linear_sum_assignment

import torch
from torch import nn
import torch.nn.functional as F

from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
from util.misc import (nested_tensor_from_tensor_list,
                       accuracy, get_world_size, interpolate, focal_accuracy,
                       is_dist_avail_and_initialized)
import torch.distributed as dist
import copy
import numpy as np

class SetCriterionHOI(nn.Module):

    def __init__(self, args, matcher, weight_dict, losses, loss_type='ce_loss'):
        super().__init__()
        self.args = args
        self.matcher = matcher              #   对预测与GT进行匹配的算法
        self.losses = losses                #   指定需要计算哪些loss('labels','boxes','cardinality','masks')
        self.num_obj_classes = args.num_obj_classes  #   类别数，不包含背景
        self.num_queries = args.num_queries
        self.eos_coef = args.eos_coef        #   针对背景分类的loss权重
        assert args.verb_loss_type == 'bce' or args.verb_loss_type == 'focal'
        self.verb_loss_type = 'focal'
        
        self.num_verb_classes = args.num_verb_classes
        self.alpha = args.focal_alpha
        empty_weight = torch.ones(self.num_obj_classes + 1, device = torch.device(dist.get_rank())) \
                        if  loss_type=='ce_loss' else torch.ones(self.num_obj_classes, device = torch.device(dist.get_rank()))
        empty_weight[-1] = self.eos_coef
        self.register_buffer('empty_weight', empty_weight)
        self.weight_dict = weight_dict
        self.loss_type = loss_type
        self.eff_specific_head = args.eff_specific_head
        
        self.obj_alpha = 0.25
        self.obj_gamma = 2
        self.verb_alpha = 0.5
        self.verb_gamma = 2
        self.neg_inds_beta = 0
        self.hard_neg_inds_beta = 3
        self.k_one2many = args.k_one2many

    def forward(self, outputs, targets):
        if self.k_one2many !=1:
            # repeat the targets
            for target in targets:
                target["obj_boxes"]   = target["obj_boxes"].repeat(self.k_one2many, 1)
                target["obj_labels"]  = target["obj_labels"].repeat(self.k_one2many)
                target["sub_boxes"]   = target["sub_boxes"].repeat(self.k_one2many, 1)
                target["verb_labels"] = target["verb_labels"].repeat(self.k_one2many, 1)

        outputs_without_aux = {k: v for k, v in outputs.items() if k not in ['aux_outputs', 'enc_outputs']}
        # Retrieve the matching between the outputs of the last layer and the targets
        indices = self.matcher(outputs_without_aux, targets)

        num_interactions = sum(len(t['obj_labels']) for t in targets)

        num_interactions = torch.as_tensor([num_interactions], dtype=torch.float, device=next(iter(outputs.values())).device)
        if is_dist_avail_and_initialized() and num_interactions.device.type != 'cpu':
            torch.distributed.all_reduce(num_interactions)
        num_interactions = torch.clamp(num_interactions / get_world_size(), min=1).item()

        # Compute all the requested losses
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_interactions))

        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
        if 'aux_outputs' in outputs:
            losses.update(self.aux_outputs_loss(outputs, targets, num_interactions))

        if 'enc_outputs' in outputs:
            losses.update(self.enc_outputs_loss(outputs, targets, num_interactions))

        if 'backbone_outputs' in outputs:
            losses.update(self.backbone_outputs_loss(outputs, targets, num_interactions))
                
        if 'aux_outputs_enc' in outputs:
            losses.update(self.aux_outputs_enc_loss(outputs, targets, num_interactions))

        return losses

    def get_loss(self, loss, outputs, targets, indices, num, **kwargs):
        loss_map = {
            'obj_labels': self.loss_obj_labels,             
            'obj_cardinality': self.loss_obj_cardinality,
            'verb_labels': self.loss_verb_labels,           
            'sub_obj_boxes': self.loss_sub_obj_boxes,
            'matching_labels': self.loss_matching_labels,    #new
            
            'masks': self.loss_masks,
            "mask_prediction": self.loss_mask_prediction,
            "corr": self.corr,
        }
        assert loss in loss_map, f'do you really want to compute {loss} loss?'
        return loss_map[loss](outputs, targets, indices, num, **kwargs)


    def loss_obj_labels(self, outputs, targets, indices, num_interactions, log=True, alpha=0.5, gamma=2, neg_inds_beta=0, hard_neg_inds_beta=3):
        assert 'pred_obj_logits' in outputs
        #   8, 100, 80
        src_logits = outputs['pred_obj_logits']
        target_classes_o = torch.cat([t['obj_labels'][J] for t, (_, J) in zip(targets, indices)])
        idx = self._get_src_permutation_idx(indices)
        
        #   (kh*N,)
        target_classes = torch.full(src_logits.shape[:2], self.num_obj_classes, dtype=torch.int64, device=target_classes_o.device)
        #   8, 100   =  (kh*N,)
        target_classes[idx] = target_classes_o
#########################  obj_reweight  #####################################################
        obj_weights = self.obj_reweight_func(target_classes)
#########################  obj_reweight  #####################################################
        #if self.verb_loss_type == 'bce':
        if self.loss_type=='ce_loss':
            loss_obj_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, obj_weights, reduction='mean')
            #loss_obj_ce = self.cross_entropy(src_logits, target_classes, obj_weights, reduction='mean')
        elif self.loss_type=='focal_loss':
            target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],
                                                dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device)
            target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
            target_classes_onehot = target_classes_onehot[:,:,:-1]
            loss_obj_ce = self.sigmoid_focal_loss(src_logits, target_classes_onehot, num_interactions, alpha=self.alpha, gamma=gamma) * src_logits.shape[1]
                    
        losses = {'loss_obj_ce': loss_obj_ce}
        if log:
            losses['obj_class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
        return losses


    def loss_matching_labels(self, outputs, targets, indices, num_interactions, log=True):
        assert 'pred_matching_logits' in outputs
        #   100, N, 1
        src_logits = outputs['pred_matching_logits']
        target_classes_o = torch.cat([t['matching_labels'][J] for t, (_, J) in zip(targets, indices)])
        idx = self._get_src_permutation_idx(indices)
        #   100, N
        target_classes = torch.full(src_logits.shape[:2], 0, dtype=torch.int64, device=target_classes_o.device)
        target_classes[idx] = target_classes_o
        if self.loss_type=='ce_loss':
            loss_matching = F.cross_entropy(src_logits.transpose(1, 2), target_classes, reduction='mean')
            #loss_matching = self.cross_entropy(src_logits, target_classes, reduction='mean')
        elif self.loss_type=='focal_loss':
           src_logits = src_logits.softmax(-1)
           loss_matching = self._focal_cross_entropy(src_logits, target_classes, None, self.verb_alpha, self.obj_gamma, self.neg_inds_beta, self.hard_neg_inds_beta)
        losses = {'loss_matching': loss_matching}

        if log:
            losses['matching_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
        return losses



    def loss_verb_labels(self, outputs, targets, indices, num_interactions, log=True):
        assert 'pred_verb_logits' in outputs
        src_logits = outputs['pred_verb_logits']
        target_classes_o = torch.cat([t['verb_labels'][J] for t, (_, J) in zip(targets, indices)])
        idx = self._get_src_permutation_idx(indices)
        #   100, N, 29
        target_classes = torch.zeros_like(src_logits)
        target_classes[idx] = target_classes_o
#########################  loss_verb_labels Re-weighting  #####################################################
        verb_weights = self.verb_weights_func(target_classes)
#########################  loss_verb_labels Re-weighting  #####################################################
        #   multi-object
        if self.verb_loss_type == 'bce':
            loss_verb_ce = F.binary_cross_entropy_with_logits(src_logits, target_classes)
        elif self.verb_loss_type == 'focal':#Ture
            src_logits = src_logits.sigmoid()
            loss_verb_ce = self._neg_loss(src_logits, target_classes, verb_weights, 
                                          self.verb_alpha, self.verb_gamma, self.neg_inds_beta, self.hard_neg_inds_beta)
        losses = {'loss_verb_ce': loss_verb_ce}
        #   有bug
        # if log:
        #     losses['verb_class_error'] = 100 - focal_accuracy(src_logits[idx], target_classes_o)[0]
        return losses
    
    def loss_sub_obj_boxes(self, outputs, targets, indices, num_interactions):
        assert 'pred_sub_boxes' in outputs and 'pred_obj_boxes' in outputs
        #   (kh*N,kh*N,)
        idx = self._get_src_permutation_idx(indices)
        src_sub_boxes = outputs['pred_sub_boxes'][idx]
        target_sub_boxes = torch.cat([t['sub_boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
        
        losses = {}
        if src_sub_boxes.shape[0] == 0:
            losses['loss_sub_bbox'] = src_sub_boxes.sum()
            losses['loss_sub_giou'] = src_sub_boxes.sum()        
        else:
            loss_sub_bbox = F.l1_loss(src_sub_boxes, target_sub_boxes, reduction='none')
            losses['loss_sub_bbox'] = loss_sub_bbox.sum() / num_interactions
            
            bbox1 = box_cxcywh_to_xyxy(src_sub_boxes)
            bbox2 = box_cxcywh_to_xyxy(target_sub_boxes)
            loss_sub_giou = 1 - torch.diag(generalized_box_iou(bbox1, bbox2))
            losses['loss_sub_giou'] = loss_sub_giou.sum() / num_interactions 
            
                          
        src_obj_boxes = outputs['pred_obj_boxes'][idx]
        target_obj_boxes = torch.cat([t['obj_boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
        if src_sub_boxes.shape[0] == 0:
            losses['loss_obj_bbox'] = src_obj_boxes.sum()
            losses['loss_obj_giou'] = src_obj_boxes.sum()
        else:     
            #   假设没有obj则不计算loss
            exist_obj_boxes = (target_obj_boxes != 0).any(dim=1)
            loss_obj_bbox = F.l1_loss(src_obj_boxes, target_obj_boxes, reduction='none') 
            losses['loss_obj_bbox'] = (loss_obj_bbox * exist_obj_boxes.unsqueeze(1)).sum() / (exist_obj_boxes.sum() + 1e-4)
            
            bbox1 = box_cxcywh_to_xyxy(src_obj_boxes)
            bbox2 = box_cxcywh_to_xyxy(target_obj_boxes)
            loss_obj_giou = 1 - torch.diag(generalized_box_iou(bbox1, bbox2))
            losses['loss_obj_giou'] = (loss_obj_giou * exist_obj_boxes).sum() / (exist_obj_boxes.sum() + 1e-4)
        return losses

    #   仅作为log存在
    @torch.no_grad()
    def loss_obj_cardinality(self, outputs, targets, indices, num_interactions):
        pred_logits = outputs['pred_obj_logits']
        tgt_lengths = torch.as_tensor([len(v['obj_labels']) for v in targets], device=pred_logits.device)

        card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
        card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
        losses = {'obj_cardinality_error': card_err}
        return losses



    def loss_masks(self, outputs, targets, indices, num_boxes):
        """
            计算与masks有关的loss: focal loss和dice loss。
            targets dicts必须包含一个包含[nb_target_boxes, h, w]的张量的键 "masks"。 
        """
        assert "pred_masks" in outputs

        src_idx = self._get_src_permutation_idx(indices)
        tgt_idx = self._get_tgt_permutation_idx(indices)

        src_masks = outputs["pred_masks"]

        # TODO 使用valid来掩盖由于损失中的padding而导致的无效区域                                          
        target_masks, valid = nested_tensor_from_tensor_list([t["masks"] for t in targets]).decompose()

        target_masks = target_masks.to(src_masks)

        src_masks = src_masks[src_idx]
        # 将预测值提高到目标尺寸
        src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False)
        src_masks = src_masks[:, 0].flatten(1)

        target_masks = target_masks[tgt_idx].flatten(1)
        losses = {
            "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
            "loss_dice": dice_loss(src_masks, target_masks, num_boxes),
            }
    
        return losses

    #   sparse_detr
    def loss_mask_prediction(self, outputs, targets, indices, num_boxes, layer=None):
        assert "backbone_mask_prediction" in outputs
        assert "sampling_locations_dec" in outputs
        assert "attn_weights_dec" in outputs
        assert "spatial_shapes" in outputs
        assert "level_start_index" in outputs

        mask_prediction = outputs["backbone_mask_prediction"] 
        loss_key = "loss_mask_prediction"

        sampling_locations_dec = outputs["sampling_locations_dec"]
        attn_weights_dec = outputs["attn_weights_dec"]
        spatial_shapes = outputs["spatial_shapes"]
        level_start_index = outputs["level_start_index"]

        flat_grid_attn_map_dec = attn_map_to_flat_grid(
            spatial_shapes, level_start_index, sampling_locations_dec, attn_weights_dec).sum(dim=(1,2))

        losses = {}

        if 'mask_flatten' in outputs:
            flat_grid_attn_map_dec = flat_grid_attn_map_dec.masked_fill(
                outputs['mask_flatten'], flat_grid_attn_map_dec.min()-1)
                
        sparse_token_nums = outputs["sparse_token_nums"]
        num_topk = sparse_token_nums.max()

        topk_idx_tgt = torch.topk(flat_grid_attn_map_dec, num_topk)[1]
        target = torch.zeros_like(mask_prediction)
        for i in range(target.shape[0]):
            target[i].scatter_(0, topk_idx_tgt[i][:sparse_token_nums[i]], 1)

        losses.update({loss_key: F.multilabel_soft_margin_loss(mask_prediction, target)})

        return losses

    #   sparse_detr(encoder，decoder特征图映射)
    @torch.no_grad()
    def corr(self, outputs, targets, indices, num_boxes):
        if "backbone_topk_proposals" not in outputs.keys() or outputs["backbone_topk_proposals"] is   None:
            return {}

        assert "backbone_topk_proposals" in outputs
        assert "sampling_locations_dec" in outputs
        assert "attn_weights_dec" in outputs
        assert "spatial_shapes" in outputs
        assert "level_start_index" in outputs

        backbone_topk_proposals = outputs["backbone_topk_proposals"]
        sampling_locations_dec = outputs["sampling_locations_dec"]
        attn_weights_dec = outputs["attn_weights_dec"]
        spatial_shapes = outputs["spatial_shapes"]
        level_start_index = outputs["level_start_index"]

        flat_grid_topk = idx_to_flat_grid(spatial_shapes, backbone_topk_proposals)
        flat_grid_attn_map_dec = attn_map_to_flat_grid(
            spatial_shapes, level_start_index, sampling_locations_dec, attn_weights_dec).sum(dim=(1,2))
        corr = compute_corr(flat_grid_topk, flat_grid_attn_map_dec, spatial_shapes)

        losses = {}
        losses["corr_mask_attn_map_dec_all"] = corr[0].mean()
        for i, _corr in enumerate(corr[1:]):
            losses[f"corr_mask_attn_map_dec_{i}"] = _corr.mean()
        return losses


    def _get_src_permutation_idx(self, indices):
        #从indices tuple中取得src的batch index和对应的match index
        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
        src_idx = torch.cat([src for (src, _) in indices])
        return batch_idx, src_idx

    def _get_tgt_permutation_idx(self, indices):
        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
        return batch_idx, tgt_idx


    def obj_reweight_func(self, target_classes):
        return None
    
    def verb_weights_func(self, target_classes):
        return None


    def aux_outputs_loss(self, outputs, targets, num_interactions):
        losses = {}
        for i, aux_outputs in enumerate(outputs['aux_outputs']):
            indices = self.matcher(aux_outputs, targets)
            for loss in self.losses:
                if loss in ['masks', "mask_prediction", "corr"]:
                    continue
                kwargs = {}
                if 'labels' in loss:
                    # Logging is enabled only for the last layer
                    kwargs = {'log': False}
                l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_interactions, **kwargs)
                l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
                losses.update(l_dict)
        return losses

    def backbone_outputs_loss(self, outputs, targets, num_interactions):
        backbone_outputs = outputs['backbone_outputs']
        bin_targets = copy.deepcopy(targets)
        if not self.eff_specific_head:
            for bt in bin_targets:
                bt['labels'] = torch.zeros_like(bt['labels'])  # all labels are zero (meaning foreground)
        indices = self.matcher(backbone_outputs, bin_targets)
        losses = {}
        for loss in self.losses:
            if loss in ['masks', "mask_prediction", "corr"]:
                # Intermediate masks losses are too costly to compute, we ignore them.
                continue
            kwargs = {}
            if loss == 'labels':
                # Logging is enabled only for the last layer
                kwargs['log'] = False
            l_dict = self.get_loss(loss, backbone_outputs, bin_targets, indices, num_interactions, **kwargs)
            l_dict = {k + f'_backbone': v for k, v in l_dict.items()}
            losses.update(l_dict)
        return losses

    def enc_outputs_loss(self, outputs, targets, num_interactions):
        enc_outputs = outputs['enc_outputs']
        bin_targets = copy.deepcopy(targets)
        if not self.eff_specific_head:
            for bt in bin_targets:
                bt['labels'] = torch.zeros_like(bt['labels'])  # all labels are zero (meaning foreground)
        indices = self.matcher(enc_outputs, bin_targets)
        losses = {}
        for loss in self.losses:
            if loss in ['masks', "mask_prediction", "corr"]:
                # Intermediate masks losses are too costly to compute, we ignore them.
                continue
            kwargs = {}
            if loss == 'labels':
                # Logging is enabled only for the last layer
                kwargs['log'] = False
            l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_interactions, **kwargs)
            l_dict = {k + f'_enc': v for k, v in l_dict.items()}
            losses.update(l_dict)
        return losses

    def aux_outputs_enc_loss(self, outputs, targets, num_interactions):
        losses = {}
        for i, aux_outputs in enumerate(outputs['aux_outputs_enc']):
            indices = self.matcher(aux_outputs, targets)
            for loss in self.losses:
                if loss in ['masks', "mask_prediction", "corr"]:
                    # Intermediate masks losses are too costly to compute, we ignore them.
                    continue
                kwargs = {}
                if loss == 'labels':
                    # Logging is enabled only for the last layer
                    kwargs['log'] = False
                l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_interactions, **kwargs)
                l_dict = {k + f'_enc_{i}': v for k, v in l_dict.items()}
                losses.update(l_dict)
        return losses




    def _neg_loss(self, pred, gt, weights=None, 
                  alpha=0.5, gamma=2, neg_inds_beta=0, hard_neg_inds_beta=3):
        pos_inds = gt.eq(1).float()
        neg_inds = gt.lt(1).float()  
###############################  alpha  ##########################################
        #    tgtN, 29
        pred = torch.clamp(pred, 1e-12) 
        pos_loss = alpha * torch.log(pred) * torch.pow(1 - pred, gamma) * pos_inds
        if weights is not None:
            pos_loss = pos_loss * weights
        neg_loss = (1 - alpha) * torch.log(1 - pred) * torch.pow(pred, gamma) * neg_inds
###############################  alpha  ##########################################
        num_pos  = pos_inds.float().sum()
        pos_loss = pos_loss.sum()
        neg_loss = neg_loss.sum()
        loss = 0
        if num_pos == 0:
            loss = loss - neg_loss
        else:
            loss = loss - (pos_loss + neg_loss) / num_pos 
        return loss

    def _focal_cross_entropy(self, pred, gt, weights=None, alpha=0.5, gamma=2, neg_inds_beta=0, hard_neg_inds_beta=3):
        #    tgtN, 
        pos_inds = gt.eq(1).float()
        neg_inds = gt.eq(0).float()  
###############################  alpha  ##########################################
        pred = torch.clamp(pred, 1e-12) 
        pos_loss = alpha * torch.log(pred[:, 1]) * torch.pow(pred[:, 0], gamma) * pos_inds
        if weights is not None:
            pos_loss = pos_loss * weights
        neg_loss = (1 - alpha) * torch.log(pred[:, 0]) * torch.pow(pred[:, 1], gamma) * neg_inds
###############################  alpha  ##########################################
        num_pos  = pos_inds.float().sum()
        pos_loss = pos_loss.sum()
        neg_loss = neg_loss.sum()
        loss = 0
        if num_pos == 0:
            loss = loss - neg_loss
        else:
            loss = loss - (pos_loss + neg_loss) / num_pos
        return loss         


    def sigmoid_focal_loss(self, inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):

        prob = inputs.sigmoid()
        ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
        p_t = prob * targets + (1 - prob) * (1 - targets)
        loss = ce_loss * ((1 - p_t) ** gamma)

        if alpha >= 0:
            alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
            loss = alpha_t * loss

        return loss.mean(1).sum() / num_boxes

    def cross_entropy(self, input, target, weights=None, reduction="mean"):
        # input.shape: torch.size([-1, class])
        # target.shape: torch.size([-1])
        # reduction = "mean" or "sum"
        input = input.reshape(-1, input.shape[-1]) 
        target = target.reshape(-1)
        # 这里对input所有元素求exp
        exp = torch.exp(input)
        # 根据target的索引，在exp第一维取出元素值，这是softmax的分子

        tmp1 = exp.gather(1, target.unsqueeze(-1)).squeeze()
        # 在exp第一维求和，这是softmax的分母
        tmp2 = exp.sum(1)
        # softmax公式：ei / sum(ej)
        softmax = tmp1 / tmp2
        # cross-entropy公式： -yi * log(pi)
        #(-1)
        log = -torch.log(softmax)
        if weights!=None:
            log = weights[target] * log        
        if reduction == "mean": 
            return log.mean()
        else: 
            return log.sum()



#   sparse_detr
def idx_to_flat_grid(spatial_shapes, idx):
    """Turn per-image token indices into a one-hot mask over the flattened multi-level grid.

    ``spatial_shapes`` holds (H, W) per level; the mask width is the total token
    count across all levels. Returns a float32 tensor of shape (batch, total_tokens)
    with 1 at every selected index and 0 elsewhere.
    """
    total_tokens = int((spatial_shapes[..., 0] * spatial_shapes[..., 1]).sum())
    flat_grid = torch.zeros((idx.shape[0], total_tokens), device=idx.device, dtype=torch.float32)
    flat_grid.scatter_(1, idx.to(torch.int64), 1)
    return flat_grid


def attn_map_to_flat_grid(spatial_shapes, level_start_index, sampling_locations, attention_weights):
    """Splat deformable-attention weights back onto the flattened feature grid.

    Each (fractional) sampling location distributes its attention weight onto
    the four surrounding integer grid cells by bilinear interpolation, and the
    contributions are accumulated per flattened token.

    Returns a tensor of shape [N, n_layers, n_heads, total_tokens].
    """
    # sampling_locations: [N, n_layers, Len_q, n_heads, n_levels, n_points, 2]
    # attention_weights: [N, n_layers, Len_q, n_heads, n_levels, n_points]
    N, n_layers, _, n_heads, *_ = sampling_locations.shape
    sampling_locations = sampling_locations.permute(0, 1, 3, 2, 5, 4, 6).flatten(0, 2).flatten(1, 2)
    # [N * n_layers * n_heads, Len_q * n_points, n_levels, 2]
    attention_weights = attention_weights.permute(0, 1, 3, 2, 5, 4).flatten(0, 2).flatten(1, 2)
    # [N * n_layers * n_heads, Len_q * n_points, n_levels]

    rev_spatial_shapes = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], dim=-1) # hw -> wh (xy)
    # Scale normalized [0, 1] sampling coordinates to pixel coordinates per level.
    col_row_float = sampling_locations * rev_spatial_shapes

    # The four integer neighbours of each fractional location:
    # ll = (floor x, floor y), lh = (x, y+1), hl = (x+1, y), hh = (x+1, y+1).
    col_row_ll = col_row_float.floor().to(torch.int64)
    zero = torch.zeros(*col_row_ll.shape[:-1], dtype=torch.int64, device=col_row_ll.device)
    one = torch.ones(*col_row_ll.shape[:-1], dtype=torch.int64, device=col_row_ll.device)
    col_row_lh = col_row_ll + torch.stack([zero, one], dim=-1)
    col_row_hl = col_row_ll + torch.stack([one, zero], dim=-1)
    col_row_hh = col_row_ll + 1

    # Bilinear weights: each corner receives the area of the rectangle opposite to it.
    margin_ll = (col_row_float - col_row_ll).prod(dim=-1)
    margin_lh = -(col_row_float - col_row_lh).prod(dim=-1)
    margin_hl = -(col_row_float - col_row_hl).prod(dim=-1)
    margin_hh = (col_row_float - col_row_hh).prod(dim=-1)

    flat_grid_shape = (attention_weights.shape[0], int(torch.sum(spatial_shapes[..., 0] * spatial_shapes[..., 1])))
    flat_grid = torch.zeros(flat_grid_shape, dtype=torch.float32, device=attention_weights.device)

    # Corners are paired with the OPPOSITE margin (standard bilinear splatting).
    zipped = [(col_row_ll, margin_hh), (col_row_lh, margin_hl), (col_row_hl, margin_lh), (col_row_hh, margin_ll)]
    for col_row, margin in zipped:
        # Drop neighbours that fall outside their level's feature map.
        valid_mask = torch.logical_and(
            torch.logical_and(col_row[..., 0] >= 0, col_row[..., 0] < rev_spatial_shapes[..., 0]),
            torch.logical_and(col_row[..., 1] >= 0, col_row[..., 1] < rev_spatial_shapes[..., 1]),
        )
        # Flattened token index: row * W + col, offset by the level's start index.
        # Invalid entries are redirected to index 0 but contribute weight 0.
        idx = col_row[..., 1] * spatial_shapes[..., 1] + col_row[..., 0] + level_start_index
        idx = (idx * valid_mask).flatten(1, 2)
        weights = (attention_weights * valid_mask * margin).flatten(1)
        flat_grid.scatter_add_(1, idx, weights)

    return flat_grid.reshape(N, n_layers, n_heads, -1)


def compute_corr(flat_grid_topk, flat_grid_attn_map, spatial_shapes):
    """Fraction of decoder attention mass covered by the selected (top-k) tokens.

    Returns a list: element 0 is the overall coverage, followed by one per-level
    coverage value for each (H, W) entry of ``spatial_shapes``.
    """
    if flat_grid_topk.dim() == 1:
        # Promote unbatched inputs to a batch of one.
        flat_grid_topk = flat_grid_topk.unsqueeze(0)
        flat_grid_attn_map = flat_grid_attn_map.unsqueeze(0)

    overall_hit = (flat_grid_topk * flat_grid_attn_map).sum(-1)
    overall_tot = flat_grid_attn_map.sum(-1)
    corr = [overall_hit / overall_tot]

    # Walk the flattened grid level by level; each level occupies a contiguous span.
    start = 0
    for shape in spatial_shapes:
        count = int(shape[0] * shape[1])
        attn_level = flat_grid_attn_map[:, start:start + count]
        topk_level = flat_grid_topk[:, start:start + count]
        corr.append((topk_level * attn_level).sum(-1) / attn_level.sum(-1))
        start += count
    return corr




def dice_loss(inputs, targets, num_boxes):
    """
    Compute the DICE loss, similar to generalized IOU for masks
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
    """
    probs = inputs.sigmoid().flatten(1)
    intersection = (probs * targets).sum(1)
    union = probs.sum(-1) + targets.sum(-1)
    # +1 smoothing in numerator and denominator avoids division by zero.
    per_mask_loss = 1 - (2 * intersection + 1) / (union + 1)
    return per_mask_loss.sum() / num_boxes


def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
                positive vs negative examples. Default = -1 (no weighting).
        gamma: Exponent of the modulating factor (1 - p_t) to
               balance easy vs hard examples.
    Returns:
        Loss tensor
    """
    probs = inputs.sigmoid()
    bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # p_t is the model's probability for the TRUE class of each element.
    p_t = targets * probs + (1 - targets) * (1 - probs)
    focal = bce * (1 - p_t) ** gamma

    if alpha >= 0:
        # Class-balance term: alpha on positives, (1 - alpha) on negatives.
        focal = (targets * alpha + (1 - targets) * (1 - alpha)) * focal

    return focal.mean(1).sum() / num_boxes
