import torch

import torch.nn.functional as F

from util.misc import (accuracy, get_world_size,
                       is_dist_avail_and_initialized, focal_accuracy)
import numpy as np
from queue import Queue
import math
from  .cdn_setcriterionhoi import SetCriterionHOI as setcrithoi
from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
from scipy.optimize import linear_sum_assignment

import torch
from torch import nn
import torch.nn.functional as F

from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
from util.misc import (nested_tensor_from_tensor_list,
                       accuracy, get_world_size, interpolate, focal_accuracy,
                       is_dist_avail_and_initialized)
import torch.distributed as dist
import copy
import numpy as np

class SetCriterionHOI(setcrithoi):
    def __init__(self, args, matcher, weight_dict, losses, loss_type='ce_loss'):
        """Set-based criterion for HOI detection.

        All heavy setup (matcher, per-loss weights, loss list) is delegated
        to the CDN base criterion; this subclass only pins the focal-loss
        hyper-parameters used by the verb-classification term.
        """
        super().__init__(args, matcher, weight_dict, losses, loss_type)
        # Focal-loss balancing factor / focusing exponent for verb logits.
        self.verb_alpha = 0.5
        self.verb_gamma = 2

    def get_loss(self, loss, outputs, targets, indices, num, **kwargs):
        """Dispatch a loss name to the method that computes it.

        Args:
            loss: key identifying the loss term (e.g. 'verb_labels').
            outputs: model prediction dict.
            targets: per-image ground-truth dicts.
            indices: matcher output (prediction/target index pairs).
            num: normalization count forwarded to the loss function.
            **kwargs: extra options forwarded to the loss function.

        Returns:
            The dict produced by the selected loss method.
        """
        dispatch = {
            'obj_labels': self.loss_obj_labels,
            'obj_cardinality': self.loss_obj_cardinality,
            'verb_labels': self.loss_verb_labels,
            'sub_obj_boxes': self.loss_sub_obj_boxes,
            'sub_labels': self.loss_sub_labels,
            'masks': self.loss_masks,
            'mask_prediction': self.loss_mask_prediction,
            'corr': self.corr,
            'matching_labels': self.loss_matching_labels,
        }
        assert loss in dispatch, f'do you really want to compute {loss} loss?'
        handler = dispatch[loss]
        return handler(outputs, targets, indices, num, **kwargs)

    def loss_sub_labels(self, outputs, targets, indices, num_interactions, log=True, alpha=0.5, gamma=2, neg_inds_beta=0, hard_neg_inds_beta=3):
        """Classification loss for the subject branch.

        Every matched query is assigned class 0; unmatched queries keep the
        default fill value 1 (presumably the "no subject" class — confirm
        against the model head). Depending on ``self.loss_type`` the loss is
        either a plain cross-entropy or a sigmoid focal loss over one-hot
        targets.

        NOTE(review): the ``alpha``/``neg_inds_beta``/``hard_neg_inds_beta``
        parameters are accepted but unused — the focal branch reads
        ``self.alpha`` instead. Kept as-is to preserve behavior.
        """
        assert 'pred_sub_logits' in outputs
        # Assumed shape (batch, num_queries, num_classes) — e.g. 8 x 100 x C.
        sub_logits = outputs['pred_sub_logits']

        # One flat zero (class 0) per matched ground-truth entry.
        matched_targets = torch.cat([
            torch.zeros_like(t['obj_labels'][J], device=t['obj_labels'][J].device, requires_grad=False)
            for t, (_, J) in zip(targets, indices)
        ])
        perm_idx = self._get_src_permutation_idx(indices)

        # Start every query at class 1, then overwrite the matched positions.
        tgt_classes = torch.full(sub_logits.shape[:2], 1, dtype=torch.int64, device=matched_targets.device)
        tgt_classes[perm_idx] = matched_targets

        if self.loss_type == 'ce_loss':
            loss_sub_ce = F.cross_entropy(sub_logits.transpose(1, 2), tgt_classes, None, reduction='mean')
        elif self.loss_type == 'focal_loss':
            # One-hot with one extra scratch column, dropped afterwards so the
            # last class index contributes no positive target.
            onehot = torch.zeros(
                [sub_logits.shape[0], sub_logits.shape[1], sub_logits.shape[2] + 1],
                dtype=sub_logits.dtype, layout=sub_logits.layout, device=sub_logits.device)
            onehot.scatter_(2, tgt_classes.unsqueeze(-1), 1)
            onehot = onehot[..., :-1]
            loss_sub_ce = self.sigmoid_focal_loss(
                sub_logits, onehot, num_interactions,
                alpha=self.alpha, gamma=gamma) * sub_logits.shape[1]
        # NOTE(review): any other loss_type leaves loss_sub_ce unbound and
        # raises NameError below — same as the original implementation.

        losses = {'loss_sub_ce': loss_sub_ce}
        if log:
            # Top-1 error over the matched queries only.
            losses['sub_class_error'] = 100 - accuracy(sub_logits[perm_idx], matched_targets)[0]
        return losses


    def loss_verb_labels(self, outputs, targets, indices, num_interactions, log=True):
        """Multi-label classification loss for verb predictions.

        Builds a dense multi-hot target tensor (unmatched queries stay
        all-zero), applies per-class re-weighting from
        ``self.verb_weights_func``, and computes either a BCE-with-logits
        loss or a CornerNet-style penalty-reduced loss via ``self._neg_loss``,
        depending on ``self.verb_loss_type``.

        NOTE(review): ``log`` is currently unused — the verb_class_error
        metric was disabled in the original code with a comment flagging
        it as buggy.
        """
        assert 'pred_verb_logits' in outputs
        verb_logits = outputs['pred_verb_logits']

        matched_targets = torch.cat([t['verb_labels'][J] for t, (_, J) in zip(targets, indices)])
        perm_idx = self._get_src_permutation_idx(indices)

        # Dense targets, same shape as the logits (e.g. batch x queries x verbs).
        tgt = torch.zeros_like(verb_logits)
        tgt[perm_idx] = matched_targets

        # Per-class re-weighting derived from the target statistics.
        verb_weights = self.verb_weights_func(tgt)

        # Verbs are multi-label (several verbs per pair), hence sigmoid-based losses.
        if self.verb_loss_type == 'bce':
            loss_verb_ce = F.binary_cross_entropy_with_logits(verb_logits, tgt)
        elif self.verb_loss_type == 'focal':
            probs = verb_logits.sigmoid()
            loss_verb_ce = self._neg_loss(probs, tgt, verb_weights,
                                          self.verb_alpha, self.verb_gamma,
                                          self.neg_inds_beta, self.hard_neg_inds_beta)

        return {'loss_verb_ce': loss_verb_ce}