import torch
from scipy.optimize import linear_sum_assignment
from torch import nn

from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
from .matcherhoi import HungarianMatcherHOI as matcher


def build_matcher(args, loss_type='ce_loss'):
    """Construct the Hungarian matcher used for HOI set prediction.

    The individual cost weights are read from ``args`` (the ``set_cost_*``
    values and the ``use_matching`` flag); ``loss_type`` selects between
    'ce_loss' and 'focal_loss' style classification costs.

    Each of the N queries (e.g. 100 per image) is matched against the
    ground-truth HOI targets via the Hungarian algorithm.
    """
    matcher_kwargs = dict(
        cost_obj_class=args.set_cost_obj_class,
        cost_verb_class=args.set_cost_verb_class,
        cost_bbox=args.set_cost_bbox,
        cost_giou=args.set_cost_giou,
        cost_matching=args.set_cost_matching,
        use_matching=args.use_matching,
        loss_type=loss_type,
    )
    return HungarianMatcherHOI(**matcher_kwargs)


class HungarianMatcherHOI(matcher):
    """Hungarian matcher for HOI detection.

    Thin specialization of the base matcher from ``.matcherhoi``: it rebuilds
    the matching cost matrix so that, in addition to the object-class, box and
    GIoU costs, it also includes a subject-classification cost and the verb
    (interaction) classification cost.
    """

    def __init__(self, cost_obj_class: float = 1, cost_verb_class: float = 1,
                 cost_bbox: float = 1, cost_giou: float = 1,
                 cost_matching: float = 1, use_matching: bool = False,
                 loss_type: str = 'ce_loss'):
        # All weights/flags are stored by the parent class. The focal-loss
        # hyper-parameters referenced below (self.obj_alpha, self.verb_alpha,
        # self.neg_inds_beta, ...) are presumably set there as well -- they are
        # not visible in this file.
        super().__init__(cost_obj_class, cost_verb_class, cost_bbox,
                         cost_giou, cost_matching, use_matching, loss_type)

    @torch.no_grad()
    def get_C(self, outputs, targets):
        """Assemble the full matching cost matrix.

        Args:
            outputs: dict of batched predictions; uses 'pred_obj_logits',
                'pred_sub_logits', 'pred_verb_logits', 'pred_sub_boxes' and
                'pred_obj_boxes', each with leading shape [bs, num_queries, ...].
            targets: list (length bs) of per-image ground-truth dicts.

        Returns:
            Cost tensor on CPU with shape (bs, num_queries, total_num_targets).
        """
        bs, num_queries = outputs['pred_obj_logits'].shape[:2]
        C = self.get_cost_obj_class(outputs['pred_obj_logits'], targets,
                                    self.obj_alpha, self.obj_gamma,
                                    self.neg_inds_beta, self.hard_neg_inds_beta) + \
            self.get_cost_bbox(outputs['pred_sub_boxes'], outputs['pred_obj_boxes'], targets) + \
            self.get_cost_giou(outputs['pred_sub_boxes'], outputs['pred_obj_boxes'], targets) + \
            self.get_cost_verb_class(outputs['pred_verb_logits'], outputs['pred_obj_logits'],
                                     outputs['pred_sub_logits'], targets,
                                     self.verb_alpha, self.verb_gamma,
                                     self.neg_inds_beta, self.hard_neg_inds_beta) + \
            self.get_cost_sub_class(outputs['pred_sub_logits'], targets,
                                    self.obj_alpha, self.obj_gamma,
                                    self.neg_inds_beta, self.hard_neg_inds_beta)

        # The box cost is the dominant matching signal: recall first, then precision.
        C = C.view(bs, num_queries, -1).cpu()
        return C

    def get_cost_sub_class(self, pred_sub_logits, targets,
                           alpha=0.25, gamma=2, neg_inds_beta=0, hard_neg_inds_beta=3):
        """Subject-classification cost (weighted by ``self.cost_obj_class``).

        Args:
            pred_sub_logits: [bs, num_queries, num_sub_classes] logits.
            targets: per-image dicts; only the length of each 'obj_labels'
                tensor is used (one subject per HOI target).
            alpha, gamma: focal-loss parameters (only used for 'focal_loss').
            neg_inds_beta, hard_neg_inds_beta: accepted for signature symmetry
                with the other cost methods; unused here.

        Returns:
            [bs * num_queries, total_num_targets] cost tensor.

        Raises:
            ValueError: if ``self.loss_type`` is not a known loss type.
        """
        # [bs * num_queries, num_sub_classes]
        out_sub_prob = pred_sub_logits.flatten(0, 1).softmax(-1)
        # Every HOI subject is a person, hence label 0 for every target.
        # (zeros_like already inherits device/dtype from 'obj_labels'.)
        tgt_sub_labels = torch.cat([torch.zeros_like(v['obj_labels']) for v in targets])

        if self.loss_type == 'ce_loss':
            cost_sub_class = -out_sub_prob[:, tgt_sub_labels]
        elif self.loss_type == 'focal_loss':
            neg_cost = (1 - alpha) * (out_sub_prob ** gamma) * (-(1 - out_sub_prob + 1e-8).log())
            pos_cost = alpha * ((1 - out_sub_prob) ** gamma) * (-(out_sub_prob + 1e-8).log())
            cost_sub_class = pos_cost[:, tgt_sub_labels] - neg_cost[:, tgt_sub_labels]
        else:
            # Previously an unknown loss_type fell through to an UnboundLocalError.
            raise ValueError(f'unsupported loss_type: {self.loss_type!r}')
        # The subject cost shares the object-class weight.
        return self.cost_obj_class * cost_sub_class

    def get_cost_verb_class(self, pred_verb_logits, pred_obj_logits, pred_sub_logits,
                            targets, alpha=0.5, gamma=2, neg_inds_beta=0, hard_neg_inds_beta=3):
        """Verb (interaction) classification cost, weighted by ``self.cost_verb_class``.

        Args:
            pred_verb_logits: [bs, num_queries, num_verbs] multi-label logits.
            pred_obj_logits, pred_sub_logits: accepted for interface
                compatibility; see NOTE below -- not used in the cost.
            targets: per-image dicts with 'verb_labels' of shape
                [num_targets, num_verbs] (multi-hot; -1 marks hard negatives
                in the 'focal_loss' branch).
            alpha, gamma: focal-loss parameters.
            neg_inds_beta, hard_neg_inds_beta: exponents for weighting plain
                and hard negatives in the 'focal_loss' branch.

        Returns:
            [bs * num_queries, total_num_targets] cost tensor.

        Raises:
            ValueError: if ``self.loss_type`` is not a known loss type.
        """
        # [bs * num_queries, num_verbs]
        out_verb_prob = pred_verb_logits.flatten(0, 1).sigmoid()
        # NOTE(review): the original also computed object/subject confidences
        # here to modulate out_verb_prob, but the multiplication was disabled
        # (commented out); the dead softmax work is dropped. The extra logits
        # stay in the signature so callers are unaffected.

        # [num_verbs, total_num_targets] multi-hot verb labels.
        tgt_verb_labels = torch.cat([v['verb_labels'] for v in targets]).permute(1, 0)

        if self.loss_type == 'ce_loss':
            # Mean positive agreement plus mean negative agreement, negated.
            pos = out_verb_prob.matmul(tgt_verb_labels)
            pos_sum = tgt_verb_labels.sum(dim=0, keepdim=True) + 1e-4
            neg = (1 - out_verb_prob).matmul(1 - tgt_verb_labels)
            neg_num = (1 - tgt_verb_labels).sum(dim=0, keepdim=True) + 1e-4
            cost_verb_class = -(pos / pos_sum + neg / neg_num) / 2
        elif self.loss_type == 'focal_loss':
            # Positive focal term, aggregated over each target's positive verbs.
            pos_pred = alpha * ((1 - out_verb_prob) ** gamma) * (-(out_verb_prob + 1e-8).log())
            pos_inds = tgt_verb_labels.eq(1).float()
            pos_verb_cost_class = pos_pred.matmul(pos_inds)
            pos_tgt_verb_labels_sum = pos_inds.sum(dim=0, keepdim=True) + 1e-4

            # Negative focal term; labels in (-1, 1) are plain negatives,
            # labels == -1 are hard negatives up-weighted by
            # (1 - (-1)) ** hard_neg_inds_beta == 2 ** hard_neg_inds_beta.
            neg_pred = (1 - alpha) * (out_verb_prob ** gamma) * (-(1 - out_verb_prob + 1e-8).log())
            neg_inds = (tgt_verb_labels.lt(1) & tgt_verb_labels.gt(-1)).float()
            hard_neg_inds = tgt_verb_labels.eq(-1).float()
            neg_weights = torch.pow(1 - tgt_verb_labels, neg_inds_beta) * neg_inds + \
                torch.pow(1 - tgt_verb_labels, hard_neg_inds_beta) * hard_neg_inds
            neg_verb_cost_class = neg_pred.matmul(neg_weights)
            # NOTE(review): the original also computed the negative-label count
            # (never used); both terms are normalized by the positive count
            # only -- preserved as-is, but possibly an unintended asymmetry.
            cost_verb_class = (pos_verb_cost_class + neg_verb_cost_class) / pos_tgt_verb_labels_sum
        else:
            # Previously an unknown loss_type fell through to an UnboundLocalError.
            raise ValueError(f'unsupported loss_type: {self.loss_type!r}')

        # [bs * num_queries, total_num_targets]
        return self.cost_verb_class * cost_verb_class