import torch
from scipy.optimize import linear_sum_assignment
from torch import nn

from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
from .matcherhoi import HungarianMatcherHOI as matcher


def build_matcher(args, loss_type='ce_loss'):
    """Build the Hungarian matcher for HOI detection from parsed arguments.

    Args:
        args: namespace carrying the ``set_cost_*`` weights and the
            ``use_matching`` flag.
        loss_type: classification-cost flavour, ``'ce_loss'`` or ``'focal_loss'``.

    Returns:
        A configured ``HungarianMatcherHOI`` instance (N predictions vs GT).
    """
    return HungarianMatcherHOI(
        cost_obj_class=args.set_cost_obj_class,
        cost_verb_class=args.set_cost_verb_class,
        cost_bbox=args.set_cost_bbox,
        cost_giou=args.set_cost_giou,
        cost_matching=args.set_cost_matching,
        use_matching=args.use_matching,
        loss_type=loss_type,
    )


class HungarianMatcherHOI(matcher):
    """Hungarian matcher for HOI that assigns objects, subjects and verbs
    in three separate passes (see ``forward``)."""

    def __init__(self, cost_obj_class: float = 1, cost_verb_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1,
                 cost_matching: float = 1, use_matching: bool = False, loss_type='ce_loss'):
        # All cost-weight bookkeeping lives in the base matcher class.
        super().__init__(cost_obj_class, cost_verb_class, cost_bbox, cost_giou,
                         cost_matching, use_matching, loss_type)
    
    @torch.no_grad()
    def forward(self, outputs, targets):
        """Match predictions to ground truth with three Hungarian passes.

        Pass 1 matches object queries, pass 2 subject (human) queries,
        pass 3 verb queries.  Side effect: passes 1 and 2 write
        'match_obj_no' / 'match_sub_no' into each dict in `targets`, and the
        verb cost (get_verb_C -> get_cost_*_no_class) reads those keys, so
        the order of the passes must not be changed.

        Returns:
            (obj_indices, sub_indices, verb_indices): one list per pass, each
            holding per-image (pred_index, gt_index) int64 tensor pairs.
        """
        C = self.get_obj_C(outputs, targets)       
        sizes = [len(v['uni_obj_labels']) for v in targets]
        # One assignment per image: i indexes predictions, j indexes GT;
        # linear_sum_assignment finds the minimum-cost matching (no duplicates).
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
        obj_indices = [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
        for i, obj_indice in enumerate(obj_indices):
            obj_no = []
            for j, uni_obj_no in enumerate(targets[i]['uni_obj_no']):
                # Prediction index that was matched to this unique object number.
                obj_no.append(obj_indice[0][obj_indice[1] == uni_obj_no.to('cpu')])
            # NOTE(review): torch.tensor over a list of index tensors assumes each
            # uni_obj_no matched exactly one query — confirm upstream guarantees this.
            targets[i]['match_obj_no'] = torch.tensor(obj_no, device = outputs['pred_obj_logits'].device, dtype=torch.int64)
                
        C = self.get_sub_C(outputs, targets)       
        sizes = [len(v['uni_sub_boxes']) for v in targets]
        # Same per-image minimum-cost assignment, now for subject boxes.
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
        sub_indices = [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
        for i, sub_indice in enumerate(sub_indices):
            sub_no = []
            for j, uni_sub_no in enumerate(targets[i]['uni_sub_no']):
                sub_no.append(sub_indice[0][sub_indice[1] == uni_sub_no.to('cpu')])
            targets[i]['match_sub_no'] = torch.tensor(sub_no, device= outputs['pred_obj_logits'].device, dtype=torch.int64)
            
        C = self.get_verb_C(outputs, targets)       
        sizes = [len(v['verb_labels']) for v in targets]
        # Verb pass: its cost relies on match_obj_no / match_sub_no written above.
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
        verb_indices = [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]             
                
        return obj_indices, sub_indices, verb_indices
    
    @torch.no_grad()
    def get_obj_C(self, outputs, targets):
        """Assemble the object matching cost: class + L1 box + GIoU terms,
        reshaped to (bs, num_queries, total_num_gt) on CPU."""
        batch, queries = outputs['pred_obj_logits'].shape[:2]
        class_term = self.get_cost_obj_class(outputs['pred_obj_logits'], targets)
        bbox_term = self.get_obj_cost_bbox(outputs['pred_obj_boxes'], targets)
        giou_term = self.get_obj_cost_giou(outputs['pred_obj_boxes'], targets)
        total = class_term + bbox_term + giou_term
        return total.view(batch, queries, -1).cpu()

    @torch.no_grad()
    def get_sub_C(self, outputs, targets):
        """Assemble the subject (human) matching cost: L1 box + GIoU, plus an
        optional matching term when ``self.use_matching`` is enabled."""
        batch, queries = outputs['pred_sub_boxes'].shape[:2]
        total = (self.get_sub_cost_bbox(outputs['pred_sub_boxes'], targets)
                 + self.get_sub_cost_giou(outputs['pred_sub_boxes'], targets))
        if self.use_matching:
            total = total + self.get_cost_matching(
                outputs['pred_sub_boxes'], targets,
                self.obj_alpha, self.obj_gamma,
                self.neg_inds_beta, self.hard_neg_inds_beta)
        return total.view(batch, queries, -1).cpu()

    @torch.no_grad()
    def get_verb_C(self, outputs, targets):
        """Assemble the verb-pass cost from the subject- and object-query-number
        classification terms (a direct verb-class cost is not used here)."""
        batch, queries = outputs['pred_verb_logits'].shape[:2]
        sub_term = self.get_cost_sub_no_class(
            outputs['pred_sub_no'], outputs['pred_sub_logits'], targets)
        obj_term = self.get_cost_obj_no_class(
            outputs['pred_obj_no'], outputs['pred_obj_logits'], targets)
        return (sub_term + obj_term).view(batch, queries, -1).cpu()

    def get_cost_obj_class(self, pred_obj_logits, targets,  
                           alpha=0.25, gamma=2, neg_inds_beta=0, hard_neg_inds_beta=3):
        """Object classification cost between every query and every GT object.

        Args:
            pred_obj_logits: (bs, num_queries, num_classes) raw logits.
            targets: per-image dicts; reads int64 'uni_obj_labels'.
            alpha, gamma: focal-loss parameters ('focal_loss' branch only).
            neg_inds_beta, hard_neg_inds_beta: unused here; kept for signature
                parity with the verb-class cost.

        Returns:
            (bs * num_queries, total_num_gt) cost scaled by self.cost_obj_class.

        Raises:
            ValueError: if self.loss_type is neither 'ce_loss' nor 'focal_loss'
                (previously an unknown value fell through to a NameError).
        """
        # (bs * num_queries, num_classes) class probabilities.
        out_obj_prob = pred_obj_logits.flatten(0, 1).softmax(-1)
        # (total_num_gt,) concatenated GT labels across the batch.
        tgt_obj_labels = torch.cat([v['uni_obj_labels'] for v in targets])

        if self.loss_type == 'ce_loss':
            # Negative probability of the GT class: lower cost = better match.
            cost_obj_class = -out_obj_prob[:, tgt_obj_labels]
        elif self.loss_type == 'focal_loss':
            neg_cost = (1 - alpha) * (out_obj_prob ** gamma) * (-(1 - out_obj_prob + 1e-8).log())
            pos_cost = alpha * ((1 - out_obj_prob) ** gamma) * (-(out_obj_prob + 1e-8).log())
            cost_obj_class = pos_cost[:, tgt_obj_labels] - neg_cost[:, tgt_obj_labels]
        else:
            raise ValueError(f"Unsupported loss_type: {self.loss_type!r}")

        return self.cost_obj_class * cost_obj_class

    def get_obj_cost_bbox(self, pred_boxes, targets):
        """Weighted pairwise L1 distance between every predicted object box
        and every GT object box across the batch."""
        preds = pred_boxes.flatten(0, 1)
        gts = torch.cat([v["uni_obj_boxes"] for v in targets])
        return self.cost_bbox * torch.cdist(preds, gts, p=1)

    def get_obj_cost_giou(self, pred_boxes, targets):
        """Negated generalized IoU between predicted and GT object boxes.
        Boxes are converted from cx,cy,w,h to x1,y1,x2,y2 before the GIoU."""
        preds_xyxy = box_cxcywh_to_xyxy(pred_boxes.flatten(0, 1))
        gts_xyxy = box_cxcywh_to_xyxy(torch.cat([v["uni_obj_boxes"] for v in targets]))
        return self.cost_giou * (-generalized_box_iou(preds_xyxy, gts_xyxy))

    def get_sub_cost_bbox(self, pred_boxes, targets):
        """Weighted pairwise L1 distance between every predicted subject box
        and every GT subject box across the batch."""
        preds = pred_boxes.flatten(0, 1)
        gts = torch.cat([v["uni_sub_boxes"] for v in targets])
        return self.cost_bbox * torch.cdist(preds, gts, p=1)


    def get_sub_cost_giou(self, pred_boxes, targets):
        """Negated generalized IoU between predicted and GT subject boxes.
        Boxes are converted from cx,cy,w,h to x1,y1,x2,y2 before the GIoU."""
        preds_xyxy = box_cxcywh_to_xyxy(pred_boxes.flatten(0, 1))
        gts_xyxy = box_cxcywh_to_xyxy(torch.cat([v["uni_sub_boxes"] for v in targets]))
        return self.cost_giou * (-generalized_box_iou(preds_xyxy, gts_xyxy))


    def get_cost_verb_class(self, pred_verb_logits, targets, alpha=0.5, gamma=2, neg_inds_beta=0, hard_neg_inds_beta=3):
        """Multi-label verb classification cost between queries and GT pairs.

        Args:
            pred_verb_logits: (bs, num_queries, num_verbs) raw logits.
            targets: per-image dicts with multi-hot 'verb_labels'; entries
                equal to -1 appear to encode hard negatives in the focal
                branch — TODO confirm the label encoding.
            alpha, gamma: focal-loss parameters ('focal_loss' branch only).
            neg_inds_beta, hard_neg_inds_beta: exponents weighting ordinary vs
                hard-negative entries ('focal_loss' branch only).

        Returns:
            (bs * num_queries, total_num_gt) cost scaled by self.cost_verb_class.
        """
        # (bs * num_queries, num_verbs) independent per-verb probabilities.
        out_verb_prob = pred_verb_logits.flatten(0, 1).sigmoid()
        tgt_verb_labels = torch.cat([v['verb_labels'] for v in targets])
        # Transpose to (num_verbs, total_num_gt) so matmul sums over verbs.
        tgt_verb_labels = tgt_verb_labels.permute(1, 0)
        
        if self.loss_type=='ce_loss':  
            # Average of mean positive-verb and mean negative-verb agreement.
            pos = out_verb_prob.matmul(tgt_verb_labels)
            pos_sum = tgt_verb_labels.sum(dim=0, keepdim=True) + 1e-4
            neg = (1 - out_verb_prob).matmul(1 - tgt_verb_labels)
            neg_num = ((1 - tgt_verb_labels).sum(dim=0, keepdim=True) + 1e-4)
            
            cost_verb_class = -(pos / pos_sum + neg / neg_num) / 2
            
        elif self.loss_type=='focal_loss':
            #out_verb_prob = torch.clamp(out_verb_prob, 1e-12) 
            # Focal positive term, aggregated over each GT's positive verbs.
            pos_pred = alpha * ((1 - out_verb_prob) ** gamma) * (-(out_verb_prob + 1e-8).log())
            pos_inds = tgt_verb_labels.eq(1).float()
            pos_target = pos_inds 
            pos_verb_cost_class = pos_pred.matmul(pos_target)
            pos_tgt_verb_labels_sum = pos_inds.sum(dim=0, keepdim=True) + 1e-4
            
            # Focal negative term: labels strictly between -1 and 1 count as
            # ordinary negatives; labels equal to -1 as "hard" negatives with
            # a larger down-weighting exponent.
            neg_pred= (1 - alpha) * (out_verb_prob ** gamma) * (-(1 - out_verb_prob + 1e-8).log())
            neg_inds = (tgt_verb_labels.lt(1) & tgt_verb_labels.gt(-1)).float()
            hard_neg_inds = (tgt_verb_labels.eq(-1)).float()
            neg_weights = torch.pow(1 - tgt_verb_labels, neg_inds_beta) * neg_inds + \
                        torch.pow(1 - tgt_verb_labels, hard_neg_inds_beta) * hard_neg_inds
            neg_target = neg_weights 
            neg_verb_cost_class = neg_pred.matmul(neg_target)
            # NOTE(review): neg_tgt_verb_labels_sum is computed but never used —
            # the total below is normalized by the positive count only. Possibly
            # intentional, possibly a bug; verify against the training loss.
            neg_tgt_verb_labels_sum = (neg_inds + hard_neg_inds).sum(dim=0, keepdim=True) + 1e-4
            cost_verb_class = (pos_verb_cost_class  + neg_verb_cost_class) / pos_tgt_verb_labels_sum
                    
        #   (bs * num_queries, total_num_gt)
        C = self.cost_verb_class * cost_verb_class
        return C
    
    def get_cost_obj_no_class(self, pred_obj_no, pred_obj_logits, targets, alpha=0.5, gamma=2, neg_inds_beta=0, hard_neg_inds_beta=3):
        """Cost of assigning each verb query the object-query number that was
        matched to each GT interaction in the object pass.

        Args:
            pred_obj_no: probabilities over object-query numbers; flattened to
                (bs * num_queries, num_obj_queries).
            pred_obj_logits: unused here (kept for interface compatibility).
            targets: per-image dicts; reads 'match_obj_no', which forward()
                writes before this is called.
            alpha, gamma: focal-loss parameters ('focal_loss' branch only).
            neg_inds_beta, hard_neg_inds_beta: unused; kept for signature
                parity with the verb-class cost.

        Returns:
            (bs * num_queries, total_num_gt) cost matrix (unit weight).

        Raises:
            ValueError: if self.loss_type is neither 'ce_loss' nor 'focal_loss'
                (previously an unknown value fell through to a NameError).
        """
        out_obj_prob = pred_obj_no.flatten(0, 1)
        # (total_num_gt,) matched object-query numbers across the batch.
        tgt_obj_labels = torch.cat([v['match_obj_no'] for v in targets])

        if self.loss_type == 'ce_loss':
            cost_obj_class = -out_obj_prob[:, tgt_obj_labels]
        elif self.loss_type == 'focal_loss':
            neg_cost = (1 - alpha) * (out_obj_prob ** gamma) * (-(1 - out_obj_prob + 1e-8).log())
            pos_cost = alpha * ((1 - out_obj_prob) ** gamma) * (-(out_obj_prob + 1e-8).log())
            cost_obj_class = pos_cost[:, tgt_obj_labels] - neg_cost[:, tgt_obj_labels]
        else:
            raise ValueError(f"Unsupported loss_type: {self.loss_type!r}")

        return cost_obj_class


    def get_cost_sub_no_class(self, pred_sub_no, pred_sub_logits, targets, alpha=0.5, gamma=2, neg_inds_beta=0, hard_neg_inds_beta=3):
        """Cost of assigning each verb query the subject-query number that was
        matched to each GT interaction in the subject pass.

        Args:
            pred_sub_no: probabilities over subject-query numbers; flattened to
                (bs * num_queries, num_sub_queries).
            pred_sub_logits: unused here (kept for interface compatibility).
            targets: per-image dicts; reads 'match_sub_no', which forward()
                writes before this is called.
            alpha, gamma: focal-loss parameters ('focal_loss' branch only).
            neg_inds_beta, hard_neg_inds_beta: unused; kept for signature
                parity with the verb-class cost.

        Returns:
            (bs * num_queries, total_num_gt) cost matrix (unit weight).

        Raises:
            ValueError: if self.loss_type is neither 'ce_loss' nor 'focal_loss'
                (previously an unknown value fell through to a NameError).
        """
        out_sub_prob = pred_sub_no.flatten(0, 1)
        # (total_num_gt,) matched subject-query numbers across the batch.
        tgt_sub_labels = torch.cat([v['match_sub_no'] for v in targets])

        if self.loss_type == 'ce_loss':
            cost_sub_class = -out_sub_prob[:, tgt_sub_labels]
        elif self.loss_type == 'focal_loss':
            neg_cost = (1 - alpha) * (out_sub_prob ** gamma) * (-(1 - out_sub_prob + 1e-8).log())
            pos_cost = alpha * ((1 - out_sub_prob) ** gamma) * (-(out_sub_prob + 1e-8).log())
            cost_sub_class = pos_cost[:, tgt_sub_labels] - neg_cost[:, tgt_sub_labels]
        else:
            raise ValueError(f"Unsupported loss_type: {self.loss_type!r}")

        return cost_sub_class

    def _get_src_permutation_idx(self, indices):
        """Flatten per-image (src, tgt) index pairs into a (batch_idx, src_idx)
        pair of tensors suitable for advanced indexing into predictions."""
        batch_idx = torch.cat(
            [torch.full_like(src_ids, img_id) for img_id, (src_ids, _) in enumerate(indices)])
        src_idx = torch.cat([src_ids for src_ids, _ in indices])
        return batch_idx, src_idx