import torch
from torch import nn
import torch.nn.functional as F

from util.box_ops import box_cxcywh_to_xyxy
import numpy as np

class PostProcessHOI(nn.Module):
    """Turns raw HOI-model head outputs into scored (subject, verb, object) predictions."""

    def __init__(self, args, mode='train', correct_mat=None, trans_type='detr'):
        """Store post-processing configuration.

        Args:
            args: experiment namespace; the hyper-parameter attributes listed in
                the loop below are copied onto the module.
            mode: 'train' or 'test'. In 'test' mode the verb/object correctness
                matrix is registered as a buffer.
            correct_mat: numpy 0/1 matrix of valid (verb, object) combinations,
                indexed [verb, object]; required when mode == 'test'.
            trans_type: decoding scheme, 'detr' or 'ddetr'.
        """
        super().__init__()
        self.args = args
        self.mode = mode
        self.trans_type = trans_type
        # Copy the post-processing hyper-parameters off the args namespace.
        for attr in ('use_matching', 'hoi_thres', 'subject_category_id',
                     'max_hois', 'HO_scale', 'query_thres', 'use_nms_filter',
                     'nms_thresh', 'nms_alpha', 'nms_beta'):
            setattr(self, attr, getattr(args, attr))

        if mode == 'test':
            # Append an all-ones column so indexing with one extra object
            # category (presumably "no object") stays valid — TODO confirm.
            correct_mat = np.concatenate((correct_mat, np.ones((correct_mat.shape[0], 1))), axis=1)
            self.register_buffer('correct_mat', torch.from_numpy(correct_mat))
            print('using use_nms_filter: ', self.use_nms_filter)

    @torch.no_grad()
    def forward(self, outputs, target_sizes, thres=0.):
        """Convert raw HOI head outputs into per-image predictions.

        Args:
            outputs: dict with 'pred_obj_logits', 'pred_obj_boxes',
                'pred_sub_boxes', 'pred_verb_logits' and, when
                self.use_matching, 'pred_matching_logits'.
            target_sizes: (batch, 2) tensor of (height, width) per image, used
                by get_outputs to rescale normalized boxes to pixels.
            thres: unused; candidate filtering uses self.query_thres instead.

        Returns:
            'train' mode: list of per-image dicts with 'boxes', 'labels',
            'score', 'sub_ids', 'obj_ids', 'verb_scores' (CPU tensors).
            'test' mode: list of per-image dicts with 'predictions' (per-box
            records) and 'hoi_prediction' (subject/object/verb triplets),
            optionally triplet-NMS filtered, score-sorted, truncated to
            self.max_hois.
        """

        sub_boxes, obj_boxes, obj_scores, obj_labels, verb_scores, matching_scores  = \
            self.get_outputs(outputs, target_sizes)
    
        results = []
        for per_img in range(len(obj_scores)):
            # Per-image slices; note `os` here is a tensor, not the os module.
            os, ol, vs, sb, ob =  \
                obj_scores[per_img],  obj_labels[per_img], verb_scores[per_img], sub_boxes[per_img], obj_boxes[per_img]
            # Every subject shares the fixed subject (person) category id.
            sl = torch.full_like(ol, self.subject_category_id)
            # Concatenate subjects first, then objects: (2 * num_queries,) labels...
            l = torch.cat((sl, ol))
            # ...and (2 * num_queries, 4) boxes; sub_ids/obj_ids below index the halves.
            b = torch.cat((sb, ob))        
            if self.use_matching:
                ms = matching_scores[per_img]
                s = torch.cat((ms, os))
                # Verb probabilities used as-is (object/matching score weighting disabled).
                hoi_scores = vs
                
            else:
                # Subjects get a constant score of 1.
                ss = torch.full_like(os, 1.)
                s = torch.cat((ss, os))
                # HOI confidence = verb probability x object classification score.
                hoi_scores = vs * os.unsqueeze(1) 

            # 0..2*num_queries-1: first half = subject slots, second half = object slots.
            ids = torch.arange(b.shape[0])
            
            if self.mode=='train':
                # Boxes clamped to [0, 1333] — presumably the max training image
                # side used by the augmentation pipeline; TODO confirm.
                results.append({'boxes':  np.clip(b.to('cpu'), 0, 1333),
                                'labels': l.to('cpu'),  
                                'score':  s.to('cpu')})
                results[-1].update({'sub_ids':     ids[:ids.shape[0] // 2],
                                    'obj_ids':     ids[ids.shape[0] // 2:],
                                    'verb_scores': hoi_scores.to('cpu'), })
            elif self.mode=='test':
                # Per-box records consumed by the HOI evaluator / triplet NMS.
                bboxes = [{'bbox':          np.clip(bbox, 0, 1333), 
                           'category_id':   label, 
                           'score':         score} 
                            for bbox, label, score in zip(b.to('cpu').numpy(), 
                                                          l.to('cpu').numpy(), 
                                                          s.to('cpu').numpy())]
                # Expand every (query, verb) pair into flat, parallel arrays.
                object_labels = ol.view(-1, 1).expand(-1, hoi_scores.shape[1])
                verb_labels = torch.arange(hoi_scores.shape[1], device=self.correct_mat.device).view(1, -1).expand(hoi_scores.shape[0], -1).to('cpu').numpy()
                subject_ids = np.tile(ids[:ids.shape[0] // 2].to('cpu').numpy(), (hoi_scores.shape[1], 1)).T
                object_ids = np.tile(ids[ids.shape[0] // 2:].to('cpu').numpy(), (hoi_scores.shape[1], 1)).T
                # Flatten everything to (num_queries * num_verb_classes,).
                hoi_scores = hoi_scores.to('cpu').numpy().ravel()
                verb_labels = verb_labels.ravel()
                subject_ids = subject_ids.ravel()
                object_ids = object_ids.ravel()
                
                # Semantic-level filter: zero out (verb, object) combinations
                # marked impossible in the dataset's correctness matrix.
                masks = self.correct_mat[verb_labels.reshape(-1), 
                                         object_labels.reshape(-1)].view(hoi_scores.shape).to('cpu').numpy()
                hoi_scores *= masks
                # Threshold, then keep only strictly-positive candidates; the
                # same boolean mask is applied to all parallel arrays.
                hoi_scores[hoi_scores<self.query_thres] = 0.
                new_hoi_scores = hoi_scores[hoi_scores>0.] 
                verb_labels = verb_labels[hoi_scores>0.]
                object_ids =  object_ids[hoi_scores>0.]
                subject_ids = subject_ids[hoi_scores>0.]
                
                
                hois = [{'subject_id':  subject_id, 
                         'object_id':   object_id, 
                         'category_id': category_id, 
                         'score': score} for
                        subject_id, object_id, category_id, score in zip(subject_ids, object_ids, verb_labels, new_hoi_scores)]
                current_result = {'predictions': bboxes, 
                                  'hoi_prediction': hois}
                # Pairwise triplet NMS first, then sort by score and truncate.
                if self.use_nms_filter:
                    current_result = self.triplet_nms_filter(current_result)
                current_result['hoi_prediction'].sort(key=lambda k: (k.get('score', 0)), reverse=True)
                current_result['hoi_prediction'] = current_result['hoi_prediction'][:self.max_hois]
                results.append(current_result)
        return results

    @torch.no_grad()
    def get_outputs(self, outputs, target_sizes):
        """Decode the raw heads into per-image boxes, scores and labels.

        Ordering is deliberate: when the matching head is enabled, subject
        boxes are gathered with the top-k indices of the *matching* logits,
        while object boxes and verb scores use the top-k indices of the
        *object* logits. This only matters for trans_type == 'ddetr', where
        the top-k selection reorders the queries (for 'detr' the indices are
        None).
        """
        assert target_sizes.shape[1] == 2

        if not self.use_matching:
            obj_scores, obj_labels, cls_topk = self.get_obj_cls(outputs['pred_obj_logits'], target_sizes)
            return (self.get_sub_bbox(outputs['pred_sub_boxes'], target_sizes, cls_topk),
                    self.get_obj_bbox(outputs['pred_obj_boxes'], target_sizes, cls_topk),
                    obj_scores,
                    obj_labels,
                    self.get_verb_score(outputs['pred_verb_logits'], target_sizes, cls_topk),
                    None)

        matching_scores, _, match_topk = self.get_matching_scores(outputs['pred_matching_logits'], target_sizes)
        sub_boxes = self.get_sub_bbox(outputs['pred_sub_boxes'], target_sizes, match_topk)
        obj_scores, obj_labels, cls_topk = self.get_obj_cls(outputs['pred_obj_logits'], target_sizes)
        obj_boxes = self.get_obj_bbox(outputs['pred_obj_boxes'], target_sizes, cls_topk)
        verb_scores = self.get_verb_score(outputs['pred_verb_logits'], target_sizes, cls_topk)
        return sub_boxes, obj_boxes, obj_scores, obj_labels, verb_scores, matching_scores
    

    def get_obj_cls(self, pred_obj_logits, target_sizes):
        out_obj_logits = pred_obj_logits
        #out_obj_logits = outputs['pred_obj_logits_cascade'].permute(1,0,2,3).flatten(1,2)
        assert len(out_obj_logits) == len(target_sizes)
        obj_prob = F.softmax(out_obj_logits, -1)
        
        N, num_query, num_class = obj_prob.shape
        if self.trans_type=='detr':
            obj_scores, obj_labels = obj_prob[..., :-1].max(-1)
            topk_boxes = None
        elif self.trans_type=='ddetr':
            # top 100
            topk_values, topk_indexes = torch.topk(obj_prob.flatten(1,2), num_query, dim=1)
            obj_scores = topk_values
            topk_boxes = topk_indexes // num_class
            obj_labels = topk_indexes % num_class               
        # if self.args.dataset_file=='vcoco':
        #     obj_scores[obj_labels==0]=0.

        return obj_scores, obj_labels, topk_boxes

    def get_verb_score(self, pred_verb_logits, target_sizes, topk_boxes, topK=20):
        out_verb_logits = pred_verb_logits
        #out_verb_logits = outputs['pred_verb_logits_cascade'].permute(1,0,2,3).flatten(1,2)
        verb_scores = out_verb_logits.sigmoid()
        N, num_query, num_class = verb_scores.shape
        #   100, 29
        # if self.trans_type=='detr':
        #     _, verb_index  = torch.topk(-verb_scores, num_class - topK, dim=-1)
        #     verb_scores = verb_scores.scatter(-1, verb_index, 0)
        # elif self.trans_type=='ddetr':
        #     verb_scores = torch.gather(verb_scores, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, num_class))
        return verb_scores

    def get_obj_bbox(self, pred_obj_boxes, target_sizes, topk_boxes):
        """Convert normalized object boxes (cx, cy, w, h) to absolute xyxy pixels."""
        boxes = pred_obj_boxes
        if self.trans_type == 'ddetr':
            # Re-align queries with the classifier's top-k selection indices.
            boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
        heights, widths = target_sizes.unbind(1)
        # Per-image (w, h, w, h) scale factors, broadcast across the query dim.
        scale = torch.stack([widths, heights, widths, heights], dim=1).to(boxes.device)
        return box_cxcywh_to_xyxy(boxes) * scale[:, None, :]

    def get_sub_bbox(self, pred_sub_boxes, target_sizes, topk_boxes):
        """Convert normalized subject boxes (cx, cy, w, h) to absolute xyxy pixels."""
        boxes = pred_sub_boxes
        if self.trans_type == 'ddetr':
            # Re-align queries with the top-k selection indices (matching head
            # indices when use_matching is on — see get_outputs).
            boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
        heights, widths = target_sizes.unbind(1)
        # Per-image (w, h, w, h) scale factors, broadcast across the query dim.
        scale = torch.stack([widths, heights, widths, heights], dim=1).to(boxes.device)
        return box_cxcywh_to_xyxy(boxes) * scale[:, None, :]

    def get_matching_scores(self, pred_matching_logits, target_sizes):
        out_matching_logits = pred_matching_logits
        #out_matching_logits = pred_matching_logits_cascade.permute(1,0,2,3).flatten(1,2)
        matching_prob = F.softmax(out_matching_logits, -1)
        matching_prob = matching_prob[..., :-1]
        if self.trans_type=='detr':
            matching_scores, matching_labels = matching_prob.max(-1)
            topk_boxes = None
        elif self.trans_type=='ddetr':
            topk_values, topk_indexes = torch.topk(matching_prob.view(out_matching_logits.shape[0], -1), out_matching_logits.shape[1], dim=1)
            matching_scores = topk_values
            topk_boxes = topk_indexes // out_matching_logits.shape[2]
            matching_labels = topk_indexes % out_matching_logits.shape[2]  
        matching_scores[matching_labels==0]= 1-matching_scores[matching_labels==0] 
        return matching_scores, matching_labels, topk_boxes



#############################################  PNMS ###########################################################
    @torch.no_grad()
    def triplet_nms_filter(self, preds):
        pred_od = preds['predictions']
        pred_hois = preds['hoi_prediction']
        all_triplets = {}
        for index, pred_hoi in enumerate(pred_hois):
            triplet = str(pred_od[pred_hoi['subject_id']]['category_id']) + '_' + \
                      str(pred_od[pred_hoi['object_id']]['category_id']) + '_' + \
                      str(pred_hoi['category_id'])

            if triplet not in all_triplets:
                all_triplets[triplet] = {'subs':[], 
                                         'objs':[], 
                                         'scores':[], 
                                         'indexes':[]}
            all_triplets[triplet]['subs'].append(pred_od[pred_hoi['subject_id']]['bbox'])
            all_triplets[triplet]['objs'].append(pred_od[pred_hoi['object_id']]['bbox'])
            all_triplets[triplet]['scores'].append(pred_hoi['score'])
            all_triplets[triplet]['indexes'].append(index)

        all_keep_inds = []
        for triplet, values in all_triplets.items():
            subs, objs, scores = values['subs'], values['objs'], values['scores']
            keep_inds = self.pairwise_nms(np.array(subs), np.array(objs), np.array(scores))
            keep_inds = list(np.array(values['indexes'])[keep_inds])
            all_keep_inds.extend(keep_inds)

        preds_filtered = {
            'predictions': pred_od,
            'hoi_prediction': list(np.array(preds['hoi_prediction'])[all_keep_inds])}
        return preds_filtered
    
    @torch.no_grad()
    def pairwise_nms(self, subs, objs, scores):
        """Greedy non-maximum suppression over (subject, object) box PAIRS.

        A lower-scored pair is suppressed when, against an already-kept pair,
        sub_IoU ** nms_alpha * obj_IoU ** nms_beta > nms_thresh.

        Args:
            subs: (K, 4) numpy array of subject boxes, xyxy pixel coordinates.
            objs: (K, 4) numpy array of object boxes, xyxy pixel coordinates.
            scores: (K,) numpy array of pair confidences.

        Returns:
            List of surviving indices into the inputs, highest score first.
        """
        sx1, sy1, sx2, sy2 = subs[:, 0], subs[:, 1], subs[:, 2], subs[:, 3]
        ox1, oy1, ox2, oy2 = objs[:, 0], objs[:, 1], objs[:, 2], objs[:, 3]

        # Areas use the inclusive "+ 1" pixel convention.
        sub_areas = (sx2 - sx1 + 1) * (sy2 - sy1 + 1)
        obj_areas = (ox2 - ox1 + 1) * (oy2 - oy1 + 1)
        # Candidate indices sorted by descending score.
        order = scores.argsort()[::-1]

        keep_inds = []
        while order.size > 0:
            # Keep the current best, then measure overlap against the rest.
            i = order[0]
            keep_inds.append(i)

            # Subject-box intersections with all remaining candidates.
            sxx1 = np.maximum(sx1[i], sx1[order[1:]])
            syy1 = np.maximum(sy1[i], sy1[order[1:]])
            sxx2 = np.minimum(sx2[i], sx2[order[1:]])
            syy2 = np.minimum(sy2[i], sy2[order[1:]])

            sw = np.maximum(0.0, sxx2 - sxx1 + 1)
            sh = np.maximum(0.0, syy2 - syy1 + 1)
            sub_inter = sw * sh
            sub_union = sub_areas[i] + sub_areas[order[1:]] - sub_inter

            # Object-box intersections with all remaining candidates.
            oxx1 = np.maximum(ox1[i], ox1[order[1:]])
            oyy1 = np.maximum(oy1[i], oy1[order[1:]])
            oxx2 = np.minimum(ox2[i], ox2[order[1:]])
            oyy2 = np.minimum(oy2[i], oy2[order[1:]])

            ow = np.maximum(0.0, oxx2 - oxx1 + 1)
            oh = np.maximum(0.0, oyy2 - oyy1 + 1)
            obj_inter = ow * oh
            obj_union = obj_areas[i] + obj_areas[order[1:]] - obj_inter

            # Combined pairwise overlap; alpha/beta weight subject vs object IoU.
            ovr = np.power(sub_inter/sub_union, self.nms_alpha) * np.power(obj_inter / obj_union, self.nms_beta)
            inds = np.where(ovr <= self.nms_thresh)[0]

            # +1 offset: `inds` indexes into order[1:], shift back into `order`.
            order = order[inds + 1]
        return keep_inds


#############################################  PNMS ###########################################################
