# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import torch
from torch import nn

from .detr import DETR
from .module.modules import MLP
import math

from .backbones.backbone import build_backbone
from .matcher.matcherhoi import build_matcher

from .postprocess.postprocesshoi import PostProcessHOI

import numpy as np
import torch.nn.functional as F
from .Semantic.semanticgraph import SemanticGraph

from .postprocess.crossmodal_calibration import CrossModalCalibration

# Batch-norm momentum; not referenced in this file — presumably consumed by
# backbone code elsewhere, or dead. TODO(review): confirm before removing.
BN_MOMENTUM = 0.1


def build(args):
    """Assemble the full HOI model: backbone + transformer neck + OCN heads + losses.

    Args:
        args: parsed command-line namespace selecting the device, the
            transformer variant, and the loss/matcher configuration.

    Returns:
        Tuple ``(model, criterion, postprocessors)`` where ``postprocessors``
        is ``{'hoi': PostProcessHOI(args)}``.

    Raises:
        ValueError: if no transformer variant flag is set.
    """
    device = torch.device(args.device)
    # Backbone: CNN features + positional embedding,
    # e.g. N, 3, 512, 512 -> N, 2048, 16, 16.
    backbone = build_backbone(args)
    # Neck: transformer encoder + decoder, N, 2048, 16, 16 -> N, 100, 2048.
    # Exactly one variant flag must be set; fail loudly otherwise instead of
    # hitting a NameError on `transformer` below.
    if args.stochastic_context_transformer:
        print('Building stochastic context transformer...')
        from .necks.StochasticContextTransformer import build_transformer
    elif args.IterativeDETRHOI:
        print('Building Iterative transformer...')
        from .necks.IterativeTransformer import build_transformer
    elif args.DETRHOIhm:
        from .necks.transformer import build_transformer
    else:
        raise ValueError(
            'No transformer variant selected: set one of '
            'stochastic_context_transformer, IterativeDETRHOI or DETRHOIhm.')
    transformer = build_transformer(args)

    # NOTE(review): `dataset` is left at OCN's default ('hico'); for V-COCO
    # training verify that the model should not receive dataset='vcoco' here.
    model = OCN(
        backbone,
        transformer,
        num_obj_classes = args.num_obj_classes,
        num_verb_classes = args.num_verb_classes,
        num_queries = args.num_queries,
        aux_loss = args.aux_loss,)

    # Loss computation: the matcher (Hungarian algorithm) pairs the N x 100
    # predictions with ground truth before the criterion scores them.
    matcher = build_matcher(args)
    weight_dict = get_weight_dict(args)
    losses = get_losses(args)

    # The three criterion variants share one constructor signature; only the
    # implementing module differs.
    if args.HOICVAE:
        from .losses.SetCriterionHOICVAE import SetCriterionHOI
    elif args.SemanticDETRHOI:
        from .losses.SetCriterionHOISemantic import SetCriterionHOI
    else:
        from .losses.setcriterionhoi import SetCriterionHOI
    criterion = SetCriterionHOI(args, matcher=matcher, losses=losses, weight_dict=weight_dict)
    criterion.to(device)
    postprocessors = {'hoi': PostProcessHOI(args)}

    return model, criterion, postprocessors


class OCN(DETR):
    """DETR-style HOI detector with semantic reasoning and cross-modal calibration.

    On top of the visual decoder queries, the model contextualizes pretrained
    verb word embeddings with a SemanticGraph, aggregates them per query via
    object/verb co-occurrence priors, calibrates the visual and semantic
    streams against each other (CrossModalCalibration), and classifies verbs
    from the fused features. Object classes and boxes come from the raw
    visual queries, as in plain DETR.
    """

    def __init__(self, backbone, transformer, num_obj_classes, num_verb_classes, num_queries,  dataset = 'hico', aux_loss=False):
        # Assign before super().__init__() so that build_head(), which reads
        # self.dataset, also works if the base class invokes it during its
        # own initialization. (Plain attributes may be set on an nn.Module
        # before Module.__init__ runs.)
        self.dataset = dataset
        super().__init__(backbone, transformer, num_obj_classes, num_queries, num_verb_classes, aux_loss)

    def build_head(self, num_obj_classes, num_verb_classes):
        """Create the prediction heads and register semantic prior buffers.

        Args:
            num_obj_classes: number of object categories; a "no object" slot
                is appended internally (hence the +1 below).
            num_verb_classes: number of verb (interaction) categories.

        Side effects: registers `obj_verb_co`, `verb_verb_co` and
        `verb_word_embedding` buffers loaded from files under `datasets/`.
        """
        hidden_dim = self.hidden_dim
        dataset = self.dataset
        self.obj_class_embed = nn.Linear(hidden_dim, num_obj_classes + 1)
        self.obj_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        self.verb_class_embed = nn.Linear(hidden_dim, num_verb_classes)
        self.sub_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)

        # Focal-loss style initialization: bias the classifier logits so the
        # initial foreground probability equals prior_prob, and zero the last
        # layer of both box-regression MLPs.
        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        self.obj_class_embed.bias.data = torch.ones(num_obj_classes+1) * bias_value
        self.verb_class_embed.bias.data = torch.ones(num_verb_classes) * bias_value
        nn.init.constant_(self.obj_bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.obj_bbox_embed.layers[-1].bias.data, 0)
        nn.init.constant_(self.sub_bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.sub_bbox_embed.layers[-1].bias.data, 0)

        # Object/verb co-occurrence prior with smoothing; the appended
        # last row (the "no object" class) becomes uniform after smoothing.
        if dataset == 'hico':
            # Laplacian smoothing (a.k.a. additive smoothing): append an
            # all-zero "no object" row, add a small constant, renormalize
            # each row to a distribution over verbs.
            # (A Jelinek-Mercer variant — interpolating with the uniform
            # distribution — was also tried here.)
            obj_verb_co = np.load('datasets/priors/obj_verb_cooccurrence.npz')['cond_prob_co_matrices']
            obj_verb_co = torch.cat((torch.tensor(obj_verb_co).float(), torch.zeros((1, num_verb_classes))), dim = 0)
            obj_verb_co = obj_verb_co + 0.1/obj_verb_co.shape[1]
            obj_verb_co = obj_verb_co / obj_verb_co.sum(dim=1).unsqueeze(dim = 1)
            self.register_buffer('obj_verb_co', obj_verb_co)
            print('obj_verb_co has nan ? ' + str(np.isnan(obj_verb_co).sum()))

            # NOTE: despite the archive key name, this matrix is used as a
            # joint probability; rows are renormalized to conditional form.
            verb_verb_co = np.load('datasets/priors/verb_verb_cooccurrence.npz')['cond_prob_co_matrices']
            verb_verb_co = verb_verb_co / np.expand_dims(verb_verb_co.sum(axis=1), axis = 1)
            verb_verb_co[np.isnan(verb_verb_co)] = 0  # zero-sum rows produce nan after division
            self.register_buffer('verb_verb_co', torch.tensor(verb_verb_co).float())
            print('verb_verb_co has nan ? ' + str(np.isnan(verb_verb_co).sum()))
            print('verb_verb_co sum: ' + str(verb_verb_co.sum()))

            # Pretrained word embeddings for the verb vocabulary (GloVe 300-d;
            # fastText / word2vec variants were also experimented with).
            verb_word_embedding = torch.tensor(np.load('datasets/word_embedding/hico_verb_glove-wiki-gigaword-300.npz')['embedding_list'])
            verb_word_embedding = norm_tensor(verb_word_embedding)
            self.register_buffer('verb_word_embedding', verb_word_embedding)

        elif dataset == 'vcoco':
            obj_verb_co = np.load('datasets/priors/obj_verb_cooccurrence_vcoco.npz')['joint_prob_co_matrices']
            print('obj_verb_co has nan ? ' + str(np.isnan(obj_verb_co).sum()))
            obj_verb_co[np.isnan(obj_verb_co)] = 0.1/obj_verb_co.shape[1]  # eliminate nan entries in the matrix
            obj_verb_co = torch.cat((torch.tensor(obj_verb_co).float(), torch.zeros((1, num_verb_classes))), dim = 0)
            obj_verb_co = obj_verb_co + 0.1/obj_verb_co.shape[1]
            obj_verb_co = obj_verb_co / obj_verb_co.sum(dim=1).unsqueeze(dim = 1)
            self.register_buffer('obj_verb_co', obj_verb_co)
            print('obj_verb_co has nan ? ' + str(np.isnan(obj_verb_co).sum()))

            # NOTE: despite the archive key name, this matrix is used as a
            # joint probability; rows are renormalized to conditional form.
            verb_verb_co = np.load('datasets/priors/verb_verb_cooccurrence_vcoco.npz')['cond_prob_co_matrices']
            verb_verb_co = verb_verb_co / np.expand_dims(verb_verb_co.sum(axis=1), axis = 1)
            verb_verb_co[np.isnan(verb_verb_co)] = 0  # zero-sum rows produce nan after division
            self.register_buffer('verb_verb_co', torch.tensor(verb_verb_co).float())
            print('verb_verb_co sum: ' + str(verb_verb_co.sum()))

            verb_word_embedding = torch.tensor(np.load('datasets/word_embedding/vcoco_verb_glove-wiki-gigaword-300.npz')['embedding_list'])
            verb_word_embedding = norm_tensor(verb_word_embedding)
            self.register_buffer('verb_word_embedding', verb_word_embedding)

        # Semantic reasoning over the verb vocabulary (other attention types —
        # MLP_GNN, multihead_transformer — were experimented with).
        self.semantic_graph = SemanticGraph(300, 256, 1, attention_type='embedded_dot_pro')

        # Cross-modality calibration and fusion of the two streams.
        self.semantic_gate2 = nn.Linear(hidden_dim, hidden_dim)
        self.hs_gate = nn.Linear(hidden_dim, hidden_dim)
        self.cross_modal_calibration = CrossModalCalibration(hidden_dim, nlayers = 1)
        self.fusion_1 = nn.Linear(hidden_dim, hidden_dim)
        self.fusion_2 = nn.Linear(hidden_dim, hidden_dim)

    def head_forward(self, outputs):
        """Apply the prediction heads to decoder query features.

        Args:
            outputs: dict whose 'out_query' holds stacked decoder-layer query
                features — assumed shape [#layers, N, #queries, hidden_dim],
                TODO confirm against the transformer neck.

        Returns:
            dict with the last-layer predictions, the full per-layer
            "cascade" stacks, and 'aux_outputs' while training with aux_loss.
        """
        x = outputs['out_query']
        # Per-layer class logits and (sigmoid-normalized) box coordinates.
        outputs_obj_class = self.obj_class_embed(x)
        outputs_verb_class = self.verb_class_embed(x)
        outputs_obj_coord = self.obj_bbox_embed(x).sigmoid()
        outputs_sub_coord = self.sub_bbox_embed(x).sigmoid()

        out = {'pred_obj_logits': outputs_obj_class[-1],
               'pred_verb_logits': outputs_verb_class[-1],
               'pred_sub_boxes': outputs_sub_coord[-1],
               'pred_obj_boxes': outputs_obj_coord[-1],

               'pred_obj_logits_cascade': outputs_obj_class,
               'pred_verb_logits_cascade': outputs_verb_class,
               'pred_sub_boxes_cascade': outputs_sub_coord,
               'pred_obj_boxes_cascade': outputs_obj_coord,

               }
        if self.aux_loss and self.training:
            out['aux_outputs'] = self._set_aux_loss(outputs_obj_class, outputs_verb_class, outputs_sub_coord, outputs_obj_coord)
        return out

    def forward(self, samples: 'NestedTensor', **kwargs):
        """Detect human-object interactions for a batch of images.

        Args:
            samples: NestedTensor of padded images and masks; a plain list of
                image tensors is converted first.

        Returns:
            dict with last-decoder-layer predictions ('pred_obj_logits',
            'pred_verb_logits', 'pred_sub_boxes', 'pred_obj_boxes'), the
            semantic verb embedding and the verb/verb co-occurrence buffer,
            plus 'aux_outputs' while training with aux_loss.
        """
        # NOTE(review): NestedTensor / nested_tensor_from_tensor_list are not
        # imported anywhere in this file — they presumably come from the
        # project's util.misc module; confirm the import exists. The
        # annotation above is quoted so the module at least imports cleanly.
        # (An unused `inf_time = time.time()` profiling leftover was removed
        # here: `time` was never imported, so it raised NameError.)
        if not isinstance(samples, NestedTensor):
            samples = nested_tensor_from_tensor_list(samples)
        # features: [feature tensor from layer4]; pos: matching positional
        # embeddings, e.g. shaped like [2, 256, 18, 25].
        features, pos = self.backbone(samples)

        src, mask = features[-1].decompose()
        assert mask is not None

        # Semantic stream: contextualize the verb word embeddings.
        semantic = self.semantic_graph(self.verb_word_embedding)

        # Visual stream: hs is [#decoder_layers, N, #queries, hidden_dim],
        # e.g. [6, 2, 100, 256].
        hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]

        outputs_obj_class = self.obj_class_embed(hs)
        outputs_sub_coord = self.sub_bbox_embed(hs).sigmoid()
        outputs_obj_coord = self.obj_bbox_embed(hs).sigmoid()

        # Statistical prior aggregation: look up each query's argmax object
        # class in the object/verb co-occurrence matrix and use the resulting
        # verb distribution to weight the semantic embeddings.
        outputs_obj_81 = outputs_obj_class.argmax(dim =-1).unsqueeze(-1).expand(-1,-1,-1,self.num_verb_classes)  # [L, N, Q, #verbs]
        obj_verb_co = self.obj_verb_co.expand(outputs_obj_81.shape[:-2]+(-1,-1))
        outputs_obj_co = torch.gather(obj_verb_co, dim =2, index = outputs_obj_81)  # [L, N, Q, #verbs]
        semantic_aug = torch.einsum('abcd,de->abce', outputs_obj_co, semantic)
        # Calibrate the two modalities against each other, then fuse them.
        cross_hs, cross_semantic_aug = self.cross_modal_calibration(hs, semantic_aug)
        hs_aug = count_fusion(self.fusion_1(cross_hs), self.fusion_2(cross_semantic_aug))

        # Verbs are classified from the fused features; objects and boxes
        # come from the raw visual queries above.
        outputs_verb_class = self.verb_class_embed(hs_aug)

        out = {'pred_obj_logits': outputs_obj_class[-1], 'pred_verb_logits': outputs_verb_class[-1],
               'pred_sub_boxes': outputs_sub_coord[-1], 'pred_obj_boxes': outputs_obj_coord[-1], 
               'semantic':semantic, 'verb_verb_co':self.verb_verb_co,}
        if self.aux_loss and self.training:
            # Aux loss adds supervision to every intermediate decoder layer.
            out['aux_outputs'] = self._set_aux_loss(outputs_obj_class, outputs_verb_class,
                                                    outputs_sub_coord, outputs_obj_coord)
        return out

    @torch.jit.unused
    def _set_aux_loss(self, outputs_obj_class, outputs_verb_class, outputs_sub_coord, outputs_obj_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'pred_obj_logits':  a,
                 'pred_verb_logits': b,
                 'pred_sub_boxes':   c,
                 'pred_obj_boxes':   d}
                for a, b, c, d in zip(outputs_obj_class[:-1], outputs_verb_class[:-1],
                                      outputs_sub_coord[:-1], outputs_obj_coord[:-1])]


def norm_tensor(tensor):
    """Scale *tensor* to unit Frobenius norm along its last dimension.

    Zero rows yield nan (division by a zero norm), matching the original
    behavior.
    """
    last_dim_norm = torch.norm(tensor, p = 'fro', dim = -1, keepdim = True)
    return tensor / last_dim_norm


def count_fusion(x, y):
    """Fuse two tensors: ReLU of their sum minus their squared difference."""
    diff = x - y
    return F.relu(x + y) - diff * diff

def get_weight_dict(args):
    """Map loss names to their scalar coefficients taken from *args*.

    With aux_loss enabled, every coefficient is replicated once per
    intermediate decoder layer under an ``_{i}`` suffix; with
    exponential_loss, the box/class terms additionally decay exponentially
    toward the earlier layers.
    """
    weight_dict = {
        'loss_obj_ce': args.obj_loss_coef,
        'loss_verb_ce': args.verb_loss_coef,

        'loss_sub_bbox': args.bbox_loss_coef,
        'loss_obj_bbox': args.bbox_loss_coef,
        'loss_sub_giou': args.giou_loss_coef,
        'loss_obj_giou': args.giou_loss_coef,

        'loss_entropy_bound': args.entropy_bound_coef,
        'loss_kl_divergence': args.kl_divergence_coef,
        'loss_verb_gt_recon': args.verb_gt_recon_coef,
        'loss_ranking_verbs': args.ranking_verb_coef,
        'loss_verb_hm': args.verb_hm_coef,
        'loss_semantic_similar': args.semantic_similar_coef,
        'loss_verb_threshold': args.verb_threshold_coef,
    }

    # Only these terms are subject to the exponential layer-wise decay.
    exponential_keys = {'loss_sub_bbox', 'loss_obj_bbox', 'loss_sub_giou',
                        'loss_obj_giou', 'loss_obj_ce', 'loss_verb_ce'}

    if args.aux_loss:
        aux_weight_dict = {}
        for layer in range(args.dec_layers - 1):
            for key, coef in weight_dict.items():
                if args.exponential_loss and key in exponential_keys:
                    coef = math.pow(args.exponential_hyper, args.dec_layers - 1 - layer) * coef
                aux_weight_dict[f'{key}_{layer}'] = coef
        weight_dict.update(aux_weight_dict)
    print('weight_dict' + str(weight_dict))

    return weight_dict

def get_losses(args):
    """Build the list of active loss names from the CLI flags on *args*.

    Starts from the four base losses, appends each optional loss whose flag
    is set, then drops 'verb_labels' / 'sub_obj_boxes' when their disabling
    flags are set.
    """
    losses = ['obj_labels', 'verb_labels', 'sub_obj_boxes', 'obj_cardinality']

    # (flag attribute on args, loss name to append) in the original order.
    optional = (
        ('entropy_bound', 'entropy_bound'),
        ('kl_divergence', 'kl_divergence'),
        ('verb_gt_recon', 'loss_gt_verb_recon'),
        ('ranking_verb', 'ranking_verb'),
        ('verb_hm', 'verb_hm'),
        ('semantic_similar', 'semantic_similar'),
        ('verb_threshold', 'verb_threshold'),
    )
    for flag, loss_name in optional:
        if getattr(args, flag):
            losses.append(loss_name)

    if args.no_verb_bce_focal:
        losses.remove('verb_labels')
    if args.frozen_vision:
        losses.remove('sub_obj_boxes')

    return losses