
import torch
from torch import nn
from util.misc import NestedTensor
import math
from .module.modules import MLP
import numpy as np

from .cdn import CDN, get_weight_dict, get_losses


from .backbones.backbone import build_backbone
from .necks.my_transformer import build_transformer
from .matcher.my_matcherhoi import build_matcher
from .losses.my_setcriterionhoi import SetCriterionHOI
from .postprocess.my_postprocesshoi import PostProcessHOI



def build(args):
    """Construct the HOI model, its training criterion, and postprocessors.

    Returns:
        (model, criterion, postprocessors) where ``postprocessors`` is a
        dict with a single ``'hoi'`` entry.

    Note: matching supervision is force-disabled here (``args.use_matching``
    is overwritten) before any component reads the flag.
    """
    args.use_matching = False  # hard override: matching head/loss disabled

    model = MYNET(
        build_backbone(args),
        build_transformer(args),
        num_obj_classes=args.num_obj_classes,
        num_verb_classes=args.num_verb_classes,
        num_queries=args.num_queries,
        aux_loss=args.aux_loss,
        args=args,
    )

    criterion = SetCriterionHOI(
        args=args,
        matcher=build_matcher(args),
        weight_dict=get_weight_dict(args),
        losses=get_losses(args),
    )
    criterion.to(torch.device(args.device))

    return model, criterion, {'hoi': PostProcessHOI(args)}


class MYNET(CDN):
    """HOI detection head on top of the CDN cascade decoder.

    Extends CDN with subject/object box+class heads, a verb classifier, and
    two linear "No" projections (``sub_No``/``obj_No``) that score how close
    each interaction-decoder query is to each object-decoder query in
    feature space (used as soft query-association scores).
    """

    def __init__(self, backbone, transformer, num_obj_classes, num_verb_classes, num_queries, aux_loss=False, args=None):
        super(MYNET, self).__init__(backbone, transformer, num_obj_classes, num_verb_classes, num_queries, aux_loss, args)
        # Learnable temperature for cosine-similarity logits,
        # initialized CLIP-style to log(1 / 0.07).
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

    def build_head(self, num_classes, num_verb_classes):
        """Create all prediction heads (invoked by the CDN base class).

        Args:
            num_classes: number of object categories; classifiers emit
                ``num_classes + 1`` logits (extra no-object class).
            num_verb_classes: number of verb (interaction) categories.
        """
        hidden_dim = self.hidden_dim
        self.obj_class_embed = nn.Linear(hidden_dim, num_classes + 1)
        self.sub_class_embed = nn.Linear(hidden_dim, num_classes + 1)
        self.obj_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        self.verb_class_embed = nn.Linear(hidden_dim, num_verb_classes)
        self.sub_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)

        # Projections that map interaction queries into the object-decoder
        # feature space for the association scores in head_forward.
        self.sub_No = nn.Linear(hidden_dim, hidden_dim)
        self.obj_No = nn.Linear(hidden_dim, hidden_dim)
        if self.use_matching:
            # Binary match / no-match classifier over human-decoder queries.
            self.matching_embed = nn.Linear(hidden_dim, 2)

    def neck_forward(self, srcs, masks, poses, query_embed):
        """Run the transformer neck and return its raw output dict."""
        return self.transformer(srcs, masks, query_embed, poses)

    def head_forward(self, outputs):
        """Apply all prediction heads to the decoder outputs.

        Args:
            outputs: dict from the transformer neck with keys ``'out_query'``,
                ``'human_decoder_out'`` and ``'interaction_decoder_out'``
                (per-layer stacks; the last layer is the main prediction).

        Returns:
            dict of final-layer predictions, plus per-layer ``*_cascade``
            stacks and (in training with aux_loss) ``'aux_outputs'``.
        """
        # Gradients are blocked through the object/subject heads.
        # NOTE(review): presumably this detection branch is pretrained and
        # intentionally frozen -- confirm against the training recipe.
        with torch.no_grad():
            object_decoder_out = outputs['out_query']
            outputs_obj_coord = self.obj_bbox_embed(object_decoder_out).sigmoid()
            outputs_obj_class = self.obj_class_embed(object_decoder_out)

        with torch.no_grad():
            human_decoder_out = outputs['human_decoder_out']
            outputs_sub_coord = self.sub_bbox_embed(human_decoder_out).sigmoid()
            outputs_sub_class = self.sub_class_embed(human_decoder_out)
            if self.use_matching:
                outputs_matching = self.matching_embed(human_decoder_out)

        interaction_decoder_out = outputs['interaction_decoder_out']
        outputs_verb_class = self.verb_class_embed(interaction_decoder_out)
        outputs_obj_No = self.obj_No(interaction_decoder_out)
        outputs_sub_No = self.sub_No(interaction_decoder_out)

        # Pairwise association score between each interaction query's
        # projected feature and every object-decoder query:
        # 1 - tanh(|mean feature difference|), in (0, 1], higher = closer.
        # The query-count repeat was hard-coded to 100; use self.num_queries
        # so other query counts work (the subtraction only broadcasts when
        # the repeat count equals the number of object queries).
        pre_outputs_obj_No = 1 - torch.abs(
            (outputs_obj_No.unsqueeze(3).repeat(1, 1, 1, self.num_queries, 1)
             - object_decoder_out.unsqueeze(2)).mean(-1)).tanh()
        # NOTE(review): the subject score also compares against
        # object_decoder_out (not human_decoder_out) -- verify intended.
        pre_outputs_sub_No = 1 - torch.abs(
            (outputs_sub_No.unsqueeze(3).repeat(1, 1, 1, self.num_queries, 1)
             - object_decoder_out.unsqueeze(2)).mean(-1)).tanh()

        out = {'pred_obj_logits': outputs_obj_class[-1],
               'pred_obj_boxes': outputs_obj_coord[-1],

               'pred_sub_logits': outputs_sub_class[-1],
               'pred_sub_boxes': outputs_sub_coord[-1],

               'pred_verb_logits': outputs_verb_class[-1],
               'pred_obj_no': pre_outputs_obj_No[-1],
               'pred_sub_no': pre_outputs_sub_No[-1],

               'pred_obj_logits_cascade': outputs_obj_class,
               'pred_verb_logits_cascade': outputs_verb_class,
               'pred_sub_boxes_cascade': outputs_sub_coord,
               'pred_obj_boxes_cascade': outputs_obj_coord,
               }
        if self.use_matching:
            out['pred_matching_logits'] = outputs_matching[-1]
        else:
            outputs_matching = None
        if self.aux_loss and self.training:
            out['aux_outputs'] = self._set_aux_loss(outputs_obj_class, outputs_verb_class,
                                                    outputs_sub_coord, outputs_obj_coord,
                                                    pre_outputs_obj_No, pre_outputs_sub_No,
                                                    outputs_matching, outputs_sub_class)
        return out

    def forward(self, samples: NestedTensor, targets=None):
        """Full pipeline: backbone -> transformer neck -> prediction heads.

        ``targets`` is accepted for interface compatibility but unused here.
        The returned dict merges head predictions with the raw neck outputs.
        """
        srcs, masks, poses = self.backbone_forward(samples)
        outputs = self.neck_forward(srcs, masks, poses, self.query_embed.weight)  # hs, memory
        out = self.head_forward(outputs)
        out.update(outputs)
        return out

    @torch.jit.unused
    def _set_aux_loss(self, outputs_obj_class, outputs_verb_class, outputs_sub_coord, outputs_obj_coord,
                      outputs_obj_No, outputs_sub_No,
                      outputs_matching=None, outputs_sub_class=None):
        """Package per-layer predictions for auxiliary (deep-supervision) losses.

        Slices ``[-min_dec_layers_num : -1]`` keep all but the final decoder
        layer -- the final layer is the main prediction, not an aux output.
        Note the matching branch omits 'pred_sub_logits' while the
        non-matching branch includes it (preserved as-is).
        """
        min_dec_layers_num = min(self.dec_layers_hopd, self.dec_layers_interaction)
        if self.use_matching:
            return [{'pred_obj_logits': a,
                     'pred_verb_logits': b,
                     'pred_sub_boxes': c,
                     'pred_obj_boxes': d,
                     'pred_matching_logits': e,
                     'pred_obj_no': f,
                     'pred_sub_no': g, }
                    for a, b, c, d, e, f, g in zip(outputs_obj_class[-min_dec_layers_num : -1], outputs_verb_class[-min_dec_layers_num : -1],
                                                   outputs_sub_coord[-min_dec_layers_num : -1], outputs_obj_coord[-min_dec_layers_num : -1],
                                                   outputs_matching[-min_dec_layers_num : -1],
                                                   outputs_obj_No[-min_dec_layers_num : -1], outputs_sub_No[-min_dec_layers_num : -1])]
        else:
            return [{'pred_obj_logits': a,
                     'pred_verb_logits': b,
                     'pred_sub_boxes': c,
                     'pred_obj_boxes': d,
                     'pred_obj_no': f,
                     'pred_sub_no': g,
                     'pred_sub_logits': h,
                     } for a, b, c, d, f, g, h in zip(outputs_obj_class[-min_dec_layers_num : -1], outputs_verb_class[-min_dec_layers_num : -1],
                                                      outputs_sub_coord[-min_dec_layers_num : -1], outputs_obj_coord[-min_dec_layers_num : -1],
                                                      outputs_obj_No[-min_dec_layers_num : -1], outputs_sub_No[-min_dec_layers_num : -1],
                                                      outputs_sub_class[-min_dec_layers_num : -1])]

    def textfeat_logits(self, hoi_feat, txt_feat):
        """Score each (hoi, text) feature pair by temperature-scaled cosine
        similarity, taking only the diagonal (i-th hoi vs i-th text) and
        squashing to (0, 1) with a sigmoid.
        """
        hoi_feat = hoi_feat / hoi_feat.norm(dim=-1, keepdim=True)
        txt_feat = txt_feat / txt_feat.norm(dim=-1, keepdim=True)
        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_pair = logit_scale * hoi_feat @ txt_feat.t()
        logits_per_pair = torch.diag(logits_per_pair).sigmoid()
        return logits_per_pair

def get_weight_dict(args):
    """Build the loss-name -> coefficient mapping for SetCriterionHOI.

    With ``args.aux_loss``, every entry is duplicated with an ``_{i}`` suffix
    for each of the ``min(dec_layers_hopd, dec_layers_interaction) - 1``
    auxiliary decoder layers.

    NOTE(review): this shadows the ``get_weight_dict`` imported from ``.cdn``
    at the top of the file -- the module-level definition wins at call time.
    """
    weight_dict = {
        'loss_obj_ce': args.obj_loss_coef,
        'loss_verb_ce': args.verb_loss_coef,
        'loss_sub_bbox': args.bbox_loss_coef,
        'loss_obj_bbox': args.bbox_loss_coef,
        'loss_sub_giou': args.giou_loss_coef,
        'loss_obj_giou': args.giou_loss_coef,
        'loss_sub_no_ce': args.obj_loss_coef,
        'loss_obj_no_ce': args.obj_loss_coef,
    }
    if args.use_matching:
        weight_dict['loss_matching'] = args.matching_loss_coef

    if args.aux_loss:
        num_aux_layers = min(args.dec_layers_hopd, args.dec_layers_interaction) - 1
        weight_dict.update({
            f'{name}_{layer}': coef
            for layer in range(num_aux_layers)
            for name, coef in weight_dict.items()
        })
    return weight_dict


def get_losses(args):
    """Return the list of criterion loss names enabled for this model.

    'matching_labels' is appended only when ``args.use_matching`` is set.
    (Shadows the ``get_losses`` imported from ``.cdn`` at the top of the file.)
    """
    base_losses = [
        'obj_labels',
        'verb_labels',
        'sub_obj_boxes',
        'obj_cardinality',
        'sub_no',
        'obj_no',
    ]
    return base_losses + (['matching_labels'] if args.use_matching else [])
