
import torch
from torch import nn
from util.misc import NestedTensor
import torch.nn.functional as F


from .cdn import CDN, get_weight_dict, get_losses


from .backbones.backbone import build_backbone
from .necks.my_transformer import build_transformer
from .matcher.my_matcherhoi import build_matcher
from .losses.my_setcriterionhoi import SetCriterionHOI
from .postprocess.my_postprocesshoi import PostProcessHOI
from .module.modules import MLP


def build(args):
    """Assemble the full HOI pipeline from the parsed command-line *args*.

    Returns:
        (model, criterion, postprocessors): the ``MYNET`` detector, its
        training criterion moved to ``args.device``, and a dict of
        post-processors keyed by task name (``'hoi'``).
    """
    device = torch.device(args.device)

    # Backbone and transformer neck are built first, then wired into the model.
    model = MYNET(
        build_backbone(args),
        build_transformer(args),
        num_obj_classes=args.num_obj_classes,
        num_verb_classes=args.num_verb_classes,
        num_queries=args.num_queries,
        aux_loss=args.aux_loss,
        args=args,
    )

    criterion = SetCriterionHOI(
        args=args,
        matcher=build_matcher(args),
        weight_dict=get_weight_dict(args),
        losses=get_losses(args),
    )
    criterion.to(device)

    return model, criterion, {'hoi': PostProcessHOI(args)}


class MYNET(CDN):
    """CDN-based HOI detector with object, human, and interaction heads.

    The transformer neck produces three decoder streams (object query,
    human decoder, interaction decoder); each feeds a dedicated prediction
    head built in :meth:`build_head`.
    """

    def __init__(self, backbone, transformer, num_obj_classes, num_verb_classes, num_queries, aux_loss=False, args=None):
        super().__init__(backbone, transformer, num_obj_classes, num_verb_classes, num_queries, aux_loss, args)
        # Three learnable fusion weights; currently unused by forward()
        # (the HOI-score fusion they were meant for is disabled).
        self.gamma = nn.Parameter(torch.zeros(1, 3), requires_grad=True)

    def build_head(self, num_classes, num_verb_classes):
        """Create the per-stream prediction heads on top of the decoders."""
        dim = self.hidden_dim
        self.obj_class_embed = nn.Linear(dim, num_classes + 1)  # +1: "no object" class
        self.obj_bbox_embed = MLP(dim, dim, 4, 3)
        self.verb_class_embed = nn.Linear(dim, num_verb_classes)
        self.sub_bbox_embed = MLP(dim, dim, 4, 3)
        self.matching_embed = nn.Linear(dim, 2)  # binary human/object matching logit

    def neck_forward(self, srcs, masks, poses, query_embed):
        """Run the transformer neck and return its raw output dict."""
        return self.transformer(srcs, masks, query_embed, poses)

    def head_forward(self, outputs):
        """Apply the prediction heads to the neck outputs.

        Returns a dict with last-layer predictions, per-layer ("cascade")
        stacks, and — during training with aux_loss — auxiliary outputs.
        """
        obj_feat = outputs['out_query']
        human_feat = outputs['human_decoder_out']
        inter_feat = outputs['interaction_decoder_out']

        obj_logits = self.obj_class_embed(obj_feat)
        obj_boxes = self.obj_bbox_embed(obj_feat).sigmoid()
        matching_logits = self.matching_embed(human_feat)
        sub_boxes = self.sub_bbox_embed(human_feat).sigmoid()
        verb_logits = self.verb_class_embed(inter_feat)

        # Index [-1] picks the final decoder layer; the *_cascade entries keep
        # every layer for cascade-style supervision.
        out = {
            'pred_obj_logits': obj_logits[-1],
            'pred_verb_logits': verb_logits[-1],
            'pred_sub_boxes': sub_boxes[-1],
            'pred_obj_boxes': obj_boxes[-1],
            'pred_matching_logits': matching_logits[-1],
            'pred_obj_logits_cascade': obj_logits,
            'pred_verb_logits_cascade': verb_logits,
            'pred_sub_boxes_cascade': sub_boxes,
            'pred_obj_boxes_cascade': obj_boxes,
        }
        if self.aux_loss and self.training:
            out['aux_outputs'] = self._set_aux_loss(
                obj_logits, verb_logits, sub_boxes, obj_boxes,
                None, None, None, matching_logits)
        return out

    def forward(self, samples: NestedTensor, targets=None):
        """Full pass: backbone -> transformer neck -> prediction heads."""
        srcs, masks, poses = self.backbone_forward(samples)
        neck_out = self.neck_forward(srcs, masks, poses, self.query_embed.weight)
        out = self.head_forward(neck_out)
        out.update(neck_out)  # expose raw neck outputs alongside predictions
        return out

    @torch.jit.unused
    def _set_aux_loss(self, outputs_obj_class, outputs_verb_class, outputs_sub_coord, outputs_obj_coord,
                      outputs_rel_coord=None, outputs_verb_topK=None, HOI_score=None, outputs_matching=None):
        """Package intermediate-layer predictions for auxiliary losses.

        Only the last ``min(dec_layers_hopd, dec_layers_interaction)`` layers
        are used, excluding the final layer (already in the main output).
        """
        depth = min(self.dec_layers_hopd, self.dec_layers_interaction)
        aux = slice(-depth, -1)
        return [
            {
                'pred_obj_logits': oc,
                'pred_verb_logits': vc,
                'pred_sub_boxes': sb,
                'pred_obj_boxes': ob,
                'pred_matching_logits': mm,
            }
            for oc, vc, sb, ob, mm in zip(
                outputs_obj_class[aux], outputs_verb_class[aux],
                outputs_sub_coord[aux], outputs_obj_coord[aux],
                outputs_matching[aux])
        ]



def get_weight_dict(args):
    """Map each loss name to its scalar coefficient taken from *args*.

    When ``args.aux_loss`` is set, every weight is replicated with an
    ``_{i}`` suffix for each auxiliary decoder layer.

    NOTE(review): this module-level definition shadows the
    ``get_weight_dict`` imported from ``.cdn`` at the top of the file;
    ``build`` resolves to this one.
    """
    weight_dict = {
        'loss_obj_ce': args.obj_loss_coef,
        'loss_verb_ce': args.verb_loss_coef,
        'loss_sub_bbox': args.bbox_loss_coef,
        'loss_obj_bbox': args.bbox_loss_coef,
        'loss_sub_giou': args.giou_loss_coef,
        'loss_obj_giou': args.giou_loss_coef,
        'loss_matching': args.matching_loss_coef,
    }

    if args.aux_loss:
        # One copy of every base weight per auxiliary layer; the final layer
        # (index depth-1) is covered by the unsuffixed keys.
        depth = min(args.dec_layers_hopd, args.dec_layers_interaction)
        base = dict(weight_dict)  # snapshot before adding suffixed keys
        for i in range(depth - 1):
            weight_dict.update({f'{k}_{i}': v for k, v in base.items()})
    return weight_dict

def get_losses(args):
    """Return the list of loss-term names consumed by ``SetCriterionHOI``.

    *args* is accepted for interface symmetry with ``get_weight_dict`` but
    is currently unused. NOTE(review): shadows the ``get_losses`` imported
    from ``.cdn`` at the top of the file.
    """
    return [
        'obj_labels',
        'verb_labels',
        'sub_obj_boxes',
        'obj_cardinality',
        'matching_labels',
    ]