
import torch
from torch import nn
from util.misc import NestedTensor
import math
from .module.modules import MLP
import numpy as np

from .cdn import CDN, get_weight_dict, get_losses


from .backbones.backbone import build_backbone
from .necks.my_transformer import build_transformer
from .matcher.newmy_matcherhoi import build_matcher
from .losses.newmy_setcriterionhoi import SetCriterionHOI
from .postprocess.newmy_postprocesshoi import PostProcessHOI


def build(args):
    """Assemble the full HOI pipeline from config ``args``.

    Returns a ``(model, criterion, postprocessors)`` triple: the MYNET
    detector, the SetCriterionHOI loss moved to ``args.device``, and a
    dict with the 'hoi' post-processor.
    """
    args.use_matching = False  # matching branch is disabled for this variant
    device = torch.device(args.device)

    model = MYNET(
        build_backbone(args),
        build_transformer(args),
        num_obj_classes=args.num_obj_classes,
        num_verb_classes=args.num_verb_classes,
        num_queries=args.num_queries,
        aux_loss=args.aux_loss,
        args=args,
    )

    criterion = SetCriterionHOI(
        args=args,
        matcher=build_matcher(args),
        weight_dict=get_weight_dict(args),
        losses=get_losses(args),
    )
    criterion.to(device)

    postprocessors = {'hoi': PostProcessHOI(args)}

    return model, criterion, postprocessors


class MYNET(CDN):
    """CDN-based HOI detector with a dedicated human-query decoder branch.

    Extends the CDN base with a separate human query embedding and, once
    ``epoch >= args.epoch_limit``, runs the subject/object detection heads
    under ``torch.no_grad()`` (effectively freezing them for loss purposes).
    """

    def __init__(self, backbone, transformer, num_obj_classes, num_verb_classes, num_queries, aux_loss=False, args=None):
        super(MYNET, self).__init__(backbone, transformer, num_obj_classes, num_verb_classes, num_queries, aux_loss, args)
        # Epoch from which the sub/obj heads are evaluated without gradients.
        self.epoch_limit = args.epoch_limit

    def build_head(self, num_classes, num_verb_classes):
        """Create the prediction heads (invoked by the CDN base class).

        Args:
            num_classes: number of object categories (background added here).
            num_verb_classes: number of verb/interaction categories.
        """
        hidden_dim = self.hidden_dim
        # Object branch: +1 output for the "no object" background class.
        self.obj_class_embed = nn.Linear(hidden_dim, num_classes + 1)
        self.obj_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)

        # Subject (human) branch: human vs. background -> 2 logits.
        self.sub_class_embed = nn.Linear(hidden_dim, 1 + 1)
        self.sub_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)

        self.verb_class_embed = nn.Linear(hidden_dim, num_verb_classes)

        # NOTE(review): 100 and 256 look like num_queries and hidden_dim —
        # confirm and parameterize; a hidden_dim != 256 config would break here.
        self.human_query_embed = nn.Embedding(100, 256)

    def forward(self, samples, targets=None, epoch=0):
        """Full forward pass: backbone -> transformer neck -> heads.

        Returns the head outputs merged with the raw neck outputs.
        """
        srcs, masks, poses = self.backbone_forward(samples)
        outputs = self.neck_forward(srcs, masks, poses, self.query_embed.weight,
                                    self.human_query_embed.weight, epoch)
        out = self.head_forward(outputs, epoch)
        out.update(outputs)
        return out

    def neck_forward(self, srcs, masks, poses, query_embed, human_query_embed=None, epoch=0):
        """Run the transformer neck; returns its dict of decoder outputs."""
        return self.transformer(srcs, masks, query_embed, poses, human_query_embed, epoch)

    def _run_detection_heads(self, outputs):
        """Apply the sub/obj classification and box heads to decoder outputs.

        Returns (obj_class, obj_coord, sub_class, sub_coord); boxes are
        sigmoid-normalized.
        """
        object_decoder_out = outputs['out_query']
        outputs_obj_class = self.obj_class_embed(object_decoder_out)
        outputs_obj_coord = self.obj_bbox_embed(object_decoder_out).sigmoid()

        human_decoder_out = outputs['human_decoder_out']
        outputs_sub_class = self.sub_class_embed(human_decoder_out)
        outputs_sub_coord = self.sub_bbox_embed(human_decoder_out).sigmoid()
        return outputs_obj_class, outputs_obj_coord, outputs_sub_class, outputs_sub_coord

    def head_forward(self, outputs, epoch=0):
        """Compute all prediction heads and package them for the criterion.

        Past ``self.epoch_limit`` the sub/obj heads run under no_grad; the
        verb head always keeps gradients. (Fix: the two branches previously
        duplicated the whole head computation — only the grad mode differs.)
        """
        if epoch >= self.epoch_limit:
            with torch.no_grad():
                (outputs_obj_class, outputs_obj_coord,
                 outputs_sub_class, outputs_sub_coord) = self._run_detection_heads(outputs)
        else:
            (outputs_obj_class, outputs_obj_coord,
             outputs_sub_class, outputs_sub_coord) = self._run_detection_heads(outputs)

        interaction_decoder_out = outputs['interaction_decoder_out']
        outputs_verb_class = self.verb_class_embed(interaction_decoder_out)

        # Index [-1] keeps only the last decoder layer for the main loss;
        # the *_cascade entries expose every layer's predictions.
        out = {'pred_obj_logits': outputs_obj_class[-1],
               'pred_verb_logits': outputs_verb_class[-1],
               'pred_sub_boxes': outputs_sub_coord[-1],
               'pred_obj_boxes': outputs_obj_coord[-1],
               'pred_sub_logits': outputs_sub_class[-1],

               'pred_obj_logits_cascade': outputs_obj_class,
               'pred_sub_logits_cascade': outputs_sub_class,
               'pred_verb_logits_cascade': outputs_verb_class,
               'pred_sub_boxes_cascade': outputs_sub_coord,
               'pred_obj_boxes_cascade': outputs_obj_coord,
               }

        if self.aux_loss and self.training:
            out['aux_outputs'] = self._set_aux_loss(outputs_obj_class, outputs_verb_class,
                                                    outputs_sub_coord, outputs_obj_coord,
                                                    outputs_sub_class)
        return out

    @torch.jit.unused
    def _set_aux_loss(self, outputs_obj_class, outputs_verb_class, outputs_sub_coord, outputs_obj_coord,
                      outputs_sub_class=None):
        """Package per-layer intermediate predictions for auxiliary losses.

        Slices the last ``min_dec_layers_num`` layers, excluding the final
        one (which feeds the main loss).
        """
        # Fix: a min(self.dec_layers_hopd, self.dec_layers_interaction)
        # assignment was immediately overwritten; the hard-coded 6 must stay
        # in sync with get_weight_dict's range(min_dec_layers_num - 1).
        min_dec_layers_num = 6
        return [{'pred_obj_logits': a,
                 'pred_verb_logits': b,
                 'pred_sub_boxes': c,
                 'pred_obj_boxes': d,
                 'pred_sub_logits': e,
                 } for a, b, c, d, e in zip(outputs_obj_class[-min_dec_layers_num:-1],
                                            outputs_verb_class[-min_dec_layers_num:-1],
                                            outputs_sub_coord[-min_dec_layers_num:-1],
                                            outputs_obj_coord[-min_dec_layers_num:-1],
                                            outputs_sub_class[-min_dec_layers_num:-1])]



def get_weight_dict(args):
    """Build the loss-name -> coefficient mapping for SetCriterionHOI.

    NOTE(review): this module-level definition shadows the get_weight_dict
    imported from .cdn at the top of the file — confirm that is intended.

    Args:
        args: config namespace providing obj/verb/bbox/giou loss coefficients
            and the ``aux_loss`` flag.

    Returns:
        Dict of base loss weights, plus ``<name>_<i>`` copies for each
        auxiliary decoder layer when ``args.aux_loss`` is set.
    """
    weight_dict = {
        'loss_obj_ce': args.obj_loss_coef,
        # Subject CE reuses the object coefficient.
        'loss_sub_ce': args.obj_loss_coef,
        'loss_verb_ce': args.verb_loss_coef,
        'loss_sub_bbox': args.bbox_loss_coef,
        'loss_obj_bbox': args.bbox_loss_coef,
        'loss_sub_giou': args.giou_loss_coef,
        'loss_obj_giou': args.giou_loss_coef,
    }

    if args.aux_loss:
        # Fix: removed a dead min(args.dec_layers_hopd, args.dec_layers_interaction)
        # whose result was immediately overwritten. The hard-coded 6 must stay in
        # sync with MYNET._set_aux_loss, which slices the last 6 decoder layers.
        min_dec_layers_num = 6
        aux_weight_dict = {}
        # One weighted copy per auxiliary layer (the final layer uses the base keys).
        for i in range(min_dec_layers_num - 1):
            aux_weight_dict.update({f'{k}_{i}': v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)
    return weight_dict

def get_losses(args):
    """Return the list of loss names enabled for the HOI criterion.

    ``args`` is currently unused; the parameter is kept for interface
    parity with the .cdn version this definition shadows.
    """
    return ['obj_labels', 'verb_labels', 'sub_obj_boxes', 'obj_cardinality',
            'sub_labels']