
import torch
from torch import nn
from .module.modules import MLP
import math

from .smca_detr import SMCA

from .backbones.backbone import build_backbone
from .necks.smca_cdn_transformer import build_transformer
from .matcher.matcherhoi import build_matcher
from .postprocess.postprocesshoi import PostProcessHOI
from .losses.cdn_setcriterionhoi import SetCriterionHOI

def build(args):
    """Assemble the SMCA-CDN HOI model together with its criterion and post-processors.

    Args:
        args: parsed command-line namespace; must carry device, class counts,
            query count, loss flags/coefficients and decoder-layer settings.

    Returns:
        (model, criterion, postprocessors) where postprocessors maps the task
        name 'hoi' to a PostProcessHOI instance.
    """
    device = torch.device(args.device)

    model = SMCA_CDN(
        build_backbone(args),
        build_transformer(args),
        num_obj_classes=args.num_obj_classes,
        num_verb_classes=args.num_verb_classes,
        num_queries=args.num_queries,
        aux_loss=args.aux_loss,
        args=args,
    )

    criterion = SetCriterionHOI(
        args=args,
        matcher=build_matcher(args),
        weight_dict=get_weight_dict(args),
        losses=get_losses(args),
    )
    criterion.to(device)

    return model, criterion, {'hoi': PostProcessHOI(args)}


class SMCA_CDN(SMCA):
    """SMCA detector with a CDN-style cascade head for HOI detection.

    The transformer neck yields two decoder streams: a human-object pair
    decoder ('out_query'), used for subject/object boxes and object classes,
    and an interaction decoder ('interaction_decoder_out') used for verbs.
    """

    def __init__(self, backbone, transformer, num_obj_classes, num_verb_classes, num_queries, aux_loss=False, args=None):
        """Build the model.

        Args:
            backbone: feature extractor (from build_backbone).
            transformer: SMCA-CDN transformer neck (from build_transformer).
            num_obj_classes: number of object categories ("no object" is added internally).
            num_verb_classes: number of verb/interaction categories.
            num_queries: number of HOI queries.
            aux_loss: if True, return per-intermediate-layer outputs for aux losses.
            args: namespace providing use_matching, dec_layers_hopd and
                dec_layers_interaction.
        """
        super(SMCA_CDN, self).__init__(backbone, transformer, num_obj_classes, num_queries, aux_loss, num_verb_classes)
        hidden_dim = self.hidden_dim
        self.use_matching = args.use_matching
        self.dec_layers_hopd = args.dec_layers_hopd
        self.dec_layers_interaction = args.dec_layers_interaction
        if self.use_matching:
            # Binary head scoring whether a query is a valid human-object pair.
            self.matching_embed = nn.Linear(hidden_dim, 2)
            nn.init.constant_(self.matching_embed.weight.data, 0)
            nn.init.constant_(self.matching_embed.bias.data, 0)

    def build_head(self, num_classes, num_verb_classes):
        """Create classification and box-regression heads (invoked by the base class)."""
        hidden_dim = self.hidden_dim
        self.obj_class_embed = nn.Linear(hidden_dim, num_classes + 1)  # +1: "no object"
        self.obj_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        self.verb_class_embed = nn.Linear(hidden_dim, num_verb_classes)
        self.sub_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)

        # Focal-loss-style bias init: every class starts near prior_prob
        # foreground probability after sigmoid.
        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        self.obj_class_embed.bias.data = torch.ones(num_classes + 1) * bias_value
        self.verb_class_embed.bias.data = torch.ones(num_verb_classes) * bias_value
        # Zero-init the final regression layers so initial box predictions
        # collapse onto the reference points.
        nn.init.constant_(self.obj_bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.obj_bbox_embed.layers[-1].bias.data, 0)
        nn.init.constant_(self.sub_bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.sub_bbox_embed.layers[-1].bias.data, 0)

    def head_forward(self, outputs):
        """Turn decoder features into HOI predictions.

        Args:
            outputs: dict with
                'out_query': per-layer hopd decoder features — assumed
                    (num_layers, batch, num_queries, hidden_dim), TODO confirm,
                'points': SMCA reference points — assumed (batch, num_queries, 2),
                'interaction_decoder_out': per-layer interaction decoder features.

        Returns:
            dict with last-layer predictions ('pred_*'), per-layer cascades
            ('pred_*_cascade'), optional 'pred_matching_logits', and, when
            training with aux_loss, 'aux_outputs'.
        """
        x = outputs['out_query']
        points = outputs['points']
        interaction_decoder_out = outputs['interaction_decoder_out']

        # Broadcast the reference points over every decoder layer.
        num_decoder = x.shape[0]
        points = points.unsqueeze(0).repeat(num_decoder, 1, 1, 1)

        outputs_matching = self.matching_embed(x) if self.use_matching else None

        # Box centers are predicted as offsets from the SMCA reference points.
        outputs_sub_coord = self.sub_bbox_embed(x)
        outputs_sub_coord[..., :2] = outputs_sub_coord[..., :2] + points
        outputs_sub_coord = outputs_sub_coord.sigmoid()

        # BUGFIX: the original added `points` to — and sigmoided — the first two
        # *object-class logit* channels (copy-paste slip from the coord path).
        # Offset the object-box centers like the subject boxes instead, and keep
        # the class predictions as raw logits as the 'pred_obj_logits' key implies.
        # NOTE(review): confirm against trained checkpoints / criterion code.
        outputs_obj_coord = self.obj_bbox_embed(x)
        outputs_obj_coord[..., :2] = outputs_obj_coord[..., :2] + points
        outputs_obj_coord = outputs_obj_coord.sigmoid()

        outputs_obj_class = self.obj_class_embed(x)

        # Verbs come from the interaction decoder stream.
        outputs_verb_class = self.verb_class_embed(interaction_decoder_out)

        out = {'pred_obj_logits': outputs_obj_class[-1],
               'pred_verb_logits': outputs_verb_class[-1],
               'pred_sub_boxes': outputs_sub_coord[-1],
               'pred_obj_boxes': outputs_obj_coord[-1],
               # Full per-layer stacks for cascade-style supervision.
               'pred_obj_logits_cascade': outputs_obj_class,
               'pred_verb_logits_cascade': outputs_verb_class,
               'pred_sub_boxes_cascade': outputs_sub_coord,
               'pred_obj_boxes_cascade': outputs_obj_coord,
               }

        if self.use_matching:
            # BUGFIX: the matching logits were computed but never returned, so
            # the configured 'matching_labels' loss had nothing to supervise.
            out['pred_matching_logits'] = outputs_matching[-1]

        if self.aux_loss and self.training:
            out['aux_outputs'] = self._set_aux_loss(outputs_obj_class, outputs_verb_class,
                                                    outputs_sub_coord, outputs_obj_coord,
                                                    outputs_matching)
        return out

    @torch.jit.unused
    def _set_aux_loss(self, outputs_obj_class, outputs_verb_class, outputs_sub_coord, outputs_obj_coord, outputs_matching=None):
        """Package intermediate decoder layers (all but the last) for aux losses.

        Only the last min(dec_layers_hopd, dec_layers_interaction) layers are
        used so both decoder streams contribute the same layer count, matching
        the `min_dec_layers_num - 1` aux entries created in get_weight_dict.
        """
        min_dec_layers_num = min(self.dec_layers_hopd, self.dec_layers_interaction)
        aux_outputs = [{'pred_obj_logits': a,
                        'pred_verb_logits': b,
                        'pred_sub_boxes': c,
                        'pred_obj_boxes': d}
                       for a, b, c, d in zip(outputs_obj_class[-min_dec_layers_num:-1],
                                             outputs_verb_class[-min_dec_layers_num:-1],
                                             outputs_sub_coord[-min_dec_layers_num:-1],
                                             outputs_obj_coord[-min_dec_layers_num:-1])]
        if outputs_matching is not None:
            # BUGFIX: also expose intermediate matching logits for aux supervision.
            for layer_out, m in zip(aux_outputs, outputs_matching[-min_dec_layers_num:-1]):
                layer_out['pred_matching_logits'] = m
        return aux_outputs



def get_weight_dict(args):
    """Map loss names to their coefficients for SetCriterionHOI.

    With aux_loss enabled, every base entry is duplicated with an '_{i}'
    suffix for each of the min(dec_layers_hopd, dec_layers_interaction) - 1
    intermediate decoder layers.
    """
    weight_dict = {
        'loss_obj_ce': args.obj_loss_coef,
        'loss_verb_ce': args.verb_loss_coef,
        'loss_sub_bbox': args.bbox_loss_coef,
        'loss_obj_bbox': args.bbox_loss_coef,
        'loss_sub_giou': args.giou_loss_coef,
        'loss_obj_giou': args.giou_loss_coef,
    }
    if args.use_matching:
        weight_dict['loss_matching'] = args.matching_loss_coef

    if args.aux_loss:
        num_aux = min(args.dec_layers_hopd, args.dec_layers_interaction) - 1
        aux_entries = {
            f'{name}_{layer}': coef
            for layer in range(num_aux)
            for name, coef in weight_dict.items()
        }
        weight_dict.update(aux_entries)
    return weight_dict

def get_losses(args):
    """Return the list of loss names SetCriterionHOI should compute."""
    base = ['obj_labels', 'verb_labels', 'sub_obj_boxes', 'obj_cardinality']
    extra = ['matching_labels'] if args.use_matching else []
    return base + extra

