# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import torch
from torch import nn

from .detr import DETR
from .module.modules import MLP

from .backbones.backbone import build_backbone
from .necks.transformer import build_transformer
from .matcher.matcherhoi import build_matcher
from .losses.qpic_plus_setcriterionhoi import SetCriterionHOI
from .postprocess.postprocesshoi import PostProcessHOI

BN_MOMENTUM = 0.1

def build(args):
    """Assemble the QPIC HOI-detection pipeline from command-line args.

    Args:
        args: parsed argument namespace; must provide ``device``,
            ``num_obj_classes``, ``num_verb_classes``, ``num_queries``,
            ``aux_loss`` and whatever settings the backbone / transformer /
            matcher / criterion builders consume.

    Returns:
        tuple: ``(model, criterion, postprocessors)`` — the QPIC network,
        a ``SetCriterionHOI`` moved to the target device, and a dict
        mapping ``'hoi'`` to a ``PostProcessHOI`` instance.
    """
    device = torch.device(args.device)

    # NOTE: the former dataset-based num_classes computation (20/91/250)
    # was dead code — the model is configured from args.num_obj_classes.

    # Backbone: CNN feature extractor + positional embedding,
    # e.g. N, 3, 512, 512 -> N, 2048, 16, 16
    backbone = build_backbone(args)
    # Transformer encoder + decoder,
    # e.g. N, 2048, 16, 16 -> N, num_queries, hidden_dim
    transformer = build_transformer(args)

    # TODO: 1. revise the network structure
    model = QPIC(
        backbone,
        transformer,
        num_obj_classes=args.num_obj_classes,
        num_verb_classes=args.num_verb_classes,  # extra head vs. plain DETR
        num_queries=args.num_queries,
        aux_loss=args.aux_loss,
    )

    # ------------------------------------------------------------------
    # Loss computation
    # ------------------------------------------------------------------
    # Matcher pairs the N x num_queries predictions with ground truth
    # (Hungarian algorithm).
    # TODO: 2. revise the matching algorithm
    matcher = build_matcher(args)
    weight_dict = get_weight_dict(args)
    losses = get_losses(args)

    criterion = SetCriterionHOI(args, matcher=matcher, losses=losses, weight_dict=weight_dict)
    criterion.to(device)
    postprocessors = {'hoi': PostProcessHOI(args)}

    return model, criterion, postprocessors

class QPIC(DETR):
    """QPIC-style HOI detector built on the DETR framework.

    Replaces DETR's single classification/box head with four heads that
    decode each query into a (human, object, verb) triplet: object class,
    object box, verb class, and subject (human) box.
    """

    def __init__(self, backbone, transformer, num_obj_classes, num_verb_classes, num_queries, aux_loss=False):
        # NOTE(review): the base class receives (num_obj_classes, num_queries,
        # num_verb_classes) in that order — confirm this matches the
        # DETR.__init__ signature, which is not visible in this file.
        super().__init__(backbone, transformer, num_obj_classes, num_queries, num_verb_classes, aux_loss)
        #   detection slots: 100

    def build_head(self, num_obj_classes, num_verb_classes):
        # Builds one head per output; presumably invoked by DETR.__init__
        # after self.hidden_dim is set — TODO confirm against the base class.
        hidden_dim = self.hidden_dim
        # +1 output for the "no object" / background class
        self.obj_class_embed = nn.Linear(hidden_dim, num_obj_classes + 1)
        self.obj_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        self.verb_class_embed = nn.Linear(hidden_dim, num_verb_classes)
        self.sub_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)


    def head_forward(self, outputs):
        """Decode transformer queries into HOI predictions.

        Args:
            outputs: dict containing 'out_query', indexed below as a 4-D
                tensor — presumably (num_decoder_layers, N, num_queries,
                hidden_dim), so [-1] selects the last decoder layer.
                TODO confirm against the transformer's return value.

        Returns:
            dict with last-layer predictions ('pred_obj_logits',
            'pred_verb_logits', 'pred_sub_boxes', 'pred_obj_boxes'),
            the all-layer '*_cascade' tensors, and — when aux_loss is
            enabled during training — per-intermediate-layer 'aux_outputs'.
        """
        # Queries are treated as three equal groups of num_queries/3.
        split = int(self.num_queries/3)
        x = outputs['out_query']
        #   outputs_obj_class N, 300, 80
        outputs_obj_class = self.obj_class_embed(x)
        #   outputs_obj_coord N, 300, 4
        outputs_obj_coord = self.obj_bbox_embed(x).sigmoid()
        #   outputs_verb_class N, 300, 127
        outputs_verb_class = self.verb_class_embed(x)
        #   outputs_sub_coord N, 300, 4
        outputs_sub_coord = self.sub_bbox_embed(x).sigmoid()

        # Regroup the three query groups along the query dimension (dim 2).
        # NOTE(review): obj_class and obj_coord keep the original (0,1,2)
        # order — a no-op concatenation — while sub_coord is rotated to
        # (1,2,0) and verb_class to (2,0,1), pairing each output type with
        # a different query group. Confirm the rotations are intentional.
        outputs_obj_class = torch.cat((
            outputs_obj_class[:, :, 0*split : 1*split, :], 
            outputs_obj_class[:, :, 1*split : 2*split, :], 
            outputs_obj_class[:, :, 2*split : 3*split, :],), 2)
        # outputs_obj_class = outputs_obj_class[:, :, 0*split : 1*split, :]
        outputs_obj_coord = torch.cat((
            outputs_obj_coord[:, :, 0*split : 1*split, :], 
            outputs_obj_coord[:, :, 1*split : 2*split, :], 
            outputs_obj_coord[:, :, 2*split : 3*split, :],), 2)
        # outputs_obj_coord = outputs_obj_coord[:, :, 0*split : 1*split, :]
        outputs_sub_coord = torch.cat((
            outputs_sub_coord[:, :, 1*split : 2*split, :], 
            outputs_sub_coord[:, :, 2*split : 3*split, :], 
            outputs_sub_coord[:, :, 0*split : 1*split, :],), 2)
        #outputs_sub_coord = outputs_sub_coord[:, :, 1*split : 2*split, :]
        outputs_verb_class = torch.cat((
            outputs_verb_class[:, :, 2*split : 3*split, :], 
            outputs_verb_class[:, :, 0*split : 1*split, :], 
            outputs_verb_class[:, :, 1*split : 2*split, :],), 2)      
        #outputs_verb_class = outputs_verb_class[:, :, 2*split : 3*split, :]
        # [-1]: last decoder layer only; '*_cascade': all layers stacked.
        out = {'pred_obj_logits': outputs_obj_class[-1], 
               'pred_verb_logits': outputs_verb_class[-1],
               'pred_sub_boxes': outputs_sub_coord[-1], 
               'pred_obj_boxes': outputs_obj_coord[-1],

               'pred_obj_logits_cascade': outputs_obj_class,
               'pred_verb_logits_cascade': outputs_verb_class,
               'pred_sub_boxes_cascade': outputs_sub_coord,
               'pred_obj_boxes_cascade': outputs_obj_coord, }
        if self.aux_loss and self.training:
            out['aux_outputs'] = self._set_aux_loss(outputs_obj_class, outputs_verb_class, outputs_sub_coord, outputs_obj_coord)
        return out
    
    @torch.jit.unused
    def _set_aux_loss(self, outputs_obj_class, outputs_verb_class, outputs_sub_coord, outputs_obj_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        # [:-1] drops the final layer — it is already reported in `out`.
        return [{'pred_obj_logits':  a, 
                 'pred_verb_logits': b, 
                 'pred_sub_boxes':   c, 
                 'pred_obj_boxes':   d}
                for a, b, c, d in zip(outputs_obj_class[:-1], outputs_verb_class[:-1],
                                      outputs_sub_coord[:-1], outputs_obj_coord[:-1])]

def get_weight_dict(args):
    """Build the loss-name -> coefficient mapping for SetCriterionHOI.

    Produces six weighted loss terms for each of the three cascade
    stages (suffix ``_0`` .. ``_2``); when ``args.aux_loss`` is set,
    every key is additionally replicated per intermediate decoder layer
    with a second ``_{layer}`` suffix (e.g. ``loss_obj_ce_0_1``).

    Args:
        args: namespace providing obj/verb/bbox/giou loss coefficients,
            ``aux_loss`` and ``dec_layers_hopd``.

    Returns:
        dict[str, float]: loss name -> weight.
    """
    coef_by_loss = {
        'loss_obj_ce': args.obj_loss_coef,
        'loss_verb_ce': args.verb_loss_coef,
        'loss_sub_bbox': args.bbox_loss_coef,
        'loss_obj_bbox': args.bbox_loss_coef,
        'loss_sub_giou': args.giou_loss_coef,
        'loss_obj_giou': args.giou_loss_coef,
    }
    weight_dict = {
        f'{name}_{stage}': coef
        for stage in range(3)
        for name, coef in coef_by_loss.items()
    }
    # TODO this is a hack
    if args.aux_loss:
        # Replicate every weight for each intermediate decoder layer.
        aux_weight_dict = {
            f'{key}_{layer}': coef
            for layer in range(args.dec_layers_hopd - 1)
            for key, coef in weight_dict.items()
        }
        weight_dict.update(aux_weight_dict)
    return weight_dict

def get_losses(args):
    """Return the list of loss names SetCriterionHOI should compute.

    Args:
        args: command-line namespace; currently unused, kept so all
            builder helpers share a uniform ``(args)`` signature.

    Returns:
        list[str]: loss identifiers for object labels, verb labels,
        subject/object boxes, and object cardinality.
    """
    return [
        'obj_labels',
        'verb_labels',
        'sub_obj_boxes',
        'obj_cardinality',
    ]