import os
import torch
import torch.nn as nn
import torch.nn.functional as F

from external.pytorch_pretrained_bert.modeling import BertPredictionHeadTransform
from common.module import Module

BERT_WEIGHTS_NAME = 'pytorch_model.bin'


class FTransformer(Module):
    """Text-only transformer classifier.

    Embeds token ids, encodes them with the encoder half of an
    ``nn.Transformer``, max-pools over the sequence, and classifies with a
    configurable MLP head (``2fc``, ``1fc``, or ``mlm`` style).

    Note: ``image``, ``boxes`` and ``im_info`` are accepted by the forward
    entry points for interface compatibility but are ignored — only the
    question text is used.
    """

    def __init__(self, config):
        super(FTransformer, self).__init__(config)

        hidden_size = config.NETWORK.VLBERT.hidden_size

        # NOTE(review): vocabulary size is hard-coded — confirm it matches
        # the tokenizer's vocab (30000 looks like an uncased BERT-ish vocab).
        self.embeddings = nn.Embedding(30000, hidden_size)

        # Only the encoder half is used (see _forward); the decoder's
        # parameters are allocated but never exercised.
        # d_model was previously hard-coded to 768, which only worked when
        # hidden_size happened to equal 768 — use the configured size so the
        # embedding width always matches the encoder width.
        self.transformer_model = nn.Transformer(
            d_model=hidden_size,
            nhead=16,
            num_encoder_layers=12,
        )

        # Classifier input width equals the encoder output width.
        dim = hidden_size
        if config.NETWORK.CLASSIFIER_TYPE == "2fc":
            self.final_mlp = torch.nn.Sequential(
                torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
                torch.nn.Linear(dim, config.NETWORK.CLASSIFIER_HIDDEN_SIZE),
                torch.nn.ReLU(inplace=True),
                torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
                torch.nn.Linear(config.NETWORK.CLASSIFIER_HIDDEN_SIZE, config.DATASET.ANSWER_VOCAB_SIZE),
            )
        elif config.NETWORK.CLASSIFIER_TYPE == "1fc":
            self.final_mlp = torch.nn.Sequential(
                torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
                torch.nn.Linear(dim, config.DATASET.ANSWER_VOCAB_SIZE),
            )
        elif config.NETWORK.CLASSIFIER_TYPE == 'mlm':
            print("config.DATASET.ANSWER_VOCAB_SIZE:", config.DATASET.ANSWER_VOCAB_SIZE)
            print("config.NETWORK.VLBERT.hidden_size", config.NETWORK.VLBERT.hidden_size)
            transform = BertPredictionHeadTransform(config.NETWORK.VLBERT)
            linear = nn.Linear(hidden_size, config.DATASET.ANSWER_VOCAB_SIZE)
            self.final_mlp = nn.Sequential(
                transform,
                nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
                linear,
            )
        else:
            raise ValueError("Not support classifier type: {}!".format(config.NETWORK.CLASSIFIER_TYPE))

    def _forward(self, question):
        """Encode *question* and return classification logits.

        Args:
            question: LongTensor of token ids; assumes shape
                ``(batch, seq_len)`` — TODO confirm against the data loader.

        Returns:
            Tensor of shape ``(batch, ANSWER_VOCAB_SIZE)``.
        """
        em = self.embeddings(question)  # (batch, seq, hidden)
        # BUGFIX: nn.Transformer (constructed without batch_first) expects
        # (seq, batch, hidden). The previous code fed (batch, seq, hidden)
        # directly, which made self-attention attend across *batch elements*
        # instead of across tokens. Transpose in, pool over the sequence
        # axis (dim 0 after the transpose).
        encoded = self.transformer_model.encoder(em.transpose(0, 1))
        pooled = encoded.max(dim=0)[0]  # (batch, hidden) max-pool over tokens
        return self.final_mlp(pooled)

    def train_forward(self,
                      image,
                      boxes,
                      im_info,
                      question,
                      label,
                      ):
        """Training step: compute logits and multi-label BCE loss.

        ``image``/``boxes``/``im_info`` are ignored (text-only model).
        ``label`` is a float multi-hot tensor of shape
        ``(batch, ANSWER_VOCAB_SIZE)``.
        """
        logits = self._forward(question)
        outputs = {}
        # Mean BCE rescaled by the number of classes, i.e. the per-example
        # *sum* over classes averaged over the batch (standard VQA-style loss).
        ans_loss = F.binary_cross_entropy_with_logits(logits, label) * label.size(1)

        outputs.update({'label_logits': logits,
                        'label': label,
                        'ans_loss': ans_loss})

        # ans_loss is already a scalar; .mean() is a harmless no-op kept for
        # interface parity with the framework's other models.
        loss = ans_loss.mean()

        return outputs, loss

    def inference_forward(self,
                          image,
                          boxes,
                          im_info,
                          question):
        """Inference step: return logits only (no labels required)."""
        outputs = {}
        logits = self._forward(question)
        outputs.update({'label_logits': logits})
        return outputs