import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from external.pytorch_pretrained_bert.modeling import BertPredictionHeadTransform
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_bert import VisualLinguisticBert
from common.TransformerBlock import MultiheadAttention
from common.utils.feature_fusion import FeatureFusion
from transformers import BertForSequenceClassification
bert_path = '../TextAttack/model/distilbert_nohttp/'

import numpy as np


BERT_WEIGHTS_NAME = 'pytorch_model.bin'


class ResNetVLBERT(Module):
    def __init__(self, config):
        """Assemble the multimodal classifier.

        Components (wired together in ``_forward_``): a Fast R-CNN visual
        feature extractor, a VL-BERT backbone (only its pooled output is used
        downstream), a separate text BERT, a transformer encoder over object
        embeddings, optional cross-modal attention ('TRAN'), max/mean pooling,
        feature fusion, and a final MLP classifier. Also owns the adversarial
        perturbation tensors that are assigned manually from stored gradients.

        :param config: experiment config (NETWORK / DATASET / TRAIN namespaces)
        """

        super(ResNetVLBERT, self).__init__(config)

        self.enable_cnn_reg_loss = config.NETWORK.ENABLE_CNN_REG_LOSS  # False in the shipped configs
        if not config.NETWORK.BLIND:  # BLIND is False here, i.e. visual features are used
            self.image_feature_extractor = FastRCNN(config,
                                                    average_pool=True,
                                                    final_dim=config.NETWORK.IMAGE_FINAL_DIM,
                                                    enable_cnn_reg_loss=self.enable_cnn_reg_loss)  # visual feature extractor
            if config.NETWORK.VLBERT.object_word_embed_mode == 1:
                self.object_linguistic_embeddings = nn.Embedding(81, config.NETWORK.VLBERT.hidden_size)
            elif config.NETWORK.VLBERT.object_word_embed_mode == 2:  # mode used here: one shared object embedding
                self.object_linguistic_embeddings = nn.Embedding(1, config.NETWORK.VLBERT.hidden_size)
            elif config.NETWORK.VLBERT.object_word_embed_mode == 3:
                self.object_linguistic_embeddings = None
            else:
                raise NotImplementedError
        self.image_feature_bn_eval = config.NETWORK.IMAGE_FROZEN_BN

        self.tokenizer = BertTokenizer.from_pretrained(config.NETWORK.BERT_MODEL_NAME)

        # Resolve the pretrained language-model weights: an explicit
        # BERT_PRETRAINED checkpoint wins; otherwise look for
        # pytorch_model.bin inside the BERT_MODEL_NAME directory.
        language_pretrained_model_path = None
        if config.NETWORK.BERT_PRETRAINED != '':
            language_pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.BERT_PRETRAINED,
                                                                      config.NETWORK.BERT_PRETRAINED_EPOCH)
        elif os.path.isdir(config.NETWORK.BERT_MODEL_NAME):
            weight_path = os.path.join(config.NETWORK.BERT_MODEL_NAME, BERT_WEIGHTS_NAME)
            if os.path.isfile(weight_path):
                language_pretrained_model_path = weight_path
        self.language_pretrained_model_path = language_pretrained_model_path
        if language_pretrained_model_path is None:
            print("Warning: no pretrained language model found, training from scratch!!!")

        print("language_pretrained_model_path:",language_pretrained_model_path)  # e.g. ./model/pretrained_model/bert-base-uncased/pytorch_model.bin

        self.vlbert = VisualLinguisticBert(config.NETWORK.VLBERT,
                                         language_pretrained_model_path=language_pretrained_model_path)

        # for name, param in self.vlbert.named_parameters():
        #     print("self.vlbert.name:", name)

        # self.hm_out = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.NETWORK.VLBERT.hidden_size)
        # self.hi_out = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.NETWORK.VLBERT.hidden_size)

        # NOTE(review): `dim` is unused — it is only referenced by the
        # commented-out classifier variants below.
        dim = config.NETWORK.VLBERT.hidden_size
        # if config.NETWORK.CLASSIFIER_TYPE == "2fc":
        #     self.final_mlp = torch.nn.Sequential(
        #         torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
        #         torch.nn.Linear(dim, config.NETWORK.CLASSIFIER_HIDDEN_SIZE),
        #         torch.nn.ReLU(inplace=True),
        #         torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
        #         torch.nn.Linear(config.NETWORK.CLASSIFIER_HIDDEN_SIZE, config.DATASET.ANSWER_VOCAB_SIZE),
        #     )
        # elif config.NETWORK.CLASSIFIER_TYPE == "1fc":
        #     self.final_mlp = torch.nn.Sequential(
        #         torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
        #         torch.nn.Linear(dim, config.DATASET.ANSWER_VOCAB_SIZE)
        #     )
        # elif config.NETWORK.CLASSIFIER_TYPE == 'mlm':
        #     print("config.DATASET.ANSWER_VOCAB_SIZE:",config.DATASET.ANSWER_VOCAB_SIZE)
        #     print("config.NETWORK.VLBERT.hidden_size",config.NETWORK.VLBERT.hidden_size)#768
        #     transform = BertPredictionHeadTransform(config.NETWORK.VLBERT)
        #     linear = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.DATASET.ANSWER_VOCAB_SIZE)
        #     self.final_mlp = nn.Sequential(
        #         transform,
        #         nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
        #         linear
        #     )
        # else:
        #     raise ValueError("Not support classifier type: {}!".format(config.NETWORK.CLASSIFIER_TYPE))
        # Classifier head: BERT prediction-head transform -> dropout -> linear.
        transform = BertPredictionHeadTransform(config.NETWORK.VLBERT)
        linear = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.DATASET.ANSWER_VOCAB_SIZE)
        self.final_mlp = nn.Sequential(
            transform,
            nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
            linear
        )
        # Gradient store: backward hooks in train_forward stash gradients
        # here keyed by name ('text_tran_rep', 'pic_tran_rep', 'pooled_rep').
        self.grad = {}

        # Adversarial perturbation setup.
        self.batch_size = config.TRAIN.BATCH_IMAGES #MyFix
        self.adv_coefficient = config.NETWORK.ADV_COEFFICIENT  # e.g. 0.0000001
        self.fusion_mode = config.NETWORK.FUSION_MODE  # TRAN/RAS_MAX/MEAN_RAW/ALIGN_MINUS/HADAMARD
        print("self.adv_coefficient={}".format(self.adv_coefficient))
        # self.test_tensor = nn.Parameter(torch.zeros(1, 768)) # hm perturbation (disabled)
        # NOTE(review): the perturbation tensors below are sized to the
        # configured batch size; a smaller final batch would shape-mismatch
        # when they are added in train_forward — confirm the data loader
        # drops the last partial batch.
        self.text_adv_tensor = nn.Parameter(torch.zeros(self.batch_size, 768))  # text perturbation
        self.pic_adv_tensor = nn.Parameter(torch.zeros(self.batch_size, 768))  # picture perturbation
        self.cls_adv_tensor = nn.Parameter(torch.zeros(self.batch_size, 768))  # [CLS]/pooled perturbation

        # co-attention-transformer
        self.text_transformer = MultiheadAttention(768, 768)  # text_transformer
        # self.text_conv = nn.Conv1d(in_channels=768, out_channels=768,kernel_size=2) #text_cnn
        self.text_maxpooling = nn.MaxPool1d(150 - 2 + 1)  # text max-pooling over a length-150 padded sequence
        self.pic_transformer = MultiheadAttention(768, 768)  # pic_transformer
        # self.pic_conv = nn.Conv1d(in_channels=768, out_channels=768,kernel_size=2) #pic_cnn
        self.pic_maxpooling = nn.MaxPool1d(150 - 2 + 1)  # pic max-pooling over a length-150 padded sequence


        # init weights
        self.init_weight()
        self.fix_params()

        #yl
        # cat_count: number of 768-d vectors concatenated before text_pic_connect.
        cat_count = 4
        if 'ALIGN' in self.fusion_mode and 'MINUS' in self.fusion_mode:
            self.feature_fusion = FeatureFusion(feature_dim=768, mode = 'MINUS', align=True)
        elif 'ALIGN' in self.fusion_mode and 'HADAMARD' in self.fusion_mode:
            self.feature_fusion = FeatureFusion(feature_dim=768, mode = 'HADAMARD', align=True)
        elif 'ALIGN' not in self.fusion_mode and 'HADAMARD' in self.fusion_mode:
            self.feature_fusion = FeatureFusion(feature_dim=768, mode = 'HADAMARD', align=False)
        elif 'ALIGN' not in self.fusion_mode and 'MINUS' in self.fusion_mode:
            self.feature_fusion = FeatureFusion(feature_dim=768, mode = 'MINUS', align=False)
        elif 'NOFU' in self.fusion_mode:
            cat_count = 3
        self.text_pic_connect = nn.Linear(768 * cat_count, 768)  # full connect
        #if 'SKIPVLB' in self.fusion_mode:
        # Projects the 1536-d concatenated object embedding down to 768.
        self.object_embedding_linear = nn.Linear(768 *2,768)
        #self.text_bert = BertForSequenceClassification.from_pretrained(bert_path, num_labels=2, output_attentions=False, output_hidden_states=True)
        self.text_bert = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2, output_attentions=False, output_hidden_states=True)
        # NOTE(review): a full nn.Transformer is built but only .encoder is
        # used (see _forward_); the decoder weights are dead parameters.
        self.pic_trans = nn.Transformer(d_model = 768, )
        self.perturbation = config.NETWORK.VLBERT.perturbation

    def init_weight(self):
        """Initialize trainable weights.

        Delegates to the visual extractor, gives the shared object embedding a
        small normal init, Xavier-initializes the classifier linears, and — for
        the 'mlm' classifier — loads the pretrained BERT prediction-head
        transform weights into ``self.final_mlp[0]``.
        """
        self.image_feature_extractor.init_weight()
        if self.object_linguistic_embeddings is not None:
            self.object_linguistic_embeddings.weight.data.normal_(mean=0.0, std=0.02)
        for module in self.final_mlp.modules():
            if isinstance(module, torch.nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                torch.nn.init.constant_(module.bias, 0)
        if self.config.NETWORK.CLASSIFIER_TYPE == 'mlm':
            pretrained = torch.load(self.language_pretrained_model_path)
            prefix = 'cls.predictions.transform.'
            pretrain_keys = [key for key in pretrained if key.startswith(prefix)]
            mlm_transform_state_dict = {}
            for key in pretrain_keys:
                # strip the prefix and map old tf-style names to torch names
                renamed = key[len(prefix):].replace('gamma', 'weight').replace('beta', 'bias')
                mlm_transform_state_dict[renamed] = pretrained[key]
            print("loading pretrained classifier transform keys: {}.".format(pretrain_keys))
            self.final_mlp[0].load_state_dict(mlm_transform_state_dict)

    def train(self, mode=True):
        """Set train/eval mode, keeping frozen batch-norm layers in eval."""
        super().train(mode)
        # frozen BN must not update running statistics during training
        if self.image_feature_bn_eval:
            self.image_feature_extractor.bn_eval()

    def fix_params(self):
        """Parameter-freezing hook — intentionally a no-op.

        Earlier experiments froze everything except a whitelist of layers
        (see version history); that code was removed, so every parameter
        currently remains trainable.
        """
        # no-op: keep all parameters trainable
        return None

    def _collect_obj_reps(self, span_tags, object_reps):
        """
        Gather the object representation referenced by each span tag.

        :param span_tags: [batch_size, ..leading_dims.., L] object indices;
                          negative entries are masked values (mapped to 0)
        :param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
        :return: [batch_size, ..leading_dims.., L, obj_dim]
        """
        # masked (negative) tags fall back to object 0
        clamped_tags = span_tags.clamp(min=0)
        # batch index reshaped to [B, 1, ..., 1] so it broadcasts over every
        # leading dimension of the tags
        batch_index = torch.arange(span_tags.shape[0], device=span_tags.device)
        batch_index = batch_index.view(-1, *([1] * (span_tags.dim() - 1))).expand_as(clamped_tags)
        gathered = object_reps[batch_index.reshape(-1), clamped_tags.reshape(-1)]
        return gathered.view(*clamped_tags.shape, -1)

    def prepare_text_from_qa(self, question, question_tags, question_mask, answer, answer_tags, answer_mask):
        """Pack question and answer into one ``[CLS] q [SEP] a [SEP]`` sequence.

        :param question: [batch, max_q_len] token ids (0 = padding)
        :param question_tags: [batch, max_q_len] grounding tag per token
        :param question_mask: [batch, max_q_len] bool mask of real tokens
        :param answer: [batch, max_a_len] token ids
        :param answer_tags: [batch, max_a_len] grounding tag per token
        :param answer_mask: [batch, max_a_len] bool mask of real tokens
        :return: (input_ids, input_type_ids, text_tags, input_mask, ans_pos)
                 where ans_pos is the index of the last answer token per row
                 (the [MASK] token in this model's usage).
        """
        batch_size, max_q_len = question.shape
        _, max_a_len = answer.shape
        # print("question_mask.sum(1):",question_mask.sum(1) )
        # +3 accounts for [CLS] and the two [SEP] tokens
        max_len = (question_mask.sum(1) + answer_mask.sum(1)).max() + 3     # 68
        # print("max_len:", max_len)
        cls_id, sep_id = self.tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]'])
        # per-row positions of the [SEP] after the question / after the answer
        q_end = 1 + question_mask.sum(1, keepdim=True)
        a_end = q_end + 1 + answer_mask.sum(1, keepdim=True)
        input_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
        input_mask = torch.ones((batch_size, max_len), dtype=torch.uint8, device=question.device)
        input_type_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
        text_tags = input_type_ids.new_zeros((batch_size, max_len))
        # grid_j[i, j] == j: position index within each padded row
        grid_i, grid_j = torch.meshgrid(torch.arange(batch_size, device=question.device),
                                        torch.arange(max_len, device=question.device))

        # mask out padding past the final [SEP]; segment 1 covers the answer span
        input_mask[grid_j > a_end] = 0
        input_type_ids[(grid_j > q_end) & (grid_j <= a_end)] = 1
        # slots holding question tokens (after [CLS]) / answer tokens (after first [SEP])
        q_input_mask = (grid_j > 0) & (grid_j < q_end)
        a_input_mask = (grid_j > q_end) & (grid_j < a_end)
        input_ids[:, 0] = cls_id
        input_ids[grid_j == q_end] = sep_id
        input_ids[grid_j == a_end] = sep_id
        input_ids[q_input_mask] = question[question_mask]
        input_ids[a_input_mask] = answer[answer_mask]
        text_tags[q_input_mask] = question_tags[question_mask]
        text_tags[a_input_mask] = answer_tags[answer_mask]

        return input_ids, input_type_ids, text_tags, input_mask, (a_end - 1).squeeze(1)


    def test_update(self):  # set the hm perturbation from its stored gradient
        # NOTE(review): dead code — `self.test_tensor` is commented out in
        # __init__ and the 'hm' gradient is only captured by commented-out
        # code in _forward_, so calling this raises KeyError/AttributeError.
        # print("O_div", self.r_div)
        test_grad = self.grad['hm']
        # L2-normalize the gradient and scale it to get the perturbation
        z_adv = (test_grad / torch.norm(test_grad, p=2)) * self.adv_coefficient
        self.test_tensor.data = z_adv.cuda()
    def test_restore(self):  # reset the hm perturbation to zero
        # NOTE(review): dead code — `self.test_tensor` is commented out in
        # __init__, so this raises AttributeError if called.
        # print("self.r_div.shape", self.r_div.shape)
        self.test_tensor.data = torch.zeros(self.test_tensor.shape).cuda()

    def text_update(self,f_text_file):
        """Set the text perturbation from the captured gradient and log it.

        The gradient of ``text_tran_rep`` (captured by the backward hook in
        train_forward) is L2-normalized, scaled by ``adv_coefficient`` and
        assigned into ``self.text_adv_tensor``.

        :param f_text_file: open writable file; one gradient row per line
        """
        text_grad = self.grad['text_tran_rep']
        text_adv = (text_grad / torch.norm(text_grad, p=2)) * self.adv_coefficient
        self.text_adv_tensor.data = text_adv.cuda()

        text_grad_list = text_grad.cpu().numpy().tolist()
        # build the log in one pass instead of quadratic string +=
        f_text_file.write(''.join(str(g) + '\n' for g in text_grad_list))

    def text_restore(self):
        """Reset the text perturbation to zero."""
        self.text_adv_tensor.data = torch.zeros_like(self.text_adv_tensor.data).cuda()

    def pic_update(self,f_pic_file):
        """Set the picture perturbation from the captured gradient and log it.

        The gradient of ``pic_tran_rep`` (captured by the backward hook in
        train_forward) is L2-normalized, scaled by ``adv_coefficient`` and
        assigned into ``self.pic_adv_tensor``.

        :param f_pic_file: open writable file; one gradient row per line
        """
        pic_grad = self.grad['pic_tran_rep']
        pic_adv = (pic_grad / torch.norm(pic_grad, p=2)) * self.adv_coefficient
        self.pic_adv_tensor.data = pic_adv.cuda()

        pic_grad_list = pic_grad.cpu().numpy().tolist()
        # build the log in one pass instead of quadratic string +=
        f_pic_file.write(''.join(str(g) + '\n' for g in pic_grad_list))

    def pic_restore(self):
        """Reset the picture perturbation to zero."""
        self.pic_adv_tensor.data = torch.zeros_like(self.pic_adv_tensor.data).cuda()

    def cls_update(self,f_cls_file):
        """Set the pooled/[CLS] perturbation from the captured gradient and log it.

        The gradient of ``pooled_rep`` (captured by the backward hook in
        train_forward) is L2-normalized, scaled by ``adv_coefficient`` and
        assigned into ``self.cls_adv_tensor``.

        :param f_cls_file: open writable file; one gradient row per line
        """
        cls_grad = self.grad['pooled_rep']
        cls_adv = (cls_grad / torch.norm(cls_grad, p=2)) * self.adv_coefficient
        self.cls_adv_tensor.data = cls_adv.cuda()

        cls_grad_list = cls_grad.cpu().numpy().tolist()
        # build the log in one pass instead of quadratic string +=
        f_cls_file.write(''.join(str(g) + '\n' for g in cls_grad_list))

    def cls_restore(self):
        """Reset the pooled/[CLS] perturbation to zero."""
        self.cls_adv_tensor.data = torch.zeros_like(self.cls_adv_tensor.data).cuda()

    def _forward_(self,image,boxes,im_info,question,):
        """Shared forward pass for training and inference.

        Extracts region features, encodes text with a separate BERT and the
        objects with a transformer encoder, optionally cross-attends the two
        modalities ('TRAN'), then pools each to a single vector per example.

        :param image: batch of images for the Fast R-CNN extractor
        :param boxes: [batch, num_boxes, 4+] region boxes; rows with
                      x0 <= -1.5 are padding sentinels
        :param im_info: per-image meta for the extractor
        :param question: [batch, max_q_len] token ids (0 = padding)
        :return: (text_tran_rep, pic_tran_rep, pooled_rep), each [batch, 768]
        :raises ValueError: if FUSION_MODE names neither 'MAX' nor 'MEAN'
        """
        images = image
        # drop padding boxes; truncate to the longest valid prefix in the batch
        box_mask = (boxes[:, :, 0] > - 1.5)
        max_len = int(box_mask.sum(1).max().item())
        box_mask = box_mask[:, :max_len]
        boxes = boxes[:, :max_len]

        obj_reps = self.image_feature_extractor(images=images,
                                                boxes=boxes,
                                                box_mask=box_mask,
                                                im_info=im_info,
                                                classes=None,
                                                segms=None)

        question_ids = question
        question_tags = question.new_zeros(question_ids.shape)
        question_mask = (question > 0.5)  # id 0 is padding

        # the "answer" is a single [MASK] token per example
        answer_ids = question_ids.new_zeros((question_ids.shape[0], 1)).fill_(
            self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
        answer_mask = question_mask.new_zeros(answer_ids.shape).fill_(1)
        answer_tags = question_tags.new_zeros(answer_ids.shape)

        # prepare text: [CLS] question [SEP] [MASK] [SEP]
        text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qa(question_ids,
                                                                                                       question_tags,
                                                                                                       question_mask,
                                                                                                       answer_ids,
                                                                                                       answer_tags,
                                                                                                       answer_mask)

        # align each text token with its grounded object representation
        text_visual_embeddings = self._collect_obj_reps(text_tags, obj_reps['obj_reps'])

        assert self.config.NETWORK.VLBERT.object_word_embed_mode == 2
        object_linguistic_embeddings = self.object_linguistic_embeddings(
            boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
        )
        # [batch, num_boxes, 1536]: visual feature + shared linguistic embedding
        object_vl_embeddings = torch.cat((obj_reps['obj_reps'], object_linguistic_embeddings), -1)

        hidden_states_text, hidden_states_objects, pooled_rep = self.vlbert(text_input_ids,
                                                                            text_token_type_ids,
                                                                            text_visual_embeddings,
                                                                            text_mask,
                                                                            object_vl_embeddings,
                                                                            box_mask,
                                                                            output_all_encoded_layers=False,
                                                                            output_text_and_object_separately=True)
        # NOTE: the VL-BERT token/object states are overwritten right below —
        # only pooled_rep from the VL-BERT call is actually used.
        hidden_states_text = self.text_bert(text_input_ids, text_mask)['hidden_states'][-1]
        hidden_states_objects = self.pic_trans.encoder(self.object_embedding_linear(object_vl_embeddings))
        if 'TRAN' in self.fusion_mode:
            # cross-modal attention: each modality queries against the other
            pic_tran_according_text = self.text_transformer(hidden_states_objects, hidden_states_text, hidden_states_text)
            text_tran_according_pic = self.pic_transformer(hidden_states_text, hidden_states_objects, hidden_states_objects)
        else:
            text_tran_according_pic = hidden_states_text
            pic_tran_according_text = hidden_states_objects

        if 'MAX' in self.fusion_mode:
            # zero-pad both sequences to length 150 (text is ~55, pic ~101)
            pad_text = (0, 0, 0, 150 - text_tran_according_pic.size()[1])
            pad_pic = (0, 0, 0, 150 - pic_tran_according_text.size()[1])
            # pad to [B,150,768], then permute to [B,768,150] for MaxPool1d
            text_padded = nn.ZeroPad2d(pad_text)(text_tran_according_pic).permute(0, 2, 1)
            pic_padded = nn.ZeroPad2d(pad_pic)(pic_tran_according_text).permute(0, 2, 1)
            # max-pool over time down to [B,768,1]
            text_pooled = self.text_maxpooling(text_padded)
            # fixed: previously used self.text_maxpooling here; the two pools
            # are currently identical, so results are unchanged
            pic_pooled = self.pic_maxpooling(pic_padded)
            # reshape to [B,768]
            text_tran_rep = text_pooled.reshape(text_pooled.size()[0], 768)
            pic_tran_rep = pic_pooled.reshape(pic_pooled.size()[0], 768)
        elif 'MEAN' in self.fusion_mode:
            # masked mean over valid tokens / boxes
            tm = text_mask.unsqueeze(-1).expand(-1, -1, 768)
            bm = box_mask.unsqueeze(-1).expand(-1, -1, 768)
            text_tran_rep = (text_tran_according_pic * tm).sum(dim=1) / tm.sum(dim=1)
            pic_tran_rep = (pic_tran_according_text * bm).sum(dim=1) / bm.sum(dim=1)
        else:
            # previously this fell through to an opaque NameError
            raise ValueError("FUSION_MODE must contain 'MAX' or 'MEAN': {}".format(self.fusion_mode))

        return text_tran_rep, pic_tran_rep, pooled_rep

    def train_forward(self,
                      image,
                      boxes,
                      im_info,
                      question,
                      label,
                      ):
        """Training forward pass.

        Runs the shared encoder, optionally registers gradient-capture hooks
        and adds the current adversarial perturbations, fuses the three
        representations, and computes the BCE-with-logits loss.

        :return: (outputs dict with 'label_logits'/'label'/'ans_loss', scalar loss)
        """
        self.text_bert.train()
        text_tran_rep, pic_tran_rep, pooled_rep = self._forward_(image, boxes, im_info, question)

        def save_grad(name, grad_dict):
            # hook factory: stash the backward gradient under `name`
            def hook(grad):
                grad_dict[name] = grad
            return hook

        if self.perturbation != 'NO':
            # capture gradients for the next adversarial step, then add the
            # perturbations computed from the previous step
            text_tran_rep.register_hook(save_grad('text_tran_rep', self.grad))
            text_tran_rep = text_tran_rep + self.text_adv_tensor
            pic_tran_rep.register_hook(save_grad('pic_tran_rep', self.grad))
            pic_tran_rep = pic_tran_rep + self.pic_adv_tensor
            pooled_rep.register_hook(save_grad('pooled_rep', self.grad))
            pooled_rep = pooled_rep + self.cls_adv_tensor

        if 'NOFU' in self.fusion_mode:
            pieces = (text_tran_rep, pic_tran_rep, pooled_rep)
        else:
            text_pic_fusion = self.feature_fusion(text_tran_rep, pic_tran_rep)
            pieces = (text_pic_fusion, text_tran_rep, pic_tran_rep, pooled_rep)
        hm = torch.cat(pieces, 1)

        # project the concatenation back to 768, then classify
        logits = self.final_mlp(self.text_pic_connect(hm))
        ans_loss = F.binary_cross_entropy_with_logits(logits, label) * label.size(1)

        outputs = {'label_logits': logits,
                   'label': label,
                   'ans_loss': ans_loss}
        return outputs, ans_loss.mean()

    def inference_forward(self,
                          image,
                          boxes,
                          im_info,
                          question):
        """Inference forward pass: no perturbations, no loss.

        :return: dict with 'label_logits'
        """
        self.text_bert.eval()
        text_tran_rep, pic_tran_rep, pooled_rep = self._forward_(image, boxes, im_info, question)

        if 'NOFU' in self.fusion_mode:
            pieces = (text_tran_rep, pic_tran_rep, pooled_rep)
        else:
            pieces = (self.feature_fusion(text_tran_rep, pic_tran_rep),
                      text_tran_rep, pic_tran_rep, pooled_rep)
        hm = torch.cat(pieces, 1)

        # project the concatenation back to 768, then classify
        logits = self.final_mlp(self.text_pic_connect(hm))
        return {'label_logits': logits}
