

import copy
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np


import math


def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])


def count_fusion(x, y):
    """Fuse two feature tensors: ReLU(x + y) minus the squared difference (x - y)**2."""
    diff = x - y
    return F.relu(x + y) - diff * diff


class CrossModalCalibration(nn.Module):
    """Stacked inter-/intra-modal calibration between vision and semantic features.

    Each stage first exchanges information across modalities (cross-attention),
    then refines each modality independently (per-modality transformer layer).

    Args:
        hidden_dim: feature dimension of both modalities.
        nlayers: number of calibration stages.
    """

    def __init__(self, hidden_dim, nlayers = 1):
        super().__init__()
        self.nlayers = nlayers
        # Inter-Modal Calibration (InterC): one cross-attention layer per stage.
        self.CrossAtt = _get_clones(MHCrossAttLayer(hidden_dim, nheads = 2), nlayers)
        # Intra-Modal Enhance Calibration (IntraEC): per-modality refinement.
        self.vision_intra_trans = _get_clones(TransformerLayer(hidden_dim, nheads = 2), nlayers)
        self.semantic_intra_trans = _get_clones(TransformerLayer(hidden_dim, nheads = 2), nlayers)

    def forward(self, vx, sx):
        '''
        vx: vision features [6,2,100,256]
        sx: semantic features [6,2,100,256]
        '''
        for stage in range(self.nlayers):
            # Inter-modal exchange first, then intra-modal refinement.
            att_vx, att_sx = self.CrossAtt[stage](vx, sx)
            vx = self.vision_intra_trans[stage](att_vx)
            sx = self.semantic_intra_trans[stage](att_sx)
        return vx, sx


class CrossModalityGraph(nn.Module):
    """Cross-modal attention that gathers semantic (language) features into
    per-query vision features.

    ``attention_type`` selects the variant:
      * 'embedded_dot_pro'      -- scaled dot-product attention; forward
                                   asserts num_layers == 1.
      * 'multihead_transformer' -- 4-head attention with a learned per-head
                                   key-scaling vector, fused with the input
                                   via count_fusion.

    NOTE(review): the 'multihead_transformer' path reshapes both the hidden
    output and the input with the input's last dim (`hiddim`), so it implicitly
    assumes input_dim == hidden_dim -- confirm with callers.
    """
    def __init__(self, input_dim, hidden_dim, num_layers, attention_type='multihead_transformer'):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.attention_type = attention_type

        if attention_type == 'embedded_dot_pro':
            # Per-layer q/k/v projections: the first maps input_dim -> hidden_dim,
            # deeper layers stay at hidden_dim.
            self.semantic_q = [nn.Linear(input_dim, hidden_dim),]
            self.semantic_k = [nn.Linear(input_dim, hidden_dim),]
            self.semantic_v = [nn.Linear(input_dim, hidden_dim),]
            # self.semantic_proj_res = nn.Linear(input_dim, hidden_dim)
            for _ in range(num_layers-1):
                self.semantic_q.append(nn.Linear(hidden_dim, hidden_dim))
                self.semantic_k.append(nn.Linear(hidden_dim, hidden_dim))
                self.semantic_v.append(nn.Linear(hidden_dim, hidden_dim))
            # Re-wrap the plain lists as ModuleLists so parameters are registered.
            self.semantic_q = nn.ModuleList(self.semantic_q)
            self.semantic_k = nn.ModuleList(self.semantic_k)
            self.semantic_v = nn.ModuleList(self.semantic_v)
        elif attention_type == 'multihead_transformer':
            assert self.num_layers == 1
            self.head_num = 4
            # Per-head query/key projections (deep-cloned); one shared value projection.
            self.semantic_q = nn.Linear(input_dim, hidden_dim)
            self.semantic_k = nn.Linear(input_dim, hidden_dim)
            self.semantic_q = _get_clones(self.semantic_q, self.head_num)
            self.semantic_k = _get_clones(self.semantic_k, self.head_num)
            self.semantic_v = nn.Linear(input_dim, hidden_dim)
            # Learned per-head key scaling, initialised to 1/sqrt(hidden_dim)
            # (plays the role of the usual fixed attention temperature).
            self.coef = nn.ParameterList([nn.Parameter(torch.ones((hidden_dim, ), dtype = torch.float)/math.sqrt(hidden_dim), requires_grad = True) for _ in range(self.head_num)])

            self.LayerNorm = nn.LayerNorm([hidden_dim,])
            # Mix the concatenated heads back down to hidden_dim, then refine.
            self.W_t1 = nn.Linear(hidden_dim*self.head_num, hidden_dim)
            self.W_t2 = nn.Linear(hidden_dim, hidden_dim)


        # Fusion of the attended semantics with the (vision) input.
        self.fusion_1 = nn.Linear(hidden_dim, hidden_dim)
        self.fusion_2 = nn.Linear(hidden_dim, hidden_dim)
        self.semantic_gate = nn.Linear(hidden_dim, hidden_dim)  # NOTE(review): not used in forward


    def forward(self, x, y, cooccur_prior = None):
        '''
        x : vision features [6, 2, 100, 256]
        y : language features [117, 256]
        cooccur_prior : optional additive prior on the attention map
                        ('embedded_dot_pro' path only)
        '''
        if self.attention_type == 'embedded_dot_pro':
            assert self.num_layers == 1
            for i in range(self.num_layers):
                x_q = self.semantic_q[i](x)
                y_k = self.semantic_k[i](y)
                y_v = self.semantic_v[i](y)
                # x_att = torch.einsum('ac,bc->ab', x_q, x_k)
                # Scaled dot-product scores between every query and every word.
                x_att = torch.einsum('abce,de->abcd', x_q, y_k) / math.sqrt(self.hidden_dim) # [6, 2, 100, 117]
                x_att = F.softmax(x_att, dim = -1)
                if cooccur_prior is not None:
                    # NOTE: prior is added *after* softmax, so rows no longer sum to 1.
                    x_att = x_att + cooccur_prior
                    # print('cooccur')
                # Weighted sum of word values per query.
                semantic_agg = torch.einsum('abcd,de->abce', x_att, y_v)
            return semantic_agg


                # if i == 0:
                #     x = F.relu(torch.matmul(x_att, x_v)) + self.semantic_proj_res(x) # self.verb_calibration_embedding
                # else:
                #     x = F.relu(torch.matmul(x_att, x_v)) + x

        if self.attention_type == 'multihead_transformer':
            assert len(x.shape) == 4
            l, bs, q, hiddim = x.shape
            # Flatten the first two dims so attention runs as batched matmuls.
            x = x.reshape((l*bs, q, hiddim))

            assert len(y.shape) == 2
            y = y.unsqueeze(dim = 0)

            # Shared values, broadcast to every (layer, batch) slice.
            y_v = self.semantic_v(y).expand(l*bs, -1, -1)
            multihead_ft = []
            for i in range(self.head_num):
                x_q = self.semantic_q[i](x)  # lbs, q, hiddim
                y_k = self.semantic_k[i](y).expand(l*bs, -1, -1)  # lbs, q, hiddim
                # Learned per-head scaling replaces the fixed 1/sqrt(d) factor.
                y_k = y_k * self.coef[i].expand_as(y_k)

                x_att = torch.einsum('abc,adc->abd', x_q, y_k)
                x_att = F.softmax(x_att, dim = -1)
                att_ft = torch.bmm(x_att, y_v)
                multihead_ft.append(att_ft)

            # Concatenate heads, project back to hidden_dim, restore 4-D layout.
            multihead_ft = torch.cat(multihead_ft, dim = -1)
            semantic_aug = self.W_t2(F.relu(self.LayerNorm(self.W_t1(multihead_ft)), inplace = True))
            semantic_aug = semantic_aug.view((l, bs, q, hiddim))

            # Fuse attended semantics with the original vision features.
            modality_fus = count_fusion(self.fusion_1(semantic_aug), self.fusion_2(x.view((l, bs, q, hiddim))))
            # semantic_gate2 = torch.sigmoid(self.semantic_gate2(x))
            # modality_fus = count_fusion(self.fusion_1(semantic_gate2 * trans_ft), self.fusion_2(x))
            return modality_fus



class TransformerLayer(nn.Module):
    """Multi-head self-attention layer over [..., seq, hidden_dim] features.

    Three relation variants share the same overall structure
    (multi-head attention -> FFN -> residual):

    * 'bilinear'         - attention logits from two linear maps with a learned
                           per-head scaling vector; heads concatenated.
    * 'embedded_dot_pro' - scaled dot-product attention with per-head q/k
                           projections and a widened (2x) FFN bottleneck.
    * 'VanillaTrans'     - pre-norm scaled dot-product attention with dropout;
                           heads are summed instead of concatenated.

    Args:
        hidden_dim: feature dimension of the input/output.
        nheads: number of attention heads.
        relation: one of 'bilinear', 'embedded_dot_pro', 'VanillaTrans'.
        dropout: dropout rate (used by 'VanillaTrans' only).

    Raises:
        ValueError: if ``relation`` is not a supported variant.
    """

    def __init__(self, hidden_dim, nheads, relation = 'bilinear', dropout = 0.1):
        super().__init__()
        self.relation = relation
        self.hidden_dim = hidden_dim
        if self.relation == 'bilinear':
            self.nheads = nheads
            self.bilinear1 = nn.Linear(hidden_dim, hidden_dim)
            self.bilinear2 = nn.Linear(hidden_dim, hidden_dim)
            self.bilinear1 = _get_clones(self.bilinear1, nheads)
            self.bilinear2 = _get_clones(self.bilinear2, nheads)
            # Learned per-head scaling, initialised to 1/sqrt(hidden_dim).
            self.coef = nn.ParameterList([nn.Parameter(torch.ones((hidden_dim, ), dtype = torch.float)/math.sqrt(hidden_dim), requires_grad = True) for _ in range(nheads)])

            hid_hid_dim = hidden_dim//nheads
            self.W3 = nn.Linear(hidden_dim, hid_hid_dim)
            self.W3 = _get_clones(self.W3, nheads)
            self.W2 = nn.Linear(hidden_dim, hidden_dim)
            self.W1 = nn.Linear(hidden_dim, hidden_dim)
            self.nonlinear = nn.ReLU(inplace = True)
            self.LayerNorm = nn.LayerNorm([hidden_dim,])
        elif self.relation == 'embedded_dot_pro':
            self.nheads = nheads
            self.hidden_dim = hidden_dim
            self.hid_hid_dim = hidden_dim//nheads
            self.relation_q = nn.Linear(self.hidden_dim, self.hid_hid_dim)
            self.relation_k = nn.Linear(self.hidden_dim, self.hid_hid_dim)
            self.relation_q = _get_clones(self.relation_q, nheads)
            self.relation_k = _get_clones(self.relation_k, nheads)

            self.W3 = nn.Linear(self.hidden_dim, self.hid_hid_dim)
            self.W3 = _get_clones(self.W3, nheads)
            # hidden_dim // 0.5 == 2 * hidden_dim: the FFN widens before projecting back.
            self.bottleneck_dim = int(self.hidden_dim//0.5)
            self.W2 = nn.Linear(self.hidden_dim, self.bottleneck_dim)
            self.W1 = nn.Linear(self.bottleneck_dim, self.hidden_dim)
            self.nonlinear = nn.ReLU(inplace = True)
            self.LayerNorm = nn.LayerNorm([self.bottleneck_dim,])
        elif self.relation == 'VanillaTrans':
            self.nheads = nheads
            self.hidden_dim = hidden_dim
            self.hid_hid_dim = hidden_dim//nheads
            self.relation_q = nn.Linear(self.hidden_dim, self.hid_hid_dim)
            self.relation_k = nn.Linear(self.hidden_dim, self.hid_hid_dim)
            self.relation_q = _get_clones(self.relation_q, nheads)
            self.relation_k = _get_clones(self.relation_k, nheads)

            self.W3 = nn.Linear(self.hidden_dim, self.hidden_dim)
            self.W3 = _get_clones(self.W3, nheads)
            self.bottleneck_dim = int(self.hidden_dim)
            self.W2 = nn.Linear(self.hidden_dim, self.bottleneck_dim)
            self.W1 = nn.Linear(self.bottleneck_dim, self.hidden_dim)
            self.nonlinear = nn.ReLU(inplace = True)
            self.norm2 = nn.LayerNorm(self.hidden_dim)
            self.norm1 = nn.LayerNorm(self.hidden_dim)
            self.dropout3 = nn.Dropout(dropout)
            self.dropout2 = nn.Dropout(dropout)
            self.dropout1 = nn.Dropout(dropout)
        else:
            # Fail fast: previously an unknown relation built no submodules and
            # crashed later with UnboundLocalError in forward().
            raise ValueError(f"Unsupported relation type: {relation!r}")

    def forward(self, x):
        '''
        x: shape [6,2,100,256]
        Returns a tensor of the same shape (residual connection included).
        '''
        if self.relation == 'bilinear':
            x_trans = []
            for i in range(self.nheads):
                x_b1 = self.bilinear1[i](x) # [6,2,100,256]
                x_b2 = self.bilinear2[i](x)

                # Learned scaling instead of 1/sqrt(d); logits over the seq dim.
                x_b1 = x_b1 * self.coef[i]
                x_att = torch.einsum('abcd,abed->abce', x_b1, x_b2)
                x_att = torch.softmax(x_att, dim = -1)
                x_emb = self.W3[i](x)
                x_i = torch.einsum('abce,abef->abcf', x_att, x_emb)
                x_trans.append(x_i)  # [6,2,100,256/nheads]
            x_trans = torch.cat(x_trans, dim = -1)
            x_trans = self.W1(self.nonlinear(self.LayerNorm(self.W2(x_trans))))
            x_trans = x + x_trans
        elif self.relation == 'embedded_dot_pro':
            x_trans = []
            for i in range(self.nheads):
                x_r1 = self.relation_q[i](x) # [6,2,100,256]
                x_r2 = self.relation_k[i](x)
                # Standard scaled dot-product attention per head.
                x_att = torch.einsum('abcd,abed->abce', x_r1, x_r2) / math.sqrt(self.hid_hid_dim)
                x_att = torch.softmax(x_att, dim = -1)
                x_emb = self.W3[i](x)
                x_i = torch.einsum('abce,abef->abcf', x_att, x_emb)
                x_trans.append(x_i)  # [6,2,100,256/nheads]
            x_trans = torch.cat(x_trans, dim = -1)
            x_trans = self.W1(self.nonlinear(self.LayerNorm(self.W2(x_trans))))
            x_trans = x + x_trans
        elif self.relation == 'VanillaTrans':
            # Pre-norm: attention operates on the normalised input.
            x_n = self.norm2(x)
            x_trans = []
            for i in range(self.nheads):
                x_r1 = self.relation_q[i](x_n) # [6,2,100,256]
                x_r2 = self.relation_k[i](x_n)
                x_att = torch.einsum('abcd,abed->abce', x_r1, x_r2) / math.sqrt(self.hid_hid_dim)
                x_att = torch.softmax(x_att, dim = -1)
                x_emb = self.W3[i](x_n)
                x_i = torch.einsum('abce,abef->abcf', x_att, x_emb)
                x_trans.append(x_i)  # [6,2,100,256/nheads]
            # Heads are summed (not concatenated) in this variant.
            x_trans = torch.stack(x_trans, dim = -1).sum(dim = -1)
            x_trans = x + self.dropout3(x_trans)
            x_trans2 = self.norm1(x_trans)
            x_trans2 = self.W1(self.dropout2(self.nonlinear((self.W2(x_trans2)))))
            x_trans = x_trans + self.dropout1(x_trans2)
        else:
            raise ValueError(f"Unsupported relation type: {self.relation!r}")

        return x_trans


# class InterTransformerLayer(nn.Module):
#     def __init__(self, hidden_dim, nheads):
#         super().__init__()
#         self.nheads = nheads
#         self.hidden_dim = hidden_dim
#         self.bilinear1 = nn.Linear(hidden_dim, hidden_dim)
#         self.bilinear2 = nn.Linear(hidden_dim, hidden_dim)
#         self.bilinear1 = _get_clones(self.bilinear1, nheads)
#         self.bilinear2 = _get_clones(self.bilinear2, nheads)
#         self.coef = nn.ParameterList([nn.Parameter(torch.ones((hidden_dim, ), dtype = torch.float)/math.sqrt(hidden_dim), requires_grad = True) for _ in range(nheads)])
    
#         hid_hid_dim = hidden_dim//nheads
#         self.W3 = nn.Linear(hidden_dim, hid_hid_dim)
#         self.W3 = _get_clones(self.W3, nheads)
#         self.W2 = nn.Linear(hidden_dim, hidden_dim)
#         self.W1 = nn.Linear(hidden_dim, hidden_dim)
#         self.nonlinear = nn.ReLU(inplace = True)
#         self.LayerNorm = nn.LayerNorm([hidden_dim,])
    
#     def forward(self, x, y):
#         '''
#         Gather y features to x.
#         x: [6,2,100,256]
#         y: [6,2,100,256]
#         '''
#         x_trans = []
#         for i in range(self.nheads):
#             x_b1 = self.bilinear1[i](x) # [6,2,100,256]
#             y_b2 = self.bilinear2[i](y)
#             x_b1 = x_b1 * self.coef[i]
#             x_att = torch.einsum('abcd,abed->abce', x_b1, y_b2)
#             x_att = torch.softmax(x_att, dim = -1)
#             y_emb = self.W3[i](y)
#             x_i = torch.einsum('abce,abef->abcf', x_att, y_emb)
#             x_trans.append(x_i)  # [6,2,100,256/nheads]
#         x_trans = torch.cat(x_trans, dim = -1)
#         x_trans = self.W1(self.nonlinear(self.LayerNorm(self.W2(x_trans))))
#         x_trans = x + x_trans
#         return x_trans


# class InterLambdaLayer(nn.Module):
#     def __init__(self, hidden_dim, nheads):
#         super().__init__()
#         self.nheads = nheads
#         self.hidden_dim = hidden_dim
#         self.bilinear1 = nn.Linear(hidden_dim, hidden_dim)
#         self.bilinear2 = nn.Linear(hidden_dim, hidden_dim)
#         self.bilinear1 = _get_clones(self.bilinear1, nheads)
#         self.bilinear2 = _get_clones(self.bilinear2, nheads)
#         self.coef = nn.ParameterList([nn.Parameter(torch.ones((hidden_dim, ), dtype = torch.float)/math.sqrt(hidden_dim), requires_grad = True) for _ in range(nheads)])
    
#         hid_hid_dim = hidden_dim//nheads
#         self.W3 = nn.Linear(hidden_dim, hid_hid_dim)
#         self.W3 = _get_clones(self.W3, nheads)
#         self.W2 = nn.Linear(hidden_dim, hidden_dim)
#         self.W1 = nn.Linear(hidden_dim, hidden_dim)
#         self.nonlinear = nn.ReLU(inplace = True)
#         self.LayerNorm = nn.LayerNorm([hidden_dim,])
    
#     def forward(self, x, y):
#         '''
#         Gather y features to x.
#         x: [6,2,100,256]
#         y: [6,2,100,256]
#         '''
#         x_trans = []
#         for i in range(self.nheads):
#             x_b1 = self.bilinear1[i](x) # [6,2,100,256]
#             y_b2 = self.bilinear2[i](y)
#             x_b1 = x_b1 * self.coef[i]
#             x_att = torch.einsum('abcd,abed->abce', x_b1, y_b2)
#             x_att = torch.softmax(x_att, dim = -1)
#             y_emb = self.W3[i](y)
#             x_i = torch.einsum('abce,abef->abcf', x_att, y_emb)
#             x_trans.append(x_i)  # [6,2,100,256/nheads]
#         x_trans = torch.cat(x_trans, dim = -1)
#         x_trans = self.W1(self.nonlinear(self.LayerNorm(self.W2(x_trans))))
#         x_trans = x + x_trans
#         return x_trans



class MHCrossAttLayer(nn.Module):
    """Bidirectional cross-modal gating between vision (vx) and semantic (sx) features.

    Each modality is gated by a squeeze-excitation-style signal computed from
    the *other* modality, then refined by a bottleneck FFN with a residual.

    Args:
        hidden_dim: feature dimension of both modalities.
        nheads: number of gating heads.
        relation: 'GClike' (post-norm, heads concatenated) or
                  'VanillaTrans' (pre-norm, heads summed, with dropout).
        dropout: dropout rate (used by 'VanillaTrans' only).

    Raises:
        ValueError: if ``relation`` is not a supported variant.
    """

    def __init__(self, hidden_dim, nheads, relation = 'GClike', dropout = 0.1):
        super().__init__()
        self.nheads = nheads
        self.hidden_dim = hidden_dim
        hid_hid_dim = hidden_dim//nheads
        self.bottleneck_dim = int(self.hidden_dim)
        self.relation = relation

        if self.relation == 'GClike':
            # Per-head squeeze (sq) / excite (ex) gates and value projections (W3).
            self.vision_W3 = nn.Linear(hidden_dim, hid_hid_dim)
            self.vision_sq = nn.Linear(hidden_dim, hid_hid_dim)
            self.vision_ex = nn.Linear(hid_hid_dim, hid_hid_dim)
            self.vision_W3 = _get_clones(self.vision_W3, nheads)
            self.vision_sq = _get_clones(self.vision_sq, nheads)
            self.vision_ex = _get_clones(self.vision_ex, nheads)
            self.vision_W2 = nn.Linear(hidden_dim, self.bottleneck_dim)
            self.vision_W1 = nn.Linear(self.bottleneck_dim, hidden_dim)
            self.vision_LayerNorm = nn.LayerNorm([self.bottleneck_dim,])

            self.semantic_W3 = nn.Linear(hidden_dim, hid_hid_dim)
            self.semantic_sq = nn.Linear(hidden_dim, hid_hid_dim)
            self.semantic_ex = nn.Linear(hid_hid_dim, hid_hid_dim)
            self.semantic_W3 = _get_clones(self.semantic_W3, nheads)
            self.semantic_sq = _get_clones(self.semantic_sq, nheads)
            self.semantic_ex = _get_clones(self.semantic_ex, nheads)
            self.semantic_W2 = nn.Linear(hidden_dim, self.bottleneck_dim)
            self.semantic_W1 = nn.Linear(self.bottleneck_dim, hidden_dim)
            self.semantic_LayerNorm = nn.LayerNorm([self.bottleneck_dim,])
        elif self.relation == 'VanillaTrans':
            # Pre-norm variant: per-head gates produce full hidden_dim outputs
            # that are summed across heads, with dropout on each residual path.
            self.vision_W3 = nn.Linear(hidden_dim, hidden_dim)
            self.vision_sq = nn.Linear(hidden_dim, hid_hid_dim)
            self.vision_ex = nn.Linear(hid_hid_dim, hidden_dim)
            self.vision_W3 = _get_clones(self.vision_W3, nheads)
            self.vision_sq = _get_clones(self.vision_sq, nheads)
            self.vision_ex = _get_clones(self.vision_ex, nheads)
            self.vision_W2 = nn.Linear(hidden_dim, self.bottleneck_dim)
            self.vision_W1 = nn.Linear(self.bottleneck_dim, hidden_dim)
            self.vision_LayerNorm2 = nn.LayerNorm(self.hidden_dim)
            self.vision_LayerNorm1 = nn.LayerNorm(self.hidden_dim)
            self.vision_dropout3 = nn.Dropout(dropout)
            self.vision_dropout2 = nn.Dropout(dropout)
            self.vision_dropout1 = nn.Dropout(dropout)

            self.semantic_W3 = nn.Linear(hidden_dim, hidden_dim)
            self.semantic_sq = nn.Linear(hidden_dim, hid_hid_dim)
            self.semantic_ex = nn.Linear(hid_hid_dim, hidden_dim)
            self.semantic_W3 = _get_clones(self.semantic_W3, nheads)
            self.semantic_sq = _get_clones(self.semantic_sq, nheads)
            self.semantic_ex = _get_clones(self.semantic_ex, nheads)
            self.semantic_W2 = nn.Linear(hidden_dim, self.bottleneck_dim)
            self.semantic_W1 = nn.Linear(self.bottleneck_dim, hidden_dim)
            self.semantic_LayerNorm2 = nn.LayerNorm(self.hidden_dim)
            self.semantic_LayerNorm1 = nn.LayerNorm(self.hidden_dim)
            self.semantic_dropout3 = nn.Dropout(dropout)
            self.semantic_dropout2 = nn.Dropout(dropout)
            self.semantic_dropout1 = nn.Dropout(dropout)
        else:
            raise ValueError(f"Unsupported relation type: {relation!r}")

    def forward(self, vx, sx):
        '''
        vx: vision features, e.g. [6,2,100,256]
        sx: semantic features, same shape
        Returns the calibrated (vx, sx) pair, same shapes as the inputs.
        '''
        if self.relation == 'GClike':
            vx_enhance = []
            for i in range(self.nheads):
                # Gate computed from the *other* modality, applied to vx's own embedding.
                vx_att = torch.sigmoid(self.vision_ex[i](torch.relu(self.vision_sq[i](sx))))
                vx_emb = vx_att * self.vision_W3[i](vx) # Self Aggregation (Initial)
                vx_enhance.append(vx_emb)
            vx_enhance = torch.cat(vx_enhance, dim = -1)
            vx_enhance = vx + self.vision_W1(torch.relu(self.vision_LayerNorm(self.vision_W2(vx_enhance))))

            sx_enhance = []
            for i in range(self.nheads):
                sx_att = torch.sigmoid(self.semantic_ex[i](torch.relu(self.semantic_sq[i](vx))))
                sx_emb = sx_att * self.semantic_W3[i](sx) # Self Aggregation (Initial)
                sx_enhance.append(sx_emb)
            sx_enhance = torch.cat(sx_enhance, dim = -1)
            sx_enhance = sx + self.semantic_W1(torch.relu(self.semantic_LayerNorm(self.semantic_W2(sx_enhance))))

            return vx_enhance, sx_enhance

        # 'VanillaTrans': pre-norm residual structure. Both gates read the
        # normalised *original* inputs (vx_n/sx_n are captured before vx is updated).
        vx_n = self.vision_LayerNorm2(vx)
        sx_n = self.semantic_LayerNorm2(sx)

        vx_enhance = []
        for i in range(self.nheads):
            vx_att = torch.sigmoid(self.vision_ex[i](torch.relu(self.vision_sq[i](sx_n))))
            vx_emb = vx_att * self.vision_W3[i](vx_n) # Self Aggregation (Initial)
            vx_enhance.append(vx_emb)
        vx_enhance = torch.stack(vx_enhance, dim = -1).sum(dim = -1)
        vx = vx + self.vision_dropout3(vx_enhance)
        vx2 = self.vision_LayerNorm1(vx)
        vx2 = self.vision_W1(self.vision_dropout2(torch.relu(self.vision_W2(vx2))))
        vx = vx + self.vision_dropout1(vx2)

        sx_enhance = []
        for i in range(self.nheads):
            sx_att = torch.sigmoid(self.semantic_ex[i](torch.relu(self.semantic_sq[i](vx_n))))
            sx_emb = sx_att * self.semantic_W3[i](sx_n) # Self Aggregation (Initial)
            sx_enhance.append(sx_emb)
        sx_enhance = torch.stack(sx_enhance, dim = -1).sum(dim = -1)
        sx = sx + self.semantic_dropout3(sx_enhance)
        sx2 = self.semantic_LayerNorm1(sx)
        sx2 = self.semantic_W1(self.semantic_dropout2(torch.relu((self.semantic_W2(sx2)))))
        # BUG FIX: originally `sx = sx + self.semantic_dropout1(sx)`, which
        # discarded the FFN output sx2 and doubled sx; mirror the vision branch.
        sx = sx + self.semantic_dropout1(sx2)

        # BUG FIX: originally returned (vx_enhance, sx_enhance) here, i.e. the
        # pre-residual head sums, discarding the residual + FFN updates.
        return vx, sx


# class MHSelfAttLayer(nn.Module):
#     def __init__(self, hidden_dim, nheads):
#         super().__init__()
#         self.nheads = nheads
#         hid_hid_dim = hidden_dim//nheads
#         self.vision_W3 = nn.Linear(hidden_dim, hid_hid_dim)
#         self.vision_sq = nn.Linear(hidden_dim, hid_hid_dim)
#         self.vision_ex = nn.Linear(hid_hid_dim, hid_hid_dim)
#         self.vision_W3 = _get_clones(self.vision_W3, nheads)
#         self.vision_sq = _get_clones(self.vision_sq, nheads)
#         self.vision_ex = _get_clones(self.vision_ex, nheads)
#         self.vision_W2 = nn.Linear(hidden_dim, hidden_dim)
#         self.vision_W1 = nn.Linear(hidden_dim, hidden_dim)
#         self.vision_LayerNorm = nn.LayerNorm([hidden_dim,])

#         self.semantic_W3 = nn.Linear(hidden_dim, hid_hid_dim)
#         self.semantic_sq = nn.Linear(hidden_dim, hid_hid_dim)
#         self.semantic_ex = nn.Linear(hid_hid_dim, hid_hid_dim)
#         self.semantic_W3 = _get_clones(self.semantic_W3, nheads)
#         self.semantic_sq = _get_clones(self.semantic_sq, nheads)
#         self.semantic_ex = _get_clones(self.semantic_ex, nheads)
#         self.semantic_W2 = nn.Linear(hidden_dim, hidden_dim)
#         self.semantic_W1 = nn.Linear(hidden_dim, hidden_dim)
#         self.semantic_LayerNorm = nn.LayerNorm([hidden_dim,])
    
#     def forward(self, vx, sx):
#         vx_enhance = []
#         for i in range(self.nheads):
#             vx_att = torch.sigmoid(self.vision_ex[i](torch.relu(self.vision_sq[i](vx))))
#             vx_emb = vx_att * self.vision_W3[i](vx)
#             vx_enhance.append(vx_emb)
#         vx_enhance = torch.cat(vx_enhance, dim = -1)
#         vx_enhance = vx + self.vision_W1(torch.relu(self.vision_LayerNorm(self.vision_W2(vx_enhance))))

#         sx_enhance = []
#         for i in range(self.nheads):
#             sx_att = torch.sigmoid(self.semantic_ex[i](torch.relu(self.semantic_sq[i](sx))))
#             sx_emb = sx_att * self.semantic_W3[i](sx)
#             sx_enhance.append(sx_emb)
#         sx_enhance = torch.cat(sx_enhance, dim = -1)
#         sx_enhance = sx + self.semantic_W1(torch.relu(self.semantic_LayerNorm(self.semantic_W2(sx_enhance))))
        
#         return vx_enhance, sx_enhance


# class VanillaCrossAttLayer(nn.Module):
#     def __init__(self, hidden_dim):
#         super().__init__()
#         self.vision_W1 = nn.Linear(hidden_dim, hidden_dim)
#         self.vision_res = nn.Linear(hidden_dim, hidden_dim)
#         self.vision_W2 = nn.Linear(hidden_dim, hidden_dim)

#         self.semantic_W1 = nn.Linear(hidden_dim, hidden_dim)
#         self.semantic_res = nn.Linear(hidden_dim, hidden_dim)
#         self.semantic_W2 = nn.Linear(hidden_dim, hidden_dim)
    
#     def forward(self, vx, sx):
#         '''
#         vx: vision features [6,2,100,256]
#         sx: semantic features [6,2,100,256]
#         '''
#         # Inter
#         res_vx = self.vision_res(vx)
#         att_vx = res_vx + res_vx * torch.sigmoid(self.vision_W1(sx))
#         # att_vx = res_vx + res_vx * torch.sigmoid(self.vision_W2(torch.relu(self.vision_W1(sx))))
#         res_sx = self.semantic_res(sx)
#         att_sx = res_sx + res_sx * torch.sigmoid(self.semantic_W1(vx))
#         # att_sx = res_sx + res_sx * torch.sigmoid(self.semantic_W2(torch.relu(self.semantic_W1(vx))))

#         return att_vx, att_sx
