import torch
import torch.autograd
import torch.nn as nn
import torch.nn.functional as F
import copy


class cascade_comp_inf_model(nn.Module):
    """Cascaded attention: optional self-attention on ``x``, self-attention on
    ``q``, then cross-attention of ``x`` against the attended ``q``.

    NOTE(review): when ``ifsa_x`` is False, the raw (un-attended) ``x`` is fed
    straight into the cross-attention stage — this mirrors the original
    control flow; confirm it is intentional.
    """

    def __init__(self, input_data_dim, args):
        super(cascade_comp_inf_model, self).__init__()

        self.input_data_dim = input_data_dim
        # q/k/v all share one hidden width in this configuration.
        self.d_q = args.cma_vl_hidden
        self.d_k = args.cma_vl_hidden
        self.d_v = args.cma_vl_hidden
        self.n_head = args.cma_n_head
        self.dropout = args.cma_vl_dropout

        # Self-attention branch for x; the q branch is an independent deep
        # copy so the two branches do not share parameters.
        self.sa_op_x = SA(self.input_data_dim, args.cma_vl_hidden, self.dropout)
        self.ff_sa_op_x = FeedForwardLayer_SA(self.input_data_dim, dropout=self.dropout)
        self.sa_op_q = copy.deepcopy(self.sa_op_x)
        self.ff_sa_op_q = copy.deepcopy(self.ff_sa_op_x)

        self.ca_op = CA(self.input_data_dim, args.cma_vl_hidden, self.dropout)

    def forward(self, x, q, ifsa_x):
        """Return the cross-attention of ``x`` with the self-attended ``q``.

        ``ifsa_x`` — when truthy, ``x`` is first passed through its own
        self-attention + feed-forward branch.
        """
        if ifsa_x:
            x = self.ff_sa_op_x(self.sa_op_x(x))
        attended_q = self.ff_sa_op_q(self.sa_op_q(q))
        return self.ca_op(x, attended_q)


class Attention(nn.Module):
    """Scaled dot-product attention with dropout applied to the weights.

    ``k`` is expected already transposed by the caller, so scores are simply
    ``(q / temperature) @ k``. Returns both the attended output and ``v``
    unchanged, so callers can use ``v`` as a residual branch.
    """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)

    def forward(self, q, k, v, mask=None):
        scores = torch.matmul(q / self.temperature, k)
        if mask is not None:
            # Masked positions (mask == 0) get -1e9 so softmax zeroes them.
            scores = scores.masked_fill(mask == 0, -1e9)
        weights = self.dropout(F.softmax(scores, dim=-1))
        return torch.matmul(weights, v), v


class SA(nn.Module):
    """Single-head self-attention over the feature dimension of a vector input.

    The input is layer-normalized, projected to q/k/v, and attended so that
    each feature attends over all features of the same sample.
    """

    def __init__(self, input_data_dim, d_qkv, dropout):
        super(SA, self).__init__()
        self.dim = input_data_dim
        self.d_qkv = d_qkv

        self.w_q = nn.Linear(self.dim, d_qkv, bias=False)
        self.w_k = nn.Linear(self.dim, d_qkv, bias=False)
        self.w_v = nn.Linear(self.dim, d_qkv, bias=False)

        # Register the projections under dimension-tagged aliases as well
        # (kept for state-dict compatibility with the original code).
        self.add_module('linear_q_%d' % (self.dim), self.w_q)
        self.add_module('linear_k_%d' % (self.dim), self.w_k)
        self.add_module('linear_v_%d' % (self.dim), self.w_v)

        self.attention = Attention(temperature=d_qkv ** 0.5, attn_dropout=dropout)
        self.layer_norm = nn.LayerNorm(input_data_dim, eps=1e-6)

    def forward(self, input_data):
        """Self-attend ``input_data``; assumes a 2-D (batch, feature) input."""
        normed = self.layer_norm(input_data)

        # Arrange projections so scores form a (d_qkv, d_qkv) map per sample.
        query = self.w_q(normed).unsqueeze(-1)  # (batch, d_qkv, 1)
        key = self.w_k(normed).unsqueeze(1)     # (batch, 1, d_qkv)
        value = self.w_v(normed).unsqueeze(-1)  # (batch, d_qkv, 1)

        # The output keeps v's dimensions, so v serves as the residual branch.
        attended, residual = self.attention(query, key, value)

        return attended.squeeze(-1) + residual.squeeze(-1)


# class CA(nn.Module):
#     def __init__(self, input_data_dim, d_qkv, n_head, dropout):
#         super(CA, self).__init__()
#         self.n_head = n_head
#         self.dim = input_data_dim
#         self.d_qkv = d_qkv
#
#         self.w_q = nn.Linear(self.dim, n_head * d_qkv, bias=False)
#         self.w_k = nn.Linear(self.dim, n_head * d_qkv, bias=False)
#         self.w_v = nn.Linear(self.dim, n_head * d_qkv, bias=False)
#
#         self.add_module('linear_q_%d' % (self.dim), self.w_q)
#         self.add_module('linear_k_%d' % (self.dim), self.w_k)
#         self.add_module('linear_v_%d' % (self.dim), self.w_v)
#
#         self.attention = Attention(temperature=d_qkv ** 0.5, attn_dropout=dropout)
#         self.fc = nn.Linear(n_head * d_qkv, n_head * d_qkv)
#         self.fc_return = nn.Linear(n_head * d_qkv*2, n_head * d_qkv)
#         self.dropout = nn.Dropout(dropout)
#         self.layer_norm = nn.LayerNorm(d_qkv, eps=1e-6)
#
#     def forward(self, input_data, input_q):
#
#         bs = input_data.size(0)
#         input_data = self.layer_norm(input_data)
#         input_q = self.layer_norm(input_q)
#
#         q = torch.zeros(bs, 1, self.n_head * self.d_qkv).cuda()
#         k = torch.zeros(bs, 1, self.n_head * self.d_qkv).cuda()
#         v = torch.zeros(bs, 1, self.n_head * self.d_qkv).cuda()
#
#         q[:, 0, :] = self.w_q(input_q)
#         k[:, 0, :] = self.w_k(input_data)
#         v[:, 0, :] = self.w_v(input_data)
#
#         q = q.view(bs, 1, self.n_head, self.d_qkv)
#         k = k.view(bs, 1, self.n_head, self.d_qkv)
#         v = v.view(bs, 1, self.n_head, self.d_qkv)
#         q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
#
#         q, residual = self.attention(q, k, v)  # 注意因为没有同输入相比维度发生变化，因此以v作为残差
#         q = q.transpose(1, 2).contiguous().view(bs, -1)
#
#         residual = residual.transpose(1, 2).contiguous().view(bs, -1)
#         q = self.dropout(self.fc(q))
#
#         q = self.fc_return(torch.cat((residual,q),dim=-1))
#         return q


class CA(nn.Module):
    """Single-head cross-attention: queries come from ``input_q``, keys and
    values from ``input_data``. Both inputs share one LayerNorm, so they must
    have the same feature dimension.
    """

    def __init__(self, input_data_dim, d_qkv, dropout):
        super(CA, self).__init__()
        self.dim = input_data_dim
        self.d_qkv = d_qkv

        self.w_q = nn.Linear(self.dim, d_qkv, bias=False)
        self.w_k = nn.Linear(self.dim, d_qkv, bias=False)
        self.w_v = nn.Linear(self.dim, d_qkv, bias=False)

        # Dimension-tagged aliases kept for state-dict compatibility.
        self.add_module('linear_q_%d' % (self.dim), self.w_q)
        self.add_module('linear_k_%d' % (self.dim), self.w_k)
        self.add_module('linear_v_%d' % (self.dim), self.w_v)

        self.attention = Attention(temperature=d_qkv ** 0.5, attn_dropout=dropout)
        # NOTE(review): `fc` and `relu` are registered but never used in
        # forward; kept so checkpoint keys stay unchanged.
        self.fc = nn.Linear(d_qkv, d_qkv)
        self.fc_return = nn.Linear(d_qkv*2, d_qkv)
        self.layer_norm = nn.LayerNorm(input_data_dim, eps=1e-6)
        self.relu = nn.ReLU()

    def forward(self, input_data, input_q):
        """Cross-attend ``input_q`` over ``input_data``; assumes 2-D inputs."""
        normed_x = self.layer_norm(input_data)
        normed_q = self.layer_norm(input_q)

        query = self.w_q(normed_q).unsqueeze(-1)  # (batch, d_qkv, 1)
        key = self.w_k(normed_x).unsqueeze(1)     # (batch, 1, d_qkv)
        value = self.w_v(normed_x).unsqueeze(-1)  # (batch, d_qkv, 1)

        # Output dims match v, so v serves as the residual branch.
        attended, residual = self.attention(query, key, value)
        attended = attended.squeeze(-1)
        residual = residual.squeeze(-1)

        # Fuse residual and attended features, then project back to d_qkv.
        merged = torch.cat((residual, attended), dim=-1)
        return self.fc_return(F.gelu(merged))


class FeedForwardLayer_SA(nn.Module):
    """Position-wise feed-forward bottleneck with a residual connection.

    Computes ``x + w_2(gelu(w_1(gelu(layer_norm(x)))))``, squeezing through a
    half-width hidden layer.

    NOTE(review): ``self.dropout`` is constructed but never applied in
    ``forward`` — confirm whether dropout was intended here.
    """

    def __init__(self, input_data_dim, dropout=0.1):
        super().__init__()
        half = int(input_data_dim/2)
        self.w_1 = nn.Linear(input_data_dim, half)  # position-wise
        self.w_2 = nn.Linear(half, input_data_dim)  # position-wise
        self.layer_norm = nn.LayerNorm(input_data_dim, eps=1e-6)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        skip = x
        hidden = F.gelu(self.layer_norm(x))
        hidden = self.w_2(F.gelu(self.w_1(hidden)))
        return skip + hidden


# class FeedForwardLayer_CA(nn.Module):
#
#     def __init__(self, input_data_dim):
#         super().__init__()
#
#         self.w_1 = nn.Linear(input_data_dim, input_data_dim/2)  # position-wise
#         self.w_2 = nn.Linear(input_data_dim/2, input_data_dim)  # position-wise
#         self.w_3 = nn.Linear(input_data_dim*2, input_data_dim)
#
#         self.layer_norm = nn.LayerNorm(input_data_dim, eps=1e-6)
#
#     def forward(self, x):
#         residual = x
#         x = self.layer_norm(x)
#
#         x = self.w_2(F.gelu(self.w_1(F.gelu(x))))
#
#         x = self.w_3(F.gelu(torch.cat((residual,x),dim=-1)))
#
#         return x


# class VariLengthInputLayer(nn.Module):
#     def __init__(self, input_data_dim, d_k, d_v, n_head, dropout):
#         super(VariLengthInputLayer, self).__init__()
#         self.n_head = n_head
#         self.dim = input_data_dim
#         self.d_k = d_k
#         self.d_v = d_v
#         # self.w_qs = []
#         # self.w_ks = []
#         # self.w_vs = []
#         # for i, dim in enumerate(self.dims):
#         self.w_q = nn.Linear(self.dim, n_head * d_k, bias=False)
#         self.w_k = nn.Linear(self.dim, n_head * d_k, bias=False)
#         self.w_v = nn.Linear(self.dim, n_head * d_v, bias=False)
#         # self.w_qs.append(self.w_q)
#         # self.w_ks.append(self.w_k)
#         # self.w_vs.append(self.w_v)
#         self.add_module('linear_q_%d' % (self.dim), self.w_q)
#         self.add_module('linear_k_%d' % (self.dim), self.w_k)
#         self.add_module('linear_v_%d' % (self.dim), self.w_v)
#
#         self.attention = Attention(temperature=d_k ** 0.5, attn_dropout=dropout)
#         self.fc = nn.Linear(n_head * d_v, n_head * d_v)
#         self.dropout = nn.Dropout(dropout)
#         self.layer_norm = nn.LayerNorm(n_head * d_v, eps=1e-6)
#
#     def forward(self, input_data, mask=None):
#
#         bs = input_data.size(0)
#         modal_num = 1
#         q = torch.zeros(bs, modal_num, self.n_head * self.d_k).cuda()
#         k = torch.zeros(bs, modal_num, self.n_head * self.d_k).cuda()
#         v = torch.zeros(bs, modal_num, self.n_head * self.d_v).cuda()
#
#         q[:, 0, :] = self.w_q(input_data)
#         k[:, 0, :] = self.w_k(input_data)
#         v[:, 0, :] = self.w_v(input_data)
#
#         q = q.view(bs, modal_num, self.n_head, self.d_k)
#         k = k.view(bs, modal_num, self.n_head, self.d_k)
#         v = v.view(bs, modal_num, self.n_head, self.d_v)
#         q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
#
#         q, attn, residual = self.attention(q, k, v)  # 注意因为没有同输入相比维度发生变化，因此以v作为残差
#         q = q.transpose(1, 2).contiguous().view(bs, modal_num, -1)
#
#         residual = residual.transpose(1, 2).contiguous().view(bs, modal_num, -1)
#         q = self.dropout(self.fc(q))
#
#         q += residual
#         q = self.layer_norm(q)
#         return q, attn
#
#
# class EncodeLayer(nn.Module):
#     def __init__(self, d_model, d_k, d_v, n_head, dropout):
#         super(EncodeLayer, self).__init__()
#         self.n_head = n_head
#         self.d_k = d_k
#         self.d_v = d_v
#
#         self.w_q = nn.Linear(d_model, n_head * d_k, bias=False)
#         self.w_k = nn.Linear(d_model, n_head * d_k, bias=False)
#         self.w_v = nn.Linear(d_model, n_head * d_v, bias=False)
#
#         self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
#         self.attention = Attention(temperature=d_k ** 0.5)
#         self.dropout = nn.Dropout(dropout)
#         self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
#
#     def forward(self, q, k, v, modal_num, mask=None):
#         bs = q.size(0)
#         residual = q
#         q = self.w_q(q).view(bs, modal_num, self.n_head, self.d_k)
#         k = self.w_k(k).view(bs, modal_num, self.n_head, self.d_k)
#         v = self.w_v(v).view(bs, modal_num, self.n_head, self.d_v)
#         q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
#
#         q, attn, _ = self.attention(q, k, v, mask=mask)
#         q = q.transpose(1, 2).contiguous().view(bs, modal_num, -1)
#         q = self.dropout(self.fc(q))
#         q += residual
#         q = self.layer_norm(q)
#         return q, attn

if __name__ == '__main__':
    from types import SimpleNamespace

    # Minimal stand-in for the argparse namespace the model reads
    # (cma_vl_hidden, cma_n_head, cma_vl_dropout).
    args = SimpleNamespace(cma_vl_hidden=64, cma_n_head=2, cma_vl_dropout=0.1)

    # BUG FIX: the original instantiated `CMA_T`, which is not defined in this
    # file (NameError); the model defined here is `cascade_comp_inf_model`,
    # whose constructor takes (input_data_dim, args).
    model = cascade_comp_inf_model(input_data_dim=64, args=args)

    # Run on GPU when available instead of unconditionally calling .cuda().
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)

    x = torch.rand(32, 64, device=device)
    q = torch.rand(32, 64, device=device)

    # forward(x, q, ifsa_x) — the original call was missing the third argument.
    out = model(x, q, ifsa_x=True)
    print(out.shape)