import torch
import torch.nn as nn
import math
import torch.nn.functional as F


class MSA(nn.Module):
    """Multi-head self-attention (fixed 12 heads).

    Projects the input to per-head queries/keys/values, applies scaled
    dot-product attention independently per head, then concatenates the
    heads and mixes them with a final linear layer.

    Note: hidden_size should be divisible by 12; otherwise the output of
    the projections is num_heads * (hidden_size // 12), not hidden_size.
    """

    def __init__(self, hidden_size):
        super(MSA, self).__init__()

        num_heads = 12

        self.n_heads = num_heads
        self.dim_per_head = hidden_size // self.n_heads

        # Q, K, V projections (all heads packed into one Linear each).
        self.query = nn.Linear(hidden_size, self.dim_per_head * self.n_heads)
        self.key = nn.Linear(hidden_size, self.dim_per_head * self.n_heads)
        self.value = nn.Linear(hidden_size, self.dim_per_head * self.n_heads)

        # Output projection applied after the heads are re-concatenated.
        self.concat_heads = nn.Linear(self.dim_per_head * self.n_heads, hidden_size)

    def forward(self, x):
        # x: (bs, l, hidden_size) where l = h*w (number of patch tokens)
        bs, l = x.size()[0:2]
        num_heads = self.n_heads

        # Project x to q, k, v and split the channel dim into heads.
        q = self.query(x).view(bs, l, num_heads, self.dim_per_head)
        k = self.key(x).view(bs, l, num_heads, self.dim_per_head)
        v = self.value(x).view(bs, l, num_heads, self.dim_per_head)

        # Fold heads into the batch dim so bmm works per head:
        # (bs, l, H, d) -> (bs*H, l, d)
        q = q.transpose(1, 2).reshape(bs * num_heads, l, self.dim_per_head)
        k = k.transpose(1, 2).reshape(bs * num_heads, l, self.dim_per_head)
        v = v.transpose(1, 2).reshape(bs * num_heads, l, self.dim_per_head)

        # Scaled dot-product attention.
        qk = torch.bmm(q, k.transpose(1, 2))
        # BUG FIX: was `qk // math.sqrt(...)` — floor division truncated the
        # attention logits to whole numbers before the softmax. Use true division.
        qk = qk / math.sqrt(self.dim_per_head)
        qk = F.softmax(qk, dim=2)

        # Weighted sum of values, then undo the head folding:
        # (bs*H, l, d) -> (bs, H, l, d) -> (bs, l, H*d)
        output = torch.bmm(qk, v).view(bs, num_heads, l, self.dim_per_head)
        output = output.transpose(1, 2).reshape(bs, l, num_heads * self.dim_per_head)
        output = self.concat_heads(output)
        # (removed leftover debug print of output.shape)

        return output


class MLP(nn.Module):
    """Position-wise feed-forward sub-block: Linear -> ReLU -> Linear.

    Expands hidden_size to a fixed inner width of 3072 and projects back.
    (3072 is presumably the ViT-Base MLP width, 4 * 768 — confirm against
    the pretrained config if hidden_size is changed.)
    """

    def __init__(self, hidden_size):
        super(MLP, self).__init__()
        inner_width = 3072  # hard-coded expansion width of the FFN
        self.linear1 = nn.Linear(hidden_size, inner_width)
        self.linear2 = nn.Linear(inner_width, hidden_size)
        self.activate = nn.ReLU()

    def forward(self, x):
        # expand -> non-linearity -> project back to hidden_size
        return self.linear2(self.activate(self.linear1(x)))


class MSA_MLP_block(nn.Module):
    """One pre-norm Transformer encoder block.

    Structure: x + MSA(LN(x)), then x + MLP(LN(x)).
    Input and output shape: (bs, l, hidden_size).
    """

    def __init__(self, hidden_size):
        super(MSA_MLP_block, self).__init__()
        self.hid_size = hidden_size
        self.attn_layer = MSA(self.hid_size)
        self.norm1 = nn.LayerNorm(self.hid_size)

        self.FFN = MLP(self.hid_size)
        self.norm2 = nn.LayerNorm(self.hid_size)

    def forward(self, x):
        # x: (bs, l, hidden_size), e.g. (2, 576, 768)

        # Attention sub-block with residual connection (pre-norm).
        # PERF FIX: the original computed self.attn_layer(normed_x) twice
        # (once into an unused `y`, once for the residual add); the attention
        # layer is the most expensive part of the block, so compute it once.
        residual = x
        x = self.attn_layer(self.norm1(x)) + residual

        # Feed-forward sub-block with residual connection (pre-norm).
        residual = x
        x = self.FFN(self.norm2(x)) + residual
        return x


class Transformer(nn.Module):
    """ViT-style encoder: patch embedding + learned position embedding,
    then `num_layers` MSA/MLP blocks and a final LayerNorm.

    Output shape: (bs, num_patches, hidden_size), e.g. (2, 576, 768) for a
    384x384 input with patch_size 16.
    """

    def __init__(self, channels, img_size, hidden_size=768, num_layers=12, patch_size=16):
        super(Transformer, self).__init__()

        grid = img_size // patch_size       # patches per side, e.g. 384 // 16 = 24
        num_patches = grid * grid           # e.g. 24 * 24 = 576
        self.channels = channels

        # Patch embedding: a strided conv maps each patch_size x patch_size
        # patch to a hidden_size vector (e.g. 384x384 -> 24x24 feature map).
        self.embed_patch = nn.Conv2d(in_channels=self.channels,
                                     out_channels=hidden_size,
                                     kernel_size=patch_size,
                                     stride=patch_size)
        # Learned position embedding, (1, hidden_size, num_patches); added
        # while the tokens are still channels-first, broadcast over the batch.
        self.embed_pos = nn.Parameter(torch.zeros(1, hidden_size, num_patches))
        # Stack of encoder blocks.
        self.layers = nn.ModuleList(MSA_MLP_block(hidden_size) for _ in range(num_layers))
        self.norm = nn.LayerNorm(hidden_size)

    def forward(self, x):
        # (bs, C, H, W) -> (bs, hidden, H/p, W/p) -> (bs, hidden, num_patches)
        tokens = self.embed_patch(x).flatten(2)
        tokens = tokens + self.embed_pos

        # Channels-last for the encoder stack: (bs, num_patches, hidden)
        tokens = tokens.permute(0, 2, 1)
        for block in self.layers:
            tokens = block(tokens)

        return self.norm(tokens)

class Transformer_2(nn.Module):
    """Transformer encoder stack WITHOUT the patch/position embedding front-end.

    Expects input that is already tokenized: (bs, h*w, hidden_size).
    `channels` and `img_size` are accepted for signature parity with
    `Transformer` but are unused — embedding happens outside this module.
    """

    def __init__(self, channels, img_size, hidden_size=768, num_layers=12):
        super(Transformer_2, self).__init__()
        # Encoder blocks followed by a final LayerNorm.
        self.layers = nn.ModuleList(MSA_MLP_block(hidden_size) for _ in range(num_layers))
        self.norm = nn.LayerNorm(hidden_size)

    def forward(self, x):
        # x: (bs, h*w, hidden_size) token sequence
        for block in self.layers:
            x = block(x)
        return self.norm(x)
if __name__ == '__main__':
    # Smoke test: 384x384 single-channel input through the full encoder.
    # BUG FIX: torch.Tensor(2, 1, 384, 384) returns UNINITIALIZED memory
    # (may contain NaN/inf), making the run nondeterministic. Use seeded
    # random data instead.
    torch.manual_seed(0)
    img = torch.randn(2, 1, 384, 384)
    net = Transformer(1, 384)
    out = net(img)
    print(out.shape)  # torch.Size([2, 576, 768])