import torch
import torch.nn as nn


class Patch_embeded(nn.Module):
    """Split an image into non-overlapping patches, linearly project each
    patch to ``embed_dim``, append a learnable class token and add a learned
    position embedding.

    Input:  [B, in_channel, image_size, image_size]
    Output: [B, num_patches + 1, embed_dim]
    """

    def __init__(self,
                 image_size=224,
                 patch_size=16,
                 embed_dim=768,
                 in_channel=3):
        super(Patch_embeded, self).__init__()
        self.image_size = image_size
        self.patch_size = patch_size
        self.embed_dim = embed_dim
        self.in_channel = in_channel
        # (patches per side) squared = total number of patch tokens.
        self.num_patches = (image_size // patch_size) ** 2
        # A convolution with kernel == stride == patch_size is exactly a
        # per-patch linear projection.
        self.proj = nn.Conv2d(in_channel, embed_dim,
                              kernel_size=patch_size, stride=patch_size)
        # Learnable class token and position table, both zero-initialised.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches + 1, embed_dim))

    def forward(self, x):
        """x: [B, C, H, W] -> token sequence [B, num_patches + 1, embed_dim]."""
        batch = x.shape[0]
        patches = self.proj(x)             # [B, embed_dim, H/ps, W/ps]
        patches = patches.flatten(2)       # [B, embed_dim, num_patches]
        patches = patches.transpose(1, 2)  # [B, num_patches, embed_dim]
        # Broadcast the single class token across the batch.
        cls = self.cls_token.expand(batch, -1, -1)
        # NOTE(review): the class token is appended *after* the patch tokens
        # here; standard ViT prepends it at index 0 — confirm this ordering
        # is intentional before extracting a classification token downstream.
        tokens = torch.cat((patches, cls), dim=1)
        return tokens + self.pos_embed


class attention(nn.Module):
    """Multi-head self-attention (no dropout).

    A single linear layer produces the concatenated queries, keys and
    values; scaled dot-product attention runs per head and the heads are
    merged back through an output projection.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False):
        super(attention, self).__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # 1/sqrt(d_head) scaling keeps the logits well-conditioned.
        self.scale = head_dim ** -0.5
        # One projection yielding q, k and v side by side.
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)

    def forward(self, x):
        """x: [B, N, dim] -> [B, N, dim]."""
        batch, tokens, dim = x.shape
        head_dim = dim // self.num_heads
        # [B, N, 3*dim] -> [3, B, heads, N, head_dim]
        qkv = (self.qkv(x)
               .reshape(batch, tokens, 3, self.num_heads, head_dim)
               .permute(2, 0, 3, 1, 4))
        q, k, v = qkv.unbind(0)
        # Attention weights over the token axis.
        weights = torch.softmax((q @ k.transpose(-2, -1)) * self.scale, dim=-1)
        # Weighted sum of values, then merge the head dimension back.
        merged = (weights @ v).transpose(1, 2).reshape(batch, tokens, dim)
        return self.proj(merged)


class MLP(nn.Module):
    """Transformer feed-forward block: Linear -> GELU -> Linear.

    Args:
        in_dim: input feature size.
        hidden_dim: width of the intermediate layer (defaults to ``in_dim``).
        out_dim: output feature size (defaults to ``in_dim``).
    """

    def __init__(self, in_dim, hidden_dim=None, out_dim=None):
        super(MLP, self).__init__()
        # Fall back to in_dim so MLP(d) works out of the box; previously the
        # None defaults crashed inside nn.Linear when the sizes were omitted.
        hidden_dim = hidden_dim or in_dim
        out_dim = out_dim or in_dim
        self.fc1 = nn.Linear(in_dim, hidden_dim)
        self.actlayer = nn.GELU()
        self.fc2 = nn.Linear(hidden_dim, out_dim)

    def forward(self, x):
        """x: [..., in_dim] -> [..., out_dim]."""
        x = self.fc1(x)
        x = self.actlayer(x)
        # Standard transformer MLPs apply the activation only *between* the
        # two projections; the old second GELU after fc2 squashed the output
        # of the residual branch and has been removed.
        x = self.fc2(x)
        return x


class Encoder_block(nn.Module):
    """One transformer encoder layer: pre-norm self-attention followed by a
    pre-norm MLP, each wrapped in a residual connection.

    NOTE(review): a single LayerNorm instance is shared by both sub-layers;
    standard ViT uses two independent norms — confirm the sharing is
    intentional (fixing it would change the parameter set / checkpoints).
    """

    def __init__(self, dim, num_heads, mlp_ration=4, qkv_bias=False):
        super(Encoder_block, self).__init__()
        self.normlayer = nn.LayerNorm(dim)
        self.atten = attention(dim, num_heads, qkv_bias=qkv_bias)
        # MLP expands to dim * mlp_ration then projects back to dim.
        self.hidden_dim = int(dim * mlp_ration)
        self.mlp = MLP(in_dim=dim, hidden_dim=self.hidden_dim, out_dim=dim)

    def forward(self, x):
        """x: [B, N, dim] -> [B, N, dim]."""
        # Residual around normalised self-attention ...
        x = x + self.atten(self.normlayer(x))
        # ... then residual around the normalised feed-forward block.
        return x + self.mlp(self.normlayer(x))


class Vision_Model(nn.Module):
    """Minimal Vision Transformer: patch embedding, a stack of ``depth``
    encoder blocks, a final LayerNorm and a linear head.

    NOTE(review): the head is applied to *every* token, so forward returns
    [B, num_patches + 1, num_classes] rather than per-image logits; ViT
    classifiers normally take only the class token first — confirm callers
    expect the per-token output before changing it.
    """

    def __init__(self,
                 in_channel=3,
                 dim=48,
                 num_heads=8,
                 image_size=32,
                 patch_size=2,
                 num_classes=10,
                 depth=3,
                 qkv_bias=True):
        super(Vision_Model, self).__init__()
        self.image_size = image_size
        self.patch_size = patch_size
        self.patch_embed = Patch_embeded(image_size=image_size,
                                         patch_size=patch_size,
                                         embed_dim=dim,
                                         in_channel=in_channel)
        self.depth = depth
        self.norm = nn.LayerNorm(dim)
        # depth identical encoder layers applied back to back.
        self.encoder = nn.Sequential(*(
            Encoder_block(dim, num_heads, mlp_ration=4, qkv_bias=qkv_bias)
            for _ in range(depth)
        ))
        self.head = nn.Linear(dim, num_classes)

    def forward(self, x):
        """x: [B, C, H, W] -> per-token logits [B, num_patches + 1, num_classes]."""
        tokens = self.patch_embed(x)
        encoded = self.encoder(tokens)
        return self.head(self.norm(encoded))
