import torch
import torch.nn as nn


class PatchEmbedding(nn.Module):
    """Split an image into non-overlapping patches and linearly embed each one.

    Implemented as a single Conv2d whose kernel size and stride both equal the
    patch size, so every output spatial position is the linear projection of
    exactly one patch.
    """

    def __init__(self, img_size, patch_size, in_channels, embed_size):
        super().__init__()
        self.img_size = img_size          # input resolution, e.g. 224
        self.patch_size = patch_size      # side length of one square patch, e.g. 16
        self.in_channels = in_channels    # input channels, e.g. 3 for RGB
        self.embed_size = embed_size      # per-token embedding dimension, e.g. 768

        # Total patch count: (img_size / patch_size) per axis, squared.
        self.num_patches = (img_size // patch_size) ** 2
        # Each patch's pixels are projected to a single embed_size-dim vector.
        self.proj = nn.Conv2d(in_channels, embed_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        """Map (B, C, H, W) images to (B, num_patches, embed_size) token sequences."""
        patches = self.proj(x)                 # (B, embed_size, H/P, W/P)
        tokens = patches.flatten(start_dim=2)  # (B, embed_size, num_patches)
        return tokens.permute(0, 2, 1)         # (B, num_patches, embed_size)


class PositionalEncoding(nn.Module):
    """Prepend a learnable [CLS] token and add learnable positional embeddings.

    Args:
        num_patches: number of patch tokens produced by the patch embedding.
        embed_size: embedding dimension of each token.
    """

    def __init__(self, num_patches, embed_size):
        super().__init__()
        # One position per patch plus one extra position for the [CLS] token.
        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, embed_size))
        self.cls_token = nn.Parameter(torch.randn(1, 1, embed_size))
        # Bug fix: initialization must happen exactly once, at construction
        # time. The original code called trunc_normal_ inside forward(),
        # which re-randomized the positional embeddings on every forward pass
        # and silently discarded anything learned during training.
        nn.init.trunc_normal_(self.pos_embedding, std=0.02)
        nn.init.trunc_normal_(self.cls_token, std=0.02)

    def forward(self, x):
        """Map (B, num_patches, embed_size) to (B, num_patches + 1, embed_size)."""
        B, N, _ = x.size()
        cls_tokens = self.cls_token.expand(B, -1, -1)  # (B, 1, embed_size)
        x = torch.cat((cls_tokens, x), dim=1)  # prepend [CLS]: (B, N + 1, E)
        return x + self.pos_embedding  # broadcast positions over the batch


class TransformerBlock(nn.Module):
    """Pre-norm transformer encoder layer: self-attention + MLP, each residual."""

    def __init__(self, embed_size, heads, forward_expansion, dropout):
        super().__init__()
        hidden = forward_expansion * embed_size
        self.attention = nn.MultiheadAttention(
            embed_dim=embed_size, num_heads=heads, dropout=dropout, batch_first=True
        )
        self.norm1 = nn.LayerNorm(embed_size)
        self.norm2 = nn.LayerNorm(embed_size)
        self.mlp = nn.Sequential(
            nn.Linear(embed_size, hidden),
            nn.GELU(),
            nn.Linear(hidden, embed_size),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        # Attention sub-layer: LayerNorm first (pre-norm), then all-pairs
        # self-attention over the sequence; batch_first=True so x stays
        # (batch, seq_len, embed_size). Residual added afterwards.
        normed = self.norm1(x)
        attn_out, _ = self.attention(normed, normed, normed)
        x = attn_out + x
        # Feed-forward sub-layer with its own pre-norm and residual.
        x = self.mlp(self.norm2(x)) + x
        return x


class VisionTransformer(nn.Module):
    """Minimal ViT classifier: patch embed -> [CLS]+positions -> encoder stack -> head."""

    def __init__(self, img_size, patch_size, in_channels, embed_size, num_patches, heads,
                 forward_expansion, layers, dropout, num_classes):
        super().__init__()
        self.patch_embedding = PatchEmbedding(img_size, patch_size, in_channels, embed_size)
        self.pos_encoding = PositionalEncoding(num_patches, embed_size)
        # Stack of identical encoder layers, applied in order.
        blocks = [
            TransformerBlock(embed_size, heads, forward_expansion, dropout)
            for _ in range(layers)
        ]
        self.transformer_blocks = nn.Sequential(*blocks)
        self.to_cls_token = nn.Identity()
        # Classification head applied to the final [CLS] representation.
        self.mlp_head = nn.Sequential(
            nn.LayerNorm(embed_size),
            nn.Linear(embed_size, num_classes),
        )

    def forward(self, x):
        tokens = self.patch_embedding(x)     # (B, num_patches, embed_size)
        tokens = self.pos_encoding(tokens)   # (B, num_patches + 1, embed_size)
        encoded = self.transformer_blocks(tokens)
        cls = self.to_cls_token(encoded[:, 0])  # take the [CLS] token's output
        return self.mlp_head(cls)            # (B, num_classes) logits


if __name__ == '__main__':
    # Smoke test: build the model and run one forward pass on a random batch.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = VisionTransformer(
        img_size=224,
        patch_size=16,
        in_channels=3,
        embed_size=768,
        num_patches=(224 // 16) ** 2,
        heads=4,
        forward_expansion=4,
        layers=6,
        dropout=0.1,
        num_classes=10,
    ).to(device)
    input_image = torch.randn(5, 3, 224, 224).to(device)  # batch of 5 RGB images
    output = model(input_image)
    print(output.shape)  # expected: torch.Size([5, 10])
