import torch
import torch.nn as nn
import torch.nn.functional as F

class PatchEmbedding(nn.Module):
    """Split an image into non-overlapping patches and embed each one.

    The patchify-and-project step is a single Conv2d whose kernel size and
    stride both equal the patch size, so each output position corresponds to
    exactly one patch.
    """

    def __init__(self, image_size, patch_size, in_channels, embed_dim):
        super().__init__()
        self.image_size = image_size
        self.patch_size = patch_size
        # Patches per side, squared (assumes patch_size divides image_size).
        self.num_patches = (image_size // patch_size) ** 2
        self.patch_embedding = nn.Conv2d(
            in_channels, embed_dim, kernel_size=patch_size, stride=patch_size
        )

    def forward(self, x):
        """Map (B, C, H, W) images to (B, num_patches, embed_dim) token sequences."""
        tokens = self.patch_embedding(x)      # (B, embed_dim, H/P, W/P)
        tokens = tokens.flatten(start_dim=2)  # (B, embed_dim, num_patches)
        return tokens.transpose(1, 2)         # (B, num_patches, embed_dim)

class TransformerEncoder(nn.Module):
    """A single post-norm transformer encoder layer (self-attention + MLP).

    Args:
        embed_dim: token embedding dimension.
        num_heads: number of attention heads (must divide embed_dim).
        mlp_ratio: hidden width of the MLP as a multiple of embed_dim.
    """

    def __init__(self, embed_dim, num_heads, mlp_ratio):
        super(TransformerEncoder, self).__init__()
        # BUG FIX: batch_first=True so inputs are (batch, seq, embed_dim),
        # matching the (B, num_patches, E) layout PatchEmbedding produces.
        # Without it nn.MultiheadAttention interprets the input as
        # (seq, batch, E) and attends across the *batch* dimension instead
        # of across patches.
        self.self_attention = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.mlp = nn.Sequential(
            nn.Linear(embed_dim, embed_dim * mlp_ratio),
            nn.GELU(),
            nn.Linear(embed_dim * mlp_ratio, embed_dim),
        )
        self.norm2 = nn.LayerNorm(embed_dim)

    def forward(self, x):
        # Self-attention sub-layer: residual connection, then post-norm.
        # need_weights=False skips averaging/returning attention maps.
        attn_output, _ = self.self_attention(x, x, x, need_weights=False)
        x = self.norm1(x + attn_output)

        # MLP sub-layer: residual connection, then post-norm.
        x = self.norm2(x + self.mlp(x))
        return x

class VisionTransformer(nn.Module):
    """Vision Transformer classifier using mean-pooled tokens (no class token).

    Pipeline: patch embedding -> learnable positional embedding ->
    ``num_layers`` stacked encoder layers -> global average pooling over
    tokens -> linear classification head.

    Args:
        image_size: input image side length (images assumed square).
        patch_size: side length of each square patch; must divide image_size.
        in_channels: number of input image channels.
        num_classes: number of output classes.
        embed_dim: token embedding dimension.
        num_heads: attention heads per encoder layer.
        num_layers: number of stacked encoder layers.
        mlp_ratio: MLP hidden width as a multiple of embed_dim.
    """

    def __init__(self, image_size, patch_size, in_channels, num_classes,
                 embed_dim=384, num_heads=12, num_layers=12, mlp_ratio=4):
        super(VisionTransformer, self).__init__()
        self.patch_embedding = PatchEmbedding(image_size, patch_size, in_channels, embed_dim)
        # FIX: learnable positional embedding. Without it the model is
        # permutation-invariant over patches and discards all spatial layout
        # (the commented-out prototype in this file includes one). Zero-
        # initialized, so the initial forward pass is unchanged and existing
        # callers are unaffected.
        self.position_embedding = nn.Parameter(
            torch.zeros(1, self.patch_embedding.num_patches, embed_dim)
        )
        self.transformer_encoder = nn.Sequential(
            *[TransformerEncoder(embed_dim, num_heads, mlp_ratio) for _ in range(num_layers)]
        )
        self.classification_head = nn.Linear(embed_dim, num_classes)

    def forward(self, x):
        """Classify a batch of images.

        Args:
            x: tensor of shape (B, in_channels, image_size, image_size).

        Returns:
            dict with key ``'output'`` holding (B, num_classes) logits.
        """
        x = self.patch_embedding(x)      # (B, num_patches, embed_dim)
        x = x + self.position_embedding  # inject per-patch position information
        x = self.transformer_encoder(x)
        x = x.mean(1)                    # global average pooling over tokens
        x = self.classification_head(x)
        return {'output': x}

def ViT_T12_cifar10(**kwargs):
    """Build a 12-layer ViT configured for CIFAR-10's 32x32 RGB images."""
    cifar_config = dict(image_size=32, patch_size=4, in_channels=3, num_layers=12)
    return VisionTransformer(**cifar_config, **kwargs)



# NOTE(review): everything below is a dead, commented-out earlier prototype
# (class-token + position-embedding ViT variant). Kept for reference only;
# consider deleting once the active implementation above is settled.
# class PatchEmbedding(nn.Module):
#     def __init__(self, image_size, patch_size, in_channels, embed_dim):
#         super(PatchEmbedding, self).__init__()
#         self.image_size = image_size
#         self.patch_size = patch_size
#         self.num_patches = (image_size // patch_size) ** 2
#         self.projection = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

#     def forward(self, x):
#         x = self.projection(x)
#         x = x.flatten(2)
#         x = x.transpose(1, 2)
#         return x

# class MLP(nn.Module):
#     def __init__(self, input_dim, hidden_dim, output_dim, dropout):
#         super(MLP, self).__init__()
#         self.fc1 = nn.Linear(input_dim, hidden_dim)
#         self.fc2 = nn.Linear(hidden_dim, output_dim)
#         self.dropout = nn.Dropout(dropout)

#     def forward(self, x):
#         x = F.gelu(self.fc1(x))
#         x = self.dropout(x)
#         x = self.fc2(x)
#         return x

# class ViT_T12(nn.Module):
#     def __init__(self, image_size, patch_size, in_channels, num_classes, embed_dim=384, mlp_ratio=4, num_heads=12, num_layers=12, dropout=0.1):
#         super(ViT_T12, self).__init__()
#         self.patch_embedding = PatchEmbedding(image_size, patch_size, in_channels, embed_dim)
#         self.class_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
#         self.num_patches = (image_size // patch_size) ** 2
#         self.position_embedding = nn.Parameter(torch.zeros(1, self.num_patches + 1, embed_dim))
#         self.dropout = nn.Dropout(dropout)
#         # self.transformer_encoder = nn.TransformerEncoder(nn.TransformerEncoderLayer(embed_dim, num_heads, mlp_dim, dropout), num_layers)
#         self.transformer_encoder = nn.Sequential(
#             *[TransformerEncoder(embed_dim, num_heads, mlp_ratio) for _ in range(num_layers)]
#         )
#         self.fc = nn.Linear(embed_dim, num_classes)

#     def forward(self, x):
#         x = self.patch_embedding(x)
#         x = torch.cat((self.class_token.repeat(x.shape[0], 1, 1), x), dim=1)
#         x = x + self.position_embedding
#         x = self.dropout(x)
#         x = self.transformer_encoder(x)
#         x = x[:, 0]
#         x = self.fc(x)
#         result = {'output': x}
#         return result
# def ViT_T12_cifar10(**kwargs):
#     return ViT_T12(image_size = 32, patch_size = 4, in_channels = 3,num_layers = 12 ,**kwargs)