import torch
import torch.nn as nn
from einops import rearrange
#from numpy import rearrange, einsum

class PatchEmb(nn.Module):
    """Split an image into non-overlapping patches, project each patch to an
    embedding vector, prepend a learnable [class] token, and add a learnable
    position embedding.

    Args:
        img_size: input image height/width (assumed square).
        img_channel: number of input channels.
        patch_size: side length of each square patch.
        out_dim: embedding dimension of each output token.
        batch_size: unused; kept for interface compatibility with callers.
    """

    def __init__(self, img_size=32, img_channel=3, patch_size=8, out_dim=256, batch_size=64) -> None:
        super().__init__()
        self.img_size = img_size
        self.img_channel = img_channel
        self.patch_size = patch_size
        # number of patches per side, squared
        self.patch_num = (img_size // patch_size) ** 2
        # a strided conv does patch splitting + linear projection in one op
        self.patch_embedding = nn.Conv2d(
            in_channels=img_channel,
            out_channels=out_dim,
            kernel_size=patch_size,
            stride=patch_size,
        )
        self.class_embedding = nn.Parameter(torch.zeros(1, 1, out_dim))
        self.position_embedding = nn.Parameter(torch.zeros(1, self.patch_num + 1, out_dim))

    def forward(self, x):
        """[B, C, H, W] -> [B, patch_num + 1, out_dim]."""
        batch = x.shape[0]
        # conv output [B, out_dim, H/p, W/p] -> token sequence [B, patch_num, out_dim]
        tokens = self.patch_embedding(x).flatten(2).transpose(1, 2)
        cls_token = self.class_embedding.expand(batch, -1, -1)
        sequence = torch.cat((cls_token, tokens), dim=1)
        return sequence + self.position_embedding
    
class VisionTransformer(nn.Module):
    """ViT backbone: patch embedding followed by a stack of standard
    nn.TransformerEncoder layers. Returns the full token sequence.

    Args:
        img_size, img_channel, patch_size, batch_size: forwarded to PatchEmb.
        emb_dim: token embedding width (d_model).
        head_num: attention heads per encoder layer.
        mlp_ratio: feed-forward hidden width = emb_dim * mlp_ratio.
        depth: number of encoder layers.
    """

    def __init__(self, img_size=32, img_channel=3, patch_size=8, emb_dim=256, batch_size=64, head_num=8, mlp_ratio=4, depth=12) -> None:
        super().__init__()
        self.patch_embedding = PatchEmb(
            img_size=img_size,
            img_channel=img_channel,
            patch_size=patch_size,
            out_dim=emb_dim,
            batch_size=batch_size,
        )
        layer = nn.TransformerEncoderLayer(
            d_model=emb_dim,
            nhead=head_num,
            dim_feedforward=emb_dim * mlp_ratio,
            batch_first=True,  # tokens arrive as [B, N, D]
        )
        self.transformer = nn.TransformerEncoder(encoder_layer=layer, num_layers=depth)

    def forward(self, x):
        """[B, C, H, W] -> [B, patch_num + 1, emb_dim]."""
        tokens = self.patch_embedding(x)
        return self.transformer(tokens)

class ViTPred(nn.Module):
    """Image classifier: VisionTransformer backbone plus a linear head
    applied to the [class] token.

    Args mirror VisionTransformer plus:
        class_num: number of output classes.
    """

    def __init__(self, img_size=32, img_channel=3, patch_size=8, emb_dim=256, batch_size=64, head_num=8, mlp_ratio=4, depth=12, class_num=10) -> None:
        super().__init__()
        self.vit = VisionTransformer(
            img_size=img_size,
            img_channel=img_channel,
            patch_size=patch_size,
            emb_dim=emb_dim,
            batch_size=batch_size,
            head_num=head_num,
            mlp_ratio=mlp_ratio,
            depth=depth,
        )
        self.pred = nn.Linear(in_features=emb_dim, out_features=class_num)

    def forward(self, x):
        """[B, C, H, W] -> class logits [B, class_num]."""
        tokens = self.vit(x)
        cls_token = tokens[:, 0]  # classify from the [class] token only
        return self.pred(cls_token)
    
class MyAttention(nn.Module):
    """Multi-head self-attention with pre-LayerNorm.

    Fixes vs. the previous version:
    - Adds a pre-norm on the input. MyFeedForward normalizes its own input,
      but attention did not, so in MyTransformer's residual blocks
      (`x = attn(x) + x`) attention operated on the raw, un-normalized
      residual stream — inconsistent with the pre-norm design.
    - Applies `dropout` to the attention weights as well; previously it was
      only applied after the output projection.
    - Uses native torch view/transpose instead of einops.rearrange (no
      behavior change; removes the third-party call).

    Args:
        emb_dim: input/output embedding width.
        head_num: number of attention heads.
        head_dim: per-head width; internal width is head_num * head_dim.
        dropout: dropout rate for attention weights and output projection.
    """

    def __init__(self, emb_dim=256, head_num=8, head_dim=64, dropout=0.):
        super().__init__()
        inner_dim = head_dim * head_num
        # a single head that already matches emb_dim needs no output projection
        project_out = not (head_num == 1 and head_dim == emb_dim)

        self.head_num = head_num
        self.scale = head_dim ** -0.5  # 1/sqrt(d_k) scaling

        self.norm = nn.LayerNorm(emb_dim)  # pre-norm, consistent with MyFeedForward
        self.attend = nn.Softmax(dim=-1)
        self.attn_dropout = nn.Dropout(dropout)
        self.to_qkv = nn.Linear(emb_dim, inner_dim * 3, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, emb_dim),
            nn.Dropout(dropout),
        ) if project_out else nn.Identity()

    def forward(self, x):
        """[B, N, emb_dim] -> [B, N, emb_dim]."""
        b, n, _, h = *x.shape, self.head_num
        x = self.norm(x)
        # (b, n, inner_dim*3) -> 3 x (b, n, inner_dim)
        qkv = self.to_qkv(x).chunk(3, dim=-1)
        # (b, n, h*d) -> (b, h, n, d)
        q, k, v = (t.view(b, n, h, -1).transpose(1, 2) for t in qkv)

        # scaled dot-product scores: (b, h, n, n)
        dots = torch.einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        attn = self.attn_dropout(self.attend(dots))

        # weighted sum of values: (b, h, n, d)
        out = torch.einsum('b h i j, b h j d -> b h i d', attn, v)
        out = out.transpose(1, 2).reshape(b, n, -1)  # (b, n, h*d)
        return self.to_out(out)

class MyFeedForward(nn.Module):
    """Pre-norm transformer MLP block.

    Pipeline: LayerNorm -> Linear(emb_dim, hidden_dim) -> GELU -> Dropout
    -> Linear(hidden_dim, emb_dim) -> Dropout. Shape-preserving on the
    last dimension.
    """

    def __init__(self, emb_dim=256, hidden_dim=1024, dropout=0.):
        super().__init__()
        stages = [
            nn.LayerNorm(emb_dim),
            nn.Linear(emb_dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, emb_dim),
            nn.Dropout(dropout),
        ]
        self.ff = nn.Sequential(*stages)

    def forward(self, x):
        """[..., emb_dim] -> [..., emb_dim]."""
        return self.ff(x)
    
class MyTransformer(nn.Module):
    """Stack of `depth` residual blocks (attention then MLP), followed by a
    final LayerNorm over the token embeddings.

    Args:
        emb_dim: token embedding width.
        head_num, head_dim, dropout: forwarded to MyAttention.
        hidden_dim: MLP hidden width, forwarded to MyFeedForward.
        depth: number of attention+MLP blocks.
    """

    def __init__(self, emb_dim=256, head_num=8, head_dim=64, dropout=0., hidden_dim=1024, depth=12):
        super().__init__()
        self.norm = nn.LayerNorm(emb_dim)
        blocks = []
        for _ in range(depth):
            blocks.append(nn.ModuleList([
                MyAttention(emb_dim=emb_dim, head_num=head_num, head_dim=head_dim, dropout=dropout),
                MyFeedForward(emb_dim=emb_dim, hidden_dim=hidden_dim, dropout=dropout),
            ]))
        self.layers = nn.ModuleList(blocks)

    def forward(self, x):
        """[B, N, emb_dim] -> [B, N, emb_dim]."""
        for attention, feed_forward in self.layers:
            x = x + attention(x)     # residual around attention
            x = x + feed_forward(x)  # residual around MLP
        return self.norm(x)
    
class MyVisionTransformer(nn.Module):
    """ViT backbone built from the hand-rolled MyTransformer instead of
    torch's nn.TransformerEncoder. Returns the full token sequence.

    Args mirror VisionTransformer plus:
        head_dim: per-head width for MyAttention.
    """

    def __init__(self, img_size=32, img_channel=3, patch_size=8, emb_dim=256, batch_size=64, head_num=8, mlp_ratio=4, depth=12, head_dim=64) -> None:
        super().__init__()
        self.patch_embedding = PatchEmb(
            img_size=img_size,
            img_channel=img_channel,
            patch_size=patch_size,
            out_dim=emb_dim,
            batch_size=batch_size,
        )
        self.transformer = MyTransformer(
            emb_dim=emb_dim,
            head_num=head_num,
            head_dim=head_dim,
            dropout=0.,
            hidden_dim=emb_dim * mlp_ratio,
            depth=depth,
        )

    def forward(self, x):
        """[B, C, H, W] -> [B, patch_num + 1, emb_dim]."""
        tokens = self.patch_embedding(x)
        return self.transformer(tokens)

class MyViTPred(nn.Module):
    """Image classifier: MyVisionTransformer backbone plus a linear head
    applied to the [class] token.

    Args mirror MyVisionTransformer plus:
        class_num: number of output classes.
    """

    def __init__(self, img_size=32, img_channel=3, patch_size=8, emb_dim=256, batch_size=64, head_num=8, mlp_ratio=4, depth=12, class_num=10, head_dim=64) -> None:
        super().__init__()
        self.vit = MyVisionTransformer(
            img_size=img_size,
            img_channel=img_channel,
            patch_size=patch_size,
            emb_dim=emb_dim,
            batch_size=batch_size,
            head_num=head_num,
            mlp_ratio=mlp_ratio,
            depth=depth,
            head_dim=head_dim,
        )
        self.pred = nn.Linear(in_features=emb_dim, out_features=class_num)

    def forward(self, x):
        """[B, C, H, W] -> class logits [B, class_num]."""
        tokens = self.vit(x)
        cls_token = tokens[:, 0]  # classify from the [class] token only
        return self.pred(cls_token)