import torch

from torch import nn
from TransformerEncoder import TransformerEncoder
from Embed import PatchEmbedding

from einops.layers.torch import Rearrange, Reduce


class ClassificationHead(nn.Sequential):
    """Mean-pool the token dimension, normalize, and project to class logits.

    Maps [N, tokens, emb_size] -> [N, n_classes].
    """

    def __init__(self, emb_size: int = 768, n_classes: int = 1000):
        # Keep the three stages in this exact order so state_dict keys
        # (0, 1, 2) match any previously saved checkpoints.
        stages = [
            Reduce('b n e -> b e', reduction='mean'),  # average over tokens
            nn.LayerNorm(emb_size),
            nn.Linear(emb_size, n_classes),
        ]
        super().__init__(*stages)


class VisionTransformer(nn.Module):
    def __init__(self,
                in_channels: int = 3,
                patch_size: int = 16,
                img_size: int = 224,
                num_heads: int = 8,
                num_layers: int = 12,
                dropout: float = 0.1,
                expand_forward: int = 4,
                n_classes: int = 100):
        """
        Vision Transformer (ViT): patch embedding -> Transformer encoder -> classification head.

        :param in_channels: number of input image channels
        :param patch_size: side length of one square patch (each patch is patch_size * patch_size)
        :param img_size: side length of the (square) input image; must be divisible by patch_size
        :param num_heads: number of attention heads
        :param num_layers: number of stacked Transformer encoder blocks
        :param dropout: dropout probability, default 0.1
        :param expand_forward: feed-forward expansion factor, e.g.
            [N, len, embed_size] -> [N, len, 4*embed_size] -> [N, len, embed_size]
        :param n_classes: number of output classes
        :raises ValueError: if img_size is not evenly divisible by patch_size
        """
        super().__init__()
        # Fail fast with a clear message: non-divisible sizes would otherwise
        # surface as an opaque shape error deep inside the patching step.
        if img_size % patch_size != 0:
            raise ValueError(
                f'img_size ({img_size}) must be divisible by patch_size ({patch_size})')
        # Embedding size = pixels per patch * channels, so each patch
        # flattens into its embedding vector without loss.
        emb_size = patch_size**2 * in_channels

        self.patch_embed = PatchEmbedding(in_channels, patch_size, emb_size, img_size)
        self.encoder = TransformerEncoder(embed_size=emb_size, num_heads=num_heads,
                                          dropout=dropout, expend=expand_forward,
                                          num_layers=num_layers)
        self.outputLayer = ClassificationHead(emb_size, n_classes)

    def forward(self, input):
        """
        Run the full ViT pipeline on a batch of images.

        :param input: [N, channel, img_size, img_size]
        :return: output: [N, n_classes]
        """
        x = self.patch_embed(input)
        x = self.encoder(x)
        x = self.outputLayer(x)
        return x


if __name__ == '__main__':
    # Smoke test: run a tiny random batch through the model and print the
    # output shape. Fall back to CPU so this also works without CUDA
    # (the original hard-coded 'cuda' and crashed on CPU-only machines).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    x = torch.rand([2, 3, 64, 64]).to(device)

    # 64x64 image with 8x8 patches -> (64/8)^2 = 64 tokens of dim 8*8*3 = 192.
    vit = VisionTransformer(
        in_channels=3,
        patch_size=8,
        img_size=64,
        num_heads=8,
        num_layers=6,
        dropout=0.1,
        expand_forward=4
    ).to(device)

    print(vit(x).shape)  # expected: torch.Size([2, n_classes])