import torch

from torch import nn
from torch import Tensor

from einops import repeat
from einops.layers.torch import Rearrange, Reduce


class PatchEmbedding(nn.Module):
    def __init__(self, in_channels: int = 3, patch_size: int = 16, emb_size: int = 768, img_size: int = 224):
        """
        Split an image into non-overlapping patches, linearly embed each patch,
        prepend a learnable [CLS] token and add learnable position embeddings
        (the standard ViT input pipeline).

        :param in_channels: number of input image channels
        :param patch_size: side length of each square patch (patches are patch_size * patch_size)
        :param emb_size: embedding dimension of each patch token
        :param img_size: side length of the (square) input image
        :raises ValueError: if img_size is not divisible by patch_size
        """
        super().__init__()

        # Fail fast: with a non-divisible size the conv silently drops border
        # pixels and the position table would no longer match the patch count.
        if img_size % patch_size != 0:
            raise ValueError(
                f"img_size ({img_size}) must be divisible by patch_size ({patch_size})"
            )

        self.patch_size = patch_size
        # A conv whose kernel == stride == patch_size is equivalent to cutting
        # the image into patches and applying a shared linear projection,
        # but runs faster in practice.
        self.patch_cnn = nn.Conv2d(in_channels, emb_size, kernel_size=patch_size, stride=patch_size)
        # Learnable classification token, prepended to every sequence.
        self.cls_token = nn.Parameter(torch.randn(1, 1, emb_size))
        # Learnable position embeddings: one per patch plus one for the CLS token.
        num_patches = (img_size // patch_size) ** 2
        self.positions = nn.Parameter(torch.randn(num_patches + 1, emb_size))

    def forward(self, x: Tensor) -> Tensor:
        """
        :param x: input images, shape [N, in_channels, img_size, img_size]
        :return: token sequence with position information,
                 shape [N, (img_size // patch_size) ** 2 + 1, emb_size]
        """
        N = x.shape[0]  # batch size

        # 1. Project into patch embeddings: [N, emb_size, H/ps, W/ps].
        x = self.patch_cnn(x)
        # 2. Flatten the spatial grid into a token sequence:
        #    [N, emb_size, h, w] -> [N, h*w, emb_size].
        x = x.flatten(2).transpose(1, 2)

        # 3. Broadcast the CLS token over the batch and prepend it on dim 1.
        cls_tokens = self.cls_token.expand(N, -1, -1)
        x = torch.cat([cls_tokens, x], dim=1)

        # 4. Add position encodings (broadcast over the batch dimension).
        x = x + self.positions
        return x


if __name__ == '__main__':
    # Quick smoke test: embed a batch of two 64x64 RGB images with 8x8 patches.
    sample = torch.rand([2, 3, 64, 64])

    embedder = PatchEmbedding(3, 8, 192, 64)
    print(embedder(sample).shape)