import torch

from torch import nn


class TransformerEncoderBlock(nn.Module):
    def __init__(self, embed_size=768, num_heads=8, dropout=0.1, expend=4):
        """
        A single pre-norm Transformer encoder block (ViT-style).

        :param embed_size: embedding dimension (for ViT patches: patch_size^2 * channels,
                           computed by the caller)
        :param num_heads: number of attention heads (must evenly divide embed_size)
        :param dropout: dropout probability, default 0.1
        :param expend: expansion factor of the feed-forward hidden layer, e.g.
                       [N, len, embed_size] -> [N, len, expend*embed_size] -> [N, len, embed_size]
        """
        super().__init__()

        self.layernorm1 = nn.LayerNorm(embed_size)
        self.self_attention = nn.MultiheadAttention(embed_size, num_heads, dropout)

        # FIX: a pre-norm block normalizes before *each* sublayer. The original
        # only had `layernorm1` (before attention); the "1" suffix implies this
        # second norm before the feed-forward sublayer was intended but missing.
        self.layernorm2 = nn.LayerNorm(embed_size)

        self.feed_forward = nn.Sequential(
            nn.Linear(embed_size, expend * embed_size),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(expend * embed_size, embed_size),
        )

    def forward(self, input):
        """
        :param input: [N, len, embed_size]
        :return: output: [N, len, embed_size]
        """
        # ---- self-attention sublayer (pre-norm) ----
        x = self.layernorm1(input)

        # nn.MultiheadAttention without batch_first expects [len, N, embed_size],
        # so move the batch dimension to dim 1 and back afterwards.
        x = x.transpose(0, 1)
        x, _ = self.self_attention(x, x, x)  # attention weights are unused
        x = x.transpose(0, 1)  # back to [N, len, embed_size]

        # residual connection around attention
        x = x + input

        # ---- feed-forward sublayer (pre-norm) ----
        out = self.feed_forward(self.layernorm2(x))

        # residual connection around the feed-forward network
        out = out + x

        return out


class TransformerEncoder(nn.Module):
    def __init__(self, embed_size=768, num_heads=8, dropout=0.1, expend=4, num_layers=6):
        """
        A stack of `num_layers` identical TransformerEncoderBlock modules.

        :param embed_size: embedding dimension (for ViT patches: patch_size^2 * channels,
                           computed by the caller)
        :param num_heads: number of attention heads
        :param dropout: dropout probability, default 0.1
        :param expend: feed-forward expansion factor, e.g.
                       [N, len, embed_size] -> [N, len, expend*embed_size] -> [N, len, embed_size]
        :param num_layers: how many encoder blocks to chain
        """
        super().__init__()

        blocks = [
            TransformerEncoderBlock(
                embed_size=embed_size,
                num_heads=num_heads,
                dropout=dropout,
                expend=expend,
            )
            for _ in range(num_layers)
        ]
        # ModuleList registers every block so their parameters are tracked.
        self.layers = nn.ModuleList(blocks)

    def forward(self, input):
        """
        Run the input through each encoder block in order; shape is preserved.

        :param input: [N, len, embed_size]
        :return: [N, len, embed_size]
        """
        out = input
        for block in self.layers:
            out = block(out)
        return out


if __name__ == '__main__':
    # Smoke test: batch of 2, sequence length 65, embedding dim 192.
    batch = torch.rand(2, 65, 192)

    model = TransformerEncoder(
        embed_size=192, num_heads=8, dropout=0.1, expend=4, num_layers=6
    )

    # Expect the encoder to preserve the input shape: torch.Size([2, 65, 192])
    print(model(batch).shape)