import math

import torch
import torch.nn as nn
import torch.nn.functional as F



class VisionTransformerBlock(nn.Module):
    """Transformer encoder block with fixed sinusoidal position encoding.

    Expects input of shape ``(batch, patch_num, embed_size)`` and returns a
    tensor of the same shape.
    """

    def __init__(self,
                 patch_num,         # number of patches the (hemisphere) image is split into
                 embed_size=128,    # feature-vector length extracted per patch
                 heads=4,           # number of heads in multi-head self-attention
                 dropout=None       # dropout probability; None means 0.
                 ):
        super(VisionTransformerBlock, self).__init__()
        if dropout is None:
            dropout = 0.
        self.patch_num = patch_num
        # Position-encoding parameter (learnable), initialized below to the
        # fixed sinusoidal encoding.
        self.position_encoding = nn.Parameter(torch.zeros(1, self.patch_num, embed_size))
        self._initialize_position_encoding(embed_size)

        # Multi-head self-attention layer.
        # batch_first=True: inputs are (batch, seq, feature). Without it the
        # encoder would treat dim 0 as the sequence axis and attend across
        # the batch instead of across the patches.
        encoder_layers = nn.TransformerEncoderLayer(embed_size, heads,
                                                    dim_feedforward=embed_size * 4,
                                                    dropout=dropout,
                                                    batch_first=True)
        # Transformer encoder module (2 stacked layers).
        self.transformer_encoder = nn.TransformerEncoder(encoder_layers, 2)

    def _initialize_position_encoding(self, embed_dim):
        """
        Fill ``self.position_encoding`` with the standard sinusoidal position
        encoding ("Attention Is All You Need").

        Args:
            embed_dim: input vector length of the Transformer module

        Returns: None

        """
        position = torch.arange(0, self.patch_num, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, embed_dim, 2).float() * (-math.log(10000.0) / embed_dim))
        position_encoding = torch.zeros(1, self.patch_num, embed_dim)
        position_encoding[:, :, 0::2] = torch.sin(position * div_term)  # even dims: sine
        position_encoding[:, :, 1::2] = torch.cos(position * div_term)  # odd dims: cosine
        # copy_ under no_grad is the supported way to overwrite a Parameter
        # in place (instead of assigning .data).
        with torch.no_grad():
            self.position_encoding.copy_(position_encoding)

    def forward(self, x, mask=None):
        """Add position encoding and run the Transformer encoder.

        Args:
            x: tensor of shape (batch, seq_len, embed_size), seq_len <= patch_num.
            mask: optional attention mask, forwarded to the encoder.

        Returns:
            Tensor of shape (batch, seq_len, embed_size).
        """
        # Out-of-place add: `x += ...` would mutate the caller's tensor and
        # fails on leaf tensors that require grad.
        x = x + self.position_encoding[:, :x.size(1), :]

        x = self.transformer_encoder(x, mask=mask)  # Shape: (B, N, embed_dim)

        return x


if __name__ == '__main__':
    # Quick smoke test on random data.
    n_patches, channels = 20, 128
    dummy_features = torch.randn(8, n_patches, channels)
    # Instantiate the Vision Transformer block
    model = VisionTransformerBlock(n_patches, embed_size=channels, heads=8, dropout=0.1)

    # Forward pass
    result = model(dummy_features)
    print(result.shape)  # output shape: [batch_size, N, channels]
