import torch
import torch.nn as nn


class TransformerSelfAttention(nn.Module):
    """One self-attention block: learned Q/K/V projections, multi-head
    attention, and a post-norm residual connection.

    Note: ``nn.MultiheadAttention`` defaults to ``batch_first=False``, so
    inputs are expected sequence-first, ``(seq_len, batch, input_size)``
    (or unbatched ``(seq_len, input_size)``).

    The residual connection requires ``input_size == hidden_size``;
    otherwise the addition in ``forward`` raises a shape error.

    Args:
        input_size: dimensionality of the incoming features.
        num_heads: number of attention heads (must divide ``hidden_size``).
        hidden_size: dimensionality of the projected Q/K/V and the output.
        dropout: dropout probability inside the attention module.
    """

    def __init__(self, input_size, num_heads=8, hidden_size=512, dropout=0.1):
        super(TransformerSelfAttention, self).__init__()

        self.hidden_size = hidden_size
        self.num_heads = num_heads

        # Separate learned projections into the attention space.
        self.query = nn.Linear(input_size, hidden_size)
        self.key = nn.Linear(input_size, hidden_size)
        self.value = nn.Linear(input_size, hidden_size)

        self.attention = nn.MultiheadAttention(hidden_size, num_heads, dropout=dropout)
        self.layer_norm = nn.LayerNorm(hidden_size)

    def forward(self, x):
        """Apply self-attention to ``x`` and return the layer-normed result."""
        residual = x  # keep the block input for the residual connection

        q = self.query(x)
        k = self.key(x)
        v = self.value(x)

        # Bug fix: the original computed layer_norm(x + x) — `x` had already
        # been overwritten by the attention output, so the result was the
        # attention output doubled, not a residual connection.
        attn_out, _ = self.attention(q, k, v)
        return self.layer_norm(attn_out + residual)


class TransformerAutoRegressiveModel(nn.Module):
    """A stack of self-attention blocks followed by a linear output head.

    NOTE(review): despite the name, no causal/attention mask is applied
    anywhere, so every position can attend to every other position —
    confirm whether a causal mask is intended for autoregressive use.

    Args:
        input_size: feature size of the model input.
        output_size: feature size produced by the final linear layer.
        num_layers: number of stacked attention blocks.
        num_heads: attention heads per block.
        hidden_size: internal width of each block.
        dropout: dropout probability inside each block.
    """

    def __init__(self, input_size, output_size, num_layers=6, num_heads=8, hidden_size=512, dropout=0.1):
        super(TransformerAutoRegressiveModel, self).__init__()

        # Bug fix: each block outputs `hidden_size` features, so only the
        # first block can accept `input_size`. The original passed
        # `input_size` to every block, which crashes whenever
        # input_size != hidden_size. Identical behavior when they are equal.
        self.layers = nn.ModuleList([
            TransformerSelfAttention(
                input_size if i == 0 else hidden_size,
                num_heads,
                hidden_size,
                dropout,
            )
            for i in range(num_layers)
        ])

        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Run ``x`` through every attention block, then project to ``output_size``."""
        for layer in self.layers:
            x = layer(x)

        return self.fc(x)


# Usage example.
# On torch.nn.Linear usage see https://developer.aliyun.com/article/1467540
def _demo():
    """Build the model on random data and print the output shape."""
    input_size = 512
    output_size = 10
    sample = 10

    model = TransformerAutoRegressiveModel(input_size, output_size)
    # nn.MultiheadAttention defaults to sequence-first layout; a 2D tensor
    # is treated as an unbatched (seq_len, input_size) sequence.
    input_data = torch.randn(sample, input_size)
    output = model(input_data)
    print(output.shape)  # (seq_len, output_size)


if __name__ == '__main__':
    # Fix: the demo previously ran at module import time; it is now only
    # executed when the file is run as a script.
    _demo()
    print("=====")