import torch
import torch.nn as nn
import torch.optim as optim



class TransformerLayer(nn.Module):
    """One post-norm Transformer encoder layer: self-attention + feed-forward.

    Each sub-layer is wrapped in a residual connection followed by LayerNorm
    (post-norm, as in the original "Attention Is All You Need" layout).
    """

    def __init__(self, d_model, heads):
        """
        Args:
            d_model: embedding / hidden dimension (must be divisible by heads).
            heads: number of attention heads.
        """
        super(TransformerLayer, self).__init__()
        # batch_first=True so inputs are (batch, seq, d_model). The original
        # used the default batch_first=False, which expects (seq, batch, d_model);
        # this file's TransformerModel feeds batch-first tensors, so batch and
        # sequence dimensions were silently swapped inside attention.
        self.attention = nn.MultiheadAttention(d_model, heads, batch_first=True)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        # Position-wise feed-forward with the conventional 4x inner expansion.
        self.feed_forward = nn.Sequential(
            nn.Linear(d_model, d_model * 4),
            nn.ReLU(),
            nn.Linear(d_model * 4, d_model)
        )

    def forward(self, x, mask=None):
        """Apply self-attention and feed-forward, each with residual + LayerNorm.

        Args:
            x: input tensor of shape (batch, seq, d_model).
            mask: optional key_padding_mask of shape (batch, seq); True/nonzero
                positions are ignored by attention.

        Returns:
            Tensor of the same shape as ``x``.
        """
        attn_output, _ = self.attention(x, x, x, key_padding_mask=mask)
        x = self.norm1(x + attn_output)
        ff_output = self.feed_forward(x)
        return self.norm2(x + ff_output)


class TransformerModel(nn.Module):
    """Minimal Transformer LM head: embedding -> encoder stack -> vocab logits."""

    def __init__(self, vocab_size, d_model, n_layers, heads):
        """
        Args:
            vocab_size: number of distinct token ids.
            d_model: embedding / hidden dimension.
            n_layers: how many TransformerLayer blocks to stack.
            heads: attention heads per layer.
        """
        super(TransformerModel, self).__init__()
        self.embedding = nn.Embedding(vocab_size, d_model)
        stack = [TransformerLayer(d_model, heads) for _ in range(n_layers)]
        self.layers = nn.ModuleList(stack)
        self.fc_out = nn.Linear(d_model, vocab_size)

    def forward(self, x, mask=None):
        """Map token ids (batch, seq) to per-position vocab logits.

        Args:
            x: integer tensor of token indices, shape (batch, seq).
            mask: optional mask forwarded to every layer's attention.

        Returns:
            Logits tensor of shape (batch, seq, vocab_size).
        """
        hidden = self.embedding(x)
        for block in self.layers:
            hidden = block(hidden, mask)
        return self.fc_out(hidden)


# Demo hyperparameters for a tiny model.
vocab_size = 100  # assumed vocabulary size
d_model = 3       # model (embedding) dimension
n_layers = 1      # number of encoder layers
heads = 1         # number of attention heads

# Instantiate the model.
model = TransformerModel(vocab_size, d_model, n_layers, heads)

# Toy training batch: a single sequence of token indices 1, 2, 3.
input_ids = torch.tensor([[1, 2, 3]])

# Loss and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# One training step: the model is trained to reproduce its own input tokens
# (logits at each position vs. the token id at that same position).
model.train()
optimizer.zero_grad()
outputs = model(input_ids)
loss = criterion(outputs.view(-1, vocab_size), input_ids.view(-1))
loss.backward()
optimizer.step()

# Inspect the attention input-projection weights (the stacked Q/K/V matrices),
# not actual attention scores. BUG FIX: `in_proj_weight` is itself an
# nn.Parameter, so the original `in_proj_weight.weight.data` raised
# AttributeError; access `.data` on the parameter directly.
attention_scores = model.layers[0].attention.in_proj_weight.data
print(attention_scores)

if __name__ == '__main__':
    print("end")