import copy

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


# Transformer encoder layer (post-norm): self-attention and a position-wise
# feed-forward network, each wrapped in a residual connection + LayerNorm.
class EncoderLayer(nn.Module):
    """A single post-norm Transformer encoder layer.

    Args:
        d_model: embedding dimension of inputs and outputs.
        nhead: number of attention heads (must divide d_model).
        dim_feedforward: hidden width of the position-wise FFN.
        dropout: dropout probability applied to each sublayer output.
    """

    def __init__(self, d_model, nhead, dim_feedforward=256, dropout=0.1):
        super(EncoderLayer, self).__init__()
        # batch_first is left at its default, so inputs are expected as
        # (seq_len, batch, d_model) — TODO confirm against callers.
        self.self_attn = nn.MultiheadAttention(d_model, nhead)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)

    def forward(self, src):
        """Encode `src`; output has the same shape as the input."""
        # Self-attention sublayer with residual + LayerNorm.
        # Fix: the original fed the attention output straight into the FFN,
        # dropping the attention residual and leaving norm2 unused.
        attn_out = self.self_attn(src, src, src)[0]
        src = self.norm1(src + self.dropout(attn_out))
        # Position-wise feed-forward sublayer with residual + LayerNorm.
        ff_out = self.linear2(self.dropout(F.relu(self.linear1(src))))
        src = self.norm2(src + self.dropout(ff_out))
        return src


# Transformer decoder layer (post-norm): self-attention, cross-attention over
# the encoder memory, then a position-wise FFN — each with residual + LayerNorm.
class DecoderLayer(nn.Module):
    """A single post-norm Transformer decoder layer.

    Args:
        d_model: embedding dimension of inputs and outputs.
        nhead: number of attention heads (must divide d_model).
        dim_feedforward: hidden width of the position-wise FFN.
        dropout: dropout probability applied to each sublayer output.
    """

    def __init__(self, d_model, nhead, dim_feedforward=256, dropout=0.1):
        super(DecoderLayer, self).__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)

    def forward(self, tgt, memory):
        """Decode `tgt` while attending to the encoder output `memory`."""
        # Self-attention over the target sequence, with residual + LayerNorm.
        # Fix: the original dropped this residual and ran the FFN here,
        # before cross-attention, leaving norm3 unused.
        sa_out = self.self_attn(tgt, tgt, tgt)[0]
        tgt = self.norm1(tgt + self.dropout(sa_out))
        # Cross-attention: queries from the target, keys/values from memory.
        ca_out = self.multihead_attn(tgt, memory, memory)[0]
        tgt = self.norm2(tgt + self.dropout(ca_out))
        # Position-wise feed-forward sublayer comes last.
        ff_out = self.linear2(self.dropout(F.relu(self.linear1(tgt))))
        tgt = self.norm3(tgt + self.dropout(ff_out))
        return tgt


# Minimal encoder-decoder Transformer with a linear output head.
class TransformerModel(nn.Module):
    """Stack of encoder and decoder layers followed by a linear projection.

    Args:
        encoder_layer: template encoder layer; deep-copied `num_layers` times.
        decoder_layer: template decoder layer; deep-copied `num_layers` times.
        num_layers: depth of both the encoder and the decoder stacks.
        out_features: width of the output head (defaults to 1, as before).
    """

    def __init__(self, encoder_layer, decoder_layer, num_layers, out_features=1):
        super(TransformerModel, self).__init__()
        # Fix: deep-copy the template layers. The original list comprehension
        # inserted the SAME module instance at every depth, so all "layers"
        # shared one set of parameters (torch.nn.Transformer clones likewise).
        self.encoder = nn.ModuleList(
            [copy.deepcopy(encoder_layer) for _ in range(num_layers)])
        self.decoder = nn.ModuleList(
            [copy.deepcopy(decoder_layer) for _ in range(num_layers)])
        # Fix: infer d_model from the layer itself instead of reading the
        # module-level global `d_model` defined elsewhere in the file.
        d_model = encoder_layer.linear1.in_features
        self.linear = nn.Linear(d_model, out_features)

    def forward(self, src, tgt):
        """Encode `src`, decode `tgt` against the encoder output, project."""
        for layer in self.encoder:
            src = layer(src)
        for layer in self.decoder:
            tgt = layer(tgt, src)
        return self.linear(tgt)


# Hyperparameters (kept at module level so they are importable as constants).
d_model = 4
nhead = 2
num_layers = 6
dropout = 0.1


def main():
    """Build the toy Transformer, train it on one fixed example, and print results.

    Fix: the original ran all of this at import time even though an
    `if __name__ == '__main__'` guard already existed below it.
    """
    # Initialize the model from one template encoder/decoder layer each.
    encoder_layer = EncoderLayer(d_model, nhead, dropout=dropout)
    decoder_layer = DecoderLayer(d_model, nhead, dropout=dropout)
    model = TransformerModel(encoder_layer, decoder_layer, num_layers)

    # Optimizer and loss.
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.MSELoss()

    # Toy data: inputs are the operands, tgt encodes the operators.
    src = torch.tensor([[2.0, 3.0, 4.0, 5.0]])  # operands
    tgt = torch.tensor([[1.0, 1.0, 1.0, 1.0]])  # operators
    # Desired outputs: sum, difference, sum, difference.
    # NOTE(review): this target is (1, 4) while the model head emits a single
    # feature, so MSELoss silently broadcasts — confirm the intended head width
    # (TransformerModel may need out_features=4 here).
    target = torch.tensor([[3.0, 2.0, 3.0, 4.0]])

    # Training loop (the per-step debug print of `output` was removed; the
    # periodic loss report below is kept).
    epochs = 100
    for epoch in range(epochs):
        optimizer.zero_grad()
        output = model(src, tgt)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch + 1}/{epochs}], Loss: {loss.item()}')

    # Evaluate without gradient tracking.
    with torch.no_grad():
        output = model(src, tgt)
        print('输入:', src)
        print('操作符:', tgt)
        print('预测输出:', output)


if __name__ == '__main__':
    main()
    print("over")
