import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import math
import numpy as np

# Sinusoidal positional encoding
def get_positional_encoding(max_seq_len, d_model):
    """Build the sinusoidal positional-encoding matrix from "Attention Is All You Need".

    PE(pos, 2k)   = sin(pos / 10000^(2k / d_model))
    PE(pos, 2k+1) = cos(pos / 10000^(2k / d_model))

    Args:
        max_seq_len (int): maximum sequence length (number of rows).
        d_model (int): model/embedding dimension (number of columns).

    Returns:
        torch.Tensor: float32 tensor of shape (max_seq_len, d_model).
    """
    positional_encoding = np.zeros((max_seq_len, d_model))
    for pos in range(max_seq_len):
        # `i` already steps over the even indices (i = 2k), so the exponent is
        # i / d_model — the original used 2*i / d_model for sin and
        # 2*(i+1) / d_model for cos, which both doubled the exponent and gave
        # the sin/cos pair two different frequencies.
        for i in range(0, d_model, 2):
            angle = pos / (10000 ** (i / d_model))
            positional_encoding[pos, i] = math.sin(angle)
            # Guard the paired cosine column for odd d_model (the original
            # raised IndexError at i + 1 == d_model).
            if i + 1 < d_model:
                positional_encoding[pos, i + 1] = math.cos(angle)
    # Return as a float32 PyTorch tensor.
    return torch.tensor(positional_encoding, dtype=torch.float32)

# Multi-head attention
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention.

    Projects queries/keys/values into `num_heads` subspaces of size
    d_model // num_heads, attends within each head, then recombines the
    heads with a final output projection.
    """

    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        # Fail fast on an invalid configuration; the original silently
        # floored d_k and later crashed with an opaque view() shape error.
        if d_model % num_heads != 0:
            raise ValueError("d_model must be divisible by num_heads")
        self.d_model = d_model
        self.num_heads = num_heads
        self.d_k = d_model // num_heads  # per-head dimension
        self.W_q = nn.Linear(d_model, d_model)
        self.W_k = nn.Linear(d_model, d_model)
        self.W_v = nn.Linear(d_model, d_model)
        self.W_o = nn.Linear(d_model, d_model)

    def forward(self, Q, K, V, mask=None):
        """Compute attention.

        Args:
            Q, K, V: tensors of shape (batch, seq, d_model).
            mask: optional tensor broadcastable to
                (batch, num_heads, seq_q, seq_k); positions where it is 0
                are excluded from attention.

        Returns:
            Tensor of shape (batch, seq_q, d_model).
        """
        batch_size = Q.size(0)
        # Project, then split into heads: (batch, heads, seq, d_k).
        Q = self.W_q(Q).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        K = self.W_k(K).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        V = self.W_v(V).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        # Scaled dot-product scores: (batch, heads, seq_q, seq_k).
        attn_scores = torch.matmul(Q, K.transpose(-2, -1)) / math.sqrt(self.d_k)
        if mask is not None:
            # Large negative value -> ~0 probability after softmax.
            attn_scores = attn_scores.masked_fill(mask == 0, -1e9)
        # Functional softmax instead of instantiating an nn.Softmax module
        # on every forward pass.
        attn_probs = torch.softmax(attn_scores, dim=-1)
        # Weighted sum of values, then merge heads back to (batch, seq, d_model).
        attn_output = torch.matmul(attn_probs, V)
        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.d_model)
        return self.W_o(attn_output)

# Position-wise feed-forward network
class FeedForward(nn.Module):
    """Two-layer position-wise MLP: Linear -> ReLU -> Linear.

    Expands d_model to the hidden width d_ff and projects back,
    applied independently at every sequence position.
    """

    def __init__(self, d_model, d_ff):
        super(FeedForward, self).__init__()
        self.fc1 = nn.Linear(d_model, d_ff)  # expand to hidden width
        self.fc2 = nn.Linear(d_ff, d_model)  # project back to model width
        self.relu = nn.ReLU()

    def forward(self, x):
        # Single expression rather than sequential reassignment.
        return self.fc2(self.relu(self.fc1(x)))

# Encoder layer
class EncoderLayer(nn.Module):
    """One Transformer encoder layer.

    Self-attention followed by a position-wise feed-forward network,
    each wrapped in a residual connection with post-layer-norm
    (the "Add & Norm" pattern).
    """

    def __init__(self, d_model, num_heads, d_ff, dropout_rate):
        super(EncoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(d_model, num_heads)
        self.feed_forward = FeedForward(d_model, d_ff)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x, mask):
        # Sublayer 1: self-attention with residual Add & Norm.
        attended = self.self_attn(x, x, x, mask)
        x = self.norm1(x + self.dropout(attended))
        # Sublayer 2: feed-forward with residual Add & Norm.
        transformed = self.feed_forward(x)
        return self.norm2(x + self.dropout(transformed))

# Encoder stack
class Encoder(nn.Module):
    """Stack of `num_layers` identical encoder layers applied in sequence."""

    def __init__(self, num_layers, d_model, num_heads, d_ff, dropout_rate):
        super(Encoder, self).__init__()
        self.layers = nn.ModuleList(
            EncoderLayer(d_model, num_heads, d_ff, dropout_rate)
            for _ in range(num_layers)
        )

    def forward(self, x, mask):
        # Thread the representation through every layer in order.
        for encoder_layer in self.layers:
            x = encoder_layer(x, mask)
        return x

# Decoder layer
class DecoderLayer(nn.Module):
    """One Transformer decoder layer.

    Masked self-attention, encoder-decoder cross-attention, and a
    position-wise feed-forward network, each followed by a residual
    connection with post-layer-norm (Add & Norm).
    """

    def __init__(self, d_model, num_heads, d_ff, dropout_rate):
        super(DecoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(d_model, num_heads)
        self.cross_attn = MultiHeadAttention(d_model, num_heads)
        self.feed_forward = FeedForward(d_model, d_ff)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x, enc_output, src_mask, tgt_mask):
        # Sublayer 1: self-attention over the decoder input (tgt_mask applies).
        self_attended = self.self_attn(x, x, x, tgt_mask)
        x = self.norm1(x + self.dropout(self_attended))
        # Sublayer 2: cross-attention — queries from the decoder,
        # keys/values from the encoder output (src_mask applies).
        cross_attended = self.cross_attn(x, enc_output, enc_output, src_mask)
        x = self.norm2(x + self.dropout(cross_attended))
        # Sublayer 3: feed-forward.
        transformed = self.feed_forward(x)
        return self.norm3(x + self.dropout(transformed))

# Decoder stack
class Decoder(nn.Module):
    """Stack of `num_layers` identical decoder layers applied in sequence."""

    def __init__(self, num_layers, d_model, num_heads, d_ff, dropout_rate):
        super(Decoder, self).__init__()
        self.layers = nn.ModuleList(
            DecoderLayer(d_model, num_heads, d_ff, dropout_rate)
            for _ in range(num_layers)
        )

    def forward(self, x, enc_output, src_mask, tgt_mask):
        # Thread the representation through every layer in order.
        for decoder_layer in self.layers:
            x = decoder_layer(x, enc_output, src_mask, tgt_mask)
        return x

# Full encoder-decoder Transformer
class Transformer(nn.Module):
    """Encoder-decoder Transformer for sequence-to-sequence modelling.

    Embeds source/target token ids, adds fixed sinusoidal positional
    encodings, runs the encoder and decoder stacks, and projects the
    decoder output to target-vocabulary logits.

    Args:
        src_vocab_size (int): source vocabulary size.
        tgt_vocab_size (int): target vocabulary size.
        num_layers (int): number of encoder and decoder layers.
        d_model (int): model/embedding dimension.
        num_heads (int): attention heads per layer.
        d_ff (int): feed-forward hidden width.
        dropout_rate (float): dropout probability.
        max_seq_len (int): longest supported sequence (positional-encoding
            rows); defaults to the previously hard-coded 1000.
    """

    def __init__(self, src_vocab_size, tgt_vocab_size, num_layers, d_model,
                 num_heads, d_ff, dropout_rate, max_seq_len=1000):
        super(Transformer, self).__init__()
        self.encoder = Encoder(num_layers, d_model, num_heads, d_ff, dropout_rate)
        self.decoder = Decoder(num_layers, d_model, num_heads, d_ff, dropout_rate)
        self.embedding_src = nn.Embedding(src_vocab_size, d_model)
        self.embedding_tgt = nn.Embedding(tgt_vocab_size, d_model)
        # Register as a non-persistent buffer so .to(device)/.cuda() moves it
        # along with the parameters.  The original kept a plain tensor
        # attribute, which stayed on the CPU and crashed forward() on CUDA.
        # (non-persistent: keeps it out of state_dict, preserving old checkpoints)
        self.register_buffer(
            "positional_encoding",
            get_positional_encoding(max_seq_len, d_model),
            persistent=False,
        )
        # Original line contained a stray space ("tgt_vocr ab_size") — a
        # syntax error; the output projection maps d_model -> vocab logits.
        self.fc_out = nn.Linear(d_model, tgt_vocab_size)

    def forward(self, src, tgt, src_mask, tgt_mask):
        """Return target-vocabulary logits of shape (batch, tgt_len, tgt_vocab_size).

        Args:
            src: LongTensor (batch, src_len) of source token ids.
            tgt: LongTensor (batch, tgt_len) of target token ids.
            src_mask: mask applied in encoder self-attention and decoder
                cross-attention (0 = blocked), or None.
            tgt_mask: mask applied in decoder self-attention (0 = blocked),
                or None.
        """
        # (seq, d_model) slice broadcasts across the batch dimension.
        src_embedded = self.embedding_src(src) + self.positional_encoding[:src.size(1), :]
        tgt_embedded = self.embedding_tgt(tgt) + self.positional_encoding[:tgt.size(1), :]
        enc_output = self.encoder(src_embedded, src_mask)
        dec_output = self.decoder(tgt_embedded, enc_output, src_mask, tgt_mask)
        return self.fc_out(dec_output)

# Toy dataset of (source, target) token-id sequence pairs
class MyDataset(Dataset):
    """Wraps a list of (source_ids, target_ids) sequence pairs.

    Each pair is returned as a pair of LongTensors so the default
    DataLoader collate function stacks a batch into (batch, seq_len)
    tensors.  Returning the raw Python lists (as the original did) made
    default_collate emit a *list* of per-timestep tensors instead, which
    broke `src.to(device)` in the training loop.
    """

    def __init__(self, data):
        self.data = data  # list of (src_ids, tgt_ids) pairs

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        src, tgt = self.data[idx]
        # as_tensor accepts lists or existing tensors alike; long dtype is
        # required by nn.Embedding downstream.
        return (torch.as_tensor(src, dtype=torch.long),
                torch.as_tensor(tgt, dtype=torch.long))

# One training epoch
def train(model, train_loader, optimizer, criterion, device):
    """Run one training epoch and return the mean per-batch loss.

    Uses teacher forcing: the decoder input is tgt[:, :-1] and the
    prediction target is tgt[:, 1:].  A causal (lower-triangular) mask is
    built for the decoder self-attention so each position cannot attend
    to future tokens; the original passed tgt_mask=None, letting the
    model trivially copy the shifted target.

    Args:
        model: module called as model(src, tgt, src_mask=..., tgt_mask=...).
        train_loader: iterable of (src, tgt) LongTensor batches.
        optimizer: torch optimizer over model's parameters.
        criterion: loss over (N, vocab) logits vs (N,) targets.
        device: torch.device to run on.
    """
    model.train()
    total_loss = 0.0
    for src, tgt in train_loader:
        src = src.to(device)
        tgt = tgt.to(device)
        tgt_input = tgt[:, :-1]
        # Causal mask: 0 (False) entries are blocked in attention; the
        # (seq, seq) shape broadcasts over batch and heads.
        seq_len = tgt_input.size(1)
        tgt_mask = torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=device))
        optimizer.zero_grad()
        output = model(src, tgt_input, src_mask=None, tgt_mask=tgt_mask)
        # Flatten (batch, seq, vocab) vs (batch, seq) for the criterion.
        loss = criterion(output.reshape(-1, output.size(-1)), tgt[:, 1:].reshape(-1))
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(train_loader)

# 测试函数
def test(model, test_loader, criterion, device):
    model.eval()
    total_loss = 0
    with torch.no_grad():
        for batch in test_loader:
            src, tgt = batch
            src = src.to(device)
            tgt = tgt.to(device)
            output = model(src, tgt[:, :-1], src_mask=None, tgt_mask=None)
            loss = criterion(output.view(-1, output.size(-1)), tgt[:, 1:].contiguous().view(-1))
            total_loss += loss.item()
    return total_loss / len(test_loader)

# Example toy data: (source sequence, target sequence) token-id pairs.
# NOTE(review): this whole section runs at import time; consider wrapping it
# in an `if __name__ == "__main__":` guard.
data = [([1, 2, 3], [2, 3, 4]), ([3, 4, 5], [4, 5, 6])]
train_dataset = MyDataset(data)
test_dataset = MyDataset(data)
train_loader = DataLoader(train_dataset, batch_size=2, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=2)

# Model hyperparameters.
src_vocab_size = 10
tgt_vocab_size = 10
num_layers = 2
d_model = 512
num_heads = 8
d_ff = 2048
dropout_rate = 0.1
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Build the model, optimizer, and loss function.
model = Transformer(src_vocab_size, tgt_vocab_size, num_layers, d_model, num_heads, d_ff, dropout_rate).to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
criterion = nn.CrossEntropyLoss()

# Training/evaluation loop: one train pass and one eval pass per epoch.
num_epochs = 10
for epoch in range(num_epochs):
    train_loss = train(model, train_loader, optimizer, criterion, device)
    test_loss = test(model, test_loader, criterion, device)
    print(f"Epoch {epoch + 1}, Train Loss: {train_loss}, Test Loss: {test_loss}")
