import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F


class Attention(nn.Module):
    """Additive (Bahdanau-style) attention over encoder outputs."""

    def __init__(self, hidden_dim):
        super(Attention, self).__init__()
        # Projects [decoder hidden; encoder output] down to hidden_dim.
        self.attn = nn.Linear(hidden_dim * 2, hidden_dim)
        # Learnable scoring vector dotted against each projected step.
        self.v = nn.Parameter(torch.rand(hidden_dim))

    def forward(self, hidden, encoder_outputs):
        """Score every encoder time step against the current decoder state.

        hidden:          (batch, hidden_dim) decoder hidden state
        encoder_outputs: (batch, seq_len, hidden_dim)
        Returns (context, weights): (batch, hidden_dim), (batch, seq_len).
        """
        n_batch, n_steps, _ = encoder_outputs.shape

        # Broadcast the decoder state across every encoder time step.
        expanded = hidden.unsqueeze(1).repeat(1, n_steps, 1)

        combined = torch.cat((expanded, encoder_outputs), dim=2)
        energy = torch.tanh(self.attn(combined))            # (batch, seq_len, hidden_dim)

        # Dot each step's energy with v -> one scalar score per step.
        score_vec = self.v.repeat(n_batch, 1).unsqueeze(1)  # (batch, 1, hidden_dim)
        scores = torch.bmm(score_vec, energy.permute(0, 2, 1)).squeeze(1)

        weights = F.softmax(scores, dim=1)                  # (batch, seq_len)
        # Weighted sum of encoder outputs -> context vector.
        context = torch.bmm(weights.unsqueeze(1), encoder_outputs).squeeze(1)
        return context, weights


class Encoder(nn.Module):
    """Embeds a source token sequence and encodes it with a single-layer LSTM."""

    def __init__(self, input_dim, hidden_dim):
        super(Encoder, self).__init__()
        self.embedding = nn.Embedding(input_dim, hidden_dim)
        self.lstm = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)

    def forward(self, src):
        """Encode `src` (batch, seq_len) token ids.

        Returns (outputs, hidden, cell):
        outputs (batch, seq_len, hidden_dim); hidden/cell (1, batch, hidden_dim).
        """
        outputs, (hidden, cell) = self.lstm(self.embedding(src))
        return outputs, hidden, cell


class Decoder(nn.Module):
    """One-step decoder: embeds the previous token, attends over the encoder
    outputs, and predicts the next-token distribution."""

    def __init__(self, output_dim, hidden_dim, attention):
        super(Decoder, self).__init__()
        self.attention = attention
        self.embedding = nn.Embedding(output_dim, hidden_dim)
        # LSTM consumes [embedded token; attention context] -> hidden_dim * 2.
        self.lstm = nn.LSTM(hidden_dim * 2, hidden_dim, batch_first=True)
        self.fc_out = nn.Linear(hidden_dim * 2, output_dim)

    def forward(self, input, hidden, cell, encoder_outputs):
        """Run a single decoding step.

        input:  (batch,) previous target token ids
        hidden: (batch, hidden_dim) decoder hidden state
        cell:   (batch, hidden_dim) decoder cell state
        encoder_outputs: (batch, seq_len, hidden_dim)

        Returns (prediction, hidden, cell, attention_weights) with shapes
        (batch, output_dim), (batch, hidden_dim), (1, batch, hidden_dim),
        (batch, seq_len).
        NOTE(review): hidden is returned squeezed but cell keeps the LSTM
        layer axis; the caller indexes it away. Asymmetric, preserved on
        purpose to keep callers working.
        """
        embedded = self.embedding(input.unsqueeze(1))  # (batch, 1, hidden_dim)

        # Attend over the encoder states using the current hidden state.
        context, attention_weights = self.attention(hidden, encoder_outputs)

        # Feed [embedding; context] to the LSTM for one step.
        step_input = torch.cat((embedded, context.unsqueeze(1)), dim=2)
        output, (hidden, cell) = self.lstm(
            step_input, (hidden.unsqueeze(0), cell.unsqueeze(0))
        )
        hidden = hidden.squeeze(0)

        # Predict from [LSTM output; context].
        prediction = self.fc_out(torch.cat((output.squeeze(1), context), dim=1))
        return prediction, hidden, cell, attention_weights


class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper that unrolls the decoder with teacher forcing."""

    def __init__(self, encoder, decoder):
        super(Seq2Seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, src, trg, teacher_forcing_ratio=0.5):
        """src: (batch, src_len); trg: (batch, trg_len).

        Returns logits of shape (batch, trg_len, output_dim).
        Slot t=0 is never written and stays all-zero (it corresponds to <sos>).
        """
        batch_size, trg_len = trg.shape
        vocab_size = self.decoder.fc_out.out_features
        outputs = torch.zeros(batch_size, trg_len, vocab_size).to(src.device)

        encoder_outputs, hidden, cell = self.encoder(src)
        # Drop the single LSTM-layer axis: (1, batch, h) -> (batch, h).
        hidden, cell = hidden[0], cell[0]

        token = trg[:, 0]  # <sos> start token
        for step in range(1, trg_len):
            logits, hidden, cell, _ = self.decoder(token, hidden, cell, encoder_outputs)
            cell = cell[0]  # decoder returns cell with the layer axis kept
            outputs[:, step, :] = logits
            # Coin flip per step: feed the gold token or our own prediction.
            use_gold = torch.rand(1).item() < teacher_forcing_ratio
            token = trg[:, step] if use_gold else logits.argmax(1)
        return outputs


# ---- Hyperparameters ----
INPUT_DIM = 50    # source-language vocabulary size
OUTPUT_DIM = 50   # target-language vocabulary size
HIDDEN_DIM = 256  # hidden layer size
LEARNING_RATE = 0.002
EPOCHS = 1000

# Fall back to CPU when CUDA is unavailable; the previous hard-coded
# 'cuda' crashed outright on CPU-only machines.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# ---- Build the model ----
attention = Attention(HIDDEN_DIM)
encoder = Encoder(INPUT_DIM, HIDDEN_DIM)
decoder = Decoder(OUTPUT_DIM, HIDDEN_DIM, attention)
model = Seq2Seq(encoder, decoder).to(device)

# ---- Loss and optimizer ----
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
criterion = nn.CrossEntropyLoss(ignore_index=0)  # ignore the padding index

# ---- Simulated data ----
src = torch.randint(1, INPUT_DIM, (32, 10)).to(device)  # (batch_size, seq_len)
trg = torch.randint(1, OUTPUT_DIM, (32, 10)).to(device)  # (batch_size, seq_len)

# ---- Training loop ----
for epoch in range(EPOCHS):
    model.train()
    optimizer.zero_grad()
    output = model(src, trg)  # (batch_size, trg_len, output_dim)
    # Skip t=0: Seq2Seq never writes outputs[:, 0] (it stays all-zero),
    # so scoring it against trg[:, 0] only adds a meaningless constant
    # to the loss. reshape (not view) because the slices are non-contiguous.
    loss = criterion(output[:, 1:].reshape(-1, OUTPUT_DIM), trg[:, 1:].reshape(-1))
    loss.backward()
    optimizer.step()
    print(f'Epoch {epoch+1}/{EPOCHS}, Loss: {loss.item()}')
