from torch import optim
from torch.nn.utils import clip_grad_norm_

from model import *

import matplotlib

# Select the Tk backend BEFORE importing pyplot: matplotlib.use() is only
# guaranteed to take effect when called before pyplot is first imported
# (calling it afterwards relies on a runtime backend switch).
matplotlib.use('TkAgg')

import matplotlib.pyplot as plt

# Model / training hyperparameters.
EMB_DIM = 128  # word-embedding dimension
HID_DIM = 256  # hidden-state dimension
N_LAYERS = 1  # number of recurrent layers
DROPOUT = 0.1  # dropout probability
LR = 3e-3  # learning rate
EPOCHS = 300  # number of training epochs

# Build the encoder-decoder model; vocabularies, device and the seq2seq
# class all come from the `model` module (imported via `from model import *`).
encoder = Encoder(src_vocab.size, EMB_DIM, HID_DIM, N_LAYERS, DROPOUT).to(device)
decoder = Decoder(tgt_vocab.size, EMB_DIM, HID_DIM, N_LAYERS, DROPOUT).to(device)
model = seq2seq(encoder, decoder).to(device)

pad_id = tgt_vocab.stoi[PAD]
criterion = nn.CrossEntropyLoss(ignore_index=pad_id)  # cross-entropy loss that ignores padding positions
optimizer = optim.Adam(model.parameters(), lr=LR)  # Adam optimizer


# Run one training epoch over `train_data` and return the mean batch loss.
#
# For every (src, tgt_in, tgt_out) batch: forward pass, cross-entropy loss
# over the flattened (batch*time, vocab) logits, backward pass with global
# gradient-norm clipping, then an optimizer step.
def train(train_data: DataLoader,
          train_model: seq2seq,
          train_criterion: nn.CrossEntropyLoss,
          train_optimizer: optim.Optimizer):
    train_model.train()
    running_loss = 0.0
    for src_ids, tgt_in_batch, tgt_out_batch in train_data:
        # Forward pass: logits over the target vocabulary.
        logits = train_model(src_ids, tgt_in_batch)
        batch_size, seq_len, vocab_size = logits.size()
        # CrossEntropyLoss expects (N, V) predictions against (N,) targets.
        flat_logits = logits.reshape(batch_size * seq_len, vocab_size)
        flat_targets = tgt_out_batch.reshape(batch_size * seq_len)
        loss = train_criterion(flat_logits, flat_targets)
        train_optimizer.zero_grad()
        loss.backward()
        # Clip the global gradient norm to 1 to stabilize RNN training.
        clip_grad_norm_(train_model.parameters(), 1)
        train_optimizer.step()
        running_loss += loss.item()
    return running_loss / len(train_data)


# Greedy decoding for a single sentence.
#
# Encodes `sentence` with the encoder, then repeatedly feeds the decoder its
# own previous prediction (starting from <SOS>) until it emits <EOS> or
# MAX_LEN tokens have been produced; the predicted ids are decoded back to
# readable text via the target vocabulary.
def translate(translate_model: seq2seq,
              sentence: str,
              MAX_LEN: int = 20):
    translate_model.eval()
    with torch.no_grad():
        # src_ids: [1, src_len] token ids for the source sentence.
        src_ids = torch.tensor([src_vocab.encode(sentence)], device=device)
        # hidden: final encoder state, reused as the decoder's initial state.
        _, hidden = translate_model.encoder(src_ids)
        # y_prev: [1, 1] — decoding starts from the <SOS> token.
        y_prev = torch.tensor([[tgt_vocab.stoi[SOS]]], device=device)
        decoded_ids = []
        step = 0
        while step < MAX_LEN:
            logits, hidden = translate_model.decoder(y_prev, hidden)
            next_id = logits.argmax(dim=-1)
            token = next_id.item()
            if token == tgt_vocab.stoi[EOS]:
                break
            decoded_ids.append(token)
            # Feed the prediction back in as the next decoder input.
            y_prev = next_id
            step += 1
        return tgt_vocab.decode(decoded_ids)


# Train the module-level model for EPOCHS epochs, printing the loss after
# each epoch, and return the per-epoch loss history.
def main():
    history = []
    for epoch_number in range(1, EPOCHS + 1):
        epoch_loss = train(loader, model, criterion, optimizer)
        history.append(epoch_loss)
        print(f"Epoch: {epoch_number}, Loss: {epoch_loss:.4f}")
    return history


if __name__ == '__main__':
    # Train, then plot the loss curve (one point per epoch).
    loss_history = main()
    epoch_axis = range(1, len(loss_history) + 1)
    plt.figure(figsize=(8, 6))
    plt.plot(epoch_axis, loss_history)
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.title("Training Loss")
    plt.grid(True)
    plt.show()
    # Sanity-check greedy decoding on a few fixed sentences.
    sentences = [
        "i love you",
        "how are you",
        "i am fine",
        "good morning",
        "see you later",
        "i like apples",
        "this is apples",
    ]
    for sentence in sentences:
        print(f"{sentence} -> {translate(model, sentence)}")
