import os

import torch
import torch.nn.functional as F
from torch import nn

from backbones.rnn import RNN
from dataset.dataloader import generate_loader_vocab

if __name__ == '__main__':
    # Train a character/token-level RNN language model and checkpoint the
    # best-performing weights (by average training loss) to ./save/best.pt.
    train_loader, vocab = generate_loader_vocab(batch_size=10)
    vocab_size = len(vocab)
    hidden_size = 256
    num_layers = 2  # single source of truth; also used to size h0 below

    # Fall back to CPU so the script still runs on machines without a GPU
    # (the original hard-coded "cuda" and crashed otherwise).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = RNN(vocab_size, hidden_size, num_layers=num_layers, device=device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    # Ensure the checkpoint directory exists before training starts;
    # torch.save does not create missing directories.
    os.makedirs("./save", exist_ok=True)

    epochs = 1
    best_loss = float("inf")
    for epoch in range(epochs):
        loss_list = []
        for inputs, outputs in train_loader:
            # One-hot encode inputs and move both tensors to the device.
            inputs = F.one_hot(inputs.to(device), vocab_size)
            outputs = outputs.to(device)
            optimizer.zero_grad()

            # Batches are independent sequences, so the hidden state is
            # re-initialized to zeros for every batch. Shape:
            # (num_layers, batch, hidden) — was hard-coded as 2 before.
            h0 = torch.zeros((num_layers, inputs.shape[0], hidden_size),
                             device=device)
            predicts, _ = model(inputs.float(), h0)

            # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for CE loss.
            loss = criterion(predicts.reshape(-1, vocab_size), outputs.view(-1))
            loss.backward()

            # Clip gradients element-wise to mitigate exploding gradients.
            nn.utils.clip_grad_value_(model.parameters(), clip_value=0.5)

            optimizer.step()

            loss_list.append(loss.item())

        avg_loss = sum(loss_list) / len(loss_list)
        print(f"epoch {epoch + 1}/{epochs} -- loss:{avg_loss:.4f}")

        # Only overwrite best.pt when the loss actually improves, so the
        # file's name matches its contents (was saved unconditionally).
        if avg_loss < best_loss:
            best_loss = avg_loss
            torch.save(model.state_dict(), "./save/best.pt")
