import torch
from torch import nn
from backbones.seq2seq import Seq2SeqDecoder, Seq2SeqEncoder
from backbones.encoder_decoder import EncoderDecoder
from dataset.dataset import generate_vocab, generate_loader

if __name__ == '__main__':
    # Train a Chinese -> English seq2seq translation model with teacher forcing.
    zh_vocab, en_vocab = generate_vocab()
    dataloader = generate_loader(20)  # batch size 20

    # Fall back to CPU so the script still runs on machines without a GPU
    # (the original hard-coded "cuda" and crashed otherwise).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder = Seq2SeqEncoder(len(zh_vocab), 64, 128, 2, device=device)
    decoder = Seq2SeqDecoder(len(en_vocab), 64, 128, 2, device=device)
    # Explicit .to(device) ensures every parameter of the wrapper lives on
    # `device`, regardless of what the sub-modules do with their device= arg.
    model = EncoderDecoder(encoder, decoder).to(device)

    # NOTE(review): padding tokens are currently counted in the loss; if the
    # dataset pads, pass ignore_index=<pad_idx> here — confirm against dataset.
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    epochs = 1000
    model.train()  # make training mode explicit (dropout/batchnorm, if any)
    for epoch in range(epochs):
        loss_list = []
        # len1/len2 (sequence lengths) are unused — presumably intended for
        # packing or masking; verify against generate_loader.
        for x1, x2, len1, len2 in dataloader:
            x1 = x1.to(device)
            x2 = x2.to(device)
            optimizer.zero_grad()
            # Teacher forcing: decoder input is the target without its last
            # token; the loss target is the target without its first token.
            inputs = x2[:, :-1]
            outputs = x2[:, 1:]
            predict = model(x1, inputs)
            # Assumes the model already returns logits flattened to
            # (batch * seq, vocab) — TODO confirm EncoderDecoder.forward.
            loss = criterion(predict, outputs.reshape(-1).long())
            loss.backward()
            optimizer.step()

            loss_list.append(loss.item())

        # max(..., 1) guards against ZeroDivisionError on an empty dataloader.
        avg_loss = sum(loss_list) / max(len(loss_list), 1)
        print(f"epoch:{epoch + 1}/{epochs} -- loss:{avg_loss:.4f}")
