from backbones.transformer import ChatNet
from dataset.dataloader import generate_loader, generate_vocab
import torch
from torch import nn
from tqdm import tqdm

if __name__ == '__main__':
    # Training entry point: fits ChatNet (zh -> en translation/chat model)
    # with teacher forcing and token-level cross-entropy.
    loader = generate_loader(batch_size=20)
    vocab_zh, vocab_en = generate_vocab()
    vocab_size_zh = len(vocab_zh)
    vocab_size_en = len(vocab_en)

    # Fall back to CPU so the script still runs on machines without a GPU
    # (the original hard-required CUDA and crashed otherwise).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = ChatNet(vocab_size_zh, vocab_size_en, 512, device=device)
    # NOTE(review): assumes ChatNet moves its own parameters to `device`
    # (it receives `device` in its constructor) — confirm; otherwise an
    # explicit `model.to(device)` is required here.
    model.train()  # make train-mode behavior (dropout etc.) explicit

    # NOTE(review): no ignore_index is set, so loss is also computed on
    # padding positions; if the pad token id is known, pass
    # ignore_index=<pad_id> here — confirm against the vocab.
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), 0.0001)

    epochs = 1000
    for epoch in range(epochs):
        loop = tqdm(loader)
        loss_list = []
        for in_pad, out_pad, in_valid_len, out_valid_len in loop:
            optimizer.zero_grad()
            in_pad = in_pad.to(device)
            out_pad = out_pad.to(device)
            # NOTE(review): valid-length tensors are left on CPU as in the
            # original — confirm ChatNet expects them there.
            # Teacher forcing: decoder input drops the final token,
            # the target is the same sequence shifted left by one.
            predicts = model(in_pad, out_pad[:, :-1], in_valid_len, out_valid_len)
            loss = criterion(predicts.reshape(-1, vocab_size_en), out_pad[:, 1:].reshape(-1))
            loss.backward()
            # BUG FIX: clip gradients BEFORE optimizer.step(). The original
            # clipped after the step, so clipping never affected the update.
            nn.utils.clip_grad_value_(model.parameters(), clip_value=0.5)
            optimizer.step()

            loop.set_description(f"batch_loss:{loss.item():.4f}")
            loss_list.append(loss.item())

        avg_loss = sum(loss_list) / len(loss_list)
        print(f"epoch {epoch + 1}/{epochs} -- loss:{avg_loss:.4f}")
