import math
import os

import torch
from torch import nn

from backbones.transformer import ChatNet
from dataset.dataloader import generate_loader, generate_vocab


def _sequence_mask(scores, valid_lens):
    valid_lens = torch.tensor(valid_lens, dtype=torch.float32)
    max_len = scores.shape[1]
    # 广播机制
    mask = torch.arange((max_len), dtype=torch.float32)[None, :] > valid_lens[:, None]
    mask = mask.float()
    mask[mask == 1] = float(-math.inf)
    return mask


if __name__ == '__main__':
    # Training entry point: teacher-forced seq2seq training of ChatNet,
    # checkpointing the best (lowest average) epoch loss.
    dataloader = generate_loader(batch_size=20)
    vocab = generate_vocab()

    vocab_size = len(vocab)
    embedding_dims = 512

    # Fall back to CPU when CUDA is unavailable instead of crashing at startup.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = ChatNet(vocab_size, embedding_dims, device=device)

    # NOTE(review): loss is computed over pad tokens too — consider
    # CrossEntropyLoss(ignore_index=<pad id>) once the pad index is known,
    # or reinstate a padding mask built from the valid lengths.
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

    save_dir = "./save"
    os.makedirs(save_dir, exist_ok=True)  # torch.save fails if the dir is missing

    epochs = 1000
    best_loss = math.inf
    for epoch in range(epochs):
        model.train()
        loss_list = []
        for inputs, outputs, input_valid_lens, output_valid_lens in dataloader:
            optimizer.zero_grad()
            inputs = inputs.to(device)
            outputs = outputs.to(device)

            # Teacher forcing: decoder input is outputs[:, :-1], the target
            # is the same sequence shifted left by one (outputs[:, 1:]).
            predicts = model(inputs, outputs[:, :-1])
            loss = criterion(predicts.reshape(-1, vocab_size), outputs[:, 1:].reshape(-1))
            loss.backward()
            optimizer.step()
            loss_list.append(loss.item())

        avg_loss = sum(loss_list) / len(loss_list)
        # `>=` keeps the original tie behavior: an equal loss also overwrites
        # the checkpoint.
        if best_loss >= avg_loss:
            best_loss = avg_loss
            torch.save(model.state_dict(), os.path.join(save_dir, "best.pt"))
        print(f"epoch:{epoch + 1}/{epochs} -- loss:{avg_loss:.4f}")
