import torch
from torch import nn
from backbones.text_classifier_model import TextClassifierModel
from dataset.dataloader import generate_vocab, generate_loader

if __name__ == '__main__':
    # Training entry point: fits TextClassifierModel on batches from the
    # project dataloader, optimizing binary cross-entropy with Adam.
    loader = generate_loader(batch_size=20)
    vocab = generate_vocab()

    vocab_size = len(vocab)
    embedding_dims = 256
    # Fall back to CPU when CUDA is unavailable instead of crashing at
    # the first .to(device) call on a CPU-only machine.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = TextClassifierModel(vocab_size, embedding_dims, 8, 2)
    model = model.to(device)

    # BCELoss expects the model to emit probabilities in [0, 1]
    # (i.e. a sigmoid output) and float targets of the same shape.
    criterion = nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

    epochs = 1000
    for epoch in range(epochs):
        for sentences, sentences_valid_lens, labels in loader:
            sentences = sentences.to(device)
            labels = labels.to(device)
            # NOTE(review): sentences_valid_lens is intentionally left on
            # CPU here — presumably the model uses it host-side for masking;
            # confirm against TextClassifierModel.forward.
            optimizer.zero_grad()
            # squeeze(-1) collapses the trailing size-1 output dim so the
            # prediction shape matches the labels for BCELoss.
            predicts = model(sentences, sentences_valid_lens).squeeze(-1)
            loss = criterion(predicts, labels)
            loss.backward()
            optimizer.step()

            print(f"loss--{loss.item():.4f}")