import glob
import os
import random

import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torch.optim import Adam
from torch.utils.data import dataset, dataloader
from torchvision import transforms

from nlp.config import Config
from nlp.datasets import data_loader, TextCls
from nlp.models import NlpModule

# Run on the first GPU when available; everything is moved to `device` explicitly.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

cfg = Config()
data_path = "sources/weibo_senti_100k.csv"
data_stop_path = "sources/hit_stopwords.txt"
dict_path = "sources/dict"

# Renamed from `dataset` so it no longer shadows the `torch.utils.data.dataset`
# module imported at the top of the file.
train_dataset = TextCls(data_path, data_stop_path, dict_path)
cfg.pad_size = train_dataset.max_len_seq  # pad every sample to the longest sequence seen
cfg.n_vocab = len(train_dataset.voc_dict)  # vocabulary size sizes the embedding table

train_dataloader = data_loader(train_dataset, cfg.batch_size, cfg.is_shuffle)

model = NlpModule(cfg).to(device)

# CrossEntropyLoss expects raw logits from the model and int64 class indices.
loss = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=cfg.learn_rate)

# Standard supervised training loop: one optimizer step per mini-batch.
model.train()  # make sure dropout / batch-norm layers are in training mode
for epoch in range(cfg.epochs):
    for i, batch in enumerate(train_dataloader):
        label, data = batch
        # torch.as_tensor avoids the "to copy construct from a tensor" warning
        # when the loader already yields tensors, and still accepts plain lists.
        label = torch.as_tensor(label, dtype=torch.int64).to(device)
        data = torch.as_tensor(data).to(device)

        optimizer.zero_grad()
        pred = model(data)  # call the module, not .forward(), so hooks run
        loss_val = loss(pred, label)
        print("loss:", loss_val.item())
        loss_val.backward()
        optimizer.step()

# Persist model weights and optimizer state. Create the target directory first
# so torch.save does not raise FileNotFoundError on a fresh checkout.
os.makedirs("models_save", exist_ok=True)
torch.save(model.state_dict(), "models_save/model.pkl")
torch.save(optimizer.state_dict(), "models_save/opt.pkl")