import glob
import random
import torch
from torch.optim import Adam
from torch.utils.data import dataset, dataloader
from PIL import Image
from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F

from nlp.config import Config
from nlp.datasets import data_loader, TextCls
from nlp.models import NlpModule

# Select the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

cfg = Config()
data_path = "sources/weibo_senti_100k.csv"    # labeled Weibo sentiment corpus
data_stop_path = "sources/hit_stopwords.txt"  # stop-word list
dict_path = "sources/dict"                    # vocabulary dictionary
# NOTE(review): this rebinds the name `dataset`, shadowing the `dataset`
# module imported at the top of the file — consider renaming to eval_dataset.
dataset = TextCls(data_path, data_stop_path, dict_path)
cfg.pad_size = dataset.max_len_seq   # pad/truncate sequences to the longest seen
cfg.n_vocab = len(dataset.voc_dict)  # vocabulary size for the embedding layer

train_dataloader = data_loader(dataset, cfg.batch_size, cfg.is_shuffle)

model = NlpModule(cfg).to(device)
# map_location lets a checkpoint saved on GPU load on a CPU-only machine.
model.load_state_dict(torch.load("models_save/model.pkl", map_location=device))
# This script only evaluates: switch dropout off and use running BN statistics.
model.eval()

# Currently unused by the evaluation loop below; kept for interface parity.
loss = nn.CrossEntropyLoss()


# Measure classification accuracy over the whole dataloader.
# no_grad() disables autograd bookkeeping — pure inference, less memory.
total_correct = 0
total_samples = 0
with torch.no_grad():
    for i, batch in enumerate(train_dataloader):
        label, data = batch
        # as_tensor avoids the extra copy (and warning) when the batch
        # elements are already tensors — presumably they may be lists too.
        label = torch.as_tensor(label, dtype=torch.int64).to(device)
        data = torch.as_tensor(data).to(device)
        # Call the module itself (not .forward()) so registered hooks run.
        pred = model(data)
        pred = torch.argmax(pred, dim=1)  # predicted class index per sample
        # print(pred)

        out = torch.eq(pred, label)  # per-sample boolean correctness mask
        print(out)

        # Per-batch accuracy (original behavior) plus running totals.
        print(out.sum() * 1.0 / pred.size()[0])
        total_correct += out.sum().item()
        total_samples += pred.size(0)

# Aggregate accuracy over every batch, guarded against an empty loader.
if total_samples:
    print(f"overall accuracy: {total_correct / total_samples:.4f}")
