import torch
from tqdm import tqdm
import jieba
from chinese_text_classification.utils import read_vocab, word_id, clean_str

# Pick the execution device: first CUDA GPU when available, otherwise CPU.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")


# Training function
def train(train_loader, dve_loader, model, loss_func, optimizer, epoch,
          save_path='C:/Users/Administrator/PycharmProjects/pytorch/chinese_text_classification/THUCNews/saved_dict/best_model.plk'):
    """Train ``model`` for ``epoch`` epochs, validating after every epoch.

    After each epoch the model is evaluated on ``dve_loader``; whenever the
    validation accuracy improves on the best seen so far, the whole model is
    checkpointed to ``save_path`` via ``torch.save``.

    Args:
        train_loader: DataLoader yielding (batch_x, batch_y) training batches.
        dve_loader: DataLoader yielding (batch_x, batch_y) validation batches.
        model: the network to train (assumed to already live on the right
            device together with the batches — TODO confirm against caller).
        loss_func: criterion mapping (logits, targets) -> scalar loss.
        optimizer: optimizer bound to ``model``'s parameters.
        epoch: number of epochs to run.
        save_path: checkpoint destination; defaults to the original
            hard-coded path for backward compatibility.
    """
    best_acc = 0.
    for e in range(epoch):
        train_loss = 0.
        train_acc = 0.
        dev_loss = 0.
        dev_acc = 0.

        # ---- training pass ----
        model.train()
        for batch_x, batch_y in tqdm(train_loader):
            out = model(batch_x)
            loss = loss_func(out, batch_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # NOTE: per-batch *mean* losses are summed here and later divided
            # by the sample count (kept as-is to preserve reported numbers).
            train_loss += loss.item()
            pred = torch.max(out, 1)[1]
            train_acc += (pred == batch_y).sum().item()
        train_total = len(train_loader.dataset)

        # ---- validation pass ----
        model.eval()
        # no_grad: validation needs no autograd graph (saves time and memory)
        with torch.no_grad():
            for batch_x, batch_y in tqdm(dve_loader):
                out = model(batch_x)
                dev_loss += loss_func(out, batch_y).item()
                pred = torch.max(out, 1)[1]
                dev_acc += (pred == batch_y).sum().item()
        dev_total = len(dve_loader.dataset)

        print('Epoch:{}\t Train loss:{:.6f} | Train acc:{:.6f} | Test loss:{:.6f} | Test acc:{:.6f}'.format(
            e,
            train_loss / train_total,
            train_acc / train_total,
            dev_loss / dev_total,
            dev_acc / dev_total))

        epoch_acc = dev_acc / dev_total
        if epoch_acc > best_acc:
            # Bug fix: store the accuracy *ratio*, not the raw correct count.
            # The original stored the count, so after the first save the
            # ratio (<= 1.0) could never beat it and no later checkpoint
            # was ever written.
            best_acc = epoch_acc
            torch.save(model, save_path)


# Inference function (input is a raw sentence)
def test(texts,
         model_path='C:/Users/Administrator/PycharmProjects/pytorch/chinese_text_classification/THUCNews/saved_dict/best_model.plk'):
    """Classify a single Chinese sentence and print the predicted class name.

    The sentence is cleaned, tokenized with jieba, mapped to vocabulary ids
    (padded/truncated to length 35), then fed through the checkpointed model.

    Args:
        texts: raw sentence (str) to classify.
        model_path: path of the saved model; defaults to the original
            hard-coded path for backward compatibility.
    """
    classes = ['finance',
               'realty',
               'stocks',
               'education',
               'science',
               'society',
               'politics',
               'sports',
               'game',
               'entertainment']
    vocab = read_vocab()
    texts = clean_str(texts)
    # One-sentence "batch" of tokens, whitespace tokens dropped.
    texts = [[word for word in jieba.cut(texts) if word != ' ']]
    inputs = word_id(texts, vocab, 35)
    inputs = torch.LongTensor(inputs).to(device)
    # Bug fix: map_location makes a GPU-saved checkpoint loadable on a
    # CPU-only machine (and vice versa) instead of crashing.
    net = torch.load(model_path, map_location=device)
    net = net.to(device)
    # Bug fix: switch to eval mode and disable autograd for inference —
    # otherwise dropout/batch-norm layers behave as in training.
    net.eval()
    with torch.no_grad():
        outputs = net(inputs)
    _, pre = torch.max(outputs, 1)
    print('This maybe:', classes[int(pre)])


# Demo: classify one sample headline — only when run as a script, so that
# importing this module no longer triggers model loading and inference.
if __name__ == '__main__':
    a = "我国超7000种姓氏仍在使用 最长有10个字"
    test(a)
