from entity.DataEntity import vocab_size, get_dataloader
from entity.Model import SMASH
from utils.Predictor import evaluate
import torch
from torch import optim
from torch import nn as nn
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import os

# Pick the GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))

# Model hyperparameters.
embed_dim = 256       # word-embedding dimension
hidden_dim = 128      # hidden-layer size
num_layers = 1        # number of recurrent layers
attention_size = 128  # attention-mechanism hyperparameter
fd_out = 32           # NOTE(review): meaning defined by SMASH — confirm in entity.Model

epochs = 10
lr = 1e-4  # same value as the original `10e-5` (== 1e-4), written unambiguously
# ff_out=2: two-way output, consistent with the one-hot labels consumed by BCELoss below.
smash = SMASH(vocab_size, embed_dim, hidden_dim, num_layers, attention_size=attention_size, fd_out=fd_out, ff_out=2).to(
    device)

# Train only when no saved model exists on disk yet.
if not os.path.exists(r"D:\data\sohu\LL_topic\smash_model.pkl"):
    print("Start Training")
    criterion = nn.BCELoss()  # binary cross-entropy; expects model outputs already in [0, 1]
    optimizer = optim.Adam(smash.parameters(), lr=lr)

    # NOTE(review): the training loop below iterates valid.txt — presumably
    # train.txt was intended for training; confirm against the data layout.
    valid_dataset = get_dataloader(r'D:\data\sohu\LL_topic\valid.txt')

    writer = SummaryWriter(r'D:\data\sohu\LL_topic\tensorboard')
    global_step = 0  # monotonically increasing step so TensorBoard points from
                     # later epochs do not overwrite earlier ones (the original
                     # passed the per-epoch `idx`, which restarts at 0 each epoch)
    for epoch in tqdm(range(epochs)):
        running_loss = 0.0
        batches_since_log = 0  # batches accumulated since the last log
        for idx, (source_w, target_w, source_s, target_s, source_p, target_p, label) in enumerate(
                valid_dataset):  # labels are expected one-hot encoded

            # Move the whole batch onto the selected device.
            source_w = source_w.to(device)
            target_w = target_w.to(device)
            source_s = source_s.to(device)
            target_s = target_s.to(device)
            source_p = source_p.to(device)
            target_p = target_p.to(device)
            label = label.to(device)

            optimizer.zero_grad()
            # NOTE(review): trailing 32 looks like a batch-size argument — confirm
            # against SMASH.forward; consider deriving it from the loader instead.
            label_ = smash(source_w, target_w, source_s, target_s, source_p, target_p, 32)
            loss = criterion(label_, label)  # prediction first, target second
            running_loss += loss.item()
            batches_since_log += 1
            loss.backward()
            optimizer.step()
            global_step += 1

            if idx % 3 == 0 and idx != 0:  # log an average loss every few batches
                print("loss", loss.item())
                # Divide by the number of batches actually accumulated: the
                # original divided by 100 while summing only ~3 batches, so the
                # logged "average" was off by more than 30x.
                writer.add_scalar('Loss/train', running_loss / batches_since_log, global_step)
                running_loss = 0.0
                batches_since_log = 0

    print("Save model to disk")
    # NOTE(review): this pickles the entire module object; loading requires the
    # exact class definitions on the import path. torch.save(smash.state_dict(), ...)
    # is the more portable form, but the loading code elsewhere may depend on
    # the current format, so it is left unchanged.
    torch.save(smash, r"D:\data\sohu\LL_topic\smash_model.pkl")
    print("Model was saved")
    writer.close()

print("Finished Training")

# ---- Evaluation ----
print("Test")
# NOTE(review): the "test" loader reads train.txt — presumably test.txt (or
# valid.txt) was intended here; confirm against the data layout on disk.
test_dataset = get_dataloader(r'D:\data\sohu\LL_topic\train.txt')
print("测试集准确率", evaluate(test_dataset))
