import torch

from chatbot.chatbot_core.dataset import train_data_loader
from chatbot.chatbot_core.seq2seq import Seq2seq
import chatbot.config as config
from torch.optim import Adam
import torch.nn.functional as f
import torch.nn.utils.clip_grad as clip_grad
from tqdm import tqdm

# Training workflow:
# 1. Instantiate the model, optimizer, and loss
# 2. Iterate over the dataloader
# 3. Run the forward pass to get outputs
# 4. Compute the loss and backpropagate
# 5. Save / load the model checkpoint

# Module-level model and optimizer, shared by train() below.
# The model is moved to the device configured in chatbot.config.
seq2seq = Seq2seq().to(config.device)
# Adam with a fixed learning rate of 1e-3.
optimizer = Adam(seq2seq.parameters(), lr=0.001)


def train(epoch):
    """Run one full pass over ``train_data_loader``, updating ``seq2seq`` in place.

    For each batch: forward pass, NLL loss (padding ignored), backward pass,
    gradient clipping, optimizer step. Every 100 batches the model and
    optimizer state dicts are checkpointed to the paths in ``config``.

    Args:
        epoch: Current epoch number, used only for the progress-bar label.
    """
    bar = tqdm(enumerate(train_data_loader), total=len(train_data_loader), ascii=True, desc="train")
    for index, (batch_input, target, input_length, target_length) in bar:
        # NOTE: renamed loop variable from `input` to avoid shadowing the builtin.
        batch_input = batch_input.to(config.device)
        target = target.to(config.device)
        input_length = input_length.to(config.device)
        target_length = target_length.to(config.device)

        optimizer.zero_grad()
        decoder_outputs, decoder_hidden = seq2seq(batch_input, target, input_length, target_length)
        # Flatten (batch, seq_len, vocab) -> (batch * seq_len, vocab) so that
        # nll_loss can compare against the flattened targets.
        decoder_outputs = decoder_outputs.view(decoder_outputs.size(0) * decoder_outputs.size(1), -1)
        target = target.view(-1)
        # Ignore padding positions so they contribute nothing to the loss.
        loss = f.nll_loss(decoder_outputs, target, ignore_index=config.chatbot_ws_target.PAD)
        loss.backward()
        # Gradient clipping: rescale gradients whose norm exceeds config.clip.
        clip_grad.clip_grad_norm_(seq2seq.parameters(), config.clip)
        optimizer.step()
        # Checkpoint AFTER the optimizer step so the saved state reflects the
        # update from the current batch (previously it saved stale, pre-step state).
        if index % 100 == 0:
            torch.save(seq2seq.state_dict(), config.chatbot_model_save_path)
            torch.save(optimizer.state_dict(), config.chatbot_optimizer_save_path)
        bar.set_description("epoch:{}\tindex:{}\tloss:{:.3f}".format(epoch, index, loss.item()))
