"""
模型的训练
"""
import torch
from torch import optim
from tqdm import tqdm
import torch.nn as nn
import os

from chatbot.seq2seq import Seq2seq
from chatbot.dataset import train_data_loader
import config

# 1. 创建模型对象、优化器对象
model = Seq2seq().to(config.device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)

# 2. 加载模型和优化器
if os.path.exists(config.chatbot_save_model_path):
    model.load_state_dict(torch.load(config.chatbot_save_model_path))
    optimizer.load_state_dict(torch.load(config.chatbot_save_optimizer_path))


# 3. Training loop for a single epoch.
def train(epoch):
    """Run one epoch of teacher-forced training over ``train_data_loader``.

    Args:
        epoch: epoch index; used only in the progress-bar description.

    Side effects:
        Updates the module-level ``model`` and ``optimizer`` in place and
        checkpoints both state dicts every 100 batches.
    """
    bar = tqdm(enumerate(train_data_loader), total=len(train_data_loader), ascii=True, desc="训练")
    for idx, (input_data, target_data, input_length, target_length) in bar:
        # input_data:    [batch_size, max_len]
        # input_length:  [batch_size,]
        # target_data:   [batch_size, max_len+1]
        # target_length: [batch_size,]
        input_data = input_data.to(config.device)
        input_length = input_length.to(config.device)
        target_data = target_data.to(config.device)
        target_length = target_length.to(config.device)

        # 3.1 Forward pass (teacher forcing: the decoder is fed target_data).
        # decoder_predict: [batch_size, max_len+1, vocab_size]
        decoder_predict = model(input_data, input_length, target_data)

        # 3.2 Compute the loss: flatten to [batch*seq, vocab] vs [batch*seq].
        # NOTE(review): nll_loss expects log-probabilities — assumes Seq2seq
        # ends in log_softmax; confirm. Padding positions are also counted in
        # the loss; consider passing ignore_index=<PAD id> if one is defined.
        decoder_predict = decoder_predict.view(-1, decoder_predict.size(-1))
        target_data = target_data.view(-1)
        loss = nn.functional.nll_loss(decoder_predict, target_data)

        # 3.3 Zero stale gradients.
        optimizer.zero_grad()

        # 3.4 Backpropagate.
        loss.backward()

        # 3.5 Update parameters.
        optimizer.step()

        bar.set_description("epoch:{} idx:{} loss:{:.6f}".format(epoch, idx, loss.item()))

        # Periodically checkpoint both model and optimizer state.
        if idx % 100 == 0:
            torch.save(model.state_dict(), config.chatbot_save_model_path)
            torch.save(optimizer.state_dict(), config.chatbot_save_optimizer_path)


if __name__ == '__main__':
    # Script entry point: run ten training epochs back to back.
    for epoch_idx in range(10):
        train(epoch_idx)
