"""训练模型"""
from torch import optim
from tqdm import tqdm
import torch.nn.functional as F
import torch
import os

from seq2seq import Seq2seq
from dataset import get_loader_data
import config


# Instantiate the model on the configured device
model = Seq2seq().to(config.device)
# Instantiate the optimizer (Adam, lr=0.001)
optimizer = optim.Adam(model.parameters(), 0.001)

# Resume from a checkpoint if both files exist.
# map_location ensures a checkpoint saved on one device (e.g. GPU) can be
# loaded on another (e.g. CPU) without a deserialization error.
if os.path.exists("./model/train.model") and os.path.exists("./model/optimizer.model"):
    model.load_state_dict(torch.load("./model/train.model", map_location=config.device))
    optimizer.load_state_dict(torch.load("./model/optimizer.model", map_location=config.device))


def train(epoch):
    """Run one training epoch over the whole dataset.

    Args:
        epoch: current epoch index; used only in the progress-bar text.
    """
    # Build the DataLoader once and reuse it: the original code called
    # get_loader_data() twice, constructing the dataset a second time
    # just to compute the progress-bar total.
    data_loader = get_loader_data()
    bar = tqdm(enumerate(data_loader), ascii=True, desc="train", total=len(data_loader))
    for idx, (inputs, target, input_length, target_length) in bar:
        # inputs: [batch_size, max_len]
        # target: [batch_size, max_len]
        # input_length: original (un-padded) lengths of inputs
        # target_length: original input length + 1
        inputs = inputs.to(config.device)
        input_length = input_length.to(config.device)
        target = target.to(config.device)

        # 1. Put the model in training mode
        model.train(mode=True)

        # 2. Forward pass on one batch (not the whole dataset at once)
        # decoder_pre: [batch_size, max_len+2, vocab_size]
        decoder_pre = model(inputs, input_length)

        # 3. Compute the loss: flatten batch and time dimensions so
        #    nll_loss compares [batch*seq, vocab] against [batch*seq];
        #    padding positions are excluded via ignore_index.
        decoder_pre = decoder_pre.view(decoder_pre.size(0) * decoder_pre.size(1), -1)
        loss = F.nll_loss(decoder_pre, target.view(-1), ignore_index=config.num_seq.PAD)

        # 4. Zero the gradients
        optimizer.zero_grad()

        # 5. Backward pass
        loss.backward()

        # 6. Update the parameters
        optimizer.step()

        # 7. Report progress
        bar.set_description("epoch:{} idx:{} loss:{:.6f}".format(epoch, idx, loss.item()))

        # Periodically checkpoint model and optimizer state.
        if idx % 100 == 0:
            # Ensure the checkpoint directory exists before saving.
            os.makedirs("./model", exist_ok=True)
            torch.save(model.state_dict(), "./model/train.model")
            torch.save(optimizer.state_dict(), "./model/optimizer.model")


if __name__ == '__main__':
    # Train for two epochs.
    for epoch_idx in range(2):
        train(epoch_idx)




