"""
训练模型
"""
from torch import nn, optim
from lib import ws
from torch.nn import functional as F
from dataset import get_dataloader
import numpy as np
import lib
import torch
import os
from tqdm import tqdm


class MyModel(nn.Module):
    """Bidirectional LSTM classifier for binary text classification.

    Embeds token ids, runs a multi-layer bidirectional LSTM, concatenates
    the forward direction's last-step output with the backward direction's
    last-step output, and projects to 2 classes with log-softmax.
    """

    def __init__(self):
        super().__init__()
        # Vocabulary size comes from the project-level word-to-index map `ws`.
        self.embedding = nn.Embedding(len(ws), lib.input_size)
        self.lstm = nn.LSTM(input_size=lib.input_size, hidden_size=lib.hidden_size,
                            num_layers=lib.num_layers, dropout=lib.dropout,
                            batch_first=True, bidirectional=True)
        # Forward + backward hidden states are concatenated -> 2 * hidden_size.
        self.fc = nn.Linear(lib.hidden_size * 2, 2)

    def forward(self, input):
        """
        :param input: LongTensor of token ids, shape [batch_size, seq_len]
        :return: log-probabilities, shape [batch_size, 2]
        """
        # [batch_size, seq_len] -> [batch_size, seq_len, input_size]
        embed = self.embedding(input)
        # output: [batch_size, seq_len, hidden_size * 2]
        # h_n / c_n: [2 * num_layers, batch_size, hidden_size]
        output, (h_n, c_n) = self.lstm(embed)

        # Last time step of the forward direction (equivalently h_n[-2, :, :]).
        # NOTE(review): with padded batches this reads pad positions — assumes
        # fixed-length inputs; confirm against the dataloader.
        output_fw = output[:, -1, :lib.hidden_size]
        # First time step of the backward direction (equivalently h_n[-1, :, :]).
        output_bw = output[:, 0, lib.hidden_size:]
        # Concatenate both directions: [batch_size, 2 * hidden_size]
        combined = torch.cat([output_fw, output_bw], dim=-1)

        out = self.fc(combined)  # [batch_size, 2]
        # Return log-probabilities so callers can apply F.nll_loss directly.
        # (The original named this `loss`, but it is not a loss value.)
        return F.log_softmax(out, dim=-1)


# Instantiate the model and optimizer.
model = MyModel().to(lib.device)
optimizer = optim.Adam(model.parameters(), 0.001)
# Restore a previous checkpoint if one exists.
# map_location is required so a checkpoint saved on GPU still loads on a
# CPU-only host (torch.load would otherwise try to deserialize to CUDA).
if os.path.exists("./model/model.pkl"):
    model.load_state_dict(torch.load("./model/model.pkl", map_location=lib.device))
    optimizer.load_state_dict(torch.load("./model/optimizer.pkl", map_location=lib.device))


def train(epoch):
    """Run one training epoch over the train dataloader.

    Prints the loss for every batch and checkpoints the model/optimizer
    state every 100 batches.

    :param epoch: epoch index (used only for progress printing)
    """
    # Switch to training mode once, not once per batch.
    model.train(mode=True)
    # Ensure the checkpoint directory exists before the first torch.save,
    # which would otherwise fail on a fresh clone.
    os.makedirs("./model", exist_ok=True)

    loader_data = get_dataloader()
    for idx, (input, target) in enumerate(loader_data):
        input = input.to(lib.device)
        target = target.to(lib.device)

        # Forward pass: log-probabilities of shape [batch_size, 2].
        output = model(input)

        # NLL loss over log-softmax output (prediction, ground truth).
        loss = F.nll_loss(output, target)

        # Standard step: clear gradients, backprop, update parameters.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        print(epoch, idx, loss.item())

        if idx % 100 == 0:
            # Periodic checkpoint of model and optimizer state.
            torch.save(model.state_dict(), "./model/model.pkl")
            torch.save(optimizer.state_dict(), "./model/optimizer.pkl")


def test():
    """Evaluate the model on the test set.

    Prints the mean NLL loss and mean per-batch accuracy.
    """
    model.eval()
    loss_list = []
    acc_list = []
    test_data = get_dataloader(train=False, batch_size=lib.test_batch_size)
    for idx, (input, target) in tqdm(enumerate(test_data), total=len(test_data)):
        input = input.to(lib.device)
        target = target.to(lib.device)
        with torch.no_grad():
            output = model(input)  # log-probabilities [batch_size, 2]
            cur_loss = F.nll_loss(output, target)
            loss_list.append(cur_loss.cpu().item())
            # Predicted class per row; identical to output.max(dim=-1)[-1]
            # but states the intent directly.
            pred = output.argmax(dim=-1)
            # Fraction of correct predictions in this batch.
            cur_acc = pred.eq(target).float().mean()
            acc_list.append(cur_acc.cpu().item())

    print(np.mean(loss_list), np.mean(acc_list))


if __name__ == '__main__':
    # Training is currently disabled; uncomment to train before evaluating.
    # for epoch in range(5):
    #     train(epoch)
    test()
