# encoding: utf-8

import torch
from torch import nn
from torch.utils.data import DataLoader
import numpy as np

from transformers import BertTokenizer

from utils.data import load_txt, split_text
from utils.models import MyDataSet


# 搭建LSTM网络
class LSTMNet(nn.Module):
    """Plain LSTM text classifier: embedding -> (stacked) LSTM -> linear head.

    Args:
        vocab_size: size of the embedding lookup table (token-id space).
        embedding_dim: dimension of each token embedding vector.
        hidden_dim: number of hidden units in each LSTM layer.
        layer_dim: number of stacked LSTM layers.
        output_dim: number of target classes (logit dimension).
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, layer_dim, output_dim):
        super().__init__()
        self.hidden_dim = hidden_dim  # LSTM hidden units per layer
        self.layer_dim = layer_dim    # number of stacked LSTM layers
        # Token-id -> dense-vector lookup table.
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        # batch_first=True so tensors are (batch, seq, feature) end to end;
        # torch's LSTM otherwise puts batch on the second axis, while the
        # DataLoader yields batch-first tensors.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, layer_dim, batch_first=True)
        self.fc1 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        """Map (batch, seq) token ids to (batch, output_dim) class logits."""
        embedded = self.embedding(x)
        # No initial state passed -> hidden/cell states start at zero.
        lstm_out, _ = self.lstm(embedded)
        # Summarize the sequence by its final time step's hidden output.
        return self.fc1(lstm_out[:, -1, :])


labels = []  # label-string registry, ordered by first appearance


def get_label(x):
    """Two-way label mapping.

    Given a string, register it (if unseen) and return its integer id;
    given an integer, return the label string it maps back to.
    """
    if not isinstance(x, str):
        # Reverse lookup: integer id -> label string.
        return labels[x]
    if x not in labels:
        labels.append(x)
    return labels.index(x)


def get_train_dataset(file_name: str):
    """Build a MyDataSet of (token-id sequence, int label) pairs from a txt file.

    Reads `file_name` via load_txt, splits it into texts and string labels,
    encodes each text to a fixed-length id sequence with the module-level
    `tokenizer` (pad/truncate to `max_length`), and converts string labels to
    stable integer ids via get_label.

    Args:
        file_name: path to the training text file.

    Returns:
        MyDataSet holding the encoded texts and their integer labels.
    """
    lines = load_txt(file_name)
    texts, str_labels = split_text(lines)
    # Encode every text to exactly max_length ids (padded/truncated).
    tokenizers = [
        tokenizer.encode(x, max_length=max_length, padding="max_length", truncation="longest_first")
        for x in texts
    ]
    # The original zipped tokens with labels only to iterate the labels;
    # label conversion does not depend on the encoded text.
    int_labels = [get_label(s) for s in str_labels]
    return MyDataSet(data=tokenizers, label=int_labels)


# Local checkpoint of a pretrained Chinese RoBERTa (wwm-ext); only its
# tokenizer/vocabulary is used here — the transformer weights are not loaded.
pretrained_model = "G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext"
tokenizer = BertTokenizer.from_pretrained(pretrained_model)

# vocab_size = len(TEXT.vocab)
vocab_size = tokenizer.vocab_size  # embedding table sized to the tokenizer vocab
embedding_dim = 100  # word-embedding dimension
hidden_dim = 128  # LSTM hidden units
layer_dim = 1  # number of stacked LSTM layers
output_dim = 10  # number of classes; NOTE(review): hard-coded — presumably
# should be len(labels) once the data is loaded, confirm against the dataset
# output_dim = len(labels)
model = LSTMNet(vocab_size, embedding_dim, hidden_dim, layer_dim, output_dim)
print(model)

# Prefer the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device=device)

train_file = "../../datas/train.txt"
batch_size = 16
max_length = 64  # tokenizer pad/truncate length (read inside get_train_dataset)

train_datasets = get_train_dataset(train_file)

train_dataloader = DataLoader(train_datasets, batch_size=batch_size)
test_dataloader = DataLoader(train_datasets, batch_size=batch_size)  # for testing convenience the same data is reused as the validation set


def train_model(_model, traindataloder, valdataloader, criterion, optimizer, num_epochs=20):
    """Train `_model` for `num_epochs`, evaluating on `valdataloader` each epoch.

    Each epoch runs one training pass (gradient updates) and one validation
    pass (no gradients), printing per-epoch loss and accuracy and, at the end,
    the full history of all four metric series.

    Args:
        _model: the nn.Module to train (must have at least one parameter).
        traindataloder: DataLoader yielding (inputs, targets) training batches.
        valdataloader: DataLoader yielding (inputs, targets) validation batches.
        criterion: loss function taking (logits, targets).
        optimizer: optimizer over `_model.parameters()`.
        num_epochs: number of full passes over the training data.

    Returns:
        The trained model (the same object passed in as `_model`).
    """
    # Per-epoch metric history.
    train_loss_all = []
    train_acc_all = []
    val_loss_all = []
    val_acc_all = []
    # Run batches on whatever device the model lives on (the original relied
    # on a module-level `device` global).
    dev = next(_model.parameters()).device
    for epoch in range(num_epochs):
        print("-" * 10)
        print("Epoch{}/{}".format(epoch, num_epochs - 1))
        train_loss = 0.0
        train_corrects = 0
        train_num = 0
        val_loss = 0.0
        val_corrects = 0
        val_num = 0
        # 1. Training phase: gradient updates enabled.
        _model.train()
        for batch in traindataloder:
            # BUG FIX: move targets to the same device as the inputs — the
            # original left them on CPU, which fails when training on GPU.
            textdata = batch[0].to(dev)
            target = batch[1].long().to(dev)
            out = _model(textdata)  # (batch, num_classes) logits
            pre_lab = torch.argmax(out, dim=1)  # predicted class per sample
            loss = criterion(out, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Weight the running loss by batch size so the epoch mean is exact
            # even when the last batch is smaller.
            train_loss += loss.item() * len(target)
            train_corrects += torch.sum(pre_lab == target.data)
            train_num += len(target)
        # Epoch-level training loss and accuracy.
        train_loss_all.append(train_loss / train_num)
        train_acc_all.append(train_corrects.double().item() / train_num)
        print("{} Train Loss:{:.4f} Train Acc:{:.4f}".format(epoch, train_loss_all[-1], train_acc_all[-1]))
        # 2. Validation phase: eval mode, gradients disabled.
        _model.eval()
        with torch.no_grad():  # the original built needless autograd graphs here
            for batch in valdataloader:
                textdata = batch[0].to(dev)
                target = batch[1].long().to(dev)
                out = _model(textdata)
                pre_lab = torch.argmax(out, 1)  # predicted labels
                loss = criterion(out, target)
                val_loss += loss.item() * len(target)
                val_corrects += torch.sum(pre_lab == target.data)
                val_num += len(target)
        # Epoch-level validation loss and accuracy.
        val_loss_all.append(val_loss / val_num)
        val_acc_all.append(val_corrects.double().item() / val_num)
        print("{} Val Loss:{:.4f} Val Acc:{:.4f}".format(epoch, val_loss_all[-1], val_acc_all[-1]))

    # Dump the full per-epoch metric history.
    print({"epoch": range(num_epochs),
           "train_loss_all": train_loss_all,
           "train_acc_all": train_acc_all,
           "val_loss_all": val_loss_all,
           "val_acc_all": val_acc_all})
    # BUG FIX: return the model that was trained, not the module-level global
    # `model` the original referenced.
    return _model


# --- Train the model ---
# Define the optimizer.
optimizer = torch.optim.Adam(model.parameters(), lr=0.003)  # learning rate 0.003
loss_func = nn.CrossEntropyLoss()  # cross-entropy loss for multi-class classification
# Iteratively train the model on the full dataset for 20 epochs.
model = train_model(model, train_dataloader, test_dataloader, loss_func, optimizer, num_epochs=20)
