# Import required packages
import random
from collections import Counter
from functools import reduce

import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from keras.preprocessing import sequence
from sklearn.utils import shuffle

# Model source for torch.hub
source = 'huggingface/pytorch-transformers'
# Name of the pretrained model to use
model_name = 'bert-base-chinese'

# Load the pretrained BERT model (downloads via torch.hub on first use)
model = torch.hub.load(source, 'model', model_name)
# Load the matching character-level tokenizer
tokenizer = torch.hub.load(source, 'tokenizer', model_name)

# Hyperparameter: maximum sentence length (in characters/tokens)
cutlen = 32


# Produce the BERT encoding for a piece of Chinese text.
def get_bert_encode(text):
    """Encode *text* with the pretrained BERT model.

    :param text: Chinese text to encode; only the first ``cutlen`` characters
                 are used.
    :return: embedding tensor for the padded sentence — presumably shape
             (cutlen, 768) for bert-base-chinese; confirm against the hub model.
    """
    # Tokenize. BERT adds the [CLS]=101 / [SEP]=102 markers, which carry no
    # information for this task, so strip the first and last ids.
    indexed_tokens = tokenizer.encode(text[:cutlen])[1:-1]
    # Pad/truncate to exactly cutlen ids (pass maxlen by keyword for clarity;
    # keras pads with zeros on the left by default).
    indexed_tokens = sequence.pad_sequences([indexed_tokens], maxlen=cutlen)
    # Wrap as a LongTensor batch of size 1.
    tokens_tensor = torch.LongTensor(indexed_tokens)
    # BUGFIX: the original comment claimed "evaluation mode" but model.eval()
    # was never called, leaving dropout active and making the embeddings noisy
    # and non-deterministic. Switch to eval mode before encoding.
    model.eval()
    with torch.no_grad():
        encoded_output, _ = model(tokens_tensor)
    # Drop the batch dimension: (1, cutlen, hidden) -> (cutlen, hidden).
    encoded_output = encoded_output[0]
    return encoded_output


# if __name__ == '__main__':
#     text = "早餐不好,服务不到位,晚餐无西餐,早餐晚餐相同,房间条件不好"
#     encoded_result = get_bert_encode(text)
#     print(encoded_result)
#     print(encoded_result.shape)


# Fine-tuning head: a single linear layer on top of the BERT encodings.
class Net(nn.Module):
    """Tiny classifier that maps a flattened BERT encoding to 2 class logits.

    :param char_size: number of token positions per sentence (matches cutlen,
                      default 32)
    :param embedding_size: BERT hidden size, 768 for bert-base-chinese
    """

    def __init__(self, char_size=32, embedding_size=768):
        super(Net, self).__init__()
        # Remember the expected input geometry so forward() can flatten.
        self.char_size = char_size
        self.embedding_size = embedding_size
        # One fully connected layer: (char_size * embedding_size) -> 2 classes.
        self.fc1 = nn.Linear(char_size * embedding_size, 2)

    def forward(self, x):
        """Flatten the encoded batch and return the 2-class logits."""
        flat = x.view(-1, self.char_size * self.embedding_size)
        logits = self.fc1(flat)
        return logits


# if __name__ == '__main__':
#     x = torch.randn(1, 32, 768)
#     net = Net()
#     nr = net(x)
#     print(nr)

net = Net()


# Build batch generators over the training/validation TSV files.
def data_loader(train_data_path, valid_data_path, batch_size):
    """Create lazy batch generators for training and validation data.

    :param train_data_path: path to the training TSV file
    :param valid_data_path: path to the validation TSV file
    :param batch_size: number of samples per yielded batch
    :return: (train generator, valid generator, train size, valid size)
    :raises ValueError: if the validation set cannot fill a single batch
    """
    # Read with header=None, then drop row 0 to discard the header line.
    train_data = pd.read_csv(train_data_path, header=None, sep="\t").drop([0])
    valid_data = pd.read_csv(valid_data_path, header=None, sep="\t").drop([0])

    # The validation set must contain at least one full batch.
    if len(valid_data) < batch_size:
        # BUGFIX: the original raised a bare string, which is itself a
        # TypeError in Python 3 ("exceptions must derive from BaseException").
        raise ValueError("Batch size or split not match!")

    def _loader_generator(data):
        """Yield (encoded batch tensor, label tensor) pairs over *data*."""
        records = data.values.tolist()
        # BUGFIX: the original reshuffled the whole dataset inside the batch
        # loop, so a single epoch could repeat some samples and skip others.
        # Shuffle once per epoch instead.
        random.shuffle(records)
        for start in range(0, len(records), batch_size):
            batch_encoded = []
            batch_labels = []
            for item in records[start:start + batch_size]:
                # item[0] is the text, item[1] the label (as used by the
                # original code) — BERT-encode each text.
                batch_encoded.append(get_bert_encode(item[0]))
                batch_labels.append(int(item[1]))
            # Stack the per-sample encodings along dim 0 into one batch tensor
            # (equivalent to the original pairwise reduce over torch.cat).
            encoded = torch.cat(batch_encoded, dim=0)
            labels = torch.tensor(batch_labels)
            # Yield one batch of data and labels.
            yield encoded, labels

    return _loader_generator(train_data), _loader_generator(valid_data), len(train_data), len(valid_data)


# if __name__ == '__main__':
#     train_data_path = "./cn_data/SST-2/train.tsv"
#     valid_data_path = "./cn_data/SST-2/dev.tsv"
#     batch_size = 16
#     train_data_labels, valid_data_labels, train_data_len, valid_data_len
#       = data_loader(train_data_path, valid_data_path, batch_size)
#     print(next(train_data_labels))
#     print(next(valid_data_labels))
#     print("train_data_len:", train_data_len)
#     print("valid_data_len:", valid_data_len)

# Import the optimization package
import torch.optim as optim

# Cross-entropy loss for the 2-class classification head
criterion = nn.CrossEntropyLoss()
# SGD with momentum; only the small classifier head's parameters are updated
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)


# One training pass over the batch generator, updating parameters per batch.
def train(train_data_labels):
    """Optimize the classifier over every batch in the generator.

    :param train_data_labels: generator yielding (batch_tensor, batch_labels)
    :return: tuple (accumulated loss over all batches,
                    accumulated count of correct predictions)
    """
    running_loss = 0.0
    running_correct = 0.0
    for batch_tensor, batch_labels in train_data_labels:
        # Clear stale gradients before each update.
        optimizer.zero_grad()
        # Forward pass through the fine-tuning head.
        outputs = net(batch_tensor)
        # Cross-entropy loss against the batch labels.
        loss = criterion(outputs, batch_labels)
        running_loss += loss.item()
        # Backpropagate and apply the update.
        loss.backward()
        optimizer.step()
        # Count correct predictions in this batch.
        predictions = outputs.argmax(1)
        running_correct += (predictions == batch_labels).sum().item()
    return running_loss, running_correct


# Evaluation pass over the validation generator (no parameter updates).
def valid(valid_data_labels):
    """Score the current model on every validation batch.

    :param valid_data_labels: generator yielding (batch_tensor, batch_labels)
    :return: tuple (accumulated loss over all batches,
                    accumulated count of correct predictions)
    """
    running_loss = 0.0
    running_correct = 0.0
    for batch_tensor, batch_labels in valid_data_labels:
        # No gradients are needed during validation.
        with torch.no_grad():
            outputs = net(batch_tensor)
            loss = criterion(outputs, batch_labels)
            running_loss += loss.item()
            # Count correct predictions in this batch.
            running_correct += (outputs.argmax(1) == batch_labels).sum().item()
    return running_loss, running_correct


# if __name__ == '__main__':
# 设定数据的路径
# train_data_path = "./cn_data/SST-2/train.tsv"
# valid_data_path = "./cn_data/SST-2/dev.tsv"
# 定义训练的轮次数
# epochs = 4
# 定义批次样本的数量
# batch_size = 16
# 按批次进行训练
# for epoch in range(epochs):
#     print("Epoch:", epoch + 1)
# 调用数据生成器函数
#     train_data_labels, valid_data_labels, train_data_len, \
#     valid_data_len = data_loader(train_data_path, valid_data_path, batch_size)
# 调用训练函数进行训练
#     train_running_loss, train_running_acc = train(train_data_labels)
# 调用验证函数进行验证
#     valid_running_loss, valid_running_acc = valid(valid_data_labels)
# 计算轮次的平均损失值和准确率
#     train_average_loss = train_running_loss * batch_size / train_data_len
#     valid_average_loss = valid_running_loss * batch_size / valid_data_len
#     train_average_acc = train_running_acc / train_data_len
#     valid_average_acc = valid_running_acc / valid_data_len
# 打印本轮次的执行信息
#     print("Train Loss:", train_average_loss, "|", "Train Acc:", train_average_acc)
#     print("Valid Loss:", valid_average_loss, "|", "Valid Acc:", valid_average_acc)

# print("Finished Training")

# 设定模型保存的路径
# MODEL_PATH = "./BERT_net.pth"
# 保存模型
# torch.save(net.state_dict(), MODEL_PATH)
# print("Finished Saving")

if __name__ == '__main__':
    MODEL_PATH = './BERT_net.pth'
    # Load the fine-tuned classifier weights saved by the (commented-out)
    # training run above
    net.load_state_dict(torch.load(MODEL_PATH))

    text = "酒店设备一般，套房里卧室的不能上网，要到客厅去。"
    print("输入文本是:", text)
    # Encode the text with BERT and predict the sentiment label (0/1)
    with torch.no_grad():
        output = net(get_bert_encode(text))
        print("预测标签是:", torch.argmax(output).item())
