import random
import time

import math
import pandas as pd
from collections import Counter
import torch
import torch.nn as nn
from bert_chinese_encode import get_bert_encode_for_single
from rnn_model import RNN
from torch.optim import SGD
import matplotlib.pyplot as plt

# Load the training data (tab-separated, no header row: column 0 = label, column 1 = text).
train_data_path = "./train_data.csv"
train_data= pd.read_csv(train_data_path, header=None, sep="\t")

# Inspect the positive/negative label ratio (debug aid).
# print(dict(Counter(train_data[0].values)))

# Convert the DataFrame to a list of [label, text] rows.
train_data = train_data.values.tolist()
# print(train_data[:10])

# SGD learning rate.
learning_rate = 0.005

# Model hyperparameters.
input_size = 768  # dimensionality of the BERT encoder output
hidden_size = 128  # hidden state size (chosen by hand)
n_categories = 2  # number of output classes

rnn = RNN(input_size, hidden_size, n_categories)  # instantiate the model
# outputs, hidden = rnn(input, hidden)
optimizer = SGD(rnn.parameters(), lr=learning_rate)

# NLLLoss pairs with a log-softmax output from the model.
# (CrossEntropyLoss is equivalent to Softmax + Log + NLLLoss combined.)
criterion = nn.NLLLoss()


def train(category_tensor, text_tensor):
    """
    Run one training step on a single sample.

    :param category_tensor: ground-truth label tensor, shape (1,)
    :param text_tensor: BERT-encoded text tensor, iterated one token at a time
    :return: (final model output, loss value as a Python float)
    """
    hidden = rnn.init_hidden()
    rnn.zero_grad()

    # Feed the sequence token by token, threading the hidden state through.
    seq_len = text_tensor.size(1)
    for step in range(seq_len):
        token = text_tensor[0][step].unsqueeze(0)
        output, hidden = rnn(token, hidden)

    # Loss is computed on the output after the last token only.
    loss = criterion(output, category_tensor)
    loss.backward()
    optimizer.step()

    return output, loss.item()


def random_train_example(train_data):
    """
    Pick one raw sample at random and convert it to tensors.

    :param train_data: list of (label, text) rows
    :return: (label, text, label tensor, BERT-encoded text tensor)
    """
    sample = random.choice(train_data)
    category, text = sample

    # Encode the raw text with BERT and wrap the label as a 1-element tensor.
    text_tensor = get_bert_encode_for_single(text)
    category_tensor = torch.tensor([int(category)])

    return category, text, category_tensor, text_tensor

def valid(category_tensor, text_tensor):
    """
    Evaluate a single sample without updating model weights.

    :param category_tensor: ground-truth label tensor, shape (1,)
    :param text_tensor: BERT-encoded text tensor
    :return: (final model output, loss value as a Python float)
    """
    hidden = rnn.init_hidden()

    # No gradients needed for validation.
    with torch.no_grad():
        seq_len = text_tensor.size(1)
        for step in range(seq_len):
            token = text_tensor[0][step].unsqueeze(0)
            output, hidden = rnn(token, hidden)
        loss = criterion(output, category_tensor)

    return output, loss.item()

# 构建时间计算函数
def timeSince(since):
    "获得每次打印的训练耗时，since是训练开始时间"
    # 获得当前时间
    now = time.time()
    # 获得时间差，就是训练耗时
    s = now - since
    # 将秒转化为分钟，并取整
    m = math.floor(s / 60)
    # 计算剩下不够凑成1分钟的秒数
    s -= m * 60
    # 返回指定格式的耗时
    return '%dm %ds' % (m, s)


def main():
    """
    Training driver: alternate one training and one validation sample per step,
    log window-averaged loss/accuracy every `plot_every` steps, checkpoint the
    model at each logging interval, and save loss/accuracy curves to
    ./loss.png and ./acc.png.
    """
    # Total number of training iterations.
    n_iters = 50000

    # Logging / checkpoint interval, in steps.
    plot_every = 1000

    # FIX: hoist the train/valid split out of the loop. Each slice copies the
    # list, and the original recomputed both copies on every iteration.
    # First 10000 rows train, the rest validate — same split as before.
    train_split = train_data[:10000]
    valid_split = train_data[10000:]

    # Running sums for the current logging window.
    train_current_loss = 0
    train_current_acc = 0
    valid_current_loss = 0
    valid_current_acc = 0

    # Per-window averages, collected for plotting at the end.
    all_train_losses = []
    all_train_acc = []
    all_valid_losses = []
    all_valid_acc = []

    # Wall-clock start, for progress reporting.
    start = time.time()

    for i in range(1, n_iters + 1):
        category, text, category_tensor, text_tensor = random_train_example(train_split)
        category_valid, text_valid, category_tensor_valid, text_tensor_valid = random_train_example(valid_split)

        # One training step and one held-out evaluation step.
        train_output, train_loss = train(category_tensor, text_tensor)
        valid_output, valid_loss = valid(category_tensor_valid, text_tensor_valid)

        # Accumulate loss and accuracy. One sample per step, so the acc sums
        # count hits and dividing by plot_every below yields a mean accuracy.
        train_current_loss += train_loss
        train_current_acc += (train_output.argmax(1) == category_tensor).sum().item()

        valid_current_loss += valid_loss
        valid_current_acc += (valid_output.argmax(1) == category_tensor_valid).sum().item()

        # Report and checkpoint every plot_every steps.
        if i % plot_every == 0:
            train_average_loss = train_current_loss / plot_every
            train_average_acc = train_current_acc / plot_every

            valid_average_loss = valid_current_loss / plot_every
            valid_average_acc = valid_current_acc / plot_every

            # Print iteration, elapsed time, and windowed train/valid metrics.
            print("Iter:", i, "|", "TimeSince:", timeSince(start))
            print("Train Loss:", train_average_loss, "|", "Train Acc:", train_average_acc)
            print("Valid Loss:", valid_average_loss, "|", "Valid Acc:", valid_average_acc)

            # Keep the window averages for plotting.
            all_train_losses.append(train_average_loss)
            all_train_acc.append(train_average_acc)

            all_valid_losses.append(valid_average_loss)
            all_valid_acc.append(valid_average_acc)

            # Reset the window accumulators.
            train_current_loss = 0
            train_current_acc = 0
            valid_current_loss = 0
            valid_current_acc = 0

            # Checkpoint the model weights for this interval.
            MODEL_PATH = f'./BERT_RNN_{i}.pth'
            torch.save(rnn.state_dict(), MODEL_PATH)

    # Loss curves.
    plt.figure(0)
    plt.plot(all_train_losses, label="Train Loss")
    plt.plot(all_valid_losses, color="red", label="Valid Loss")
    plt.legend(loc='upper left')
    plt.savefig("./loss.png")

    # Accuracy curves.
    plt.figure(1)
    plt.plot(all_train_acc, label="Train Acc")
    plt.plot(all_valid_acc, color="red", label="Valid Acc")
    plt.legend(loc='upper left')
    plt.savefig("./acc.png")



if __name__ == '__main__':
    # Single-sample smoke test (debug aid):
    # category, text, category_tensor, text_tensor = random_train_example(train_data)

    # print(train(category_tensor, text_tensor))
    main()