import math
import random
import time
import torch.nn as nn
import pandas as pd
import torch
from torch.optim import SGD
from tqdm import tqdm

from application.config.config import Config
from application.model.rnn_model import RNN


class TrainValid:
    """Train and validate the RNN text classifier on BERT-encoded sentences.

    Loads a tab-separated ``category\ttext`` CSV (no header), encodes each
    sampled sentence with ``bert-base-chinese``, and feeds it token-by-token
    through the RNN.
    """

    def __init__(self):
        self.config = Config()
        self.rnn = RNN()
        # Kept for backward compatibility with any external reader of this
        # attribute; train()/valid() now build a fresh hidden state per
        # sequence instead of reusing this one.
        self.hidden = self.rnn.init_hidden()
        # Build the loss and optimizer once instead of on every step.
        # Plain SGD holds no per-step state, so this preserves behavior.
        self.criterion = nn.NLLLoss()
        self.optimizer = SGD(self.rnn.parameters(), lr=Config.LEARNING_RATE)
        # Load the data set as a list of [category, text] rows.
        self.rnn_train_data = pd.read_csv(self.config.rnn_train_data_path, header=None, sep='\t').values.tolist()

    def get_bert_encode_for_single(self, text):
        """
        Encode a sentence with the bert-base-chinese model.

        :param text: raw text string
        :return: tensor of token embeddings for the sentence,
                 shape presumed (1, seq_len, hidden) — the per-token
                 axis is indexed as dim 1 by train()/valid()
        """
        # Tokenize; [1:-1] strips the [CLS] and [SEP] special tokens.
        indexed_tokens = self.config.bert_tokenizer.encode(text)[1:-1]
        token_tensor = torch.LongTensor([indexed_tokens])
        # No gradients are needed through the frozen BERT encoder.
        with torch.no_grad():
            output = self.config.bert_model(token_tensor)
        # output[0] is the last hidden state of the BERT model.
        return output[0]

    def randomTrainingExamples(self, train_data):
        """
        Pick one random example and encode it.

        :param train_data: list of [category, text] rows
        :return: (category, text, text_tensor, category_tensor)
        """
        # NOTE(review): assumes every row has exactly two columns.
        category, text = random.choice(train_data)
        text_tensor = self.get_bert_encode_for_single(text)
        # Wrap the label as a 1-element LongTensor for NLLLoss.
        category_tensor = torch.LongTensor([int(category)])
        return category, text, text_tensor, category_tensor

    def model_train_valid(self):
        """
        Run the train/validate loop and save the trained weights.

        Every ``plot_entry`` iterations the averaged loss/accuracy for the
        interval is printed and appended to the history lists.
        """
        # Running totals for the current print interval.
        train_current_loss = 0
        train_current_acc = 0
        valid_current_loss = 0
        valid_current_acc = 0

        # Per-interval averages, kept for plotting.
        all_train_losses = []
        all_train_acc = []
        all_valid_losses = []
        all_valid_acc = []

        start = time.time()

        # Hoist the loop-invariant train/valid split (first 9000 rows
        # train, the rest validate) out of the iteration loop.
        train_split = self.rnn_train_data[:9000]
        valid_split = self.rnn_train_data[9000:]
        plot_entry = self.config.plot_entry

        # 'step' instead of 'iter' to avoid shadowing the builtin.
        for step in tqdm(range(1, self.config.n_iters + 1)):
            # One random training example and one validation example.
            category, text, text_tensor, category_tensor = self.randomTrainingExamples(train_split)
            category_valid, text_valid, text_tensor_valid, category_tensor_valid = self.randomTrainingExamples(valid_split)

            train_output, train_loss = self.train(category_tensor, text_tensor)
            valid_output, valid_loss = self.valid(category_tensor_valid, text_tensor_valid)

            # Accumulate loss and accuracy for this interval.
            train_current_loss += train_loss
            train_current_acc += (train_output.argmax(1) == category_tensor).sum().item()
            valid_current_loss += valid_loss
            valid_current_acc += (valid_output.argmax(1) == category_tensor_valid).sum().item()

            if step % plot_entry == 0:
                train_average_loss = train_current_loss / plot_entry
                train_average_acc = train_current_acc / plot_entry
                valid_average_loss = valid_current_loss / plot_entry
                valid_average_acc = valid_current_acc / plot_entry

                # Report elapsed time plus interval averages.
                print("iter: ", step, "|", "TimeSince: ", self.timeSince(start))
                print("Train Loss: ", train_average_loss, "|", "Train acc: ", train_average_acc)
                print("Valid Loss: ", valid_average_loss, "|", "Valid acc: ", valid_average_acc)

                all_train_losses.append(train_average_loss)
                all_train_acc.append(train_average_acc)
                all_valid_losses.append(valid_average_loss)
                all_valid_acc.append(valid_average_acc)

                # Reset interval accumulators.
                train_current_loss = 0
                train_current_acc = 0
                valid_current_loss = 0
                valid_current_acc = 0

        torch.save(self.rnn.state_dict(), self.config.rnn_model_save_path)

    def train(self, category_tensor, line_tensor):
        """
        One training step over a single encoded sentence.

        :param category_tensor: 1-element LongTensor label
        :param line_tensor: encoded text, token axis is dim 1
        :return: (final output, loss value as float)
        """
        self.rnn.zero_grad()
        # BUG FIX: the original passed the *initial* hidden state to every
        # time step and discarded the returned one, so the recurrent state
        # never propagated. Start fresh per sequence and thread it through.
        hidden = self.rnn.init_hidden()
        # NOTE(review): assumes at least one token per sentence; an empty
        # sequence would leave `output` unbound.
        for i in range(line_tensor.size()[1]):
            output, hidden = self.rnn(line_tensor[0][i].unsqueeze(0), hidden)
        loss = self.criterion(output, category_tensor)
        loss.backward()
        # Apply the SGD update (optimizer built once in __init__).
        self.optimizer.step()
        return output, loss.item()

    def valid(self, category_tensor, line_tensor):
        """
        One validation step over a single encoded sentence (no gradients).

        :param category_tensor: 1-element LongTensor label
        :param line_tensor: encoded text, token axis is dim 1
        :return: (final output, loss value as float)
        """
        with torch.no_grad():
            # Fresh hidden state per sequence, threaded through each step
            # (same fix as in train()).
            hidden = self.rnn.init_hidden()
            for i in range(line_tensor.size()[1]):
                output, hidden = self.rnn(line_tensor[0][i].unsqueeze(0), hidden)
            loss = self.criterion(output, category_tensor)
        return output, loss.item()

    def timeSince(self, since):
        """
        Format the elapsed time since ``since`` as ``'Xm Ys'``.

        :param since: start timestamp from time.time()
        :return: elapsed time string, e.g. '1m 5s'
        """
        elapsed = time.time() - since
        # Whole minutes plus the leftover seconds.
        minutes, seconds = divmod(elapsed, 60)
        return '%dm %ds' % (minutes, seconds)