import torch
import os
# datetime: used to time each training/validation epoch
from datetime import datetime
from torch.optim import AdamW

# transformers: GPT-2 model definition and its configuration
import transformers
from transformers import GPT2LMHeadModel, GPT2Config

from parameter_config import TrainConfig
from data_preprocess.dataloader import get_dataloader
# data loading: builds the train/validation dataloaders
from model_evaluation import ModelEvaluation
from tokenizer import Tokenizer


class TrainGPT2:
    """Trainer for a GPT-2 language model.

    Wires together config, tokenizer, dataloaders, optimizer and LR
    scheduler, runs the train/validate loop, and checkpoints the weights.
    """

    def __init__(self):
        # Hyper-parameters and file paths come from the project config.
        self.config = TrainConfig()
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # If the machine has more than one GPU you can select explicitly:
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'     -> first GPU only
        # os.environ["CUDA_VISIBLE_DEVICES"] = '1'     -> second GPU only
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0, 1'  -> use both GPUs

        self.model_evaluation = ModelEvaluation()

        self.save_model_path = self.build_save_model_path()
        # Training and validation dataloaders.
        self.train_dataloader, self.validate_dataloader = get_dataloader(self.config.train_path,
                                                                         self.config.valid_path)

        self.tokenizer = Tokenizer(self.config.tokenizer_path).get_tokenizer()
        # Populated later by loading_model() / training().
        self.model = None
        self.optimizer = None
        self.scheduler = None

    def build_save_model_path(self):
        """
        Build (and create if necessary) the checkpoint output directory.
        :return: the checkpoint directory path
        """
        save_model_path = self.config.save_model_path
        # makedirs(exist_ok=True) already tolerates an existing directory,
        # so no separate os.path.exists() check is needed.
        os.makedirs(save_model_path, exist_ok=True)
        return save_model_path

    def loading_model(self):
        """
        Load a pretrained GPT-2 checkpoint, or initialise a fresh model
        from the JSON config, and move it to the target device.
        :raises ValueError: if model and tokenizer vocab sizes disagree
        """
        if self.config.pretrained_model:  # resume from a pretrained checkpoint
            self.model = GPT2LMHeadModel.from_pretrained(self.config.pretrained_model)
        else:  # build a randomly initialised model from the JSON config
            model_config = GPT2Config.from_json_file(self.config.model_config)
            print(model_config)
            self.model = GPT2LMHeadModel(config=model_config)
        self.model = self.model.to(self.device)
        # Explicit exception instead of `assert`: asserts are stripped under
        # `python -O`, so a vocab mismatch must fail loudly on its own.
        if self.model.config.vocab_size != self.tokenizer.vocab_size:
            raise ValueError(
                f"model vocab_size ({self.model.config.vocab_size}) != "
                f"tokenizer vocab_size ({self.tokenizer.vocab_size})")

    def calculate_model_parameters(self):
        """
        Print the total number of model parameters.
        :return: None
        """
        num_parameters = sum(parameter.numel() for parameter in self.model.parameters())
        print(f'模型参数总量---》{num_parameters}')

    def get_optimizer(self):
        """
        Build the AdamW optimizer over all model parameters.
        :return: torch.optim.AdamW instance
        """
        return AdamW(self.model.parameters(), lr=self.config.lr, eps=self.config.eps)

    def get_scheduler(self):
        """
        Build a linear warmup + linear decay learning-rate scheduler.

        Warmup ramps the LR from 0 to the configured value over
        `warmup_steps` optimizer steps (stabilises early training and helps
        avoid gradient blow-ups), then decays it linearly to 0 over the
        remaining steps.
        :return: the transformers LR scheduler
        """
        # Number of batches in one pass over the training data.
        train_length = len(self.train_dataloader)
        # Total optimizer steps over the whole run: with gradient
        # accumulation, only every `gradient_accumulation_steps`-th batch
        # triggers an optimizer/scheduler step.
        t_total = train_length // self.config.gradient_accumulation_steps * self.config.epochs
        scheduler = transformers.get_linear_schedule_with_warmup(
            self.optimizer,
            num_warmup_steps=self.config.warmup_steps,
            num_training_steps=t_total)
        return scheduler

    def train_batch(self, epoch, batch_index, inputs, labels_ids):
        """
        Run one training batch: forward, loss, backward, and (every
        `gradient_accumulation_steps` batches) an optimizer/scheduler step.
        :param epoch: current epoch number (1-based)
        :param batch_index: current batch number within the epoch (1-based)
        :param inputs: input token ids
        :param labels_ids: label token ids; positions equal to
                           `ignore_index` are excluded from loss/accuracy
        :return: (correct token count, total token count, loss value)
        """
        ignore_index = self.config.ignore_index
        inputs = inputs.to(self.device)
        labels_ids = labels_ids.to(self.device)
        # Passing `labels` makes the model compute the LM cross-entropy loss
        # itself; the returned object carries both `loss` and `logits`.
        outputs = self.model(inputs, labels=labels_ids)
        logits = outputs.logits
        # .mean() covers the multi-GPU (DataParallel) case where loss is a vector.
        loss_mean = outputs.loss.mean()
        # Token-level accuracy for this batch (ignoring masked positions).
        batch_correct_num, batch_total_num = self.model_evaluation.calculate_acc(
            logits, labels_ids, ignore_index=ignore_index)
        batch_acc = batch_correct_num / batch_total_num
        # Report the unscaled loss; only the backward pass is scaled below.
        loss_item = loss_mean.item()
        # With a small batch_size, accumulate gradients over several batches
        # to simulate a larger effective batch before each optimizer step.
        if self.config.gradient_accumulation_steps > 1:
            loss_mean = loss_mean / self.config.gradient_accumulation_steps

        # Backward pass (gradients accumulate until the step below).
        loss_mean.backward()

        # Update parameters only after the configured number of batches.
        if batch_index % self.config.gradient_accumulation_steps == 0:
            # Clip the *fully accumulated* gradients right before the step;
            # clipping partial sums mid-accumulation would distort them.
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
            self.optimizer.step()
            self.scheduler.step()
            self.optimizer.zero_grad()

        # get_last_lr() replaces the deprecated get_lr(), which warns and is
        # only valid when called from inside scheduler.step().
        lr = self.scheduler.get_last_lr()
        if batch_index % self.config.loss_step == 0:
            print(f"epoch:{epoch}, batch:{batch_index},loss:{loss_item}, batch_acc:{batch_acc}, lr:{lr}")

        del inputs, outputs
        return batch_correct_num, batch_total_num, loss_item

    def train_epoch(self, epoch):
        """
        Train for one full pass over the training data.
        :param epoch: current epoch number (1-based)
        :return: mean training loss of this epoch
        """
        print("start training")
        self.model.train()

        epoch_start_time = datetime.now()

        # Running totals for this epoch.
        total_loss = 0
        epoch_correct_num = 0  # correctly predicted tokens in this epoch
        epoch_total_num = 0    # total predicted tokens in this epoch

        for batch_index, (inputs_ids, label_ids) in enumerate(self.train_dataloader, start=1):
            batch_correct_num, batch_total_num, loss_item = self.train_batch(
                epoch, batch_index, inputs_ids, label_ids)
            epoch_correct_num += batch_correct_num
            epoch_total_num += batch_total_num
            total_loss += loss_item

        # Mean loss and token accuracy for the epoch.
        epoch_mean_loss = total_loss / len(self.train_dataloader)
        epoch_mean_acc = epoch_correct_num / epoch_total_num
        print(f"epoch：{epoch}: loss：{epoch_mean_loss}, predict_acc：{epoch_mean_acc}")

        # Periodic checkpoint (and always on the final epoch).
        if epoch % self.config.save_model_epochs == 0 or epoch == self.config.epochs:
            print(f"saving model for epoch {epoch}")
            model_path = os.path.join(self.save_model_path, f"bj_epoch{epoch}")
            os.makedirs(model_path, exist_ok=True)
            # HuggingFace-style checkpoint: config + weights.
            self.model.save_pretrained(model_path)

        # Bug fix: these end-of-epoch reports used to sit inside the save
        # branch and so only printed on checkpoint epochs; they now run for
        # every epoch.
        print(f"epoch {epoch} finished")
        epoch_finish_time = datetime.now()
        used_time = (epoch_finish_time - epoch_start_time).total_seconds()
        print(f"time for one epoch: {used_time}")
        return epoch_mean_loss

    def training(self):
        """
        Training driver: build optimizer/scheduler, alternate train and
        validation epochs, and keep the checkpoint with the lowest
        validation loss (i.e. the lowest perplexity).
        :return: None
        """
        self.optimizer = self.get_optimizer()
        self.scheduler = self.get_scheduler()

        # Per-epoch loss history for train and validation.
        train_losses, validate_losses = [], []
        # Lowest validation loss so far; float('inf') instead of an
        # arbitrary magic constant so the first epoch always checkpoints.
        best_val_loss = float('inf')
        for epoch in range(1, self.config.epochs + 1):
            # ========== train ========== #
            train_losses.append(self.train_epoch(epoch))
            # ========== validate ========== #
            validate_loss = self.validate_epoch(epoch)
            validate_losses.append(validate_loss)

            # Keep the model with the lowest perplexity. NOTE: low perplexity
            # does not guarantee the best generation quality.
            if validate_loss < best_val_loss:
                best_val_loss = validate_loss
                print('saving current best model for epoch {}'.format(epoch))
                # The original appended a no-op .format() to this fixed path;
                # the directory name is intentionally constant so the best
                # checkpoint is overwritten in place.
                model_path = os.path.join(self.save_model_path, 'min_ppl_model_bj')
                os.makedirs(model_path, exist_ok=True)
                self.model.save_pretrained(model_path)

    def validate_epoch(self, epoch):
        """
        Evaluate mean loss over the validation set (no gradient updates).
        :param epoch: current epoch number (1-based)
        :return: mean validation loss of this epoch
        """
        print("start validating")
        self.model.eval()
        epoch_start_time = datetime.now()
        total_loss = 0
        with torch.no_grad():
            for input_ids, labels in self.validate_dataloader:
                input_ids = input_ids.to(self.device)
                labels = labels.to(self.device)
                outputs = self.model(input_ids, labels=labels)
                # .mean() covers the DataParallel multi-GPU case.
                total_loss += outputs.loss.mean().item()
                del input_ids, outputs

        epoch_mean_loss = total_loss / len(self.validate_dataloader)
        print(f"validate epoch {epoch}: loss:{epoch_mean_loss}")
        epoch_finish_time = datetime.now()
        used_time = (epoch_finish_time - epoch_start_time).total_seconds()
        print(f"time for validating one epoch: {used_time}")
        return epoch_mean_loss

    def running(self):
        """
        Entry point: load/initialise the model, then run the training loop.
        :return: None
        """
        self.loading_model()
        self.training()


def main():
    """Build the GPT-2 trainer and launch the end-to-end training run."""
    TrainGPT2().running()


# Script entry point: run training only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
