import torch
import os
from torch.nn import CrossEntropyLoss
from torch.optim import AdamW
from transformers import AutoModelForMaskedLM, get_scheduler
from pet.data_handle.data_manager import DataManager
from pet.parameter_config import TrainConfig
from utils.common_utils import mlm_loss, convert_logits_to_ids
from utils.label_verbalize import LabelVerbalize
from utils.model_evaluation import ModelEvaluation
from pet.model_tokenizer import ModelTokenizer


class TrainBertPet:
    """PET (Pattern-Exploiting Training) trainer for a masked-LM model.

    Wires together the tokenizer, pretrained MLM model, optimizer, LR
    scheduler and data loaders from ``TrainConfig``, then runs the training
    loop, evaluating on the validation set and checkpointing the best-F1
    model under ``save_model_path``.
    """

    def __init__(self):
        # All hyper-parameters and paths come from the project config object.
        self.config = TrainConfig()
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model_evaluation = ModelEvaluation()
        self.save_model_path = self.build_save_model_path()

        self.loss_function = CrossEntropyLoss()

        # Filled in later by load_model_tokenizer / load_datasets /
        # get_optimizer / get_scheduler (see running()).
        self.tokenizer = None
        self.data_manager = None
        self.model = None
        self.optimizer = None
        self.scheduler = None
        # training / validation data loaders
        self.train_dataloader = None
        self.validate_dataloader = None

        self.label_verbalize = None

        # Running training state.
        self.best_f1 = 0
        self.step_number = 0
        self.loss_list = []

    def build_save_model_path(self):
        """Create the model output directory if needed and return its path.

        :return: the configured save directory (``config.save_model_path``).
        """
        save_model_path = self.config.save_model_path
        # exist_ok=True already makes this a no-op for an existing directory,
        # so no separate os.path.exists() check is required.
        os.makedirs(save_model_path, exist_ok=True)
        return save_model_path

    def load_model_tokenizer(self):
        """Load the pretrained masked-LM model, tokenizer and label verbalizer.

        :raises ValueError: if model and tokenizer vocabulary sizes differ.
        """
        # Load the pretrained masked-LM model onto the target device.
        self.model = AutoModelForMaskedLM.from_pretrained(self.config.pretrained_model)
        self.model.to(self.device)

        self.tokenizer = ModelTokenizer(self.config.tokenizer_path).get_tokenizer()
        # Sanity check: model and tokenizer must share a single vocabulary.
        # Raise (not assert) so the check survives `python -O`.
        if self.model.config.vocab_size != self.tokenizer.vocab_size:
            raise ValueError(
                f'vocab size mismatch: model={self.model.config.vocab_size}, '
                f'tokenizer={self.tokenizer.vocab_size}'
            )

        self.label_verbalize = LabelVerbalize(
            tokenizer=self.tokenizer,
            max_label_len=self.config.max_label_len
        )

    def get_optimizer(self):
        """Build the AdamW optimizer.

        Standard BERT fine-tuning setup: weight decay on all parameters
        except biases and LayerNorm weights.
        """
        no_decay = ["bias", "LayerNorm.weight"]
        # Single pass over named_parameters instead of two comprehensions.
        decay_params, no_decay_params = [], []
        for name, param in self.model.named_parameters():
            if any(nd in name for nd in no_decay):
                no_decay_params.append(param)
            else:
                decay_params.append(param)
        optimizer_grouped_parameters = [
            {"params": decay_params, "weight_decay": self.config.weight_decay},
            {"params": no_decay_params, "weight_decay": 0.0},
        ]
        self.optimizer = AdamW(optimizer_grouped_parameters, lr=self.config.learning_rate, eps=self.config.eps)

    def get_scheduler(self):
        """Build a linear warmup/decay learning-rate scheduler."""
        num_update_steps_per_epoch = len(self.train_dataloader)
        # Total optimizer steps for the whole run; the scheduler uses this to
        # shape the learning-rate curve across training.
        max_train_steps = self.config.epochs * num_update_steps_per_epoch
        # Number of warmup steps at the start of training.
        warm_steps = int(self.config.warmup_ratio * max_train_steps)
        self.scheduler = get_scheduler(
            name='linear',
            optimizer=self.optimizer,
            num_warmup_steps=warm_steps,
            num_training_steps=max_train_steps,
        )

    def load_datasets(self):
        """Build the train/validation dataloaders via DataManager."""
        self.data_manager = DataManager(self.config, self.tokenizer)
        self.train_dataloader, self.validate_dataloader = self.data_manager.get_data_loder()

    def calculate_model_parameters(self):
        """Print the total number of model parameters."""
        num_parameters = sum(parameter.numel() for parameter in self.model.parameters())
        print(f'模型参数总量---》{num_parameters}')

    def train_batch(self, batch_info):
        """Run one forward/backward/optimizer step on a single batch.

        :param batch_info: dict of tensors with keys 'input_ids',
            'attention_mask', 'mask_positions', 'mask_labels' and
            optionally 'token_type_ids'.
        :return: this batch's loss as a Python float.
        """
        inputs = {
            'input_ids': batch_info['input_ids'].to(self.device),
            'attention_mask': batch_info['attention_mask'].to(self.device),
        }
        # Some models (e.g. RoBERTa) do not take token_type_ids.
        if 'token_type_ids' in batch_info:
            inputs['token_type_ids'] = batch_info['token_type_ids'].to(self.device)
        logits = self.model(**inputs).logits

        # Gold labels: map each main label to its sub-label token ids.
        mask_labels = batch_info['mask_labels'].numpy().tolist()
        sub_labels = self.label_verbalize.batch_find_sub_labels(mask_labels)
        sub_labels_id = [sub_label['token_ids'] for sub_label in sub_labels]

        loss = mlm_loss(logits,
                        batch_info['mask_positions'].to(self.device),
                        sub_labels_id,
                        self.loss_function,
                        self.device,
                        )
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.scheduler.step()
        # .item() detaches and moves the scalar to CPU in one call.
        return loss.item()

    def train_epoch(self, epoch):
        """Train for one full pass over the training data.

        :param epoch: current (1-based) epoch number, used for logging.
        """
        print("start training")
        for batch_index, batch_info in enumerate(self.train_dataloader):
            loss_item = self.train_batch(batch_info)
            self.loss_list.append(loss_item)
            self.step_number += 1
            # NOTE: loss_list is never reset, so this is the running average
            # over all steps since training began, not just this epoch.
            loss_avg = sum(self.loss_list) / len(self.loss_list)
            self._info_print(epoch, loss_avg)
            if self.step_number % self.config.save_model_steps == 0:
                self.model_save(f"model_pet_{self.step_number}")
            # NOTE(review): evaluation runs after every single batch, which is
            # very expensive — consider gating it on a step interval.
            acc, precision, recall, f1, class_metrics = self.evaluate_model(epoch)
            print(f"Evaluation precision: {precision}, recall:{recall:.5f}, F1: {f1:.5f}")
            if f1 > self.best_f1:
                print(f"best F1 performance has been updated: {self.best_f1:.5f} --> {f1:.5f}")
                print(f'Each Class Metrics are: {class_metrics}')
                self.best_f1 = f1
                self.model_save("model_best_pet")

    def training(self):
        """Run the training loop for config.epochs epochs (logged 1-based)."""
        for epoch in range(self.config.epochs):
            self.train_epoch(epoch + 1)

    def evaluate_model(self, epoch):
        """Evaluate on the validation set; restores train mode afterwards.

        :param epoch: kept for interface compatibility; unused here.
        :return: tuple (accuracy, precision, recall, f1, class_metrics).
        """
        print("start validating")
        self.model.eval()
        self.model_evaluation.reset()

        with torch.no_grad():
            pad_token_id = self.tokenizer.pad_token_id
            for step, batch in enumerate(self.validate_dataloader):
                inputs = {
                    'input_ids': batch['input_ids'].to(self.device),
                    'attention_mask': batch['attention_mask'].to(self.device),
                }
                # Some models (e.g. RoBERTa) do not take token_type_ids.
                if 'token_type_ids' in batch:
                    inputs['token_type_ids'] = batch['token_type_ids'].to(self.device)
                logits = self.model(**inputs).logits

                # (batch, label_num): gold label token ids.
                mask_labels = batch['mask_labels'].numpy().tolist()
                # Drop [PAD] tokens in one pass; the previous
                # while/.remove loop was O(n^2) per label.
                mask_labels = [[t for t in label if t != pad_token_id]
                               for label in mask_labels]

                # ids -> label string
                mask_labels = [''.join(self.tokenizer.convert_ids_to_tokens(t)) for t in mask_labels]

                # (batch, label_num): predicted sub-label token ids.
                predictions = convert_logits_to_ids(logits, batch["mask_positions"]).cpu().numpy().tolist()

                # Map each predicted sub-label back to its main label string.
                predictions = self.label_verbalize.batch_find_super_label(predictions)
                predictions = [ele['label'] for ele in predictions]

                self.model_evaluation.add_batch(pred_batch=predictions, gold_batch=mask_labels)
        eval_metric = self.model_evaluation.compute()
        self.model.train()

        return (eval_metric['accuracy'], eval_metric['precision'], eval_metric['recall'],
                eval_metric['f1'], eval_metric['class_metrics'])

    def _info_print(self, epoch, loss_avg):
        """Print training progress every config.loss_print_step steps."""
        if self.step_number % self.config.loss_print_step == 0:
            print(f"global step {self.step_number}, epoch:{epoch}, loss:{loss_avg}")

    def model_save(self, save_model_dir):
        """Save model and tokenizer under save_model_path/save_model_dir.

        :param save_model_dir: checkpoint sub-directory name.
        """
        # Use the path prepared in __init__ (same value as
        # config.save_model_path, kept consistent).
        cur_save_dir = os.path.join(self.save_model_path, save_model_dir)
        os.makedirs(cur_save_dir, exist_ok=True)
        self.model.save_pretrained(cur_save_dir)
        self.tokenizer.save_pretrained(cur_save_dir)

    def running(self):
        """End-to-end entry point: load everything, then train."""
        self.load_model_tokenizer()
        self.load_datasets()
        self.get_optimizer()
        self.get_scheduler()
        self.training()


def main():
    """Script entry point: build the PET trainer and run it end-to-end."""
    TrainBertPet().running()


if __name__ == '__main__':
    # Run training only when the file is executed directly, not on import.
    main()
