import torch
import time
import os

from tqdm import tqdm
from transformers import AutoModelForMaskedLM, AutoTokenizer, get_scheduler
from PET.pet_config import ProjectConfig
from PET.data_handle.data_dataloader import get_data
from PET.utils.common_utils import mlm_loss, convert_logits_to_ids
from PET.utils.verbalizer import Verbalizer
from PET.utils.metric_utils import ClassEvaluator


def evaluate_model(pc, model, metric, data_loader, tokenizer, verbalizer):
    """
    Evaluate the current model on the given evaluation set.

    Args:
        pc: project config; provides the target device (``pc.device``).
        model: masked-LM model under training (switched to eval mode here,
            restored to train mode before returning).
        metric: ClassEvaluator-style accumulator exposing
            ``reset()`` / ``add_batch()`` / ``compute()``.
        data_loader: dataloader yielding dicts with ``input_ids``,
            ``token_type_ids``, ``attention_mask``, ``mask_positions``
            and ``mask_labels`` tensors.
        tokenizer: tokenizer used to map label token ids back to text.
        verbalizer: maps predicted sub-label tokens to their main label.

    Returns:
        dict with overall ``accuracy`` / ``precision`` / ``recall`` / ``f1``
        plus per-class metrics under ``class_metrics``.
    """
    model.eval()
    metric.reset()

    with torch.no_grad():
        for batch in data_loader:
            # Forward pass -> MLM logits of shape (batch, seq_len, vocab_size).
            logits = model(input_ids=batch['input_ids'].to(pc.device),
                           token_type_ids=batch['token_type_ids'].to(pc.device),
                           attention_mask=batch['attention_mask'].to(pc.device)).logits

            # Gold label token ids, e.g. [[3717, 3362], [2398, 3352], ...].
            mask_labels = batch['mask_labels'].numpy().tolist()
            # Strip [PAD] ids (order-preserving; avoids the O(n^2)
            # while-remove pattern), then decode token ids to label text,
            # e.g. ['洗浴', '电脑', ...].
            mask_labels = [[t for t in label if t != tokenizer.pad_token_id]
                           for label in mask_labels]
            mask_labels = [''.join(tokenizer.convert_ids_to_tokens(t)) for t in mask_labels]

            # Predicted token ids at the [MASK] positions.
            predictions = convert_logits_to_ids(logits, batch['mask_positions'])
            predictions = predictions.cpu().numpy().tolist()
            # Map predicted sub-label tokens to their main label and keep
            # only the label text.
            predictions = verbalizer.batch_find_main_label(predictions, hard_mapping=True)
            predictions = [e['label'] for e in predictions]

            metric.add_batch(predictions, mask_labels)

    # Example result shape:
    # {'accuracy': 0.25, 'precision': 0.3, 'recall': 0.25, 'f1': 0.26,
    #  'class_metrics': {'书籍': {'precision': ..., 'recall': ..., 'f1': ...}, ...}}
    results = metric.compute()
    model.train()  # restore training mode for the caller
    return results


def model_train(pc):
    """
    Run PET-style masked-LM fine-tuning.

    Args:
        pc: project config providing model path (``pre_model``), verbalizer
            file, hyper-parameters (``epochs``, ``learning_rate``,
            ``weight_decay``, ``warmup_ratio``, ``logging_steps``,
            ``valid_steps``, ``max_label_len``), ``device`` and ``save_dir``.

    Side effects:
        Saves periodic checkpoints under ``pc.save_dir/model_<step>`` and the
        best model by dev F1 under ``pc.save_dir/model_best``.
    """
    # Pretrained masked-LM backbone and its tokenizer.
    model = AutoModelForMaskedLM.from_pretrained(pc.pre_model)
    model = model.to(pc.device)
    tokenizer = AutoTokenizer.from_pretrained(pc.pre_model)

    # Label-word mapping between sub-labels and main labels.
    verbalizer = Verbalizer(verbalizer_file=pc.verbalizer,
                            tokenizer=tokenizer,
                            max_label_len=pc.max_label_len)

    train_dataloader, dev_dataloader = get_data(pc)

    # Standard BERT recipe: no weight decay for biases and LayerNorm weights.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters()
                       if not any(nd in n for nd in no_decay)],
            "weight_decay": pc.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters()
                       if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=pc.learning_rate)

    # Linear warmup + decay over the full run; total steps drive the schedule.
    num_update_steps_per_epoch = len(train_dataloader)
    max_train_steps = pc.epochs * num_update_steps_per_epoch
    warm_steps = int(pc.warmup_ratio * max_train_steps)
    lr_scheduler = get_scheduler(
        name='linear',
        optimizer=optimizer,
        num_warmup_steps=warm_steps,
        num_training_steps=max_train_steps,
    )

    criterion = torch.nn.CrossEntropyLoss()
    metric = ClassEvaluator()

    loss_list = []
    global_step = 0  # total number of optimizer steps taken
    start_train = time.time()
    best_f1 = 0

    for epoch in range(pc.epochs):
        for batch in tqdm(train_dataloader):
            # Forward pass -> MLM logits of shape (batch, seq_len, vocab_size).
            logits = model(input_ids=batch['input_ids'].to(pc.device),
                           token_type_ids=batch['token_type_ids'].to(pc.device),
                           attention_mask=batch['attention_mask'].to(pc.device)).logits

            # Expand gold main labels to their candidate sub-label token ids
            # and compute the MLM loss at the mask positions.
            mask_labels = batch['mask_labels'].numpy().tolist()
            sub_labels = verbalizer.batch_find_sub_labels(mask_labels)
            sub_labels = [e['token_ids'] for e in sub_labels]
            loss = mlm_loss(logits, batch['mask_positions'], sub_labels, criterion, pc.device)

            # Optimizer step: clear grads, backprop, update params and lr.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            lr_scheduler.step()

            loss_list.append(round(float(loss.item()), 6))
            global_step += 1

            # Periodic console logging.
            # NOTE(review): loss_avg is averaged over ALL steps so far, not
            # just the last logging window — kept as-is to preserve output.
            if global_step % pc.logging_steps == 0:
                time_diff = time.time() - start_train
                loss_avg = sum(loss_list) / len(loss_list)
                print("--train-- global step %d, epoch: %d, loss: %.5f, speed: %.2f step/s" % (
                    global_step, epoch, loss_avg, pc.logging_steps / time_diff))
                start_train = time.time()

            # Periodic checkpointing + dev-set evaluation.
            if global_step % pc.valid_steps == 0:
                cur_save_dir = os.path.join(pc.save_dir, 'model_%d' % global_step)
                # exist_ok avoids the check-then-create race of
                # os.path.exists + os.makedirs.
                os.makedirs(cur_save_dir, exist_ok=True)
                model.save_pretrained(cur_save_dir)
                tokenizer.save_pretrained(cur_save_dir)

                results = evaluate_model(pc, model, metric, dev_dataloader, tokenizer, verbalizer)
                accuracy = results['accuracy']
                precision = results['precision']
                recall = results['recall']
                f1 = results['f1']
                class_metrics = results['class_metrics']
                print("--val-- Evaluation accuracy: %.5f, precision: %.5f, recall: %.5f, F1: %.5f" % (
                    accuracy, precision, recall, f1))

                # Keep a separate copy of the best model by dev F1.
                if f1 > best_f1:
                    print(f"--model save-- best F1 performance has been updated: {best_f1:.5f} -> {f1:.5f}")
                    print(f'--model save-- Each Class Metrics are: {class_metrics}')
                    best_f1 = f1
                    cur_save_dir = os.path.join(pc.save_dir, 'model_best')
                    os.makedirs(cur_save_dir, exist_ok=True)
                    model.save_pretrained(cur_save_dir)
                    tokenizer.save_pretrained(cur_save_dir)
                # Exclude evaluation time from the next speed measurement.
                start_train = time.time()


if __name__ == '__main__':
    # Load the project configuration and launch training.
    config = ProjectConfig()
    model_train(config)
