import os
import time
from transformers import AutoModelForMaskedLM, get_scheduler
import sys
sys.path.append('D:\Project\AIStudent\LLMProject\BERT_Fintuning\P-tuning\data_handle')
sys.path.append('D:\Project\AIStudent\LLMProject\BERT_Fintuning\P-tuning\\utils')
from utils.metirc_utils import *
from utils.common_utils import *
from data_handle.data_loader import *
from utils.verbalizer import Verbalizer
from torch.optim import AdamW
import torch.nn as nn
from tqdm import tqdm
from p_tuning_config import *
# Global hyper-parameter/config object (device, paths, lr, epochs, logging/valid steps, ...)
# used by both the training loop and evaluation below.
pt = p_tuning_config()


def evaluate_model(model, metric, dev_dataloader, tokenizer, verbalizer):
    """Run one full pass over the dev set and return aggregate classification metrics.

    Puts the model in eval mode, decodes the gold labels from the masked-token
    ids (padding stripped), maps the model's mask-position predictions back to
    main labels via the verbalizer, and accumulates them into ``metric``.
    Restores train mode before returning.

    Args:
        model: masked-LM whose forward returns an object with ``.logits``.
        metric: ClassEvaluator-style accumulator with reset/add_batch/compute.
        dev_dataloader: yields batches with input_ids/attention_mask/
            mask_positions and (optionally) token_type_ids.
        tokenizer: used to strip ``pad_token_id`` and convert ids to tokens.
        verbalizer: maps predicted sub-label tokens to main labels.

    Returns:
        Tuple ``(accuracy, precision, recall, f1, class_metrics)``.
    """
    model.eval()
    metric.reset()

    with torch.no_grad():
        for batch in dev_dataloader:
            # Mirror the training loop: some tokenizers do not emit token_type_ids.
            if 'token_type_ids' in batch:
                logits = model(input_ids=batch['input_ids'].to(device=pt.device),
                               attention_mask=batch['attention_mask'].to(device=pt.device),
                               token_type_ids=batch['token_type_ids'].to(device=pt.device)).logits
            else:
                logits = model(input_ids=batch['input_ids'].to(device=pt.device),
                               attention_mask=batch['attention_mask'].to(device=pt.device)).logits

            # NOTE(review): training reads batch['mask_label'] while this reads
            # 'mask_labels' — presumably the dataloader provides both; verify.
            mask_labels = batch['mask_labels'].numpy().tolist()

            # Strip padding in a single O(n) pass per label; the previous
            # `while pad in lst: lst.remove(pad)` pattern was O(n^2).
            pad_id = tokenizer.pad_token_id
            mask_labels = [[tid for tid in label if tid != pad_id] for label in mask_labels]

            # Gold labels as plain strings, e.g. ids -> ['电', '脑'] -> '电脑'.
            mask_labels = [''.join(tokenizer.convert_ids_to_tokens(t)) for t in mask_labels]

            predictions = convert_logits_to_ids(logits,
                                                batch['mask_positions']).cpu().numpy().tolist()

            # Sub-label token predictions -> main label names.
            predictions = verbalizer.batch_find_main_label(predictions)
            predictions = [ele['label'] for ele in predictions]
            metric.add_batch(predictions, mask_labels)

    eval_metric = metric.compute()
    model.train()
    return (eval_metric['accuracy'], eval_metric['precision'], eval_metric['recall'],
            eval_metric['f1'], eval_metric['class_metrics'])

def model2train():
    """Fine-tune a pretrained masked-LM with verbalizer-supervised MLM loss.

    Loads model/tokenizer named by the global config ``pt``, trains with
    ``mlm_loss`` over the verbalizer's sub-label token ids, logs every
    ``pt.logging_step`` steps, and every ``pt.valid_steps`` steps saves a
    checkpoint, evaluates on the dev set, and keeps the best-F1 weights
    under ``<save_dir>/model_best``.
    """
    model = AutoModelForMaskedLM.from_pretrained(pt.pre_model)
    tokenizer = AutoTokenizer.from_pretrained(pt.pre_model)
    verbalizer = Verbalizer(verbalizer_file=pt.verbalizer,
                            tokenizer=tokenizer,
                            max_label_len=pt.max_label_len)
    # Standard BERT fine-tuning recipe: no weight decay on bias/LayerNorm params.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         "weight_decay": 0.01
         },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
         "weight_decay": 0.0
         }]
    optimizer = AdamW(optimizer_grouped_parameters, lr=pt.learning_rate)
    model.to(pt.device)
    train_dataloader, dev_dataloader = get_data()
    # Linear schedule with warmup over the whole run (one optimizer step per batch).
    num_update_steps_per_epoch = len(train_dataloader)
    max_train_steps = pt.epochs * num_update_steps_per_epoch
    warm_steps = int(pt.warmup_ratio * max_train_steps)
    lr_scheduler = get_scheduler(
        name='linear',
        optimizer=optimizer,
        num_warmup_steps=warm_steps,
        num_training_steps=max_train_steps
    )
    loss_list = []
    tic_train = time.time()
    metric = ClassEvaluator()
    criterion = nn.CrossEntropyLoss()
    global_step, best_f1 = 0, 0
    # BUG FIX: from_pretrained() returns the model in eval mode, so dropout was
    # silently disabled until the first evaluate_model() call re-enabled it.
    model.train()
    print('开始训练')
    for epoch in range(pt.epochs):
        for batch in tqdm(train_dataloader):
            # token_type_ids may be absent depending on the tokenizer.
            if 'token_type_ids' in batch:
                logits = model(input_ids=batch['input_ids'].to(device=pt.device),
                               attention_mask=batch['attention_mask'].to(device=pt.device),
                               token_type_ids=batch['token_type_ids'].to(device=pt.device)).logits
            else:
                logits = model(input_ids=batch['input_ids'].to(device=pt.device),
                               attention_mask=batch['attention_mask'].to(device=pt.device)).logits

            # Expand each main label into its verbalizer sub-label token ids.
            mask_labels = batch['mask_label'].numpy().tolist()
            sub_labels = verbalizer.batch_find_sub_labels(mask_labels)
            sub_labels = [ele['token_ids'] for ele in sub_labels]

            loss = mlm_loss(logits,
                            batch['mask_positions'].to(device=pt.device),
                            sub_labels,
                            criterion,
                            pt.device)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            loss_list.append(float(loss.cpu().detach()))

            global_step += 1
            if global_step % pt.logging_step == 0:
                time_diff = time.time() - tic_train
                # Running average over ALL steps so far (list is never cleared).
                loss_avg = sum(loss_list) / len(loss_list)
                print(f'global_step: {global_step}, speed:{pt.logging_step / time_diff}, loss: {loss_avg}, epoch: {epoch}')
                tic_train = time.time()

            if global_step % pt.valid_steps == 0:
                cur_save_dir = os.path.join(pt.save_dir, "model_%d" % global_step)
                # exist_ok avoids the exists()/makedirs() race.
                os.makedirs(cur_save_dir, exist_ok=True)
                model.save_pretrained(cur_save_dir)
                tokenizer.save_pretrained(cur_save_dir)

                acc, precision, recall, f1, class_metrics = evaluate_model(model,
                                                                           metric,
                                                                           dev_dataloader,
                                                                           tokenizer,
                                                                           verbalizer)
                print(f'Evaluate_precision:{precision}, recall:{recall}, f1:{f1}')

                if f1 > best_f1:
                    print(f'best F1 {best_f1} --> {f1}')
                    print(f'each class metrics are:{class_metrics}')
                    best_f1 = f1
                    cur_save_dir = os.path.join(pt.save_dir, "model_best")
                    os.makedirs(cur_save_dir, exist_ok=True)
                    model.save_pretrained(cur_save_dir)
                    tokenizer.save_pretrained(cur_save_dir)
                # Exclude evaluation/checkpoint time from the next speed estimate.
                tic_train = time.time()
    print("训练结束")
# Script entry point: launch the full fine-tuning run when executed directly.
if __name__ == '__main__':
    model2train()
