# coding:utf-8
import gc
import re
import os
import sys
import pickle
import random
import shutil
import warnings
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from pathlib import Path
from typing import List, Tuple, Optional
from argparse import ArgumentParser
from transformers import BertTokenizer, AdamW
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
import torch
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import Dataset, DataLoader
from torch.cuda.amp import autocast as autocast, GradScaler
from nezha.modeling.modeling import NeZhaConfig, NeZhaForMaskedLM
from loader.vocab import Vocab

from fish_tool import sys_tool, logs, BaseConfig
from fish_tool.ai import torch_tool

warnings.filterwarnings('ignore')


class DGDataset(Dataset):
    """Thin map-style Dataset over a dict of pre-tokenized parallel lists.

    Expects `data_dict` to hold equal-length lists under the keys
    'input_ids', 'token_type_ids' and 'attention_mask'.
    """

    def __init__(self, data_dict: dict):
        # BUG FIX: `super(Dataset, self).__init__()` started the MRO search
        # *after* Dataset, skipping it entirely; use the plain zero-arg form.
        super().__init__()
        self.data_dict = data_dict

    def __getitem__(self, index: int) -> tuple:
        """Return one example as (input_ids, token_type_ids, attention_mask)."""
        return (self.data_dict['input_ids'][index],
                self.data_dict['token_type_ids'][index],
                self.data_dict['attention_mask'][index])

    def __len__(self) -> int:
        """Number of examples (length of the 'input_ids' list)."""
        return len(self.data_dict['input_ids'])


class DGDataCollator:
    """Collates raw (ids, type_ids, mask) tuples into padded tensors and applies
    BERT-style masked-language-model corruption."""

    def __init__(self, max_seq_len: int, vocab, mlm_probability=0.15):
        self.max_seq_len = max_seq_len
        self.vocab = vocab
        self.mlm_probability = mlm_probability

    def pad_and_truncate(self, input_ids_list, token_type_ids_list,
                         attention_mask_list, max_seq_len):
        """Zero-pad shorter sequences and truncate longer ones to `max_seq_len`."""
        batch_size = len(input_ids_list)
        input_ids = torch.zeros((batch_size, max_seq_len), dtype=torch.long)
        token_type_ids = torch.zeros_like(input_ids)
        attention_mask = torch.zeros_like(input_ids)

        rows = zip(input_ids_list, token_type_ids_list, attention_mask_list)
        for row, (ids, types, mask) in enumerate(rows):
            seq_len = len(ids)
            if seq_len <= max_seq_len:
                # Shorter than target: left-align, remainder stays zero (= padding).
                input_ids[row, :seq_len] = torch.tensor(ids, dtype=torch.long)
                token_type_ids[row, :seq_len] = torch.tensor(types, dtype=torch.long)
                attention_mask[row, :seq_len] = torch.tensor(mask, dtype=torch.long)
            else:
                # Too long: clip and re-append [SEP] so the sequence still terminates.
                clipped = ids[:max_seq_len - 1] + [self.vocab.sep_token_id]
                input_ids[row] = torch.tensor(clipped, dtype=torch.long)
                token_type_ids[row] = torch.tensor(types[:max_seq_len], dtype=torch.long)
                attention_mask[row] = torch.tensor(mask[:max_seq_len], dtype=torch.long)
        return input_ids, token_type_ids, attention_mask

    def mask_tokens(self, inputs: torch.Tensor, special_tokens_mask: Optional[torch.Tensor] = None):
        """Corrupt `inputs` in place for MLM: of the sampled positions,
        80% become [MASK], 10% a random token, 10% stay unchanged.

        Returns the corrupted inputs and a labels tensor with -100 at
        unmasked positions (ignored by the loss)."""
        labels = inputs.clone()
        # Sample candidate positions with probability `mlm_probability`,
        # never picking special tokens ([CLS]/[SEP]/padding...).
        pick_probs = torch.full(labels.shape, self.mlm_probability)
        if special_tokens_mask is None:
            special_tokens_mask = torch.tensor(
                [self.vocab.get_special_tokens_mask(row, already_has_special_tokens=True)
                 for row in labels.tolist()],
                dtype=torch.bool)
        else:
            special_tokens_mask = special_tokens_mask.bool()

        pick_probs.masked_fill_(special_tokens_mask, value=0.0)
        masked_indices = torch.bernoulli(pick_probs).bool()
        # Loss is computed only on masked positions.
        labels[~masked_indices] = -100

        # 80% of the masked positions are replaced by the [MASK] token.
        use_mask_token = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[use_mask_token] = self.vocab.convert_tokens_to_ids(self.vocab.mask_token)

        # Half of the remainder (10% overall) become a random vocabulary id.
        use_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~use_mask_token
        random_ids = torch.randint(len(self.vocab), labels.shape, dtype=torch.long)
        inputs[use_random] = random_ids[use_random]

        # The final 10% keep their original token.
        return inputs, labels

    def __call__(self, examples: list) -> dict:
        """Collate a list of example tuples into a model-ready batch dict."""
        input_ids_list, token_type_ids_list, attention_mask_list = zip(*examples)
        longest = max(len(ids) for ids in input_ids_list)
        max_seq_len = min(longest, self.max_seq_len)

        input_ids, token_type_ids, attention_mask = self.pad_and_truncate(
            input_ids_list, token_type_ids_list, attention_mask_list, max_seq_len)
        input_ids, mlm_labels = self.mask_tokens(input_ids)
        return {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
            'labels': mlm_labels,
        }


def load_data(tokenizer):
    """Build the shuffled pretraining DataLoader from the pickled corpus on disk."""
    # NOTE(review): pickle.load on a locally produced cache file; never point
    # this at untrusted input.
    with open(Config.pre_train_data_path, 'rb') as f:
        train_data = pickle.load(f)

    return DataLoader(
        dataset=DGDataset(train_data),
        batch_size=Config.batch_size,
        shuffle=True,
        num_workers=Config.num_workers,
        collate_fn=DGDataCollator(Config.max_seq_len, tokenizer),
    )


class WarmupLinearSchedule(LambdaLR):
    """Linear lr warmup from 0 to peak over `warmup_steps`, then linear decay
    reaching 0 at `t_total` steps."""

    def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        super().__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        """Multiplicative lr factor for the given optimizer step."""
        if step < self.warmup_steps:
            # Warmup phase: fraction of warmup completed so far.
            return float(step) / max(1, self.warmup_steps)
        # Decay phase: remaining fraction of training, clamped at 0 past t_total.
        remaining = float(self.t_total - step)
        span = max(1.0, float(self.t_total - self.warmup_steps))
        return max(0.0, remaining / span)


def build_model():
    """Load the NeZha MLM model from the pretrained checkpoint directory and
    move it onto the configured device."""
    config = NeZhaConfig.from_pretrained(Config.pre_model_dir)
    model = NeZhaForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=Config.pre_model_dir,
        config=config,
    )
    # Module.to() is in-place and returns self.
    return model.to(Config.device)


def build_optimizer(model, train_steps):
    """Create an AdamW optimizer with decoupled weight decay plus a linear
    warmup/decay schedule.

    Args:
        model: model whose named parameters are optimized.
        train_steps: total number of optimizer steps (sizes warmup and decay).

    Returns:
        (optimizer, scheduler) tuple.
    """
    # Bias and LayerNorm weights are conventionally excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']

    param_optimizer = list(model.named_parameters())
    optimizer_grouped_parameters = [
        # BUG FIX: the per-group key must be 'weight_decay' — AdamW ignores
        # the misspelled 'weight_decay_rate', so decay was silently never applied.
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': Config.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]

    optimizer = AdamW(optimizer_grouped_parameters, lr=Config.learning_rate, eps=Config.eps)
    # BUG FIX: warmup was `train_steps * Config.max_grad_norm` (== 1.0 * total,
    # i.e. warmup covered ALL of training so the lr never decayed); the intended
    # knob is Config.warmup_ratio.
    scheduler = WarmupLinearSchedule(optimizer,
                                     warmup_steps=int(train_steps * Config.warmup_ratio),
                                     t_total=train_steps)

    return optimizer, scheduler


def batch2cuda(batch):
    """Move every tensor in a collated batch dict onto the configured device."""
    # Idiom fix: iterate .items() directly; the intermediate list() was a
    # needless materialization of the view.
    return {key: value.to(Config.device) for key, value in batch.items()}


def save_model(model, tokenizer, global_steps, is_last=False):
    """Persist model weights and tokenizer vocab under a step-named checkpoint dir.

    Final-epoch saves (`is_last=True`) go to new_pretrain_dir; intermediate
    saves go to the rotating record_save_path."""
    # Unwrap a DataParallel container before saving.
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
    # Generic .module unwrap covers other wrapper types as well.
    model_to_save = getattr(model, 'module', model)

    target_dir = Config.new_pretrain_dir if is_last else Config.record_save_path
    model_save_path = os.path.join(target_dir, f'checkpoint-{global_steps}')
    model_to_save.save_pretrained(model_save_path)
    tokenizer.save_vocabulary(model_save_path)

    logs.print(f'\n>> model saved in : {model_save_path}')


def sorted_checkpoints(best_model_checkpoint, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False):
    """Return checkpoint dirs under Config.record_save_path ordered oldest-first.

    Ordering key is the trailing step number (or mtime when `use_mtime`); the
    best checkpoint, if given, is swapped to the end so rotation never deletes it."""
    keyed_paths = []
    for path in (str(x) for x in Path(Config.record_save_path).glob(f"{checkpoint_prefix}-*")):
        if use_mtime:
            keyed_paths.append((os.path.getmtime(path), path))
        else:
            match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
            if match and match.groups():
                keyed_paths.append((int(match.groups()[0]), path))

    checkpoints_sorted = [path for _, path in sorted(keyed_paths)]
    # Make sure we don't delete the best model: move it to the protected tail slot.
    if best_model_checkpoint is not None:
        best_index = checkpoints_sorted.index(str(Path(best_model_checkpoint)))
        checkpoints_sorted[best_index], checkpoints_sorted[-1] = (
            checkpoints_sorted[-1],
            checkpoints_sorted[best_index],
        )
    return checkpoints_sorted


def rotate_checkpoints(best_model_checkpoint, use_mtime=False) -> None:
    """Delete the oldest checkpoints so at most Config.save_total_limit remain,
    always keeping `best_model_checkpoint`."""
    limit = Config.save_total_limit
    if limit is None or limit <= 0:
        # Rotation disabled.
        return

    ordered = sorted_checkpoints(best_model_checkpoint, use_mtime=use_mtime)
    excess = len(ordered) - limit
    if excess <= 0:
        return

    # Oldest checkpoints sit at the front; the protected best one is at the tail.
    for stale in ordered[:excess]:
        shutil.rmtree(stale)


def pretrain():
    """Run the full MLM pretraining loop.

    Loads the pickled corpus, trains for Config.num_epochs (optionally with
    AMP fp16 and gradient accumulation), checkpoints after every epoch with
    rotation, then saves the final model and a loss-curve figure."""
    logs.print('\n>> start pretraining ... ...')
    logs.print(f'\n>> loading from pretrain model path -> {Config.pre_model_dir}')

    model = build_model()
    tokenizer = Vocab(Config.pre_model_dir)

    train_dataloader = load_data(tokenizer)

    total_steps = Config.num_epochs * len(train_dataloader)

    optimizer, scheduler = build_optimizer(model, total_steps)

    total_loss, cur_avg_loss, global_steps = 0., 0., 0

    if Config.fp16:
        scaler = GradScaler()

    pretrain_loss_list, global_steps_list = [], []

    for epoch in range(1, Config.num_epochs + 1):

        train_iterator = tqdm(train_dataloader, desc=f'Epoch : {epoch}', total=len(train_dataloader))

        model.train()

        for step, batch in enumerate(train_iterator):
            batch_cuda = batch2cuda(batch)

            if Config.fp16:
                with autocast():
                    loss, logits = model(**batch_cuda)[:2]
            else:
                loss, logits = model(**batch_cuda)[:2]

            # BUG FIX: scale the loss BEFORE backward so gradients accumulated
            # over several micro-batches are properly averaged. Previously the
            # division happened after backward() and only affected the logged
            # value, not the gradients.
            if Config.gradient_accumulation_steps > 1:
                loss = loss / Config.gradient_accumulation_steps

            if Config.fp16:
                scaler.scale(loss).backward()
            else:
                loss.backward()

            pretrain_loss_list.append(loss.item())
            global_steps_list.append(global_steps + 1)

            total_loss += loss.item()
            cur_avg_loss += loss.item()

            # Only step the optimizer once per accumulation window.
            if (step + 1) % Config.gradient_accumulation_steps == 0:
                if Config.fp16:
                    # Unscale first so clipping sees the true gradient norm.
                    scaler.unscale_(optimizer)

                # BUG FIX: use the configured threshold instead of a hardcoded
                # 1.0 (the values happen to agree today, but Config.max_grad_norm
                # was silently ignored).
                torch.nn.utils.clip_grad_norm_(model.parameters(), Config.max_grad_norm)

                if Config.fp16:
                    scaler.step(optimizer)
                    scaler.update()
                else:
                    optimizer.step()

                scheduler.step()
                optimizer.zero_grad()

                if (global_steps + 1) % Config.logging_step == 0:
                    epoch_avg_loss = cur_avg_loss / Config.logging_step
                    global_avg_loss = total_loss / (global_steps + 1)

                    logs.print(f"\n>> epoch - {epoch},  global steps - {global_steps + 1}, "
                               f"epoch avg loss - {epoch_avg_loss:.4f}, global avg loss - {global_avg_loss:.4f}.")

                    cur_avg_loss = 0.0
                global_steps += 1

                lr = scheduler.get_last_lr()[0]
                train_iterator.set_postfix_str(f'loss : {loss.item():.4f}, lr : {lr}, global steps : {global_steps} .')
            if Config.debug:
                break
        # Per-epoch checkpoint, then prune old ones (never the one just saved).
        save_model(model, tokenizer, global_steps)
        last_checkpoint_save_path = os.path.join(Config.record_save_path, f'checkpoint-{global_steps}')
        rotate_checkpoints(last_checkpoint_save_path, use_mtime=False)

    logs.print('\n>> saving model at last epoch ... ...')
    save_model(model, tokenizer, global_steps, True)

    # Plot and save the training loss curve. No plt.show(): the training
    # server is headless.
    fig, ax = plt.subplots()
    ax.plot(global_steps_list, pretrain_loss_list, 'k', label='pretrain_loss')
    legend = ax.legend(loc='best', shadow=True, fontsize='large')
    legend.get_frame().set_facecolor('#00FFCC')

    fig_save_path = os.path.join(Config.new_pretrain_dir, 'train_loss_curve.jpg')
    plt.savefig(fig_save_path)

    # Release GPU memory before the process moves on.
    del model, tokenizer, optimizer, scheduler
    torch.cuda.empty_cache()
    gc.collect()


class Config(BaseConfig):
    """Static configuration for the pretraining run.

    NOTE: this class body runs at import time and has side effects
    (directory creation, global seeding)."""
    # Input paths: Windows dev-machine layout first, Linux server fallback.
    pre_model_dir = 'E:/code/data/pretrain_model_file/nezha-cn-base'
    pre_train_data_path = f'E:/code/data/LIC2022-百度比赛/百度-2022语言与智能技术竞赛：情感可解释评测/nezha_pretrain_data_20220508.pkl'
    if not os.path.exists(pre_train_data_path):
        # Local path missing -> assume we are on the Linux training server.
        pre_model_dir = '/home/wangxiaoyu/data/pretrain/nezha/nezha-cn-base'
        pre_train_data_path = f'/home/wangxiaoyu/data/LIC2022-baidu/s情感可解释评测/nezha_pretrain_data_20220508.pkl'
    # Output: final model + loss curve, and rotating per-epoch checkpoints.
    new_pretrain_dir = 'data/baidu_senti_20220508'
    record_save_path = f'{new_pretrain_dir}/record'

    # --- training hyper-parameters ---
    fp16 = True  # mixed-precision via torch.cuda.amp
    max_seq_len = 350
    logging_step = 2000  # log average loss every N optimizer steps
    num_workers = 0
    max_grad_norm = 1.0  # gradient clipping threshold
    num_epochs = 150
    batch_size = 10
    save_total_limit = 10  # max checkpoints kept under record_save_path
    learning_rate = 6e-5
    eps = 1e-8  # AdamW epsilon
    gradient_accumulation_steps = 1
    warmup_ratio = 0.1  # intended warmup fraction of total steps
    weight_decay = 0.01
    save_steps = 5000
    # n_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 1
    n_gpus = 1  # use just one GPU
    batch_size *= n_gpus  # effective batch size scales with GPU count

    os.makedirs(new_pretrain_dir, exist_ok=True)
    os.makedirs(record_save_path, exist_ok=True)

    # Global seeding for reproducibility.
    seed = 9527
    torch_tool.seed_everything(seed)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # debug = True
    debug = False
    if debug:
        # Tiny settings for a fast smoke test.
        save_steps = 10
        batch_size = 2
        num_epochs = 2
        max_seq_len = 20


if __name__ == '__main__':
    # Script entry point: run the full pretraining pipeline.
    pretrain()
