# -*- coding:utf8 -*-
# @Time : 2023/3/28 15:07
# @Author : WanJie Wu


import os
import json
import math
import torch
import pickle
import random
import argparse
import numpy as np
from tqdm import tqdm
from loguru import logger
from functools import partial
from torch.optim import AdamW
from torch.backends import cudnn
import torch.distributed as dist
from tensorboardX import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers import (
    get_cosine_schedule_with_warmup,
    AutoConfig,
    AutoModelForQuestionAnswering,
    AutoTokenizer,
)

from dataset import init_data_loader, post_process_of_qa_predictions
from metrics import du_reader_metrics


def setup_seed(seed):
    """Seed every RNG source in use (stdlib, numpy, torch, CUDA) and force
    deterministic cuDNN kernels so runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    cudnn.deterministic = True


setup_seed(100)

class CustomWriter(object):
    """Thin wrapper over a tensorboard SummaryWriter that averages the
    losses accumulated since the last flush before logging them."""

    def __init__(self, summary_log_dir):
        # Underlying tensorboard writer.
        self.writer = SummaryWriter(summary_log_dir)
        self._init()

    def _init(self):
        # Reset the per-flush loss accumulators.
        self.train_loss = []
        self.dev_loss = []

    def train_writer(self, metrics):
        """Log the mean accumulated training loss and current LR, then reset."""
        self.train_loss.append(metrics.loss)
        mean_loss = float(np.mean(self.train_loss))
        self.writer.add_scalar("training_loss", mean_loss, metrics.global_step)
        self.writer.add_scalar("learning_rate", metrics.learning_rate, metrics.global_step)
        self._init()

    def dev_writer(self, loss, global_step):
        """Log the mean accumulated dev loss (tensor input), then reset."""
        self.dev_loss.append(loss.cpu().item())
        mean_loss = float(np.mean(self.dev_loss))
        self.writer.add_scalar("dev_loss", mean_loss, global_step)
        self._init()


class MidVariables(object):
    """Mutable bag of training-state values (epoch/step counters, loss,
    latest eval metrics, early-stop counter) that is pickled into the
    mid-run checkpoint."""

    def __init__(self, epoch=0, learning_rate=0., curr_step=0, global_step=0,
                 loss=10., metrics=None, early_stop_counter=0):
        self.curr_epoch = epoch
        self.curr_step = curr_step
        self.global_step = global_step
        self.learning_rate = learning_rate
        self.loss = loss
        self.metrics = metrics
        self.early_stop_counter = early_stop_counter

    def update_metrics(self, local_step, loss, learning_rate):
        """Record the latest optimizer step and bump the global counter."""
        self.global_step += 1
        self.curr_step = local_step
        self.loss = loss
        self.learning_rate = learning_rate


def wrapper_model_tokenizer(args):
    """Build the QA model and tokenizer, move the model to the target
    device and, in distributed mode, wrap it with DDP."""
    logger.info("正在初始化模型...")
    if args.is_distributed:
        dist.init_process_group(backend="nccl")

    # Hold non-master ranks so only rank 0 downloads/caches the weights.
    if args.is_distributed and args.local_rank != 0:
        torch.distributed.barrier()

    config = AutoConfig.from_pretrained(args.model_name_or_path)
    model = AutoModelForQuestionAnswering.from_pretrained(args.model_name_or_path, config=config)
    tokenizer = AutoTokenizer.from_pretrained(
        pretrained_model_name_or_path=args.model_name_or_path,
        use_fast=True,
        do_lower_case=args.do_lower_case,
    )
    # Release the waiting ranks once rank 0 is done loading.
    if args.is_distributed and args.local_rank == 0:
        torch.distributed.barrier()

    model = model.to(args.device)
    if not args.is_distributed:
        logger.info("开始单GPU执行...")
        return model, tokenizer

    logger.info("开始多GPU执行...")
    # find_unused_parameters=True walks the autograd graph every step; this
    # model has no unused parameters, so keep it off for speed.
    model = DDP(model, device_ids=[args.local_rank], find_unused_parameters=False)
    return model, tokenizer


def optimizer_func(args, model):
    """Create an AdamW optimizer whose weight decay is disabled for bias
    and LayerNorm parameters (standard BERT-style grouping)."""
    skip_decay = ('bias', 'LayerNorm.bias', 'LayerNorm.weight')
    decayed, undecayed = [], []
    for name, param in model.named_parameters():
        if any(tag in name for tag in skip_decay):
            undecayed.append(param)
        else:
            decayed.append(param)

    grouped_params = [
        {'params': decayed, 'weight_decay': args.weight_decay},
        {'params': undecayed, 'weight_decay': 0.0},
    ]
    return AdamW(grouped_params, lr=args.learning_rate)


def gather_loss_from_multi_gpu(args, each_loss):
    """All-reduce (sum) the per-rank loss and return the mean across GPUs.

    Works on a clone so the caller's tensor is untouched.
    """
    reduced = each_loss.clone()
    dist.all_reduce(reduced, op=dist.ReduceOp.SUM)
    return reduced / args.usable_gpu_count


def mrc_forward(args, model, batch, mrc_mode="train"):
    """Run one forward pass.

    In "train" mode returns the scalar loss (averaged over ranks when
    distributed); otherwise returns (start_logits, end_logits) as numpy
    arrays. The batch tensors are moved to ``args.device`` in place.
    """
    for key in batch:
        batch[key] = batch[key].to(args.device)

    outputs = model(**batch)
    if mrc_mode != "train":
        return outputs.start_logits.cpu().numpy(), outputs.end_logits.cpu().numpy()

    loss = outputs.loss.mean()
    if args.is_distributed:
        torch.distributed.barrier()
        loss = gather_loss_from_multi_gpu(args, loss)
    return loss


def eval_metrics_update(mid_variable, eval_metrics):
    """Store the newest eval metrics and maintain the early-stop counter.

    The counter resets whenever at least one metric improved over the
    previous evaluation and increments when none did, so early stopping
    fires only after consecutive non-improving evaluations.

    Bug fix: the original comparison was inverted — it incremented the
    counter when a metric improved, making training stop on improving runs.
    """
    if mid_variable.metrics is None:
        # First evaluation: nothing to compare against yet.
        mid_variable.metrics = eval_metrics
        return

    improved = (
        mid_variable.metrics["BLEU-4"] < eval_metrics["BLEU-4"]
        or mid_variable.metrics["ROUGE-L"] < eval_metrics["ROUGE-L"]
        or mid_variable.metrics["recall"] < eval_metrics["recall"]
        or mid_variable.metrics["precision"] < eval_metrics["precision"]
    )
    if improved:
        mid_variable.early_stop_counter = 0
    else:
        mid_variable.early_stop_counter += 1
    mid_variable.metrics = eval_metrics


def update_variable_and_evaluate(args, local_step, loss, scheduler, c_writer, train_tqdm, train_loader, mid_variable):
    """Log the latest optimizer step and decide whether to evaluate now.

    Updates ``mid_variable`` and the tensorboard writer, refreshes the tqdm
    description, and returns True when an evaluation should run at this
    step, False otherwise.

    Bug fix: the original returned ``global_step % interval != 0``, which
    triggered evaluation on every step EXCEPT the scheduled ones — the
    opposite of the documented contract and of the caller's expectation.
    """
    # 1. Record step metrics and push them to tensorboard.
    mid_variable.update_metrics(local_step, loss.cpu().item(), scheduler.get_last_lr()[0])
    c_writer.train_writer(mid_variable)

    # 2. Refresh the progress-bar description. The learning rate is shown in
    #    scientific notation because typical values (e.g. 5e-5) always
    #    rendered as 0.0 with round(lr, 2).
    desc = (
        f"Epoch: {mid_variable.curr_epoch} Step: {mid_variable.curr_step} "
        f"LR: {mid_variable.learning_rate:.2e} Loss: {round(mid_variable.loss, 2)}"
    )
    train_tqdm.set_description(desc)

    # 3. Evaluation scheduling: disabled entirely when the per-epoch count
    #    is 0; otherwise evaluate evaluation_per_epoch times per epoch.
    if args.evaluation_per_epoch == 0:
        return False

    eval_interval = math.ceil(len(train_loader) / (args.accumulation_steps * args.evaluation_per_epoch))
    return mid_variable.global_step % eval_interval == 0

def train_epoch(args, model, optimizer, scheduler, amp, tokenizer, mid_variable, c_writer, train_loader, dev_loader):
    """Run one training epoch with gradient accumulation, optional fp16,
    periodic evaluation / early stopping, and checkpointing.

    Fixes: uses ``clip_grad_norm_`` (the in-place API) instead of the
    long-deprecated ``clip_grad_norm``, and sets the tqdm total to the
    actual number of batches (the old ``len - 1`` made the bar overflow).
    """
    model.train()
    train_tqdm = tqdm(enumerate(train_loader), total=len(train_loader), leave=True)
    for local_step, batch in train_tqdm:
        loss = mrc_forward(args, model, batch, mrc_mode="train")
        # Backward pass (through apex's loss scaler when fp16 is enabled).
        if args.fp16:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        # Keep accumulating gradients until a full effective batch is done.
        if (local_step + 1) % args.accumulation_steps != 0:
            continue

        # Gradient clipping must happen before optimizer.step().
        if args.fp16:
            torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
        else:
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

        optimizer.step()       # parameter update
        scheduler.step()       # learning-rate update
        optimizer.zero_grad()  # clear accumulated gradients

        # Decide whether this step triggers an evaluation.
        evaluate = update_variable_and_evaluate(args, local_step, loss, scheduler, c_writer, train_tqdm, train_loader, mid_variable)
        if not evaluate or dev_loader is None:
            continue

        # Evaluate on the dev set and track early stopping.
        eval_metrics = evaluation(
            args=args,
            model=model,
            dev_loader=dev_loader
        )
        eval_metrics_update(mid_variable, eval_metrics)
        if mid_variable.early_stop_counter > args.early_stop_count:
            break

        # Persist a checkpoint after each evaluation (master rank only).
        if args.local_rank == 0:
            save_checkpoint(args, model, optimizer, scheduler, amp, tokenizer, mid_variable)

    # Save once more at the end of the epoch (master rank only).
    if args.local_rank == 0:
        save_checkpoint(args, model, optimizer, scheduler, amp, tokenizer, mid_variable)


def evaluation(args, model, dev_loader):
    """Run inference over the dev/test loader and compute the QA metrics.

    ``dev_loader`` is a ``(iterator, datasets)`` pair. Returns the metric
    dict produced by ``du_reader_metrics``; the model is put back in train
    mode before returning.

    Fix: tqdm ``total`` was ``len(dev_iterator) - 1``, but enumerate yields
    exactly ``len(dev_iterator)`` batches, so the bar overflowed.
    """
    model.eval()
    dev_iterator, dev_datasets = dev_loader
    all_start_logits = []
    all_end_logits = []
    all_data_ids = []
    all_passage_ids = []
    for step, batch in tqdm(enumerate(dev_iterator), desc="验证进度", total=len(dev_iterator)):
        with torch.no_grad():
            inputs = {
                "input_ids": batch["input_ids"],
                "attention_mask": batch["attention_mask"],
                "token_type_ids": batch["token_type_ids"]
            }
            start_logit, end_logit = mrc_forward(args, model, inputs, mrc_mode="dev")
            all_start_logits.extend(start_logit)
            all_end_logits.extend(end_logit)
            all_data_ids.extend(batch["data_ids"].tolist())
            all_passage_ids.extend(batch["passage_ids"].tolist())

    predictions_result, ground_truth_result = post_process_of_qa_predictions(
        all_start_logits, all_end_logits, all_data_ids, all_passage_ids, dev_datasets)
    eval_metric = du_reader_metrics(ground_truth_result, predictions_result)
    model.train()
    return eval_metric


def save_checkpoint(args, model, optimizer, scheduler, amp, tokenizer, mid_variable):
    """Persist the training state to a pickle (optimizer/scheduler/mid
    variables, plus amp when fp16 is on) and the model/tokenizer to a
    per-epoch directory."""
    state = {
        "optimizer": optimizer.state_dict(),
        "scheduler": scheduler.state_dict(),
        "mid_variable": mid_variable,
    }
    if args.fp16:
        state["amp"] = amp.state_dict()

    with open(args.mid_checkpoint, "wb") as handle:
        pickle.dump(state, handle)

    model_path = os.path.join(args.model_dir, f"epoch_{mid_variable.curr_epoch}")
    # DDP wraps the real model in .module; unwrap before saving.
    target_model = model.module if args.is_distributed else model
    target_model.save_pretrained(model_path)
    tokenizer.save_pretrained(model_path)


def resume_checkpoint(args, optimizer, scheduler, amp):
    """Restore optimizer/scheduler/mid-variable state (and amp state when
    fp16 is enabled) from the pickle written by ``save_checkpoint``.

    Returns the updated ``(optimizer, scheduler, mid_variables, amp)``.

    Bug fix: ``amp.load_state_dict`` mutates ``amp`` in place and returns
    None; the original assigned that None back to ``amp``, so the returned
    amp object was lost on every fp16 resume.
    """
    logger.info("正在恢复模型参数")
    with open(args.mid_checkpoint, "rb") as f:
        checkpoint = pickle.load(f)

    mid_variables = checkpoint["mid_variable"]
    optimizer.load_state_dict(checkpoint["optimizer"])
    scheduler.load_state_dict(checkpoint["scheduler"])
    # Restore the loss-scaler state only when mixed precision is active.
    if args.fp16:
        amp.load_state_dict(checkpoint["amp"])

    return optimizer, scheduler, mid_variables, amp


def main_train(args, model, tokenizer, train_loader, dev_loader):
    """Prepare the optimizer, scheduler and logging objects, optionally
    resume from a saved checkpoint, then iterate training epochs with
    early stopping."""
    # Total optimizer steps across the full run (accumulation-adjusted).
    steps_per_epoch = int(len(train_loader) / args.accumulation_steps) + 1
    total_steps = steps_per_epoch * args.epochs

    opt = optimizer_func(args, model)
    sched = get_cosine_schedule_with_warmup(
        opt,
        num_warmup_steps=int(total_steps * args.warm_up_portion),
        num_training_steps=total_steps,
    )

    writer = CustomWriter(args.log_dir)  # tensorboard logging
    state = MidVariables()               # mutable training-state holder

    # Optional apex mixed-precision setup.
    amp = None
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, opt = amp.initialize(model, opt, opt_level=args.opt_level)

    # Resume from the mid-run checkpoint when one exists. Apex recommends
    # calling the load_state_dict methods after amp.initialize.
    if os.path.exists(args.mid_checkpoint):
        opt, sched, state, amp = resume_checkpoint(args, opt, sched, amp)

    logger.info("开始模型训练...")
    for _ in range(state.curr_epoch, args.epochs):
        if state.early_stop_counter > args.early_stop_count:
            logger.info(f"指标连续{args.early_stop_count}次没有优化，因此停止训练!")
            break

        state.curr_epoch += 1
        logger.info(f"--- train start epoch {state.curr_epoch} ---\n")

        train_epoch(
            args=args,
            model=model,
            optimizer=opt,
            scheduler=sched,
            amp=amp,
            tokenizer=tokenizer,
            mid_variable=state,
            c_writer=writer,
            train_loader=train_loader,
            dev_loader=dev_loader,
        )


def main_worker(args):
    """Load the model/tokenizer and data loaders, then dispatch training
    and/or a final evaluation on the test set."""
    model, tokenizer = wrapper_model_tokenizer(args)

    # Shared loader factory; only the data path and mode differ per split.
    build_loader = partial(
        init_data_loader,
        tokenizer=tokenizer,
        num_workers=args.num_workers,
        batch_size=args.batch_size_per_gpu,
        is_distributed=args.is_distributed,
        max_seq_len=args.max_seq_len,
        max_query_len=args.max_query_len,
        max_title_len=args.max_title_len,
    )

    # Let rank 0 build/cache the datasets first in distributed mode.
    if args.is_distributed and args.local_rank != 0:
        torch.distributed.barrier()
    train_loader = build_loader(data_path=args.train_path, mode="train") if args.train_path else None
    dev_loader = None
    test_loader = build_loader(data_path=args.test_path, mode="test") if args.test_path else None
    if args.is_distributed and args.local_rank == 0:
        torch.distributed.barrier()

    # Train only when a training split was supplied.
    if train_loader:
        main_train(args, model, tokenizer, train_loader, dev_loader)

    # Final evaluation on the held-out test split, when supplied.
    if test_loader:
        test_metrics = evaluation(
            args=args,
            model=model,
            dev_loader=test_loader
        )
        classify_report = test_metrics.pop("classifyReport")
        logger.info(f"\n{classify_report}")
        logger.info("\n" + json.dumps(test_metrics, indent=2, ensure_ascii=False))

def main(args):
    """Resolve GPU/device settings and output directories on ``args``,
    then start the worker."""
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus

    usable_gpu_count = torch.cuda.device_count()
    if args.is_distributed:
        # Distributed mode only makes sense with more than one visible GPU.
        assert usable_gpu_count > 1

    if torch.cuda.is_available():
        args.device = torch.device("cuda", args.local_rank)
    else:
        args.device = torch.device("cpu")
    args.usable_gpu_count = usable_gpu_count

    # Derived output locations.
    args.log_dir = os.path.join(args.output_dir, "logs")
    args.model_dir = os.path.join(args.output_dir, "models")
    args.mid_checkpoint = os.path.join(args.output_dir, "mid_checkpoint.pkl")

    os.makedirs(args.log_dir, exist_ok=True)
    os.makedirs(args.model_dir, exist_ok=True)

    main_worker(args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Distributed / acceleration flags.
    parser.add_argument("--is_distributed", action="store_true", help="是否分布式")
    parser.add_argument("--fp16", action="store_true", help="是否采用混合精度训练")
    parser.add_argument("--gpus", type=str, default="0", help="GPU参数,格式为: 0,1,2")
    parser.add_argument("--opt_level", type=str, default="O1", help="O0纯FP32, O3纯FP16, O1混合精度训练, O2几乎FP16 详见: https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=0, help="GPU号从0开始")

    # Dataset paths and output directories.
    parser.add_argument("--train_path", type=str, default=None, required=False, help="训练数据路径")
    parser.add_argument("--dev_path", type=str, default=None, required=False, help="验证集路径")
    parser.add_argument("--test_path", type=str, default=None, required=False, help="测试集路径")
    parser.add_argument("--model_name_or_path", type=str, default=None, required=True, help="基模型权重目录")
    parser.add_argument("--output_dir", type=str, default=None, required=True, help="输出目录")

    # Data-related parameters.
    parser.add_argument("--max_seq_len", type=int, default=512, help="序列最大长度")
    parser.add_argument("--max_query_len", type=int, default=32, help="问题最大长度")
    parser.add_argument("--max_title_len", type=int, default=0, help="标题最大长度")
    parser.add_argument("--num_workers", type=int, default=0, help="dataloader子进程数,默认和GPU数一致")

    # Model hyper-parameters (defaults tuned on a GeForce RTX 3090).
    parser.add_argument("--epochs", type=int, default=3, help="微调轮次")
    # Bug fix: type was str, so a CLI-supplied value reached gradient
    # clipping as a string; the default itself was already a float.
    parser.add_argument("--max_grad_norm", type=float, default=1.0, help="最大梯度值")
    parser.add_argument("--weight_decay", type=float, default=0.01, help="权重衰减")
    parser.add_argument("--learning_rate", type=float, default=5e-5, help="学习速率")
    parser.add_argument("--warm_up_portion", type=float, default=0.1, help="学习速率递增时所占数据的比例")
    parser.add_argument("--accumulation_steps", type=int, default=8, help="一个完整batch_size需要积累次数")
    parser.add_argument("--batch_size_per_gpu", type=int, default=8, help="一个GPU每个批次数据条目, 需要根据GPU显存和实际数据进行调整")

    # Training-process options.
    parser.add_argument("--evaluation_per_epoch", type=int, default=1, help="每个Epoch过程中评估次数")
    parser.add_argument("--early_stop_count", type=int, default=5, help="评估过程中如果出现连续N次指标不下降，则早停")
    # Bug fix: the original help text ("tokenizer是否执行store_true") was a
    # copy-paste artifact describing argparse internals, not the flag.
    parser.add_argument("--do_lower_case", action="store_true", help="tokenizer是否将文本小写化")

    parser_args = parser.parse_args()
    main(parser_args)
