# -*- coding: utf-8 -*-
# @Time    : 2023/5/17 3:09 下午
# @Author  : Wu WanJie

import copy
import json
import os
import random
import torch
import argparse
import datetime
import numpy as np
from tqdm import tqdm
import torch.nn as nn
from typing import Any
from loguru import logger
from torch.optim import AdamW
from torch.backends import cudnn
from dataclasses import dataclass
from accelerate import Accelerator
from torch.utils.data import DataLoader
from transformers import (
    BertTokenizer,
    RobertaTokenizer,
    AutoTokenizer,

    BertConfig,
    RobertaConfig,
    ErnieConfig,
    BloomConfig,

    BertForSequenceClassification,
    RobertaForSequenceClassification,
    ErnieForSequenceClassification,
    BloomForSequenceClassification,

    get_linear_schedule_with_warmup
)
from tensorboardX import SummaryWriter
from sklearn.metrics import precision_recall_fscore_support, precision_score, recall_score, f1_score, classification_report

from dataset import load_label_system, ClfDataset, ClfCollate
from loss import FocalLoss


# Maps the --model_type CLI value to its (config class, sequence-classification
# model class, tokenizer class) triple. ernie/bloom use AutoTokenizer to let
# transformers resolve the correct tokenizer from the checkpoint.
MODEL_CLASSES = {
    "bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
    "roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    "ernie": (ErnieConfig, ErnieForSequenceClassification, AutoTokenizer),
    "bloom": (BloomConfig, BloomForSequenceClassification, AutoTokenizer)
}


def setup_seed(seed=42):
    """Fix all random seeds so training runs are reproducible.

    Seeds Python's ``random``, NumPy, and PyTorch (CPU and all CUDA devices),
    and configures cuDNN for deterministic kernels.

    :param seed: seed value shared by every RNG (default 42).
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    random.seed(seed)
    np.random.seed(seed)
    # BUGFIX: benchmark=True lets cuDNN auto-tune convolution algorithms per
    # input shape, which makes runs non-reproducible; it must be disabled
    # whenever deterministic=True is requested.
    cudnn.benchmark = False
    cudnn.deterministic = True


def argument_parser(argv=None):
    """Define and parse all CLI arguments for training.

    :param argv: optional list of argument strings; ``None`` (the default)
        parses ``sys.argv`` as before, so existing callers are unaffected.
    :return: the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser()

    group_ds = parser.add_argument_group("dataset", description="路径相关参数配置")
    group_ds.add_argument("--max_seq_length", type=int, default=512, help="序列最大长度")
    group_ds.add_argument("--dataset_dir", type=str, default="/note/nlp_algo/app/data/competition", help="数据文件地址")
    group_ds.add_argument("--output_dir", type=str, default="/data/output", help="输出目录")
    group_ds.add_argument("--model_name_or_path", type=str, default="/data/transformers/chinese-roberta-wwm-ext-large", help="基模型目录")
    group_ds.add_argument("--threshold", type=float, default=0.5, help="sigmoid阈值")
    group_ds.add_argument("--model_type", type=str, default="bert", choices=["bert", "bloom", "ernie", "roberta"], help="模型类型")
    group_ds.add_argument("--cuda_visible_id", type=str, default="1",  help="CUDA 的id")
    group_ds.add_argument("--train_file_name", type=str, default="train_data.csv",  help="训练文件")
    group_ds.add_argument("--style", type=str, default="style_9",  help="类型")
    group_ds.add_argument("--target_name", type=str, default="", help="二分类栏目")

    group_hyper = parser.add_argument_group("hyper_params", description="超参数配置")
    group_hyper.add_argument("--loss_type", type=str, default="ce", choices=["ce", "focal"], help="loss损失函数")
    group_hyper.add_argument("--task_type", type=str, default="multi", choices=["multi", "binary"], help="任务类型")
    group_hyper.add_argument("--epochs", type=int, default=20, help="训练轮次")
    group_hyper.add_argument("--batch_size", type=int, default=8, help="训练批次大小")
    group_hyper.add_argument("--learning_rate", type=float, default=1e-5, help="学习速率")
    # BUGFIX: help text was a copy-paste of "学习速率" (learning rate).
    group_hyper.add_argument("--adam_epsilon", type=float, default=1e-8, help="Adam优化器的epsilon参数")
    group_hyper.add_argument("--max_grad_norm", type=float, default=1.0, help="最大梯度,超过就丢弃")
    group_hyper.add_argument("--eval_count_per_epoch", type=int, default=2, help="每个Epoch评估多少次")
    group_hyper.add_argument("--min_count", type=int, default=200, help="最少样本数量")
    group_hyper.add_argument("--log_iter_num", type=int, default=10, help="每隔多少个Iteration打印一次日志")
    group_hyper.add_argument("--warmup_proportion", type=float, default=0.1, help="学习速率递增时所占数据的比例")
    group_hyper.add_argument("--gradient_accumulation_steps", type=int, default=1,
                             help="一个完整的batch需要梯度累积的小的batch数,即多少个batch更新一次权重")
    group_hyper.add_argument("--weight_decay", type=float, default=0.0001, help="权重衰减速率")
    group_hyper.add_argument("--early_stop_count", type=int, default=10, help="评估过程中如果出现连续N次效果不提升，则早停")

    args = parser.parse_args(argv)
    return args


def argument_handler():
    """Parse CLI args and enrich them with runtime settings.

    Pins the visible CUDA devices, resolves the compute device, creates a
    timestamped output/log directory, and attaches a file sink to the logger.
    """
    args = argument_parser()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_visible_id
    os.environ["CUDA_DEVICES_ORDER"] = "PCI_BUS_ID"

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    stamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    model_output_dir = os.path.join(args.output_dir, f"{stamp}")
    log_dir = os.path.join(args.output_dir, f"{stamp}/logs")
    os.makedirs(log_dir, exist_ok=True)

    logger.add(os.path.join(log_dir, "train.log"), format="{time} {level} {message}", rotation="500 MB")
    args.model_output_dir = model_output_dir
    args.log_dir = log_dir
    return args


@dataclass
class MidVariables:
    """Mutable bookkeeping state shared across the training loop."""
    # current learning rate (reserved; not updated anywhere in this file)
    learning_rate: float = 0.
    # best evaluation metrics seen so far, as returned by evaluate()
    # ({"precision", "recall", "f1", "loss"}); None until the first eval
    metric:  Any = None
    # 1-based epoch counter
    curr_epoch: int = 0
    # raw iteration counter (every batch, before gradient accumulation)
    curr_step: int = 0
    # optimizer-step counter (after gradient accumulation)
    global_step: int = 0
    # consecutive evaluations without an f1 improvement
    early_stop_count: int = 0
    # directory of the best checkpoint saved so far
    best_model_path: str = ""


def optimizer_func(args, model):
    """Build an AdamW optimizer where bias/LayerNorm params skip weight decay."""
    no_decay = ("bias", "LayerNorm.weight")
    decayed, exempt = [], []
    for name, param in model.named_parameters():
        if any(tag in name for tag in no_decay):
            exempt.append(param)
        else:
            decayed.append(param)
    param_groups = [
        {"params": decayed, "weight_decay": args.weight_decay},
        {"params": exempt, "weight_decay": 0.0},
    ]
    return AdamW(param_groups, lr=args.learning_rate, eps=args.adam_epsilon)


def get_train_loader(args, tokenizer):
    """Build the shuffled training DataLoader; return it with the dataset's class weights."""
    dataset = ClfDataset(
        data_dir=args.dataset_dir,
        file_name=args.train_file_name,
        mode="train",
        task_type=args.task_type,
        style=args.style,
        target_name=args.target_name,
        min_count=args.min_count,
    )
    collate = ClfCollate(tokenizer=tokenizer, max_seq_length=args.max_seq_length)
    loader = DataLoader(
        dataset=dataset,
        batch_size=args.batch_size,
        shuffle=True,
        collate_fn=collate,
    )
    return loader, dataset.weight


def train(args, model, tokenizer, accelerator):
    """Run the full training loop with periodic evaluation and checkpointing.

    :param args: parsed/enriched CLI arguments (see ``argument_handler``).
    :param model: a transformers ``*ForSequenceClassification`` model.
    :param tokenizer: tokenizer matching ``model``.
    :param accelerator: ``accelerate.Accelerator`` handling device placement
        and the backward pass.
    :raises NotImplementedError: on an unsupported loss/task combination.
    """
    mid_vars = MidVariables()
    writer = SummaryWriter(logdir=args.log_dir)
    train_loader, weight = get_train_loader(args, tokenizer)
    num_training_steps = len(train_loader) // args.gradient_accumulation_steps * args.epochs

    # Pick the loss: weighted CE or focal for binary, weighted BCE for multi-label.
    if args.task_type == "binary":
        if args.loss_type == "ce":
            criterion = nn.CrossEntropyLoss(weight=torch.tensor(weight, dtype=torch.float32))
        elif args.loss_type == "focal":
            criterion = FocalLoss()
        else:
            raise NotImplementedError("loss未实现")
    elif args.task_type == "multi":
        criterion = nn.BCEWithLogitsLoss(weight=torch.tensor(weight, dtype=torch.float32))
    else:
        raise NotImplementedError("任务未实现")

    # BUGFIX: criterion.cuda() crashed on CPU-only machines; follow args.device.
    criterion.to(args.device)
    optimizer = optimizer_func(args, model)
    scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=int(args.warmup_proportion * num_training_steps),
        num_training_steps=num_training_steps
    )
    model, optimizer, scheduler, train_loader = accelerator.prepare(model, optimizer, scheduler, train_loader)
    model.train()
    for _epoch in tqdm(range(args.epochs), desc="Epoch: "):
        if _epoch != 0:
            # Rebuild the loader each epoch to reshuffle the data.
            # NOTE(review): the rebuilt loader is not re-wrapped by
            # accelerator.prepare; batches are moved to args.device manually
            # below — confirm this is intended for multi-GPU runs.
            train_loader, weight = get_train_loader(args, tokenizer)
        mid_vars.curr_epoch += 1
        # BUGFIX: guard against a zero interval (ZeroDivisionError in the
        # modulo below) when the loader is shorter than
        # gradient_accumulation_steps * eval_count_per_epoch.
        eval_interval = max(
            1, len(train_loader) // (args.gradient_accumulation_steps * args.eval_count_per_epoch))
        for step, batch in enumerate(tqdm(train_loader, desc="Iteration: ")):
            if mid_vars.early_stop_count >= args.early_stop_count:
                logger.info(f"模型训练效果已经连续有{args.early_stop_count}次未提升，退出训练")
                return
            mid_vars.curr_step += 1

            for key, val in batch.items():
                batch[key] = val.to(args.device)
            inputs = {
                "input_ids": batch["input_ids"],
                "attention_mask": batch["attention_mask"],
            }
            if args.model_type not in ["distilbert", "bloom"]:
                # XLM, DistilBERT and RoBERTa don't use segment_ids
                inputs["token_type_ids"] = batch["token_type_ids"] if args.model_type in ["bert", "xlnet"] else None

            outputs = model(**inputs)
            logits = outputs.logits
            if args.task_type == "multi":
                loss = criterion(logits, batch["labels"])
            else:
                # CrossEntropyLoss expects integer class indices.
                loss = criterion(logits, batch["labels"].to(torch.int64))
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            # BUGFIX: the loss tensor was missing from the backward call, so
            # no gradients were ever computed and the model never trained.
            accelerator.backward(loss)

            if (step + 1) % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                writer.add_scalars("loss", {"train_loss": loss.cpu().item()}, mid_vars.global_step)
                writer.add_scalar("lr", scheduler.get_last_lr()[0], mid_vars.global_step)

                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                mid_vars.global_step += 1

                # Batch-level metrics: thresholded sigmoid for multi-label,
                # argmax for binary classification.
                y_true = batch["labels"].cpu().numpy()
                if args.task_type == "multi":
                    y_true = y_true.astype(int)
                    y_pred = torch.sigmoid(logits).cpu().detach().numpy()
                    y_pred = np.where(y_pred >= args.threshold, 1, 0)
                    precision, recall, f1score, _ = precision_recall_fscore_support(
                        y_true, y_pred, average="macro", zero_division=1)
                else:
                    y_pred = torch.argmax(logits, dim=1).cpu().numpy()
                    precision = precision_score(y_true, y_pred, average="macro", zero_division=0)
                    recall = recall_score(y_true, y_pred, average="macro", zero_division=0)
                    f1score = f1_score(y_true, y_pred, average="macro", zero_division=0)

                if mid_vars.global_step % args.log_iter_num == 0:
                    logger.info(f"Epoch: {mid_vars.curr_epoch} "
                                f"Global_step: {mid_vars.global_step}  "
                                f"Loss: {loss.cpu().item()} "
                                f"Precision: 【{str(round(precision, 3))}】 "
                                f"Recall: 【{str(round(recall, 3))}】 "
                                f"F1_score: 【{str(round(f1score, 3))}】 ")

                if mid_vars.global_step % eval_interval == 0:
                    logger.info("开始评估数据...")
                    metric = evaluate(args, model, criterion, tokenizer)
                    logger.info(json.dumps(metric, indent=2, ensure_ascii=False))
                    writer.add_scalars("loss", {"dev_loss": metric["loss"]}, mid_vars.global_step)
                    if not mid_vars.metric:
                        mid_vars.metric = metric

                    if metric["f1"] < mid_vars.metric["f1"]:
                        # No improvement: count toward early stopping, skip saving.
                        mid_vars.early_stop_count += 1
                        continue
                    mid_vars.early_stop_count = 0
                    mid_vars.best_model_path = os.path.join(
                        args.model_output_dir, f"checkpoint_{str(mid_vars.global_step)}")
                    mid_vars.metric = metric
                    save_checkpoint(args, model, optimizer, scheduler, mid_vars)


def save_checkpoint(args, model, optimizer, scheduler, mid_var):
    """Persist model weights plus optimizer/scheduler/loop state for resuming.

    Saves a ``checkpoint.pt`` (optimizer, scheduler, loop state, args) and the
    transformers model files under a ``checkpoint_<global_step>`` directory.
    """
    model_dir = os.path.join(args.model_output_dir, f"checkpoint_{str(mid_var.global_step)}")
    os.makedirs(model_dir, exist_ok=True)

    logger.info("开始保存模型...")
    state = {
        "optimizer": optimizer.state_dict(),
        "scheduler": scheduler.state_dict(),
        "mid_var": mid_var,
        "args": args,
    }
    torch.save(state, os.path.join(model_dir, "checkpoint.pt"))
    model.save_pretrained(model_dir)
    logger.info(f"模型保存成功, 地址为: {model_dir}")


def evaluate(args, model, criterion, tokenizer):
    """Evaluate the model on eval_data.csv.

    Runs the dev set under ``torch.no_grad``, logs per-class metrics (and a
    classification report for binary tasks), and restores train mode before
    returning.

    :param args: parsed/enriched CLI arguments.
    :param model: the model being trained (switched to eval mode here).
    :param criterion: loss function matching ``args.task_type``.
    :param tokenizer: tokenizer used by the collate function.
    :return: dict with macro "precision", "recall", "f1" and mean "loss".
    """
    model.eval()
    pred_lst = []
    true_lst = []
    loss_lst = []
    eval_dataset = ClfDataset(
        data_dir=args.dataset_dir,
        file_name="eval_data.csv",
        mode="eval",
        task_type=args.task_type,
        style=args.style,
        target_name=args.target_name,
        min_count=args.min_count
    )
    dev_loader = DataLoader(
        dataset=eval_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        collate_fn=ClfCollate(
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
        )
    )
    for step, batch in enumerate(tqdm(dev_loader, desc="Evaluation: ")):
        with torch.no_grad():
            for key, val in batch.items():
                batch[key] = val.to(args.device)

            inputs = {
                "input_ids": batch["input_ids"],
                "attention_mask": batch["attention_mask"],
            }
            if args.model_type not in ["distilbert", "bloom"]:
                # XLM, DistilBERT and RoBERTa don't use segment_ids
                inputs["token_type_ids"] = batch["token_type_ids"] if args.model_type in ["bert", "xlnet"] else None

            outputs = model(**inputs)
            logits = outputs.logits
            y_true = batch["labels"].cpu().numpy()
            if args.task_type == "multi":
                loss = criterion(logits, batch["labels"])
                y_true = y_true.astype(int)
                y_pred = torch.sigmoid(logits).cpu().detach().numpy()
                y_pred = np.where(y_pred >= args.threshold, 1, 0)
            else:
                # CrossEntropyLoss expects integer class indices.
                loss = criterion(logits, batch["labels"].to(torch.int64))
                y_pred = torch.argmax(logits, dim=1).cpu().detach().numpy()

            pred_lst.extend(y_pred)
            true_lst.extend(y_true)
            loss_lst.append(loss.cpu().item())

    y_true_arr = np.array(true_lst)
    y_pred_arr = np.array(pred_lst)
    if args.task_type == "multi":
        precision, recall, f1, support = precision_recall_fscore_support(
            y_true_arr, y_pred_arr, zero_division=0)
        # BUGFIX: was a hard-coded range(35), which crashed or truncated the
        # per-class log for any label system with a different class count.
        for i in range(len(precision)):
            logger.info(
                f"Class {i} - Precision: {precision[i]}, Recall: {recall[i]}, F1-score: {f1[i]}, Support: {support[i]}")
        precision, recall, f1score, support = precision_recall_fscore_support(
            y_true_arr, y_pred_arr, average="macro", zero_division=0)
    else:
        precision = precision_score(y_true_arr, y_pred_arr, average="macro", zero_division=0)
        recall = recall_score(y_true_arr, y_pred_arr, average="macro", zero_division=0)
        f1score = f1_score(y_true_arr, y_pred_arr, average="macro", zero_division=0)
        logger.info(classification_report(y_true_arr, y_pred_arr))

    loss = np.average(np.array(loss_lst))
    model.train()
    return {
        "precision": precision,
        "recall": recall,
        "f1": f1score,
        "loss": loss
    }


def parse_conf():
    """Seed all RNGs, parse/augment the CLI arguments, and log the config.

    The ``device`` entry is excluded from the logged JSON because it is not
    serializable.
    """
    setup_seed(42)
    args = argument_handler()
    printable = {k: v for k, v in vars(args).items() if k != "device"}
    logger.info("\n" + json.dumps(printable, ensure_ascii=False, indent=2))
    return args


def main():
    """Entry point.

    1. Parse configuration and the label system.
    2. Build tokenizer, config, and model for the chosen model type.
    3. Launch training via accelerate.
    """
    args = parse_conf()
    args.data_conf = load_label_system(os.path.join(args.dataset_dir, "readme.txt"))

    if args.task_type == "binary":
        assert args.target_name in args.data_conf["label2id"].keys()
        num_labels = 2
    else:
        num_labels = len(args.data_conf["label2id"])

    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)

    accelerator = Accelerator()
    config = config_class.from_pretrained(args.model_name_or_path, num_labels=num_labels)
    config.problem_type = "multi_label_classification"
    model = model_class.from_pretrained(args.model_name_or_path, config=config)
    train(
        args=args,
        model=model,
        tokenizer=tokenizer,
        accelerator=accelerator,
    )


if __name__ == "__main__":
    main()
