# -*- coding:utf8 -*-
# @Time : 2023/2/23 15:42
# @Author : WanJie Wu
import json
import os
import torch
import random
import argparse
import datetime
import numpy as np
import copy
from tqdm import tqdm
from loguru import logger
from torch.backends import cudnn
from tensorboardX import SummaryWriter
from transformers import BertConfig, BertTokenizer, get_linear_schedule_with_warmup
from torch.utils.data import DataLoader
from torch.optim import AdamW
from dataclasses import dataclass
from typing import Any

from models import BertCRF, BertSoftmax
from metrics import SeqEntityMetric
from dataset import ExampleProcessor, load_and_cache_examples, convert_pred2label, get_entity, gen_classify_entity, timer


def setup_seed(seed=42):
    """Seed every RNG (torch CPU/GPU, random, numpy) so runs are reproducible.

    Args:
        seed: value applied to all random number generators.
    """
    torch.manual_seed(seed)           # seed the CPU RNG
    torch.cuda.manual_seed_all(seed)  # seed every GPU RNG (no-op without CUDA)
    random.seed(seed)
    np.random.seed(seed)
    # BUG fix: benchmark was True, which lets cuDNN auto-tune convolution
    # algorithms per input shape and makes results non-deterministic across
    # runs — contradicting the purpose of this function. It must be False
    # for reproducibility.
    cudnn.benchmark = False
    cudnn.deterministic = True  # force deterministic cuDNN convolution algorithms


@dataclass
class MidVariables:
    """Mutable training progress state.

    An instance is stored inside every checkpoint (see save_checkpoint) and
    restored by resume_checkpoint so an interrupted run can continue.
    """
    learning_rate: float = 0.   # last learning rate (not updated in train(); presumably kept for checkpoint inspection — TODO confirm)
    metric:  Any = None         # best evaluation result dict seen so far (compared via metric["total"]["f1"])
    curr_epoch: int = 0         # 1-based counter of the epoch being trained
    curr_step: int = 0          # per-run iteration counter, used to fast-forward after resume
    global_step: int = 0        # optimizer-update counter across all epochs
    early_stop_count: int = 0   # consecutive evaluations without f1 improvement
    best_model_path: str = ""   # directory of the best checkpoint saved so far


def arguments_parser(argv=None):
    """Build the CLI parser and parse ``argv``.

    Args:
        argv: optional list of argument strings; ``None`` falls back to
            ``sys.argv[1:]`` (so the original no-argument call is unchanged).

    Returns:
        argparse.Namespace with all dataset / model / hyper-parameter options.
    """
    parser = argparse.ArgumentParser()

    group_ds = parser.add_argument_group("dataset", description="路径相关参数配置")
    group_ds.add_argument("--max_seq_length", type=int, default=256, help="序列最大长度")
    group_ds.add_argument("--dataset_dir", type=str,  default=None, help="数据文件地址")
    group_ds.add_argument("--output_dir", type=str, default=None,  help="输出目录")
    # BUG fix: the default used to be the bool ``False`` for a ``str`` option;
    # ``None`` is the conventional "not provided" value for string arguments.
    group_ds.add_argument("--model_name_or_path", type=str, default=None, help="基模型目录")
    # Help text fixed: it was a copy-paste of the previous option's help.
    group_ds.add_argument("--markup", type=str, default="BIO", choices=["BIO", "BIOE"],  help="序列标注格式")
    group_ds.add_argument("--do_train", action="store_true", help="是否执行训练流程")
    group_ds.add_argument("--do_dev", action="store_true", help="是否执行验证流程")
    group_ds.add_argument("--do_predict", action="store_true", help="是否执行测试流程")

    model_hyper = parser.add_argument_group("model_select", description="模型选择，以及对应的配置")
    model_hyper.add_argument("--model_type", type=str, choices=["bert_crf", "bert_softmax"], help="抽取模型类型")
    model_hyper.add_argument("--loss_type", type=str, default="ce", choices=["ce"], help="当模型类型选择为bert_softmax的时候，loss选取的类型")
    model_hyper.add_argument("--add_bilstm", action="store_true", help="是否添加bilstm层")
    model_hyper.add_argument("--num_layers", type=int, default=2, help="层数")
    model_hyper.add_argument("--lstm_hidden_size", type=int, default=256, help="Bilstm层隐藏神经元数量")

    group_hyper = parser.add_argument_group("hyper_params", description="超参数配置")
    group_hyper.add_argument("--epochs", type=int, default=1, help="训练轮次")
    group_hyper.add_argument("--batch_size", type=int, default=256, help="训练批次大小")
    group_hyper.add_argument("--learning_rate", type=float, default=3e-5, help="学习速率")
    # Help text fixed: it wrongly repeated the learning-rate description.
    group_hyper.add_argument("--adam_epsilon", type=float, default=1e-8, help="Adam优化器的epsilon")
    group_hyper.add_argument("--max_grad_norm", type=float, default=1.0, help="最大梯度,超过就丢弃")
    group_hyper.add_argument("--dropout", type=float, default=0.5, help="连接失效比例")
    group_hyper.add_argument("--eval_count_per_epoch", type=int, default=3, help="每个Epoch评估多少次")
    group_hyper.add_argument("--log_iter_num", type=int, default=10, help="每隔多少个Iteration打印一次日志")
    group_hyper.add_argument("--warmup_proportion", type=float, default=0.1, help="学习速率递增时所占数据的比例")
    group_hyper.add_argument("--gradient_accumulation_steps", type=int, default=1, help="一个完整的batch需要梯度累积的小的batch数,即多少个batch更新一次权重")
    group_hyper.add_argument("--weight_decay", type=float, default=0.01, help="权重衰减速率")
    group_hyper.add_argument("--early_stop_count", type=int, default=5, help="评估过程中如果出现连续N次效果不提升，则早停")
    # Consistency fix: --fp16 belongs to the hyper-parameter group like the
    # other training switches (it was added directly to `parser` before).
    group_hyper.add_argument("--fp16", action="store_true", help="是否采用混合精度训练")
    group_hyper.add_argument("--fp16_opt_level", type=str, default="O1", help="详见: https://nvidia.github.io/apex/amp.html; 01代表采用FP32精度模式、01混合精度模式、02近似FP16、03全FP16")

    args = parser.parse_args(argv)
    return args


def arguments_handler():
    """Parse CLI arguments, then attach device and output-path settings.

    Side effects: pins CUDA device enumeration, creates the timestamped
    log directory, and registers a file sink on the loguru logger.
    """
    # Pin GPU enumeration to PCI bus order and expose only device 0.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    args = arguments_parser()
    args.device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")

    # Each run writes into its own timestamped directory plus a logs subfolder.
    stamp = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M')
    args.model_output_dir = os.path.join(args.output_dir, f"{stamp}")
    args.log_dir = os.path.join(args.output_dir, f"{stamp}/logs")
    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir)
    logger.add(os.path.join(args.log_dir, "train.log"), format="{time} {level} {message}", rotation="500 MB")
    return args


def collate_fn(batch):
    """Stack per-example tensors into batch tensors, trimmed to the true max length.

    ``batch`` is a list of tuples
    ``(input_ids, attention_mask, token_type_ids, labels, length)``; the
    matching fields across examples are stacked, then every padded tensor is
    cut down so the batch is no longer than its longest real sequence.
    """
    input_ids, attention_mask, token_type_ids, labels, lengths = (
        torch.stack(field) for field in zip(*batch)
    )
    longest = lengths.max().item()
    return (
        input_ids[:, :longest],
        attention_mask[:, :longest],
        token_type_ids[:, :longest],
        labels[:, :longest],
        lengths,
    )


@timer
def train(args, model, train_dataset, dev_dataset, processor, tokenizer):
    """Full training loop: gradient accumulation, optional apex AMP, checkpoint
    resume, periodic in-epoch evaluation, best-model saving and early stopping.

    Args:
        args: parsed CLI namespace (hyper-parameters, device, output paths).
        model: BertCRF / BertSoftmax instance already moved to ``args.device``.
        train_dataset: training dataset batched via ``collate_fn``.
        dev_dataset: validation dataset forwarded to ``evaluate``.
        processor: label-mapping helper forwarded to ``evaluate``.
        tokenizer: saved next to the model at every checkpoint.
    """
    mid_var = MidVariables()
    c_writer = SummaryWriter(args.log_dir)  # TensorBoard writer for loss/lr curves
    train_dataloader = DataLoader(
        dataset=train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        collate_fn=collate_fn
    )
    # Standard BERT fine-tuning recipe: no weight decay on bias / LayerNorm params.
    no_decay = ["bias", "LayerNorm.weight"]

    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay},
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    # One optimizer update per `gradient_accumulation_steps` batches.
    num_training_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.epochs
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_proportion * num_training_steps), num_training_steps=num_training_steps)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("查看https://www.github.com/nvidia/apex进行安装")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # Resume optimizer/scheduler/progress state when a checkpoint exists.
    if os.path.isfile(os.path.join(args.model_name_or_path, "checkpoint.pt")):
        if args.fp16:
            mid_var = resume_checkpoint(args, optimizer, scheduler, amp)
        else:
            mid_var = resume_checkpoint(args, optimizer, scheduler, None)

    logger.info("开始模型训练....")
    model.train()
    for _epoch in tqdm(range(args.epochs), desc="Epoch："):
        mid_var.curr_epoch += 1
        for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration：")):
            if mid_var.early_stop_count >= args.early_stop_count:
                logger.info(f"模型训练效果已经连续有{args.early_stop_count}次未提升，退出训练")
                return
            # NOTE(review): fast-forward after resume — presumably meant to skip
            # batches already trained, but the condition combines the epoch and
            # step counters with `and`; confirm it actually skips to
            # (curr_epoch, curr_step) as intended.
            if _epoch < mid_var.curr_epoch-1 and step < mid_var.curr_step:
                continue
            mid_var.curr_step += 1
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2],
                "labels": batch[3],
            }
            outputs = model(**inputs)
            loss = outputs[0]  # first element of the model output is the loss
            if args.gradient_accumulation_steps > 1:
                # Scale so the accumulated gradient matches a full-batch gradient.
                loss = loss / args.gradient_accumulation_steps

            # Back-propagate (through apex's scaled loss when fp16 is on).
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            if (step + 1) % args.gradient_accumulation_steps == 0: # accumulation boundary reached
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)# gradient clipping

                c_writer.add_scalars("loss", {"train_loss": loss.cpu().item()}, mid_var.global_step)
                c_writer.add_scalar("lr", scheduler.get_last_lr()[0], mid_var.global_step)
                # optimizer.step() must run before scheduler.step()
                optimizer.step() # apply the accumulated gradients
                scheduler.step()# advance the learning-rate schedule
                optimizer.zero_grad()
                mid_var.global_step += 1
                # Periodic progress logging.
                if mid_var.global_step % args.log_iter_num == 0:
                    logger.info(f"Epoch: {mid_var.curr_epoch}\t global_step: {mid_var.global_step}\t lr_1: {scheduler.get_last_lr()[0]}\t loss: {loss.cpu().item()}")

                # Evaluate eval_count_per_epoch times per epoch.
                # NOTE(review): the divisor is a float, so this `% == 0` test may
                # never fire when len(train_dataloader) is not divisible by
                # gradient_accumulation_steps * eval_count_per_epoch — consider
                # converting the interval to int. TODO confirm intent.
                if mid_var.global_step % (len(train_dataloader) / (args.gradient_accumulation_steps * args.eval_count_per_epoch)) == 0:
                    logger.info("开始评估数据...")
                    result = evaluate(args, model, dev_dataset, processor)
                    c_writer.add_scalars("loss", {"dev_loss": result["loss"]}, mid_var.global_step)
                    if not mid_var.metric:
                        mid_var.metric = result

                    # No f1 improvement: count towards early stopping and skip saving.
                    if result["total"]["f1"] < mid_var.metric["total"]["f1"]:
                        mid_var.early_stop_count += 1
                        continue
                    mid_var.early_stop_count = 0
                    mid_var.best_model_path = os.path.join(args.model_output_dir, f"checkpoint_{str(mid_var.global_step)}")
                    mid_var.metric = result
                    if args.fp16:
                        save_checkpoint(args, model, tokenizer, optimizer, scheduler, amp, mid_var)
                    else:
                        save_checkpoint(args, model, tokenizer, optimizer, scheduler, None, mid_var)

def save_checkpoint(args, model, tokenizer, optimizer, scheduler, amp, mid_var):
    """Persist model weights, tokenizer files and training state for resuming.

    ``amp`` is the apex module when mixed-precision training is active,
    otherwise ``None``; its state is stored only when present. The target
    directory is ``<model_output_dir>/checkpoint_<global_step>``.
    """
    model_dir = os.path.join(args.model_output_dir, f"checkpoint_{str(mid_var.global_step)}")
    os.makedirs(model_dir, exist_ok=True)

    logger.info("开始保存模型...")
    state = {
        "optimizer": optimizer.state_dict(),
        "scheduler": scheduler.state_dict(),
        "mid_var": mid_var,
        "args": args,
    }
    if amp:
        state["amp"] = amp.state_dict()
    torch.save(state, os.path.join(model_dir, "checkpoint.pt"))
    model.save_pretrained(model_dir)
    tokenizer.save_pretrained(model_dir)
    logger.info(f"模型保存成功, 地址为: {model_dir}")


def resume_checkpoint(args, optimizer, scheduler, amp):
    """Reload optimizer/scheduler (and optional apex amp) state from checkpoint.pt.

    Reads ``<model_name_or_path>/checkpoint.pt`` and returns the stored
    ``mid_var`` progress object so training can continue where it stopped.
    """
    ckpt = torch.load(os.path.join(args.model_name_or_path, "checkpoint.pt"))
    for key, target in (("optimizer", optimizer), ("scheduler", scheduler)):
        target.load_state_dict(ckpt[key])
    if amp:
        amp.load_state_dict(ckpt["amp"])
    return ckpt["mid_var"]


def evaluate(args, model, dev_dataset, processor):
    """Run the model over ``dev_dataset`` and return the SeqEntityMetric result.

    The returned dict is used by ``train`` via ``result["total"]["f1"]`` and
    ``result["loss"]``. The model is put into eval mode for the pass and
    switched back to train mode before returning, so it is safe to call
    mid-training.
    """
    model.eval()
    sem = SeqEntityMetric(markup=args.markup, processor=processor)
    dev_dataloader = DataLoader(
        dataset=dev_dataset,
        batch_size=args.batch_size,
        collate_fn=collate_fn,
        shuffle=True  # NOTE(review): shuffling has no effect on metrics; likely unnecessary for evaluation
    )
    for step, batch in enumerate(tqdm(dev_dataloader, desc="Evaluation: ")):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2],
                "labels": batch[3],
            }
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs
            # Decoding depends on the head: CRF decode vs. plain per-token argmax.
            if args.model_type == "bert_crf":
                tags = model.crf.decode(logits, inputs["attention_mask"])
            elif args.model_type == "bert_softmax":
                tags = np.argmax(logits.cpu().numpy(), axis=2).tolist()
            else:
                raise NotImplementedError
            # Drop the first/last predicted positions (presumably the [CLS]/[SEP]
            # special tokens — TODO confirm against the dataset pipeline).
            pred_labels = [tag[1:-1] for tag in tags]
            input_lens = batch[4].cpu().numpy().tolist()
            # Ground truth trimmed to each sequence's real length, minus the
            # same first/last positions, to align with pred_labels.
            ground_labels = [label_ids[1: input_lens[idx]-1] for idx, label_ids in enumerate(inputs['labels'].cpu().numpy().tolist())]
            sem.update(ground_labels, pred_labels, tmp_eval_loss.cpu().item())
    metrics = sem.result()
    logger.info("数据评估结果如下: \n" + json.dumps(metrics, indent=2, ensure_ascii=False))
    model.train()
    return metrics


@timer
def predict(args, model, test_dataset, processor, tokenizer):
    """Run inference over the test set and write one JSON line per example.

    Output goes to ``<model_output_dir>/pred.txt``; each line contains the
    reconstructed text and the entities converted via ``convert_pred2label``.

    Args:
        args: parsed CLI namespace (device, model_type, markup, paths).
        model: trained BertCRF / BertSoftmax instance on ``args.device``.
        test_dataset: test dataset batched (one example at a time) via collate_fn.
        processor: supplies id2label for entity decoding.
        tokenizer: used to map input ids back to text.
    """
    model.eval()
    test_dataloader = DataLoader(
        dataset=test_dataset,
        batch_size=1,
        collate_fn=collate_fn,
        # BUG fix: shuffle was True, which randomized the order of pred.txt
        # relative to the input data and made runs non-reproducible; shuffling
        # serves no purpose at inference time.
        shuffle=False
    )
    result = list()

    for step, batch in enumerate(tqdm(test_dataloader, desc="Predicting: ")):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2],
            }
            outputs = model(**inputs)
            _, logits = outputs
            # Decoding depends on the head: CRF decode vs. per-token argmax.
            if args.model_type == "bert_crf":
                tags = model.crf.decode(logits, inputs["attention_mask"])
            elif args.model_type == "bert_softmax":
                tags = np.argmax(logits.cpu().numpy(), axis=2).tolist()
            else:
                raise NotImplementedError
            # Drop the first/last positions (presumably [CLS]/[SEP] — TODO
            # confirm) and take the single example (batch_size == 1).
            pred_labels = [tag[1:-1] for tag in tags][0]
            pred_entities = get_entity(pred_labels, processor.id2label, args.markup)
            classify_entities = gen_classify_entity(pred_entities)
            input_lens = batch[4].cpu().numpy().tolist()[0]
            # Trim the token ids the same way so text and labels stay aligned.
            input_ids = [input_ids[1: input_lens - 1] for idx, input_ids in enumerate(inputs['input_ids'].cpu().numpy().tolist())][0]
            text_a = "".join(tokenizer.convert_ids_to_tokens(input_ids))
            result.append({
                "text": text_a,
                "label": convert_pred2label(classify_entities, text_a)
            })
    with open(os.path.join(args.model_output_dir, "pred.txt"), "w", encoding="utf8") as f:
        for item in result:
            f.write(json.dumps(item, ensure_ascii=False)+"\n")


def arguments_print(args):
    """Log all parsed arguments as JSON (the torch device entry is dropped
    because it is not JSON-serializable)."""
    printable = copy.deepcopy(vars(args))
    del printable["device"]
    logger.info("\n"+json.dumps(printable, ensure_ascii=False))


def main():
    """Entry point: seed RNGs, parse arguments, build the selected model and
    run whichever of the train / dev / predict stages was requested."""
    setup_seed()
    args = arguments_handler()
    arguments_print(args)

    tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path)
    processor = ExampleProcessor(args.dataset_dir, args.markup)

    config = BertConfig.from_pretrained(args.model_name_or_path)

    # Instantiate the extraction head selected via --model_type.
    if args.model_type == "bert_crf":
        model = BertCRF.from_pretrained(
            args.model_name_or_path,
            config=config,
            num_labels=len(processor.id2label),
            add_bilstm=args.add_bilstm,
            num_layers=args.num_layers,
            lstm_hidden_size=args.lstm_hidden_size
        )
    elif args.model_type == "bert_softmax":
        model = BertSoftmax.from_pretrained(
            args.model_name_or_path,
            config=config,
            num_labels=len(processor.id2label),
            loss_type=args.loss_type,
            add_bilstm=args.add_bilstm,
            num_layers=args.num_layers,
            lstm_hidden_size=args.lstm_hidden_size
        )
    else:
        raise NotImplementedError(f"{args.model_type}还未实现!")

    model.to(args.device)
    # The three stages are independent; any combination of flags may be set.
    if args.do_train:
        train_dataset = load_and_cache_examples(args, tokenizer, processor, "train", logger)
        dev_dataset = load_and_cache_examples(args, tokenizer, processor, "dev", logger)
        train(args, model, train_dataset, dev_dataset, processor, tokenizer)

    if args.do_dev:
        dev_dataset = load_and_cache_examples(args, tokenizer, processor, "dev", logger)
        evaluate(args, model, dev_dataset, processor)

    if args.do_predict:
        test_dataset = load_and_cache_examples(args, tokenizer, processor, "test", logger)

        predict(args, model, test_dataset, processor, tokenizer)


# Script entry point.
if __name__ == "__main__":
    main()
