# -*- coding:utf-8 -*-
# @project: GPT2-Medical-QA
# @filename: train.py
# @author: 闫广庆 赵梦媛
# @contact: ygq624576166@163.com
# @time: 2020年12月21日 21:10:39
"""
    文件说明：
    通过新闻正文生成新闻标题的GPT2模型的训练文件
"""
import json
import logging
import os
import random
from typing import List, Dict

import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data import Dataset
from tqdm import tqdm, trange
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import BertTokenizer
from transformers.modeling_gpt2 import GPT2Config

from image.data_set_sample import collate_func
from model import GPT2LMHeadModel
from utils import convert_feature

try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    from tensorboardX import SummaryWriter

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)


def train(model, device, train_data, test_data, train_batch_size, test_batch_size, gradient_accumulation_steps=None,
          num_train_epochs=None, learning_rate=None, adam_epsilon=None, warmup_proportion=None, logging_steps=None,
          max_grad_norm=None, eval_steps=None, output_dir=None):
    """
    Train the GPT2 model, logging to TensorBoard and saving a checkpoint per epoch.

    Args:
        model: model to train (GPT2LMHeadModel-like; forward returns loss first)
        device: torch device to run on
        train_data: training dataset; must expose a ``title_id`` attribute
        test_data: evaluation dataset passed to ``evaluate``
        train_batch_size: effective training batch size (split across accumulation steps)
        test_batch_size: batch size used during periodic evaluation
        gradient_accumulation_steps: micro-batches to accumulate per optimizer step (>= 1)
        num_train_epochs: number of passes over the training data
        learning_rate: AdamW learning rate
        adam_epsilon: AdamW epsilon
        warmup_proportion: fraction of total steps used for linear LR warmup
        logging_steps: log LR/train loss every N optimizer steps (<= 0 disables)
        eval_steps: run evaluation every N optimizer steps (<= 0 disables)
        max_grad_norm: gradient clipping norm
        output_dir: directory under which per-epoch ``checkpoint-{step}`` dirs are written

    Raises:
        ValueError: if gradient_accumulation_steps < 1.
    """
    tb_write = SummaryWriter()
    if gradient_accumulation_steps < 1:
        raise ValueError("gradient_accumulation_steps参数无效，必须大于等于1")
    # Per-device micro-batch size: effective batch is restored via accumulation.
    train_batch_size = int(train_batch_size / gradient_accumulation_steps)
    train_sampler = RandomSampler(train_data)
    train_data_loader = DataLoader(train_data, sampler=train_sampler,
                                   batch_size=train_batch_size, collate_fn=collate_func)
    total_steps = int(len(train_data_loader) * num_train_epochs / gradient_accumulation_steps)
    logger.info("总训练步数为:{}".format(total_steps))
    model.to(device)
    # Split parameters into two groups: weight decay is not applied to biases
    # or LayerNorm parameters (standard transformer fine-tuning practice).
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(
            nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(
            nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=learning_rate, eps=adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(warmup_proportion * total_steps),
                                                num_training_steps=total_steps)
    # Free any cached CUDA memory before training starts.
    torch.cuda.empty_cache()
    model.train()
    title_id = train_data.title_id
    tr_loss, logging_loss = 0.0, 0.0
    global_step = 0
    for _ in trange(0, int(num_train_epochs), desc="Epoch", disable=False):
        iter_bar = tqdm(train_data_loader, desc="Iter (loss=X.XXX)", disable=False)
        for step, batch in enumerate(iter_bar):
            input_ids = batch["input_ids"].to(device)
            token_type_ids = batch["token_type_ids"].to(device)
            # Language-modeling objective: labels are the inputs themselves.
            outputs = model.forward(input_ids=input_ids, token_type_ids=token_type_ids, labels=input_ids,
                                    title_id=title_id)
            loss = outputs[0]
            tr_loss += loss.item()
            # Surface the current loss in the progress bar.
            iter_bar.set_description("Iter (loss=%5.3f)" % loss.item())
            # Scale the loss so accumulated gradients average over micro-batches.
            if gradient_accumulation_steps > 1:
                loss = loss / gradient_accumulation_steps
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            # Step the optimizer only once per full accumulation window.
            if (step + 1) % gradient_accumulation_steps == 0:
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                global_step += 1
                # Periodically log learning rate and mean train loss since last log.
                if logging_steps > 0 and global_step % logging_steps == 0:
                    tb_write.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_write.add_scalar("train_loss", (tr_loss - logging_loss) /
                                        (logging_steps * gradient_accumulation_steps), global_step)
                    logging_loss = tr_loss
                # Periodically evaluate; evaluate() flips the model to eval mode,
                # so restore train mode afterwards.
                if eval_steps > 0 and global_step % eval_steps == 0:
                    eval_loss = evaluate(model, device, test_data, test_batch_size)
                    tb_write.add_scalar("test_loss", eval_loss, global_step)
                    model.train()
        # Save a checkpoint at the end of each epoch.
        output_dir_save = os.path.join(output_dir, "checkpoint-{}".format(global_step))
        # Fix: save_pretrained requires the target directory to already exist;
        # the original never created it and crashed on the first save.
        os.makedirs(output_dir_save, exist_ok=True)
        # Unwrap DataParallel/DistributedDataParallel if present.
        model_to_save = model.module if hasattr(model, "module") else model
        model_to_save.save_pretrained(output_dir_save)
        torch.cuda.empty_cache()


def evaluate(model, device, test_data, test_batch_size=8):
    """
    Compute the average per-sample loss of the model on the test dataset.

    Args:
        model: model to evaluate (left in eval mode on return)
        device: torch device to run on
        test_data: test dataset; must expose a ``title_id`` attribute
        test_batch_size: maximum number of samples per evaluation batch

    Returns:
        Sample-weighted mean loss over the test set; NaN if no valid batch
        was produced (the original raised ZeroDivisionError in that case).
    """
    # Sequential order: shuffling is pointless for evaluation.
    test_sampler = SequentialSampler(test_data)
    test_data_loader = DataLoader(test_data, sampler=test_sampler,
                                  batch_size=test_batch_size, collate_fn=collate_func)
    iter_bar = tqdm(test_data_loader, desc="iter", disable=False)
    title_id = test_data.title_id
    total_loss, total = 0.0, 0.0
    # Switch to eval mode once, outside the loop (the original re-invoked
    # model.eval() on every batch; the caller restores train mode).
    model.eval()
    for step, batch in enumerate(iter_bar):
        # collate_func may yield an empty batch; skip it.
        if not batch:
            continue
        with torch.no_grad():
            input_ids = batch["input_ids"].to(device)
            token_type_ids = batch["token_type_ids"].to(device)
            outputs = model.forward(input_ids=input_ids, token_type_ids=token_type_ids, labels=input_ids,
                                    title_id=title_id)
            loss = outputs[0].item()
            # Weight each batch loss by its sample count for a correct mean.
            total_loss += loss * len(batch["input_ids"])
            total += len(batch["input_ids"])
    # Guard against an empty test set instead of dividing by zero.
    return total_loss / total if total else float("nan")


def get_data_from_file(path_file=None, tokenizer=None, data_dir=None, data_set_name=None, max_len=None,
                       is_overwrite=None):
    """
    Load and tokenize samples from a JSON file, with an on-disk feature cache.

    :param path_file: JSON file of samples shaped like {"content": ..., "title": ...}
    :param tokenizer: text-to-id converter passed through to convert_feature
    :param data_dir: directory holding the cache file
    :param data_set_name: dataset name used in the cache filename
    :param max_len: maximum sequence length passed to convert_feature
    :param is_overwrite: when truthy, ignore any existing cache and rebuild it
    :return: list of {"input_ids": ..., "token_type_ids": ...} feature dicts
    """
    cached_feature_file = os.path.join(data_dir, "cached_{}_{}".format(data_set_name, max_len))
    # Reuse the cache when it exists and overwriting was not requested.
    if os.path.exists(cached_feature_file) and not is_overwrite:
        logger.info("已经存在缓存文件{}，直接加载".format(cached_feature_file))
        data_set = torch.load(cached_feature_file)["data_set"]
        return data_set
    data_set = []
    with open(path_file, "r", encoding="utf-8") as fh:
        data = json.load(fh)
        for idx, sample in enumerate(tqdm(data, desc="iter", disable=False)):
            # Fix: pass max_len through to convert_feature, matching
            # GPT2MedicalQADataSet.get_data_from_list — the cache filename
            # embeds max_len, so the features must actually honor it.
            input_ids, token_type_ids = convert_feature(sample, tokenizer=tokenizer, max_len=max_len)
            data_set.append({"input_ids": input_ids, "token_type_ids": token_type_ids})

    torch.save({"data_set": data_set}, cached_feature_file)
    return data_set


class GPT2MedicalQADataSet(Dataset):
    """Dataset that tokenizes QA samples and caches the processed features on disk."""

    def __init__(self, tokenizer, max_len, data_dir, data_set_name, data: List[Dict] = None, is_overwrite=False):
        """
        Build (or load from cache) the tokenized dataset.

        Args:
            tokenizer: text-to-id converter
            max_len: maximum sequence length
            data_dir: directory where cache files live
            data_set_name: dataset name, embedded in the cache filename
            data: raw samples shaped like {"content": ..., "title": ...}
            is_overwrite: when True, ignore any existing cache and rebuild it
        """
        self.tokenizer = tokenizer
        # [Content]/[Title] marker ids let the model tell the two segments apart.
        self.content_id = self.tokenizer.convert_tokens_to_ids("[Content]")
        self.title_id = self.tokenizer.convert_tokens_to_ids("[Title]")
        # [Space] stands in for literal spaces, which plain tokenization would
        # otherwise drop from titles.
        self.space_id = self.tokenizer.convert_tokens_to_ids("[Space]")
        self.max_len = max_len
        cache_path = os.path.join(data_dir, "cached_{}_{}".format(data_set_name, max_len))
        # Prefer the cached features unless a rebuild was explicitly requested.
        if os.path.exists(cache_path) and not is_overwrite:
            logger.info("已经存在缓存文件{}，直接加载".format(cache_path))
            self.data_set = torch.load(cache_path)["data_set"]
        else:
            logger.info("不存在缓存文件{}，进行数据预处理操作".format(cache_path))
            self.data_set = self.get_data_from_list(data=data, tokenizer=tokenizer, max_len=max_len)
            logger.info("数据预处理操作完成，将处理后的数据存到{}中，作为缓存文件".format(cache_path))
            torch.save({"data_set": self.data_set}, cache_path)

    def get_data_from_list(self, data: List[Dict] = None, tokenizer=None, max_len=None):
        """
        Tokenize every raw sample into a model-ready feature dict.

        :param data: raw samples shaped like {"content": ..., "title": ...}
        :param tokenizer: text-to-id converter
        :param max_len: maximum sequence length
        :return: list of {"input_ids": ..., "token_type_ids": ...} dicts
        """
        features = []
        for sample in tqdm(data, desc="iter", disable=False):
            # convert_feature turns one content/title pair into index sequences.
            input_ids, token_type_ids = convert_feature(sample, tokenizer=tokenizer, max_len=max_len)
            features.append({"input_ids": input_ids, "token_type_ids": token_type_ids})
        return features

    def __len__(self):
        """Number of processed samples."""
        return len(self.data_set)

    def __getitem__(self, idx):
        """Return the processed sample at position ``idx``."""
        return self.data_set[idx]


def get_train_test(file_path, max_len=None):
    """
    Load raw QA records from JSON, deduplicate them and split into train/test.

    Records at index % 5 == 1 go to the test split; everything else to train.

    :param file_path: path to a JSON file holding [title, content, department]
                      rows (a list, or a dict whose values are such rows)
    :param max_len: maximum text length; title and content are truncated to it
                    (None means no truncation)
    :return: (train_samples, test_samples), each a list of
             {"content": ..., "title": ..., "department": ...} dicts
    """
    # Fix: json.load returns plain Python objects; the original chained
    # `.values.tolist()` (a pandas idiom) and crashed unconditionally.
    # Also close the file deterministically with a context manager.
    with open(file_path, "r", encoding="utf-8") as fh:
        raw = json.load(fh)
    qcc_news = list(raw.values()) if isinstance(raw, dict) else raw
    qcc_train = []
    qcc_test = []
    set_data = []
    for index, qcc_data in enumerate(qcc_news):
        # Keep only records where both title and content are non-trivial strings.
        if isinstance(qcc_data[1], str) and isinstance(qcc_data[0], str) and len(qcc_data[0]) > 10 and len(
                qcc_data[1]) > 10:

            qcc_one = {"content": qcc_data[1][:max_len], "title": qcc_data[0][:max_len], "department": qcc_data[2]}
            # Drop exact duplicates before splitting.
            if qcc_one not in set_data:
                set_data.append(qcc_one)
                if index % 5 != 1:
                    qcc_train.append(qcc_one)
                else:
                    qcc_test.append(qcc_one)
    return qcc_train, qcc_test


def main():
    """Entry point: configure environment, build tokenizer/model/datasets, train."""
    # Hyper-parameters and paths.
    max_len = 256
    device = "2"
    seed = 2020
    vocab_path = "../vocab/vocab.txt"
    config_path = "../config/config.json"
    output_dir = "儿科_questions_medical_gpt2"
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    # Fix: the environment variable is CUDA_VISIBLE_DEVICES (plural);
    # the misspelled CUDA_VISIBLE_DEVICE was silently ignored, so GPU
    # selection never actually happened.
    os.environ["CUDA_VISIBLE_DEVICES"] = device
    device = torch.device("cuda" if torch.cuda.is_available() and int(device) >= 0 else "cpu")
    # Seed all RNGs for reproducibility.
    if seed:
        torch.manual_seed(seed)
        random.seed(seed)
        np.random.seed(seed)
    model_config = GPT2Config.from_json_file(config_path)
    # Train from scratch: a small 6-layer model with a customized vocab, for
    # which no suitable pre-trained checkpoint was available.
    model = GPT2LMHeadModel(config=model_config)
    tokenizer = BertTokenizer.from_pretrained(vocab_path, do_lower_case=True)
    # Register [Space] as an atomic token so "我爱[Space]中国。" tokenizes to
    # ['我', '爱', '[Space]', '中', '国', '。'] instead of splitting the brackets.
    tokenizer.add_tokens("[Space]", special_tokens=True)
    # Create the output directory (and parents) if missing.
    os.makedirs(output_dir, exist_ok=True)
    train_json = "../data/ner/clean_medical_ner_entities_train.json"
    test_json = "../data/ner/clean_medical_ner_entities_test.json"
    if os.path.exists(train_json) and os.path.exists(test_json):
        # Reuse the previously materialized split; close files deterministically.
        with open(train_json, "r", encoding="utf-8") as fh:
            qcc_train = json.load(fh)
        with open(test_json, "r", encoding="utf-8") as fh:
            qcc_test = json.load(fh)
    else:
        # Build the split once and persist it for later runs.
        qcc_train, qcc_test = get_train_test("../data/ner/clean_medical_ner_entities_1229.json")
        with open(train_json, "w", encoding="utf-8") as fh:
            json.dump(qcc_train, fh, ensure_ascii=False, indent=2)
        with open(test_json, "w", encoding="utf-8") as fh:
            json.dump(qcc_test, fh, ensure_ascii=False, indent=2)
    # Tokenize (or load cached) train/test datasets.
    medical_train_data = GPT2MedicalQADataSet(data=qcc_train, tokenizer=tokenizer, max_len=max_len, data_dir=output_dir,
                                              data_set_name="train", is_overwrite=False)
    medical_test_data = GPT2MedicalQADataSet(data=qcc_test, tokenizer=tokenizer, max_len=max_len, data_dir=output_dir,
                                             data_set_name="test", is_overwrite=False)
    # Launch training.
    train(model, device, medical_train_data, medical_test_data,
          train_batch_size=4,
          test_batch_size=4,
          gradient_accumulation_steps=4,
          num_train_epochs=5, learning_rate=1e-4, adam_epsilon=1e-8, warmup_proportion=0.1, logging_steps=20,
          max_grad_norm=1.0, eval_steps=100, output_dir=output_dir)


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
