"""
@author：石沙
@date：2020-12-20
@reference：https://www.cnblogs.com/romangao/articles/13052663.html
"""
# The path setup below ensures the training task pipeline imports resolve correctly
import sys
sys.path.append('..')
import configs.settings as conf
sys.path.extend([conf.MAIN_PATH, conf.SRC_PATH])


from transformers import BertModel, BertConfig, BertTokenizer, BertForSequenceClassification
import torch
from torch.optim import Adam
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
from site_packages.utils.job import DataOp
from configs.settings import PROCESSED_DATA_PATH, SAVED_MODEL_PATH
import os
import pandas as pd


# Show up to 10 columns when printing DataFrames for debugging.
pd.set_option('display.max_columns', 10)

# Minimum token frequency; words rarer than this count as low-frequency.
MIN_COUNT = 2

# Directory where the pre-trained BERT files are cached.
BERT_CACHED_PATH = os.path.join(SAVED_MODEL_PATH, 'bert')

# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Name/path handed to `from_pretrained`: a local cache directory here;
# a hub name such as 'bert-base-chinese' would also work.
PRETRAINED_BERT_NAME = BERT_CACHED_PATH # 'bert-base-chinese'


class PreTrainedBert:
    """Load every pre-trained BERT artifact (tokenizer, config, model).

    NOTE: the attributes are class-level, so ``from_pretrained`` runs once
    at class-definition time (i.e. on module import), not per instance.
    """
    tokenizer = BertTokenizer.from_pretrained(PRETRAINED_BERT_NAME)
    config = BertConfig.from_pretrained(PRETRAINED_BERT_NAME)
    # Two output labels -> binary classification head.
    config.update({'num_labels': 2})
    # model = BertModel.from_pretrained(PRETRAINED_BERT_NAME)
    model = BertForSequenceClassification.from_pretrained(PRETRAINED_BERT_NAME, config=config)


# Shared instance; attributes live on the class, the instance merely gives
# convenient access (PRE_BERT.tokenizer, PRE_BERT.model, ...).
PRE_BERT = PreTrainedBert()


class BertInputProcessor:
    """Turn raw text into fixed-length BERT input-id sequences."""

    # Special tokens marking sentence start and end.
    CLS = '[CLS]'
    SEP = '[SEP]'
    SPACE = ' '

    def __init__(self, max_len=200):
        """
        :param max_len: maximum sequence length; longer inputs are truncated
        """
        self.tokenizer = PRE_BERT.tokenizer
        self.max_len = max_len
        # Processing pipeline, applied in order by transform_single_text.
        self.processors = [
            self.add_mark_at_both_ends,
            self.tokenize,
            self.token2idx,
            self.padding,
        ]

    def add_mark_at_both_ends(self, text):
        """Wrap a sentence with the [CLS] ... [SEP] markers."""
        return self.SPACE.join((self.CLS, text, self.SEP))

    def tokenize(self, text):
        """Split a sentence into BERT word pieces."""
        return self.tokenizer.tokenize(text)

    def token2idx(self, text):
        """Map word pieces to vocabulary ids."""
        return self.tokenizer.convert_tokens_to_ids(text)

    def padding(self, text):
        """Truncate to max_len, or right-pad with zeros up to max_len."""
        if len(text) > self.max_len:
            return text[:self.max_len]
        return text + [0] * (self.max_len - len(text))

    def transform_single_text(self, text):
        """Push one text through the full pipeline."""
        result = text
        for step in self.processors:
            result = step(result)
        return result

    def transform(self, texts):
        """Vectorize a collection of texts into an array of id sequences."""
        return np.array([self.transform_single_text(t) for t in texts])


class BertDataLoader:
    """
    Build the train/validation DataLoaders that BERT training consumes.

    ``transform(X, y)`` is the entry point: it derives attention masks,
    splits the data, and exposes ``train_dataloader`` / ``valid_dataloader``.
    """
    def __init__(self, batch_size=16, test_size=0.2, random_state=1, is_stratify=True, num_workers=0):
        """
        :param batch_size: batch size, default 16 (larger ran out of memory locally)
        :param test_size: validation split fraction, default 20%
        :param random_state: random seed, default 1
        :param is_stratify: whether to stratify the split on the labels
        :param num_workers: DataLoader worker processes
        """
        self.batch_size = batch_size
        # NOTE: keeps the original (misspelled) attribute name `text_size`
        # so existing external readers are not broken.
        self.text_size = test_size
        self.random_state = random_state
        self.is_stratify = is_stratify
        self.intermediates = {}
        self.num_workers = num_workers

    def build_attention_mask(self, X, y=None):
        """
        Build attention masks (1.0 for real tokens, 0.0 for padding) and
        split them into train/validation halves.

        :param y: labels; when given and ``is_stratify`` is set, the split is
            stratified exactly like :meth:`train_test_split`, so mask rows
            stay aligned with the corresponding input rows.
        """
        attention_mask = [[float(i > 0) for i in seq] for seq in X]
        # Bug fix: previously this split was unstratified while the X/y split
        # was stratified, so the two permutations differed and masks did not
        # correspond to their rows. Use identical split parameters.
        train_masks, valid_masks = train_test_split(
            attention_mask,
            random_state=self.random_state,
            test_size=self.text_size,
            stratify=y if (self.is_stratify and y is not None) else None
        )
        self.intermediates.update(
            {
                'train_masks': torch.tensor(train_masks),
                'valid_masks': torch.tensor(valid_masks)
            }
        )

    def train_test_split(self, X, y):
        """Split inputs/labels into train and validation tensors."""
        # Bug fix: test_size/random_state were hard-coded to 0.2/1 here,
        # silently ignoring the constructor arguments.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y,
            test_size=self.text_size,
            random_state=self.random_state,
            stratify=y if self.is_stratify else None
        )
        self.intermediates.update(
            {
                'X_train': torch.LongTensor(X_train),
                'X_test': torch.LongTensor(X_test),
                'y_train': torch.tensor(y_train),
                'y_test': torch.tensor(y_test)
            }
        )

    def get_data_loader(self):
        """Create ``train_dataloader`` and ``valid_dataloader``."""
        # Training set: reshuffled every epoch.
        train_data = TensorDataset(
            self.intermediates['X_train'],
            self.intermediates['train_masks'],
            self.intermediates['y_train']
        )
        self.train_dataloader = DataLoader(train_data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)

        # Validation set: fixed order.
        valid_data = TensorDataset(
            self.intermediates['X_test'],
            self.intermediates['valid_masks'],
            self.intermediates['y_test']
        )
        self.valid_dataloader = DataLoader(valid_data, batch_size=self.batch_size, num_workers=self.num_workers)

    def transform(self, X, y):
        """Run the full pipeline: masks, split, loaders."""
        self.build_attention_mask(X, y)
        self.train_test_split(X, y)
        self.get_data_loader()


def get_adam(model, lr=0.0005):
    """Create an Adam optimizer with weight decay on non-bias/norm parameters.

    :param model: the model whose parameters will be optimized
    :param lr: learning rate (default kept at 0.0005 for compatibility)
    :return: a configured ``torch.optim.Adam``
    """
    param_optimizer = list(model.named_parameters())
    # Bias and LayerNorm (gamma/beta) parameters receive no weight decay.
    no_decay = ['bias', 'gamma', 'beta']
    optimizer_grouped_parameters = [
        # Bug fix: torch.optim.Adam reads the per-group key 'weight_decay';
        # the old 'weight_decay_rate' (a BertAdam-era name) was stored in the
        # param group but silently ignored, so no decay was ever applied.
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]
    return Adam(optimizer_grouped_parameters, lr=lr)


def flat_accuracy(preds, labels):
    """Fraction of rows where the argmax prediction matches the label.

    :param preds: raw scores/logits, shape (n, num_classes)
    :param labels: ground-truth labels (ndarray)
    :return: accuracy in [0, 1]
    """
    hard_preds = np.argmax(preds, axis=1).flatten()
    return np.mean(hard_preds == labels.flatten())


def main():
    """Fine-tune BERT for sentence-pair binary classification and evaluate."""
    # Load the (small-sample) training data.
    data = DataOp.load_data('data_train')
    # Join each question pair into one "[q1 tokens] [SEP] [q2 tokens]" string.
    X = list(map(lambda s1, s2: ' '.join(s1 + ['[SEP]'] + s2), data['question1_clean'] , data['question2_clean']))
    y = data['label'].astype(int).values.tolist()

    # Preprocess into padded input-id sequences and train/valid DataLoaders.
    input_ids = BertInputProcessor().transform(X)
    data_loader = BertDataLoader(batch_size=8, test_size=0.2, random_state=1, is_stratify=True)
    data_loader.transform(input_ids, y)

    # Move the model to the GPU when available.
    model = PRE_BERT.model.to(device)

    # NOTE: training locally with this optimizer can run out of memory.
    optimizer = get_adam(model)

    # Training loop.
    epochs = 10
    for epoch in range(epochs):
        print('----第{}轮-----'.format(epoch))
        # Enable train mode once per epoch (the per-step call was redundant).
        model.train()
        train_loss_set = []
        for step, batch in enumerate(data_loader.train_dataloader):
            # Move the batch onto the target device.
            b_input_ids, b_input_mask, b_labels = [t.to(device) for t in batch]

            # Reset gradients accumulated from the previous step.
            optimizer.zero_grad()

            # With labels supplied, BertForSequenceClassification returns
            # (loss, [CLS] logits).
            loss, logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
            train_loss_set.append(loss.item())
            loss.backward()
            optimizer.step()
            # (removed) per-batch debug print of the raw logits.
            if step % 50 == 0:
                print("epoch:{}, step{}, Train loss: {}".format(epoch, step, np.mean(train_loss_set)))
        print("Finally, epoch:{}, Train loss: {}".format(epoch,np.mean(train_loss_set)))

    # Evaluate on the validation split.
    model.eval()
    eval_accuracy = 0
    nb_eval_steps = 0
    for batch in data_loader.valid_dataloader:
        b_input_ids, b_input_mask, b_labels = [t.to(device) for t in batch]
        with torch.no_grad():
            # Without labels the first output is the logits tensor.
            logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)[0]

        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()
        eval_accuracy += flat_accuracy(logits, label_ids)
        nb_eval_steps += 1
    print("Validation Accuracy: {}".format(eval_accuracy / nb_eval_steps))

    # Save the fine-tuned model once (it was previously saved twice,
    # before and after evaluation, to the same path).
    torch.save(model, os.path.join(SAVED_MODEL_PATH, 'bert_classifier.pkl'))


if __name__ == '__main__':
    main()

