#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : TorchBERTFGM.py
# @Author: Richard Chiming Xu
# @Date  : 2022/2/8
# @Desc  :

'''
    Bare-bones BERT run for text-pair classification:
    1. run BERT directly as the baseline
    2. add data augmentation
    3. add adversarial training (FGM / PGD)
'''

# PGD (Projected Gradient Descent) adversarial training
class PGD:
    """Multi-step adversarial perturbation of the embedding weights.

    Per training batch: ``backup_grad()``, then K rounds of ``attack`` +
    forward/backward (restoring the clean gradients before the last round
    via ``restore_grad``), and finally ``restore()`` the clean embeddings.
    """

    def __init__(self, model):
        self.model = model
        self.emb_backup = {}   # parameter name -> clean embedding weights
        self.grad_backup = {}  # parameter name -> gradients from the clean backward pass

    def attack(self, epsilon=1., alpha=0.3, emb_name='word_embeddings', is_first_attack=False):
        # emb_name must match the embedding parameter name of your model.
        # Skip params with no gradient yet (the original crashed on grad=None).
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name and param.grad is not None:
                if is_first_attack:
                    # snapshot the clean weights for project()/restore()
                    self.emb_backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    # one ascent step of size alpha along the normalized gradient
                    r_at = alpha * param.grad / norm
                    param.data.add_(r_at)
                    # keep the accumulated perturbation inside the epsilon-ball
                    param.data = self.project(name, param.data, epsilon)

    def restore(self, emb_name='word_embeddings'):
        # Undo all perturbations: put the clean embedding weights back.
        # Membership check (not assert): a param whose grad was None during the
        # first attack was never backed up and must simply be skipped.
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name and name in self.emb_backup:
                param.data = self.emb_backup[name]
        self.emb_backup = {}

    def project(self, param_name, param_data, epsilon):
        """Clip the accumulated perturbation so its L2 norm stays within epsilon."""
        r = param_data - self.emb_backup[param_name]
        if torch.norm(r) > epsilon:
            r = epsilon * r / torch.norm(r)
        return self.emb_backup[param_name] + r

    def backup_grad(self):
        # Snapshot the gradients produced by the clean backward pass.
        for name, param in self.model.named_parameters():
            if param.requires_grad and param.grad is not None:
                self.grad_backup[name] = param.grad.clone()

    def restore_grad(self):
        # Restore the snapshot. Guard on membership: a param whose grad first
        # appeared after backup_grad() ran would raise KeyError in the original.
        for name, param in self.model.named_parameters():
            if param.requires_grad and name in self.grad_backup:
                param.grad = self.grad_backup[name]


# FGM (Fast Gradient Method) adversarial training
class FGM:
    """Single-step adversarial perturbation of the embedding weights.

    Per training batch: ``attack()`` perturbs the embeddings along the
    gradient direction, the caller backprops the adversarial loss (which
    accumulates onto the clean gradients), then ``restore()`` undoes the
    perturbation.
    """

    def __init__(self, model):
        self.model = model
        self.backup = {}  # parameter name -> clean embedding weights

    def attack(self, epsilon=0.1, emb_name='word_embeddings'):
        # emb_name must match the embedding parameter name of your model.
        # (The original also printed the full embedding matrices before and
        # after the attack — debug leftovers removed.)
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                # back up the clean weights so restore() can undo the attack
                self.backup[name] = param.data.clone()
                if param.grad is None:
                    # attack() called before any backward pass: nothing to perturb
                    continue
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    # r = epsilon * g / ||g||: move along the gradient direction
                    r_at = epsilon * param.grad / norm
                    param.data.add_(r_at)

    def restore(self, emb_name='word_embeddings'):
        # emb_name must match the embedding parameter name of your model.
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                assert name in self.backup
                param.data = self.backup[name]
        self.backup = {}


# Data augmentation: derive new (text_b, text_b) pairs from rows sharing text_a
def aug_group_by_a(df):
    """Augment pairs by pivoting on a shared text_a sentence.

    For two rows (a, b, l_ab) and (a, c, l_ac) with the same text_a:
      - both labels 1  -> (b, c) is a positive pair (label 1);
      - exactly one 1  -> (b, c) is a negative pair (label 0);
      - both labels 0  -> nothing can be inferred, pair is skipped.

    Expects columns text_a / text_b / label; returns a new DataFrame with
    the same three columns.
    """
    aug_data = defaultdict(list)
    # Group by the scalar column name (a one-element list yields tuple keys
    # and a FutureWarning on recent pandas) and access fields by name instead
    # of by iloc position, which silently breaks if column order changes.
    for _, group in df.groupby('text_a'):
        if len(group) < 2:
            continue
        texts = group['text_b'].tolist()
        labels = group['label'].tolist()
        for i in range(len(texts)):
            for j in range(i + 1, len(texts)):
                if labels[i] == labels[j] == 0:
                    # both pairs negative: relation between the two b's is unknown
                    continue
                aug_label = 1 if labels[i] == labels[j] == 1 else 0
                aug_data['text_a'].append(texts[i])
                aug_data['text_b'].append(texts[j])
                aug_data['label'].append(aug_label)
    return pd.DataFrame(aug_data)


import numpy as np
import pandas as pd
from tqdm import tqdm

import torch

from transformers import AutoTokenizer, PreTrainedTokenizer, BertTokenizer
from collections import defaultdict


class Config:
    """Hyper-parameters and runtime settings shared across the pipeline."""

    # --- data loading ---
    dataset = 'paws-x'  # which dataset folder under data/ to read
    max_seq_len = 64  # maximum tokenized sequence length
    need_data_aug = True  # enlarge the train set via aug_group_by_a
    # --- model ---
    model_path = 'hfl/chinese-bert-wwm-ext'  # HF hub id / local model path
    tokenizer = None  # tokenizer object; filled in by the driver script
    load_model = False  # load an existing model for prediction (NOTE(review): not referenced in this file)
    save_model = True  # save the trained model (NOTE(review): not referenced in this file)
    # --- training ---
    device = 'cpu'  # overwritten with 'cuda' by the driver when available
    learning_rate = 1e-5
    batch_size = 2  # batch size
    epochs = 8  # number of training epochs
    print_loss = 200  # print loss every N iterations
    num_labels = 2  # number of classes
    adv = 'fgm' # adversarial training scheme: 'fgm' or 'pgd'
    eps = 0.1  # perturbation budget (passed as epsilon to both FGM and PGD)
    alpha = 0.3  # PGD per-step size


# Load the dataset's tsv splits and tokenize them
def read_data(config: Config):
    """Read train/dev/test tsv files, clean labels, optionally augment,
    and tokenize every (text_a, text_b) pair.

    Returns three dicts (train, dev, test), each mapping
    'input_ids' / 'token_type_ids' / 'attention_mask' / 'labels' to
    parallel lists.
    """
    base = 'data/' + config.dataset + '/' + config.dataset
    train = pd.read_csv(base + '_train.tsv', sep='\t',
                        names=['text_a', 'text_b', 'label'])
    dev = pd.read_csv(base + '_dev.tsv', sep='\t',
                      names=['text_a', 'text_b', 'label'])
    test = pd.read_csv(base + '_test.tsv', sep='\t', names=['text_a', 'text_b'])

    # Some files carry header/noise rows: keep only '0'/'1' labels, cast to int.
    if len(set(train['label'])) > 2:
        train = train[train['label'].isin(['0', '1'])]
        train['label'] = train['label'].astype('int')
    train = train.dropna()

    # BUG FIX: the original tested train's labels here, so dev was never cleaned.
    if len(set(dev['label'])) > 2:
        dev = dev[dev['label'].isin(['0', '1'])]
        dev['label'] = dev['label'].astype('int')
    dev = dev.dropna()
    test['label'] = 0  # dummy label so all splits share the same row layout

    # Data augmentation: enlarge the training set with inferred pairs.
    if config.need_data_aug is True:
        aug_train = aug_group_by_a(train)
        aug_dev = aug_group_by_a(dev)
        train = pd.concat([train, aug_train, aug_dev])

    # Tokenize each split.
    tokenizer = config.tokenizer
    data_df = {'train': train, 'dev': dev, 'test': test}
    full_data_dict = {}
    for k, df in data_df.items():
        inputs = defaultdict(list)
        for _, row in tqdm(df.iterrows(), desc='encode {} data'.format(k), total=len(df)):
            # Name-based access; positional row[0] on a labeled Series is
            # deprecated and order-fragile.
            seq_a = row['text_a']
            seq_b = row['text_b']
            label = row['label']
            try:
                inputs_dict = tokenizer.encode_plus(seq_a, seq_b, add_special_tokens=True,
                                                    return_token_type_ids=True,
                                                    return_attention_mask=True)
            except TypeError:
                # Malformed row (e.g. NaN text): report and skip it.
                # BUG FIX: the original fell through and hit an unbound
                # inputs_dict (NameError) right after printing.
                print(row)
                continue
            inputs['input_ids'].append(inputs_dict['input_ids'])
            inputs['token_type_ids'].append(inputs_dict['token_type_ids'])
            inputs['attention_mask'].append(inputs_dict['attention_mask'])
            inputs['labels'].append(label)
        full_data_dict[k] = inputs

    return full_data_dict['train'], full_data_dict['dev'], full_data_dict['test']


from torch.utils.data import DataLoader, Dataset


class SimDataset(Dataset):
    """Map-style dataset over the tokenized feature lists.

    Expects a dict with parallel lists under 'input_ids', 'token_type_ids',
    'attention_mask' and 'labels'; element i of each list describes sample i.
    """

    _FIELDS = ('input_ids', 'token_type_ids', 'attention_mask', 'labels')

    def __init__(self, data_dict):
        super(SimDataset, self).__init__()
        for field in self._FIELDS:
            setattr(self, field, data_dict[field])
        self.len = len(self.input_ids)

    def __getitem__(self, index):
        # One sample as a 4-tuple; the Collator zips the batch back up.
        return tuple(getattr(self, field)[index] for field in self._FIELDS)

    def __len__(self):
        return self.len


# Batch collation: pads/truncates a list of samples into fixed-size tensors
class Collator:
    """Callable collate_fn for DataLoader.

    Pads every sequence in a batch to the length of the longest one
    (capped at max_seq_len) and stacks the result into long tensors.
    """

    def __init__(self, tokenizer, max_seq_len):
        self.tokenizer = tokenizer
        self.max_seq_len = max_seq_len

    def pad(self, input_ids_list, token_type_ids_list, attention_mask_list, labels_list, max_seq_len):
        """Right-pad (or truncate) each sequence to max_seq_len; return the four batch tensors."""
        batch_size = len(input_ids_list)
        input_ids = torch.zeros((batch_size, max_seq_len), dtype=torch.long)
        token_type_ids = torch.zeros_like(input_ids)
        attention_mask = torch.zeros_like(input_ids)

        for i, ids in enumerate(input_ids_list):
            span = len(ids)
            if span < max_seq_len:
                # Shorter than the budget: copy as-is, the tail stays zero.
                input_ids[i, :span] = torch.tensor(ids, dtype=torch.long)
                token_type_ids[i, :span] = torch.tensor(token_type_ids_list[i], dtype=torch.long)
                attention_mask[i, :span] = torch.tensor(attention_mask_list[i], dtype=torch.long)
            else:
                # At or over the budget: truncate, keeping the tokenizer's
                # separator as the final token.
                truncated = ids[:max_seq_len - 1] + [self.tokenizer.sep_token_id]
                input_ids[i] = torch.tensor(truncated, dtype=torch.long)
                token_type_ids[i] = torch.tensor(token_type_ids_list[i][:max_seq_len], dtype=torch.long)
                attention_mask[i] = torch.tensor(attention_mask_list[i][:max_seq_len], dtype=torch.long)

        # One-column label tensor, shape (batch, 1).
        labels = torch.tensor([[label] for label in labels_list], dtype=torch.long)
        return input_ids, token_type_ids, attention_mask, labels

    def __call__(self, examples):
        # Unzip the batch of 4-tuples produced by SimDataset.__getitem__.
        input_ids_list, token_type_ids_list, attention_mask_list, labels_list = list(zip(*examples))
        # Pad to the longest sequence in this batch, capped at max_seq_len.
        max_seq_len = min(max(len(ids) for ids in input_ids_list), self.max_seq_len)
        input_ids, token_type_ids, attention_mask, labels = self.pad(
            input_ids_list, token_type_ids_list, attention_mask_list, labels_list, max_seq_len)
        return {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
            'labels': labels,
        }


def create_dataloader(config: Config):
    """Build the train/dev/test DataLoaders for the configured dataset."""
    # Read and tokenize the three splits.
    train_data, dev_data, test_data = read_data(config)

    # All three loaders share the same collator and batch size.
    collate_fn = Collator(config.tokenizer, config.max_seq_len)

    def _loader(data_dict, shuffle):
        # Wrap one split in a SimDataset and hand it to a DataLoader.
        return DataLoader(SimDataset(data_dict), batch_size=config.batch_size,
                          collate_fn=collate_fn, shuffle=shuffle, num_workers=0)

    # Only the test loader keeps the original sample order.
    return _loader(train_data, True), _loader(dev_data, True), _loader(test_data, False)


from torch import nn
from transformers import AutoModelForSequenceClassification, BertForNextSentencePrediction, AdamW
from sklearn.metrics import f1_score, accuracy_score


# Validation: average loss, macro-F1 and accuracy over a dataloader
def evaluation(config, model, val_dataloader):
    """Run the model over val_dataloader without gradients.

    Returns (avg_val_loss, macro_f1, accuracy); avg_val_loss is the mean
    per-batch loss reported by the model itself.
    """
    model.eval()
    preds = []
    labels = []
    val_loss = 0.
    with torch.no_grad():
        for mini_batch in val_dataloader:
            batch_cuda = {item: value.to(config.device) for item, value in mini_batch.items()}
            # Collect plain ints. The original accumulated 0-dim (possibly
            # CUDA) tensors and rebuilt them with torch.tensor(list_of_tensors),
            # which is deprecated and forces a per-element device sync.
            labels.extend(batch_cuda['labels'].view(-1).cpu().tolist())
            result = model(**batch_cuda)
            loss = result[0]
            logits = result[1]
            # argmax over the class logits -> predicted class 0/1
            _, indices = torch.max(logits, dim=1)
            preds.extend(indices.cpu().tolist())

            val_loss += loss.item()

    avg_val_loss = val_loss / len(val_dataloader)
    f1 = f1_score(labels, preds, average='macro')
    acc = accuracy_score(labels, preds)
    return avg_val_loss, f1, acc


def predict(config, model, test_dataloader):
    """Return the predicted class (0/1) for every test sample as a numpy array,
    in dataloader order."""
    model.eval()
    predict_labels = []
    with torch.no_grad():
        for mini_batch in test_dataloader:
            batch_cuda = {item: value.to(config.device) for item, value in mini_batch.items()}
            result = model(**batch_cuda)
            logits = result[1]
            _, indices = torch.max(logits, dim=1)
            # Collect plain ints. The original accumulated 0-dim (possibly
            # CUDA) tensors and rebuilt them with torch.tensor(list_of_tensors),
            # which is deprecated and forces a per-element device sync.
            predict_labels.extend(indices.cpu().tolist())
    return np.array(predict_labels)


def train(config: Config, train_dataloader: DataLoader, dev_dataloader: DataLoader):
    """Fine-tune a sequence-classification BERT with adversarial training.

    config.adv selects the scheme: 'fgm' (single-step) or 'pgd' (K-step).
    Any other value builds a PGD helper that is never invoked, i.e. plain
    training. Prints batch stats every config.print_loss iterations and
    dev-set metrics after each epoch. Returns the trained model.
    """
    # Build the classification head on top of the pretrained encoder.
    model = AutoModelForSequenceClassification.from_pretrained(config.model_path, num_labels=config.num_labels)
    # model = BertForNextSentencePrediction.from_pretrained('bert-base-chinese')

    model.to(config.device)
    # Optimizer.
    opt = AdamW(lr=config.learning_rate, params=model.parameters())
    # NOTE(review): loss_fn is never used below — the loss comes from the
    # model's own output (result[0]).
    loss_fn = nn.CrossEntropyLoss()

    # Set up the adversarial-training helper.
    if config.adv == 'fgm':
        fgm = FGM(model)
    else:
        pgd = PGD(model)
        K = 3  # number of PGD attack steps per batch


    # Epoch loop.
    for epoch in range(config.epochs):
        model.train()
        for iter_id, mini_batch in enumerate(train_dataloader):
            batch_cuda = {item: value.to(config.device) for item, value in mini_batch.items()}

            # Clean forward pass: the model returns (loss, logits, ...).
            result = model(**batch_cuda)
            loss = result[0]
            logits = result[1]

            # Batch accuracy, for progress logging only.
            _, indices = torch.max(logits, dim=1)
            correct = torch.sum(indices == batch_cuda['labels'].view(-1))

            # Reset gradients.
            model.zero_grad()
            # Backprop the clean loss — step (1).
            loss.backward()


            # ------------------ adversarial training ------------------#
            if config.adv == 'fgm':
                # Perturb the embeddings along the gradient direction (x + r).
                fgm.attack(epsilon=config.eps)
                # Forward pass on the perturbed input.
                loss_adv = model(**batch_cuda)[0]
                # Backprop: adversarial gradients accumulate onto step (1)'s.
                loss_adv.backward()
                # Put the clean embeddings back.
                fgm.restore()
            elif config.adv == 'pgd':
                pgd.backup_grad()
                for t in range(K):
                    # Compute r from the embedding gradient and apply it (x + r).
                    pgd.attack(epsilon=config.eps, alpha=config.alpha, is_first_attack=(t == 0))
                    if t != K - 1:
                        # Intermediate step: zero grads so the next attack
                        # uses only this step's gradient.
                        model.zero_grad()
                    else:
                        # Last step: restore step (1)'s gradients so the final
                        # adversarial backward accumulates onto them.
                        pgd.restore_grad()
                    loss_adv = model(**batch_cuda)[0]
                    loss_adv.backward()
                # Put the clean embeddings back.
                pgd.restore()

            # ------------------ adversarial training ------------------#

            # Parameter update.
            opt.step()
            # Periodic progress logging.
            if iter_id % config.print_loss == 0:
                print('epoch:{}, iter_id:{}, loss:{}, acc:{}'.format(epoch, iter_id, loss,
                                                                     correct.item() * 1.0 / len(batch_cuda['labels'])))
        # End of epoch: evaluate on the dev set.
        avg_val_loss, f1, acc = evaluation(config, model, dev_dataloader)
        print('-' * 50)
        print('epoch: {}, val_loss: {}, val_f1: {}, val_acc: {}'.format(epoch, avg_val_loss, f1, acc))
        print('-' * 50)
    return model


import os

# Make CUDA errors surface at the offending call instead of asynchronously.
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'

# Run the whole pipeline (load -> train -> predict -> dump) for each dataset.
data_list = ['paws-x', 'bq_corpus', 'lcqmc']
for data in data_list:
    # Fresh config per dataset.
    conf = Config()
    conf.dataset = data
    conf.device = 'cuda' if torch.cuda.is_available() else 'cpu'
    conf.tokenizer = AutoTokenizer.from_pretrained(conf.model_path)

    # Data loading.
    train_dataloader, dev_dataloader, test_dataloader = create_dataloader(conf)
    # Training.
    model = train(conf, train_dataloader, dev_dataloader)
    # Inference on the test split.
    predict_labels = predict(conf, model, test_dataloader)
    # Persist predictions as <dataset>.tsv with columns index/prediction.
    test_df = pd.DataFrame(predict_labels, columns=['prediction'])
    test_df['index'] = test_df.index
    print(test_df)
    test_df.to_csv(conf.dataset + '.tsv', index=False, columns=['index', 'prediction'], sep='\t')
    print('保存结果成功')
