#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : PaddleUDA.py
# @Author: Richard Chiming Xu
# @Date  : 2022/1/25
# @Desc  : UDA data augmentation / training implemented with Paddle


import numpy as np
import pandas as pd
from tqdm import tqdm

from collections import defaultdict
import paddle
from paddle import nn

from dataprocess import Exchange

import pickle


# Serialize an object to disk using the highest available pickle protocol.
def dump_pickle(obj, file_path):
    with open(file_path, 'wb') as handle:
        pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)


# Deserialize an object previously written with pickle.
def load_pickle(file_path):
    with open(file_path, 'rb') as handle:
        obj = pickle.load(handle)
    return obj


class Config:
    """Hyper-parameters and runtime settings for UDA training."""
    # --- data loading ---
    dataset = 'paws-x'  # dataset name (subdirectory under data/)
    need_data_aug = False  # whether to apply data augmentation
    max_seq_len = 64  # maximum sentence length
    load_data = True # load preprocessed inputs from the pickle cache instead of rebuilding
    # --- model ---
    tokenizer = None  # tokenizer instance (set at startup)
    model_path = None  # pretrained model name/path (set at startup)

    # --- training ---
    batch_size = 16   # supervised batch size
    unsup_data_ratio = 1.5  # unsupervised batch ratio: unsup_batch_size = batch_size*unsup_data_ratio
    uda_softmax_temp = 0.4  # softmax sharpening temperature for the unsupervised targets
    uda_confidence_threshold = 0.8  # confidence threshold for the unsupervised loss mask
    learning_rate = 1e-4  # learning rate
    weight_decay = 0.01  # weight decay
    epochs = 5  # number of training epochs
    num_labels = 2 # number of classes
    print_loss = 10 # log training metrics every N steps


# Prepare the raw data splits.
def parse_data(config: Config):
    """Read TSV splits for ``config.dataset`` and return (train, dev, test) DataFrames.

    Rows whose label is outside {'0', '1'} are dropped (raw dumps may contain
    header or noise rows), labels are cast to int, and NaN rows are removed.
    ``test`` receives a dummy zero label column so it can reuse the supervised
    input pipeline.  When ``config.need_data_aug`` is True, augmented copies of
    train and dev are appended to the training set.
    """
    train = pd.read_csv('data/' + config.dataset + '/train.tsv', sep='\t', names=['text_a', 'text_b', 'label'])
    dev = pd.read_csv('data/' + config.dataset + '/dev.tsv', sep='\t', names=['text_a', 'text_b', 'label'])
    test = pd.read_csv('data/' + config.dataset + '/test.tsv', sep='\t', names=['text_a', 'text_b'])

    if len(set(train['label'])) > 2:
        train = train[train['label'].isin(['0', '1'])]
        train['label'] = train['label'].astype('int')
    train = train.dropna()

    # BUG FIX: the original re-checked ``train`` labels here, so ``dev`` was
    # never filtered/cast when it contained non-binary label values.
    if len(set(dev['label'])) > 2:
        dev = dev[dev['label'].isin(['0', '1'])]
        dev['label'] = dev['label'].astype('int')
    dev = dev.dropna()
    test['label'] = 0  # placeholder labels for the unlabeled test split

    # Data augmentation: enlarge the training set.
    if config.need_data_aug is True:
        aug_train = Exchange.aug_group_by_a(train)
        aug_dev = Exchange.aug_group_by_a(dev)
        # Concatenate original and augmented data.
        train = pd.concat([train, aug_train, aug_dev])

    return train, dev, test


# Build the unsupervised (UDA) inputs: each example is stored as a pair of
# encodings — (a,b) order and (b,a) order — appended to ``inputs`` in place.
def build_unsup_bert_inputs(inputs, label, sentence_a, sentence_b, tokenizer):
    encode_kwargs = dict(return_special_tokens_mask=True,
                         return_token_type_ids=True,
                         return_attention_mask=True)
    # Encoding of sentence_a followed by sentence_b ...
    forward = tokenizer.encode(sentence_a, sentence_b, **encode_kwargs)
    # ... and of the reversed sentence order.
    backward = tokenizer.encode(sentence_b, sentence_a, **encode_kwargs)

    for key in ('input_ids', 'token_type_ids', 'attention_mask'):
        inputs[key].append((forward[key], backward[key]))
    inputs['labels'].append(label)


# Build the supervised inputs for one sentence pair, appending to ``inputs`` in place.
def build_bert_inputs(inputs, label, sentence_a, sentence_b, tokenizer):
    encoded = tokenizer.encode(sentence_a, sentence_b,
                               return_special_tokens_mask=True,
                               return_token_type_ids=True,
                               return_attention_mask=True)
    for key in ('input_ids', 'token_type_ids', 'attention_mask'):
        inputs[key].append(encoded[key])
    inputs['labels'].append(label)


from collections import defaultdict


# Read and tokenize all data splits.
def read_data(config):
    """Tokenize the train/dev/test splits plus the unsupervised (a-b / b-a) pairs.

    Returns a dict with keys 'train', 'dev', 'test' (supervised inputs) and
    'unsup_data' (paired encodings for UDA consistency training).
    """
    tokenizer = config.tokenizer
    # Load the raw DataFrames.
    train_df, dev_df, test_df = parse_data(config)

    processed_data = {}
    unsup_data = defaultdict(list)

    # Walk every split and convert each row into model inputs.
    for split_name, split_df in {'train': train_df, 'dev': dev_df, 'test': test_df}.items():
        split_inputs = defaultdict(list)
        progress = tqdm(split_df.iterrows(),
                        desc='Preprocessing {} data'.format(split_name),
                        total=len(split_df))
        for _, row in progress:
            sentence_a, sentence_b, label = row[0], row[1], row[2]
            # Supervised inputs.
            build_bert_inputs(split_inputs, label, sentence_a, sentence_b, tokenizer)

            # (Dual reversed-order supervised inputs intentionally disabled.)
            # if split_name.startswith('test'):
            #     build_bert_inputs(split_inputs, label, sentence_b, sentence_a, tokenizer)

            # Unsupervised (UDA) inputs.
            build_unsup_bert_inputs(unsup_data, label, sentence_a, sentence_b, tokenizer)

        processed_data[split_name] = split_inputs

    processed_data['unsup_data'] = unsup_data

    return processed_data


from paddle.io import DataLoader, Dataset


class SupDataset(Dataset):
    """Supervised dataset yielding (input_ids, token_type_ids, attention_mask, label)."""

    def __init__(self, data_dict):
        super(SupDataset, self).__init__()
        self.input_ids = data_dict['input_ids']
        self.token_type_ids = data_dict['token_type_ids']
        self.attention_mask = data_dict['attention_mask']
        self.labels = data_dict['labels']
        self.len = len(self.input_ids)

    def __getitem__(self, index):
        return (self.input_ids[index],
                self.token_type_ids[index],
                self.attention_mask[index],
                self.labels[index])

    def __len__(self):
        return self.len


# Batch collation: pads a list of examples into fixed-shape tensors.
class Collator:

    def __init__(self, config: Config):
        self.tokenizer = config.tokenizer
        self.max_seq_len = config.max_seq_len

    def pad(self, input_ids_list, token_type_ids_list, attention_mask_list, labels_list, max_seq_len):
        """Pad (or truncate) every sequence to ``max_seq_len`` and stack into int64 tensors.

        Returns (input_ids, token_type_ids, attention_mask, labels); labels has
        shape [batch_size, 1].
        """
        # Pre-allocate zero-filled tensors of the final batch shape.
        input_ids = paddle.zeros((len(input_ids_list), max_seq_len), dtype='int64')
        token_type_ids = paddle.zeros_like(input_ids)
        attention_mask = paddle.zeros_like(input_ids)
        # Copy each example into its row.
        for i in range(len(input_ids_list)):
            seq_len = len(input_ids_list[i])

            if seq_len < max_seq_len:  # shorter than target: left-aligned copy, tail stays zero
                input_ids[i, :seq_len] = paddle.to_tensor(input_ids_list[i], dtype='int64')
                token_type_ids[i, :seq_len] = paddle.to_tensor(token_type_ids_list[i], dtype='int64')
                attention_mask[i, :seq_len] = paddle.to_tensor(attention_mask_list[i], dtype='int64')
            else:  # as long or longer: truncate
                # Truncate and keep the tokenizer's [SEP] id as the final token.
                input_ids[i] = paddle.to_tensor(
                    input_ids_list[i][:max_seq_len - 1] + [self.tokenizer.sep_token_id], dtype='int64')
                # NOTE(review): token_type_ids/attention_mask are truncated without the
                # [SEP] adjustment applied to input_ids above — lengths match but the
                # last position may not describe the appended [SEP]; confirm intended.
                token_type_ids[i] = paddle.to_tensor(
                    token_type_ids_list[i][:max_seq_len], dtype='int64')
                attention_mask[i] = paddle.to_tensor(
                    attention_mask_list[i][:max_seq_len], dtype='int64')
        # Labels as a [batch_size, 1] column tensor.
        labels = paddle.to_tensor([[label] for label in labels_list], dtype='int64')

        return input_ids, token_type_ids, attention_mask, labels

    def __call__(self, examples):
        """Collate (input_ids, token_type_ids, attention_mask, label) tuples into a batch dict."""
        # Unzip the batch into parallel per-field lists.
        input_ids_list, token_type_ids_list, attention_mask_list, labels_list = list(zip(*examples))
        # Target length: longest sequence in this batch, capped at max_seq_len.
        cur_seq_len = max([len(ids) for ids in input_ids_list])  # longest sequence in the batch
        max_seq_len = min(cur_seq_len, self.max_seq_len)  # capped target length
        # Pad every field to the target length.
        input_ids, token_type_ids, attention_mask, labels = self.pad(input_ids_list, token_type_ids_list,
                                                                     attention_mask_list, labels_list, max_seq_len)
        # Package the batch.
        data = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
            'labels': labels,
        }
        return data


class UnsupDataset(Dataset):
    """UDA dataset: each item carries both the (a,b) and (b,a) encodings plus the label."""

    def __init__(self, data_dict):
        super(UnsupDataset, self).__init__()
        self.input_ids = data_dict['input_ids']
        self.token_type_ids = data_dict['token_type_ids']
        self.attention_mask = data_dict['attention_mask']
        self.labels = data_dict['labels']
        self.len = len(self.input_ids)

    def __getitem__(self, index):
        # Each stored field is an (ab, ba) pair; unpack into a flat 7-tuple.
        ab_ids, ba_ids = self.input_ids[index]
        ab_types, ba_types = self.token_type_ids[index]
        ab_mask, ba_mask = self.attention_mask[index]
        return (ab_ids, ab_types, ab_mask,
                ba_ids, ba_types, ba_mask,
                self.labels[index])

    def __len__(self):
        return self.len


class UnsupCollator(Collator):
    """Collator for UDA batches: pads the a-b and b-a views to one shared length."""

    def __init__(self, config):
        super(UnsupCollator, self).__init__(config)

    def __call__(self, examples):
        # TODO: not yet revised
        columns = list(zip(*examples))
        ab_columns = columns[:3]   # ab input_ids / token_type_ids / attention_mask
        ba_columns = columns[3:6]  # ba input_ids / token_type_ids / attention_mask
        labels_list = columns[6]

        # Shared target length, measured on the ab view and capped at max_seq_len.
        cur_max_seq_len = max(len(input_id) for input_id in ab_columns[0])
        max_seq_len = min(cur_max_seq_len, self.max_seq_len)

        ab_input_ids, ab_token_type_ids, ab_attention_mask, labels = self.pad(
            ab_columns[0], ab_columns[1], ab_columns[2], labels_list, max_seq_len
        )
        ba_input_ids, ba_token_type_ids, ba_attention_mask, labels = self.pad(
            ba_columns[0], ba_columns[1], ba_columns[2], labels_list, max_seq_len
        )

        return {
            'ab_input_ids': ab_input_ids,
            'ab_token_type_ids': ab_token_type_ids,
            'ab_attention_mask': ab_attention_mask,
            'ba_input_ids': ba_input_ids,
            'ba_token_type_ids': ba_token_type_ids,
            'ba_attention_mask': ba_attention_mask,
            'labels': labels
        }


def create_dataloader(config: Config):
    """Build the four DataLoaders for UDA training on ``config.dataset``.

    Returns (unsup_dataloader, train_dataloader, dev_dataloader, test_dataloader).
    Preprocessed inputs are cached at ``data/<dataset>_UDA.pkl``: loaded when
    ``config.load_data`` is True, otherwise rebuilt from the raw TSVs and saved.
    """
    cache_path = 'data/' + config.dataset + '_UDA.pkl'
    if config.load_data is True:
        data = load_pickle(cache_path)
    else:
        # BUG FIX: the original called read_data(conf), silently relying on the
        # module-level ``conf`` global instead of the ``config`` argument.
        data = read_data(config)
        dump_pickle(data, cache_path)

    train_dataset = SupDataset(data['train'])
    dev_dataset = SupDataset(data['dev'])
    test_dataset = SupDataset(data['test'])
    unsup_dataset = UnsupDataset(data['unsup_data'])

    collate_fn = Collator(config)
    unsup_collate_fn = UnsupCollator(config)

    train_dataloader = DataLoader(dataset=train_dataset, batch_size=config.batch_size, shuffle=True,
                                  collate_fn=collate_fn)

    # The unsupervised batch is larger by config.unsup_data_ratio.
    unsup_dataloader = DataLoader(dataset=unsup_dataset, shuffle=True, collate_fn=unsup_collate_fn,
                                  batch_size=int(config.batch_size * config.unsup_data_ratio))

    dev_dataloader = DataLoader(dataset=dev_dataset, batch_size=config.batch_size, shuffle=False, collate_fn=collate_fn)

    test_dataloader = DataLoader(dataset=test_dataset, batch_size=config.batch_size, shuffle=False,
                                 collate_fn=collate_fn)

    return unsup_dataloader, train_dataloader, dev_dataloader, test_dataloader


# Merge one supervised batch with one unsupervised batch into two dicts:
#   grad_data    - fed through the model WITH gradients (sup rows + ba view)
#   no_grad_data - fed through the model WITHOUT gradients (ab view)
def get_data(sup_batch, unsup_batch, config):
    grad_data = {}
    no_grad_data = {}

    # The two batches may be padded to different lengths; zero-pad the shorter
    # one so sup and ba rows can be concatenated along the batch axis.
    sup_max_len = sup_batch['input_ids'].shape[1]      # supervised seq length
    unsup_max_len = unsup_batch['ba_input_ids'].shape[1]  # unsupervised seq length
    cur_max_len = max(sup_max_len, unsup_max_len)

    for key, sup_value in sup_batch.items():
        # Labels belong to the supervised rows only; pass them through as-is.
        if key == 'labels':
            grad_data[key] = sup_value
            continue

        # Matching ab / ba fields from the unsupervised batch.
        ba_unsup_value = unsup_batch['ba_{}'.format(key)]
        ab_unsup_value = unsup_batch['ab_{}'.format(key)]

        if sup_max_len == cur_max_len:
            # Supervised side is at least as long: pad the unsupervised ba view.
            padding_value = paddle.zeros((ba_unsup_value.shape[0], cur_max_len - unsup_max_len), dtype='int64')
            ba_unsup_value = paddle.concat([ba_unsup_value, padding_value], axis=-1)
        else:
            # Unsupervised side is longer: pad the supervised batch.
            padding_value = paddle.zeros((sup_value.shape[0], cur_max_len - sup_max_len), dtype='int64')
            sup_value = paddle.concat([sup_value, padding_value], axis=-1)

        # Stack sup rows first and ba rows after, sharing one forward pass.
        grad_data[key] = paddle.concat([sup_value, ba_unsup_value], axis=0)
        no_grad_data[key] = ab_unsup_value

    return grad_data, no_grad_data


from paddlenlp.transformers import AutoModelForSequenceClassification
from paddlenlp.transformers import AutoTokenizer

'''
    Training signal annealing (TSA):
    prevents overfitting the labeled data while underfitting the unlabeled
    data in semi-supervised learning.
'''


def get_tsa_threshold(total_steps, global_steps):
    """Exponential TSA schedule: rises from exp(-5)/2 + 0.5 toward 1.0 as training progresses."""
    progress = global_steps / total_steps
    return np.exp((progress - 1) * 5) / 2 + 0.5


# Forward-only pass for the unsupervised (ab) view; no gradients are tracked.
@paddle.no_grad()
def forward_no_grad(no_grad_data, config: Config, model):
    """Return (unsup_loss_mask, sharpened probabilities) for the unsupervised batch.

    The mask is 1 where the prediction's max probability exceeds
    ``config.uda_confidence_threshold`` and 0 elsewhere.
    """
    logits = model(input_ids=no_grad_data['input_ids'],
                   token_type_ids=no_grad_data['token_type_ids'])

    # ----------- sharpen -------------#
    # Temperature-scaled softmax sharpens the target distribution.
    probs = paddle.nn.functional.softmax(logits / config.uda_softmax_temp, axis=-1)
    # ----------- sharpen -------------#
    top_probs = probs.max(axis=-1)

    # Keep only confident predictions; the boolean comparison is cast to 0/1 ints.
    confident = paddle.greater_than(top_probs,
                                    paddle.to_tensor(config.uda_confidence_threshold, dtype='float32'))
    return paddle.to_tensor(confident, dtype='int64'), probs


# Supervised forward pass; gradients flow through this path and are used for the update.
def forward_with_grad(unsup_loss_mask, unsup_probs, cur_bs,
                      model, grad_data, total_steps, global_steps,
                      loss_fn, metric):
    """Compute the combined UDA loss for one training step.

    ``grad_data`` stacks the supervised batch first and the unsupervised ``ba``
    view (``cur_bs`` rows) after it, sharing a single forward pass.

    Returns (loss, tsa_threshold, unsup_loss, sup_loss, acc).
    """
    # TSA threshold grows toward 1 over training, so progressively more
    # supervised examples contribute to the loss.
    tsa_threshold = get_tsa_threshold(total_steps, global_steps)

    input_ids = grad_data['input_ids']
    token_type_ids = grad_data['token_type_ids']
    attention_mask = grad_data['attention_mask']  # NOTE(review): read but never passed to the model
    labels = grad_data['labels']

    logits = model(input_ids=input_ids, token_type_ids=token_type_ids)
    # --------- supervised loss -------#
    # cur_bs = batch size of the unsupervised ba view;
    # the first rows are supervised data, the trailing cur_bs rows are unsupervised.
    sup_logits, unsup_logits = logits.split([logits.shape[0] - cur_bs, cur_bs])

    # Labels for the supervised rows.
    sup_labels = labels[:logits.shape[0] - cur_bs]

    per_example_loss = loss_fn(sup_logits, sup_labels)

    # Probability the model assigns to each example's correct label.
    # NOTE(review): the next line is a no-op — paddle.reshape returns a new
    # tensor and the result is discarded; the reshape used by gather is below.
    paddle.reshape(sup_labels,(-1,1))
    correct_label_probs = nn.functional.softmax(sup_logits, axis=-1).gather(axis=-1, index=paddle.reshape(sup_labels,(-1,1)))

    # TSA: drop over-confident supervised examples; keep those whose
    # correct-label probability is <= tsa_threshold.
    sup_loss_mask = paddle.to_tensor(paddle.less_equal(correct_label_probs,paddle.to_tensor(tsa_threshold, dtype='float32')).squeeze(),dtype='float32')

    # Mask out the over-confident supervised sample losses.
    per_example_loss *= sup_loss_mask

    # Mean loss over the surviving supervised examples.
    sup_loss = per_example_loss.sum() / max(sup_loss_mask.sum(), 1)  # max(sup_loss_mask.sum(), 1) = effective count
    # --------- supervised loss -------#

    # --------- unsupervised loss -------#
    unsup_log_probs = nn.functional.log_softmax(unsup_logits, axis=-1)
    # KLDivLoss expects log-probabilities as input
    # and plain probabilities (the sharpened ab targets) as target.
    per_example_kl_loss = nn.KLDivLoss(reduction='none')(unsup_log_probs, unsup_probs).sum(axis=-1)

    # Mask out the low-confidence unsupervised sample losses.
    per_example_kl_loss *= unsup_loss_mask

    # Mean loss over the surviving unsupervised examples.
    unsup_loss = per_example_kl_loss.sum() / max(unsup_loss_mask.sum(), 1)
    # --------- unsupervised loss -------#

    # Total loss (equal weighting of both terms).
    loss = sup_loss + unsup_loss

    # Running accuracy.
    # NOTE(review): Accuracy.compute receives the gathered correct-label
    # probabilities (a single column) rather than the class logits — verify
    # this measures the intended accuracy.
    correct = metric.compute(correct_label_probs, labels)
    metric.update(correct)
    acc = metric.accumulate()

    return loss, tsa_threshold, unsup_loss, sup_loss, acc


from paddle.optimizer import AdamW


# Validation loop (no gradients).
@paddle.no_grad()
def evaluation(model, loss_fn, metric, val_dataloder):
    """Evaluate ``model`` on ``val_dataloder``; returns (mean loss, mean running accuracy)."""
    model.eval()
    metric.reset()
    batch_losses = []
    batch_accs = []
    for batch in val_dataloder:
        input_ids = batch['input_ids']
        token_type_ids = batch['token_type_ids']
        attention_mask = batch['attention_mask']  # not passed to the model, matching training
        labels = batch['labels']

        logits = model(input_ids=input_ids, token_type_ids=token_type_ids)

        batch_losses.append(loss_fn(logits, labels).numpy())
        metric.update(metric.compute(logits, labels))
        batch_accs.append(metric.accumulate())
    # Restore training mode and clear the metric state before returning.
    model.train()
    metric.reset()
    return np.mean(batch_losses), np.mean(batch_accs)


def train(config: Config, train_dataloader, dev_dataloader, unsup_dataloader=None):
    """Run UDA training: each step consumes one unsupervised batch plus one
    supervised batch (cycled), and validates on ``dev_dataloader`` per epoch.

    Returns the trained model.
    """
    # Build the classification model.
    model = AutoModelForSequenceClassification.from_pretrained(config.model_path, num_classes=config.num_labels)
    # Gradient clipping by value.
    clip = paddle.nn.ClipGradByValue(max=1)
    # Optimizer.
    optimizer = AdamW(learning_rate=config.learning_rate, parameters=model.parameters(),
                      weight_decay=config.weight_decay, grad_clip=clip)
    metric = paddle.metric.Accuracy()
    # Loss function.
    loss_fn = nn.loss.CrossEntropyLoss()
    # Step count follows the unsupervised dataloader, which is the larger one.
    total_steps = len(unsup_dataloader) * config.epochs
    epoch_iterator = range(config.epochs)
    global_steps = 0
    train_loss = 0.  # NOTE(review): accumulated but never reported
    logging_loss = 0.  # NOTE(review): unused
    best_acc = 0.  # NOTE(review): unused — no best-model checkpointing below
    best_model_path = ''  # NOTE(review): unused

    # Cycle the (smaller) supervised dataloader alongside the unsupervised one.
    train_iterator = iter(train_dataloader)
    for epoch in epoch_iterator:
        model.train()
        # ----------------------- new ----------------------#
        for iter_id, unsup_batch in enumerate(unsup_dataloader):
            cur_bs = unsup_batch['ab_input_ids'].shape[0]
            # Restart the supervised iterator when it is exhausted.
            try:
                sup_batch = next(train_iterator)
            except StopIteration:
                train_iterator = iter(train_dataloader)
                sup_batch = next(train_iterator)

            # grad_data: needs gradients / backprop (sup rows + ba view)
            # no_grad_data: forward-only (ab view)
            grad_data, no_grad_data = get_data(sup_batch, unsup_batch, config)

            # Forward-only pass on the unsupervised (ab) view:
            # confidence mask + sharpened target probabilities.
            unsup_loss_mask, unsup_probs = forward_no_grad(no_grad_data, config, model)

            # Combined supervised + unsupervised loss.
            loss, tsa_threshold, unsup_loss, sup_loss, acc = forward_with_grad(
                unsup_loss_mask, unsup_probs, cur_bs, model, grad_data, total_steps, global_steps, loss_fn, metric
            )

            # Backpropagation and parameter update.
            loss.backward()
            optimizer.step()
            optimizer.clear_grad()

            train_loss += loss.item()
            global_steps += 1

            # Periodic progress logging.
            if global_steps % config.print_loss == 0:
                print('epoch:{}, iter_id:{}, loss:{}, acc:{}'.format(epoch, iter_id, loss, acc))

        # Validate at the end of each epoch.
        avg_val_loss, acc = evaluation(model, loss_fn, metric, dev_dataloader)
        print('-' * 50)
        print('epoch: {}, val_loss: {}, val_acc: {}'.format(epoch, avg_val_loss, acc))
        print('-' * 50)

    return model


# --- script entry: configure Paddle, build dataloaders, and train ---
paddle.device.set_device('gpu:0')  # select the GPU device
paddle.disable_static()  # enable dynamic-graph (eager) mode
# Load configuration
conf = Config()
conf.model_path = 'ernie-gram-zh'
conf.tokenizer = AutoTokenizer.from_pretrained(conf.model_path)
# Build the dataloaders
unsup_dataloader, train_dataloader, dev_dataloader, test_dataloader = create_dataloader(conf)

model = train(conf, train_dataloader, dev_dataloader, unsup_dataloader)
