#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : ErnieGram-分开填充.py
# @Author: Richard Chiming Xu
# @Date  : 2022/2/14
# @Desc  :

'''
    1. 分开填充
    2. 裸跑BERT
'''

import numpy as np
import pandas as pd
from tqdm import tqdm

from collections import defaultdict
import paddle
from paddlenlp.transformers import AutoTokenizer
from paddle import optimizer, nn
from paddlenlp.transformers import AutoModelForSequenceClassification,ErnieForSequenceClassification
from extra import  extra_fgm
from paddlenlp.transformers import LinearDecayWithWarmup
from paddle.io import Dataset, DataLoader
import pandas as pd
from collections import defaultdict


class Config:
    """Hyper-parameters and runtime switches for training / prediction."""
    # --- data loading ---
    dataset = 'paws-x'        # dataset directory name under data/
    max_seq_len = 64          # max token length of an encoded sentence pair
    need_data_aug = True      # enlarge the train set via aug_group_by_a
    # Bug fix: the default used to be 'Train', but read_data() and
    # create_dataloader() compare against lowercase 'train'/'predict',
    # so the default config always raised. Use the lowercase form.
    operation = 'train'       # 'train' or 'predict'
    # --- model ---
    model_path = 'D:/env/bert_model/hfl/chinese-bert-wwm-ext'  # local model path
    model_suffix = '-fgm'     # suffix appended when saving the model
    tokenizer = None          # tokenizer instance, injected by the caller
    load_model = False        # load an existing model for prediction
    save_model = True         # save the trained model
    # --- training ---
    device = 'cpu'
    learning_rate = 5e-6
    batch_size = 512          # batch size
    epochs = 20               # number of training epochs
    print_loss = 20           # how often to print the loss
    num_labels = 2            # number of classes
    adv = 'fgm'               # adversarial-training scheme
    eps = 0.1                 # perturbation size for adversarial training


# Data augmentation: derive new (text_b, text_b) pairs from rows sharing text_a.
def aug_group_by_a(df):
    """Augment a similarity dataset by pairing sentences that share a text_a.

    Within each text_a group, every pair of text_b sentences yields a new
    example: label 1 when both originals were positive, label 0 when exactly
    one was. Pairs where both originals are negative are skipped (their
    relation is unknown). Returns a new DataFrame with columns
    text_a / text_b / label.
    """
    augmented = defaultdict(list)
    for _, group in df.groupby(by=['text_a']):
        # Collect each row's (text_b, label) positionally, as the original did.
        pairs = list(zip(group.iloc[:, 1], group.iloc[:, 2]))
        n = len(pairs)
        if n < 2:
            continue
        for i in range(n):
            text_i, label_i = pairs[i]
            for j in range(i + 1, n):
                text_j, label_j = pairs[j]
                # Two negatives tell us nothing about each other.
                if label_i == 0 and label_j == 0:
                    continue
                augmented['text_a'].append(text_i)
                augmented['text_b'].append(text_j)
                augmented['label'].append(1 if (label_i == 1 and label_j == 1) else 0)
    return pd.DataFrame(augmented)

# Load and tokenize the dataset tsv files for the configured operation.
def read_data(config: Config):
    """Read and tokenize data for config.dataset.

    Returns (train_inputs, dev_inputs) — dicts of per-example lists — when
    config.operation == 'train', or a single test_inputs dict when it is
    'predict'. Raises Exception for any other operation value.
    """
    if config.operation == 'train':
        train = pd.read_csv('data/' + config.dataset + '/train.tsv', sep='\t',
                            names=['text_a', 'text_b', 'label'])
        dev = pd.read_csv('data/' + config.dataset + '/dev.tsv', sep='\t',
                          names=['text_a', 'text_b', 'label'])

        # Bug fix: the original re-tested train's labels when deciding
        # whether to filter dev; each frame must be checked on its own.
        train = _keep_binary_labels(train)
        dev = _keep_binary_labels(dev)

        # NOTE(review): truncating to 100 rows looks like leftover debug
        # code — confirm before a real training run.
        train = train[:100]
        dev = dev[:100]

        # Data augmentation: enlarge the training set with derived pairs
        # (dev-derived pairs are also added to train, as in the original).
        if config.need_data_aug is True:
            aug_train = aug_group_by_a(train)
            aug_dev = aug_group_by_a(dev)
            train = pd.concat([train, aug_train, aug_dev])

        tokenizer = config.tokenizer
        return (_encode_frame(train, tokenizer, config, 'train', True),
                _encode_frame(dev, tokenizer, config, 'dev', True))
    elif config.operation == 'predict':
        test = pd.read_csv('data/' + config.dataset + '/test.tsv', sep='\t',
                           names=['text_a', 'text_b'])
        # Dummy labels so prediction shares the same encode pipeline.
        test['label'] = 0
        # NOTE(review): the original predict path did not pad to
        # max_seq_len; that behavior is preserved (pad_to_max=False).
        return _encode_frame(test, config.tokenizer, config, 'test', False)
    else:
        raise Exception('错误的模型行为!')


def _keep_binary_labels(df):
    """Keep only rows labelled '0'/'1' and drop NA rows.

    Some corpora carry extra label values; when more than two distinct
    labels are present, filter to the binary ones and cast to int.
    """
    if len(set(df['label'])) > 2:
        df = df[df['label'].isin(['0', '1'])]
        df['label'] = df['label'].astype('int')
    return df.dropna()


def _encode_frame(df, tokenizer, config, name, pad_to_max):
    """Tokenize each (text_a, text_b, label) row into model input lists.

    Returns a dict with input_ids / token_type_ids / attention_mask /
    labels, each a list with one entry per row of *df*.
    """
    inputs = defaultdict(list)
    for _, row in tqdm(df.iterrows(), desc='encode {} data'.format(name), total=len(df)):
        encoded = tokenizer.encode(row[0], row[1],
                                   return_special_tokens_mask=True,
                                   return_token_type_ids=True,
                                   return_attention_mask=True,
                                   max_seq_len=config.max_seq_len,
                                   pad_to_max_seq_len=pad_to_max)
        inputs['input_ids'].append(encoded['input_ids'])
        inputs['token_type_ids'].append(encoded['token_type_ids'])
        inputs['attention_mask'].append(encoded['attention_mask'])
        inputs['labels'].append(row[2])
    return inputs


class SimDataset(Dataset):
    """Map-style dataset over pre-tokenized inputs.

    Wraps the dict of parallel lists produced by read_data and yields
    (input_ids, token_type_ids, attention_mask, label) tuples.
    """

    def __init__(self, data_dict):
        super(SimDataset, self).__init__()
        self.input_ids = data_dict['input_ids']
        self.token_type_ids = data_dict['token_type_ids']
        self.attention_mask = data_dict['attention_mask']
        self.labels = data_dict['labels']
        # All four lists are parallel; input_ids fixes the dataset size.
        self.len = len(self.input_ids)

    def __getitem__(self, index):
        # Pull the index-th entry out of each parallel field.
        fields = (self.input_ids, self.token_type_ids,
                  self.attention_mask, self.labels)
        return tuple(field[index] for field in fields)

    def __len__(self):
        return self.len

# Collate pre-tokenized examples into dense paddle tensors for one batch.
class Collator:
    """Stacks (input_ids, token_type_ids, attention_mask, label) tuples.

    Sequences are expected to be pre-padded to a common length by the
    tokenizer; this class only converts them to int64 tensors.
    """

    def __init__(self, tokenizer, max_seq_len=None):
        # max_seq_len is accepted (and stored) for backward compatibility
        # with call sites that pass it positionally; padding itself is done
        # at tokenize time, so it is not used here.
        self.tokenizer = tokenizer
        self.max_seq_len = max_seq_len

    def to_tensor(self, input_ids_list, token_type_ids_list, attention_mask_list, labels_list):
        """Convert per-example lists into int64 tensors.

        Labels get shape (batch, 1). A leftover debug print of the raw
        input_ids_list was removed from the hot path.
        """
        input_ids = paddle.to_tensor(list(input_ids_list), dtype='int64')
        token_type_ids = paddle.to_tensor(list(token_type_ids_list), dtype='int64')
        attention_masks = paddle.to_tensor(list(attention_mask_list), dtype='int64')
        labels = paddle.to_tensor([[label] for label in labels_list], dtype='int64')
        return input_ids, token_type_ids, attention_masks, labels

    def __call__(self, examples):
        """Collate a list of dataset tuples into a model feed dict."""
        # Transpose the batch: list of tuples -> tuple of per-field lists.
        input_ids_list, token_type_ids_list, attention_mask_list, labels_list = list(zip(*examples))
        input_ids, token_type_ids, attention_mask, labels = self.to_tensor(
            input_ids_list, token_type_ids_list, attention_mask_list, labels_list)
        return {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
            'labels': labels,
        }


# Build DataLoader objects for the configured operation.
def create_dataloader(config: Config):
    """Create (train, dev) dataloaders or a test dataloader.

    Returns (train_dataloader, dev_dataloader) when config.operation is
    'train', a single test_dataloader when it is 'predict', and raises
    Exception otherwise.
    """
    if config.operation == 'train':
        # Read and tokenize the data, then wrap it in datasets.
        train, dev = read_data(config)
        train_dataset = SimDataset(train)
        dev_dataset = SimDataset(dev)

        collate_fn = Collator(config.tokenizer)
        train_dataloader = DataLoader(train_dataset, batch_size=config.batch_size,
                                      collate_fn=collate_fn, shuffle=True, num_workers=0)
        dev_dataloader = DataLoader(dev_dataset, batch_size=config.batch_size,
                                    collate_fn=collate_fn, shuffle=True, num_workers=0)

        return train_dataloader, dev_dataloader
    elif config.operation == 'predict':
        test = read_data(config)
        test_dataset = SimDataset(test)
        # Bug fix: Collator.__init__ takes only a tokenizer; the original
        # passed config.max_seq_len as a second positional argument, which
        # raised TypeError on every predict run.
        collate_fn = Collator(config.tokenizer)
        test_dataloader = DataLoader(test_dataset, batch_size=config.batch_size,
                                     collate_fn=collate_fn, shuffle=False, num_workers=0)
        return test_dataloader
    else:
        raise Exception('错误的模型行为!')


# ---------------------------------------------------------------------------
# Script entry: smoke-test the data pipeline (tokenize + batch the train set).
# ---------------------------------------------------------------------------
paddle.device.set_device('gpu:0')

conf = Config()
conf.operation = 'train'
conf.model_path = 'ernie-gram-zh'   # pretrained ERNIE-Gram (Chinese) hub name
conf.dataset = 'paws-x'
conf.batch_size = 8
conf.epochs = 15
conf.max_seq_len = 88               # long enough to hold both sentences
conf.learning_rate = 5e-6

conf.tokenizer = AutoTokenizer.from_pretrained(conf.model_path)

# Build the training dataloader directly (bypassing create_dataloader so the
# intermediate `train` dict stays available for inspection below).
train, dev = read_data(conf)
train_dataset = SimDataset(train)
collate_fn = Collator(conf.tokenizer)
train_dataloader = DataLoader(train_dataset, batch_size=conf.batch_size,
                              collate_fn=collate_fn, shuffle=True, num_workers=0)

# Sanity check: with pad_to_max_seq_len=True every sequence should have the
# same (padded) length.
lengths = [len(encoded) for encoded in train['input_ids']]
print(lengths)
for iter_id, mini_batch in enumerate(train_dataloader):
    print(mini_batch)