#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : PaddleBERT.py
# @Author: Richard Chiming Xu
# @Date  : 2022/1/25
# @Desc  : 基于paddlenlp的各种bert


import numpy as np
import pandas as pd
from tqdm import tqdm

from collections import defaultdict
import paddle


class Config:
    """Run configuration: data, model, and training hyper-parameters (class attributes)."""
    # --- data loading ---
    dataset: str = 'paws-x'  # dataset folder name under data/data52714/
    max_seq_len: int = 64  # maximum tokenized sequence length
    # --- model ---
    model_path = None  # local model path / pretrained model name for from_pretrained
    tokenizer = None  # tokenizer object (must provide .encode and .sep_token_id)
    load_model: bool = False  # whether to load an existing model for prediction
    save_model: bool = True  # whether to save the trained model
    # --- training ---
    device: str = 'cpu'
    learning_rate: float = 3e-5
    batch_size: int = 384  # batch size
    epochs: int = 15  # number of training epochs
    print_loss: int = 50  # print loss every N iterations
    num_labels: int = 2  # number of classes

# Load and tokenize the dataset splits
def read_data(config: Config):
    """Read the train/dev/test TSV splits for ``config.dataset`` and encode them.

    Returns a ``(train, dev, test)`` tuple; each element is a dict of lists with
    keys ``input_ids``, ``token_type_ids``, ``attention_mask``, ``labels``
    (test labels are dummy zeros).
    """
    base = 'data/data52714/' + config.dataset
    train = pd.read_csv(base + '/train.tsv', sep='\t', names=['text_a', 'text_b', 'label'])
    dev = pd.read_csv(base + '/dev.tsv', sep='\t', names=['text_a', 'text_b', 'label'])
    test = pd.read_csv(base + '/test.tsv', sep='\t', names=['text_a', 'text_b'])

    # Keep only binary string labels '0'/'1' if extra label values are present.
    if len(set(train['label'])) > 2:
        train = train[train['label'].isin(['0', '1'])]
        train['label'] = train['label'].astype('int')
    train = train.dropna()

    # BUG FIX: the original tested train's labels here, so dev was never filtered.
    if len(set(dev['label'])) > 2:
        dev = dev[dev['label'].isin(['0', '1'])]
        dev['label'] = dev['label'].astype('int')
    dev = dev.dropna()
    # The test split has no gold labels; add dummy zeros so downstream code is uniform.
    test['label'] = 0

    # Tokenize every row of every split.
    tokenizer = config.tokenizer
    data_df = {'train': train, 'dev': dev, 'test': test}
    full_data_dict = {}
    for k, df in data_df.items():
        inputs = defaultdict(list)
        for _, row in tqdm(df.iterrows(), desc='encode {} data'.format(k), total=len(df)):
            # Access by column name: positional Series indexing (row[0]) is deprecated in pandas.
            inputs_dict = tokenizer.encode(row['text_a'], row['text_b'],
                                           return_special_tokens_mask=True,
                                           return_token_type_ids=True,
                                           return_attention_mask=True)
            inputs['input_ids'].append(inputs_dict['input_ids'])
            inputs['token_type_ids'].append(inputs_dict['token_type_ids'])
            inputs['attention_mask'].append(inputs_dict['attention_mask'])
            inputs['labels'].append(row['label'])
        full_data_dict[k] = inputs

    return full_data_dict['train'], full_data_dict['dev'], full_data_dict['test']

# 将数据转换成dataset
from paddle.io import Dataset, DataLoader

class SimDataset(Dataset):
    """Paddle ``Dataset`` over a dict of pre-tokenized, parallel feature lists.

    Each item is a 4-tuple ``(input_ids, token_type_ids, attention_mask, label)``.
    """

    def __init__(self, data_dict):
        super(SimDataset, self).__init__()
        # Parallel per-example lists, all the same length.
        self.input_ids = data_dict['input_ids']
        self.token_type_ids = data_dict['token_type_ids']
        self.attention_mask = data_dict['attention_mask']
        self.labels = data_dict['labels']
        self.len = len(self.input_ids)

    def __getitem__(self, index):
        # Return the index-th example as a tuple; Collator zips these back up.
        return (self.input_ids[index],
                self.token_type_ids[index],
                self.attention_mask[index],
                self.labels[index])

    def __len__(self):
        return self.len

# Batch collation: pad/truncate examples to a common length
class Collator:
    """Collate function for the DataLoader: pads (with zeros) or truncates each
    example to the batch's common sequence length and tensorizes the result."""

    def __init__(self, tokenizer, max_seq_len):
        self.tokenizer = tokenizer
        self.max_seq_len = max_seq_len  # hard upper bound on the padded length

    def pad(self, input_ids_list, token_type_ids_list, attention_mask_list, labels_list, max_seq_len):
        """Pad/truncate every example to ``max_seq_len`` and return int64 tensors."""
        # Build plain Python rows first, then convert to tensors in one shot.
        ids_rows, type_rows, mask_rows = [], [], []
        for ids, types, mask in zip(input_ids_list, token_type_ids_list, attention_mask_list):
            n = len(ids)
            if n < max_seq_len:
                # Right-pad with zeros up to the target length.
                tail = [0] * (max_seq_len - n)
                ids_rows.append(list(ids) + tail)
                type_rows.append(list(types) + tail)
                mask_rows.append(list(mask) + tail)
            else:
                # Truncate; force the final token to be the tokenizer's [SEP] id.
                ids_rows.append(list(ids[:max_seq_len - 1]) + [self.tokenizer.sep_token_id])
                type_rows.append(list(types[:max_seq_len]))
                mask_rows.append(list(mask[:max_seq_len]))
        input_ids = paddle.to_tensor(ids_rows, dtype='int64')
        token_type_ids = paddle.to_tensor(type_rows, dtype='int64')
        attention_mask = paddle.to_tensor(mask_rows, dtype='int64')
        # Labels as a (batch, 1) int64 tensor.
        labels = paddle.to_tensor([[label] for label in labels_list], dtype='int64')

        return input_ids, token_type_ids, attention_mask, labels

    def __call__(self, examples):
        """Collate a list of (ids, type_ids, mask, label) tuples into a batch dict."""
        ids_list, type_list, mask_list, label_list = zip(*examples)
        # Pad to the longest sequence in this batch, capped at max_seq_len.
        batch_len = min(max(len(ids) for ids in ids_list), self.max_seq_len)
        input_ids, token_type_ids, attention_mask, labels = self.pad(
            ids_list, type_list, mask_list, label_list, batch_len)
        return {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
            'labels': labels,
        }


# Build the three dataloaders
def create_dataloader(config: Config):
    """Read the data and wrap each split in a DataLoader.

    Train and dev are shuffled; test keeps its original order so predictions
    line up with the input rows.
    """
    train, dev, test = read_data(config)
    collate_fn = Collator(config.tokenizer, config.max_seq_len)

    def _loader(data_dict, shuffle):
        # Shared DataLoader settings; only the dataset and shuffle flag vary.
        return DataLoader(SimDataset(data_dict), batch_size=config.batch_size,
                          collate_fn=collate_fn, shuffle=shuffle, num_workers=0)

    return _loader(train, True), _loader(dev, True), _loader(test, False)

# Validation loop
@paddle.no_grad()
def evaluation(model, loss_fn, metric, val_dataloder):
    """Evaluate ``model`` over ``val_dataloder`` and return (mean_loss, mean_accuracy).

    Side effects: resets ``metric`` before and after, and leaves the model
    back in train mode on return.
    """
    accu = []
    model.eval()
    metric.reset()
    losses = []
    for iter_id, mini_batch in enumerate(val_dataloder):
        input_ids = mini_batch['input_ids']
        token_type_ids = mini_batch['token_type_ids']
        attention_mask = mini_batch['attention_mask']
        labels = mini_batch['labels']

        # attention_mask deliberately not passed (paddle bug workaround, see train()).
        logits = model(input_ids=input_ids, token_type_ids=token_type_ids)

        loss = loss_fn(logits, labels)
        losses.append(loss.numpy())
        correct = metric.compute(logits, labels)
        metric.update(correct)
        acc = metric.accumulate()
        # BUG FIX: the original did accu.append(accu) — appending the list to
        # itself — so np.mean(accu) was meaningless. Append the scalar accuracy.
        accu.append(acc)
    model.train()
    metric.reset()
    return np.mean(losses), np.mean(accu)

from paddle import optimizer, nn
# Inference loop
def predict(model, test_dataloader):
    """Run inference over ``test_dataloader`` and return predicted class indices
    as a 1-D numpy array (argmax over softmax probabilities)."""
    batch_probs = []
    model.eval()
    for iter_id, mini_batch in tqdm(enumerate(test_dataloader)):
        # attention_mask intentionally omitted from the forward call (see train()).
        logits = model(input_ids=mini_batch['input_ids'],
                       token_type_ids=mini_batch['token_type_ids'])
        # Softmax over the class axis; collect per-batch probability matrices.
        batch_probs.append(nn.functional.softmax(logits, axis=1).numpy())

    return np.argmax(np.vstack(batch_probs), axis=1)

from paddlenlp.transformers import AutoModelForSequenceClassification


def train(config: Config, train_dataloader: DataLoader, dev_dataloader: DataLoader):
    """Fine-tune a sequence-classification model and return it.

    Prints running loss/accuracy every ``config.print_loss`` iterations and
    runs a dev-set evaluation at the end of every epoch.
    """
    # Model, optimizer, loss, and accuracy metric.
    model = AutoModelForSequenceClassification.from_pretrained(config.model_path, num_classes=config.num_labels)
    opt = optimizer.AdamW(learning_rate=config.learning_rate, parameters=model.parameters())
    loss_fn = nn.loss.CrossEntropyLoss()
    metric = paddle.metric.Accuracy()

    for epoch in range(config.epochs):
        model.train()
        for iter_id, mini_batch in enumerate(train_dataloader):
            labels = mini_batch['labels']
            # NOTE: attention_mask is deliberately NOT passed to the model —
            # passing it triggers a paddle bug (a PR fixing it has been submitted).
            logits = model(input_ids=mini_batch['input_ids'],
                           token_type_ids=mini_batch['token_type_ids'])

            # Loss plus running training accuracy.
            loss = loss_fn(logits, labels)
            probs = paddle.nn.functional.softmax(logits, axis=1)
            metric.update(metric.compute(probs, labels))
            acc = metric.accumulate()

            # Backward pass and parameter update.
            loss.backward()
            opt.step()
            opt.clear_grad()

            if iter_id % config.print_loss == 0:
                print('epoch:{}, iter_id:{}, loss:{}, acc:{}'.format(epoch, iter_id, loss, acc))

        # End-of-epoch validation on the dev set.
        avg_val_loss, acc = evaluation(model, loss_fn, metric, dev_dataloader)
        print('-' * 50)
        print('epoch: {}, val_loss: {}, val_acc: {}'.format(epoch, avg_val_loss, acc))
        print('-' * 50)
    return model

from paddlenlp.datasets import MapDataset




