#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : TorchBERT.py
# @Author: Richard Chiming Xu
# @Date  : 2022/1/20
# @Desc  :

'''
    基于bert的裸跑
    1. 直接引用bert裸跑
'''

import numpy as np
import pandas as pd
from tqdm import tqdm

import torch

from transformers import AutoTokenizer
from collections import defaultdict

from dataprocess import Exchange
from torch.utils.data import DataLoader, Dataset
from torch import nn
from transformers import AutoModelForSequenceClassification, AdamW
from sklearn.metrics import f1_score, accuracy_score
class Config:
    """Hyper-parameters and runtime settings for the plain-BERT baseline run."""
    # --- data loading ---
    dataset = 'paws-x'  # dataset folder name under data/
    max_seq_len = 64  # maximum tokenized sequence length
    need_data_aug = True  # enlarge the training set with augmented pairs
    # --- model ---
    model_path = 'D:/env/bert_model/hfl/chinese-bert-wwm-ext'  # local model path (overridden by the driver script)
    tokenizer = None  # tokenizer object, filled in at runtime
    load_model = False  # load an already-trained model for prediction only
    save_model = True  # save the trained model
    # --- training ---
    device = 'cpu'  # switched to 'cuda' by the driver script when available
    learning_rate = 1e-5
    batch_size = 32  # batch size
    epochs = 15  # number of training epochs
    print_loss = 200  # print loss every N iterations
    num_labels = 2  # number of output classes


# 读取数据
def read_data(config: Config):
    """Load, clean, optionally augment, and tokenize the three data splits.

    Reads train/dev/test TSVs from ``data/<config.dataset>/`` (sentence pair
    plus label; the test split has no label and gets a dummy 0), then encodes
    every row with ``config.tokenizer``.

    Returns:
        Tuple ``(train, dev, test)`` of dicts of parallel lists with keys
        ``input_ids``, ``token_type_ids``, ``attention_mask``, ``labels``.
    """
    train = pd.read_csv('data/' + config.dataset + '/train.tsv', sep='\t', names=['text_a', 'text_b', 'label'])
    dev = pd.read_csv('data/' + config.dataset + '/dev.tsv', sep='\t', names=['text_a', 'text_b', 'label'])
    test = pd.read_csv('data/' + config.dataset + '/test.tsv', sep='\t', names=['text_a', 'text_b'])

    # When the raw file contains more than two distinct label values
    # (e.g. a stray header row), keep only '0'/'1' rows and cast to int.
    if len(set(train['label'])) > 2:
        train = train[train['label'].isin(['0', '1'])]
        train['label'] = train['label'].astype('int')
    train = train.dropna()

    # BUG FIX: the original re-checked train['label'] here, so the dev split
    # was never cleaned; inspect the dev labels instead.
    if len(set(dev['label'])) > 2:
        dev = dev[dev['label'].isin(['0', '1'])]
        dev['label'] = dev['label'].astype('int')
    dev = dev.dropna()
    # Dummy label so all three splits share the same record layout.
    test['label'] = 0

    # Data augmentation: enlarge the training set with augmented pairs
    # derived from both train and dev.
    if config.need_data_aug is True:
        aug_train = Exchange.aug_group_by_a(train)
        aug_dev = Exchange.aug_group_by_a(dev)
        train = pd.concat([train, aug_train, aug_dev])

    # Tokenize every split row by row.
    tokenizer = config.tokenizer
    data_df = {'train': train, 'dev': dev, 'test': test}
    full_data_dict = {}
    for k, df in data_df.items():
        inputs = defaultdict(list)
        for i, row in tqdm(df.iterrows(), desc='encode {} data'.format(k), total=len(df)):
            seq_a = row['text_a']
            seq_b = row['text_b']
            label = row['label']
            try:
                inputs_dict = tokenizer.encode_plus(seq_a, seq_b, add_special_tokens=True,
                                                    return_token_type_ids=True,
                                                    return_attention_mask=True)
            except TypeError:
                # BUG FIX: skip malformed rows instead of falling through and
                # re-appending the previous row's encoding (or raising
                # NameError when the very first row fails).
                print(row)
                continue
            inputs['input_ids'].append(inputs_dict['input_ids'])
            inputs['token_type_ids'].append(inputs_dict['token_type_ids'])
            inputs['attention_mask'].append(inputs_dict['attention_mask'])
            inputs['labels'].append(label)
        full_data_dict[k] = inputs

    return full_data_dict['train'], full_data_dict['dev'], full_data_dict['test']




class SimDataset(Dataset):
    """Wraps tokenized inputs (a dict of parallel lists) as a torch Dataset."""

    def __init__(self, data_dict):
        super(SimDataset, self).__init__()
        self.input_ids = data_dict['input_ids']
        self.token_type_ids = data_dict['token_type_ids']
        self.attention_mask = data_dict['attention_mask']
        self.labels = data_dict['labels']
        self.len = len(self.input_ids)

    def __getitem__(self, index):
        """Return one example as an (ids, type_ids, mask, label) tuple."""
        return (
            self.input_ids[index],
            self.token_type_ids[index],
            self.attention_mask[index],
            self.labels[index],
        )

    def __len__(self):
        return self.len


# 统一处理数据
class Collator:
    """Batches SimDataset tuples: pads/truncates to a shared length and
    stacks everything into model-ready long tensors."""

    def __init__(self, tokenizer, max_seq_len):
        self.tokenizer = tokenizer
        self.max_seq_len = max_seq_len

    def pad(self, input_ids_list, token_type_ids_list, attention_mask_list, labels_list, max_seq_len):
        """Pad shorter rows with zeros and truncate longer ones to
        ``max_seq_len``, re-appending the tokenizer's SEP id at the cut."""
        batch_size = len(input_ids_list)
        input_ids = torch.zeros((batch_size, max_seq_len), dtype=torch.long)
        token_type_ids = torch.zeros_like(input_ids)
        attention_mask = torch.zeros_like(input_ids)

        rows = zip(input_ids_list, token_type_ids_list, attention_mask_list)
        for idx, (ids, type_ids, mask) in enumerate(rows):
            length = len(ids)
            if length < max_seq_len:
                # Shorter row: left-align; the zero-initialized tail is the padding.
                input_ids[idx, :length] = torch.tensor(ids, dtype=torch.long)
                token_type_ids[idx, :length] = torch.tensor(type_ids, dtype=torch.long)
                attention_mask[idx, :length] = torch.tensor(mask, dtype=torch.long)
            else:
                # Row at or over the limit: truncate, keeping SEP as the final
                # token so the sequence stays well-formed.
                truncated = ids[:max_seq_len - 1] + [self.tokenizer.sep_token_id]
                input_ids[idx] = torch.tensor(truncated, dtype=torch.long)
                token_type_ids[idx] = torch.tensor(type_ids[:max_seq_len], dtype=torch.long)
                attention_mask[idx] = torch.tensor(mask[:max_seq_len], dtype=torch.long)

        # One label per row, shaped (batch, 1).
        labels = torch.tensor([[lbl] for lbl in labels_list], dtype=torch.long)
        return input_ids, token_type_ids, attention_mask, labels

    def __call__(self, examples):
        """Collate a list of dataset tuples into a batch dict of tensors."""
        input_ids_list, token_type_ids_list, attention_mask_list, labels_list = list(zip(*examples))
        # Target length: longest row in this batch, capped by the configured max.
        longest = max(len(ids) for ids in input_ids_list)
        target_len = min(longest, self.max_seq_len)
        input_ids, token_type_ids, attention_mask, labels = self.pad(
            input_ids_list, token_type_ids_list, attention_mask_list, labels_list, target_len)
        return {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
            'labels': labels,
        }


def create_dataloader(config: Config):
    """Build train/dev/test DataLoaders from the tokenized splits.

    Train and dev are shuffled; test keeps its original order so predictions
    line up with the input rows.
    """
    train, dev, test = read_data(config)

    train_dataset = SimDataset(train)
    dev_dataset = SimDataset(dev)
    test_dataset = SimDataset(test)

    collate_fn = Collator(config.tokenizer, config.max_seq_len)

    def _loader(dataset, shuffle):
        # num_workers=0: keep data loading in the main process.
        return DataLoader(dataset, batch_size=config.batch_size,
                          collate_fn=collate_fn, shuffle=shuffle, num_workers=0)

    return _loader(train_dataset, True), _loader(dev_dataset, True), _loader(test_dataset, False)




# 校验
def evaluation(config, model, val_dataloader):
    """Run one no-grad pass over ``val_dataloader`` and report metrics.

    Args:
        config: object with the target ``device``.
        model: sequence-classification model whose output indexes as
            (loss, logits) when called with the batch dict.
        val_dataloader: yields dicts of tensors including 'labels'.

    Returns:
        Tuple ``(avg_val_loss, macro_f1, accuracy)`` over the whole split.
    """
    model.eval()
    preds = []
    labels = []
    val_loss = 0.
    with torch.no_grad():
        for mini_batch in val_dataloader:
            batch_cuda = {item: value.to(config.device) for item, value in mini_batch.items()}
            # Collect gold labels as plain ints (moved to CPU first) instead of
            # accumulating 0-dim tensors and re-wrapping them with
            # torch.tensor() afterwards.
            labels.extend(batch_cuda['labels'].view(-1).cpu().tolist())

            result = model(**batch_cuda)
            loss = result[0]
            logits = result[1]
            # argmax over the class dimension -> predicted label per example.
            _, indices = torch.max(logits, dim=1)
            preds.extend(indices.cpu().tolist())

            val_loss += loss.item()

    avg_val_loss = val_loss / len(val_dataloader)
    f1 = f1_score(labels, preds, average='macro')
    acc = accuracy_score(labels, preds)
    return avg_val_loss, f1, acc

def predict(config, model, test_dataloader):
    """Predict a class id for every example in ``test_dataloader``.

    Args:
        config: object with the target ``device``.
        model: sequence-classification model whose output indexes as
            (loss, logits) when called with the batch dict.
        test_dataloader: yields dicts of tensors.

    Returns:
        1-D int64 numpy array of predicted label ids, in batch order.
    """
    predict_labels = []
    model.eval()
    with torch.no_grad():
        for mini_batch in test_dataloader:
            batch_cuda = {item: value.to(config.device) for item, value in mini_batch.items()}
            result = model(**batch_cuda)
            logits = result[1]
            # argmax over the class dimension -> predicted label per example.
            _, indices = torch.max(logits, dim=1)
            # Accumulate plain ints; avoids re-wrapping a list of 0-dim
            # (possibly CUDA) tensors with torch.tensor() at the end.
            predict_labels.extend(indices.cpu().tolist())
    return np.array(predict_labels, dtype=np.int64)

def train(config: Config, train_dataloader: DataLoader, dev_dataloader: DataLoader):
    """Fine-tune a sequence-classification model, validating after each epoch.

    Args:
        config: Config with model path, device, learning rate, epochs etc.
        train_dataloader: shuffled training batches (dicts incl. 'labels').
        dev_dataloader: validation batches for per-epoch evaluation.

    Returns:
        The trained model (left on ``config.device``).
    """
    model = AutoModelForSequenceClassification.from_pretrained(config.model_path, num_labels=config.num_labels)
    model.to(config.device)

    opt = AdamW(lr=config.learning_rate, params=model.parameters())
    # NOTE: the original created an unused nn.CrossEntropyLoss() here; the
    # model already computes its own loss because 'labels' is in the batch.

    last_val_acc = 0
    for epoch in range(config.epochs):
        model.train()
        for iter_id, mini_batch in enumerate(train_dataloader):
            batch_cuda = {item: value.to(config.device) for item, value in mini_batch.items()}

            result = model(**batch_cuda)
            loss = result[0]
            logits = result[1]

            # Batch accuracy for progress logging.
            _, indices = torch.max(logits, dim=1)
            correct = torch.sum(indices == batch_cuda['labels'].view(-1))

            loss.backward()
            # Gradient accumulation over 2 mini-batches (works around limited
            # GPU memory on the dev machine).
            if iter_id % 2 == 0:
                if last_val_acc > 0.95:
                    # Clip gradients once validation accuracy is high, to
                    # stabilise the late stages of training.
                    nn.utils.clip_grad_norm_(model.parameters(), max_norm=20, norm_type=2)
                opt.step()
                opt.zero_grad()

            # Periodic progress print (loss.item() -> plain float, not a tensor repr).
            if iter_id % config.print_loss == 0:
                print('epoch:{}, iter_id:{}, loss:{}, acc:{}'.format(
                    epoch, iter_id, loss.item(), correct.item() * 1.0 / len(batch_cuda['labels'])))

        # Validate once per epoch.
        avg_val_loss, f1, acc = evaluation(config, model, dev_dataloader)
        last_val_acc = acc
        print('-' * 50)
        print('epoch: {}, val_loss: {}, val_f1: {}, val_acc: {}'.format(epoch, avg_val_loss, f1, acc))
        print('-' * 50)
    return model


import os

# Make CUDA report errors at the exact call that triggered them (easier debugging).
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'


def _run_one_dataset(dataset_name):
    """Fine-tune on one dataset and write its test predictions to result/."""
    conf = Config()
    conf.device = 'cuda' if torch.cuda.is_available() else 'cpu'
    conf.dataset = dataset_name
    conf.model_path = 'peterchou/nezha-chinese-base'
    conf.batch_size = 64
    conf.tokenizer = AutoTokenizer.from_pretrained(conf.model_path)

    # Data -> training -> inference for this dataset.
    train_dataloader, dev_dataloader, test_dataloader = create_dataloader(conf)
    model = train(conf, train_dataloader, dev_dataloader)
    predict_labels = predict(conf, model, test_dataloader)

    # Persist predictions as an <index, prediction> TSV.
    test_df = pd.DataFrame(predict_labels, columns=['prediction'])
    test_df['index'] = test_df.index
    test_df.to_csv('result/' + dataset_name + '.tsv', index=False, columns=['index', 'prediction'], sep='\t')


data_list = ['bq_corpus', 'lcqmc', 'paws-x']
for data in data_list:
    _run_one_dataset(data)

