#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : BERTWritening.py
# @Author: Richard Chiming Xu
# @Date  : 2022/1/24
# @Desc  :


'''
    1. 使用BERT-writening直接进行NLI任务
'''

import os
import pickle

import numpy as np
import pandas as pd
import scipy
import scipy.stats
import torch
from tqdm import tqdm

from transformers import AutoTokenizer, AutoModelForNextSentencePrediction


# Save an object to a pickle file
def dump_pickle(obj, file_path):
    """Serialize *obj* to *file_path* using the highest pickle protocol."""
    with open(file_path, 'wb') as fh:
        pickle.dump(obj, fh, protocol=pickle.HIGHEST_PROTOCOL)


# Load an object from a pickle file
def load_pickle(file_path):
    """Deserialize and return the object stored in *file_path*."""
    with open(file_path, 'rb') as fh:
        obj = pickle.load(fh)
    return obj


class Config:
    # Runtime configuration; attributes are class-level defaults, some are
    # overwritten at runtime (device in __main__, tokenizer/model by load_model()).
    # --- Data loading ---
    # Dataset directory name under data/ (expects train.tsv / dev.tsv / test.tsv).
    dataset = 'paws-x'
    # Maximum token length for the tokenizer; longer inputs are truncated.
    max_seq_len = 64
    # If True, sents_to_vecs() loads cached BERT vectors instead of re-encoding.
    # NOTE(review): this attribute shares its name with the module-level
    # load_pickle() helper; they do not collide, but it is easy to misread.
    load_pickle = True
    # --- Model ---
    # Local filesystem path of the pretrained BERT checkpoint.
    model_path = 'D:/env/bert_model/hfl/chinese-bert-wwm-ext'
    # Filled in by load_model() as a side effect.
    tokenizer = None
    model = None
    # --- Runtime ---
    # Torch device string; switched to 'cuda' in __main__ when available.
    device = 'cpu'
    # Hidden-state pooling strategy: 'first_last_avg', 'last_avg', or 'last2avg'.
    pooling = 'first_last_avg'
    # If True, apply BERT-whitening before scoring; otherwise plain L2 normalize.
    use_writen = True
    # Number of dimensions kept after whitening.
    n_components = 256


# Read the dataset splits
def read_data(config: Config):
    """Read train/dev/test TSVs from data/<dataset>/ and return them in a dict.

    When a split's label column holds more than two distinct values, only
    rows labeled '0'/'1' are kept and the column is cast to int. NaN rows
    are dropped from train and dev; the unlabeled test split gets a dummy
    label of 0.

    :param config: Config providing the dataset directory name
    :return: dict with keys 'train', 'dev', 'test' mapping to DataFrames
    """
    print('读取数据集...')
    train = pd.read_csv('data/' + config.dataset + '/train.tsv', sep='\t', names=['text_a', 'text_b', 'label'])
    dev = pd.read_csv('data/' + config.dataset + '/dev.tsv', sep='\t', names=['text_a', 'text_b', 'label'])
    test = pd.read_csv('data/' + config.dataset + '/test.tsv', sep='\t', names=['text_a', 'text_b'])

    if len(set(train['label'])) > 2:
        train = train[train['label'].isin(['0', '1'])]
        train['label'] = train['label'].astype('int')
    train = train.dropna()

    # BUG FIX: the original re-checked train's labels here, so dev was never
    # filtered or cast; dev must be gated on its own label column.
    if len(set(dev['label'])) > 2:
        dev = dev[dev['label'].isin(['0', '1'])]
        dev['label'] = dev['label'].astype('int')
    dev = dev.dropna()
    # The test split ships without gold labels; use a placeholder.
    test['label'] = 0

    datasets = {
        'train': train,
        'dev': dev,
        'test': test,
    }
    return datasets


def load_model(config: Config):
    """Load the tokenizer and NSP model from config.model_path onto config.device.

    Stores both on the config object (config.tokenizer / config.model) as a
    side effect; returns nothing.
    """
    print('加载模型...')
    # BUG FIX: the original read the module-level global `conf` here instead
    # of the `config` parameter, which only worked when run as a script.
    tokenizer = AutoTokenizer.from_pretrained(config.model_path)
    model = AutoModelForNextSentencePrediction.from_pretrained(config.model_path)
    model.to(config.device)
    config.tokenizer = tokenizer
    config.model = model


def sents_to_vecs(sent_list: list, config: Config, type: str):
    """Encode sentences into BERT vectors, using the on-disk cache when enabled.

    FIX: the original placed this docstring after the first print() call,
    where it was a dead string statement rather than a docstring.

    :param sent_list: list of sentence sequences, [sentences A, sentences B]
    :param config: Config holding tokenizer, model, device, pooling and cache flag
    :param type: split name ('train'/'dev'/'test') used in the cache file name
                 (parameter name shadows the builtin `type`; kept for
                 backward compatibility with keyword callers)
    :return: (vectors_a, vectors_b) as numpy arrays
    """
    print('将句子转化成bert向量...')
    if config.load_pickle is True:
        # Reuse previously computed and cached vectors.
        result = load_pickle('data/' + config.dataset + '_' + type + '_bert_vector.pkl')
        return result[0], result[1]
    else:
        result = []
        with torch.no_grad():
            for sents in sent_list:
                vecs = []
                for sent in tqdm(sents):
                    inputs = config.tokenizer(sent, return_tensors="pt", padding=True, truncation=True,
                                              max_length=config.max_seq_len)
                    # Move every input tensor to the configured device.
                    inputs['input_ids'] = inputs['input_ids'].to(config.device)
                    inputs['token_type_ids'] = inputs['token_type_ids'].to(config.device)
                    inputs['attention_mask'] = inputs['attention_mask'].to(config.device)

                    hidden_states = config.model(**inputs, return_dict=True, output_hidden_states=True).hidden_states

                    # Pool the layer hidden states into a single sentence vector.
                    if config.pooling == 'first_last_avg':
                        output_hidden_state = (hidden_states[-1] + hidden_states[1]).mean(dim=1)
                    elif config.pooling == 'last_avg':
                        output_hidden_state = (hidden_states[-1]).mean(dim=1)
                    elif config.pooling == 'last2avg':
                        output_hidden_state = (hidden_states[-1] + hidden_states[-2]).mean(dim=1)
                    else:
                        raise Exception("unknown pooling {}".format(config.pooling))

                    # Batch size is 1, so take the single row as the vector.
                    vec = output_hidden_state.cpu().numpy()[0]
                    vecs.append(vec)
                result.append(np.array(vecs))

            # Cache the encoded vectors for future runs with load_pickle=True.
            dump_pickle(result, 'data/' + config.dataset + '_' + type + '_bert_vector.pkl')

        return result[0], result[1]


def calc_spearmanr_corr(x, y):
    """Return the Spearman rank-correlation coefficient between x and y.

    FIX: bare `import scipy` does not guarantee the `scipy.stats` submodule
    is importable as an attribute on older scipy versions; the file now
    imports scipy.stats explicitly at the top.
    """
    return scipy.stats.spearmanr(x, y).correlation


def compute_kernel_bias(vecs, n_components):
    """计算kernel和bias
    最后的变换：y = (x + bias).dot(kernel)
    """
    vecs = np.concatenate(vecs, axis=0)
    mu = vecs.mean(axis=0, keepdims=True)
    cov = np.cov(vecs.T)
    u, s, vh = np.linalg.svd(cov)
    W = np.dot(u, np.diag(s ** 0.5))
    W = np.linalg.inv(W.T)
    W = W[:, :n_components]
    return W, -mu


def transform_and_normalize(vecs, kernel, bias):
    """Apply the whitening transform (when provided) and L2-normalize rows.

    The transform y = (x + bias) @ kernel is skipped when either kernel or
    bias is None, in which case rows are only normalized.
    """
    if kernel is not None and bias is not None:
        vecs = np.dot(vecs + bias, kernel)
    row_norms = np.sum(vecs ** 2, axis=1, keepdims=True) ** 0.5
    return vecs / row_norms


def normalize(vecs):
    """L2-normalize each row of *vecs* to unit length."""
    row_norms = ((vecs ** 2).sum(axis=1, keepdims=True)) ** 0.5
    return vecs / row_norms


def main(datasets: dict, conf: Config):
    """Score test sentence pairs by cosine similarity of BERT vectors.

    Encodes the test pairs, optionally fits a whitening transform on the
    train/dev/test vectors, normalizes, and prints the per-pair similarities.
    """
    train_df = datasets['train']
    dev_df = datasets['dev']
    test_df = datasets['test']

    # Encode the test pairs into BERT vectors first.
    test_a, test_b = sents_to_vecs([test_df['text_a'], test_df['text_b']], conf, 'test')

    if conf.use_writen is True:
        # Whitening branch: fit kernel/bias on train + dev + test vectors.
        train_a, train_b = sents_to_vecs([train_df['text_a'], train_df['text_b']], conf, 'train')
        dev_a, dev_b = sents_to_vecs([dev_df['text_a'], dev_df['text_b']], conf, 'dev')

        kernel, bias = compute_kernel_bias(
            [train_a, train_b, dev_a, dev_b, test_a, test_b],
            n_components=conf.n_components)

        # Whiten and normalize the test vectors.
        test_a = transform_and_normalize(test_a, kernel, bias)
        test_b = transform_and_normalize(test_b, kernel, bias)
    else:
        # No whitening: plain L2 normalization.
        test_a = normalize(test_a)
        test_b = normalize(test_b)

    # Row-wise dot product of unit vectors == cosine similarity per pair.
    test_sims = (test_a * test_b).sum(axis=1)
    print(test_sims)

if __name__ == '__main__':
    # FIX: `import os` previously sat here mid-file; it now lives in the
    # import block at the top of the file per PEP 8.
    # Work around duplicate-OpenMP-runtime crashes (common with torch on Windows).
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    # Build the configuration, preferring GPU when available.
    conf = Config()
    conf.device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Read the dataset splits.
    datasets = read_data(conf)
    # Load tokenizer/model onto the config as a side effect.
    load_model(conf)
    main(datasets, conf)
