#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : InferSent.py
# @Author: Richard Chiming Xu
# @Date  : 2022/1/20
# @Desc  :

'''
    1. 使用训练好的wv初始化embedding
    2. 使用与SiamCNN_LSTM相同的encoder手段
    3. 得到两个文本的encoder之后，分别使用 拼接/差/积 得到3个不同的张量表示
    4.1 对3种方式分别填入全连接层， 分别推理
    4.2 对3种方式分别填入全连接层并推理，对推理结果求和再softmax
    4.3 对3种方式拼接，填入全连接层， 统一推理
'''

import re
import jieba
import pandas as pd
import numpy as np
from gensim.models import Word2Vec
import pickle
from tqdm import tqdm


class Config():
    """Central hyperparameter / runtime configuration (plain class attributes)."""
    # --- data loading ---
    dataset = 'paws-x'
    build_vocab = False  # rebuild the vocabulary from the raw data
    build_wv = False  # retrain the word vectors
    load_with_words = True  # load pre-tokenized (already word-segmented) data
    vocab_path = 'vocab'  # directory holding the pickled vocabulary
    wv_model_path = 'wv_model'  # directory holding the saved Word2Vec model
    seq_len = 27  # fixed sentence length in tokens after padding/truncation
    # --- model ---
    model_type = 'RNN'
    embed_dim = 100  # word-vector / embedding dimension
    vocab_size = 100  # vocabulary size; overwritten after the vocab is loaded
    update_embed = True  # whether the embedding is fine-tuned during training
    load_model = False # load an existing saved model for prediction
    save_model = True # save the trained model
    # --- training ---
    device = 'cpu'
    learning_rate = 5e-4
    batch_size = 128  # batch size
    epochs = 75  # number of training epochs
    print_loss = 100  # log the training loss every N iterations


# 读取数据
# Load the Baidu stop-word list, one word per line.
def get_stopwords():
    """Return the stop-word list read from dataprocess/baidu_stopwords.txt."""
    with open('dataprocess/baidu_stopwords.txt', 'r', encoding='utf-8') as f:
        return [line.replace('\n', '') for line in f]


# jieba分词
# Tokenize one sentence with jieba, drop stop words, pad/truncate to a fixed length.
def cut(content, stop_words, config):
    """Return a fixed-length token list for *content*.

    Args:
        content: raw sentence string.
        stop_words: collection of words to discard.
        config: object exposing ``seq_len`` (the target token count).

    Returns:
        list[str] of exactly ``config.seq_len`` tokens, right-padded with 'PAD'.
    """
    # Strip punctuation/whitespace (ASCII and full-width CJK symbols).
    # Raw string fixes the invalid escape sequences (\s, \., \!) that a plain
    # literal emits as SyntaxWarning on modern Python; the regex is unchanged.
    content = re.sub(r"[\s+\.\!\/_,$%^*(+\"\']+|[+——！，。？、~@#￥%……&*（）]", "", content)

    # Full-mode segmentation, then stop-word filtering.
    result = [word for word in jieba.lcut(content, cut_all=True) if word not in stop_words]

    if len(result) < config.seq_len:  # too short: right-pad with 'PAD'
        padded = ['PAD'] * config.seq_len
        padded[:len(result)] = result
        return padded
    # long enough: truncate
    return result[:config.seq_len]


# 保存pickle
def dump_pickle(obj, file_path):
    with open(file_path, 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)


# 加载pickle
def load_pickle(file_path):
    with open(file_path, 'rb') as f:
        return pickle.load(f)


# 分词并去除停用词
def data_anaysis(df, stop_words, config):
    # 分词
    df['words_a'] = df['text_a'].apply(lambda x: cut(x, stop_words, config))
    df['words_b'] = df['text_b'].apply(lambda x: cut(x, stop_words, config))
    return df


# 将中文词转换为词典数字
def word2num(content, vocab):
    result = []
    for word in content:
        result.append(vocab[word])
    return result


# 构建词典
def build_vocab(train, dev, test, config, file_name):
    word_index = 0
    vocab_dict = {}
    dataset = [train, test, dev]
    for data in tqdm(dataset, desc='构建词典'):
        for i in range(len(data)):
            row = data.iloc[i]
            words_a = row['words_a']
            words_b = row['words_b']
            for word in words_a:
                if word not in vocab_dict.keys():
                    vocab_dict[word] = word_index
                    word_index += 1
            for word in words_b:
                if word not in vocab_dict.keys():
                    vocab_dict[word] = word_index
                    word_index += 1
    # 保存词典
    dump_pickle(vocab_dict, config.vocab_path + '/vocab_' + file_name + '.pkl')
    return vocab_dict


def data_reader(file_name, config):
    """Load one dataset's train/dev/test splits and map tokens to vocab ids.

    Depending on ``config.load_with_words``, either loads pre-tokenized
    pickles or reads the raw .tsv files, tokenizes them and caches the
    result.  Returns ``(train, dev, test, vocab)``.
    """
    print('加载数据...')
    if config.load_with_words is True:
        # pre-tokenized splits were cached by a previous run
        train = load_pickle('data/' + file_name + '_train.pkl')
        dev = load_pickle('data/' + file_name + '_dev.pkl')
        test = load_pickle('data/' + file_name + '_test.pkl')
    else:
        # load stop words
        stop_words = get_stopwords()
        train = pd.read_csv('data/' + file_name + '/train.tsv', sep='\t', names=['text_a', 'text_b', 'label'])
        dev = pd.read_csv('data/' + file_name + '/dev.tsv', sep='\t', names=['text_a', 'text_b', 'label'])
        test = pd.read_csv('data/' + file_name + '/test.tsv', sep='\t', names=['text_a', 'text_b'])

        # keep only binary labels and cast them to int
        if len(set(train['label'])) > 2:
            train = train[train['label'].isin(['0', '1'])]
            train['label'] = train['label'].astype('int')

        # BUG FIX: this guard previously re-checked train's labels; it must
        # inspect dev's labels before filtering dev.
        if len(set(dev['label'])) > 2:
            dev = dev[dev['label'].isin(['0', '1'])]
            dev['label'] = dev['label'].astype('int')

        test['label'] = -1  # placeholder label for the unlabeled test split
        train = train.dropna()
        dev = dev.dropna()

        # tokenize and drop stop words
        print('分词并去除停用词...')
        train = data_anaysis(train, stop_words, config)
        dev = data_anaysis(dev, stop_words, config)
        test = data_anaysis(test, stop_words, config)
        # cache the tokenized splits for later runs
        dump_pickle(train, 'data/' + file_name + '_train.pkl')
        # BUG FIX: the dev cache was previously written from `train`,
        # silently replacing the dev split with a copy of the training data.
        dump_pickle(dev, 'data/' + file_name + '_dev.pkl')
        dump_pickle(test, 'data/' + file_name + '_test.pkl')

    # build or load the vocabulary
    print('加载词典...')
    if config.build_vocab is True:
        vocab = build_vocab(train, dev, test, config, file_name)
    else:
        vocab = load_pickle(config.vocab_path + '/vocab_' + file_name + '.pkl')

    # convert every token to its vocabulary id
    print('将每个词转为词典数字...')
    for df in (train, dev, test):
        df['word2num_a'] = df['words_a'].apply(lambda x: word2num(x, vocab))
        df['word2num_b'] = df['words_b'].apply(lambda x: word2num(x, vocab))

    return train, dev, test, vocab


# 加载词向量
def load_wv(train, dev, test, config, file_name):
    if config.build_wv is True:
        print('构建词向量...')
        # 若不存在词向量，则重新训练训练词向量
        datalist = [train, dev, test]

        context = []
        for data in datalist:
            for i in range(len(data)):
                row = data.iloc[i]
                context.append(row['word2num_a'])
                context.append(row['word2num_b'])

        wv_model = Word2Vec(sentences=context, vector_size=config.embed_dim, window=5, min_count=1, workers=4)
        wv_model.train(context, total_examples=1, epochs=1)
        wv_model.save(config.wv_model_path + '/wv_' + file_name)
    else:
        print('加载词向量...')
        wv_model = Word2Vec.load(config.wv_model_path + '/wv_' + file_name)

    return wv_model


from torch.utils.data import DataLoader, Dataset


# 自定义数据集
class SimDataset(Dataset):
    def __init__(self, df):
        super(SimDataset, self).__init__()
        self.text_a = df['word2num_a']
        self.text_b = df['word2num_b']
        self.label = df['label']
        self.len = len(df)

    def __getitem__(self, idx):
        vector_a = np.array(self.text_a.iloc[idx], dtype='int64')
        vector_b = np.array(self.text_b.iloc[idx], dtype='int64')
        label = np.array(self.label.iloc[idx]).astype("int64")

        return {'vector_a': vector_a,
                'vector_b': vector_b,
                'label': label}

    def __len__(self):
        return self.len


from torch import nn
from torch import functional as F
import torch

class LinModel(nn.Module):
    """MLP classification head: in_features -> 256 -> 32 -> 4 -> out_features.

    Returns raw logits.  BUG FIX: the previous version applied Softmax before
    returning, but the model is trained with nn.CrossEntropyLoss, which
    applies log-softmax internally — softmaxing twice flattens the gradients
    and slows training.  Argmax-based callers (predict/evaluation) are
    unaffected because softmax is monotonic.
    """

    def __init__(self, in_features, out_features):
        super(LinModel, self).__init__()

        # three Linear+ReLU+Dropout blocks of decreasing width
        self.fc_1 = nn.Sequential(
            nn.Linear(in_features, 256),
            nn.ReLU(),
            nn.Dropout(0.02)
        )
        self.fc_2 = nn.Sequential(
            nn.Linear(256, 32),
            nn.ReLU(),
            nn.Dropout(0.02)
        )
        self.fc_3 = nn.Sequential(
            nn.Linear(32, 4),
            nn.ReLU(),
            nn.Dropout(0.02)
        )
        # final linear projection to the class scores
        self.fc_4 = nn.Sequential(
            nn.Linear(4, out_features),
        )

    def forward(self, X):
        """Map (batch, in_features) to (batch, out_features) logits."""
        X = self.fc_1(X)
        X = self.fc_2(X)
        X = self.fc_3(X)
        return self.fc_4(X)


class InferSent(nn.Module):
    """InferSent-style sentence-pair classifier.

    Both sentences are embedded with pre-trained word vectors, encoded by a
    shared 2-layer bidirectional LSTM, and the two encodings are combined by
    concatenation, element-wise product and difference; the concatenated
    features are classified by an MLP head (LinModel).
    """

    # per-direction LSTM hidden size; the BiLSTM output width is 2 * HIDDEN_SIZE
    HIDDEN_SIZE = 10

    def __init__(self, wv_mode, config):
        super(InferSent, self).__init__()
        self.device = config.device

        # Initialise the embedding matrix from the trained word vectors.
        # The Word2Vec model was trained on integer token-id sequences, so
        # wv_mode.wv[i] is the vector for vocabulary index i.
        word_vectors = torch.randn([config.vocab_size, config.embed_dim])
        for i in range(0, config.vocab_size):
            word_vectors[i, :] = torch.from_numpy(wv_mode.wv[i])
        # BUG FIX: `freeze` means "do NOT train the embedding", i.e. the
        # inverse of config.update_embed.  The old code passed update_embed
        # directly, freezing the embedding exactly when training was
        # requested, and separately forced requires_grad=False otherwise.
        self.embedding = nn.Embedding.from_pretrained(word_vectors, freeze=not config.update_embed)

        # shared bidirectional 2-layer LSTM encoder
        self.rnn = nn.LSTM(input_size=config.embed_dim, hidden_size=self.HIDDEN_SIZE,
                           num_layers=2, bidirectional=True)

        # Per-time-step feature width: cat(a,b)=4H, a*b=2H, a-b=2H -> 8H.
        # Derived from seq_len and the hidden size instead of the previous
        # hard-coded 2160 (= 27 * 80), so seq_len changes no longer break it.
        enc_dim = 2 * self.HIDDEN_SIZE
        self.lin_model = LinModel(config.seq_len * 4 * enc_dim, 2)

    def _encode(self, words):
        """Embed and BiLSTM-encode one batch of (batch, seq_len) token ids."""
        x = self.embedding(words)      # (B, L, E)
        x = x.transpose(0, 1)          # LSTM's default layout is (L, B, E)
        x, _ = self.rnn(x)             # (L, B, 2H)
        return x.transpose(0, 1)       # back to (B, L, 2H)

    def forward(self, words_a, words_b):
        """Return (batch, 2) class scores for the sentence pair."""
        x_a = self._encode(words_a)
        x_b = self._encode(words_b)

        # Three InferSent interaction features, concatenated on the feature
        # axis: (B, L, 4H) + (B, L, 2H) + (B, L, 2H) -> (B, L, 8H).
        features = torch.cat([torch.cat([x_a, x_b], 2),  # concatenation
                              torch.mul(x_a, x_b),       # element-wise product
                              torch.sub(x_a, x_b)], 2)   # difference
        # flatten each sample for the MLP head
        features = features.view(features.size(0), -1)

        return self.lin_model(features)



from torch.optim import AdamW
from torch.optim.lr_scheduler import ExponentialLR
from sklearn.metrics import f1_score, accuracy_score

def predict(config, model, test_df):
    """Run inference over *test_df* and return predicted class ids as a numpy array."""
    loader = DataLoader(SimDataset(test_df), batch_size=config.batch_size)
    model.eval()
    predicted = []
    with torch.no_grad():
        for batch in loader:
            batch = {name: tensor.to(config.device) for name, tensor in batch.items()}
            scores = model(batch['vector_a'], batch['vector_b'])
            # the position of the largest score is the predicted class
            predicted += torch.max(scores, dim=1)[1]
    return torch.tensor(predicted).numpy()

# 校验
def evaluation(config, model, val_dataloader, loss_fn):
    model.eval()
    preds = []
    labels = []
    val_loss = 0.
    # val_iterator = tqdm(val_dataloader, desc='Evaluation', total=len(val_dataloader))
    with torch.no_grad():
        for mini_batch in val_dataloader:
            mini_batch = {item: value.to(config.device) for item, value in mini_batch.items()}
            # 获取数据
            text_a = mini_batch['vector_a']
            text_b = mini_batch['vector_b']
            label = mini_batch['label']
            labels += label
            y_pred = model(text_a, text_b)

            loss = loss_fn(y_pred, label)

            val_loss += loss.item()
            # 返回逻辑值最大的位置，要么0，要么1
            _, indices = torch.max(y_pred, dim=1)
            preds += indices

    avg_val_loss = val_loss / len(val_dataloader)
    labels = torch.tensor(labels).numpy()
    preds = torch.tensor(preds).numpy()
    f1 = f1_score(labels, preds, average='macro')
    # -----------new ----------------#
    acc = accuracy_score(labels, preds)
    # -----------new ----------------#
    return avg_val_loss, f1, acc


#  训练数据
def train(train_df, dev_df, wv_model, config):
    # 创建数据集
    train_dataset = SimDataset(train_df)
    dev_dataset = SimDataset(dev_df)
    train_dataloader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True, drop_last=True)
    dev_dataloader = DataLoader(dev_dataset, batch_size=config.batch_size)

    # 创建模型
    model = InferSent(wv_model, config)

    model.to(config.device)
    # 定义优化器
    opt = AdamW(lr=config.learning_rate, params=model.parameters())
    # scheduler = ExponentialLR(opt, gamma=0.9)
    # 定义损失函数
    loss_fn = nn.CrossEntropyLoss()

    # 遍历epoch，开始训练
    for epoch in range(config.epochs):
        model.train()
        for iter_id, mini_batch in enumerate(train_dataloader):
            mini_batch = {item: value.to(config.device) for item, value in mini_batch.items()}
            # 获取数据
            text_a = mini_batch['vector_a']
            text_b = mini_batch['vector_b']
            label = mini_batch['label']
            # 训练
            y_pred = model(text_a, text_b)
            # 计算损失
            loss = loss_fn(y_pred, label)
            # 打印loss
            _, indices = torch.max(y_pred, dim=1)
            correct = torch.sum(indices == label)

            avg_loss = torch.mean(loss)
            if iter_id % config.print_loss == 0:
                print('epoch: {}, iter: {}, loss is: {}, acc is: {}'.format(epoch, iter_id, avg_loss,
                                                                            correct.item() * 1.0 / len(text_a)))
            # 反向传播
            avg_loss.backward()
            # 最小化loss,更新参数
            opt.step()
            # scheduler.step()
            # 清除梯度
            model.zero_grad()

        # 完成1个epoch，验证
        avg_val_loss, f1, acc = evaluation(config, model, dev_dataloader, loss_fn)
        print('-'*50)
        print('epoch: {}, val_loss: {}, val_f1: {}, val_acc: {}'.format(epoch, avg_val_loss, f1,acc))
        print('-' * 50)

    return model

# datasets to process, in order
data_list = ['bq_corpus', 'lcqmc', 'paws-x']


def main():
    """Train (or load) one model per dataset and write its test predictions.

    Wrapped in a function with a __main__ guard so importing this module no
    longer kicks off full training runs as a side effect.
    """
    for dataset_name in data_list:
        # per-dataset configuration
        conf = Config()
        conf.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        conf.dataset = dataset_name
        # load and preprocess the data
        train_df, dev_df, test_df, vocab = data_reader(conf.dataset, conf)

        if conf.load_model is False:
            # load the word vectors
            wv_model = load_wv(train_df, dev_df, test_df, conf, conf.dataset)
            # update the vocabulary size before the model is built
            conf.vocab_size = len(vocab)
            # train the model
            model = train(train_df, dev_df, wv_model, conf)
            if conf.save_model is True:
                torch.save(model, 'model/' + conf.dataset + '.pkl')
        else:
            # NOTE(review): torch.load of a whole pickled model executes
            # arbitrary code from the file; acceptable only because the file
            # is produced by this script itself.
            model = torch.load('model/' + conf.dataset + '.pkl')

        # run inference on the test split
        predict_labels = predict(conf, model, test_df)
        # write predictions in the expected (index, prediction) format
        test_df['index'] = test_df.index
        test_df['prediction'] = predict_labels
        test_df.to_csv('result/' + conf.dataset + '.tsv', index=False, columns=['index', 'prediction'], sep='\t')


if __name__ == '__main__':
    main()








