"""
@Description :   数据处理
@Author      :   python_assignment_group
@Time        :   2022/10/30 07:24:15
"""

import os
import shutil

import matplotlib
import numpy as np
import pandas as pd
import torch
from gensim.models import Word2Vec
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, TensorDataset
from torchtext.data import get_tokenizer
from torchtext.vocab import GloVe
from transformers import BertTokenizer

matplotlib.use("Agg")  # headless backend: figures are only written to files, never shown
# SciencePlots style set; "cjk-sc-font" presumably enables CJK glyph support — TODO confirm
plt.style.use(["science", "grid", "no-latex", "cjk-sc-font"])
plt.rcParams["font.sans-serif"] = ['KaiTi']  # Chinese font for axis labels/titles
plt.rcParams["axes.unicode_minus"] = False  # render minus signs correctly with CJK fonts
plt.rcParams["figure.figsize"] = (15.2, 8.8)  # default figure size in inches
fontdict = {"fontsize": 15}  # shared font settings used by all plots below


class DataSplit:
    """
    Split the raw tab-separated dataset into several random train/valid
    folds on disk and plot basic statistics (per-class counts and the
    sentence-length histogram) for each fold.
    """

    def __init__(self, data_name: str, test_percent: float = 0.1,
                 data_split_num: int = 3, resplit_data: bool = True):
        """
        data_name: path to the raw data file (TSV with a header row;
            only columns 2 and 3 are read — sentence and label).
        test_percent: fraction of rows held out as the validation set.
        data_split_num: number of independent random splits to create.
        resplit_data: whether to (re)write the splits to disk.
        """
        # FIX: `assert` is stripped under `python -O`; raise explicitly.
        if not os.path.exists(data_name):
            raise FileNotFoundError("文件不存在")
        self.data = pd.read_csv(data_name, sep="\t",
                                header=0, usecols=[2, 3])  # raw data frame
        self.resplit_data = resplit_data  # whether to regenerate splits
        self.test_percent = test_percent  # validation fraction
        self.data_split_num = data_split_num  # number of folds

    @staticmethod
    def make_dir(path: str):
        """Create a fresh directory, deleting any existing one first."""
        if os.path.exists(path):
            shutil.rmtree(path)
        os.makedirs(path)

    def split_data(self):
        """
        Randomly split the raw data `data_split_num` times with
        train_test_split and write each fold to data/data<i>/.
        """
        if not self.resplit_data:  # FIX: truthiness instead of `== True`
            return
        for i in range(1, 1 + self.data_split_num):
            train, valid = train_test_split(
                self.data, test_size=self.test_percent)
            split_path = os.path.join("data", "data" + str(i))
            self.make_dir(split_path)
            pd.DataFrame(train).to_csv(
                os.path.join(split_path, "train.csv"), index=False)
            pd.DataFrame(valid).to_csv(
                os.path.join(split_path, "valid.csv"), index=False)

    def load_data(self, data_num: int):
        """
        Load fold `data_num` from disk; split_data must have run first.
        Returns (train, valid) DataFrames; both empty when files are missing.
        """
        split_path = os.path.join("data", "data" + str(data_num))
        if not os.path.exists(split_path):
            print("未找到对应的文件夹！")
            return pd.DataFrame([]), pd.DataFrame([])
        train_name = os.path.join(split_path, "train.csv")
        valid_name = os.path.join(split_path, "valid.csv")
        if not os.path.exists(train_name) or not os.path.exists(valid_name):
            print("未找到对应的文件！")
            return pd.DataFrame([]), pd.DataFrame([])
        train = pd.read_csv(train_name, header=0)
        valid = pd.read_csv(valid_name, header=0)
        print("第{}个数据集，训练集大小：{}".format(data_num, len(train)))
        print("第{}个数据集，验证集大小：{}".format(data_num, len(valid)))
        return train, valid

    def data_preprocess(self, data_num: int, mode: int, data=None):
        """
        Plot per-class sentence counts and the sentence-length histogram.

        data_num: fold number; 0 means use the raw data held by this object.
        mode: 0 = training set, 1 = validation set (affects save path only).
        data: DataFrame to analyze when data_num != 0.
        """
        # FIX: default argument was `pd.DataFrame([])`, built eagerly at
        # function-definition time; use None and build lazily instead.
        if data is None:
            data = pd.DataFrame([])
        path = os.path.join(
            "figs", "data_preprocess",
            "data" + str(data_num) + ("valid" if mode == 1 else "train"))
        if data_num == 0:
            sentences = self.data.iloc[:, 0]
            labels = self.data.iloc[:, 1]
            path = os.path.join("figs", "data_preprocess", "raw_data")
        else:
            sentences = data.iloc[:, 0]
            labels = data.iloc[:, 1]
        self.make_dir(path)

        # Per-class counts (labels assumed to be 0..4 — TODO confirm against
        # the raw data) and whitespace-token sentence-length histogram.
        classifier_labels = [sum(labels == i) for i in range(5)]
        len_sentence = sentences.apply(lambda x: len(x.split()))
        max_sentences_len = max(len_sentence)
        classifier_sentences = [sum(len_sentence == i)
                                for i in range(1, 1 + max_sentences_len)]

        # NOTE(review): titles/filenames say "训练数据" even when mode == 1;
        # kept byte-identical since output paths may be relied on downstream.
        plt.title("训练数据类别统计", fontdict=fontdict)
        plt.xlabel("类别", fontdict=fontdict)
        plt.ylabel("数量", fontdict=fontdict)
        for i, count in enumerate(classifier_labels):
            plt.bar(i, count)
            plt.text(i - 0.1, count + 1000, str(count), fontdict=fontdict)
        plt.savefig(os.path.join(path, "训练数据类别统计.pdf"))
        plt.clf()

        plt.title("训练数据词句长度统计", fontdict=fontdict)
        plt.xlabel("长度", fontdict=fontdict)
        plt.ylabel("数量", fontdict=fontdict)
        for i, count in enumerate(classifier_sentences):
            plt.bar(1 + i, count)
        plt.savefig(os.path.join(path, "训练数据词句长度统计.pdf"))
        plt.clf()

    def __call__(self):
        """Plot raw-data statistics, then write the splits to disk."""
        self.data_preprocess(data_num=0, mode=0)
        self.split_data()


def mark_sentence(data, tokenizer):
    """
    Tokenize sentences with a BERT-style tokenizer into padded tensors.

    data: DataFrame whose first column holds sentences and second labels.
    tokenizer: object exposing ``encode`` / ``encode_plus``
        (e.g. transformers.BertTokenizer).
    Returns (input_ids, attention_masks, labels) as torch tensors.
    """
    sentences = data.iloc[:, 0]
    labels = data.iloc[:, 1]

    # Longest encoded sentence (special tokens included) drives the pad
    # length. Single pass with a generator; default=0 guards empty input.
    max_len = max(
        (len(tokenizer.encode(s, add_special_tokens=True)) for s in sentences),
        default=0,
    )
    print("数据集中句子的最大长度是{}".format(max_len))

    input_ids = []
    attention_masks = []

    for sentence in sentences:
        # encode_plus: tokenize, add [CLS]/[SEP], map tokens to ids,
        # pad/truncate to max_length, and build the attention mask.
        encoded_dict = tokenizer.encode_plus(
            sentence,
            add_special_tokens=True,
            max_length=20 + max_len,  # headroom kept from original code
            # FIX: `pad_to_max_length=True` is deprecated in transformers;
            # `padding="max_length"` + `truncation=True` is the modern form.
            padding="max_length",
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt',  # return PyTorch tensors
        )
        input_ids.append(encoded_dict['input_ids'])
        attention_masks.append(encoded_dict['attention_mask'])

    # Stack the per-sentence (1, L) tensors into (N, L).
    input_ids = torch.cat(input_ids, dim=0)
    attention_masks = torch.cat(attention_masks, dim=0)
    # .to_numpy() avoids the deprecated pandas-Series -> tensor path.
    labels = torch.tensor(labels.to_numpy())

    return input_ids, attention_masks, labels


class BertDataset(Dataset):
    """
    Dataset wrapper turning raw sentences/labels into BERT-ready tensors.

    NOTE(review): now subclasses torch.utils.data.Dataset for consistency
    with the other dataset classes in this module; the exposed
    __len__/__getitem__ interface is unchanged.
    """

    def __init__(self, data):
        super().__init__()
        self.data = data  # raw DataFrame: sentences in col 0, labels in col 1
        self.dataset = self.get_datasets()

    def get_datasets(self):
        """Build a TensorDataset of (input_ids, attention_masks, labels)."""
        # Load the pretrained BERT tokenizer (lower-cased vocabulary).
        tokenizer = BertTokenizer.from_pretrained(
            'bert-base-uncased', do_lower_case=True)

        # Sentences -> padded id/mask/label tensors.
        input_ids, attention_masks, labels = mark_sentence(
            self.data, tokenizer)

        return TensorDataset(input_ids, attention_masks, labels)

    def __len__(self):
        # FIX: idiomatic len() instead of explicit __len__() call.
        return len(self.dataset)

    def __getitem__(self, index):
        # FIX: idiomatic indexing instead of explicit __getitem__() call.
        return self.dataset[index]


class FastTextDataset(Dataset):
    """
    GloVe-embedded dataset for the FastText model.

    Each sentence is tokenized with the basic_english tokenizer, embedded
    with 100-d GloVe vectors, then truncated/zero-padded to 90 tokens,
    yielding a (90, 100) float tensor per sample. Sentences that tokenize
    to nothing are skipped (their labels are skipped too).
    """

    def __init__(self, data):
        super().__init__()
        self.data = data
        self.sentences = data.iloc[:, 0]
        self.tokenizer = get_tokenizer('basic_english')
        self.glove = GloVe(name='6B', dim=100)  # pretrained 6B/100-d vectors
        self.sentence_vecs = []
        self.labels = []
        self.preprocess()

    def preprocess(self):
        """Embed every sentence into a fixed-size (90, 100) matrix."""
        max_len = 90  # fixed sentence length in tokens
        for i, sentence in enumerate(self.sentences):
            tokens = self.tokenizer(sentence)
            if not tokens:
                continue  # skip sentences with no tokens
            vec = self.glove.get_vecs_by_tokens(tokens, True)
            # FIX: the original crashed with a negative dimension in
            # torch.zeros whenever a sentence exceeded 90 tokens;
            # truncate before padding.
            vec = vec[:max_len]
            if vec.shape[0] < max_len:
                vec = torch.cat((vec, torch.zeros(
                    (max_len - vec.shape[0], vec.shape[1]))))
            self.sentence_vecs.append(vec)
            self.labels.append(self.data.iloc[i, 1])

    def __len__(self):
        return len(self.sentence_vecs)

    def __getitem__(self, index):
        return self.sentence_vecs[index], self.labels[index]


class Word2VecDataset(Dataset):
    """
    Word2Vec-embedded dataset for the Word2VecCNN model.

    A Word2Vec model is trained on the tokenized corpus itself; each
    sentence becomes a fixed (90, 100) matrix — truncated to 90 tokens
    and zero-padded when shorter.
    """

    def __init__(self, data):
        super().__init__()
        self.data = data  # keep the original DataFrame around
        self.sentence_vecs = []  # filled by initialize()
        self.labels = []  # filled by initialize()
        self.tokenizer = get_tokenizer('basic_english')
        self.initialize()

    def initialize(self):
        """Train Word2Vec on the corpus and build fixed-size matrices."""
        sentence_len_max = 90  # fixed sentence length in tokens
        sentences = self.data.iloc[:, 0].apply(
            lambda s: self.tokenizer(s))
        # Train Word2Vec on this corpus; every word maps to a 100-d vector.
        model = Word2Vec(sentences, min_count=1)
        for i, sentence in enumerate(sentences):
            # FIX: truncate to 90 tokens. The original padded via
            # range(sentence_len_max - sentence_len), which is a silent
            # no-op for longer sentences, producing matrices taller than
            # 90 rows and ragged tensors that break batching downstream.
            sentence = sentence[:sentence_len_max]
            # model.wv[word] is a NumPy vector for that word.
            vec = [model.wv[word] for word in sentence]
            # Pad short sentences with 100-d zero vectors up to 90 rows.
            vec.extend(np.zeros(100)
                       for _ in range(sentence_len_max - len(sentence)))
            self.sentence_vecs.append(torch.tensor(np.array(vec)))
            self.labels.append(self.data.iloc[i, 1])

    def __getitem__(self, index):
        return self.sentence_vecs[index], self.labels[index]

    def __len__(self):
        return len(self.sentence_vecs)


if __name__ == "__main__":
    # Configuration: reuse splits already on disk, hold out 10% for validation.
    resplit_data = False
    test_percent = 0.1
    raw_data_path = os.path.join("raw_data", "train.tsv")

    # Plot raw-data statistics and (when requested) regenerate the splits.
    splitter = DataSplit(
        raw_data_path, test_percent, resplit_data=resplit_data)
    splitter()

    # Plot per-fold statistics for both halves of each of the three folds.
    for fold in range(1, 4):
        train_df, valid_df = splitter.load_data(fold)
        splitter.data_preprocess(fold, mode=0, data=train_df)
        splitter.data_preprocess(fold, mode=1, data=valid_df)
