# -*- coding: utf-8 -*- 
# @Time    : 2021/3/23
# @Author  : WangHong 
# @FileName: data_preprocessing.py
# @Software: PyCharm
# 进行数据预处理的文件

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
from tools.langconv import *
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split

## Plotting defaults for any figures produced by this module.
plt.style.use('seaborn')
sns.set(font_scale=2)

plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly


def load_data(train_path):
    """
    Read the labelled training CSV into a DataFrame.

    :param train_path: path of the training CSV file.
    :return: DataFrame holding the raw training rows.
    """
    # engine='python' — presumably chosen for tolerant parsing of this
    # particular CSV dump; TODO confirm it is still required.
    return pd.read_csv(train_path, engine='python')


def clean_abnormal_label(df_data):
    """
    Drop rows whose sentiment label ('情感倾向') is not one of '-1', '0', '1',
    then cast the label column to int32.

    :param df_data: DataFrame with an '情感倾向' column of string labels.
    :return: filtered DataFrame copy with integer labels.
    """
    # .copy() prevents pandas SettingWithCopyWarning: the boolean-mask
    # selection returns a view-like slice, which the next line assigns into.
    df_data = df_data[df_data['情感倾向'].isin(['-1', '0', '1'])].copy()
    df_data['情感倾向'] = df_data['情感倾向'].astype(np.int32)
    df_data.info(memory_usage='deep')  # log memory footprint for inspection
    # (the original also called df_data.head() here, whose return value was
    # discarded — removed as a no-op)
    print("不合规标签清理完成")
    return df_data


def show_word_cloud(df_data):
    """
    Render a word cloud of the '微博中文内容' column and save it as
    'a_wordcloud.png'.

    :param df_data: DataFrame with a '微博中文内容' text column.
    :return: None; shows the image via matplotlib and writes the PNG.
    """
    import wordcloud
    import re
    import jieba
    # ''.join is O(n); the original "+=" loop re-copied the accumulated
    # string on every iteration (quadratic in total text size).
    data_text = ''.join(
        text for _, text in df_data['微博中文内容'].items() if isinstance(text, str)
    )
    WC = wordcloud.WordCloud(font_path='C://Windows//Fonts/simfang.ttf', max_words=2000, height=400, width=400,
                             background_color='white', repeat=False, mode='RGBA')
    # Strip common Chinese punctuation before segmentation.
    st1 = re.sub('[，。、“”‘ ’]', '', str(data_text))
    # WordCloud expects space-separated tokens, so join the jieba segments
    # with spaces (mirroring English word spacing).
    conten = ' '.join(jieba.lcut(st1))
    con = WC.generate(conten)
    plt.imshow(con)
    plt.axis("off")
    WC.to_file('a_wordcloud.png')


def clean_abnormal_comments(text):
    """
    Clean one Weibo post: collapse reply/retweet @user headers, remove URLs,
    strip client boilerplate phrases, and normalise whitespace.

    :param text: raw post text.
    :return: cleaned text with leading/trailing whitespace stripped.
    """
    # Collapse "回复//@user:" / "//@user:" retweet headers down to a bare "@".
    text = re.sub(r"(回复)?(//)?\s*@\S*?\s*:", "@", text)
    # BUG FIX: the previous URL pattern was anchored with ^...$, so it only
    # matched when the ENTIRE post was a URL — embedded links survived.
    # An unanchored pattern removes links wherever they appear.
    text = re.sub(r'(?:https?|ftp)://\S+', "", text, flags=re.IGNORECASE)
    # Remove boilerplate inserted by the Weibo client (order preserved:
    # the "?"-suffixed variants must be tried before their prefixes).
    for junk in ("转发微博", "O网页链接?", "O网页链接", "?展开全文c", "网页链接", "展开全文"):
        text = text.replace(junk, "")
    # text = text.replace("#", "")
    text = re.sub(r'\s+', " ", text)  # merge runs of whitespace
    return text.strip()


def TraditionalToSimplified(content):
    """
    Convert Traditional Chinese text to Simplified Chinese.

    :param content: text that may contain Traditional characters.
    :return: the same text rendered in Simplified Chinese.
    """
    return Converter("zh-hans").convert(content)


def SimplifiedToTraditional(content):
    """
    Convert Simplified Chinese text to Traditional Chinese.

    :param content: text that may contain Simplified characters.
    :return: the same text rendered in Traditional Chinese.
    """
    return Converter("zh-hant").convert(content)


def saveHash(data):
    """
    Persist *data* (via str()) to the file 'savedHash' in UTF-8.

    :param data: any object; its str() representation is written.
    :return: None.
    """
    # "with" guarantees the handle is closed even if write() raises
    # (the original left the file open on error).
    with open('savedHash', 'w', encoding="utf-8") as f:
        f.write(str(data))


# def pad_sequences(text_int, maxlen):
#     """
#     填充文本的长度
#     :param text_int:
#     :param maxlen:
#     :return:
#     """
#     from pandas.core.frame import DataFrame
#     test_int = DataFrame(text_int)
#     num = maxlen - len(test_int)
#     if num <= 0:
#         return test_int[0:maxlen]
#     test_int.extend([0] * num)
#     return test_int


def data_participles(df_train):
    """
    Tokenize the '微博中文内容' column and encode each post as a fixed-length
    sequence of word indices (rank 1 = most frequent word; words ranked
    >= 10000 are mapped to 0, which is also the padding value).

    :param df_train: DataFrame with a '微博中文内容' column.
    :return: int matrix of shape (n_posts, 200) of padded word indices.
    """
    import jieba
    from collections import Counter

    # Normalise Traditional characters to Simplified.  FIX: the original
    # called the converter once PER CHARACTER, which both defeated any
    # multi-character phrase mappings and added a Python loop over every
    # character; converting each post in one call is faster and at least
    # as accurate.
    train_text_list = [
        TraditionalToSimplified(str(text))
        for _, text in df_train['微博中文内容'].items()
    ]
    print("繁体字装换完成")

    # Segment each post with jieba and tally global word frequencies.
    word_counts = Counter()
    train_texts = []
    for sentence in train_text_list:
        tokens = jieba.lcut(str(sentence), cut_all=False, HMM=True)
        train_texts.append(tokens)
        word_counts.update(tokens)
    print("分词完成")

    # Rank words by descending frequency (stable sort, like the original
    # sorted(..., reverse=True)): rank 1 = most frequent.
    ranked = word_counts.most_common()
    word_index = {word: rank for rank, (word, _) in enumerate(ranked, start=1)}

    saveHash(word_index)

    print('found %s words' % len(ranked))

    # Encode each post: keep ranks below 10000, map rarer words to 0.
    train_texts_int = []
    for tokens in train_texts:
        encoded = []
        for token in tokens:
            rank = word_index[token]
            encoded.append(rank if rank < 10000 else 0)
        train_texts_int.append(encoded)
    print("词编码完成")

    # Pad / truncate every sequence to a fixed length of 200.
    maxlen = 200
    return pad_sequences(train_texts_int, maxlen=maxlen)


def to_one_hot(labels, dimension=3):
    """
    One-hot encode integer sentiment labels.

    Labels are used directly as column indices, so with dimension=3 the
    mapping is 0 -> column 0, 1 -> column 1 and -1 -> column 2 (negative
    indices wrap around; the -1 label relies on that).

    :param labels: iterable of integer labels (here {-1, 0, 1}).
    :param dimension: width of each one-hot vector (default 3).
    :return: float array of shape (len(labels), dimension).
    """
    encoded = np.zeros((len(labels), dimension))
    for row, label in enumerate(labels):
        encoded[row][label] = 1.0
    return encoded


def get_data():
    """
    Load, clean, encode and split the labelled Weibo dataset.

    :return: (train_x, train_labels, test_x, test_labels), where the x
             arrays are padded word-index matrices and the labels are
             one-hot encoded.
    """
    TRAIN_PATH = './data/train_ dataset/nCoV_100k_train.labled.csv'
    TEST_PATH = './data/test_dataset/nCov_10k_test.csv'  # NOTE(review): unused in this function
    df_train = load_data(TRAIN_PATH)
    df_train = clean_abnormal_label(df_train)
    df_train['微博中文内容'] = df_train['微博中文内容'].astype(str).apply(clean_abnormal_comments)
    df_data = data_participles(df_train)  # encode the Chinese text as index sequences
    data_label = df_train['情感倾向']
    one_hot_data_labels = to_one_hot(data_label)
    df_train, df_test, df_train_labels, df_test_labels = train_test_split(df_data, one_hot_data_labels, test_size=0.2,
                                                                          random_state=0)
    print("训练数据集：", df_train.shape)
    print("测试数据集：", df_test.shape)
    print("训练数据集标签：", df_train_labels.shape)
    # BUG FIX: this caption previously said "训练数据集标签" (train labels)
    # although it prints the TEST label shape.
    print("测试数据集标签：", df_test_labels.shape)
    return df_train, df_train_labels, df_test, df_test_labels


if __name__ == "__main__":
    # Script entry point: run the full preprocessing pipeline once.
    get_data()
