import jieba
import re
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

# Stop-word list and punctuation set (ASCII + full-width Chinese punctuation).
punc = r'~`!#$%^&*()_+-=|\';":/.,?><~·！@#￥%……&*（）——+-=“：’；、。，？》《{}'
# Load stop words once at import time; `with` guarantees the file handle is
# closed (the original left it dangling).
with open("stopwords.txt", 'r', encoding='utf-8') as _f:
    stopwords = [line.strip() for line in _f]


def data_clean(line):
    """
    Clean one line of text: segment it with jieba and drop stop words and
    punctuation tokens.

    :param line: a single input string (one row of raw text)
    :return: cleaned text, tokens joined with a single trailing space each
             (empty string when every token is filtered out)
    """
    # BUG FIX: the original condition was `word not in stopwords and punc`,
    # where `punc` (a non-empty string) is always truthy — punctuation was
    # never filtered. The intended test is membership in `punc` as well.
    # Hoist the stop-word list into a set once so per-token lookup is O(1).
    stop_set = set(stopwords)
    seg_list = jieba.lcut_for_search(line)
    kept = [word for word in seg_list
            if word not in stop_set and word not in punc]
    # Reproduce the original output format: each kept token followed by a space.
    return "".join(word + " " for word in kept)


def data_preprocessing(data_path):
    """
    Preprocess the dataset: drop missing rows, shuffle, clean the title text,
    tokenize/serialize it, pad the sequences, and one-hot encode the labels.

    :param data_path: path to a CSV file with (at least) "Title" and "label"
                      columns — assumed schema, confirm against the data file
    :return: tuple of (padded sequence matrix of shape (n, 300),
                       tokenizer word_index dict,
                       one-hot label matrix)
    """
    data = pd.read_csv(data_path)
    print(data.head())
    print(data.isnull().sum())
    data = data.dropna()
    data = shuffle(data)

    # Clean the titles in one pass. The original built two throwaway
    # full-DataFrame str copies (pd.DataFrame(data.astype(str)) twice);
    # converting only the "Title" column is equivalent and far cheaper.
    data["data"] = data["Title"].astype(str).apply(data_clean)
    train_data = data["data"]
    print(train_data)

    # One-hot encode integer class labels.
    label = np.asarray(data["label"])
    y_label = tf.keras.utils.to_categorical(label)

    # Fit a tokenizer on the cleaned text and convert it to integer sequences.
    # TODO: replace plain index sequences with a word2vec embedding layer.
    tokenize = Tokenizer()
    tokenize.fit_on_texts(train_data)
    sequences = tokenize.texts_to_sequences(train_data)
    word_index = tokenize.word_index

    # Pad/truncate every sequence to a fixed length of 300 (post both ways)
    # so all rows align for the model input.
    title_pad = pad_sequences(sequences, maxlen=300, padding='post', truncating='post')
    return title_pad, word_index, y_label

