import os,sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.utils import shuffle
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
sys.path.append("../")
from config import *

def getDataset(max_words=5000, max_len=600):
    """Load the THUCNews CSV splits and prepare model-ready arrays.

    Reads ``cnews_train.csv`` / ``cnews_val.csv`` / ``cnews_test.csv`` from
    ``thu_data`` (each must have ``label`` and ``cutword`` columns, where
    ``cutword`` is whitespace-separated pre-segmented text), shuffles each
    split, one-hot encodes the labels, tokenizes the text, and pads every
    sequence to a fixed length.

    Args:
        max_words: Vocabulary size kept by the tokenizer (the most
            frequent words; rarer words are dropped from sequences).
        max_len: Fixed sequence length after padding/truncation.

    Returns:
        Tuple of ``(train_seq_mat, train_y, val_seq_mat, val_y,
        test_seq_mat, test_y)``: padded integer sequence matrices of shape
        ``(n_samples, max_len)`` and dense one-hot label arrays.
    """
    train_df = pd.read_csv(thu_data / 'cnews_train.csv')
    val_df = pd.read_csv(thu_data / 'cnews_val.csv')
    test_df = pd.read_csv(thu_data / 'cnews_test.csv')

    # Shuffle each split so downstream batching does not see label-sorted rows.
    train_df = shuffle(train_df)
    val_df = shuffle(val_df)
    test_df = shuffle(test_df)

    # Integer-encode labels; fit on train only so val/test labels must be a
    # subset of the training labels (unseen labels raise, surfacing data bugs).
    le = LabelEncoder()
    train_y = le.fit_transform(train_df.label).reshape(-1, 1)
    val_y = le.transform(val_df.label).reshape(-1, 1)
    test_y = le.transform(test_df.label).reshape(-1, 1)

    # One-hot encode the integer labels into dense arrays.
    ohe = OneHotEncoder()
    train_y = ohe.fit_transform(train_y).toarray()
    val_y = ohe.transform(val_y).toarray()
    test_y = ohe.transform(test_y).toarray()

    # Build the word index on the training text only. fit_on_texts splits on
    # whitespace and assigns ids by frequency (more frequent -> smaller id);
    # only the top `max_words` ids are kept when converting to sequences.
    tok = Tokenizer(num_words=max_words)
    tok.fit_on_texts(train_df.cutword)

    # Map every document to its sequence of word ids.
    train_seq = tok.texts_to_sequences(train_df.cutword)
    val_seq = tok.texts_to_sequences(val_df.cutword)
    test_seq = tok.texts_to_sequences(test_df.cutword)

    # Pad/truncate every sequence to max_len so each split stacks into a matrix.
    train_seq_mat = sequence.pad_sequences(train_seq, maxlen=max_len)
    val_seq_mat = sequence.pad_sequences(val_seq, maxlen=max_len)
    test_seq_mat = sequence.pad_sequences(test_seq, maxlen=max_len)
    print(train_seq_mat.shape)
    print(val_seq_mat.shape)
    print(test_seq_mat.shape)
    return train_seq_mat, train_y, val_seq_mat, val_y, test_seq_mat, test_y

if __name__ == '__main__':
    # Smoke-run: build the dataset splits and print their matrix shapes.
    getDataset()