# -*- coding: utf-8 -*-
# https://www.bilibili.com/video/av97491076
# https://blog.csdn.net/Chile_Wang/article/details/104305712
import os
import glob
import numpy as np
import jieba
import jieba.analyse as analyse
from keras.preprocessing.text import Tokenizer
import keras.preprocessing.sequence as sequence
from keras.models import Sequential 
from keras.layers.embeddings import Embedding
from keras.layers import Dense, Dropout, Activation, Flatten, MaxPool1D, Conv1D

def write(filename, content):
    """Overwrite *filename* with *content*, encoded as UTF-8."""
    with open(filename, 'w', encoding='utf-8') as out_file:
        out_file.write(content)
        
def load_file_content(filename):
    """Read *filename* as UTF-8 text and return it with trailing whitespace stripped."""
    with open(filename, encoding='utf-8') as in_file:
        text = in_file.read()
    return text.rstrip()

def num_to_one_hot(listdata, class_num):
    """Convert a sequence of integer class labels into a one-hot matrix.

    Returns a float array of shape (len(listdata), class_num) with a single
    1.0 per row, at the column given by that row's label.
    """
    labels = np.array(listdata)
    one_hot = np.zeros((labels.shape[0], class_num))
    for row_idx, label in enumerate(labels):
        one_hot[row_idx, label] = 1
    return one_hot
    
# Chinese word segmentation
def chinese_word_cut(word):
    """Segment a Chinese string with jieba (precise mode) and join the tokens with spaces."""
    tokens = jieba.lcut(word, cut_all=False)
    return " ".join(tokens)

# Keyword extraction
def get_key_word(word):
    """Return the top-50 TF-IDF keywords of *word* as one space-separated string."""
    keywords = analyse.extract_tags(word, topK=50)
    return " ".join(keywords)



def load_data():
    """Load every ``data/*.txt`` file as one class of training samples.

    Each non-file line becomes one sample: the line is segmented with jieba,
    reduced to its top keywords, and labelled with the owning file's class id.

    Returns:
        tuple of (train_data, label_data, origin_data, category):
        train_data  -- np.array of space-joined keyword strings
        label_data  -- np.array of integer class ids, parallel to train_data
        origin_data -- np.array of the raw input lines
        category    -- dict mapping file path -> class id
    """
    train_data = []
    origin_data = []
    label_data = []
    category = {}
    # sorted() fixes the file -> class-id assignment: glob.glob returns
    # paths in arbitrary, platform-dependent order, which would make the
    # label meaning of a saved model nondeterministic between runs.
    for class_num, f in enumerate(sorted(glob.glob("data/*.txt"))):
        data = load_file_content(f)
        for line in data.split("\n"):
            word_cut = chinese_word_cut(line)
            key_word = get_key_word(word_cut)
            origin_data.append(line)
            train_data.append(key_word)
            label_data.append(class_num)
        category[f] = class_num

    return np.array(train_data), np.array(label_data), np.array(origin_data), category


def word2Vector(train_data, num_words=2000, maxlen=50):
    """Index the texts: fit a Tokenizer and map each text to a fixed-length id sequence.

    Args:
        train_data: iterable of space-separated token strings.
        num_words: vocabulary size; only the most frequent ``num_words`` words
            are kept (default 2000, matching the Embedding ``input_dim``).
        maxlen: sequences are padded/truncated to this length
            (default 50, matching the Embedding ``input_length``).

    Returns:
        2-D numpy array of shape (len(train_data), maxlen).

    NOTE(review): the fitted Tokenizer is discarded here, so prediction code
    must re-fit an identical tokenizer from the cached texts written by
    saveTokenCached to reproduce the same word -> id mapping.
    """
    token = Tokenizer(num_words=num_words)
    # Builds the word index ordered by frequency; texts_to_sequences then
    # uses only the top `num_words` entries.
    token.fit_on_texts(train_data)
    id_sequences = token.texts_to_sequences(train_data)
    # Pad short sequences / truncate long ones to a uniform length.
    x_train = sequence.pad_sequences(id_sequences, maxlen=maxlen)
    return x_train
    
def build_model(class_num=3, vocab_size=2000, input_length=50):
    """Build and compile the text-classification network.

    Architecture: Embedding -> Dropout -> Flatten -> Dense(relu) ->
    Dropout -> Dense(softmax).

    Args:
        class_num: number of output classes (default 3, the original value).
        vocab_size: dictionary size; must match the Tokenizer's num_words.
        input_length: length of each padded input sequence; must match the
            maxlen used by word2Vector.

    Returns:
        A compiled keras Sequential model expecting integer class labels
        (sparse_categorical_crossentropy).
    """
    model = Sequential()
    model.add(Embedding(output_dim=32,           # word-vector dimension
                        input_dim=vocab_size,    # size of the vocabulary
                        input_length=input_length))  # length of each id sequence

    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(units=256, activation="relu"))
    model.add(Dropout(0.25))
    model.add(Dense(units=class_num, activation="softmax"))

    print(model.summary())  # print the model layout
    # sparse_* loss: labels are plain integer ids, not one-hot vectors.
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])
    return model

def saveTokenCached(train_data):
    """Persist the segmented training texts to 'modal_token_cached.txt',
    one text per line (each line newline-terminated), so prediction code
    can re-fit a Tokenizer and reproduce the same word -> id mapping.
    """
    # join builds the payload in O(n); repeated string += is quadratic.
    write('modal_token_cached.txt', "".join(each + "\n" for each in train_data))



# ---- Training script: load data, shuffle, vectorize, train, save ----

# Load the training data (one class per data/*.txt file).
train_data, label_data, origin_data, category = load_data()
print(train_data.shape) # e.g. (1912,)
print(label_data.shape) # e.g. (1912,) -- 1-D integer class ids (matches sparse_categorical_crossentropy)
print(category)

print('-------------------------------------------------------')
print('原顺序x:', train_data)
print('原顺序y:', label_data)
print('-------------------------------------------------------')

# Shuffle the samples; the same permutation is applied to x, y and the raw
# lines so they stay aligned. NOTE(review): not seeded, so the shuffle (and
# the validation split below) differs between runs.
permutation = list(np.random.permutation(train_data.shape[0]))
origin_data = origin_data[permutation]
train_data = train_data[permutation]
label_data = label_data[permutation]


print('打乱后x', train_data)
print('打乱后y', label_data)
print('-------------------------------------------------------')
#print(chinese_word_cut("我要去吃饭")) # 我要 去 吃饭
#print(get_key_word(chinese_word_cut("我要去吃饭"))) # 我要 吃饭

# Cache the segmented texts so that prediction code can re-fit a Tokenizer
# and obtain the exact same word -> id mapping used for training.
saveTokenCached(train_data)

# Index the training texts (fit tokenizer, pad/truncate each to length 50).
x_train = word2Vector(train_data)
y_train = label_data

# Build and compile the classification model.
model = build_model()

batch_size = 30
epochs = 20

# Train.
history = model.fit(
	  x_train,
	  y_train,
	  batch_size = batch_size,
	  epochs = epochs,
	  verbose = 2,
	  validation_split = 0.2  # final 20% of the (shuffled) data is held out for validation
)


# Save the trained model.
model.save('model_log_classific.h5')  # writes HDF5 file 'model_log_classific.h5'

# Metrics recorded in history by fit():
#   loss / accuracy         -- training-set loss / accuracy
#   val_loss / val_accuracy -- validation-set loss / accuracy