# -*- coding: utf-8 -*-

from keras.models import load_model 
import glob
import numpy as np
import jieba
import jieba.analyse as analyse
from keras.preprocessing.text import Tokenizer
import keras.preprocessing.sequence as sequence

def load_file_content(filename):
    """Read a UTF-8 text file and return its content with trailing whitespace stripped."""
    with open(filename, encoding='utf-8') as handle:
        return handle.read().rstrip()
    
# Chinese word segmentation
def chinese_word_cut(word):
    """Segment Chinese text with jieba (precise mode) and join tokens with spaces."""
    tokens = jieba.lcut(word, cut_all=False)
    return " ".join(tokens)

# Keyword extraction
def get_key_word(word):
    """Return the top-50 TF-IDF keywords of *word* as a space-separated string."""
    keywords = analyse.extract_tags(word, topK=50)
    return " ".join(keywords)


def load_data():
    """Read every line of every predict/*.txt file.

    Returns two parallel numpy arrays:
        - keywords extracted from each (segmented) line, for the model input;
        - the raw original lines, for display.
    """
    keywords, raw_lines = [], []
    for path in glob.glob("predict/*.txt"):
        content = load_file_content(path)
        for line in content.split("\n"):
            raw_lines.append(line)
            segmented = chinese_word_cut(line)
            keywords.append(get_key_word(segmented))
    return np.array(keywords), np.array(raw_lines)

def load_model_token_data():
    """Load the tokenizer text cached at training time, one entry per line.

    The cache guarantees the word-to-id mapping matches the one the model
    was trained with.
    """
    content = load_file_content("modal_token_cached.txt")
    return np.array(content.split("\n"))

def word2Vector(train_data, token_data):
    """Vectorize *train_data* using a tokenizer fitted on *token_data*.

    Returns an array of integer id sequences, each padded/truncated to
    exactly 50 entries.
    """
    # Build a dictionary limited to the 2000 most frequent words
    # (ranked by occurrence count in token_data).
    tokenizer = Tokenizer(num_words=2000)
    tokenizer.fit_on_texts(token_data)

    # Map each text to its list of word ids via the fitted dictionary,
    # then pad/truncate every sequence to a fixed length of 50.
    id_sequences = tokenizer.texts_to_sequences(train_data)
    return sequence.pad_sequences(id_sequences, maxlen=50)
    
# Load the trained classifier.
model = load_model('model_log_classific.h5')
# Load the word list cached during training so the generated token ids
# map one-to-one with the ones the model was trained on.
token_data = load_model_token_data()

train_data, origin_data = load_data()

x_train = word2Vector(train_data, token_data)

# Class index -> human-readable label.
type_label = {0: "bugfix", 1: "业务", 2: "优化"}

# origin_data and x_train are parallel arrays: iterate them in lockstep
# instead of indexing by position.
for title, text_x in zip(origin_data, x_train):
    # Model expects a batch dimension: (1, 50).
    y_new = model.predict(text_x.reshape(1, 50))
    # argmax over class probabilities picks the predicted class.
    result_idx = int(np.argmax(y_new[0]))
    print("%s => %s" % (title, type_label[result_idx]))
