# -*- coding: utf-8 -*-

import numpy as np
import jieba
import jieba.analyse as analyse
import tensorflow
import keras
from keras.preprocessing.text import Tokenizer
import keras.preprocessing.sequence as sequence

# 中文分词
def chinese_word_cut(word):
    """Segment a Chinese string into words joined by single spaces.

    Uses jieba's precise mode (cut_all=False), so every character
    belongs to exactly one token.
    """
    tokens = jieba.lcut(word, cut_all=False)
    return " ".join(tokens)

# 提取关键字
def get_key_word(word):
    """Return the top-50 TF-IDF keywords of *word* as one space-joined string."""
    keywords = analyse.extract_tags(word, topK=50)
    return " ".join(keywords)

def word2Vector(train_data):
    """Turn a list of whitespace-tokenized texts into a padded index matrix.

    Builds a 2000-word vocabulary from *train_data* (words ranked by
    frequency; only the 2000 most common enter the dictionary), maps
    each text to its list of vocabulary indices, then pads/truncates
    every list to a fixed length of 50.
    """
    tokenizer = Tokenizer(num_words=2000)
    # Fit the vocabulary: words are ordered by occurrence count.
    tokenizer.fit_on_texts(train_data)
    # "Text" -> "list of integer word indices" via the fitted dictionary.
    index_sequences = tokenizer.texts_to_sequences(train_data)
    # Make every sequence exactly 50 long (truncate long, zero-pad short).
    return sequence.pad_sequences(index_sequences, maxlen=50)



# Demo corpus. (An alternative sample set:
# ["我要准备吃饭了", "我要吃饭了"])
listdata = [
        "要准备吃饭了我",
        "要吃饭了我"
        ]

print(listdata)

# Segment each raw sentence into space-separated tokens first,
# since the tokenizer splits on whitespace.
listdata_new = [chinese_word_cut(sentence) for sentence in listdata]
print(listdata_new)

# Vectorize: build the vocabulary and produce the padded index matrix.
vector = word2Vector(listdata_new)

print(vector)

# Print library versions for reproducibility.
print(keras.__version__)
print(tensorflow.__version__)
