# coding:utf-8
# Author : hiicy redldw
# Date : 2019/01/29
import jieba
import keras.preprocessing.text as T
from keras.preprocessing.text import Tokenizer

# china = '我爱维维，胜过爱我自己。如果有一天，维维离开了我，我大概就只有落入了' \
#         '无边的黑暗之中，我找不到还有什么值得我开心的，无人能拯救我！'
# text = 'Does anyone want to make friends with me? ' \
#        'I want to practice my English. You might ask me what kinds of people I need,' \
#        'whatever.I just need a person who love English.'
# out1 = T.text_to_word_sequence(text)  # 按空格划分 # 中文 选jieba.cut
# # print(out1)
# chnout1 = jieba.lcut(china, cut_all=True, HMM=True)
# print(out1)
# print(T.one_hot(text[:30], 10))
#
# text1 = 'some thing to eat'
# text2 = 'thing some drink'
# text3 = 'love to some to dream'
# texts = [text1, text2, text3]
#
# # print(T.text_to_word_sequence(text1)) # 以空格区分，中文也不例外 ['some', 'thing', 'to', 'eat']
# # print(T.one_hot(text1,10))  # （10表示数字化向量为10以内的数字）
# # print(T.one_hot(text2, 10))  # [7, 9, 3, 1]
#
# tokenizer = Tokenizer(num_words=None)  # num_words:None或整数,处理的最大单词数量。少于此数的单词丢掉
# tokenizer.fit_on_texts(texts)
#
# """
# document_count 处理的文档数量
# word_index 一个dict，保存所有word对应的编号id，从1开始
# word_counts 一个dict，保存每个word在所有文档中出现的次数
# word_docs 一个dict，保存每个word出现的文档的数量
# index_docs 一个dict，保存word的id出现的文档的数量
# """
#
# print(
#     tokenizer.word_counts)  # [('some', 3), ('thing', 2), ('to', 3), ('eat', 1), ('drink', 1),('love', 1), ('dream', 1)]
# print('**********************')
# print(tokenizer.word_index)  # {'some': 1, 'to': 2, 'thing': 3, 'eat': 4, 'drink': 5, 'love': 6, 'dream': 7}
# print('**********************')
# print(tokenizer.word_docs)  # {'some': 3, 'to': 2, 'thing': 2, 'eat': 1, 'drink': 1, 'dream': 1, 'love': 1})
# print('**********************')
# print(tokenizer.index_docs)  # {1: 3, 2: 2, 3: 2, 4: 1, 5: 1, 7: 1, 6: 1})
# print('^^^^^^^^^^^^^^^^^^\n\n')
#
# """
# fit_on_texts(texts) 使用一系列文档来生成token词典，texts为list类，每个元素为一个文档。
# texts_to_sequences(texts) 将多个文档转换为word下标的向量形式,shape为[len(texts)，len(text)]
# texts_to_matrix(texts) 将多个文档转换为矩阵表示,shape为[len(texts),num_words]
# """
# # num_words=多少会影响下面的结果，行数=num_words
# print(tokenizer.texts_to_sequences(texts))  # 得到词索引[[1, 3, 2, 4], [3, 1, 5], [6, 2, 1, 2, 7]]
# print('^^^^^^^^^^^^^^^^^^\n\n')
# print(tokenizer.texts_to_matrix(texts))  #
# """[[0. 1. 1. 1. 1. 0. 0. 0.]
#  [0. 1. 0. 1. 0. 1. 0. 0.]
#  [0. 1. 1. 0. 0. 0. 1. 1.]]
# """
#
# print('^^^^^^^^^^^^^^^^^^\n\n')
# # import keras.preprocessing.sequence as S
# # print(S.pad_sequences([[1,2,3]],10,padding='post'))
# import keras.preprocessing.sequence as S
#
# z0 = S.skipgrams([1, 2, 3], 3)
# print(z0, '\n\n\n', z0[0], '\n\n', z0[1], '\n')
# res = list(zip(z0[0], z0[1]))
# for s in res:
#     print(s)
# print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\n')

# Demo: fit a Keras Tokenizer on a tiny two-sentence training corpus,
# then encode both the training sentences and two unseen sentences.
# With oov_token set, the OOV marker takes index 1 in word_index, and
# (with num_words=5) any word whose index falls outside the top slots —
# including every unseen word — is presumably mapped to that OOV id
# rather than dropped (NOTE: confirm against the installed Keras version).
train_corpus = ['some thing to eat', 'thing some drink']
unseen_corpus = ['love to some to dream', 'ldw is my lover']

tokenizer = Tokenizer(num_words=5, oov_token='<PAD>')
tokenizer.fit_on_texts(train_corpus)

# Full learned vocabulary (word_index is NOT truncated by num_words).
print(tokenizer.word_index, '-' * 5, )
# Index sequences for the texts the tokenizer was fitted on.
print(tokenizer.texts_to_sequences(train_corpus))
# Index sequences for texts containing out-of-vocabulary words.
print('-' * 5, tokenizer.texts_to_sequences(unseen_corpus))
