# -*- coding: UTF-8 -*-
__author__ = 'Jinkey'

# 显示日志的代码，需要的时候可以取消注释
# import logging
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)




from gensim import corpora, models, similarities

# The toy corpus from the classic Deerwester et al. LSI paper: nine short
# document titles used to train and query the topic models below.
documents = ["Human machine interface for lab abc computer applications",
             "A survey of user opinion of computer system response time",
             "The EPS user interface management system",
             "System and human system engineering testing of EPS",
             "Relation of user perceived response time to error measurement",
             "The generation of random binary unordered trees",
             "The intersection graph of paths in trees",
             "Graph minors IV Widths of trees and well quasi ordering",
             "Graph minors A survey"]


# ==================== Remove stop words and tokenise ====================
# Minimal English stop-word list sufficient for this toy corpus.
stoplist = 'for a of the and to in'.split()
texts = [[word for word in document.lower().split() if word not in stoplist]
         for document in documents]


# ==================== Drop words that occur only once ====================
from collections import defaultdict

# word -> number of occurrences across the whole corpus; defaultdict(int)
# makes unseen keys start at 0 so we can increment unconditionally.
frequency = defaultdict(int)

for text in texts:
    for token in text:
        frequency[token] += 1

# Keep a token only if it appears more than once anywhere in the corpus.
texts = [[token for token in text if frequency[token] > 1]
         for text in texts]

# ====== The cleaned corpus: only tokens appearing more than once remain ======
# NOTE(fix): `print '...'` is Python-2-only syntax. The parenthesised call
# form below prints exactly the same text on Python 2 and also runs on Python 3.
print('\n' + '='*30 + '清洗完的语料库' + '='*30 + '\n')
# pprint gives one document per line instead of one long row.
from pprint import pprint
pprint(texts)

# Persist the token dictionary to disk so it can be reused later.
import os

# NOTE(fix): dictionary.save() raises an IOError when the 'LSI' output
# directory does not exist on a fresh checkout, so create it up front.
if not os.path.isdir('LSI'):
    os.makedirs('LSI')

dictionary = corpora.Dictionary(texts)
dictionary.save('LSI/deerwester.dict')

# Print every token together with its integer id.
# NOTE(fix): Python-2-only `print '...'` replaced by the call form, which
# behaves identically on Python 2 and also works on Python 3.
print('\n' + '='*30 + '标签库' + '='*30 + '\n')
print(dictionary.token2id)

# ==========【推荐】获取标签库的方法2：一次处理一行而无需全部加载到内存======================
# # 获取特征值组成标签库
# dictionary = corpora.Dictionary(line.lower().split() for line in open('mycorpus.txt'))
# # 去除停用词和出现一次的单词
# stop_ids = [dictionary.token2id[stopword] for stopword in stoplist
#             if stopword in dictionary.token2id]
# once_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq == 1]  # .items() works on Python 2 and 3 (.iteritems() is Python-2-only)
# dictionary.filter_tokens(stop_ids + once_ids)
# # remove gaps in id sequence after words that were removed
# # dictionary.compactify()
# print(dictionary.token2id)

# ====================== Feed the model a test sample ======================
new_doc = "Human computer interaction"

# doc2bow converts a tokenised document (a list of words) into a sparse
# bag-of-words vector: a list of (token_id, count) pairs.
# Words never seen in the training corpus are silently ignored
# ("interaction" is dropped here because it never appears above).
new_vec = dictionary.doc2bow(new_doc.lower().split())
# NOTE(fix): Python-2-only `print '...'` replaced by the portable call form.
print('\n' + '='*30 + '弄个样本玩玩' + '='*30 + '\n')
print(new_vec)



# ============ Vectorise the training texts against the dictionary ============

# Option 1: materialise the whole bag-of-words corpus in memory — each
# document becomes a sparse list of (token_id, count) pairs.
corpus = list(map(dictionary.doc2bow, texts))
corpora.MmCorpus.serialize('LSI/deerwester.mm', corpus) # store to disk, for later use
pprint(corpus)

# 【推荐】
# 方法2：一次加载一行到内存
# class MyCorpus(object):
#     def __iter__(self):
#         for line in open('mycorpus.txt'):
#             # 假设每行代表一个文档，且单词以空格键划分，中文分词的时候这部分逻辑需要重写
#             yield dictionary.doc2bow(line.lower().split())
#
# print('\n' + '='*30 + '特征化或者说向量化的语料库（文档集）' + '='*30 + '\n')
# corpus_memory_friendly = MyCorpus()
# pprint(list(corpus_memory_friendly))





# ====================== The main event: topic models ======================
# NOTE(fix): every Python-2-only `print '...'` statement in this section is
# replaced by the call form, which prints the same text on Python 2 and
# also runs on Python 3.
print('\n' + '\n' + '='*60 + '='*60)
print('='*50 + '高潮来了：主题模型' + '='*50 + '\n')

from gensim import corpora, models, similarities

# Reload the persisted dictionary and the vectorised document set from disk.
print('\n' + '='*30 + '读出向量化的读出文档集' + '='*30 + '\n')
dictionary = corpora.Dictionary.load('LSI/deerwester.dict')
corpus = corpora.MmCorpus('LSI/deerwester.mm')
pprint(list(corpus))

# Initialise a TF-IDF model from the bag-of-words corpus.
tfidf = models.TfidfModel(corpus)

# Apply the model to the corpus: raw per-document term counts become
# TF-IDF weights.
corpus_tfidf = tfidf[corpus]
print('\n' + '='*30 + 'TF-IDF' + '='*30 + '\n')
pprint(list(corpus_tfidf))

# Train an LSI model on the TF-IDF corpus with 3 latent topics.
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=3)
# create a double wrapper over the original corpus: bow->tfidf->fold-in-lsi
corpus_lsi = lsi[corpus_tfidf]
print('\n' + '='*30 + 'LSI' + '='*30 + '\n')
pprint(list(corpus_lsi))
pprint(lsi.print_topics())

# Persist the trained LSI model (TF-IDF and LDA models persist the same
# way), then reload it to demonstrate the save/load round trip.
lsi.save('LSI/model.lsi')
lsi = models.LsiModel.load('LSI/model.lsi')

# Project every training document into LSI space and build a similarity
# index over the result.
index = similarities.MatrixSimilarity(lsi[corpus])

# The index supports the same save/load round trip.
index.save('LSI/deerwester.index')
index = similarities.MatrixSimilarity.load('LSI/deerwester.index')

# Map the earlier test sample's bag-of-words vector into LSI space.
query_lsi = lsi[new_vec]
# Query the index: one similarity score per training document. Rank the
# documents from most to least similar and show the single best match.
scores = index[query_lsi]
sims = sorted(enumerate(scores), key=lambda pair: pair[1], reverse=True)
pprint(sims[0])

# Train an LDA topic model directly on the bag-of-words corpus.
# NOTE(review): num_topics=50 is far larger than this 9-document toy corpus
# can meaningfully support; kept as-is to preserve the original behaviour.
model = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=50, update_every=1, chunksize=10000, passes=1)
# Infer the topic distribution of every training document.
topics = [model[c] for c in corpus]
# NOTE(fix): Python-2-only `print '\n'` replaced by the portable call form.
print('\n')
pprint(topics)
