# LDA topic model demo.
# NOTE: LDA is *unsupervised* — it learns topics from the document collection
# alone, with no labels. Once trained, feeding it a document yields that
# document's probability distribution over the learned topics.

# Fetch the full 20 Newsgroups corpus. Headers, footers and quoted replies
# are stripped so the model sees only the message bodies.
from sklearn import datasets

news_dataset = datasets.fetch_20newsgroups(
    subset='all',
    remove=('headers', 'footers', 'quotes'),
)

# The raw text documents live in the `.data` attribute (a list of str).
documents = news_dataset.data

print("数据库里有", len(documents), "篇文本")
print("第一篇是这样:\n", documents[0])

import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS

# Tokenizer
def tokenize(text):
    """Split *text* into lowercase word tokens with stopwords removed.

    Uses gensim's ``simple_preprocess`` (lowercases, strips punctuation,
    drops very short/long tokens) and filters against gensim's built-in
    English ``STOPWORDS`` set.

    :param text: raw document string
    :return: list of token strings
    """
    # Use the names imported above instead of re-spelling the full
    # gensim.utils / gensim.parsing.preprocessing paths on every call.
    return [token for token in simple_preprocess(text) if token not in STOPWORDS]

print("分词后，刚刚的文档变成了:\n", tokenize(documents[0]))

# Tokenize every document, then build a gensim Dictionary mapping each
# distinct token to an integer id.
processed_docs = [tokenize(doc) for doc in documents]
word_count_dict = gensim.corpora.Dictionary(processed_docs)
print("语料库中有", len(word_count_dict), "个不一样的词")

word_count_dict.filter_extremes(no_below=20, no_above=0.1) # keep tokens appearing in at least 20 documents AND in at most 10% of all documents
print("过滤后，语料库中还有", len(word_count_dict), "个不一样的词")

# Convert every tokenized document into a bag-of-words vector: a list of
# (token_id, count) pairs over the filtered dictionary.
bag_of_words_corpus = [word_count_dict.doc2bow(pdoc) for pdoc in processed_docs]
bow_doc1 = bag_of_words_corpus[0]

print("第一个文档的词袋描述 （元组由token_id和频率表示）:\n", bow_doc1)
# Fixed: a bare `print` is a no-op expression in Python 3 (Python-2 leftover);
# call it to actually emit the blank line.
print()
# Preview the first few (token_id, count) entries; unpacking the tuples
# directly also stays safe if the document has fewer than 5 distinct tokens.
for token_id, count in bow_doc1[:5]:
    print("文档中, topic_id {} (单词 \"{}\") 出现 {} 次[s]".format(token_id, word_count_dict[token_id], count))
print("...")

# LDA mono-core: 10 topics, 5 full passes over the corpus; id2word lets
# the model report topics as words instead of raw token ids.
lda_model = gensim.models.LdaModel(bag_of_words_corpus, num_topics=10, id2word=word_count_dict, passes=5)

# LDA multicore alternative (by default uses n_cores - 1 worker processes)
# lda_model = gensim.models.LdaMulticore(bag_of_words_corpus, num_topics=10, id2word=word_count_dict, passes=5)

# Topic distribution of the first training document, highest probability
# first. (The original had several bare expressions — notebook cell
# leftovers that are no-ops in a script — and printed this same loop
# twice; collapsed to a single pass.)
doc0_topics = sorted(lda_model[bag_of_words_corpus[0]], key=lambda tup: -1 * tup[1])
for index, score in doc0_topics:
    print("Score: {}\t Topic: {}".format(score, lda_model.print_topic(index, 10)))

# Ground-truth newsgroup label of the first document, printed so it can
# actually be compared against the inferred topics (the original computed
# it but discarded the result).
print(news_dataset.target_names[news_dataset.target[0]])

unseen_document = "In my spare time I either play badmington or drive my car"
print("要预测的句子:", unseen_document)
# Fixed: a bare `print` is a no-op expression in Python 3 (Python-2
# leftover); call it to emit the intended blank line.
print()

# Tokenize the unseen document with the same pipeline, map it onto the
# trained dictionary, and show its inferred topic distribution.
bow_vector = word_count_dict.doc2bow(tokenize(unseen_document))
for index, score in sorted(lda_model[bow_vector], key=lambda tup: -1 * tup[1]):
    print("Score: {}\t Topic: {}".format(score, lda_model.print_topic(index, 5)))

# Log perplexity of the model on the (held-in) training corpus.
print("Log perplexity of the model is", lda_model.log_perplexity(bag_of_words_corpus))