# coding=utf-8
from gensim import corpora
from gensim import models
import matplotlib.pyplot as plt
import demo_common
import numpy as np


def get_lda_model(dictionary, corpus, num_topics=20):
    """Train and return a gensim LDA model over *corpus*.

    :param dictionary: gensim Dictionary used as the id2word mapping
    :param corpus: bag-of-words (or tf-idf weighted) corpus to fit on
    :param num_topics: number of latent topics to learn
    :return: a fitted ``models.LdaModel``
    """
    return models.LdaModel(
        corpus=corpus,
        id2word=dictionary,
        num_topics=num_topics,
    )


def dump_topics(lda):
    """Print the top keywords of each topic in the given LDA model.

    Queries up to 50 topics with 30 words each and prints one
    (topic_id, keyword-string) tuple per line. Returns ``None``.
    """
    print('打印lda模型的关键词')
    for topic in lda.print_topics(num_topics=50, num_words=30):
        print(topic)


def get_all_distribution(lda, corpus):
    """Count, per topic, how many documents are assigned to it.

    A document counts toward every topic it belongs to with probability
    >= 0.1 (so one document can be counted under several topics).

    :param lda: fitted LDA model exposing ``get_document_topics``
    :param corpus: iterable of bag-of-words documents
    :return: dict mapping topic_id -> number of documents; topics with
             no documents are absent from the dict
    """
    # Each element is a per-document topic distribution, e.g.
    # [(4, 0.43114725), (7, 0.49003074), (9, 0.0728376)]
    class_distribution = lda.get_document_topics(bow=corpus, minimum_probability=0.1)
    dic = dict()
    for distribution in class_distribution:
        for topic_id, _prob in distribution:
            # dict.get with a default replaces the explicit in/else branch
            dic[topic_id] = dic.get(topic_id, 0) + 1

    print(dic)
    return dic


if __name__ == '__main__':
    # Train an LDA model on a tf-idf weighted corpus, dump its topics,
    # then plot how many documents fall into each topic.
    num_topics = 20
    train_set = demo_common.get_train_set(max_count=55000)
    print('train_set len', len(train_set))
    dictionary = demo_common.get_dictionary(train_set)
    corpus = demo_common.get_bow_corpus(dictionary=dictionary, train_set=train_set)
    tfidf = demo_common.get_tfidf_model(dictionary=dictionary, corpus=corpus)
    corpus_tfidf = tfidf[corpus]
    lda = get_lda_model(dictionary, corpus_tfidf, num_topics)
    dump_topics(lda)
    distribution_dic = get_all_distribution(lda=lda, corpus=corpus_tfidf)
    # BUG FIX: default missing topics to 0 — dict.get(i) alone returns
    # None for any topic with no documents, which breaks plt.bar.
    distribution_list = [distribution_dic.get(i, 0) for i in range(num_topics)]
    plt.bar(range(num_topics), distribution_list)
    plt.show()

