'''
    Visualize and analyze an LDA topic model with pyLDAvis.
'''

from gensim import corpora
from gensim.models import LdaModel
from gensim.corpora import Dictionary
import pyLDAvis.gensim
import jieba
import re
import logging
from gensim.models import LdaModel

# This function comes from Question2, with some minor changes; it returns the processed document as a list.
def sep_words_by_path(file_path,
                      using_stopwords=True,
                      stop_words_path='C:/Users/Administrator/Desktop/using_python/stopwords.txt'):
    '''
    Read the document at the given path and tokenize it with jieba.

    Each input line is expected to be tab-separated, with the text body in
    the second field (fields[1]); lines without a tab are skipped.  When
    using_stopwords is True, stopwords, single-character tokens and pure
    digits are filtered out.

    :param file_path: path of the document to tokenize, e.g. 'C:/exp.txt'
    :param using_stopwords: whether to filter tokens against the stopword list
    :param stop_words_path: path of the stopword file (one word per line)
    :return: a list of token strings
    '''
    ret = []
    if using_stopwords:
        # Use a set for O(1) membership tests instead of a list.
        stop_words = set()
        with open(stop_words_path, 'r', encoding='utf-8', errors='ignore') as stopwords_file:
            for line in stopwords_file:
                # rstrip('\n') instead of line[:-1]: the last line of a file
                # may lack a trailing newline, and [:-1] would eat a real char.
                stop_words.add(line.rstrip('\n'))
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as file:
            for line in file:
                body = line.rstrip('\n').split('\t')
                if len(body) < 2:
                    # Malformed line without a tab-separated body field.
                    continue
                for word in jieba.lcut(body[1]):
                    # Drop stopwords, single characters and pure digits.
                    if word in stop_words or len(word) == 1 or word.isdigit():
                        continue
                    ret.append(word)
    else:
        # BUGFIX: the original opened the file with mode 'rb' while also
        # passing encoding=, which raises ValueError ("binary mode doesn't
        # take an encoding argument") and fed bytes to jieba.  Open in text
        # mode with the gb18030 codec instead.
        with open(file_path, 'r', encoding='gb18030', errors='ignore') as file:
            for line in file:
                body = line.rstrip('\n').split('\t')
                if len(body) < 2:
                    continue
                ret.extend(jieba.lcut(body[1]))
    return ret


def lda_processing(docu_list):
    '''
    Build a bag-of-words corpus from the given document paths, train an
    LDA model on it, and open an interactive pyLDAvis visualization.

    :param docu_list: list of file paths; each file becomes one document.
    :return: None.  Side effects: saves 'weibo.dict', 'corpus_bow' and
             'weibo.model' in the working directory and launches the
             pyLDAvis visualization server.
    '''
    # Tokenize every document into a list of words.
    texts = [sep_words_by_path(docu_path) for docu_path in docu_list]
    dictionary = corpora.Dictionary(texts)
    dictionary.save('weibo.dict')
    corpus = [dictionary.doc2bow(text) for text in texts]
    corpora.MmCorpus.serialize('corpus_bow', corpus)
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    num_topics = 3
    chunksize = 2000
    passes = 5
    iterations = 10
    # BUGFIX: dictionary.id2token is an empty dict until the dictionary has
    # been indexed at least once, so the original handed LdaModel an empty
    # id2word mapping.  LdaModel accepts the Dictionary object directly.
    model = LdaModel(
        corpus=corpus,
        id2word=dictionary,
        chunksize=chunksize,
        alpha='auto',
        eta='auto',
        iterations=iterations,
        num_topics=num_topics,
        passes=passes,
    )
    model.save('weibo.model')
    vis = pyLDAvis.gensim.prepare(model, corpus, dictionary)
    pyLDAvis.show(vis)

