# 学习课程：中文文本挖掘
# 学习学生：姜浩然

import jieba
import jieba.analyse
import pandas as pd
from gensim import corpora,models
from gensim.models.ldamodel import LdaModel

# Load the novel one physical line per row: sep='aaa' never occurs in the
# text, so the whole line lands in the single 'txt' column.  The file is
# GBK-encoded (common for Chinese text files).  NOTE(review): hard-coded
# absolute Windows path — confirm it exists on the target machine.
raw = pd.read_csv(r"D:\python\金庸-射雕英雄传txt精校版.txt",
                      names=['txt'], sep='aaa', encoding="GBK", engine='python')

# 章节判断用变量预处理
def predeal_variable(raw):
    """Add the helper columns judge_chapter uses to detect chapter headings.

    Mutates ``raw`` in place, adding:
        head: first character of each line (headings start with "第").
        mid:  position of "回 " in the line, -1 when absent.
        len:  line length (headings are short, < 30 chars).
        chap: chapter number, initialised to 0.
    """
    # Vectorized .str accessors replace the per-row apply() calls; the
    # dangling raw.head(50) (result discarded) was dead code and is removed.
    raw['head'] = raw.txt.str[:1]
    raw['mid'] = raw.txt.str.find("回 ")
    raw['len'] = raw.txt.str.len()
    raw['chap'] = 0

#章节判断
def judge_chapter(raw):
    """Assign a chapter number to every line, then drop the helper columns.

    A line counts as a chapter heading when it starts with "第", contains
    "回 ", and is shorter than 30 characters; every following line inherits
    the current chapter number.  Once past chapter 40, the appendix marker
    resets the number to 0 so appendix text is excluded from the chapters.

    Returns the mutated DataFrame (assumes a default RangeIndex, as
    produced by the read_csv above).
    """
    chapnum = 0
    for i in range(len(raw)):
        if raw['head'][i] == "第" and raw['mid'][i] > 0 and raw['len'][i] < 30:
            chapnum += 1
        if chapnum >= 40 and raw['txt'][i] == "附录一：成吉思汗家族":
            chapnum = 0  # appendix after the 40th chapter is not a chapter
        raw.loc[i, 'chap'] = chapnum

    # Drop the temporary columns added by predeal_variable; the dangling
    # raw.head(50) (result discarded) was dead code and is removed.
    raw.drop(columns=['head', 'mid', 'len'], inplace=True)
    return raw

#提取章节内容
def extract_chapter(raw,chapter_num):
    return "".join([raw['txt'][i] for i in range(len(raw)) if raw['chap'][i] == chapter_num])

def word_cut(alltext, stoplist=None):
    """Segment ``alltext`` with jieba, dropping stopwords and 1-char tokens.

    Parameters:
        alltext:  raw text of one chapter.
        stoplist: optional iterable of stopwords.  When None (the original
                  behaviour) the stopword file is loaded from disk — note
                  this costs one file read per call, so callers in a loop
                  should load it once and pass it in.

    Returns a list of tokens longer than one character.
    """
    if stoplist is None:
        stoplist = pd.read_csv(r"D:\python\停用词.txt", names=['w'], sep='aaa',
                               encoding='utf-8', engine='python').w
    # set membership is O(1); the original scanned a list per token
    stopwords = set(stoplist)
    return [w for w in jieba.cut(alltext)
            if w not in stopwords and len(w) > 1]

def gensim_keyword(chapter):
    """Train LDA models over the 40 chapters, print their topics, then
    report the topic mixture of every odd-numbered chapter.

    Parameters:
        chapter: DataFrame with 'txt' and 'chap' columns, i.e. the output
                 of judge_chapter.
    """
    # cleanchap[k] holds the text of chapter k + 1 (0-based list).
    cleanchap = [extract_chapter(chapter, num) for num in range(1, 41)]
    chaplist = [word_cut(text) for text in cleanchap]

    # Dictionary and bag-of-words corpus for the 40 chapter documents.
    # (The original also built an unused TfidfModel; removed as dead code.)
    dictionary = corpora.Dictionary(chaplist)
    corpus = [dictionary.doc2bow(text) for text in chaplist]

    # Train with increasing topic counts; the distinct inner-loop variable
    # fixes the original shadowing of the outer loop index.
    ldamodel = None
    for num_topics in [10, 20, 30]:
        ldamodel = LdaModel(corpus, id2word=dictionary,
                            num_topics=num_topics, passes=2)
        print('预设' + str(num_topics) + '个主题成功')
        for novel_topic in ldamodel.print_topics():
            print(novel_topic)

    # Retrieve, with the last (30-topic) model, the topics closest to each
    # odd-numbered chapter.
    for i in range(1, 41, 2):
        # fix: cleanchap is 0-based, so chapter i lives at index i - 1
        # (the original read cleanchap[i], i.e. chapter i + 1, while the
        # message claimed chapter i).
        query = cleanchap[i - 1]
        query_bow = dictionary.doc2bow(word_cut(query))  # bow vector for the doc
        # fix: get_document_topics was called twice with the first result
        # discarded — one full inference pass wasted per chapter.
        topic = ldamodel.get_document_topics(query_bow)
        print(f'第{i}回文档的主题为：{topic}')

if __name__ == "__main__":
    # Pipeline: tag each line with its chapter, then run LDA extraction.
    predeal_variable(raw)
    gensim_keyword(judge_chapter(raw))
    print('至此，文档主题全部提取结束')

