import pandas as pd
import jieba
from gensim import corpora, models
from gensim.models.ldamodel import LdaModel


# Read the novel one physical line per row: sep='aaa' never occurs in the
# text, so each line lands intact in the single 'txt' column.
raw = pd.read_csv("金庸-射雕英雄传txt精校版.txt",
                  names = ['txt'], sep ='aaa', encoding ="utf-8" ,engine='python')

# Helper functions for the per-line columns used to detect chapter headings
def m_head(tmpstr):
    """Return the first character of *tmpstr*, or '' when it is empty."""
    return tmpstr[0] if tmpstr else ""

def m_mid(tmpstr):
    """Return the index of the first occurrence of '回 ' in *tmpstr*, -1 if absent."""
    marker = "回 "
    try:
        return tmpstr.index(marker)
    except ValueError:
        return -1

# Per-line helper columns used to recognize chapter-heading lines.
raw['head'] = raw.txt.apply(m_head)  # first character of the line
raw['mid'] = raw.txt.apply(m_mid)    # position of "回 " in the line, -1 if absent
raw['len'] = raw.txt.apply(len)      # line length

# Stateful scan: a line is treated as a chapter heading when it starts with
# "第", contains "回 ", and is short (< 30 chars).  Once 40 chapters have
# been seen, everything from the appendix marker onward is assigned to
# chapter 0 so it can be discarded below.
chapnum = 0
for i in range(len(raw)):
    if raw['head'][i] == "第" and raw['mid'][i] > 0 and raw['len'][i] < 30:
        chapnum += 1
    if chapnum >= 40 and raw['txt'][i] == "附录一：成吉思汗家族":
        chapnum = 0
    raw.loc[i, 'chap'] = chapnum
# Drop the temporary helper columns.
del raw['head']
del raw['mid']
del raw['len']

# Merge the lines of each chapter into one document: with only string
# columns present, summing per group concatenates the strings.
rawgrp = raw.groupby('chap')
chapter = rawgrp.agg(sum)
chapter = chapter[chapter.index != 0]  # chapter 0 = front matter / appendix

def read_stopwords(path="stopwords.txt"):
    """Read one stopword per line from *path* and return them as a list.

    Parameters
    ----------
    path : str
        Path of the stopword file (UTF-8, one word per line).
        Defaults to "stopwords.txt" for backward compatibility.

    Returns
    -------
    list of str

    Notes
    -----
    keep_default_na=False prevents pandas from silently converting literal
    stopwords such as "nan", "null" or "NA" into float NaN values, and
    quoting=3 (csv.QUOTE_NONE) keeps quote characters as ordinary text.
    """
    stopwords = pd.read_table(path, names=['sw'], encoding="utf-8",
                              keep_default_na=False, quoting=3)
    return stopwords['sw'].to_list()


def cut_text(text, stopwords):
    """Tokenize *text* with jieba, dropping stopwords and single-character tokens."""
    kept = []
    for token in jieba.cut(text):
        if len(token) > 1 and token not in stopwords:
            kept.append(token)
    return kept

def prepared_topics(chap_list: list, topic_num: int, passes: int):
    """Build a dictionary/BoW/TF-IDF pipeline over *chap_list* and train an LDA model.

    Parameters
    ----------
    chap_list : list of list of str
        Tokenized documents (one token list per document).
    topic_num : int
        Number of LDA topics to fit.
    passes : int
        Number of training passes over the corpus.

    Returns
    -------
    gensim.models.ldamodel.LdaModel
        The trained model (the original version discarded it).
    """
    # Dictionary and bag-of-words sparse vectors for each document.
    dictionary = corpora.Dictionary(chap_list)
    corpus = [dictionary.doc2bow(text) for text in chap_list]  # list of (id, count) lists
    # TF-IDF weighting of the corpus.
    tfidf_model = models.TfidfModel(corpus)
    corpus_tfidf = tfidf_model[corpus]
    # BUG FIX: train on the TF-IDF-weighted corpus — it was computed above but
    # the raw BoW corpus was passed instead — and return the model rather than
    # dropping it on the floor.
    lda_model = LdaModel(corpus_tfidf, id2word=dictionary,
                         num_topics=topic_num, passes=passes)
    return lda_model

if __name__ == '__main__':
    # Teach jieba the novel's proper nouns so they tokenize as single words.
    jieba.load_userdict("金庸人名.txt")
    jieba.load_userdict("金庸地名.txt")
    # Stopwords as a set for O(1) membership tests during tokenization.
    stopwords = set(read_stopwords())
    # Tokenize every chapter.
    chapter_cut_list = [cut_text(text, stopwords) for text in chapter['txt']]
    # Dictionary and bag-of-words corpus.
    dic = corpora.Dictionary(chapter_cut_list)
    bag_of_words = [dic.doc2bow(text) for text in chapter_cut_list]
    # TF-IDF model and the TF-IDF-weighted corpus the LDA models train on.
    tfidf_model = models.TfidfModel(bag_of_words)
    bag_of_words_tfidf = tfidf_model[bag_of_words]
    # Try 10, 20 and 30 topics and print each model's topic keywords.
    lda_model = None
    for i in range(10, 31, 10):
        lda_model = LdaModel(bag_of_words_tfidf, id2word=dic, num_topics=i, passes=1)
        print("预设主题个数为{}".format(i))
        for idx, words in lda_model.print_topics():
            print("主题{}的关键词条为: {}".format(idx, words))
        print()
    # For every odd-numbered chapter, find the topic it matches best
    # (uses the last model trained above, i.e. 30 topics).
    for i in range(1, 41, 2):
        chap = chapter["txt"][i]
        # BUG FIX: tokenize the query exactly like the training documents
        # (stopword/length filtering) instead of raw jieba.lcut.
        chap_bow = dic.doc2bow(cut_text(chap, stopwords))  # frequency vector
        chap_tfidf = tfidf_model[chap_bow]  # TF-IDF vector
        # BUG FIX: score the TF-IDF vector — the model was trained on TF-IDF
        # input, but the raw BoW vector was being passed.
        topics = lda_model.get_document_topics(chap_tfidf)
        # BUG FIX: report the highest-probability topic, not merely the first
        # one in the list (which is ordered by topic id, not probability).
        best = max(topics, key=lambda t: t[1])
        print('第{}章文档为主题{}的概率为: {}'.format(i, best[0], best[1]))