import jieba
from gensim.corpora import Dictionary
from gensim.models import LdaModel

# User-defined vocabulary file: one entry per line.
userdict_file = "userdict.txt"

# Read the vocabulary with a context manager so the file handle is always
# closed, and drop blank lines — the original read().split('\n') produced a
# trailing empty string, which became an empty document in the corpus.
with open(userdict_file, "r", encoding="utf-8") as f:
    userdict_words = [line.strip() for line in f if line.strip()]

# Tokenize each vocabulary entry with jieba.
# NOTE(review): these entries are used directly as training documents rather
# than being registered via jieba.load_userdict() — confirm this is intended.
train_doc = [jieba.lcut(text) for text in userdict_words]

# Build a token dictionary over the tokenized documents, then convert each
# document into its bag-of-words representation.
dictionary = Dictionary(train_doc)
corpus = list(map(dictionary.doc2bow, train_doc))

# Fit a 20-topic LDA model on the bag-of-words corpus.
lda = LdaModel(corpus=corpus, id2word=dictionary, num_topics=20)

# File containing the document to analyze.
other_doc_file = "demo.txt"

# Read and tokenize the document; the context manager ensures the file
# handle is closed (the original open() was never closed).
with open(other_doc_file, "r", encoding="utf-8") as f:
    other_doc = jieba.lcut(f.read())

# Map the tokens to bag-of-words ids using the training dictionary.
other_doc_bow = dictionary.doc2bow(other_doc)

# Infer the topic distribution of the new document with the trained model.
other_doc_lda = lda[other_doc_bow]

# Print the inferred topic distribution: (topic_id, weight) pairs.
print("主题分布:", other_doc_lda)

# Gather the top-10 words of every topic assigned to the document,
# in topic order (duplicates across topics are kept, as before).
keywords = [
    word
    for topic_id, _weight in other_doc_lda
    for word, _ in lda.show_topic(topic_id, topn=10)
]

print("关键词:", keywords)
