import pandas as pd
import jieba
from gensim import corpora, models
import pyLDAvis.gensim_models as gensimvis
import pyLDAvis
import matplotlib.pyplot as plt

# Load the corpus: one document per row; grouped by the 'label' column below.
df = pd.read_excel('new_document.xlsx')

# Load the stopword list as a set: membership is tested once per token during
# segmentation, so O(1) set lookups beat O(n) list scans on large vocabularies.
with open('thesaurus/stop_word.txt', 'r', encoding='utf-8') as f:
    stopwords = {line.strip() for line in f}

# Register the domain-specific user dictionary so jieba keeps these terms intact.
jieba.load_userdict('thesaurus/user_word.txt')

# Group document texts by label; each group gets its own LDA model below.
groups = df.groupby('label')['content'].apply(list)

# For each label group: tokenize, filter stopwords, build dictionary/corpus,
# train an LDA model, export a per-topic strength/keyword CSV, and save a
# pyLDAvis HTML page.
for label, content_list in groups.items():
    # Hoist stopwords into a set once per group: membership is tested for
    # every token, and set lookup is O(1). Safe even if already a set.
    stopword_set = set(stopwords)

    # Tokenize each document, dropping stopwords and single-character tokens.
    seg_list = [
        [word for word in jieba.cut(content)
         if word not in stopword_set and len(word) > 1]
        for content in content_list
    ]

    # Map tokens to ids and convert each document to bag-of-words counts.
    dictionary = corpora.Dictionary(seg_list)
    corpus = [dictionary.doc2bow(seg) for seg in seg_list]

    # Train a 5-topic LDA model; a fixed random_state makes runs reproducible.
    lda = models.ldamodel.LdaModel(
        corpus=corpus, id2word=dictionary, num_topics=5, random_state=42,
    )

    # Top-10 keywords per topic. NOTE: show_topics() does not guarantee that
    # topics come back ordered 0..4, so we carry each topic's real id through
    # instead of assuming range(5) — the original could silently misalign
    # Topic_ID, Topic_Strength and Keywords in the CSV.
    topic_keywords = lda.show_topics(num_topics=5, num_words=10, formatted=False)

    # Print each topic's keywords for quick inspection.
    for topic_id, keywords in topic_keywords:
        print(f"Label:{label}, Topic ID:{topic_id}")
        print([word[0] for word in keywords])

    # Mean per-document topic weight: inference() returns the document-topic
    # gamma matrix as its first element; column t averaged over all documents
    # serves as an (unnormalized) strength score for topic t.
    topic_strength = lda.inference(corpus)[0].sum(axis=0) / len(corpus)

    # One CSV row per topic, keyed by the topic's actual id so strength and
    # keywords stay aligned regardless of show_topics() ordering.
    df_result = pd.DataFrame({
        'Label': [label] * len(topic_keywords),
        'Topic_ID': [tid for tid, _ in topic_keywords],
        'Topic_Strength': [topic_strength[tid] for tid, _ in topic_keywords],
        'Keywords': [','.join(word[0] for word in kws) for _, kws in topic_keywords],
    })
    df_result.to_csv(f'{label}_result.csv', index=False)

    # Interactive topic visualization saved as a standalone HTML page.
    vis_data = gensimvis.prepare(lda, corpus, dictionary)
    pyLDAvis.save_html(vis_data, f'{label}_vis.html')