import codecs
import pandas as pd
from gensim import corpora
from gensim.models import LdaModel
import os

def load_data(file_path):
    """Load preprocessed, whitespace-tokenized documents.

    Args:
        file_path: Path to a UTF-8 text file with one document per line,
            tokens separated by whitespace.

    Returns:
        list[list[str]]: One token list per non-blank line, in file order.
    """
    # Built-in open() handles encodings directly; codecs.open is legacy.
    # line.split() already returns a list, so no extra copy is needed.
    with open(file_path, 'r', encoding='utf8') as fp:
        return [line.split() for line in fp if line.strip()]

def create_lda_model(train):
    """Build a dictionary/BoW corpus from tokenized docs and fit an LDA model.

    Args:
        train: List of token lists, one per document.

    Returns:
        tuple: (trained gensim LdaModel, gensim Dictionary).
    """
    # Vocabulary and bag-of-words representation of every document.
    vocab = corpora.Dictionary(train)
    bow_corpus = [vocab.doc2bow(doc) for doc in train]

    # 9 topics, 30 passes over the corpus (analysis configuration).
    model = LdaModel(
        corpus=bow_corpus,
        id2word=vocab,
        num_topics=9,
        passes=30,
    )

    return model, vocab

def generate_topic_word_table(lda_model, num_words=12):
    """Build a side-by-side table of the top words for every LDA topic.

    Args:
        lda_model: Fitted model exposing ``num_topics`` and
            ``show_topic(topic_id, topn) -> list[(word, prob)]``.
        num_words: Number of top words to keep per topic.

    Returns:
        pd.DataFrame: Two columns per topic ('主题{k}_词汇', '主题{k}_概率'),
        topics laid out left to right; empty DataFrame if there are no topics.
    """
    frames = []
    for topic_id in range(lda_model.num_topics):
        topic_words = lda_model.show_topic(topic_id, num_words)

        # Unzip (word, prob) pairs into two parallel columns.
        words, probs = zip(*topic_words) if topic_words else ((), ())

        frames.append(pd.DataFrame({
            f'主题{topic_id+1}_词汇': list(words),
            f'主题{topic_id+1}_概率': list(probs),
        }))

    # Concatenate once at the end: repeatedly concat-ing onto a growing
    # DataFrame copies all prior columns each iteration (quadratic cost).
    return pd.concat(frames, axis=1) if frames else pd.DataFrame()

def save_results(topics_df, output_dir='主题分析/results'):
    """Persist the topic-word table as Excel, CSV, and a plain-text report.

    Args:
        topics_df: DataFrame with two columns per topic (word column then
            probability column), as produced by generate_topic_word_table.
        output_dir: Directory the three output files are written into
            (created if missing).
    """
    os.makedirs(output_dir, exist_ok=True)

    # Excel export (primary format).
    topics_df.to_excel(
        os.path.join(output_dir, 'topic_word_distribution.xlsx'),
        index=False,
        engine='openpyxl',
    )

    # CSV export (backup); utf-8-sig keeps Excel happy with Chinese text.
    topics_df.to_csv(
        os.path.join(output_dir, 'topic_word_distribution.csv'),
        index=False,
        encoding='utf-8-sig',
    )

    # Plain-text report: columns come in (word, probability) pairs.
    txt_path = os.path.join(output_dir, 'topic_word_distribution.txt')
    with open(txt_path, 'w', encoding='utf-8') as f:
        f.write('主题词汇分布分析报告\n')
        f.write('=' * 50 + '\n\n')

        for col in range(0, topics_df.shape[1], 2):
            f.write(f'主题 {col // 2 + 1}:\n')
            f.write('-' * 30 + '\n')

            pairs = zip(topics_df.iloc[:, col], topics_df.iloc[:, col + 1])
            for word, prob in pairs:
                f.write(f'{word:<15} {prob:.4f}\n')
            f.write('\n')

def main():
    """Run the full pipeline: load data, fit the LDA model, export results."""
    print("开始生成主题词汇分布表...")

    # Load tokenized documents, fit the model, and build the topic table.
    documents = load_data('CNKI-output.txt')
    lda, _dictionary = create_lda_model(documents)
    table = generate_topic_word_table(lda)

    # Write Excel / CSV / text outputs.
    save_results(table)

    print("分析完成！结果已保存到 主题分析/results 目录")
    print("- topic_word_distribution.xlsx (Excel格式)")
    print("- topic_word_distribution.csv (CSV格式)")
    print("- topic_word_distribution.txt (文本报告)")

# Entry-point guard: run the pipeline only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main() 