import codecs
import pandas as pd
import numpy as np
from gensim import corpora
from gensim.models import LdaModel
import os

def load_data(file_path):
    """Read preprocessed, whitespace-tokenised documents from *file_path*.

    Each non-blank line of the file becomes one document, represented as
    a list of its tokens.

    Args:
        file_path: path to a UTF-8 text file, one document per line.

    Returns:
        list[list[str]]: one token list per non-blank line.
    """
    documents = []
    with codecs.open(file_path, 'r', encoding='utf8') as handle:
        for raw_line in handle:
            tokens = raw_line.split()
            # A line that is only whitespace yields no tokens and is skipped.
            if tokens:
                documents.append(tokens)
    return documents

def create_lda_model(train, num_topics=9, passes=30):
    """Build an LDA topic model from tokenised documents.

    The topic count and training passes were previously hard-coded; they
    are now parameters with the same defaults, so existing callers are
    unaffected while the model is reusable for other analyses.

    Args:
        train: list[list[str]] of tokenised documents (as from load_data).
        num_topics: number of latent topics to fit (default 9).
        passes: number of training passes over the corpus (default 30).

    Returns:
        tuple: (lda, corpus, dictionary) where `lda` is the trained
        LdaModel, `corpus` the bag-of-words representation of `train`,
        and `dictionary` the gensim token dictionary.
    """
    dictionary = corpora.Dictionary(train)
    corpus = [dictionary.doc2bow(text) for text in train]

    lda = LdaModel(corpus=corpus,
                   id2word=dictionary,
                   num_topics=num_topics,
                   passes=passes)

    return lda, corpus, dictionary

def calculate_topic_strength(lda_model, corpus):
    """Compute each topic's mean probability ("strength") over the corpus.

    Args:
        lda_model: trained model exposing `num_topics` and
            `get_document_topics(doc) -> [(topic_id, prob), ...]`.
        corpus: iterable of bag-of-words documents accepted by the model.

    Returns:
        numpy.ndarray of shape (num_topics,): the per-topic average of
        the document-topic distributions.
    """
    num_docs = len(corpus)
    num_topics = lda_model.num_topics

    # Guard: mean over zero rows would emit a RuntimeWarning and yield NaNs.
    if num_docs == 0:
        return np.zeros(num_topics)

    # Dense matrix of per-document topic distributions.
    topic_dist_matrix = np.zeros((num_docs, num_topics))
    for i, doc in enumerate(corpus):
        # get_document_topics omits topics below its probability threshold;
        # those entries simply remain 0 in the matrix.
        for topic_id, prob in lda_model.get_document_topics(doc):
            topic_dist_matrix[i, topic_id] = prob

    # Average strength of each topic across all documents.
    return topic_dist_matrix.mean(axis=0)

def create_strength_table(topic_strengths):
    """Build a DataFrame ranking topics by average strength.

    Args:
        topic_strengths: sequence of per-topic mean probabilities, indexed
            by topic number (topic i maps to label '主题{i+1}').

    Returns:
        pandas.DataFrame with columns '主题编号' and '主题强度', sorted
        strongest-first, index reset, strengths rendered as 'xx.xx%' strings.
    """
    labels = [f'主题{i+1}' for i in range(len(topic_strengths))]
    table = pd.DataFrame({'主题编号': labels, '主题强度': topic_strengths})
    # Order by raw strength before formatting, strongest topic on top.
    table = (table.sort_values('主题强度', ascending=False)
                  .reset_index(drop=True))
    # Render strengths as two-decimal percentage strings.
    table['主题强度'] = table['主题强度'].map(lambda v: f'{v:.2%}')
    return table

def save_results(df, output_dir='主题强度分析/results'):
    """Persist the strength table as an Excel sheet plus a text report.

    Args:
        df: DataFrame with '主题编号' and '主题强度' columns, already
            sorted strongest-first.
        output_dir: destination directory; created if missing.
    """
    os.makedirs(output_dir, exist_ok=True)

    # Full table as an Excel workbook.
    df.to_excel(os.path.join(output_dir, 'topic_strength_analysis.xlsx'),
                index=False, engine='openpyxl')

    # Plain-text report: header banner, then one "topic: strength" line per row.
    report_lines = [
        '主题强度分析报告\n',
        '=' * 30 + '\n\n',
        '主题强度排序（从高到低）：\n',
        '-' * 30 + '\n',
    ]
    for _, row in df.iterrows():
        report_lines.append(f'{row["主题编号"]}: {row["主题强度"]}\n')

    txt_path = os.path.join(output_dir, 'topic_strength_analysis.txt')
    with open(txt_path, 'w', encoding='utf-8') as report:
        report.writelines(report_lines)

def main():
    """Run the full pipeline: load data, fit LDA, score topics, save report."""
    print("开始计算主题强度...")

    # Load the preprocessed corpus and fit the topic model.
    documents = load_data('CNKI-output.txt')
    lda_model, corpus, dictionary = create_lda_model(documents)

    # Score topics and build the ranked table.
    strengths = calculate_topic_strength(lda_model, corpus)
    table = create_strength_table(strengths)

    # Write Excel + text outputs.
    save_results(table)

    print("分析完成！结果已保存到 主题强度分析/results 目录")
    print("- topic_strength_analysis.xlsx (Excel表格)")
    print("- topic_strength_analysis.txt (文本报告)")

if __name__ == "__main__":
    main()