import codecs
import os
from collections import Counter, defaultdict

import numpy as np
import pandas as pd
from gensim import corpora
from gensim.models import LdaModel

def load_data(file_path):
    """Load the preprocessed, whitespace-tokenized corpus.

    Args:
        file_path: path to a UTF-8 text file, one document per line,
            with tokens separated by whitespace.

    Returns:
        list[list[str]]: one token list per non-blank line.
    """
    train = []
    # Built-in open handles the encoding directly; codecs.open is legacy.
    with open(file_path, 'r', encoding='utf8') as fp:
        for line in fp:
            tokens = line.split()
            if tokens:  # skip blank / whitespace-only lines
                # The original copied the split list element-by-element;
                # the split result is already a fresh list.
                train.append(tokens)
    return train

def create_lda_model(train):
    """Fit an LDA topic model on the tokenized corpus.

    Args:
        train: list of token lists, one per document.

    Returns:
        (lda, corpus, dictionary): the fitted model, the bag-of-words
        corpus, and the gensim dictionary used to encode it.
    """
    dictionary = corpora.Dictionary(train)
    bow_corpus = [dictionary.doc2bow(document) for document in train]

    # 9 topics to stay consistent with CNKI-LDAVIEW.py.
    lda = LdaModel(
        corpus=bow_corpus,
        id2word=dictionary,
        num_topics=9,
        passes=30,
    )
    return lda, bow_corpus, dictionary

def extract_topic_words(lda_model, num_words=12):
    """Collect the top keywords for every topic in the model.

    Args:
        lda_model: a fitted gensim LdaModel (needs `num_topics` and
            `show_topic`).
        num_words: number of keywords to pull per topic.

    Returns:
        dict mapping topic id -> list of (word, probability) pairs.
    """
    return {
        topic_id: lda_model.show_topic(topic_id, num_words)
        for topic_id in range(lda_model.num_topics)
    }

def suggest_topic_names(topic_words):
    """Derive a display name for each topic from its strongest keywords.

    Args:
        topic_words: {topic_id: [(word, weight), ...]} ordered by weight.

    Returns:
        dict mapping topic id -> name built from the top three words.
    """
    # The word lists are already weight-ordered, so the first three
    # entries are the most representative terms.
    return {
        topic_id: "、".join(token for token, _weight in ranked[:3])
        for topic_id, ranked in topic_words.items()
    }

def create_word_topic_mapping(lda_model, corpus, dictionary):
    """Map each vocabulary word to its dominant topic across the corpus.

    Args:
        lda_model: a fitted gensim LdaModel.
        corpus: bag-of-words corpus (list of [(word_id, count), ...]).
        dictionary: gensim dictionary mapping word ids to words.

    Returns:
        dict mapping word -> the topic id it is most often assigned to.
    """
    word_topic_votes = defaultdict(list)

    # Collect, per word, the most likely topic in each document where
    # it appears. Index [1] of the per_word_topics result is the
    # (word_id, [topic ids sorted by relevance]) list.
    for doc_bow in corpus:
        word_topics = lda_model.get_document_topics(doc_bow, per_word_topics=True)[1]
        for word_id, topic_dist in word_topics:
            if topic_dist:  # a word may get no topic assignment
                word_topic_votes[dictionary[word_id]].append(topic_dist[0])

    # Counter.most_common is O(n) per word; the previous
    # max(set(topics), key=topics.count) rescanned the list for every
    # candidate topic (quadratic in the number of occurrences).
    return {
        word: Counter(topics).most_common(1)[0][0]
        for word, topics in word_topic_votes.items()
    }

def generate_topic_word_tables(topic_words, topic_names, word_topic_mapping):
    """Build the two summary tables for the fitted topic model.

    Args:
        topic_words: {topic_id: [(word, weight), ...]} per topic.
        topic_names: {topic_id: display name}.
        word_topic_mapping: {word: dominant topic_id}.

    Returns:
        (overview_df, word_topic_df): a one-row-per-topic overview table
        and a word-to-topic table sorted by topic label.
    """
    # Topic overview: one row per topic with its weighted keyword list.
    overview_df = pd.DataFrame([
        {
            "主题编号": f"主题{tid + 1}",
            "主题名称": topic_names[tid],
            "关键词(权重)": ", ".join(f"{w}({p:.4f})" for w, p in ranked),
        }
        for tid, ranked in topic_words.items()
    ])

    # Word-to-topic table, ordered by the topic label for readability.
    word_topic_df = pd.DataFrame([
        {
            "词语": word,
            "所属主题": f"主题{tid + 1}",
            "主题名称": topic_names[tid],
        }
        for word, tid in word_topic_mapping.items()
    ]).sort_values("所属主题")

    return overview_df, word_topic_df

def save_results(overview_df, word_topic_df, output_dir='自动生成主题词/results'):
    """Persist the analysis tables as Excel files plus a text report.

    Args:
        overview_df: topic overview table.
        word_topic_df: word-to-topic mapping table.
        output_dir: destination directory (created if missing).
    """
    os.makedirs(output_dir, exist_ok=True)

    # Excel outputs: one workbook per table.
    overview_df.to_excel(
        os.path.join(output_dir, 'topic_overview.xlsx'),
        index=False, engine='openpyxl')
    word_topic_df.to_excel(
        os.path.join(output_dir, 'word_topic_mapping.xlsx'),
        index=False, engine='openpyxl')

    # Assemble the plain-text report, then write it in one pass.
    report = [
        '主题分析报告\n',
        '=' * 50 + '\n\n',
        '1. 主题概览\n',
        '-' * 30 + '\n',
    ]
    for _, row in overview_df.iterrows():
        report.append(f"【{row['主题编号']}】{row['主题名称']}\n")
        report.append(f"关键词：{row['关键词(权重)']}\n\n")

    report.append('\n2. 主题-词语分布统计\n')
    report.append('-' * 30 + '\n')
    for topic, count in word_topic_df['所属主题'].value_counts().items():
        report.append(f"{topic}: {count}个词语\n")

    with open(os.path.join(output_dir, 'topic_analysis_report.txt'),
              'w', encoding='utf-8') as f:
        f.writelines(report)

def main():
    """Run the full pipeline: load data, fit LDA, export the tables."""
    print("开始生成主题词分析...")

    # Fit the model on the preprocessed corpus.
    documents = load_data('CNKI-output.txt')
    lda_model, corpus, dictionary = create_lda_model(documents)

    # Derive keywords, auto-generated names, and the word->topic map.
    topic_words = extract_topic_words(lda_model)
    topic_names = suggest_topic_names(topic_words)
    word_topic_mapping = create_word_topic_mapping(lda_model, corpus, dictionary)

    # Build and persist the result tables and the text report.
    overview_df, word_topic_df = generate_topic_word_tables(
        topic_words, topic_names, word_topic_mapping)
    save_results(overview_df, word_topic_df)

    print("分析完成！结果已保存到 自动生成主题词/results 目录")
    print("- topic_overview.xlsx (主题概览表)")
    print("- word_topic_mapping.xlsx (词-主题对应表)")
    print("- topic_analysis_report.txt (分析报告)")

# Script entry point: run the pipeline only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main() 