import codecs
import os
import traceback
from multiprocessing import freeze_support

import matplotlib.pyplot as plt
import numpy as np
from gensim import corpora
from gensim.models import LdaModel
from gensim.models.coherencemodel import CoherenceModel

# Configure matplotlib for Chinese text: SimHei supplies the CJK glyphs, and
# disabling unicode_minus falls back to an ASCII hyphen for negative tick
# labels (SimHei lacks the Unicode minus glyph).
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

def compute_metrics(texts, dictionary, corpus, start=1, end=16, step=1):
    """Train one LDA model per candidate topic count and collect metrics.

    Args:
        texts: Tokenized documents (list of token lists), used for coherence.
        dictionary: gensim ``Dictionary`` built from ``texts``.
        corpus: Bag-of-words corpus (list of ``doc2bow`` vectors).
        start, end, step: Candidate topic counts, as in ``range(start, end, step)``.

    Returns:
        Tuple ``(perplexities, coherence_scores)`` — parallel lists with one
        entry per topic count. Perplexity is the true per-word perplexity
        (lower is better); coherence is the 'c_v' score (higher is better).
    """
    perplexities = []
    coherence_scores = []

    for num_topics in range(start, end, step):
        print(f'\n计算主题数 {num_topics} 的评估指标...')

        # Fixed random_state keeps runs reproducible; passes=60 for convergence.
        lda = LdaModel(
            corpus=corpus,
            id2word=dictionary,
            num_topics=num_topics,
            random_state=42,
            passes=60
        )

        # BUGFIX: LdaModel.log_perplexity() returns the per-word log-likelihood
        # *bound* (a negative number), not perplexity itself. Searching for the
        # minimum of the raw bound is the opposite of minimizing perplexity.
        # Convert with perplexity = 2 ** (-bound) so the downstream
        # "lower = better" local-minimum analysis is correct.
        perplexity = np.exp2(-lda.log_perplexity(corpus))
        perplexities.append(perplexity)
        print(f'困惑度: {perplexity:.4f}')

        # 'c_v' coherence: higher scores indicate more interpretable topics.
        coherence_model = CoherenceModel(
            model=lda,
            texts=texts,
            dictionary=dictionary,
            coherence='c_v'
        )
        coherence = coherence_model.get_coherence()
        coherence_scores.append(coherence)
        print(f'一致性得分: {coherence:.4f}')

    return perplexities, coherence_scores

def _mark_local_extrema(ax, xs, ys, label, find_min):
    """Highlight strict local extrema of ``ys`` on axis ``ax``.

    Marks each interior point that is strictly smaller (``find_min=True``) or
    strictly larger (``find_min=False``) than both of its neighbors, and
    annotates it with the topic count and metric value.
    """
    for i in range(1, len(ys) - 1):
        if find_min:
            hit = ys[i] < ys[i - 1] and ys[i] < ys[i + 1]
        else:
            hit = ys[i] > ys[i - 1] and ys[i] > ys[i + 1]
        if hit:
            ax.plot(xs[i], ys[i], 'ro', markersize=10)
            ax.annotate(f'主题数={xs[i]}\n{label}={ys[i]:.4f}',
                        xy=(xs[i], ys[i]),
                        xytext=(10, 10), textcoords='offset points')

def plot_metrics(topic_numbers, perplexities, coherence_scores, output_dir):
    """Plot perplexity and coherence vs. topic count; save as a PNG.

    Args:
        topic_numbers: X-axis values (candidate topic counts).
        perplexities: Perplexity per topic count (local minima highlighted).
        coherence_scores: Coherence per topic count (local maxima highlighted).
        output_dir: Directory where 'evaluation_metrics.png' is written.
    """
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10))

    # Perplexity curve: local minima are candidate "good" topic counts.
    ax1.plot(topic_numbers, perplexities, 'b-', marker='o')
    ax1.set_xlabel('主题数量')
    ax1.set_ylabel('困惑度')
    ax1.set_title('主题数量与困惑度关系')
    ax1.grid(True)
    _mark_local_extrema(ax1, topic_numbers, perplexities, '困惑度', find_min=True)

    # Coherence curve: local maxima indicate more interpretable topic counts.
    ax2.plot(topic_numbers, coherence_scores, 'g-', marker='o')
    ax2.set_xlabel('主题数量')
    ax2.set_ylabel('一致性得分')
    ax2.set_title('主题数量与一致性得分关系')
    ax2.grid(True)
    _mark_local_extrema(ax2, topic_numbers, coherence_scores, '一致性得分',
                        find_min=False)

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'evaluation_metrics.png'), dpi=300, bbox_inches='tight')
    plt.close()

def main():
    """Entry point: read a tokenized corpus, sweep LDA topic counts, and save
    evaluation plots plus a text report of the best topic numbers."""
    try:
        # Resolve paths relative to this script so it works from any CWD.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        project_dir = os.path.dirname(current_dir)
        input_file = os.path.join(project_dir, 'CNKI-output.txt')

        print(f"读取文件: {input_file}")

        # Each non-blank line is one document of whitespace-separated tokens.
        train = []
        with codecs.open(input_file, 'r', encoding='utf8') as f:
            for line in f:
                if line.strip():
                    train.append(line.strip().split())

        if not train:
            raise ValueError("没有读取到有效数据")

        # Build the gensim dictionary and bag-of-words corpus.
        dictionary = corpora.Dictionary(train)
        corpus = [dictionary.doc2bow(text) for text in train]

        # Define the topic-count range ONCE so the x-axis labels and the
        # metrics computed by compute_metrics() cannot silently drift apart
        # (previously the range was hard-coded in two places).
        start, end = 1, 16
        topic_numbers = list(range(start, end))
        perplexities, coherence_scores = compute_metrics(
            train, dictionary, corpus, start=start, end=end)

        # Ensure the output directory exists (no-op for the script's own dir).
        os.makedirs(current_dir, exist_ok=True)

        plot_metrics(topic_numbers, perplexities, coherence_scores, current_dir)

        # Write the textual report next to the plot.
        results_file = os.path.join(current_dir, 'best_topic_number.txt')
        with open(results_file, 'w', encoding='utf-8') as f:
            f.write('主题数量评估结果\n')
            f.write('=' * 50 + '\n\n')

            # Strict local minima of perplexity.
            f.write('困惑度局部最小值：\n')
            for i in range(1, len(perplexities) - 1):
                if perplexities[i] < perplexities[i-1] and perplexities[i] < perplexities[i+1]:
                    f.write(f'主题数: {topic_numbers[i]}, 困惑度: {perplexities[i]:.4f}\n')

            # Strict local maxima of coherence.
            f.write('\n一致性得分局部最大值：\n')
            for i in range(1, len(coherence_scores) - 1):
                if coherence_scores[i] > coherence_scores[i-1] and coherence_scores[i] > coherence_scores[i+1]:
                    f.write(f'主题数: {topic_numbers[i]}, 一致性得分: {coherence_scores[i]:.4f}\n')

            # Full table of every topic count evaluated.
            f.write('\n所有主题数的评估指标：\n')
            f.write('主题数\t困惑度\t\t一致性得分\n')
            for i, (p, c) in enumerate(zip(perplexities, coherence_scores), start):
                f.write(f'{i}\t{p:.4f}\t\t{c:.4f}\n')

            # Global best by perplexity (lower is better).
            best_perplexity_idx = np.argmin(perplexities)
            f.write(f'\n基于困惑度的最优主题数: {topic_numbers[best_perplexity_idx]}\n')
            f.write(f'对应困惑度: {perplexities[best_perplexity_idx]:.4f}\n')

            # Global best by coherence (higher is better).
            best_coherence_idx = np.argmax(coherence_scores)
            f.write(f'\n基于一致性得分的最优主题数: {topic_numbers[best_coherence_idx]}\n')
            f.write(f'对应一致性得分: {coherence_scores[best_coherence_idx]:.4f}\n')

            # Qualitative recommendation.
            f.write('\n综合建议：\n')
            f.write('考虑到困惑度和一致性得分，建议选择在局部最优点中折中的主题数。\n')
            f.write('特别关注一致性得分局部最大值对应的主题数，因为这些主题数下的主题更加连贯和可解释。\n')

        print(f'\n分析完成！')
        print(f'结果已保存到: {results_file}')
        print(f'评估指标图表已保存到: {os.path.join(current_dir, "evaluation_metrics.png")}')

    except Exception as e:
        # Print the full traceback, not just the message, so failures deep
        # inside gensim/matplotlib remain diagnosable.
        traceback.print_exc()
        print(f"错误: {str(e)}")
        print(f"当前工作目录: {os.getcwd()}")

if __name__ == '__main__':
    # freeze_support() is a no-op on most platforms; it is required at the
    # start of a frozen Windows executable that uses multiprocessing.
    freeze_support()
    main() 