import os
import sys
import psutil
import traceback
from colorama import Fore, Style
from tqdm import tqdm
from gensim.models import LdaModel
from gensim.corpora import Dictionary
from sklearn.feature_extraction.text import TfidfVectorizer
import pyLDAvis
import pyLDAvis.gensim_models as gensimvis
import plotly.graph_objects as go
import plotly.express as px
import base64
from io import BytesIO

# 内存监控函数
def get_memory_usage():
    """Return the resident set size (RSS) of the current process, in MiB."""
    rss_bytes = psutil.Process(os.getpid()).memory_info().rss
    return rss_bytes / (1024 * 1024)

def analyze_lda_from_file(texts_path, title, num_topics=2, passes=5):
    """Run an LDA topic analysis over a line-per-document text file (full dataset).

    Reads one pre-tokenized document per non-blank line from *texts_path*
    (tokens separated by whitespace), builds a gensim dictionary and BoW
    corpus, trains an ``LdaModel``, and writes two artifacts using *title*
    as a filename prefix:

    - ``<title>_lda.html``  — interactive pyLDAvis visualization
    - ``<title>_topic_words.png`` — Plotly grouped bar chart of the
      top-10 words per topic

    Parameters
    ----------
    texts_path : str
        Path to a UTF-8 text file; blank lines are skipped.
    title : str
        Output filename prefix; may include a directory component, which
        is created if missing. Also used in log output and the chart title.
    num_topics : int, optional
        Number of topics to fit (default 2).
    passes : int, optional
        Training passes over the corpus (default 5).

    Returns
    -------
    tuple
        ``(lda_model, corpus, id2word)`` on success, or
        ``(None, None, None)`` if the input is empty or any step raises.
    """
    print(f"\n{Fore.CYAN}===== 开始处理 {title} 的 LDA 分析 =====")
    print(f"{Fore.WHITE}PID: {os.getpid()} | 初始内存: {get_memory_usage():.2f} MB")

    try:
        # --------------------------------------
        # 1. Data loading
        # --------------------------------------
        print(f"{Fore.WHITE}[1/5] 加载数据...")
        with open(texts_path, "r", encoding="utf-8") as f:
            texts = [line.strip() for line in f if line.strip()]

        # Guard against an empty/blank-only input file.
        if not texts:
            print(f"{Fore.RED}错误: 没有加载到任何文本数据！")
            return None, None, None

        print(f"已加载 {len(texts)} 条文本")
        print(f"内存占用: {get_memory_usage():.2f} MB")

        # --------------------------------------
        # 2. TF-IDF vectorization (whole dataset)
        # NOTE(review): the resulting `tfidf` matrix is only used for the
        # shape report below — it is NOT fed into the gensim LDA model,
        # which is trained on the BoW corpus built in step 3. Confirm
        # whether this step is intentional before relying on it.
        # --------------------------------------
        print(f"{Fore.WHITE}[2/5] TF-IDF 向量化...")
        vectorizer = TfidfVectorizer(
            min_df=5,  # term must appear in at least 5 documents
            max_df=0.8,  # drop terms appearing in more than 80% of documents
            stop_words=None  # rely on external/custom stop-word handling
        )

        # Wrap fit_transform in a one-step progress bar for visibility.
        with tqdm(total=1, desc="全局向量化", unit="dataset") as pbar:
            tfidf = vectorizer.fit_transform(texts)
            pbar.update(1)

        print(f"特征维度: {tfidf.shape}")
        print(f"内存占用: {get_memory_usage():.2f} MB")

        # --------------------------------------
        # 3. Convert to gensim format
        # --------------------------------------
        print(f"{Fore.WHITE}[3/5] 转换为gensim格式...")

        # Build the token dictionary from whitespace-split documents.
        print("构建词典...")
        id2word = Dictionary()
        id2word.add_documents([text.split() for text in texts])

        # Prune extreme terms (important for topic quality).
        id2word.filter_extremes(
            no_below=5,  # drop tokens appearing in fewer than 5 documents
            no_above=0.8,  # drop tokens appearing in more than 80% of documents
            keep_n=10000  # cap the vocabulary at 10000 tokens
        )

        print("构建语料库...")
        # Generator expression keeps per-document BoW conversion lazy;
        # the list comprehension below still materializes the full corpus.
        corpus_generator = (id2word.doc2bow(text.split()) for text in texts)
        corpus = [doc for doc in tqdm(corpus_generator, total=len(texts), desc="转换语料库")]

        # NOTE(review): the generator is already exhausted at this point,
        # so this `del` frees only a trivial object, not the corpus memory.
        del corpus_generator
        print(f"词典大小: {len(id2word)}")
        print(f"内存占用: {get_memory_usage():.2f} MB")

        # --------------------------------------
        # 4. Train the LDA model
        # --------------------------------------
        print(f"{Fore.WHITE}[4/5] 训练LDA模型...")
        print(f"参数设置: topics={num_topics}, passes={passes}")

        # Plain training call; callback/logging hooks were removed.
        lda = LdaModel(
            corpus=corpus,
            id2word=id2word,
            num_topics=num_topics,
            alpha='auto',
            eta='auto',
            passes=passes,
            iterations=50,
            chunksize=2000,
            random_state=42  # fixed seed for reproducible topics
        )

        print(f"模型训练完成 | 内存占用: {get_memory_usage():.2f} MB")

        # --------------------------------------
        # 5. Visualization and output
        # --------------------------------------
        print(f"{Fore.WHITE}[5/5] 生成可视化...")

        # Create the output directory if `title` carries a path component.
        output_dir = os.path.dirname(title)
        if output_dir and not os.path.exists(output_dir):
            os.makedirs(output_dir)

        # Interactive topic-map visualization (pyLDAvis).
        print("生成pyLDAvis可视化...")
        vis = gensimvis.prepare(lda, corpus, id2word)
        pyLDAvis.save_html(vis, f"{title}_lda.html")

        # Grouped Plotly bar chart: top-10 words per topic.
        print("生成主题词分布图...")
        fig = go.Figure()
        for topic_id in range(num_topics):
            topic_words = lda.show_topic(topic_id, topn=10)
            words, weights = zip(*topic_words)

            # Cycle through the qualitative palette so any num_topics works.
            color_idx = topic_id % len(px.colors.qualitative.Plotly)
            fig.add_trace(go.Bar(
                x=words,
                y=weights,
                name=f'Topic {topic_id + 1}',
                marker_color=px.colors.qualitative.Plotly[color_idx]
            ))

        fig.update_layout(
            title=f'{title} LDA 主题词分布',
            xaxis_title='主题词',
            yaxis_title='权重',
            template='plotly_white',
            font=dict(family='SimHei, Arial'),  # SimHei for CJK glyph support
            barmode='group'  # side-by-side bars, one group per word
        )
        fig.write_image(f"{title}_topic_words.png")

        # Report output locations.
        print(f"{Fore.GREEN}✓ 分析完成！结果已保存至：")
        print(f"  - {title}_lda.html（交互可视化）")
        print(f"  - {title}_topic_words.png（主题词图）")
        print(f"{Fore.WHITE}最终内存占用: {get_memory_usage():.2f} MB")

        return lda, corpus, id2word

    except Exception as e:
        # Top-level boundary: report, dump the traceback, and fail soft so a
        # batch caller can continue with other datasets.
        print(f"{Fore.RED}✗ 分析过程中出现错误: {str(e)}")
        traceback.print_exc()
        return None, None, None

# Usage example
if __name__ == "__main__":
    prefix = "processed_data/high_star"
    # Make sure the destination directory exists before any output is written.
    os.makedirs(os.path.dirname(prefix), exist_ok=True)
    analyze_lda_from_file(
        texts_path=prefix + "_cleaned_texts.txt",
        title="高分评论",
        num_topics=5,
        passes=10,
    )