import pandas as pd
import jieba
import random
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from tqdm import tqdm  # 新增导入

def load_local_data(max_lines=1000, path='./LDA/Dataset/train.txt'):
    """Load and segment Chinese documents for topic modeling.

    Reads tab-separated lines from *path*, samples at most *max_lines*
    of them, keeps the third column as the document text, then segments
    each document with jieba and drops a small stop-word list.

    Args:
        max_lines: upper bound on the number of lines sampled from the file.
        path: location of the tab-separated training file.

    Returns:
        list[str]: whitespace-joined segmented documents. Falls back to a
        built-in sample corpus when the file is missing or contains no
        usable rows, so downstream vectorization never sees an empty corpus.
    """
    try:
        with open(path, 'r', encoding='utf-8') as file:
            lines = file.readlines()
        # Cap the workload by sampling; random.sample picks distinct lines.
        if len(lines) > max_lines:
            lines = random.sample(lines, max_lines)
        # The document text is the third tab-separated column;
        # malformed rows are skipped.
        documents = []
        for line in tqdm(lines, desc="Loading data"):
            parts = line.strip().split('\t')
            if len(parts) >= 3:
                documents.append(parts[2])
    except FileNotFoundError:
        documents = []

    if not documents:
        # File missing or no row had a third column: use sample data so the
        # rest of the pipeline (CountVectorizer needs a non-empty corpus)
        # still works.
        documents = [
            "人工智能技术在医疗领域的应用越来越广泛",
            "新能源汽车的发展推动了电池技术的进步",
            "教育行业借助在线平台实现了数字化转型",
            "体育赛事的举办促进了城市基础设施的建设",
            "电子商务的兴起改变了人们的购物习惯"
        ]

    # Minimal Chinese stop-word list; a set gives O(1) membership tests
    # instead of a linear scan per token.
    stopwords = {'的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一个', '也', '这', '那', '要', '技术'}
    segmented_docs = []
    for doc in documents:
        words = jieba.cut(doc)
        filtered_words = [word for word in words if word not in stopwords]
        segmented_docs.append(' '.join(filtered_words))

    return segmented_docs

def preprocess_data():
    """Load the segmented corpus and turn it into a document-term count matrix.

    Returns:
        tuple: (sparse document-term matrix, the fitted CountVectorizer).
    """
    docs = load_local_data()

    # Terms appearing in more than 95% of documents are treated as noise
    # and dropped; min_df=1 keeps every remaining term.
    vectorizer = CountVectorizer(max_df=0.95, min_df=1)
    doc_term_matrix = vectorizer.fit_transform(docs)
    return doc_term_matrix, vectorizer

def build_lda_model(X, n_topics=5, random_state=42):
    """Fit an LDA topic model on a document-term matrix.

    Args:
        X: sparse document-term count matrix (e.g. from CountVectorizer).
        n_topics: number of latent topics to extract (key hyper-parameter).
        random_state: seed for reproducible topic initialization;
            defaults to 42 to preserve the original behavior.

    Returns:
        The fitted LatentDirichletAllocation model.
    """
    lda = LatentDirichletAllocation(n_components=n_topics,
                                    random_state=random_state)
    lda.fit(X)
    return lda

def print_top_words(model, feature_names, n_top_words=10):
    """Print the highest-weighted words of every topic in *model*.

    Args:
        model: fitted topic model exposing a ``components_`` array of
            per-topic word weights.
        feature_names: vocabulary terms indexed like the columns of
            ``components_``.
        n_top_words: how many keywords to show per topic.
    """
    for idx, weights in enumerate(model.components_):
        # Indices of the n_top_words largest weights, in descending order.
        best = weights.argsort()[::-1][:n_top_words]
        keywords = ' '.join(feature_names[i] for i in best)
        print(f"Topic #{idx}: {keywords}")

if __name__ == "__main__":
    # 数据预处理
    X, vectorizer = preprocess_data()

    # 构建 LDA 模型，需要输入数据集和主题数量（重要超参数）
    lda_model = build_lda_model(X, n_topics=3)

    # 获取特征名称
    feature_names = vectorizer.get_feature_names_out()

    # 打印每个主题的关键词, n_top_words 为主题中关键词的个数
    print_top_words(lda_model, feature_names, n_top_words=7)