import logging
import re
from collections import Counter

import jieba
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from tqdm import tqdm

# Raise the root logger threshold so DEBUG/INFO noise is suppressed.
logging.getLogger().setLevel(logging.ERROR)

# Hook tqdm into pandas so Series/DataFrame .progress_apply shows a bar.
tqdm.pandas()

# Prefer jieba's Paddle segmentation mode when the paddle backend is
# installed; otherwise fall back to jieba's default mode.
USE_PADDLE = False
try:
    jieba.enable_paddle()
except Exception as e:
    print(f"未启用 Paddle（将使用普通模式）：{e}")
else:
    USE_PADDLE = True
    print("Paddle 分词已启用")

def clean_text(text):
    """Normalize a report string for tokenization.

    Collapses every run of characters that is neither CJK (U+4E00-U+9FA5)
    nor an ASCII letter into a single space, then trims the result.
    Non-string input is first coerced with str().
    """
    as_text = str(text)
    collapsed = re.sub(r"[^\u4e00-\u9fa5a-zA-Z]+", " ", as_text)
    return collapsed.strip()

def chinese_tokenizer(text):
    """Segment *text* into a list of tokens with jieba.

    Honors the module-level USE_PADDLE flag set at import time.
    """
    return list(jieba.cut(text, use_paddle=USE_PADDLE))

# Load stopwords from file and augment them with corpus-specific high-DF words.
def load_stopwords(file_path, corpus_texts, df_threshold=0.8):
    """Build a stopword set from a file plus high-document-frequency words.

    Args:
        file_path: path to a UTF-8 stopword file, one entry per line
            (each line is itself segmented with jieba).
        corpus_texts: iterable of document strings used to measure
            document frequency.
        df_threshold: words appearing in more than this fraction of
            documents are treated as stopwords.

    Returns:
        A set of stopwords, or None when the stopword file is missing.
    """
    # Keep the try narrow: only open() is expected to raise FileNotFoundError.
    try:
        with open(file_path, encoding='utf-8') as f:
            stop_words = {word for line in f for word in jieba.cut(line.strip()) if word}
    except FileNotFoundError:
        print(f"停用词文件未找到：{file_path}")
        return None

    total_docs = len(corpus_texts)
    # Guard against an empty corpus: avoid ZeroDivisionError below and
    # return the file-based stopwords unchanged.
    if total_docs == 0:
        return stop_words

    # Count, for each word, the number of documents that contain it.
    word_doc_freq = Counter()
    for doc in tqdm(corpus_texts, desc="分析高频词"):
        word_doc_freq.update(set(chinese_tokenizer(doc)))

    # Words whose document frequency exceeds the threshold are also stopwords.
    high_df_words = {w for w, c in word_doc_freq.items() if c / total_docs > df_threshold}
    return stop_words | high_df_words

# Visualize the top-N keywords by mean TF-IDF score.
def plot_top_keywords(tfidf_matrix, feature_names, top_n=20):
    """Show a bar chart of the top_n terms ranked by mean TF-IDF.

    Args:
        tfidf_matrix: sparse document-term matrix (rows = documents).
        feature_names: sequence mapping column index -> term string.
        top_n: number of highest-scoring terms to display.
    """
    # Average TF-IDF score of each term across all documents
    # (.A1 flattens the np.matrix returned by sparse .mean()).
    avg_scores = tfidf_matrix.mean(axis=0).A1
    # Indices of the top_n terms, highest score first.
    ranked = avg_scores.argsort()[::-1][:top_n]
    keywords = [feature_names[i] for i in ranked]
    scores = [avg_scores[i] for i in ranked]

    # Render as a bar chart with slanted x labels for readability.
    plt.figure(figsize=(10, 5))
    plt.bar(keywords, scores)
    plt.xticks(rotation=45)
    plt.title("Top TF-IDF Keywords")
    plt.tight_layout()
    plt.show()

# Helper: read one report CSV, clean it, and merge rows per company.
def _load_grouped_texts(csv_file, progress_message):
    """Return a Series of cleaned, space-joined reports indexed by company_id.

    Args:
        csv_file: CSV with at least `company_id` and `report_content` columns.
        progress_message: status line printed before the (slow) grouping step.
    """
    df = pd.read_csv(csv_file)
    df["report_content"] = df["report_content"].astype(str).map(clean_text)
    print(progress_message)
    # Concatenate every report belonging to the same company into one document.
    return df.groupby("company_id")["report_content"].progress_apply(" ".join)


# Main pipeline: extract TF-IDF features for the train and test sets.
def build_tfidf(train_file, test_file, output_train, output_test,
                stopwords_file="cn_stopwords.txt", max_features=1024):
    """Fit TF-IDF on the training corpus, transform both sets, save to CSV.

    Args:
        train_file / test_file: input CSVs of per-company reports.
        output_train / output_test: destination CSVs for the feature matrices.
        stopwords_file: stopword list combined with corpus high-DF words.
        max_features: cap on the TF-IDF vocabulary size.

    Side effects: writes the two feature CSVs plus tfidf_features_vocab.csv,
    and displays a bar chart of the top training keywords.
    """
    # Train and test share the identical load/clean/group procedure.
    train_grouped = _load_grouped_texts(train_file, "合并训练集文本...")
    test_grouped = _load_grouped_texts(test_file, "合并测试集文本...")

    # Stopwords = file-based list plus words in >80% of training documents.
    stop_words = load_stopwords(stopwords_file, list(train_grouped.values))
    if stop_words is not None:
        stop_words = list(stop_words)  # TfidfVectorizer expects a list

    vectorizer = TfidfVectorizer(
        tokenizer=chinese_tokenizer,
        stop_words=stop_words,
        max_features=max_features,  # cap vocabulary size
        ngram_range=(1, 2),         # unigrams and bigrams
        min_df=2,                   # drop words seen in fewer than 2 documents
        max_df=0.8,                 # drop words seen in >80% of documents
        token_pattern=None          # silence warning: custom tokenizer in use
    )

    # Fit on the training corpus and export its feature matrix.
    print("正在提取训练集 TF-IDF 特征...")
    train_matrix = vectorizer.fit_transform(train_grouped.values)
    train_df_out = pd.DataFrame(train_matrix.toarray(), index=train_grouped.index)
    train_df_out.to_csv(output_train)
    print(f"训练集特征已保存：{output_train}")

    # Visualize the strongest keywords learned from the training corpus.
    plot_top_keywords(train_matrix, vectorizer.get_feature_names_out())

    # Transform (not refit) the test corpus with the same vocabulary.
    print("正在提取测试集 TF-IDF 特征...")
    test_matrix = vectorizer.transform(test_grouped.values)
    test_df_out = pd.DataFrame(test_matrix.toarray(), index=test_grouped.index)
    test_df_out.to_csv(output_test)
    print(f"测试集特征已保存：{output_test}")

    # Persist the learned vocabulary for downstream inspection.
    feature_list = list(vectorizer.get_feature_names_out())
    pd.Series(feature_list).to_csv("tfidf_features_vocab.csv", index=False, header=["keyword"])
    print("TF-IDF 关键词词典已保存：tfidf_features_vocab.csv")

# Script entry point: run the pipeline with the default file layout.
if __name__ == "__main__":
    pipeline_args = dict(
        train_file="text_train_data.csv",
        test_file="text_test_data.csv",
        output_train="tfidf_train_features.csv",
        output_test="tfidf_test_features.csv",
        stopwords_file="cn_stopwords.txt",
        max_features=1024,
    )
    build_tfidf(**pipeline_args)