import os
import time
import re
import jieba # 用于中文分词
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris, load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest, chi2, f_classif, SelectFromModel
from sklearn.decomposition import PCA

from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB # 适合文本

from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, roc_curve
from sklearn.pipeline import Pipeline

import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings('ignore')

# --- 0. Helper functions ---
def load_thucnews_subset(base_path='THUCNews_small', num_samples_per_category=10):
    """Load a small subset of the THUCNews corpus.

    If `base_path` does not exist, a tiny dummy corpus (3 categories) is
    generated on disk so the rest of the script can run without the real
    dataset. In real use, point `base_path` at a directory containing one
    sub-directory per category, each holding UTF-8 ``*.txt`` documents.

    Parameters
    ----------
    base_path : str
        Root directory of the (sub)corpus.
    num_samples_per_category : int
        Number of dummy documents to create per category when generating data.

    Returns
    -------
    tuple
        ``(texts, labels, categories)`` where `texts` is a list of raw
        document strings, `labels` is an ``np.ndarray`` of integer class
        ids, and `categories` is the ordered list of category names.
    """
    texts = []
    labels = []
    categories = []

    if not os.path.exists(base_path):
        print(f"'{base_path}' not found. Generating dummy THUCNews data.")
        os.makedirs(base_path, exist_ok=True)
        dummy_categories = {
            "体育": ["篮球 比赛 精彩", "足球 运动员 训练", "奥运会 金牌 纪录"],
            "财经": ["股票 市场 上涨", "经济 发展 报告", "公司 利润 增长"],
            "科技": ["人工智能 创新 应用", "互联网 技术 发展", "智能手机 新品 发布"]
        }
        for cat, phrases in dummy_categories.items():
            categories.append(cat)
            cat_path = os.path.join(base_path, cat)
            os.makedirs(cat_path, exist_ok=True)
            for j in range(num_samples_per_category):
                doc_content = f"{phrases[j % len(phrases)]} 这是第{j+1}篇关于{cat}的示例文档。"
                # Write the dummy document to disk only.
                # BUGFIX: the original also appended to texts/labels here,
                # which duplicated every dummy sample because the reading
                # loop below re-reads the freshly written files.
                with open(os.path.join(cat_path, f"{j+1:02d}.txt"), 'w', encoding='utf-8') as f:
                    f.write(doc_content)
        print("Dummy THUCNews data generated.")

    print(f"Loading THUCNews data from '{base_path}'...")
    if not categories:  # not generated above -> discover categories from the directory
        categories = [d for d in os.listdir(base_path) if os.path.isdir(os.path.join(base_path, d))]
        categories.sort()

    category_map = {name: i for i, name in enumerate(categories)}

    for category_name in categories:
        category_path = os.path.join(base_path, category_name)
        if not os.path.isdir(category_path):
            continue
        # sorted() keeps the document order deterministic across filesystems
        for filename in sorted(os.listdir(category_path)):
            if filename.endswith(".txt"):
                file_path = os.path.join(category_path, filename)
                try:
                    with open(file_path, 'r', encoding='utf-8') as f:
                        texts.append(f.read())
                        labels.append(category_map[category_name])
                except Exception as e:
                    print(f"Error reading {file_path}: {e}")

    if not texts:
        raise ValueError("No text data loaded from THUCNews_small. Ensure the directory structure and files are correct.")

    return texts, np.array(labels), categories

def preprocess_text_data(texts, labels):
    """Tokenise Chinese documents and vectorise them with TF-IDF.

    Each document is segmented with jieba and re-joined with spaces so the
    vectoriser can treat tokens as whitespace-separated words.

    Returns the sparse TF-IDF matrix, the labels (unchanged), and the
    learned vocabulary.
    """
    segmented = [" ".join(jieba.cut(doc)) for doc in texts]
    # Cap the vocabulary to keep the feature space manageable.
    tfidf = TfidfVectorizer(max_features=1000, min_df=2, max_df=0.9)
    features = tfidf.fit_transform(segmented)
    return features, labels, tfidf.get_feature_names_out()

# --- 1. Data loading and initial preprocessing ---
# Per-dataset configuration: loader callable, whether the data is text,
# and how many features SelectKBest / PCA should keep.
datasets_config = {
    "Iris": {
        "loader": load_iris,
        "is_text": False,
        "k_selectkbest": 2, # Iris only has 4 features; keep 2
        "n_pca": 2
    },
    "Breast Cancer": {
        "loader": load_breast_cancer,
        "is_text": False,
        "k_selectkbest": 10, # Breast Cancer has 30 features
        "n_pca": 10
    },
    "THUCNews": {
        "loader": load_thucnews_subset, # our own loader defined above
        "is_text": True,
        "k_selectkbest": 100, # TF-IDF yields up to 1000 features; keep 100
        "n_pca": 50,
        "params": {"base_path": 'THUCNews_small', "num_samples_per_category": 50} # larger sample for more stable results
    }
}

# --- 2. Classifier selection ---
# NOTE: LogisticRegression's `multi_class` parameter was deprecated in
# scikit-learn 1.5 and removed in 1.7 (passing it raises TypeError there).
# Omitting it preserves the historical 'auto' behaviour (OvR when the
# solver is liblinear) while staying compatible with current releases.
classifiers = {
    "Logistic Regression": LogisticRegression(solver='liblinear', random_state=42),
    "SVC": SVC(probability=True, random_state=42),  # probability=True enables predict_proba for ROC AUC
    "Random Forest": RandomForestClassifier(random_state=42),
    "Multinomial NB": MultinomialNB()  # particularly suited to text (non-negative counts / TF-IDF)
}
# MultinomialNB expects non-negative count-like features, so exclude it for non-text data
non_text_classifiers = {k: v for k, v in classifiers.items() if k != "Multinomial NB"}


# --- 3. 特征选择方法 ---
# feature_selectors 定义将在循环中根据数据类型动态调整
# For SelectFromModel, we use RandomForest as the estimator
select_from_model_estimator = RandomForestClassifier(random_state=42, n_estimators=50)

results_list = []

# --- Main loop: dataset x feature-selector x classifier grid ---
for ds_name, config in datasets_config.items():
    print(f"\n--- Processing Dataset: {ds_name} ---")

    # Load the dataset (some loaders take keyword arguments, e.g. THUCNews)
    if "params" in config:
        data = config["loader"](**config["params"])
    else:
        data = config["loader"]()

    if ds_name == "THUCNews":
        texts, y, class_names = data
        X_full, y, feature_names_text = preprocess_text_data(texts, y)
        print(f"THUCNews preprocessed shape: {X_full.shape}")
    else:
        X_full, y = data.data, data.target
        class_names = data.target_names if hasattr(data, 'target_names') else [str(i) for i in np.unique(y)]
        print(f"{ds_name} original shape: {X_full.shape}")

    X_train_full, X_test_full, y_train, y_test = train_test_split(X_full, y, test_size=0.3, random_state=42, stratify=y)

    # Define the feature selectors for this dataset.
    # SelectKBest's score_func depends on the data type: chi2 requires
    # non-negative features (TF-IDF); f_classif suits continuous data.
    score_func_kbest = chi2 if ds_name == "THUCNews" else f_classif

    feature_selectors_config = {
        "All Features": None,
        f"SelectKBest (k={config['k_selectkbest']})": SelectKBest(score_func=score_func_kbest, k=config['k_selectkbest']),
        "SelectFromModel (RF)": SelectFromModel(estimator=select_from_model_estimator, threshold='median'), # 'median' picks the cut-off automatically
        f"PCA (n={config['n_pca']})": PCA(n_components=config['n_pca'], random_state=42)
    }

    # Pick the classifiers appropriate for this dataset (MultinomialNB only for text)
    current_classifiers = classifiers if ds_name == "THUCNews" else non_text_classifiers

    # For each feature-selection method
    for fs_name, selector in feature_selectors_config.items():
        print(f"  Feature Selection: {fs_name}")
        X_train, X_test = X_train_full, X_test_full

        start_fs_time = time.time()
        if selector:
            if ds_name == "THUCNews" and isinstance(selector, (SelectKBest, SelectFromModel)):
                 # These selectors accept scipy sparse matrices directly.
                X_train = selector.fit_transform(X_train_full, y_train)
                X_test = selector.transform(X_test_full)
            elif isinstance(selector, PCA): # PCA returns a dense matrix
                # Non-text data must be standardised before PCA
                if not config["is_text"]:
                    scaler_pca = StandardScaler()
                    X_train_scaled_pca = scaler_pca.fit_transform(X_train_full)
                    X_test_scaled_pca = scaler_pca.transform(X_test_full)
                    X_train = selector.fit_transform(X_train_scaled_pca)
                    X_test = selector.transform(X_test_scaled_pca)
                else: # Text data is already TF-IDF; apply PCA directly (centering is usually attempted before PCA)
                    # PCA on sparse data can be tricky, Tfidf is already somewhat scaled.
                    # For simplicity, we apply directly. Dense conversion might be needed for some PCA impl.
                    # scikit-learn's PCA handles sparse input if svd_solver is 'arpack' or 'randomized'
                    # Default 'auto' should choose appropriately.
                    try:
                        X_train = selector.fit_transform(X_train_full.toarray() if hasattr(X_train_full, "toarray") else X_train_full)
                        X_test = selector.transform(X_test_full.toarray() if hasattr(X_test_full, "toarray") else X_test_full)
                    except TypeError: # If PCA insists on dense
                        X_train = selector.fit_transform(X_train_full.toarray())
                        X_test = selector.transform(X_test_full.toarray())

            else: # SelectKBest/SelectFromModel for non-text
                scaler_fs = StandardScaler() # Scale before these selectors for non-text
                X_train_scaled_fs = scaler_fs.fit_transform(X_train_full)
                X_test_scaled_fs = scaler_fs.transform(X_test_full)
                X_train = selector.fit_transform(X_train_scaled_fs, y_train)
                X_test = selector.transform(X_test_scaled_fs)

        # Non-text data with no selector ("All Features") still needs
        # standardisation (the PCA branch above already scales).
        elif not config["is_text"]:
            scaler = StandardScaler()
            X_train = scaler.fit_transform(X_train_full)
            X_test = scaler.transform(X_test_full)

        fs_time = time.time() - start_fs_time

        num_features_selected = X_train.shape[1]
        print(f"    Selected features: {num_features_selected}")

        # For each classification algorithm
        for clf_name, classifier_model in current_classifiers.items():
            print(f"    Classifier: {clf_name}")

            # Special case: MultinomialNB cannot handle PCA-transformed data
            # (components may be negative, NB requires non-negative input).
            if clf_name == "Multinomial NB" and isinstance(selector, PCA):
                print(f"      Skipping Multinomial NB for PCA-transformed data in {ds_name} as it may contain negative values.")
                continue
            if clf_name == "Multinomial NB" and not config["is_text"] and fs_name == "All Features":
                 print(f"      Skipping Multinomial NB for non-text, non-TFIDF data {ds_name}.")
                 continue


            pipeline = Pipeline([('classifier', classifier_model)])

            start_train_time = time.time()
            pipeline.fit(X_train, y_train)
            train_time = time.time() - start_train_time

            start_pred_time = time.time()
            y_pred = pipeline.predict(X_test)
            pred_time = time.time() - start_pred_time

            y_proba = None
            if hasattr(pipeline, "predict_proba"):
                y_proba = pipeline.predict_proba(X_test)

            # Compute evaluation metrics
            accuracy = accuracy_score(y_test, y_pred)
            # For multiclass, specify average method for precision, recall, f1
            avg_method = 'weighted' if len(np.unique(y)) > 2 else 'binary'
            precision = precision_score(y_test, y_pred, average=avg_method, zero_division=0)
            recall = recall_score(y_test, y_pred, average=avg_method, zero_division=0)
            f1 = f1_score(y_test, y_pred, average=avg_method, zero_division=0)

            roc_auc = None
            if y_proba is not None:
                if len(np.unique(y)) == 2: # Binary classification
                    roc_auc = roc_auc_score(y_test, y_proba[:, 1])
                else: # Multiclass classification
                    try:
                        roc_auc = roc_auc_score(y_test, y_proba, multi_class='ovr', average=avg_method)
                    except ValueError as e:
                        print(f"      Could not calculate ROC AUC for {clf_name} on {ds_name}: {e}")
                        roc_auc = np.nan # Or some other placeholder

            results_list.append({
                "Dataset": ds_name,
                "Feature Selector": fs_name,
                "Num Features Original": X_full.shape[1],
                "Num Features Selected": num_features_selected,
                "Classifier": clf_name,
                "Accuracy": accuracy,
                "Precision": precision,
                "Recall": recall,
                "F1-score": f1,
                "ROC AUC": roc_auc,
                "Train Time (s)": train_time,
                "Predict Time (s)": pred_time,
                "FS Time (s)": fs_time if selector else 0,
                "y_test": y_test, # Storing for ROC curve plotting later
                "y_proba": y_proba, # Storing for ROC curve plotting later
                "class_names": class_names
            })

# --- 4. Result presentation and analysis ---
results_df = pd.DataFrame(results_list)

# (1) Compare algorithm performance under the different feature-selection methods
print("\n\n--- (1) Algorithm Performance under different Feature Selection ---")
for ds_name in datasets_config.keys():
    print(f"\n--- Dataset: {ds_name} ---")
    ds_results = results_df[results_df['Dataset'] == ds_name].copy()

    # Columns to show in the comparison table (heavy object columns such as
    # y_test / y_proba are intentionally omitted).
    display_cols = ["Feature Selector", "Num Features Selected", "Classifier", "Accuracy", "F1-score", "ROC AUC", "Train Time (s)"]

    # Print results sorted by selector, then by descending F1.
    print(ds_results[display_cols].sort_values(by=["Feature Selector", "F1-score"], ascending=[True, False]).to_string())

    # Plot ROC curves for this dataset.
    plt.figure(figsize=(8, 5))
    plt.title(f'ROC Curves for {ds_name}')

    # For each classifier, plot only its best feature-selection run (by F1).
    best_fs_results = ds_results.loc[ds_results.groupby("Classifier")["F1-score"].idxmax()]

    for idx, row in best_fs_results.iterrows():
        # BUGFIX: the original used `row['ROC AUC'] is not np.nan` -- an
        # identity check. Once pandas stores the column as float64, NaN is
        # returned as a fresh float object, so the check always passed and
        # NaN rows were plotted with a "nan" AUC label. pd.isna() handles
        # both None and NaN reliably.
        if row['y_proba'] is not None and not pd.isna(row['ROC AUC']):
            n_classes = len(row['class_names'])
            fpr = dict()
            tpr = dict()
            roc_auc_val = dict()

            if n_classes == 2:
                fpr[0], tpr[0], _ = roc_curve(row['y_test'], row['y_proba'][:, 1])
                roc_auc_val[0] = row['ROC AUC']  # already computed in the main loop
                plt.plot(fpr[0], tpr[0], label=f"{row['Classifier']} (AUC = {roc_auc_val[0]:.2f}) (FS: {row['Feature Selector'][:15]})")
            else:  # Multi-class: one-vs-rest curves
                y_test_binarized = pd.get_dummies(row['y_test']).values
                for i in range(n_classes):
                    fpr[i], tpr[i], _ = roc_curve(y_test_binarized[:, i], row['y_proba'][:, i])
                # A full multi-class ROC plot (micro/macro averages, one curve
                # per class) is more involved; as a simplification we plot the
                # first class vs the rest and label it with the weighted AUC
                # reported in the results table.
                plt.plot(fpr[0], tpr[0], linestyle='--', label=f"{row['Classifier']} - {row['class_names'][0]} vs Rest (AUC {row['ROC AUC']:.2f}) (FS: {row['Feature Selector'][:15]})")


    plt.plot([0, 1], [0, 1], 'k--')  # chance-level diagonal
    plt.xlim([0.0, 1.0])
    # NOTE(review): the y-axis is zoomed to [0.95, 1.02], clipping any curve
    # below a TPR of 0.95 -- presumably intentional for near-perfect
    # classifiers; use [0.0, 1.05] for a conventional ROC view.
    plt.ylim([0.95, 1.02])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend(loc="lower right")
    plt.grid(True)
    plt.savefig(f"roc_curves_{ds_name.replace(' ', '_')}.png")
    plt.show()


# （2）在不同数据集上，对比不同分类算法的性能指标和执行时间
print("\n\n--- (2) Cross-Dataset Algorithm Comparison ---")
# We want to see how each algorithm performs across datasets
# For this, we can group by 'Classifier' then 'Dataset'
# Or show a summary table for each classifier
summary_metrics = ["Accuracy", "F1-score", "ROC AUC", "Train Time (s)", "Predict Time (s)"]

for clf_name in classifiers.keys():
    print(f"\n--- Performance of: {clf_name} ---")
    clf_results = results_df[results_df['Classifier'] == clf_name].copy()
    
    # For each dataset, find the best feature selection method for this classifier
    best_results_for_clf = clf_results.loc[clf_results.groupby("Dataset")["F1-score"].idxmax()]
    
    print(best_results_for_clf[["Dataset", "Feature Selector", "Num Features Selected"] + summary_metrics].sort_values(by="Dataset").to_string())


print("\n\n--- Analysis Summary ---")
print("DataFrames `results_df` contains all detailed results.")

# --- Analysis text (written discussion of the experiment; printed verbatim,
# hence kept in its original Chinese) ---
analysis_text = """
分析总结：

(1) 算法在不同特征选择下的性能：

*   **Iris 数据集 (低维, 4 特征):**
    *   特征选择影响较小，因为原始特征数已经很少。PCA降到2维或SelectKBest选2个特征后，性能可能略有下降，但对于简单线性可分的部分仍表现良好。
    *   "All Features" 通常表现最佳或接近最佳。
    *   所有分类器在此数据集上通常都能达到较高性能。

*   **Breast Cancer 数据集 (中维, 30 特征):**
    *   特征选择开始显现作用。`SelectKBest` 和 `SelectFromModel` 通常能剔除不相关或冗余特征，有时能提升模型性能（特别是对抗过拟合）或至少维持性能并减少计算复杂度。
    *   `PCA` 在此数据集上可能表现良好，通过组合特征到新的低维空间来捕获主要方差。
    *   `Random Forest` 和 `SVC` 通常表现稳健，`Logistic Regression` 对特征缩放和选择较为敏感。

*   **THUCNews 数据集 (高维, TF-IDF后可达数千上万特征):**
    *   特征选择至关重要。使用 "All Features" (例如1000个TF-IDF特征) 可能导致模型训练缓慢且容易过拟合，特别是对于 `Logistic Regression` 和 `SVC`。
    *   `SelectKBest (chi2)` 是文本分类常用的有效特征选择方法。
    *   `SelectFromModel` (如基于RF或L1的LR) 也能有效筛选重要词汇特征。
    *   `PCA` 可以大幅降维，但可能会损失文本特征的解释性，且在稀疏TF-IDF矩阵上直接应用PCA需要注意（通常转为稠密或使用TruncatedSVD）。
    *   `Multinomial NB` 通常在高维稀疏文本数据上表现良好且快速，对特征选择的依赖相对较低，但合适的特征数量仍有益。

(2) 不同数据集上算法表现差异 (有效性与效率):

*   **有效性 (Accuracy, F1-score, ROC AUC):**
    *   **Logistic Regression:**
        *   Iris/Breast Cancer: 表现良好，特别是特征经过标准化和适当选择后。
        *   THUCNews: 如果特征维度非常高且没有有效选择，性能可能不如 `Multinomial NB` 或 `Random Forest`。对正则化敏感。
    *   **SVC:**
        *   Iris/Breast Cancer: 通常非常强大，能处理非线性关系（使用RBF核时）。对特征缩放敏感。
        *   THUCNews: 在高维空间中表现良好，但训练时间可能较长，尤其是在样本量大或特征数多时。特征选择有助于提升效率和性能。
    *   **Random Forest:**
        *   在所有类型数据集上通常表现稳健且强大，不易过拟合，对特征缩放不敏感。
        *   THUCNews: 能够处理高维数据，特征重要性可用于特征选择。
    *   **Multinomial NB:**
        *   THUCNews: 专为文本设计，通常是文本分类的强基线，计算速度快，对高维稀疏数据处理得当。
        *   Iris/Breast Cancer: 不适用于这些具有连续值或可能为负值（标准化后）的非计数型数据。

*   **效率 (执行时间):**
    *   **训练时间:**
        *   Iris (小规模, 低维): 所有算法都很快。
        *   Breast Cancer (中规模, 中维): `Logistic Regression` 和 `Multinomial NB` (若适用) 最快。`Random Forest` 其次。 `SVC` (尤其带复杂核函数) 可能最慢。
        *   THUCNews (模拟中等规模, 高维):
            *   `Multinomial NB` 通常最快。
            *   `Logistic Regression` 速度适中，受特征数量影响。
            *   `Random Forest` 训练时间会随树的数量和深度增加，以及特征数量增加而显著增加。
            *   `SVC` 在高维大数据集上训练时间可能非常长（复杂度约为O(N^2*D)到O(N^3*D)）。
            *   特征选择能显著减少所有算法（尤其是 `SVC` 和 `Random Forest`）的训练时间。PCA降维也是。
    *   **预测时间:**
        *   通常都很快。`Logistic Regression` 和 `Multinomial NB` 非常快。`SVC` 的预测时间取决于支持向量的数量。`Random Forest` 取决于树的数量。

*   **数据规模/特征维度影响总结:**
    *   **低维小规模 (Iris):** 多数算法表现良好，差异不大。特征选择意义不大。
    *   **中维中规模 (Breast Cancer):** 特征选择开始重要。算法间差异开始显现。`SVC`, `RF` 通常占优。
    *   **高维 (THUCNews):**
        *   **有效性:** `Multinomial NB` 和 `Random Forest` 通常是强选择。`SVC` 和 `LR` 需要仔细的特征工程/选择和调参。
        *   **效率:** `Multinomial NB` 和 `LR` (有特征选择时) 较快。`RF` 和 `SVC` 对高维度和大规模数据计算成本高昂，特征选择/降维是必须的。

结论：没有万能的算法。算法选择应基于数据特性（类型、规模、维度）、计算资源和具体任务目标。特征选择是提升模型性能和效率的关键步骤，尤其是在处理高维数据时。
"""
print(analysis_text)

# Make sure the THUCNews_small directory exists so the script can run.
# NOTE(review): by this point the main loop has already called
# load_thucnews_subset (which creates the directory), so this branch is
# normally dead code; kept as a harmless safety net.
if not os.path.exists('THUCNews_small'):
    print("Simulating THUCNews data creation for the first run...")
    load_thucnews_subset(base_path='THUCNews_small', num_samples_per_category=50) # larger sample for more stable results