import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import io
import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder, LabelEncoder, PolynomialFeatures
from sklearn.impute import SimpleImputer
# 导入高斯过程分类器所需的核函数
from sklearn.gaussian_process import kernels
from sklearn.metrics import (accuracy_score, precision_score, recall_score, f1_score, confusion_matrix,
                             roc_curve, auc, classification_report, mean_squared_error, mean_absolute_error,
                             r2_score, silhouette_score)

# Configure CJK-capable fonts so matplotlib can render Chinese labels
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]
sns.set(font_scale=1.2)
sns.set_style("whitegrid")

# Streamlit page configuration (must run before any other st.* call)
st.set_page_config(
    page_title="机器学习建模工具",
    page_icon="📊",
    layout="wide"
)

# Page title and short description
st.title("📊 机器学习建模工具")
st.markdown("这是一个集成了scikit-learn主要建模功能的应用程序，支持分类、回归、聚类和降维分析。")

# 模型配置字典 - 定义所有支持的模型及其参数
MODEL_CONFIG = {
    "分类": {
        "逻辑回归": {
            "model": "sklearn.linear_model.LogisticRegression",
            "params": {
                "penalty": {"type": "selectbox", "options": ["l2", "l1", "elasticnet", "none"], "default": "l2", "help": "正则化类型"},
                "C": {"type": "slider", "min": 0.01, "max": 10.0, "step": 0.01, "default": 1.0, "help": "正则化强度的倒数，值越小正则化越强"},
                "solver": {"type": "selectbox", "options": ["lbfgs", "newton-cg", "liblinear", "sag", "saga"], "default": "lbfgs", "help": "优化器"},
                "max_iter": {"type": "slider", "min": 50, "max": 1000, "step": 50, "default": 100, "help": "最大迭代次数"},
                "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
            }
        },
        "支持向量机(SVM)": {
            "model": "sklearn.svm.SVC",
            "params": {
                "C": {"type": "slider", "min": 0.01, "max": 10.0, "step": 0.01, "default": 1.0, "help": "正则化参数"},
                "kernel": {"type": "selectbox", "options": ["linear", "poly", "rbf", "sigmoid"], "default": "rbf", "help": "核函数类型"},
                "gamma": {"type": "selectbox", "options": ["scale", "auto"], "default": "scale", "help": "核系数"},
                "degree": {"type": "slider", "min": 2, "max": 5, "step": 1, "default": 3, "help": "多项式核函数的阶数"},
                "probability": {"type": "checkbox", "default": False, "help": "是否启用概率估计"},
                "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
            }
        },
        "决策树": {
            "model": "sklearn.tree.DecisionTreeClassifier",
            "params": {
                "criterion": {"type": "selectbox", "options": ["gini", "entropy"], "default": "gini", "help": "分裂标准"},
                "max_depth": {"type": "selectbox", "options": [None, 5, 10, 20, 30, 50], "default": None, "help": "树的最大深度"},
                "min_samples_split": {"type": "slider", "min": 2, "max": 20, "step": 1, "default": 2, "help": "分裂内部节点所需的最小样本数"},
                "min_samples_leaf": {"type": "slider", "min": 1, "max": 20, "step": 1, "default": 1, "help": "叶节点所需的最小样本数"},
                "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
            }
        },
        "随机森林": {
            "model": "sklearn.ensemble.RandomForestClassifier",
            "params": {
                "n_estimators": {"type": "slider", "min": 10, "max": 500, "step": 10, "default": 100, "help": "森林中树的数量"},
                "criterion": {"type": "selectbox", "options": ["gini", "entropy"], "default": "gini", "help": "分裂标准"},
                "max_depth": {"type": "selectbox", "options": [None, 5, 10, 20, 30, 50], "default": None, "help": "树的最大深度"},
                "min_samples_split": {"type": "slider", "min": 2, "max": 20, "step": 1, "default": 2, "help": "分裂内部节点所需的最小样本数"},
                "min_samples_leaf": {"type": "slider", "min": 1, "max": 20, "step": 1, "default": 1, "help": "叶节点所需的最小样本数"},
                "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
            }
        },
            "朴素贝叶斯": {
        "model": "sklearn.naive_bayes.GaussianNB",
        "params": {
            "var_smoothing": {"type": "slider", "min": 1e-10, "max": 1e-7, "step": 1e-10, "default": 1e-9, "help": "所有特征的方差平滑参数"}
        }
    },
    "K近邻(KNN)": {
        "model": "sklearn.neighbors.KNeighborsClassifier",
        "params": {
            "n_neighbors": {"type": "slider", "min": 1, "max": 20, "step": 1, "default": 5, "help": "近邻数量"},
            "weights": {"type": "selectbox", "options": ["uniform", "distance"], "default": "uniform", "help": "权重计算方式"},
            "metric": {"type": "selectbox", "options": ["euclidean", "manhattan", "chebyshev", "minkowski"], "default": "minkowski", "help": "距离度量"}
        }
    },
    "梯度提升机": {
        "model": "sklearn.ensemble.GradientBoostingClassifier",
        "params": {
            "n_estimators": {"type": "slider", "min": 50, "max": 500, "step": 50, "default": 100, "help": "弱学习器数量"},
            "learning_rate": {"type": "slider", "min": 0.01, "max": 1.0, "step": 0.01, "default": 0.1, "help": "学习率"},
            "max_depth": {"type": "slider", "min": 1, "max": 10, "step": 1, "default": 3, "help": "树的最大深度"},
            "subsample": {"type": "slider", "min": 0.5, "max": 1.0, "step": 0.1, "default": 1.0, "help": "子样本比例"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    "多层感知器(MLP)": {
        "model": "sklearn.neural_network.MLPClassifier",
        "params": {
            "hidden_layer_sizes": {"type": "selectbox", "options": [(100,), (50,50), (100,50), (50,50,50)], "default": (100,), "help": "隐藏层大小"},
            "activation": {"type": "selectbox", "options": ["relu", "tanh", "logistic"], "default": "relu", "help": "激活函数"},
            "solver": {"type": "selectbox", "options": ["adam", "sgd", "lbfgs"], "default": "adam", "help": "优化器"},
            "alpha": {"type": "slider", "min": 0.0001, "max": 0.1, "step": 0.0001, "default": 0.0001, "help": "L2正则化参数"},
            "max_iter": {"type": "slider", "min": 100, "max": 1000, "step": 100, "default": 200, "help": "最大迭代次数"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },"AdaBoost分类器": {
        "model": "sklearn.ensemble.AdaBoostClassifier",
        "params": {
            "n_estimators": {"type": "slider", "min": 50, "max": 500, "step": 50, "default": 100, "help": "弱学习器数量"},
            "learning_rate": {"type": "slider", "min": 0.01, "max": 2.0, "step": 0.01, "default": 1.0, "help": "学习率"},
            "algorithm": {"type": "selectbox", "options": ["SAMME", "SAMME.R"], "default": "SAMME.R", "help": "Boosting算法"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    "极端随机树分类器": {
    "model": "sklearn.ensemble.ExtraTreesClassifier",
    "params": {
        # 基础集成参数
        "n_estimators": {
            "type": "slider", 
            "min": 10, 
            "max": 1000, 
            "step": 50, 
            "default": 100, 
            "help": "树的数量，增加可提升性能但增加计算成本"
        },
        "bootstrap": {
            "type": "checkbox", 
            "default": False, 
            "help": "是否使用bootstrap抽样构建每棵树的训练集"
        },
        "oob_score": {
            "type": "checkbox", 
            "default": False, 
            "help": "是否使用袋外样本评估模型（仅当bootstrap=True时有效）",
            "condition": "bootstrap == True"  # 条件显示
        },
        
        # 树结构参数
        "max_depth": {
            "type": "selectbox", 
            "options": [None, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50], 
            "default": None, 
            "help": "树的最大深度，None表示不限制（可能过拟合）"
        },
        "min_samples_split": {
            "type": "slider", 
            "min": 2, 
            "max": 50, 
            "step": 1, 
            "default": 2, 
            "help": "分裂内部节点所需的最小样本数（越大模型越简单）"
        },
        "min_samples_leaf": {
            "type": "slider", 
            "min": 1, 
            "max": 30, 
            "step": 1, 
            "default": 1, 
            "help": "叶节点所需的最小样本数（越大模型越简单）"
        },
        "max_features": {
            "type": "selectbox", 
            "options": ["sqrt", "log2", None], 
            "default": "sqrt", 
            "help": "分裂时考虑的特征数量：sqrt(默认)取特征数平方根，log2取log2(n_features)"
        },
        "max_leaf_nodes": {
            "type": "selectbox", 
            "options": [None, 10, 50, 100, 200, 500], 
            "default": None, 
            "help": "叶节点的最大数量（None表示不限制）"
        },
        
        # 计算优化参数
        "criterion": {
            "type": "selectbox", 
            "options": ["gini", "entropy", ], 
            "default": "gini", 
            "help": "分裂质量的评价标准：gini(基尼不纯度)、entropy(信息熵)"
        },
        
        # 随机性控制
        "random_state": {
            "type": "number", 
            "default": 42, 
            "help": "随机数种子，确保结果可复现"
        },
    }
}
    ,
    "装袋分类器(Bagging)": {
        "model": "sklearn.ensemble.BaggingClassifier",
        "params": {
            "n_estimators": {"type": "slider", "min": 10, "max": 200, "step": 10, "default": 10, "help": "基估计器数量"},
            "max_samples": {"type": "slider", "min": 0.1, "max": 1.0, "step": 0.1, "default": 1.0, "help": "每个基估计器的样本比例"},
            "bootstrap": {"type": "checkbox", "default": True, "help": "是否使用bootstrap抽样"},
            "n_jobs": {"type": "selectbox", "options": [-1, 1, 2], "default": -1, "help": "并行计算数量"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    "二次判别分析(QDA)": {
        "model": "sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis",
        "params": {
            "priors": {"type": "selectbox", "options": [None, "uniform"], "default": None, "help": "类别先验概率，None则从数据估计"},
            "reg_param": {"type": "slider", "min": 0.0, "max": 1.0, "step": 0.01, "default": 0.0, "help": "正则化参数，0表示无正则化"}
        }
    },
    "高斯过程分类器": {
    "model": "sklearn.gaussian_process.GaussianProcessClassifier",
    "params": {        # 关键修改：options传入核函数实例，而非字符串
        "kernel": {
            "type": "selectbox",
            "options": [kernels.RBF(), kernels.Matern()],  # 核函数实例列表
            "default": kernels.RBF(),  # 默认核函数实例
            "help": "核函数类型（RBF：径向基核；Matern：马顿核，适合非平滑数据）"
        },
        "n_restarts_optimizer": {
            "type": "slider", 
            "min": 0, 
            "max": 10, 
            "step": 1, 
            "default": 0, 
            "help": "优化器重启次数（增加次数可提升核参数优化效果，但耗时增加）"
        },
        "random_state": {
            "type": "number", 
            "default": 42, 
            "help": "随机数种子，确保结果可复现"
        }
    }
},
    },
    "回归": {
        "线性回归": {
            "model": "sklearn.linear_model.LinearRegression",
            "params": {
                "fit_intercept": {"type": "checkbox", "default": True, "help": "是否计算截距"},
                "normalize": {"type": "checkbox", "default": False, "help": "是否对特征进行归一化"}
            }
        },
        "岭回归": {
            "model": "sklearn.linear_model.Ridge",
            "params": {
                "alpha": {"type": "slider", "min": 0.01, "max": 10.0, "step": 0.01, "default": 1.0, "help": "正则化强度"},
                "fit_intercept": {"type": "checkbox", "default": True, "help": "是否计算截距"},
                "normalize": {"type": "checkbox", "default": False, "help": "是否对特征进行归一化"},
                "solver": {"type": "selectbox", "options": ["auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga"], "default": "auto", "help": "求解器"},
                "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
            }
        },
        "Lasso回归": {
            "model": "sklearn.linear_model.Lasso",
            "params": {
                "alpha": {"type": "slider", "min": 0.01, "max": 10.0, "step": 0.01, "default": 1.0, "help": "正则化强度"},
                "fit_intercept": {"type": "checkbox", "default": True, "help": "是否计算截距"},
                "normalize": {"type": "checkbox", "default": False, "help": "是否对特征进行归一化"},
                "max_iter": {"type": "slider", "min": 100, "max": 10000, "step": 100, "default": 1000, "help": "最大迭代次数"},
                "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
            }
        },
        "决策树回归": {
            "model": "sklearn.tree.DecisionTreeRegressor",
            "params": {
                "criterion": {"type": "selectbox", "options": ["mse", "friedman_mse", "mae"], "default": "mse", "help": "分裂标准"},
                "max_depth": {"type": "selectbox", "options": [None, 5, 10, 20, 30, 50], "default": None, "help": "树的最大深度"},
                "min_samples_split": {"type": "slider", "min": 2, "max": 20, "step": 1, "default": 2, "help": "分裂内部节点所需的最小样本数"},
                "min_samples_leaf": {"type": "slider", "min": 1, "max": 20, "step": 1, "default": 1, "help": "叶节点所需的最小样本数"},
                "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
            }
        },
        "弹性网络(ElasticNet)": {
        "model": "sklearn.linear_model.ElasticNet",
        "params": {
            "alpha": {"type": "slider", "min": 0.01, "max": 10.0, "step": 0.01, "default": 1.0, "help": "正则化强度"},
            "l1_ratio": {"type": "slider", "min": 0.0, "max": 1.0, "step": 0.1, "default": 0.5, "help": "L1正则化比例"},
            "fit_intercept": {"type": "checkbox", "default": True, "help": "是否计算截距"},
            "normalize": {"type": "checkbox", "default": False, "help": "是否对特征进行归一化"},
            "max_iter": {"type": "slider", "min": 100, "max": 10000, "step": 100, "default": 1000, "help": "最大迭代次数"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    "随机森林回归": {
        "model": "sklearn.ensemble.RandomForestRegressor",
        "params": {
            "n_estimators": {"type": "slider", "min": 10, "max": 500, "step": 10, "default": 100, "help": "森林中树的数量"},
            "criterion": {"type": "selectbox", "options": ["mse", "mae"], "default": "mse", "help": "分裂标准"},
            "max_depth": {"type": "selectbox", "options": [None, 5, 10, 20, 30, 50], "default": None, "help": "树的最大深度"},
            "min_samples_split": {"type": "slider", "min": 2, "max": 20, "step": 1, "default": 2, "help": "分裂内部节点所需的最小样本数"},
            "min_samples_leaf": {"type": "slider", "min": 1, "max": 20, "step": 1, "default": 1, "help": "叶节点所需的最小样本数"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    "梯度提升回归": {
        "model": "sklearn.ensemble.GradientBoostingRegressor",
        "params": {
            "n_estimators": {"type": "slider", "min": 50, "max": 500, "step": 50, "default": 100, "help": "弱学习器数量"},
            "learning_rate": {"type": "slider", "min": 0.01, "max": 1.0, "step": 0.01, "default": 0.1, "help": "学习率"},
            "max_depth": {"type": "slider", "min": 1, "max": 10, "step": 1, "default": 3, "help": "树的最大深度"},
            "subsample": {"type": "slider", "min": 0.5, "max": 1.0, "step": 0.1, "default": 1.0, "help": "子样本比例"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
  
    "支持向量回归(SVR)": {
        "model": "sklearn.svm.SVR",
        "params": {
            "kernel": {"type": "selectbox", "options": ["linear", "poly", "rbf", "sigmoid"], "default": "rbf", "help": "核函数类型"},
            "C": {"type": "slider", "min": 0.01, "max": 100.0, "step": 0.1, "default": 1.0, "help": "正则化参数"},
            "gamma": {"type": "selectbox", "options": ["scale", "auto"], "default": "scale", "help": "核系数"},
            "epsilon": {"type": "slider", "min": 0.01, "max": 2.0, "step": 0.01, "default": 0.1, "help": "不惩罚的误差范围"},
            "degree": {"type": "slider", "min": 2, "max": 5, "step": 1, "default": 3, "help": "多项式核函数的阶数"}
        }
    },
    "AdaBoost回归器": {
        "model": "sklearn.ensemble.AdaBoostRegressor",
        "params": {
            "n_estimators": {"type": "slider", "min": 50, "max": 500, "step": 50, "default": 100, "help": "弱学习器数量"},
            "learning_rate": {"type": "slider", "min": 0.01, "max": 2.0, "step": 0.01, "default": 1.0, "help": "学习率"},
            "loss": {"type": "selectbox", "options": ["linear", "square", "exponential"], "default": "linear", "help": "损失函数"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },   "K近邻回归(KNN)": {
        "model": "sklearn.neighbors.KNeighborsRegressor",
        "params": {
            "n_neighbors": {"type": "slider", "min": 1, "max": 20, "step": 1, "default": 5, "help": "近邻数量"},
            "weights": {"type": "selectbox", "options": ["uniform", "distance"], "default": "uniform", "help": "权重计算方式"},
            "metric": {"type": "selectbox", "options": ["euclidean", "manhattan", "chebyshev"], "default": "minkowski", "help": "距离度量"}
        }
    },
    "贝叶斯岭回归": {
        "model": "sklearn.linear_model.BayesianRidge",
        "params": {
            "n_iter": {"type": "slider", "min": 100, "max": 2000, "step": 100, "default": 300, "help": "最大迭代次数"},
            "alpha_1": {"type": "slider", "min": 1e-8, "max": 1e-3, "step": 1e-8, "default": 1e-6, "help": "alpha先验的形状参数"},
            "alpha_2": {"type": "slider", "min": 1e-8, "max": 1e-3, "step": 1e-8, "default": 1e-6, "help": "alpha先验的逆尺度参数"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    "多层感知器回归(MLP)": {
        "model": "sklearn.neural_network.MLPRegressor",
        "params": {
            "hidden_layer_sizes": {"type": "selectbox", "options": [(100,), (50,50), (100,50)], "default": (100,), "help": "隐藏层大小"},
            "activation": {"type": "selectbox", "options": ["relu", "tanh", "identity"], "default": "relu", "help": "激活函数"},
            "solver": {"type": "selectbox", "options": ["adam", "sgd", "lbfgs"], "default": "adam", "help": "优化器"},
            "alpha": {"type": "slider", "min": 1e-5, "max": 0.1, "step": 1e-5, "default": 1e-4, "help": "L2正则化参数"},
            "max_iter": {"type": "slider", "min": 100, "max": 2000, "step": 100, "default": 500, "help": "最大迭代次数"}
        }
    },
    },
    "聚类": {
        "K-Means": {
            "model": "sklearn.cluster.KMeans",
            "params": {
                "n_clusters": {"type": "slider", "min": 2, "max": 10, "step": 1, "default": 3, "help": "聚类数量"},
                "init": {"type": "selectbox", "options": ["k-means++", "random"], "default": "k-means++", "help": "初始化方法"},
                "n_init": {"type": "slider", "min": 1, "max": 20, "step": 1, "default": 10, "help": "运行算法的次数"},
                "max_iter": {"type": "slider", "min": 100, "max": 1000, "step": 50, "default": 300, "help": "最大迭代次数"},
                "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
            }
        },
        "DBSCAN": {
            "model": "sklearn.cluster.DBSCAN",
            "params": {
                "eps": {"type": "slider", "min": 0.1, "max": 5.0, "step": 0.1, "default": 0.5, "help": "邻域半径"},
                "min_samples": {"type": "slider", "min": 2, "max": 20, "step": 1, "default": 5, "help": "形成密集区域所需的最小样本数"},
                "metric": {"type": "selectbox", "options": ["euclidean", "manhattan", "chebyshev", "minkowski"], "default": "euclidean", "help": "距离度量"}
            }
        },
            "层次聚类": {
        "model": "sklearn.cluster.AgglomerativeClustering",
        "params": {
            "n_clusters": {"type": "slider", "min": 2, "max": 10, "step": 1, "default": 3, "help": "聚类数量"},
            "linkage": {"type": "selectbox", "options": ["ward", "complete", "average", "single"], "default": "ward", "help": "链接方法"},
            "affinity": {"type": "selectbox", "options": ["euclidean", "l1", "l2", "manhattan", "cosine"], "default": "euclidean", "help": "距离度量"}
        }
    },
    "均值漂移": {
        "model": "sklearn.cluster.MeanShift",
        "params": {
            "bandwidth": {"type": "selectbox", "options": [None, 0.5, 1.0, 2.0, 5.0], "default": None, "help": "带宽参数，None则自动估计"},
            "bin_seeding": {"type": "checkbox", "default": False, "help": "是否使用bin seeding加速"},
            "n_jobs": {"type": "selectbox", "options": [-1, 1, 2, 4], "default": -1, "help": "并行工作的数量，-1表示使用所有可用核心"}
        }
    },"谱聚类": {
        "model": "sklearn.cluster.SpectralClustering",
        "params": {
            "n_clusters": {"type": "slider", "min": 2, "max": 10, "step": 1, "default": 3, "help": "聚类数量"},
            "affinity": {"type": "selectbox", "options": ["nearest_neighbors", "rbf", "precomputed"], "default": "nearest_neighbors", "help": "亲和矩阵构建方式"},
            "n_neighbors": {"type": "slider", "min": 5, "max": 30, "step": 1, "default": 10, "help": "近邻数量(当affinity为nearest_neighbors时)"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    "BIRCH聚类": {
        "model": "sklearn.cluster.Birch",
        "params": {
            "n_clusters": {"type": "selectbox", "options": [None, 2, 3, 4, 5, 6, 7, 8, 9, 10], "default": 3, "help": "聚类数量，None表示只进行特征树构建"},
            "threshold": {"type": "slider", "min": 0.01, "max": 1.0, "step": 0.01, "default": 0.5, "help": "分支因子阈值"},
            "branching_factor": {"type": "slider", "min": 10, "max": 200, "step": 10, "default": 50, "help": "每个节点的最大子节点数"}
        }
    },    "OPTICS聚类": {
        "model": "sklearn.cluster.OPTICS",
        "params": {
            "min_samples": {"type": "slider", "min": 2, "max": 30, "step": 1, "default": 5, "help": "核心点最小样本数"},
            "max_eps": {"type": "slider", "min": 0.1, "max": 10.0, "step": 0.1, "default": np.inf, "help": "最大邻域半径"},
            "metric": {"type": "selectbox", "options": ["euclidean", "manhattan"], "default": "euclidean", "help": "距离度量"},
            "cluster_method": {"type": "selectbox", "options": ["xi", "dbscan"], "default": "xi", "help": "聚类提取方法"}
        }
    },
    "小批量K-Means": {
        "model": "sklearn.cluster.MiniBatchKMeans",
        "params": {
            "n_clusters": {"type": "slider", "min": 2, "max": 10, "step": 1, "default": 3, "help": "聚类数量"},
            "batch_size": {"type": "slider", "min": 100, "max": 1000, "step": 100, "default": 100, "help": "小批量样本数"},
            "n_init": {"type": "slider", "min": 1, "max": 10, "step": 1, "default": 3, "help": "初始化次数"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    "亲和传播聚类": {
        "model": "sklearn.cluster.AffinityPropagation",
        "params": {
            "damping": {"type": "slider", "min": 0.5, "max": 1.0, "step": 0.05, "default": 0.5, "help": "阻尼系数(0.5-1)"},
            "max_iter": {"type": "slider", "min": 100, "max": 2000, "step": 100, "default": 200, "help": "最大迭代次数"},
            "convergence_iter": {"type": "slider", "min": 10, "max": 100, "step": 5, "default": 15, "help": "收敛迭代次数"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    },
    "降维": {
        "PCA": {
            "model": "sklearn.decomposition.PCA",
            "params": {
                "n_components": {"type": "slider", "min": 2, "max": 10, "step": 1, "default": 2, "help": "降维后的维度"},
                "svd_solver": {"type": "selectbox", "options": ["auto", "full", "arpack", "randomized"], "default": "auto", "help": "SVD求解器"},
                "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
            }
        },
        "t-SNE": {
            "model": "sklearn.manifold.TSNE",
            "params": {
                "n_components": {"type": "slider", "min": 2, "max": 3, "step": 1, "default": 2, "help": "降维后的维度"},
                "perplexity": {"type": "slider", "min": 5, "max": 50, "step": 1, "default": 30, "help": "困惑度，与近邻数相关"},
                "learning_rate": {"type": "slider", "min": 10, "max": 1000, "step": 10, "default": 200, "help": "学习率"},
                "n_iter": {"type": "slider", "min": 250, "max": 1000, "step": 50, "default": 1000, "help": "迭代次数"},
                "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
            }
        },
            "因子分析": {
        "model": "sklearn.decomposition.FactorAnalysis",
        "params": {
            "n_components": {"type": "slider", "min": 2, "max": 10, "step": 1, "default": 2, "help": "要提取的因子数量"},
            "rotation": {"type": "selectbox", "options": [None, "varimax", "quartimax"], "default": None, "help": "旋转方法"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    "独立成分分析(ICA)": {
        "model": "sklearn.decomposition.FastICA",
        "params": {
            "n_components": {"type": "slider", "min": 2, "max": 10, "step": 1, "default": 2, "help": "要提取的独立成分数量"},
            "algorithm": {"type": "selectbox", "options": ["parallel", "deflation"], "default": "parallel", "help": "算法类型"},
            "whiten": {"type": "checkbox", "default": True, "help": "是否白化数据"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },    "字典学习": {
        "model": "sklearn.decomposition.DictionaryLearning",
        "params": {
            "n_components": {"type": "slider", "min": 2, "max": 100, "step": 1, "default": 10, "help": "字典中原子的数量"},
            "alpha": {"type": "slider", "min": 0.001, "max": 1.0, "step": 0.001, "default": 1.0, "help": "稀疏惩罚参数"},
            "max_iter": {"type": "slider", "min": 100, "max": 2000, "step": 100, "default": 1000, "help": "最大迭代次数"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    "非负矩阵分解(NMF)": {
        "model": "sklearn.decomposition.NMF",
        "params": {
            "n_components": {"type": "slider", "min": 2, "max": 50, "step": 1, "default": 5, "help": "分解后的组件数量"},
            "init": {"type": "selectbox", "options": ["random", "nndsvd", "nndsvda", "nndsvdar"], "default": "nndsvd", "help": "初始化方法"},
            "max_iter": {"type": "slider", "min": 100, "max": 2000, "step": 100, "default": 1000, "help": "最大迭代次数"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },"截断SVD(适用于稀疏数据)": {
        "model": "sklearn.decomposition.TruncatedSVD",
        "params": {
            "n_components": {"type": "slider", "min": 2, "max": 50, "step": 1, "default": 2, "help": "降维后的维度"},
            "algorithm": {"type": "selectbox", "options": ["arpack", "randomized"], "default": "randomized", "help": "SVD算法"},
            "n_iter": {"type": "slider", "min": 5, "max": 50, "step": 1, "default": 5, "help": "迭代次数"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    "Isomap(流形学习)": {
        "model": "sklearn.manifold.Isomap",
        "params": {
            "n_components": {"type": "slider", "min": 2, "max": 10, "step": 1, "default": 2, "help": "降维后的维度"},
            "n_neighbors": {"type": "slider", "min": 5, "max": 50, "step": 1, "default": 5, "help": "近邻数量"},
            "n_jobs": {"type": "selectbox", "options": [-1, 1, 2], "default": -1, "help": "并行计算数量"}
        }
    },
    "局部线性嵌入(LLE)": {
        "model": "sklearn.manifold.LocallyLinearEmbedding",
        "params": {
            "n_components": {"type": "slider", "min": 2, "max": 10, "step": 1, "default": 2, "help": "降维后的维度"},
            "n_neighbors": {"type": "slider", "min": 5, "max": 50, "step": 1, "default": 10, "help": "近邻数量"},
            "method": {"type": "selectbox", "options": ["standard", "ltsa", "hessian", "modified"], "default": "standard", "help": "LLE变体"},
            "random_state": {"type": "number", "default": 42, "help": "随机数种子"}
        }
    },
    }
}

def load_model(model_path):
    """Dynamically import and return a class from its dotted path.

    Args:
        model_path: Fully qualified class path, e.g. "sklearn.svm.SVC".

    Returns:
        The class object named by the path.

    Raises:
        ModuleNotFoundError: if the module cannot be imported.
        AttributeError: if the class is missing from the module.
    """
    # importlib.import_module is the documented, readable replacement for
    # the low-level __import__/fromlist trick.
    import importlib

    module_name, class_name = model_path.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

def get_data_info(df):
    """Render basic dataset diagnostics in Streamlit.

    Shows the dataset shape and dtype distribution in the left column,
    and per-column missing-value counts (if any) in the right column.
    """
    st.subheader("数据信息")
    left, right = st.columns(2)

    with left:
        st.write(f"数据集形状: {df.shape[0]} 行, {df.shape[1]} 列")
        st.write("数据类型分布:")
        st.dataframe(df.dtypes.value_counts().to_frame(name="数量"))

    with right:
        st.write("缺失值统计:")
        null_counts = df.isnull().sum()
        null_counts = null_counts[null_counts > 0]
        if null_counts.empty:
            st.info("数据集中没有缺失值")
        else:
            st.dataframe(null_counts.to_frame(name="缺失值数量"))

def handle_missing_values(df, strategy, num_fill_value=None, cat_fill_value=None):
    """Handle missing values by dropping rows or imputing columns.

    Args:
        df: Input DataFrame. A copy is processed, so the caller's frame is
            never mutated (the original mutated `df` in place when filling).
        strategy: "删除" drops rows containing NaN; "填充" imputes.
        num_fill_value: "均值"/"中位数"/"众数", or a constant used as the
            fill value for numeric columns.
        cat_fill_value: "众数", or a constant fill value for categorical columns.

    Returns:
        The processed DataFrame. For an unrecognized strategy the input is
        returned unchanged (the original implicitly returned None here).
    """
    df = df.copy()  # BUG FIX: the fill branch used to mutate the caller's frame
    numeric_cols = df.select_dtypes(include=['number']).columns
    categorical_cols = df.select_dtypes(exclude=['number']).columns

    if strategy == "删除":
        df_clean = df.dropna()
        st.info(f"删除缺失值后，数据集形状: {df_clean.shape[0]} 行, {df_clean.shape[1]} 列")
        return df_clean

    if strategy == "填充":
        # Numeric columns: statistical strategies or a user-supplied constant
        if not numeric_cols.empty:
            if num_fill_value == "均值":
                num_imputer = SimpleImputer(strategy='mean')
            elif num_fill_value == "中位数":
                num_imputer = SimpleImputer(strategy='median')
            elif num_fill_value == "众数":
                num_imputer = SimpleImputer(strategy='most_frequent')
            else:  # anything else is treated as a constant fill value
                num_imputer = SimpleImputer(strategy='constant', fill_value=num_fill_value)

            df[numeric_cols] = num_imputer.fit_transform(df[numeric_cols])

        # Categorical columns: mode or a user-supplied constant
        if not categorical_cols.empty:
            if cat_fill_value == "众数":
                cat_imputer = SimpleImputer(strategy='most_frequent')
            else:
                cat_imputer = SimpleImputer(strategy='constant', fill_value=cat_fill_value)

            df[categorical_cols] = cat_imputer.fit_transform(df[categorical_cols])

        st.info("缺失值填充完成")
        return df

    # BUG FIX: unknown strategies used to fall through and return None
    return df

def encode_categorical(df, encoding_type):
    """Encode categorical columns of a DataFrame.

    Args:
        df: Input DataFrame.
        encoding_type: "独热编码" (one-hot) or "标签编码" (label encoding).

    Returns:
        A DataFrame with categorical columns encoded, or the input
        unchanged when there are no categorical columns.

    Fixes vs. original:
      * OneHotEncoder's `sparse` kwarg was renamed `sparse_output` in
        sklearn 1.2 and removed in 1.4; the new name is tried first.
      * The one-hot frame now carries df.index so pd.concat aligns rows
        (a fresh RangeIndex misaligned rows after any prior dropna).
      * An unknown encoding_type no longer raises UnboundLocalError.
    """
    numeric_cols = df.select_dtypes(include=['number']).columns
    categorical_cols = df.select_dtypes(exclude=['number']).columns

    if categorical_cols.empty:
        st.info("数据集中没有分类变量，无需编码")
        return df

    if encoding_type == "独热编码":
        try:
            encoder = OneHotEncoder(sparse_output=False, drop='first')  # sklearn >= 1.2
        except TypeError:
            encoder = OneHotEncoder(sparse=False, drop='first')  # older sklearn
        encoded = encoder.fit_transform(df[categorical_cols])
        encoded_df = pd.DataFrame(
            encoded,
            columns=encoder.get_feature_names_out(categorical_cols),
            index=df.index  # keep row alignment for the concat below
        )
        df_encoded = pd.concat([df[numeric_cols], encoded_df], axis=1)
    elif encoding_type == "标签编码":
        df_encoded = df.copy()
        for col in categorical_cols:
            le = LabelEncoder()
            df_encoded[col] = le.fit_transform(df_encoded[col])
    else:
        # Unknown encoding type: return the data untouched instead of crashing
        df_encoded = df

    st.info(f"分类变量编码完成，使用 {encoding_type}")
    return df_encoded
# 2. 添加多项式特征处理函数
def create_polynomial_features(df, degree, include_bias):
    """Generate polynomial features from a numeric DataFrame.

    Args:
        df: Input DataFrame (assumed all-numeric — TODO confirm at call site).
        degree: Polynomial degree passed to PolynomialFeatures.
        include_bias: Whether to include the constant bias column.

    Returns:
        Tuple of (expanded DataFrame, fitted PolynomialFeatures transformer).
    """
    poly = PolynomialFeatures(degree=degree, include_bias=include_bias)
    df_poly = pd.DataFrame(
        poly.fit_transform(df),
        columns=poly.get_feature_names_out(df.columns),
        index=df.index  # BUG FIX: preserve the original index so later joins/concats align
    )
    st.info(f"已生成{degree}阶多项式特征，特征数量从{df.shape[1]}增加到{df_poly.shape[1]}")
    return df_poly, poly
def scale_features(df, scaling_type):
    """Scale numeric columns of a DataFrame in place.

    Args:
        df: Input DataFrame (numeric columns are overwritten with scaled values).
        scaling_type: "标准化" (StandardScaler) or "归一化" (MinMaxScaler).

    Returns:
        Tuple (df, scaler). scaler is None when there are no numeric
        columns or when scaling_type is not recognized.

    BUG FIX: the original raised UnboundLocalError on `scaler` when
    scaling_type was neither of the two known values.
    """
    numeric_cols = df.select_dtypes(include=['number']).columns
    scaler = None  # stays None for unknown scaling types / no numeric columns

    if not numeric_cols.empty:
        if scaling_type == "标准化":
            scaler = StandardScaler()
        elif scaling_type == "归一化":
            scaler = MinMaxScaler()

        if scaler is not None:
            df[numeric_cols] = scaler.fit_transform(df[numeric_cols])

        st.info(f"特征缩放完成，使用 {scaling_type}")
        return df, scaler
    else:
        st.info("数据集中没有数值型变量，无需缩放")
        return df, None

def split_data(df, target_col, test_size, random_state):
    """Split *df* into train/test feature and target sets.

    Stratifies on the target when it looks like a classification target
    (categorical dtype or fewer than 10 unique values).

    Returns (X_train, X_test, y_train, y_test).
    """
    X = df.drop(columns=[target_col])
    y = df[target_col]

    # `pd.api.types.is_categorical_dtype` is deprecated since pandas 2.1;
    # an isinstance check on the dtype is the supported replacement.
    stratify = y if isinstance(y.dtype, pd.CategoricalDtype) or y.nunique() < 10 else None

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state,
        stratify=stratify
    )

    st.info(f"数据集划分完成: 训练集 {X_train.shape[0]} 样本, 测试集 {X_test.shape[0]} 样本")
    return X_train, X_test, y_train, y_test

def train_model(model_type, model_name, params, X_train, y_train=None):
    """Instantiate the configured estimator and fit it.

    Looks up the estimator class path in MODEL_CONFIG, builds it with the
    user-chosen *params* and fits on X_train (plus y_train for supervised
    model types). Returns the fitted model, or None on failure (the error
    is shown in the UI).
    """
    try:
        # Resolve the dotted class path declared in MODEL_CONFIG and build
        # the estimator with the user-selected hyper-parameters.
        class_path = MODEL_CONFIG[model_type][model_name]["model"]
        estimator = load_model(class_path)(**params)

        # Clustering / dimensionality reduction are unsupervised: no target.
        needs_target = model_type not in ("聚类", "降维")

        with st.spinner("正在训练模型..."):
            if needs_target:
                estimator.fit(X_train, y_train)
            else:
                estimator.fit(X_train)

        st.success("模型训练完成!")
        return estimator
    except Exception as exc:
        st.error(f"模型训练失败: {str(exc)}")
        return None

def evaluate_classification(model, X_test, y_test):
    """Evaluate a fitted classifier on the held-out test set.

    Renders weighted accuracy/precision/recall/F1 metrics, a text
    classification report, a confusion-matrix heatmap, a ROC curve (binary
    problems with predict_proba only) and, when the model exposes
    feature_importances_, a top-20 feature-importance bar chart.
    """
    st.subheader("分类模型评估结果")

    # Predict on the held-out set.
    y_pred = model.predict(X_test)

    # Weighted metrics; zero_division=0 suppresses sklearn's undefined-metric
    # warning for labels with no predicted samples (the reported value is
    # unchanged: undefined metrics were already counted as 0).
    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred, average='weighted', zero_division=0)
    recall = recall_score(y_test, y_pred, average='weighted', zero_division=0)
    f1 = f1_score(y_test, y_pred, average='weighted', zero_division=0)

    col1, col2, col3, col4 = st.columns(4)
    col1.metric("准确率", f"{accuracy:.4f}")
    col2.metric("精确率", f"{precision:.4f}")
    col3.metric("召回率", f"{recall:.4f}")
    col4.metric("F1值", f"{f1:.4f}")

    # Per-class report
    st.text("详细分类报告:")
    report = classification_report(y_test, y_pred)
    st.text(report)

    # Confusion matrix heatmap
    st.subheader("混淆矩阵")
    cm = confusion_matrix(y_test, y_pred)
    fig, ax = plt.subplots(figsize=(10, 8))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', ax=ax)
    ax.set_xlabel('Predicted Labels')
    ax.set_ylabel('True Labels')
    ax.set_title('Confusion Matrix')
    st.pyplot(fig)

    # ROC curve (only for probability-capable models on binary problems)
    try:
        if hasattr(model, "predict_proba"):
            st.subheader("ROC曲线")
            if len(np.unique(y_test)) > 2:
                st.info("ROC曲线仅支持二分类问题，多分类问题未显示ROC曲线。")
            else:
                y_score = model.predict_proba(X_test)[:, 1]
                fpr, tpr, _ = roc_curve(y_test, y_score)
                roc_auc = auc(fpr, tpr)

                fig, ax = plt.subplots(figsize=(10, 8))
                ax.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC Curve (area = {roc_auc:.2f})')
                ax.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
                ax.set_xlim([0.0, 1.0])
                ax.set_ylim([0.0, 1.05])
                # Fixed axis label: was mistakenly "Fake Positive Rate".
                ax.set_xlabel('False Positive Rate')
                ax.set_ylabel('True Positive Rate')
                ax.set_title('ROC Curve')
                ax.legend(loc="lower right")
                st.pyplot(fig)
    except Exception as e:
        st.warning(f"无法生成ROC曲线: {str(e)}")

    # Feature importance (tree-based models and similar)
    try:
        if hasattr(model, "feature_importances_"):
            st.subheader("特征重要性")
            importances = model.feature_importances_
            indices = np.argsort(importances)[::-1]  # descending importance
            feature_names = X_test.columns

            # Show at most the 20 most important features.
            top_n = 20
            top_indices = indices[:top_n]
            top_importances = importances[top_indices]
            top_feature_names = [feature_names[i] for i in top_indices]

            fig, ax = plt.subplots(figsize=(10, 8))
            ax.bar(range(len(top_indices)), top_importances)
            ax.set_xticks(range(len(top_indices)))
            ax.set_xticklabels(top_feature_names, rotation=90)
            ax.set_xlabel('Feature')
            ax.set_ylabel('Importance')
            ax.set_title(f'Top {top_n} Feature Importance')
            st.pyplot(fig)
    except Exception as e:
        st.warning(f"无法生成特征重要性图: {str(e)}")

def evaluate_regression(model, X_test, y_test):
    """Evaluate a fitted regressor on the held-out test set.

    Renders MSE/MAE/R² metrics, a true-vs-predicted scatter with the ideal
    y = x line, a residual plot, and — when the model exposes
    feature_importances_ — a top-20 feature-importance bar chart.
    """
    st.subheader("回归模型评估结果")

    # Predict on the held-out set.
    y_pred = model.predict(X_test)

    # Standard regression metrics
    mse = mean_squared_error(y_test, y_pred)
    mae = mean_absolute_error(y_test, y_pred)
    r2 = r2_score(y_test, y_pred)

    col1, col2, col3 = st.columns(3)
    col1.metric("均方误差 (MSE)", f"{mse:.4f}")
    col2.metric("平均绝对误差 (MAE)", f"{mae:.4f}")
    col3.metric("决定系数 (R²)", f"{r2:.4f}")

    # True vs predicted scatter; points on the red dashed line are perfect.
    st.subheader("预测值与真实值对比")
    fig, ax = plt.subplots(figsize=(10, 8))
    ax.scatter(y_test, y_pred, alpha=0.5)
    ax.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'r--')
    ax.set_xlabel('True Value')
    ax.set_ylabel('Predicted Value')
    ax.set_title('True vs Predicted')
    st.pyplot(fig)

    # Residual plot ("Residue" in the original labels was a mistranslation
    # of "residual").
    st.subheader("Residual Plot")
    residuals = y_test - y_pred
    fig, ax = plt.subplots(figsize=(10, 8))
    ax.scatter(y_pred, residuals, alpha=0.5)
    ax.axhline(y=0, color='r', linestyle='--')
    ax.set_xlabel('Predicted Value')
    ax.set_ylabel('Residuals')
    ax.set_title('Residual Plot')
    st.pyplot(fig)

    # Feature importance — capped at the top 20 features for consistency with
    # evaluate_classification (plotting every feature is unreadable after
    # polynomial expansion).
    try:
        if hasattr(model, "feature_importances_"):
            st.subheader("特征重要性")
            importances = model.feature_importances_
            indices = np.argsort(importances)[::-1][:20]  # top 20, descending
            feature_names = X_test.columns

            fig, ax = plt.subplots(figsize=(10, 8))
            ax.bar(range(len(indices)), importances[indices])
            ax.set_xticks(range(len(indices)))
            ax.set_xticklabels([feature_names[i] for i in indices], rotation=90)
            ax.set_xlabel('Feature')
            ax.set_ylabel('Importance')
            ax.set_title('Feature Importance')
            st.pyplot(fig)
    except Exception as e:
        st.warning(f"无法生成特征重要性图: {str(e)}")

def evaluate_clustering(model, X, labels):
    """Show clustering quality (silhouette score) and a 2-D scatter of the
    cluster assignments, using the first two feature columns as axes."""
    st.subheader("聚类模型评估结果")

    # Silhouette score can fail (e.g. a single cluster) — report, don't crash.
    try:
        score = silhouette_score(X, labels)
        st.metric("轮廓系数", f"{score:.4f}")
        st.info("轮廓系数越接近1，表示聚类效果越好；接近-1，表示聚类效果较差。")
    except Exception as exc:
        st.warning(f"无法计算轮廓系数: {str(exc)}")

    # 2-D scatter of the first two features, coloured by cluster label.
    st.subheader("聚类结果可视化")
    if X.shape[1] < 2:
        st.warning("数据维度不足，无法可视化聚类结果。")
        return

    fig, ax = plt.subplots(figsize=(10, 8))
    points = ax.scatter(X.iloc[:, 0], X.iloc[:, 1], c=labels, cmap='viridis', alpha=0.7)
    ax.set_xlabel(f'特征 1: {X.columns[0]}')
    ax.set_ylabel(f'特征 2: {X.columns[1]}')
    ax.set_title('聚类结果可视化')
    plt.colorbar(points, label='聚类标签')
    st.pyplot(fig)

def evaluate_dim_reduction(model, X, original_labels=None):
    """Visualise the output of a fitted dimensionality-reduction model.

    Plots the first two reduced components (coloured by *original_labels*
    when given) and, for models exposing explained_variance_ratio_ (PCA),
    a per-component explained-variance bar chart.
    """
    st.subheader("降维结果可视化")

    # Apply the fitted transform.
    X_reduced = model.transform(X)

    if X_reduced.shape[1] >= 2:
        # Create the figure only when there is something to draw — the old
        # code built it before the check and leaked an empty figure when the
        # reduced data had fewer than 2 dimensions.
        fig, ax = plt.subplots(figsize=(10, 8))
        if original_labels is not None:
            # Colour points by the original labels.
            scatter = ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=original_labels,
                                 cmap='viridis', alpha=0.7)
            plt.colorbar(scatter, label='原始标签')
        else:
            # No labels available: single colour.
            ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c='blue', alpha=0.7)

        ax.set_xlabel('降维特征 1')
        ax.set_ylabel('降维特征 2')
        ax.set_title(f'{model.__class__.__name__} 降维结果')
        st.pyplot(fig)
    else:
        st.warning("降维后的维度不足，无法可视化。")

    # Explained variance ratio (PCA-style models only).
    if hasattr(model, "explained_variance_ratio_"):
        st.subheader("解释方差比例")
        explained_variance = model.explained_variance_ratio_
        total_explained = sum(explained_variance)

        fig, ax = plt.subplots(figsize=(10, 6))
        ax.bar(range(1, len(explained_variance) + 1), explained_variance)
        ax.set_xlabel('主成分')
        ax.set_ylabel('解释方差比例')
        ax.set_title(f'各主成分解释方差比例 (累计: {total_explained:.2%})')
        st.pyplot(fig)

def main():
    """Streamlit entry point: upload → preprocess → model selection →
    train/evaluate → save/load, driven entirely by sidebar widgets."""
    # Initialise session-state keys so values survive Streamlit reruns.
    if 'df' not in st.session_state:
        st.session_state.df = None
    if 'preprocessed_df' not in st.session_state:
        st.session_state.preprocessed_df = None
    if 'model' not in st.session_state:
        st.session_state.model = None
    if 'scaler' not in st.session_state:
        st.session_state.scaler = None
    # Step 1: data upload
    st.sidebar.header("步骤1: 数据上传")
    uploaded_file = st.sidebar.file_uploader("上传CSV或Excel文件", type=["csv", "xlsx"])

    if uploaded_file is not None:
        try:
            # Load the data, dispatching on file extension.
            if uploaded_file.name.endswith('.csv'):
                st.session_state.df = pd.read_csv(uploaded_file)
            else:  # Excel
                st.session_state.df = pd.read_excel(uploaded_file)

            st.success("数据上传成功!")

            # Preview the raw data.
            st.subheader("数据预览")
            st.dataframe(st.session_state.df.head(10))

            # Summary info (helper defined earlier in this file).
            get_data_info(st.session_state.df)

            # Step 2: preprocessing
            st.sidebar.header("步骤2: 数据预处理")

            # Missing-value handling
            if st.session_state.df.isnull().any().any():
                st.subheader("缺失值处理")
                missing_strategy = st.radio("选择缺失值处理方式:", ["保留", "删除", "填充"])

                if missing_strategy == "删除":
                    processed_df = handle_missing_values(st.session_state.df, "删除")
                elif missing_strategy == "填充":
                    col1, col2 = st.columns(2)
                    with col1:
                        num_fill = st.selectbox("数值型特征填充方式:", ["均值", "中位数", "众数", "常数"])
                        num_value = None
                        if num_fill == "常数":
                            num_value = st.number_input("请输入填充常数:", value=0.0)

                    with col2:
                        cat_fill = st.selectbox("分类型特征填充方式:", ["众数", "常数"])
                        cat_value = None
                        if cat_fill == "常数":
                            cat_value = st.text_input("请输入填充字符串:", value="未知")

                    # NOTE(review): `num_value` and `cat_fill` are collected above
                    # but never passed here, and `cat_value` is the 4th positional
                    # argument — verify this matches handle_missing_values' signature.
                    processed_df = handle_missing_values(
                        st.session_state.df.copy(), 
                        "填充", 
                        num_fill, 
                        cat_value
                    )
                else:  # keep missing values as-is
                    processed_df = st.session_state.df.copy()
                    st.info("未处理缺失值，某些模型可能无法处理缺失值。")
            else:
                processed_df = st.session_state.df.copy()
                st.info("数据中没有缺失值，无需处理。")

            # Categorical-variable encoding
            categorical_cols = processed_df.select_dtypes(exclude=['number']).columns
            if not categorical_cols.empty:
                st.subheader("分类变量编码")
                encoding_type = st.selectbox("选择编码方式:", ["不编码", "独热编码", "标签编码"])

                if encoding_type != "不编码":
                    processed_df = encode_categorical(processed_df, encoding_type)

            # ---- Pick the target variable early; split features from target ----
            # Holders for features/target (used by the polynomial step below).
            features_df = None
            target_series = None
            selected_poly_cols = None  # columns chosen for polynomial expansion

            # Only classification/regression need a target; clustering/dim-reduction do not.
            st.subheader("目标变量设置（仅分类/回归模型）")
            if st.radio("是否需要目标变量（用于分类/回归）:", ["是", "否"], index=0) == "是":
                # Offer only plausible targets (drop near-all-unique columns, e.g. IDs).
                valid_target_cols = [col for col in processed_df.columns 
                                    if processed_df[col].nunique() < len(processed_df)*0.8]  # drop (near-)unique columns
                target_col = st.selectbox("选择目标变量:", valid_target_cols)

                # Separate features and target (the target takes part in no feature transform).
                features_df = processed_df.drop(columns=[target_col]).copy()
                target_series = processed_df[target_col].copy()
                st.success(f"已选择目标变量: {target_col}，特征变量共 {features_df.shape[1]} 个")
            else:
                # Clustering / dimensionality reduction: every column is a feature.
                features_df = processed_df.copy()
                st.info("未选择目标变量（适用于聚类/降维模型）")
            # --------------------------------------------------------------------

            # ---- Polynomial feature generation (applied to features_df only) ----
            st.subheader("多项式特征生成")
            use_polynomial = st.checkbox("生成多项式特征", value=False, help="创建多项式特征和交互项（仅建议数值型特征）")

            if use_polynomial:
                # 1. Auto-recommend: numeric features only (categoricals don't
                # suit polynomial expansion).
                numeric_features = features_df.select_dtypes(include=['number']).columns.tolist()
                non_numeric_features = features_df.select_dtypes(exclude=['number']).columns.tolist()

                # Warn about categorical features being excluded by default.
                if non_numeric_features:
                    st.warning(f"以下分类特征不建议多项式转换，已默认排除: {', '.join(non_numeric_features)}")

                # 2. Let the user pick columns to transform (numeric ones pre-selected).
                selected_poly_cols = st.multiselect(
                    "选择需要多项式转换的特征列（至少1列）:",
                    options=features_df.columns.tolist(),
                    default=numeric_features,
                    help="建议仅选择数值型特征（如年龄、收入等）"
                )

                # Require at least one selected column.
                if len(selected_poly_cols) == 0:
                    st.error("请至少选择1列特征进行多项式转换！")
                    st.stop()  # abort this run to avoid downstream errors

                # 3. Polynomial parameter widgets
                col1, col2 = st.columns(2)
                with col1:
                    poly_degree = st.slider("多项式阶数:", 2, 4, 2, help="阶数越高特征越多，建议不超过4阶")
                with col2:
                    include_bias = st.checkbox("包含偏差项（全1列）", value=False, help="一般建议不包含，避免多重共线性")

                # 4. Transform only the selected columns.
                # Columns to transform
                features_to_transform = features_df[selected_poly_cols].copy()
                # Columns left untouched (e.g. encoded categoricals)
                features_no_transform = features_df.drop(columns=selected_poly_cols).copy()

                # Run the polynomial expansion.
                poly_transformer = PolynomialFeatures(degree=poly_degree, include_bias=include_bias)
                transformed_arr = poly_transformer.fit_transform(features_to_transform)
                transformed_df = pd.DataFrame(
                    transformed_arr,
                    columns=poly_transformer.get_feature_names_out(selected_poly_cols),
                    index=features_df.index  # keep index aligned for the concat below
                )

                # Merge untouched features with the transformed ones.
                features_df = pd.concat([features_no_transform, transformed_df], axis=1)

                # Report the result.
                st.success(
                    f"多项式转换完成：\n"
                    f"原始 {len(selected_poly_cols)} 列 → 转换后 {transformed_df.shape[1]} 列\n"
                    f"总特征数：{features_df.shape[1]} 列"
                )
                st.session_state.poly_transformer = poly_transformer  # keep the fitted transformer
            else:
                st.session_state.poly_transformer = None
                st.info("未启用多项式特征生成")
            # --------------------------------------------------------------------

            # ---- Re-join features and target back into processed_df ----
            # With a target: concat features + target; without: features only.
            if target_series is not None:
                processed_df = pd.concat([features_df, target_series], axis=1)
            else:
                processed_df = features_df.copy()
            # --------------------------------------------------------------------

            # Feature scaling (processed_df now holds features [+ target]; only
            # the selected columns were polynomial-expanded).
            st.subheader("特征缩放")
            scaling_type = st.selectbox("选择特征缩放方式:", ["不缩放", "标准化", "归一化"])

            if scaling_type != "不缩放":
                # Scale feature columns only (exclude the target if present).
                if target_series is not None:
                    # Split off the target and scale just the features.
                    scale_features_df = processed_df.drop(columns=[target_col]).copy()
                    scale_features_df, scaler = scale_features(scale_features_df, scaling_type)
                    # Re-join the scaled features with the target.
                    processed_df = pd.concat([scale_features_df, target_series], axis=1)
                else:
                    processed_df, scaler = scale_features(processed_df, scaling_type)
                st.session_state.scaler = scaler
            else:
                st.session_state.scaler = None
                st.info("未进行特征缩放")

            # Persist the preprocessed frame in session state.
            st.session_state.preprocessed_df = processed_df

            # Preview the preprocessed data.
            st.subheader("预处理后的数据预览")
            st.dataframe(processed_df.head(10))

            # Step 3: model selection and parameter configuration
            st.sidebar.header("步骤3: 模型选择与参数配置")
            model_type = st.sidebar.selectbox("选择模型类型:", list(MODEL_CONFIG.keys()))

            # ---- Reuse the earlier target choice (avoid asking twice) ----
            if model_type in ["分类", "回归"]:
                # Reuse the previously selected target if any; otherwise ask now.
                if target_series is None:
                    st.subheader("选择目标变量")
                    target_col = st.selectbox("请选择目标变量:", processed_df.columns)
                else:
                    st.subheader("目标变量确认")
                    st.write(f"当前目标变量: **{target_col}**（可重新选择）")
                    target_col = st.selectbox("重新选择目标变量（可选）:", processed_df.columns, index=processed_df.columns.get_loc(target_col))

                # Train/test split settings
                st.subheader("数据集划分")
                col1, col2 = st.columns(2)
                with col1:
                    test_size = st.slider("测试集比例:", 0.1, 0.5, 0.2, 0.05)
                with col2:
                    random_state = st.number_input("随机数种子:", 0, 1000, 42)

                # Split (processed_df now holds features + target correctly).
                X_train, X_test, y_train, y_test = split_data(
                    processed_df, target_col, test_size, random_state
                )
            # --------------------------------------------------------------------

            # Choose the concrete model.
            model_name = st.sidebar.selectbox("选择模型:", list(MODEL_CONFIG[model_type].keys()))

            # Model parameter widgets, driven by MODEL_CONFIG.
            st.subheader(f"{model_type} - {model_name} 参数配置")
            params = {}
            config_params = MODEL_CONFIG[model_type][model_name]["params"]

            for param, config in config_params.items():
                col1, col2 = st.columns([3, 1])
                with col1:
                    if config["type"] == "slider":
                        # NOTE(review): both branches of this isinstance check build
                        # the identical slider — the float/int split looks redundant.
                        if isinstance(config["min"], float):
                            params[param] = st.slider(
                                param, 
                                min_value=config["min"], 
                                max_value=config["max"], 
                                step=config["step"], 
                                value=config["default"],
                                help=config["help"]
                            )
                        else:
                            params[param] = st.slider(
                                param, 
                                min_value=config["min"], 
                                max_value=config["max"], 
                                step=config["step"], 
                                value=config["default"],
                                help=config["help"]
                            )
                    elif config["type"] == "selectbox":
                        params[param] = st.selectbox(
                            param, 
                            options=config["options"], 
                            index=config["options"].index(config["default"]),
                            help=config["help"]
                        )
                    elif config["type"] == "checkbox":
                        params[param] = st.checkbox(
                            param, 
                            value=config["default"],
                            help=config["help"]
                        )
                    elif config["type"] == "number":
                        params[param] = st.number_input(
                            param, 
                            value=config["default"],
                            help=config["help"]
                        )

            # Step 4: model training
            st.sidebar.header("步骤4: 模型训练与评估")
            if st.sidebar.button("训练模型"):
                if model_type in ["分类", "回归"]:
                    st.session_state.model = train_model(
                        model_type, model_name, params, X_train, y_train
                    )
                else:  # clustering / dim-reduction need no target
                    # Train on the full feature frame (target already excluded).
                    st.session_state.model = train_model(
                        model_type, model_name, params, processed_df
                    )

            # Model evaluation (runs on every rerun while a model is in state)
            if st.session_state.model is not None:
                st.subheader("模型评估结果")

                if model_type == "分类":
                    evaluate_classification(st.session_state.model, X_test, y_test)
                elif model_type == "回归":
                    evaluate_regression(st.session_state.model, X_test, y_test)
                elif model_type == "聚类":
                    # Predict cluster labels on the feature frame.
                    labels = st.session_state.model.predict(processed_df)
                    evaluate_clustering(st.session_state.model, processed_df, labels)
                elif model_type == "降维":
                    # Colour the projection by the target variable when one exists.
                    original_labels = target_series if target_series is not None else None
                    evaluate_dim_reduction(st.session_state.model, processed_df, original_labels)

            # ---- Model saving (records polynomial columns / target name too) ----
            st.sidebar.header("模型保存与加载")

            # Save the current model.
            if st.session_state.model is not None:
                if st.sidebar.button("保存模型"):
                    try:
                        # Bundle model + preprocessing artefacts for later restoration.
                        model_data = {
                            'model': st.session_state.model,
                            'scaler': st.session_state.scaler,
                            'poly_transformer': st.session_state.poly_transformer,
                            'selected_poly_cols': selected_poly_cols,  # columns used for polynomial expansion
                            'target_column': target_col if target_series is not None else None,  # target name
                            'model_type': model_type,
                            'model_name': model_name,
                            'columns': processed_df.columns.tolist()
                        }

                        # Serialise to an in-memory buffer.
                        buffer = io.BytesIO()
                        joblib.dump(model_data, buffer)
                        buffer.seek(0)

                        # Offer the buffer as a download.
                        st.sidebar.download_button(
                            label="下载模型",
                            data=buffer,
                            file_name=f"{model_type}_{model_name}.pkl",
                            mime="application/octet-stream"
                        )
                        st.sidebar.success("模型已准备好下载（含预处理配置）")
                    except Exception as e:
                        st.sidebar.error(f"保存模型失败: {str(e)}")
            # --------------------------------------------------------------------

            # Load a previously saved model (full pipeline restoration could be
            # built on the extra keys stored in model_data).
            uploaded_model = st.sidebar.file_uploader("上传模型文件", type=["pkl"])
            if uploaded_model is not None:
                try:
                    model_data = joblib.load(uploaded_model)
                    st.session_state.model = model_data['model']
                    st.session_state.scaler = model_data.get('scaler')
                    st.session_state.poly_transformer = model_data.get('poly_transformer')
                    st.sidebar.success("模型加载成功（含预处理配置）")
                except Exception as e:
                    st.sidebar.error(f"加载模型失败: {str(e)}")
        except Exception as e:
            st.error(f"数据预处理失败: {str(e)}")


    else:
        st.info("请在左侧上传CSV或Excel格式的数据集开始分析")

# Script entry point: launch the Streamlit app.
if __name__ == "__main__":
    main()
    