from sklearn.feature_selection import SequentialFeatureSelector, VarianceThreshold, mutual_info_classif, SelectKBest
from sklearn.linear_model import LogisticRegression  # can be swapped for any classifier
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from utils.utils import *
class ForwardSelectionUtil:
    """Cross-validated forward feature selection for one data modality.

    Per CV fold the pipeline is: variance filter -> mutual-information
    pre-filter -> standardization -> sequential forward selection (SFS)
    scored by ROC AUC.  A feature is kept when its selection frequency,
    normalized by the most frequently selected feature, reaches the
    threshold.  Selections from successive ``extract_feature`` calls are
    merged into ``total_df`` on ``Patient_ID`` and the target column.
    """

    def __init__(self, dp, split_random_state, pic_save_path, feature_save_path=fs_feature_path):
        """
        Args:
            dp: data provider; must expose ``split_train_and_test(df)``
                returning ``(X_train, X_test, y_train, y_test, _, _)``.
            split_random_state: seed of the outer train/test split; used to
                namespace the output directories.
            pic_save_path: root directory for figures.
            feature_save_path: root directory for selected-feature CSVs.
        """
        # Accumulates the per-modality selections merged across calls.
        self.total_df = None
        self.feature_save_path = opj(feature_save_path, str(split_random_state))
        self.pic_save_path = opj(pic_save_path, str(split_random_state), 'feature_extract', 'fs')
        self.dp = dp

    def _select_in_fold(self, X_train, y_train, n_sfs_features):
        """Run the filter + SFS pipeline on one training fold.

        Returns:
            list[str]: feature names chosen by forward selection.
        """
        # Step 1: drop (near-)constant features.
        vt = VarianceThreshold(threshold=1e-5)
        X_vt = vt.fit_transform(X_train)
        vt_names = X_train.columns[vt.get_support()]

        # Step 2: mutual-information pre-filter.  Keep everything when the
        # problem is not high-dimensional; otherwise cap the count at 200,
        # but never request more features than survived Step 1 (asking
        # SelectKBest for k > n_features raises).
        if X_vt.shape[1] < X_vt.shape[0]:
            k = 'all'
        else:
            k = min(200, X_vt.shape[1])
        skb = SelectKBest(score_func=mutual_info_classif, k=k)
        X_mi = skb.fit_transform(X_vt, y_train)
        mi_names = vt_names[skb.get_support()]

        # Step 3: standardize so the logistic-regression SFS is scale-free.
        scaler = StandardScaler()
        X_scaled = pd.DataFrame(scaler.fit_transform(X_mi),
                                columns=mi_names, index=X_train.index)

        # Step 4: forward selection with ROC AUC as the criterion.
        model = LogisticRegression(solver='liblinear')
        sfs = SequentialFeatureSelector(model,
                                        n_features_to_select=n_sfs_features,
                                        direction='forward',
                                        scoring='roc_auc',
                                        cv=5,
                                        n_jobs=-1)
        sfs.fit(X_scaled, y_train)
        return X_scaled.columns[sfs.get_support()].tolist()

    def extract_feature(self, df, data_type, auc_threshold=0.9, n_sfs_features=5):
        """Select stable features for one modality and persist them.

        Args:
            df: modality dataframe containing 'Patient_ID', the target
                column, and feature columns.
            data_type: modality name; '<name>.csv' (selection) and
                '<name>_importance.csv' (stability scores) are written under
                ``feature_save_path``.
            auc_threshold: minimum normalized selection frequency (relative
                to the most stable feature) for a feature to be kept.
            n_sfs_features: number of features SFS picks in each fold.
        """
        # Only the overall training split is used for selection; the held-out
        # test rows never influence the chosen features.
        X, _, y, _, _, _ = self.dp.split_train_and_test(df.copy())
        skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

        # One score per fold for every raw feature: 1.0 selected, 0.0 not.
        feature_scores = {f: [] for f in X.columns}

        for train_idx, _val_idx in skf.split(X, y):
            X_train, y_train = X.iloc[train_idx], y.iloc[train_idx]
            selected = set(self._select_in_fold(X_train, y_train, n_sfs_features))
            for feat in feature_scores:
                feature_scores[feat].append(1.0 if feat in selected else 0.0)

        # Average selection frequency, normalized by the best feature so the
        # threshold is relative to the most stable one.  max_score > 0
        # because SFS always selects n_sfs_features features per fold.
        feature_avg_scores = {f: np.mean(scores) for f, scores in feature_scores.items()}
        max_score = max(feature_avg_scores.values())
        normalized_scores = {f: v / max_score for f, v in feature_avg_scores.items()}

        # Keep features whose normalized stability reaches the threshold.
        final_selected_features = [f for f, score in normalized_scores.items()
                                   if score >= auc_threshold]

        df_filtered = df.loc[:, ['Patient_ID', target_column] + final_selected_features]
        df_filtered = df_filtered.dropna(how='all', subset=final_selected_features)

        # Stability table — previously computed but discarded; persist it so
        # the selection can be audited.
        importance_df = pd.DataFrame({
            'Feature': list(normalized_scores.keys()),
            'Normalized_Importance': list(normalized_scores.values())
        }).sort_values(by='Normalized_Importance', ascending=False)

        md(self.feature_save_path)
        stem = data_type.replace('FeatureSummary', '')
        importance_df.to_csv(opj(self.feature_save_path, stem + '_importance.csv'), index=False)
        df_filtered.to_csv(opj(self.feature_save_path, stem + '.csv'), index=False)

        if self.total_df is None:
            self.total_df = df_filtered
        else:
            self.total_df = self.total_df.merge(df_filtered, how='left',
                                                on=['Patient_ID', target_column])

    def save_total_df(self):
        """Write the merged selections of all processed modalities to total.csv.

        Raises:
            ValueError: if no modality has been processed yet.
        """
        if self.total_df is None:
            raise ValueError('No features extracted yet; call extract_feature first.')
        md(self.feature_save_path)
        self.total_df.to_csv(opj(self.feature_save_path, 'total.csv'), index=False)