import numpy as np
import shap
from sklearn.model_selection import StratifiedKFold
from copy import deepcopy

class SHAP_RFECV:
    """Recursive feature elimination with cross-validation, ranked by SHAP.

    At each step the estimator is evaluated with stratified K-fold CV on the
    current feature subset, per-sample |SHAP| values are pooled across the
    test folds, and the feature with the smallest mean |SHAP| is dropped.
    The subset with the best mean CV score seen at any step is retained.

    Parameters
    ----------
    estimator : object
        An estimator supported by ``shap.TreeExplainer`` (tree ensembles);
        must implement ``fit(X, y)`` and ``score(X, y)``.
    min_features_to_select : int, default=1
        Stop eliminating once this many features remain.
    cv : int, default=5
        Number of stratified folds.
    random_state : int, default=42
        Seed for fold shuffling (same folds at every elimination step).
    verbose : int, default=1
        If truthy, print the feature count and mean score at each step.

    Attributes (after ``fit``)
    --------------------------
    best_features_ / selected_features_ : list of column names retained.
    best_score_, best_n_features_ : best mean CV score and its subset size.
    grid_scores_, n_features_list_ : per-step mean scores / subset sizes.
    support_ : boolean mask over the original columns (True = kept).
    ranking_ : int ranks over the original columns; 1 = kept, higher =
        eliminated earlier (sklearn RFE convention).
    """

    def __init__(self, estimator, min_features_to_select=1, cv=5, random_state=42, verbose=1):
        self.estimator = estimator
        self.min_features_to_select = min_features_to_select
        self.cv = cv
        self.random_state = random_state
        self.verbose = verbose

    @staticmethod
    def _fold_shap_values(model, X_test):
        """Return per-sample |SHAP| values with shape (n_samples, n_features).

        BUGFIX: the original code applied ``np.abs`` to the raw return of
        ``explainer.shap_values`` before the ``isinstance(..., list)`` check,
        which converted a per-class list into an ndarray and made the
        classifier branch unreachable.  Select the class axis first, then
        take the absolute value.
        """
        explainer = shap.TreeExplainer(model)
        shap_values = explainer.shap_values(X_test)
        if isinstance(shap_values, list):
            # Older shap API: list of per-class arrays for classifiers —
            # use the positive class (index 1).
            shap_values = shap_values[1]
        elif getattr(shap_values, "ndim", 2) == 3:
            # Newer shap API stacks classes on the last axis:
            # (n_samples, n_features, n_classes) — take the positive class.
            shap_values = shap_values[..., 1]
        return np.abs(shap_values)

    def fit(self, X, y):
        """Run SHAP-ranked recursive feature elimination.

        Parameters
        ----------
        X : pandas.DataFrame of shape (n_samples, n_features)
        y : pandas.Series of class labels, aligned with ``X``
            (positional ``.iloc`` indexing is used on both).

        Returns
        -------
        self
        """
        X = X.copy()
        self.n_features_ = X.shape[1]
        self.grid_scores_ = []
        self.n_features_list_ = []
        self.best_score_ = -np.inf
        self.best_features_ = list(X.columns)
        self.best_n_features_ = self.n_features_

        current_features = list(X.columns)
        eliminated = []  # features in removal order (earliest first)
        while len(current_features) >= self.min_features_to_select:
            scores = []
            shap_values_list = []
            skf = StratifiedKFold(n_splits=self.cv, shuffle=True, random_state=self.random_state)
            for train_idx, test_idx in skf.split(X[current_features], y):
                X_train = X.iloc[train_idx][current_features]
                X_test = X.iloc[test_idx][current_features]
                y_train, y_test = y.iloc[train_idx], y.iloc[test_idx]
                # Fresh copy per fold so folds don't share fitted state.
                model = deepcopy(self.estimator)
                model.fit(X_train, y_train)
                scores.append(model.score(X_test, y_test))
                shap_values_list.append(self._fold_shap_values(model, X_test))

            mean_score = np.mean(scores)
            self.grid_scores_.append(mean_score)
            self.n_features_list_.append(len(current_features))
            if self.verbose:
                print(f"特征数: {len(current_features)}, 平均准确率: {mean_score:.4f}")
            if mean_score > self.best_score_:
                self.best_score_ = mean_score
                self.best_features_ = list(current_features)
                self.best_n_features_ = len(current_features)
            if len(current_features) == self.min_features_to_select:
                break

            # Mean |SHAP| per feature, pooled over all test folds; drop the
            # least important feature.
            mean_shap = np.mean(np.concatenate(shap_values_list, axis=0), axis=0)
            drop_feature = current_features[int(np.argmin(mean_shap))]
            eliminated.append(drop_feature)
            current_features.remove(drop_feature)

        self.selected_features_ = list(self.best_features_)

        # BUGFIX: the original initialized support_/ranking_ to all-ones and
        # never updated them.  Derive them here: kept features get rank 1;
        # eliminated features rank higher the earlier they were removed.
        best = set(self.best_features_)
        self.support_ = np.array([col in best for col in X.columns], dtype=bool)
        rank_of = {feature: 1 for feature in best}
        next_rank = 2
        for feature in reversed(eliminated):
            if feature not in rank_of:
                rank_of[feature] = next_rank
                next_rank += 1
        self.ranking_ = np.array([rank_of.get(col, 1) for col in X.columns], dtype=int)
        return self

    def transform(self, X):
        """Return ``X`` restricted to the selected feature columns."""
        return X[self.selected_features_]

    def fit_transform(self, X, y):
        """Fit on (``X``, ``y``) and return the reduced ``X``."""
        self.fit(X, y)
        return self.transform(X)