import time
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
from sklearn.preprocessing import StandardScaler, OneHotEncoder, OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from imblearn.pipeline import Pipeline as ImbPipeline
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
import pickle
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, GroupKFold, StratifiedKFold
import numpy as np
from scipy.stats import randint, uniform
from sklearn.model_selection import ParameterGrid, GroupKFold, KFold
from sklearn.base import clone
from tqdm import tqdm
from sklearn.svm import LinearSVC
from scipy.stats import loguniform



# Hyperparameter grids for exhaustive GridSearchCV, keyed by model short name.
# Parameter names use the pipeline step prefix 'classifier__'.
PARAM_GRIDS = {
    'rf': {
        # Forest size plus the usual regularisation knobs; keep the search
        # focused on 'balanced' class weights and 'sqrt' feature sampling.
        'classifier__n_estimators': [300, 400, 500],
        'classifier__max_depth': [20, None],
        'classifier__min_samples_split': [2, 5],
        'classifier__min_samples_leaf': [1, 2],
        'classifier__max_features': ['sqrt'],
        'classifier__class_weight': ['balanced'],
    },
    'xgb': {
        # Moderate tree counts with smallish learning rates for generalisation;
        # row/column subsampling plus gamma/min_child_weight to curb overfitting.
        'classifier__n_estimators': [200, 300, 400],
        'classifier__max_depth': [4, 6, 8],
        'classifier__learning_rate': [0.05, 0.1, 0.15],
        'classifier__subsample': [0.7, 0.8, 0.9],
        'classifier__colsample_bytree': [0.7, 0.8, 0.9],
        'classifier__gamma': [0, 0.1, 0.3],
        'classifier__min_child_weight': [1, 3, 5],
    },
    'svm': {
        # C on a coarse log scale; generous max_iter to dodge convergence issues.
        'classifier__C': [0.01, 0.1, 1, 10, 100],
        'classifier__class_weight': ['balanced', None],
        'classifier__max_iter': [1000, 5000, 10000],
    },
}

# Parameter distributions for RandomizedSearchCV, keyed by model short name.
PARAM_DISTS = {
    'rf': {
        'classifier__n_estimators': randint(200, 500),
        # Either unlimited depth (None) or an integer depth in [5, 30).
        'classifier__max_depth': [None] + list(range(5, 30)),
        'classifier__min_samples_split': randint(2, 20),
        'classifier__min_samples_leaf': randint(1, 10),
        'classifier__max_features': ['sqrt', 'log2', None],
        'classifier__class_weight': ['balanced', 'balanced_subsample'],
    },
    'xgb': {
        'classifier__n_estimators': randint(200, 500),
        'classifier__max_depth': randint(3, 10),
        'classifier__learning_rate': uniform(0.01, 0.3),   # continuous learning rate
        'classifier__subsample': uniform(0.5, 0.5),        # [0.5, 1.0)
        'classifier__colsample_bytree': uniform(0.5, 0.5),
        'classifier__gamma': uniform(0, 0.5),
        'classifier__min_child_weight': randint(1, 10),
    },
    'svm': {
        # A log-uniform C covers small-to-large regularisation strengths evenly.
        'classifier__C': loguniform(1e-4, 1e3),
        # class_weight is tunable, though only two sensible options exist.
        'classifier__class_weight': ['balanced', None],
        # Generous iteration caps to avoid convergence problems.
        'classifier__max_iter': [1000, 5000, 10000],
    },
}

class BaseClassificationModel:
    """Base class for binary classification models.

    Builds an imblearn pipeline (preprocessing -> random under-sampling ->
    SMOTE -> classifier) and supports plain fitting or grid/random
    hyperparameter search with optional group-aware cross-validation.

    Subclasses must implement ``_create_model`` and set ``self.use_scaler``.
    """

    def __init__(self, random_state=42, **kwargs):
        self.random_state = random_state
        self.kwargs = kwargs          # extra keyword args forwarded to the estimator
        self.model = None             # fitted pipeline (set by fit/load)
        self.preprocessor = None
        self.train_time = None        # wall-clock fit duration in seconds
        self.best_params_ = None      # classifier params recorded after fit/tuning
        # Bug fix: the base class previously never defined use_scaler even
        # though fit() reads it; default to False (subclasses overwrite this
        # after calling super().__init__(), so behavior is unchanged for them).
        self.use_scaler = False

    def _create_model(self):
        """Return an unfitted estimator instance. Subclasses must implement."""
        print("创建模型")
        raise NotImplementedError

    def _create_preprocessor(self, X, use_scaler=False):
        """Build a ColumnTransformer matched to the model family.

        :param X: feature DataFrame used to detect column dtypes
        :param use_scaler: True for scale-sensitive models (e.g. SVM):
            StandardScaler + OneHotEncoder; False for tree models:
            numeric passthrough + OrdinalEncoder
        :return: an unfitted ColumnTransformer
        """
        numerical_cols = X.select_dtypes(include=['int64', 'float64']).columns
        categorical_cols = X.select_dtypes(include=['object', 'category']).columns

        if use_scaler:
            return ColumnTransformer([
                ('num', StandardScaler(), numerical_cols),
                ('cat', OneHotEncoder(handle_unknown='ignore'), categorical_cols),
            ])
        # Tree models: ordinal-encode categoricals; unseen categories map to -1.
        return ColumnTransformer([
            ('num', 'passthrough', numerical_cols),
            ('cat', OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1), categorical_cols),
        ])

    def _create_pipeline(self, X, use_scaler=False):
        """Assemble the preprocessing + resampling + classifier pipeline.

        The imblearn Pipeline is required so the resamplers run only during
        fit, never at predict time.
        """
        preprocessor = self._create_preprocessor(X, use_scaler)
        model = self._create_model()

        return ImbPipeline([
            ('preprocessor', preprocessor),
            # Under-sample the majority class to a 0.5 minority/majority ratio
            # before SMOTE synthesizes minority samples.
            # Bug fix: random_state was hard-coded to 42 instead of honouring
            # self.random_state (default is still 42, so behavior is unchanged
            # for default-constructed models).
            ('under', RandomUnderSampler(sampling_strategy=0.5, random_state=self.random_state)),
            ('smote', SMOTE(random_state=self.random_state)),
            ('classifier', model),
        ])

    def fit(self, train_df, label_col="cogimp_label", tune_params=False, param_grid=None,
            search_type="grid", cv_folds=5, n_iter=50, group_col="subject_id", scoring="recall", 
            n_jobs=-1, sensitive_analysis=False):
        """Train the model, optionally running hyperparameter search first.

        :param train_df: training DataFrame containing features and label
        :param label_col: label column name
        :param tune_params: whether to run hyperparameter search
        :param param_grid: custom grid/distribution; when None it falls back
            to PARAM_GRIDS / PARAM_DISTS keyed by the model's short name
        :param search_type: "grid" or "random"
        :param cv_folds: number of cross-validation folds
        :param n_iter: iterations for random search
        :param search_type: "grid" or "random"
        :param group_col: column for grouped CV; ignored if missing or during
            a sensitivity analysis (then it remains in the feature matrix —
            presumably intentional; confirm with callers)
        :param scoring: scoring metric for the search
        :param n_jobs: parallel workers
        :param sensitive_analysis: sensitivity-analysis mode (no grouping)
        """
        # Grouped CV applies only when the column exists and this is not a
        # sensitivity analysis.
        use_groups = bool(group_col) and group_col in train_df.columns and not sensitive_analysis

        cols_to_drop = [label_col]
        if use_groups:
            cols_to_drop.append(group_col)
        X_train = train_df.drop(columns=cols_to_drop)
        y_train = train_df[label_col].values
        groups = train_df[group_col].values if use_groups else None

        self.model = self._create_pipeline(X_train, use_scaler=self.use_scaler)

        start_time = time.time()
        self.best_params_ = None

        if tune_params:
            if param_grid is None:
                param_grid = self._default_search_space(search_type)
            self._tune_hyperparameters(
                X_train, y_train, param_grid,
                search_type, cv_folds, n_iter, scoring=scoring, n_jobs=n_jobs, groups=groups,
                sensitive_analysis=sensitive_analysis
            )
        else:
            self.model.fit(X_train, y_train)

        self._capture_best_params()
        self.train_time = time.time() - start_time
        print(f"训练完成，耗时: {self.train_time:.2f}s")

    def _default_search_space(self, search_type):
        """Look up the default grid/distribution for this model subclass.

        The model short name is derived from the class name, e.g.
        RFClassificationModel -> 'rf'.
        """
        model_name = self.__class__.__name__.replace("ClassificationModel", "").lower()
        if search_type == "grid":
            if model_name in PARAM_GRIDS:
                return PARAM_GRIDS[model_name]
            raise ValueError(f"没有找到 {model_name} 的网格，请检查 PARAM_GRIDS")
        if search_type == "random":
            if model_name in PARAM_DISTS:
                return PARAM_DISTS[model_name]
            raise ValueError(f"没有找到 {model_name} 的分布，请检查 PARAM_DISTS")
        raise ValueError("search_type 只能是 'grid' 或 'random'")

    def _capture_best_params(self):
        """Record the classifier step's effective parameters on best_params_."""
        if hasattr(self.model, "get_params"):
            self.best_params_ = {k: v for k, v in self.model.get_params().items()
                                 if "classifier" in k}

    def _tune_hyperparameters(self, X, y, param_grid, search_type, cv_folds, n_iter, scoring="recall", n_jobs=-1, groups=None, sensitive_analysis=False):
        """Run grid/random search on the pipeline and keep the best estimator.

        Supports sensitivity analysis (no grouping) and group-aware CV.
        """
        print("开始调参")

        # ----- choose the CV splitter -----
        if sensitive_analysis:
            # Sensitivity analysis: no grouping. Stratify when more than one
            # class is present, otherwise fall back to a plain KFold.
            if len(np.unique(y)) > 1:
                cv = StratifiedKFold(n_splits=cv_folds, shuffle=True, random_state=self.random_state)
            else:
                cv = KFold(n_splits=cv_folds, shuffle=True, random_state=self.random_state)
        elif groups is not None and len(groups) == len(y):
            # Keep all samples of one group inside the same fold.
            cv = GroupKFold(n_splits=cv_folds)
        else:
            cv = KFold(n_splits=cv_folds, shuffle=True, random_state=self.random_state)

        # ----- build the search object -----
        if search_type == "grid":
            search = GridSearchCV(
                self.model, param_grid, cv=cv,
                scoring=scoring, n_jobs=n_jobs, verbose=1
            )
        elif search_type == "random":
            search = RandomizedSearchCV(
                self.model, param_distributions=param_grid,
                n_iter=n_iter, cv=cv,
                scoring=scoring, n_jobs=n_jobs,
                random_state=self.random_state, verbose=1
            )
        else:
            raise ValueError("search_type 必须是 'grid' 或 'random'")

        # Pass groups only when group-aware CV is actually in effect.
        if groups is not None and not sensitive_analysis and len(groups) == len(y):
            search.fit(X, y, groups=groups)
        else:
            search.fit(X, y)

        self.model = search.best_estimator_
        print(f"最佳参数: {search.best_params_}")
        print(f"最佳交叉验证分数: {search.best_score_:.4f}")

    def model_predict(self, X_input):
        """Return class probabilities from the fitted pipeline.

        NOTE(review): LinearSVC (used by the SVM subclass for the linear
        kernel) exposes no predict_proba — confirm callers never hit that
        configuration through this method.
        """
        return self.model.predict_proba(X_input)

    def evaluate(self, test_df, label_col="cogimp_label"):
        """Score the fitted model on a held-out DataFrame.

        :param test_df: test DataFrame containing features and the label
        :param label_col: label column name
        :return: dict with accuracy/precision/recall/f1/ROC-AUC, a rough
            model-size figure, timings, and sklearn report objects
        """
        X_test = test_df.drop(columns=[label_col])
        y_test = test_df[label_col].values

        start_time = time.time()
        y_pred = self.model.predict(X_test)
        pred_time = time.time() - start_time

        # Consistency fix: use accuracy_score (already imported) instead of
        # the hand-rolled (y_pred == y_test).mean(); the value is identical.
        acc = accuracy_score(y_test, y_pred)
        precision = precision_score(y_test, y_pred)
        recall = recall_score(y_test, y_pred)
        f1 = f1_score(y_test, y_pred)

        # ROC-AUC needs a continuous score: prefer probabilities, fall back
        # to the decision function, otherwise skip the metric.
        try:
            if hasattr(self.model, "predict_proba"):
                y_score = self.model.predict_proba(X_test)[:, 1]
            elif hasattr(self.model, "decision_function"):
                y_score = self.model.decision_function(X_test)
            else:
                y_score = None

            roc = roc_auc_score(y_test, y_score) if y_score is not None else None
        except Exception as e:
            print(f"ROC 计算失败: {e}")
            roc = None

        # Rough model-size proxy: total tree nodes for forests; for XGBoost
        # this is the boosted-round count (NOTE: a round count, not a true
        # parameter count).
        clf = self.model.named_steps['classifier']
        if hasattr(clf, 'estimators_'):  # RandomForest
            n_params = sum(tree.tree_.node_count for tree in clf.estimators_)
        elif hasattr(clf, 'booster'):    # XGBClassifier
            n_params = clf.get_booster().num_boosted_rounds()
        else:
            n_params = None

        return {
            "acc": acc,
            "f1": f1,
            "precision": precision,
            "recall": recall,
            "roc_auc": roc,
            "params": n_params,
            "best_params": self.best_params_,
            "classification_report": classification_report(y_test, y_pred, digits=4),
            "confusion_matrix": confusion_matrix(y_test, y_pred),
            "train_time": self.train_time,
            "inference_time": pred_time,
        }

    def save(self, path):
        """Serialize the fitted pipeline to *path* with pickle."""
        with open(path, "wb") as f:
            pickle.dump(self.model, f)

    def load(self, path):
        """Load a previously saved pipeline from *path*.

        NOTE(review): pickle.load executes arbitrary code on load — only
        load files from trusted sources.
        """
        # Cleanup: removed the redundant function-local `import pickle`
        # (already imported at module top).
        with open(path, "rb") as f:
            self.model = pickle.load(f)
        print(f"模型已从 {path} 加载完成。")



from sklearn.ensemble import RandomForestClassifier

class RFClassificationModel(BaseClassificationModel):
    """Random-forest variant of the base classification model."""

    def __init__(self, random_state=42, **kwargs):
        super().__init__(random_state, **kwargs)
        # Tree ensembles are scale-invariant, so no standardisation is needed.
        self.use_scaler = False

    def _create_model(self):
        """Build an unfitted RandomForestClassifier with forwarded kwargs."""
        return RandomForestClassifier(random_state=self.random_state, **self.kwargs)


from xgboost import XGBClassifier

class XGBClassificationModel(BaseClassificationModel):
    """XGBoost variant of the base classification model.

    Tree-based, so no feature scaling is applied. Extra constructor options
    (e.g. scale_pos_weight, eval_metric) can be supplied via **kwargs.
    """

    def __init__(self, random_state=42, **kwargs):
        super().__init__(random_state, **kwargs)
        self.use_scaler = False  # tree model: no scaler needed

    def _create_model(self):
        """Build an unfitted XGBClassifier with forwarded kwargs."""
        # Cleanup: removed dangling commented-out options (scale_pos_weight,
        # eval_metric) that sat at broken indentation after this method.
        return XGBClassifier(
            random_state=self.random_state,
            **self.kwargs
        )

from sklearn.svm import SVC

class SVMClassificationModel(BaseClassificationModel):
    """SVM variant of the base classification model.

    Uses the scaling/one-hot preprocessor; supports an 'rbf' kernel (SVC
    with probabilities) or a 'linear' kernel (LinearSVC).
    """

    def __init__(self, random_state=42, **kwargs):
        super().__init__(random_state, **kwargs)
        self.use_scaler = True  # SVMs are scale-sensitive
        # Pop 'kernel' out of kwargs so it is not passed twice below.
        self.kernel = self.kwargs.pop("kernel", "rbf")

    def _create_model(self):
        """Return an SVC ('rbf') or LinearSVC ('linear') per self.kernel."""
        if self.kernel == "linear":
            # NOTE(review): LinearSVC has no predict_proba, so the base
            # class's model_predict would fail for this kernel — confirm
            # callers only use predict()/decision_function here.
            return LinearSVC(random_state=self.random_state, **self.kwargs)
        if self.kernel == "rbf":
            return SVC(
                kernel="rbf",
                probability=True,
                random_state=self.random_state,
                **self.kwargs
            )
        raise ValueError(f"Unsupported kernel: {self.kernel}")

