import time
import pickle
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.preprocessing import StandardScaler, OneHotEncoder, OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, GroupKFold, KFold
import numpy as np

# Hyperparameter grids for regression models, keyed by model short name.
# Keys must match the lowercased class-name prefix (class name minus
# "RegressionModel") looked up in BaseRegressionModel.fit when tune_params=True.
# Parameter names are prefixed "regressor__" to target the pipeline step.
REGRESSION_PARAM_GRIDS = {
    'linear': {
        'regressor__fit_intercept': [True, False],
    },
    'rf': {
        'regressor__n_estimators': [200, 400, 800],       # number of trees: more is more stable, but slower to train
        'regressor__max_depth': [None, 20, 30, 50],       # limits tree depth; None = unlimited
        'regressor__min_samples_split': [2, 5, 10],       # min samples required to split an internal node
        'regressor__min_samples_leaf': [1, 2, 4],         # min samples per leaf; guards against overfitting
        'regressor__max_features': ['sqrt', 'log2', 0.5], # features considered per split; 0.5 = half of them
        'regressor__bootstrap': [True]                    # whether to bootstrap-sample rows (True recommended)
    },
    'gbr': {
        'regressor__n_estimators': [100, 200, 300, 500, 1000],   # lean toward larger counts; more trees are more stable
        'regressor__max_depth': [3, 4, 5, 6, 8],           # keep the range narrow and fine-tune
        'regressor__learning_rate': [0.01, 0.05, 0.1, 0.2], # small steps are stabler; large steps train fast but overfit
        'regressor__subsample': [0.6, 0.8, 0.9, 1.0]        # values < 1 add randomness that helps generalization
    }

}

class BaseRegressionModel:
    """Base class wrapping a sklearn preprocessing + regressor Pipeline.

    Subclasses implement ``_create_model`` to return an unfitted estimator,
    and may override ``self.use_scaler`` (defaults to ``False`` here) to
    choose between scaled (StandardScaler + OneHotEncoder) and passthrough
    (OrdinalEncoder) preprocessing.
    """

    def __init__(self, random_state=42, **kwargs):
        """
        :param random_state: seed forwarded to the estimator and CV splitters
        :param kwargs: extra keyword arguments forwarded to the sklearn estimator
        """
        self.random_state = random_state
        self.kwargs = kwargs
        self.model = None        # fitted Pipeline, set by fit() or load()
        self.preprocessor = None
        self.train_time = None   # wall-clock seconds of the last fit() call
        # Default so fit() never hits AttributeError when a subclass forgets
        # to set this; subclasses may override after super().__init__().
        self.use_scaler = False

    def _create_model(self):
        """Subclass hook: return an unfitted sklearn estimator instance."""
        raise NotImplementedError

    def _create_preprocessor(self, X, use_scaler=False):
        """Build a ColumnTransformer over numeric and categorical columns.

        :param X: feature DataFrame used only to infer column dtypes
        :param use_scaler: if True, standardize numerics and one-hot encode
            categoricals; otherwise pass numerics through and ordinal-encode.
        """
        # np.number also matches int32/float32 columns, which the earlier
        # explicit ['int64', 'float64'] selection silently dropped.
        numerical_cols = X.select_dtypes(include=[np.number]).columns
        categorical_cols = X.select_dtypes(include=['object', 'category']).columns

        if use_scaler:
            preprocessor = ColumnTransformer([
                ('num', StandardScaler(), numerical_cols),
                ('cat', OneHotEncoder(handle_unknown='ignore'), categorical_cols)
            ])
        else:
            preprocessor = ColumnTransformer([
                ('num', 'passthrough', numerical_cols),
                # unknown_value=-1 keeps transform() from failing on categories
                # unseen during fit.
                ('cat', OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1), categorical_cols)
            ])
        return preprocessor

    def _create_pipeline(self, X, use_scaler=False):
        """Compose preprocessor + estimator into a two-step Pipeline."""
        preprocessor = self._create_preprocessor(X, use_scaler)
        model = self._create_model()

        pipeline = Pipeline([
            ('preprocessor', preprocessor),
            ('regressor', model)
        ])
        return pipeline

    def fit(
        self,
        train_df,
        label_col="cogimp_label",
        tune_params=False,
        search_type="grid",
        cv_folds=5,
        n_iter=50,
        scoring="neg_mean_squared_error",
        n_jobs=-1,
        sensitive_analysis=False
    ):
        """
        Train the regression pipeline.

        :param train_df: training DataFrame containing features and label
        :param label_col: name of the label column
        :param tune_params: whether to run hyperparameter search
        :param search_type: "grid" or "random"
        :param cv_folds: number of cross-validation folds
        :param n_iter: number of iterations for randomized search
        :param scoring: sklearn scoring string for the search
        :param n_jobs: number of parallel jobs (-1 = all cores)
        :param sensitive_analysis: if True, skip grouped CV and keep
            "subject_id" as a feature (sensitivity-analysis mode)
        :raises ValueError: when tune_params is True but no parameter grid
            exists for this model class
        """

        # Separate features from the label; "subject_id" is a grouping key,
        # not a feature, unless running a sensitivity analysis.
        cols_to_drop = [label_col]
        if "subject_id" in train_df.columns and not sensitive_analysis:
            cols_to_drop.append("subject_id")
        X_train = train_df.drop(columns=cols_to_drop)
        y_train = train_df[label_col].values

        # Group labels for GroupKFold, so rows of the same subject never
        # end up in both the train and validation fold.
        groups = None
        if "subject_id" in train_df.columns and not sensitive_analysis:
            groups = train_df["subject_id"].values

        # Build the preprocessing + estimator pipeline.
        self.model = self._create_pipeline(X_train, use_scaler=self.use_scaler)

        start_time = time.time()

        if tune_params:
            # Derive the grid key from the class name, e.g.
            # RFRegressionModel -> "rf".
            model_name = self.__class__.__name__.replace("RegressionModel", "").lower()
            if model_name in REGRESSION_PARAM_GRIDS:
                param_grid = REGRESSION_PARAM_GRIDS[model_name]
            else:
                raise ValueError(f"没有找到 {model_name} 的参数网格，请检查 REGRESSION_PARAM_GRIDS")

            self._tune_hyperparameters(
                X_train, y_train, groups, param_grid,
                search_type, cv_folds, n_iter, scoring, n_jobs,
                sensitive_analysis=sensitive_analysis
            )
        else:
            self.model.fit(X_train, y_train)

        self.train_time = time.time() - start_time
        print(f"训练完成，耗时: {self.train_time:.2f}s")

    def _tune_hyperparameters(
        self,
        X, y, groups, param_grid,
        search_type, cv_folds, n_iter,
        scoring="neg_mean_squared_error",
        n_jobs=-1,
        sensitive_analysis=False
    ):
        """
        Run grid or randomized hyperparameter search; on success, replaces
        ``self.model`` with the refitted best estimator.
        """
        print("开始调参")

        # ===== CV setup =====
        if sensitive_analysis:
            cv = KFold(n_splits=cv_folds, shuffle=True, random_state=self.random_state)
        else:
            if groups is not None and len(groups) == len(y):
                # Materialize the splits: a raw .split() generator is
                # single-use, which is fragile if the search iterates cv
                # more than once.
                cv = list(GroupKFold(n_splits=cv_folds).split(X, y, groups=groups))
            else:
                cv = KFold(n_splits=cv_folds, shuffle=True, random_state=self.random_state)

        # ===== Search strategy =====
        if search_type == "grid":
            search = GridSearchCV(
                self.model, param_grid, cv=cv,
                scoring=scoring, n_jobs=n_jobs, verbose=1
            )
        else:
            search = RandomizedSearchCV(
                self.model, param_grid, n_iter=n_iter, cv=cv,
                scoring=scoring, n_jobs=n_jobs,
                random_state=self.random_state, verbose=1
            )

        # ===== Fit the search and keep the best pipeline =====
        search.fit(X, y)
        self.model = search.best_estimator_
        print(f"最佳参数: {search.best_params_}")
        # NOTE(review): the printed value is -best_score_, which is only an
        # MSE when scoring="neg_mean_squared_error" — confirm if other
        # scorings are ever used.
        print(f"最佳交叉验证 MSE: {-search.best_score_:.4f}")

    def evaluate(self, test_df, label_col="cogimp_label"):
        """Predict on test_df and return regression metrics plus timings.

        :param test_df: test DataFrame containing features and the label
        :param label_col: name of the label column
        :returns: dict with mse, mae, r2, rmse, train_time, inference_time
        """
        # NOTE(review): only the label is dropped here; if fit() dropped
        # "subject_id" and test_df still contains it, the transform-time
        # columns will not match — confirm callers strip it beforehand.
        X_test = test_df.drop(columns=[label_col])
        y_test = test_df[label_col].values

        start_time = time.time()
        y_pred = self.model.predict(X_test)
        pred_time = time.time() - start_time

        mse = mean_squared_error(y_test, y_pred)
        mae = mean_absolute_error(y_test, y_pred)
        r2 = r2_score(y_test, y_pred)
        rmse = np.sqrt(mse)

        metrics = {
            "mse": mse,
            "mae": mae,
            "r2": r2,
            "rmse": rmse,
            "train_time": self.train_time,
            "inference_time": pred_time,
        }
        return metrics

    def save(self, path):
        """Serialize the fitted pipeline to *path* with pickle."""
        with open(path, "wb") as f:
            pickle.dump(self.model, f)

    def load(self, path):
        """
        Load a previously saved model (pipeline) from a file.

        Security note: pickle.load executes arbitrary code — only load
        files you trust.

        :param path: path to the pickled model file
        """
        with open(path, "rb") as f:
            self.model = pickle.load(f)
        print(f"模型已从 {path} 加载完成。")


class LinearRegressionModel(BaseRegressionModel):
    """Ordinary least-squares regression wrapper (no feature scaling)."""

    def __init__(self, random_state=42, **kwargs):
        super().__init__(random_state, **kwargs)
        # OLS predictions are invariant to affine scaling of the inputs.
        self.use_scaler = False

    def _create_model(self):
        # Bug fix: the previous hard-coded fit_intercept=False silently
        # disabled the intercept (sklearn's default is True) whenever the
        # model was trained without tuning, and raised a duplicate-keyword
        # TypeError if callers passed fit_intercept via **kwargs. Let
        # self.kwargs (or the sklearn default) control it instead; the
        # 'linear' grid in REGRESSION_PARAM_GRIDS still tunes it when asked.
        return LinearRegression(**self.kwargs)


class RFRegressionModel(BaseRegressionModel):
    """Random-forest regression wrapper."""

    def __init__(self, random_state=42, **kwargs):
        super().__init__(random_state=random_state, **kwargs)
        # Tree ensembles are scale-invariant, so skip standardization.
        self.use_scaler = False

    def _create_model(self):
        """Return an unfitted RandomForestRegressor seeded with random_state."""
        return RandomForestRegressor(random_state=self.random_state, **self.kwargs)


class GBRRegressionModel(BaseRegressionModel):
    """Gradient-boosting regression wrapper."""

    def __init__(self, random_state=42, **kwargs):
        super().__init__(random_state=random_state, **kwargs)
        # Boosted trees need no input scaling either.
        self.use_scaler = False

    def _create_model(self):
        """Return an unfitted GradientBoostingRegressor seeded with random_state."""
        return GradientBoostingRegressor(random_state=self.random_state, **self.kwargs)
