import time
import json
import pickle
import os
from datetime import datetime

import pandas as pd
# knn的两个方法（回归、分类）
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
# 正规方程（回归）、逻辑回归（分类）、梯度下降（回归）、拉索（回归L1正则化）、岭回归（回归L2正则化）
from sklearn.linear_model import LinearRegression, LogisticRegression, SGDRegressor, Lasso, Ridge
# 决策树（回归、分类）
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
# 随机森林（分类）、随机森林（回归）、adaboost（分类）、GBDT梯度提升树（分类）、GBDT梯度提升树（回归）
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, AdaBoostClassifier, \
    GradientBoostingClassifier, GradientBoostingRegressor
# 分割训练集、交叉验证网格搜索
from sklearn.model_selection import train_test_split, GridSearchCV
# 正则化
from sklearn.preprocessing import StandardScaler
# 分类：精确率、f1值、召回率、AUC值。          回归：均方根误差RMSE、平均绝对误差MAE、均方误差MSE
from sklearn.metrics import precision_score, f1_score, recall_score, roc_auc_score, root_mean_squared_error, \
    mean_absolute_error, mean_squared_error
from xgboost import XGBClassifier

import numpy as np
# 消除警告
import warnings
from sklearn.exceptions import FitFailedWarning
import config as co
# 查看样本数据
from collections import Counter
from copy import deepcopy
from sklearn.feature_selection import VarianceThreshold


class MethodIntegration:
    """
    Automated machine-learning helper for regression and classification.

    Workflow:
        1. ``select_models()`` picks one or several candidate estimators
           based on the problem type and simple data heuristics, and loads
           their hyper-parameter grids from ``config.PARAM``.
        2. ``find_best_parameter()`` grid-searches every candidate over a
           sweep of train/test splits and random seeds, scores each
           configuration, and returns the overall best one.
        3. ``save_results()`` persists the (JSON-serializable parts of the)
           results to disk.
    """

    def __init__(self, data, target, problem_type, more_model=False):
        """
        Validate inputs and store the modelling configuration.

        Args:
            data (pd.DataFrame): feature matrix.
            target (pd.Series): target variable.
            problem_type (str): 'Classifier' or 'Regression'.
            more_model (bool): if True, evaluate a pool of candidate models;
                otherwise pick a single heuristically chosen one. Default False.

        Raises:
            TypeError: when data/target are not the required pandas types.
        """
        if not isinstance(data, pd.DataFrame):
            raise TypeError('data 类型错误需要为pd.DataFrame对象')
        if not isinstance(target, pd.Series):
            # BUGFIX: message previously read 'taget'.
            raise TypeError('target 类型错误需要为pd.Series对象')
        self.data = data
        self.target = target
        self.problem_type = problem_type
        self.more_model = more_model

        self.es = None  # reserved for a fitted estimator (kept for compatibility)
        self.all_param_grids = None  # {model_name: param_grid}, set by get_param()
        self.all_model = None  # {model_name: estimator}, set by select_models()
        # Cross-validation fold count from config; a falsy value triggers a
        # dynamic fold count in _evaluate_config().
        self.cv = co.CV

    def find_best_parameter(self):
        """
        Sweep models x test-set fractions x random seeds, grid-searching each
        combination, and select the best configuration per model and overall.

        Returns:
            tuple:
                all_model_dict (dict): per model, its best configuration record
                    keyed by ``"random_state:R&test_size:T"`` plus the
                    (unfitted) estimator under key ``'model'``.
                best_model (dict): ``{best_model_name: all_model_dict[name]}``.

        Raises:
            ValueError: problem type is None, or no valid configuration found.
            RuntimeError: select_models() was not called first, or every
                split/seed combination failed for some model.
        """
        if self.problem_type is None:
            raise ValueError('problem_types is None')
        if not self.all_model:
            raise RuntimeError('模型列表为空，请先调用 select_models()')

        # Silence noisy but expected warnings from grid search.
        warnings.filterwarnings('ignore', category=UserWarning)
        warnings.filterwarnings('ignore', category=FitFailedWarning)

        all_model_dict = {}
        for model_name, model in self.all_model.items():
            results_dict = {}
            print(f'开始模型{model_name}训练')
            for test_size_frac in [0.1, 0.2, 0.3, 0.4]:
                for random_state in range(1, 5):
                    try:
                        record = self._evaluate_config(
                            model_name, model, test_size_frac, random_state)
                    except Exception as e:
                        # Report the failing split/seed so it can be debugged.
                        print(f"配置失败: random_state={random_state}, "
                              f"test_size={test_size_frac}, 错误: {str(e)}")
                        continue
                    if record is None:
                        # Grid search produced no usable score for this split.
                        continue
                    key = f"random_state:{random_state}&test_size:{test_size_frac}"
                    results_dict[key] = record

            if not results_dict:
                raise RuntimeError("所有训练配置均失败，请检查数据和参数设置")

            # Keep only the best split/seed per model; primary_metric is
            # normalized so that larger is always better.
            best_key = max(results_dict.keys(),
                           key=lambda k: results_dict[k]['primary_metric'])
            all_model_dict[model_name] = {best_key: results_dict[best_key],
                                          'model': model}

        if not all_model_dict:
            raise ValueError("没有找到有效的模型配置")

        # Rank models by their best primary_metric.
        best_configs = []
        for model_name, model_data in all_model_dict.items():
            config_key = next(iter(model_data.keys() - {'model'}))
            config = model_data[config_key]
            config['model_name'] = model_name
            best_configs.append(config)
        best_configs.sort(key=lambda x: x['primary_metric'], reverse=True)

        best_model_name = best_configs[0]['model_name']
        return all_model_dict, {best_model_name: all_model_dict[best_model_name]}

    def _evaluate_config(self, model_name, model, test_size_frac, random_state):
        """
        Run one train/test split + grid search and score the result.

        Returns:
            dict | None: the scored result record, or None when the grid
            search produced no valid score (all parameter combos failed).
        """
        # Stratified splitting only applies to classification targets.
        stratify = self.target if self.problem_type == 'Classifier' else None
        x_train, x_test, y_train, y_test = train_test_split(
            self.data, self.target,
            test_size=test_size_frac,
            random_state=random_state,
            stratify=stratify
        )

        # Standardize features; fit the scaler on the training fold only.
        scaler = StandardScaler()
        x_train = scaler.fit_transform(x_train)
        x_test = scaler.transform(x_test)

        # Dynamic CV folds when the configured value is falsy: at least 2,
        # at most 4, and never more than half the training samples.
        n_cv = self.cv or min(4, max(2, len(x_train) // 2))

        es_new = GridSearchCV(
            estimator=model,
            param_grid=self.all_param_grids[model_name],
            # BUGFIX: was 'raise', which aborted the whole split on one bad
            # parameter combo; np.nan skips invalid combos as intended.
            error_score=np.nan,
            cv=n_cv,
            n_jobs=-1,  # use all CPU cores
            verbose=1,
            return_train_score=True
        )
        es_new.fit(x_train, y_train)

        # All parameter combinations failed silently -> nothing usable.
        if np.isnan(es_new.best_score_):
            return None

        y_pre = es_new.predict(x_test)

        record = {
            'best_params': es_new.best_params_,
            'best_score': es_new.best_score_,
            'test_size': test_size_frac,
            'random_state': random_state
        }
        if self.problem_type == 'Classifier':
            self._score_classification(record, es_new, x_test, y_test, y_pre)
        else:
            self._score_regression(record, y_test, y_pre)
        return record

    @staticmethod
    def _score_classification(record, estimator, x_test, y_test, y_pre):
        """
        Fill classification metrics into ``record`` in place.

        Primary metric is test AUC; falls back to F1 when AUC is unavailable
        (no predict_proba, or multiclass target) so that NaN never poisons
        the max() ranking. NOTE: precision/recall/f1 use the default binary
        average — assumes a binary target; multiclass would need average=.
        """
        record['precision_score'] = precision_score(y_test, y_pre, zero_division=0)
        record['recall_score'] = recall_score(y_test, y_pre)
        record['f1_score'] = f1_score(y_test, y_pre)
        try:
            # BUGFIX: predict_proba was previously called unconditionally,
            # which crashed every regression configuration. Column 1 is the
            # positive-class probability (binary case).
            y_prob = estimator.predict_proba(x_test)[:, 1]
            record['test_auc'] = roc_auc_score(y_test, y_prob)
        except (AttributeError, IndexError, ValueError):
            record['test_auc'] = np.nan
        if np.isnan(record['test_auc']):
            record['primary_metric'] = record['f1_score']
        else:
            record['primary_metric'] = record['test_auc']

    @staticmethod
    def _score_regression(record, y_test, y_pre):
        """
        Fill regression metrics into ``record`` in place.

        Primary metric combines standardized RMSE and MAE with the weights
        from config, negated so that larger is better (matching the
        classification path). Falls back to plain -RMSE when the combined
        score cannot be computed (e.g. RMSE == MAE gives zero spread).

        BUGFIX: the original try/except/else overwrote the weighted metric
        (and the raw test_rmse) with a z-scored RMSE on success, and left
        primary_metric unset on failure.
        """
        rmse = root_mean_squared_error(y_test, y_pre)
        record['test_rmse'] = rmse  # always the raw RMSE
        try:
            mae = mean_absolute_error(y_test, y_pre)
            record['test_mae'] = mae
            spread = np.std([rmse, mae])
            if spread == 0:
                # Standardization is undefined when RMSE == MAE.
                raise ZeroDivisionError('RMSE 与 MAE 相等，无法标准化')
            center = (rmse + mae) / 2
            z_rmse = (rmse - center) / spread
            z_mae = (mae - center) / spread
            record['primary_metric'] = -(z_rmse * co.RMSE_WEIGHT + z_mae * co.MAE_WEIGHT)
        except Exception:
            # BUGFIX: message previously blamed "逻辑回归" (logistic regression).
            print('回归模型评分计算有误，取RMSE值为默认值')
            record['primary_metric'] = -rmse

    def select_models(self):
        """
        Choose candidate model(s) for the configured problem type and load
        their parameter grids.

        Sets ``self.all_model`` and, via ``get_param()``,
        ``self.all_param_grids``.

        Raises:
            ValueError: unknown problem type, or empty model selection.
        """
        models = {}

        if self.problem_type == 'Classifier':
            # Binary targets enable probability-based linear models below.
            is_binary = len(np.unique(self.target)) == 2
            if self.more_model:
                # Candidate pool; several entries are intentionally disabled.
                models = {
                    'KNeighborsClassifier': KNeighborsClassifier(),
                    # 'DecisionTreeClassifier': DecisionTreeClassifier(),
                    # 'RandomForestClassifier': RandomForestClassifier(),
                    # 'GradientBoostingClassifier': GradientBoostingClassifier(),
                    'XGBClassifier': XGBClassifier(),
                }
                if is_binary:
                    models['LogisticRegression'] = LogisticRegression()
                    # models['AdaBoostClassifier'] = AdaBoostClassifier()
            else:
                # Single heuristically chosen classifier.
                best_model = self._select_best_classifier(self.data, self.target)
                models[best_model[0]] = best_model[1]

        elif self.problem_type == 'Regression':
            if self.more_model:
                models = {
                    'KNeighborsRegressor': KNeighborsRegressor(),
                    'LinearRegression': LinearRegression(),
                    'RandomForestRegressor': RandomForestRegressor(),
                    'SGDRegressor': SGDRegressor(),
                    'DecisionTreeRegressor': DecisionTreeRegressor(),
                    'GradientBoostingRegressor': GradientBoostingRegressor(),
                }
            else:
                # Single heuristically chosen regressor.
                best_model = self._select_best_regressor(self.data, self.target)
                models[best_model[0]] = best_model[1]
        else:
            raise ValueError("问题类型必须是'Classifier'或'Regression'")

        if len(models) == 0:
            raise ValueError('模型选择出现问题')
        self.all_model = models
        self.get_param()
        print(self.all_model)

    @staticmethod
    def _select_best_regressor(X, y, verbose=False):
        """
        Recommend a regression algorithm from data characteristics alone
        (no model is trained).

        Args:
            X (pd.DataFrame): feature matrix.
            y (pd.Series): target values.
            verbose (bool): print the analysis details and recommendation.

        Returns:
            tuple: (algorithm name, unfitted estimator instance, reason str)
        """
        results = {}

        # 1. Basic shape.
        n_samples, n_features = X.shape
        results['样本量'] = n_samples
        results['特征数'] = n_features

        # 2. Feature-type split (numeric vs everything else).
        numeric_cols = X.select_dtypes(include=['int64', 'float64']).columns.tolist()
        categorical_cols = X.select_dtypes(exclude=['int64', 'float64']).columns.tolist()
        results['数值特征数'] = len(numeric_cols)
        results['类别特征数'] = len(categorical_cols)

        # 3. Sparsity: fraction of exact zeros in the matrix.
        if n_features > 0:
            zero_ratio = (X == 0).sum().sum() / (n_samples * n_features)
            results['数据稀疏度(0值比例)'] = zero_ratio
            is_sparse = zero_ratio > 0.5
        else:
            is_sparse = False

        # 4. Linear correlation with the target (numeric features only).
        if len(numeric_cols) > 0:
            # Guard: an unnamed Series would break the corr column lookup.
            target = y if y.name is not None else y.rename('__target__')
            corr = pd.concat([X[numeric_cols], target], axis=1).corr()[target.name].drop(target.name)
            max_corr = corr.abs().max()
            results['最大线性相关系数'] = max_corr
            is_linear = max_corr > 0.5
        else:
            is_linear = False

        # 5. Outlier detection via the IQR fences.
        if len(numeric_cols) > 0:
            q1 = X[numeric_cols].quantile(0.25)
            q3 = X[numeric_cols].quantile(0.75)
            iqr = q3 - q1
            lower_bound = q1 - 1.5 * iqr
            upper_bound = q3 + 1.5 * iqr
            outliers = ((X[numeric_cols] < lower_bound) | (X[numeric_cols] > upper_bound)).sum()
            outlier_ratio = outliers.sum() / (n_samples * len(numeric_cols))
            results['离群点比例'] = outlier_ratio
            has_outliers = outlier_ratio > 0.05  # >5% of cells flagged
        else:
            has_outliers = False

        # 6. Target dispersion (coefficient of variation). Informational only
        #    for now: has_large_variation is not used by the rules below.
        y_mean = y.mean()
        y_std = y.std()
        if y_std > 0 and y_mean != 0:  # guard against division by zero
            y_cv = y_std / y_mean
            results['目标值变异系数'] = y_cv
            has_large_variation = y_cv > 1.0
        else:
            has_large_variation = False

        # 7. Rule-based recommendation.
        if is_linear:
            if n_samples > 100000 or is_sparse:
                algorithm_name = "SGDRegressor"
                algorithm_class = SGDRegressor()
                reason = "数据具有线性关系，且样本量大或特征稀疏，SGDRegressor效率高"
            else:
                algorithm_name = "LinearRegression"
                algorithm_class = LinearRegression()
                reason = "数据具有线性关系，线性回归是最优选择"
        else:
            if has_outliers:
                algorithm_name = "RandomForestRegressor"
                algorithm_class = RandomForestRegressor()
                reason = "数据是非线性关系，且存在离群点，随机森林对噪声更鲁棒"
            elif n_samples < 10000 and n_features < 20:
                if len(categorical_cols) > 0:
                    algorithm_name = "RandomForestRegressor"
                    algorithm_class = RandomForestRegressor()
                    reason = "数据是非线性关系，样本量小且有类别特征，随机森林能有效处理"
                else:
                    algorithm_name = "KNeighborsRegressor"
                    algorithm_class = KNeighborsRegressor()
                    reason = "数据是非线性关系，样本量小且特征维度低，KNN适合局部建模"
            elif n_samples > 100000 or is_sparse:
                algorithm_name = "SGDRegressor"
                algorithm_class = SGDRegressor()
                reason = "数据是非线性关系，但样本量大或特征稀疏，SGDRegressor效率更高"
            else:
                algorithm_name = "GradientBoostingRegressor"
                algorithm_class = GradientBoostingRegressor()
                reason = "数据是非线性关系，样本量中等，梯度提升树通常能提供更高精度"

        # 8. Override for categorical-heavy data (tree ensembles cope better).
        if len(categorical_cols) > 0 and algorithm_name != "RandomForestRegressor":
            if n_samples > 10000:
                algorithm_name = "GradientBoostingRegressor"
                algorithm_class = GradientBoostingRegressor()
                reason = "数据包含较多类别特征，样本量较大，梯度提升树能更好地处理类别特征"
            else:
                algorithm_name = "RandomForestRegressor"
                algorithm_class = RandomForestRegressor()
                reason = "数据包含较多类别特征，随机森林对类别特征的处理能力更强"

        if verbose:
            for key, value in results.items():
                print(f"{key}: {value}")
            print(f"推荐算法: {algorithm_name}")
            print(f"推荐理由: {reason}")

        return algorithm_name, algorithm_class, reason

    @staticmethod
    def _select_best_classifier(x, y):
        """
        Pick a classifier from simple data characteristics.

        Args:
            x (pd.DataFrame): feature matrix.
            y (pd.Series): target values.

        Returns:
            tuple: (algorithm name, unfitted estimator instance)
        """
        n_samples, n_features = x.shape
        is_binary = len(np.unique(y)) == 2

        if n_samples < 100:
            # Small samples: prefer logistic regression / a decision tree.
            # BUGFIX: key was 'DecisionTree', inconsistent with the
            # 'DecisionTreeClassifier' naming used everywhere else (and
            # presumably in co.PARAM — confirm against config).
            return ('LogisticRegression' if is_binary else 'DecisionTreeClassifier',
                    LogisticRegression() if is_binary else DecisionTreeClassifier())

        if n_features > 50:
            # High-dimensional features: logistic regression scales well.
            return 'LogisticRegression', LogisticRegression()

        # Default: random forest.
        return 'RandomForestClassifier', RandomForestClassifier()

    def get_param(self):
        """
        Look up the hyper-parameter grid for every selected model.

        Sets ``self.all_param_grids``.

        Raises:
            RuntimeError: no models selected, or a model has no grid in
                ``config.PARAM``.
        """
        if not self.all_model:
            raise RuntimeError("该列表模型为空，请重新检查")

        all_param = {}
        for name in self.all_model:
            if name not in co.PARAM:
                raise RuntimeError(f"没有找到该模型{name}的参数网格，请在PARAM中补充")
            all_param[name] = co.PARAM[name]
        self.all_param_grids = all_param

    def save_results(self, all_model_dict, best_model_dict, filepath=None):
        """
        Save the training results to a JSON file.

        Args:
            all_model_dict (dict): per-model results from find_best_parameter().
            best_model_dict (dict): best-model entry from find_best_parameter().
            filepath (str): target path; defaults to a timestamped file under
                ../results/.

        Returns:
            str: the path the results were written to.
        """
        if filepath is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filepath = f"../results/model_results_{timestamp}.json"

        # Make sure the target directory exists.
        directory = os.path.dirname(filepath)
        if directory and not os.path.exists(directory):
            os.makedirs(directory, exist_ok=True)
            print(f"创建目录: {directory}")

        save_data = {
            "metadata": {
                "problem_type": self.problem_type,
                "data_shape": list(self.data.shape),
                "target_info": {
                    "unique_values": len(self.target.unique()),
                    "is_binary": len(self.target.unique()) == 2
                    if self.problem_type == 'Classifier' else None
                },
                "saved_time": datetime.now().isoformat(),
                "more_model": self.more_model
            },
            "all_models": self._serializable_configs(all_model_dict),
            "best_model": self._serializable_configs(best_model_dict),
        }

        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(save_data, f, indent=2, ensure_ascii=False)

        print(f"结果已保存到: {filepath}")
        return filepath

    @staticmethod
    def _serializable_configs(model_dict):
        """
        Convert a model result dict into JSON-serializable config records,
        replacing the estimator object with its class name.
        """
        out = {}
        for model_name, model_data in model_dict.items():
            config_key = next(iter(model_data.keys() - {'model'}))
            config = model_data[config_key].copy()
            config['model_name'] = model_name
            if 'model' in model_data:
                config['model_type'] = type(model_data['model']).__name__
            out[model_name] = config
        return out


if __name__ == '__main__':
    # Demo: binary classification on the Titanic training data.
    df_data = pd.read_csv(r"E:\workspace\04_机器学习\决策树\train.csv")

    # Keep a handful of predictive columns and the survival label.
    x = df_data[['Pclass', 'Sex', 'Age', 'Fare', 'SibSp']].copy()
    y = df_data['Survived']

    # Impute missing ages with the column mean, then one-hot encode
    # the categorical columns.
    x['Age'] = x['Age'].fillna(x['Age'].mean())
    x = pd.get_dummies(x)

    # Select candidate models, grid-search them, and report the winners.
    m = MethodIntegration(x, y, 'Classifier', more_model=True)
    m.select_models()
    all_model_dict, best = m.find_best_parameter()
    print(all_model_dict)
    print(best)

    # Persist the run to a JSON file.
    results_file = m.save_results(all_model_dict, best)
    print(f"结果已保存到: {results_file}")

