from hyperopt import fmin, tpe, hp,  Trials, STATUS_OK
from hyperopt.pyll import scope    
from sklearn.ensemble import RandomForestRegressor    
from sklearn.model_selection import train_test_split ,cross_validate
from   sklearn.metrics import  mean_squared_error as mse  ,r2_score ,mean_absolute_error as mae 
import math  
import numpy as np  
import pandas as pd  
import mlflow  
import json  
from mlflow.tracking import MlflowClient   
from xgboost import XGBRegressor
import lightgbm as lgb
from catboost import CatBoostRegressor

# Module-level MLflow tracking client, shared by all cal_fmin_* searches
# to query the best child run of a finished hyperopt sweep.
client = MlflowClient()  


def calculate_metrics_reg(y_true, y_pred):
    """Return the tuple (MSE, MAE, RMSE, R^2) for a regression prediction."""
    squared_err = mse(y_true, y_pred)
    absolute_err = mae(y_true, y_pred)
    # RMSE is derived from MSE rather than recomputed from the residuals.
    root_err = np.sqrt(squared_err)
    determination = r2_score(y_true, y_pred)
    return squared_err, absolute_err, root_err, determination


##================================ 随机森林超参数搜索 ============================ ##
def rf_reg_search_space(n_min, n_max, leaf_min, leaf_max,
                        d_min, d_max, criterion_list,
                        max_features_list):
    """Build a hyperopt search space for RandomForestRegressor.

    Integer ranges (n_estimators, min_samples_leaf, max_depth) are sampled
    with quantized uniforms and wrapped in ``scope.int`` so the model sees
    real ints; ``max_features`` additionally offers ``None`` (all features).

    Raises:
        ValueError: if any (min, max) bound pair is inverted.
    """
    # Validate with explicit exceptions: `assert` is stripped under `python -O`.
    for name, lo, hi in (('n', n_min, n_max),
                         ('leaf', leaf_min, leaf_max),
                         ('d', d_min, d_max)):
        if lo > hi:
            raise ValueError(f"{name}_min must be <= {name}_max (got {lo} > {hi})")

    search_space = {
        'n_estimators': scope.int(hp.quniform('n_estimators', n_min, n_max, 1)),
        'min_samples_leaf': scope.int(hp.quniform('min_samples_leaf', leaf_min, leaf_max, 1)),
        'max_features': hp.choice('max_features', max_features_list + [None]),
        'max_depth': scope.int(hp.quniform('max_depth', d_min, d_max, 1)),
        'criterion': hp.choice('criterion', criterion_list)
    }

    return search_space


def cal_fmin_rf_reg(X_train, y_train, search_space, experiment_name, run_name, X_val=None, y_val=None, max_evals=20, cv=5):
    """
    Hyperparameter optimization for a RandomForestRegressor using hyperopt,
    with every trial tracked as a nested MLflow run.

    Parameters:
        X_train: training features (pandas DataFrame)
        y_train: training target (pandas Series)
        search_space: search space in hyperopt format (see rf_reg_search_space)
        experiment_name: MLflow experiment name
        run_name: MLflow run name; trials become "<run_name>_trial_<i>" child runs
        X_val: validation features (optional)
        y_val: validation target (optional)
        max_evals: maximum number of search evaluations
        cv: number of cross-validation folds (cv <= 1 switches to hold-out validation)

    Returns:
        dict with 'metrics' and 'params' of the best nested MLflow run,
        selected by 'val_loss' among this search's child runs.
    """
    mlflow.sklearn.autolog()

    # Create the experiment if it does not exist yet; only MLflow-specific
    # exceptions are swallowed (e.g. "experiment already exists").
    try:
        mlflow.create_experiment(experiment_name)
    except mlflow.exceptions.MlflowException:
        pass
    mlflow.set_experiment(experiment_name)

    def objective(params: dict):
        run_name_tmp = f"{run_name}_trial_{len(trials.trials)}"
        with mlflow.start_run(nested=True, run_name=run_name_tmp):
            model = RandomForestRegressor(**params, random_state=42, n_jobs=8)

            if cv > 1:
                scoring = {
                    'mse': 'neg_mean_squared_error',
                    'mae': 'neg_mean_absolute_error',
                    'r2': 'r2'
                }
                # Cross-validation evaluation (sklearn reports negated losses).
                results = cross_validate(model, X_train, y_train, cv=cv, scoring=scoring)

                mse_score = np.mean(-results['test_mse'])
                mae_score = np.mean(-results['test_mae'])
                rmse_score = np.sqrt(mse_score)
                r2 = np.mean(results['test_r2'])
                # Refit on the full training data so autolog records a final model.
                model.fit(X_train, y_train)
            else:
                # Hold-out evaluation on the validation set.
                model.fit(X_train, y_train)
                preds = model.predict(X_val)
                mse_score, mae_score, rmse_score, r2 = calculate_metrics_reg(y_val, preds)
                # After evaluation, refit on all available data to use it fully.
                model.fit(pd.concat([X_train, X_val], axis=0), pd.concat([y_train, y_val], axis=0))

            # Log evaluation metrics for this trial.
            mlflow.log_metrics({
                "val_loss": rmse_score,
                "val_r2": r2,
                "val_mse": mse_score,
                "val_mae": mae_score
            })

            params['features'] = json.dumps(X_train.columns.tolist())
            params['target'] = y_train.name
            params['task_type'] = 'regression'
            mlflow.log_params(params)

        # hyperopt minimizes 'loss'; MSE is used as the optimization target.
        return {'loss': mse_score, 'status': STATUS_OK}

    # Data handling: with CV, fold the hold-out set back into the training
    # data; without CV and without a provided validation set, carve one out.
    if cv > 1 and (X_val is not None and y_val is not None):
        X_train = pd.concat([X_train, X_val], axis=0)
        y_train = pd.concat([y_train, y_val], axis=0)
    elif cv <= 1 and (X_val is None or y_val is None):
        # BUG FIX: this condition was `cv < 1`, which left cv == 1 without a
        # validation split even though the objective's hold-out branch needs
        # X_val/y_val (now consistent with the XGB/LGBM/CatBoost variants).
        X_train, X_val, y_train, y_val = train_test_split(
            X_train, y_train, test_size=0.2, random_state=42
        )

    with mlflow.start_run(run_name=run_name, nested=True) as run:
        trials = Trials()
        best_params = fmin(
            fn=objective,
            space=search_space,
            algo=tpe.suggest,
            max_evals=max_evals,
            trials=trials
        )

        run_id = run.info.run_id
        run = mlflow.get_run(run_id)
        experiment_id = run.info.experiment_id

        # Pick the best child run of this parent, ranked by val_loss.
        # NOTE(review): the 'metrics.val_mae DESC' tiebreak prefers the larger
        # MAE among equal-loss runs — confirm this is intended.
        best_run = client.search_runs(
            experiment_ids=experiment_id,
            filter_string=f"tags.mlflow.parentRunId = '{run_id}'",
            order_by=['metrics.val_loss', 'metrics.val_mae DESC', 'start_time DESC'],
            max_results=1,
        )[0]

        run_results = {
            'metrics': best_run.data.metrics,
            'params': best_run.data.params
        }

    return run_results


##================================ XGBOOST超参数搜索 ============================ ## 


def xgb_reg_search_space(n_min, n_max, d_min, d_max,
                         lr_min, lr_max, gamma_min, gamma_max,
                         ss_min, ss_max, csbt_min, csbt_max, reg_lambda_min, reg_lambda_max):
    """Build a hyperopt search space for XGBRegressor.

    Integer ranges (n_estimators, max_depth) use quantized uniforms wrapped
    in ``scope.int``; learning_rate is sampled log-uniformly so small rates
    get proportionate coverage; the remaining parameters are plain uniforms.
    The objective is fixed to 'reg:squarederror'.

    Raises:
        ValueError: if any (min, max) bound pair is inverted.
    """
    bounds = [
        ('n', n_min, n_max),
        ('d', d_min, d_max),
        ('lr', lr_min, lr_max),
        ('gamma', gamma_min, gamma_max),
        ('ss', ss_min, ss_max),
        ('csbt', csbt_min, csbt_max),
        ('reg_lambda', reg_lambda_min, reg_lambda_max),
    ]
    # Validate with explicit exceptions: `assert` is stripped under `python -O`.
    for name, lo, hi in bounds:
        if lo > hi:
            raise ValueError(f"{name}_min must be <= {name}_max (got {lo} > {hi})")

    search_space = {
        'n_estimators': scope.int(hp.quniform('n_estimators', n_min, n_max, 1)),
        'max_depth': scope.int(hp.quniform('max_depth', d_min, d_max, 1)),
        'learning_rate': hp.loguniform('learning_rate', np.log(lr_min), np.log(lr_max)),
        'gamma': hp.uniform('gamma', gamma_min, gamma_max),
        'subsample': hp.uniform('subsample', ss_min, ss_max),
        'colsample_bytree': hp.uniform('colsample_bytree', csbt_min, csbt_max),
        'reg_lambda': hp.uniform('reg_lambda', reg_lambda_min, reg_lambda_max),
        'objective': 'reg:squarederror'
    }

    return search_space



def cal_fmin_xgb_reg(X_train, y_train, search_space, experiment_name, run_name, X_val=None, y_val=None, max_evals=20, cv=5):
    """
    Hyperparameter optimization for an XGBoost regression model using hyperopt,
    with every trial tracked as a nested MLflow run.

    Parameters:
        X_train: training features (pandas DataFrame)
        y_train: training target (pandas Series)
        search_space: search space in hyperopt format (see xgb_reg_search_space)
        experiment_name: MLflow experiment name
        run_name: MLflow run name; trials become "<run_name>_trial_<i>" child runs
        X_val: validation features (optional)
        y_val: validation target (optional)
        max_evals: maximum number of search evaluations
        cv: number of cross-validation folds (cv <= 1 switches to hold-out validation)

    Returns:
        dict with 'metrics' and 'params' of the best nested MLflow run,
        selected by 'val_loss' among this search's child runs.
    """
    mlflow.sklearn.autolog()

    # Create the experiment if it does not exist yet; only MLflow-specific
    # exceptions are swallowed (e.g. "experiment already exists").
    try:
        mlflow.create_experiment(experiment_name)
    except mlflow.exceptions.MlflowException:
        pass
    mlflow.set_experiment(experiment_name)

    def objective(params: dict):
        run_name_tmp = f"{run_name}_trial_{len(trials.trials)}"
        with mlflow.start_run(nested=True, run_name=run_name_tmp):
            # Train one XGBoost model per sampled configuration.
            model = XGBRegressor(**params, random_state=42, n_jobs=8)

            if cv > 1:
                scoring = {
                    'mse': 'neg_mean_squared_error',
                    'mae': 'neg_mean_absolute_error',
                    'r2': 'r2'
                }
                # Cross-validation evaluation (sklearn reports negated losses).
                results = cross_validate(model, X_train, y_train, cv=cv, scoring=scoring)

                mse_score = np.mean(-results['test_mse'])
                mae_score = np.mean(-results['test_mae'])
                rmse_score = np.sqrt(mse_score)
                r2 = np.mean(results['test_r2'])

                # Refit on the full training data so a final model is logged.
                model.fit(X_train, y_train)
            else:
                # Hold-out evaluation on the validation set.
                model.fit(X_train, y_train)
                preds = model.predict(X_val)
                mse_score, mae_score, rmse_score, r2 = calculate_metrics_reg(y_val, preds)
                # After evaluation, refit on all available data to use it fully.
                model.fit(pd.concat([X_train, X_val], axis=0), pd.concat([y_train, y_val], axis=0))

            # Log evaluation metrics for this trial.
            mlflow.log_metrics({
                "val_loss": rmse_score,
                "val_r2": r2,
                "val_mse": mse_score,
                "val_mae": mae_score
            })

            params['features'] = json.dumps(X_train.columns.tolist())
            params['target'] = y_train.name
            params['task_type'] = 'regression'
            mlflow.log_params(params)

            # Log the fitted model through the XGBoost-specific MLflow flavor,
            # with an inferred input/output signature.
            mlflow.xgboost.log_model(
                xgb_model=model,
                artifact_path="model",
                signature=mlflow.models.infer_signature(X_train, model.predict(X_train))
            )

        # hyperopt minimizes 'loss'; MSE is used as the optimization target.
        return {'loss': mse_score, 'status': STATUS_OK}

    # Data handling: with CV, fold the hold-out set back into the training
    # data; without CV and without a provided validation set, carve one out.
    if cv > 1 and (X_val is not None and y_val is not None):
        X_train = pd.concat([X_train, X_val], axis=0)
        y_train = pd.concat([y_train, y_val], axis=0)
    elif cv <= 1 and (X_val is None or y_val is None):
        X_train, X_val, y_train, y_val = train_test_split(
            X_train, y_train, test_size=0.2, random_state=42
        )

    with mlflow.start_run(run_name=run_name, nested=True) as run:
        trials = Trials()
        best_params = fmin(
            fn=objective,
            space=search_space,
            algo=tpe.suggest,
            max_evals=max_evals,
            trials=trials,
            # CONSISTENCY FIX: seed the search like the LightGBM/CatBoost
            # variants so repeated sweeps are reproducible.
            rstate=np.random.default_rng(42)
        )

        run_id = run.info.run_id
        run = mlflow.get_run(run_id)
        experiment_id = run.info.experiment_id

        # Pick the best child run of this parent, ranked by val_loss.
        # NOTE(review): the 'metrics.val_mae DESC' tiebreak prefers the larger
        # MAE among equal-loss runs — confirm this is intended.
        best_run = client.search_runs(
            experiment_ids=experiment_id,
            filter_string=f"tags.mlflow.parentRunId = '{run_id}'",
            order_by=['metrics.val_loss', 'metrics.val_mae DESC', 'start_time DESC'],
            max_results=1,
        )[0]

        run_results = {
            'metrics': best_run.data.metrics,
            'params': best_run.data.params
        }

    return run_results

##================================ lgb 超参数搜索 ============================ ##  
def lgb_reg_search_space(n_min, n_max, d_min, d_max,
                         lr_min, lr_max, leaves_min, leaves_max,
                         min_child_samples_min, min_child_samples_max,
                         ss_min, ss_max, csbt_min, csbt_max, reg_lambda_min, reg_lambda_max):
    """Build a hyperopt search space for LGBMRegressor.

    Integer ranges (n_estimators, max_depth, num_leaves, min_child_samples)
    use quantized uniforms wrapped in ``scope.int``; learning_rate is sampled
    log-uniformly; the remaining parameters are plain uniforms. The objective
    is fixed to 'regression'.

    Raises:
        ValueError: if any (min, max) bound pair is inverted.
    """
    bounds = [
        ('n', n_min, n_max),
        ('d', d_min, d_max),
        ('lr', lr_min, lr_max),
        ('leaves', leaves_min, leaves_max),
        ('min_child_samples', min_child_samples_min, min_child_samples_max),
        ('ss', ss_min, ss_max),
        ('csbt', csbt_min, csbt_max),
        ('reg_lambda', reg_lambda_min, reg_lambda_max),
    ]
    # Validate with explicit exceptions: `assert` is stripped under `python -O`.
    for name, lo, hi in bounds:
        if lo > hi:
            raise ValueError(f"{name}_min must be <= {name}_max (got {lo} > {hi})")

    search_space = {
        'n_estimators': scope.int(hp.quniform('n_estimators', n_min, n_max, 1)),
        'max_depth': scope.int(hp.quniform('max_depth', d_min, d_max, 1)),
        'learning_rate': hp.loguniform('learning_rate', np.log(lr_min), np.log(lr_max)),
        'num_leaves': scope.int(hp.quniform('num_leaves', leaves_min, leaves_max, 1)),
        'min_child_samples': scope.int(hp.quniform('min_child_samples', min_child_samples_min, min_child_samples_max, 1)),
        'subsample': hp.uniform('subsample', ss_min, ss_max),
        'colsample_bytree': hp.uniform('colsample_bytree', csbt_min, csbt_max),
        'reg_lambda': hp.uniform('reg_lambda', reg_lambda_min, reg_lambda_max),
        'objective': 'regression'
    }

    return search_space

def cal_fmin_lgb_reg(X_train, y_train, search_space, experiment_name, run_name, X_val=None, y_val=None, max_evals=20, cv=5):
    """
    Hyperparameter optimization for a LightGBM regression model using hyperopt,
    with every trial tracked as a nested MLflow run.

    Parameters:
        X_train: training features (pandas DataFrame)
        y_train: training target (pandas Series)
        search_space: search space in hyperopt format (see lgb_reg_search_space)
        experiment_name: MLflow experiment name
        run_name: MLflow run name; trials become "<run_name>_trial_<i>" child runs
        X_val: validation features (optional)
        y_val: validation target (optional)
        max_evals: maximum number of search evaluations
        cv: number of cross-validation folds (cv <= 1 switches to hold-out validation)

    Returns:
        dict with 'metrics' and 'params' of the best nested MLflow run,
        selected by 'val_loss' among this search's child runs.
    """
    # Initialize MLflow autologging for LightGBM.
    mlflow.lightgbm.autolog()
    try:
        mlflow.create_experiment(experiment_name)
    except mlflow.exceptions.MlflowException:
        # Experiment most likely exists already; only MLflow errors are ignored.
        pass
    mlflow.set_experiment(experiment_name)
    
    # hyperopt objective: fit/evaluate one sampled configuration.
    def objective(params):
        # hyperopt may hand back integer-valued parameters as floats; coerce
        # them (redundant with scope.int in the search space, but harmless).
        int_params = ['num_leaves', 'max_depth', 'n_estimators', 'min_child_samples']
        for param in int_params:
            if param in params:
                params[param] = int(params[param])
        
        run_name_tmp = f"{run_name}_trial_{len(trials.trials)}"  
        with mlflow.start_run(nested=True,run_name=run_name_tmp):  
            # Initialize the LightGBM model for this trial.
            model = lgb.LGBMRegressor(
                **params,
                random_state=42,
                n_jobs=8,
                verbose=-1
            )
            
            if cv > 1:
                # Cross-validation evaluation (sklearn reports negated losses).
                scoring = {
                    'mse': 'neg_mean_squared_error',
                    'mae': 'neg_mean_absolute_error',
                    'r2': 'r2'
                }
                results = cross_validate(model, X_train, y_train, cv=cv, scoring=scoring)
                
                mse_score = np.mean(-results['test_mse'])
                mae_score = np.mean(-results['test_mae'])
                rmse_score = np.sqrt(mse_score)
                r2 = np.mean(results['test_r2'])
                
                # Refit on the full training data so a final model is logged.
                model.fit(X_train, y_train)
            else:
                # Hold-out evaluation on the validation set.
                model.fit(X_train, y_train)
                preds = model.predict(X_val)
                mse_score, mae_score, rmse_score, r2 = calculate_metrics_reg(y_val, preds)
                # After evaluation, refit on all available data to use it fully.
                model.fit(pd.concat([X_train, X_val], axis=0), pd.concat([y_train, y_val], axis=0)) 

            
            # Log evaluation metrics for this trial.
            mlflow.log_metrics({
                "val_loss": rmse_score,
                "val_r2": r2,
                "val_mse": mse_score,
                "val_mae": mae_score
            })
            
            # Log the fitted model with an inferred input/output signature.
            mlflow.lightgbm.log_model(
                lgb_model=model,
                artifact_path="model",
                signature=mlflow.models.infer_signature(X_train, model.predict(X_train))
            )
            
            params['features'] = json.dumps(X_train.columns.tolist() )
            params['target'] = y_train.name
            params['task_type'] = 'regression' 
            mlflow.log_params(params) 
        
        # hyperopt minimizes 'loss'; MSE is used as the optimization target.
        return {'loss': mse_score, 'status': STATUS_OK}
    
    # Data handling: with CV, fold the hold-out set back into the training
    # data; without CV and without a provided validation set, carve one out.
    if cv > 1 and (X_val is not None and y_val is not None):
        X_train = pd.concat([X_train, X_val], axis=0)
        y_train = pd.concat([y_train, y_val], axis=0)
    elif cv <= 1 and (X_val is None or y_val is None):
        X_train, X_val, y_train, y_val = train_test_split(
            X_train, y_train, test_size=0.2, random_state=42
        )
    
    # Run the hyperopt search under a parent MLflow run.
    with mlflow.start_run(run_name=run_name,nested=True) as run : 
        trials = Trials()
        best_params = fmin(
            fn=objective,
            space=search_space,
            algo=tpe.suggest,
            max_evals=max_evals,
            trials=trials,
            rstate=np.random.default_rng(42)  # fixed seed for reproducibility
        )
        
        run_id = run.info.run_id 
        run = mlflow.get_run(run_id)
        experiment_id = run.info.experiment_id  

        # Pick the best child run of this parent, ranked by val_loss.
        # NOTE(review): the 'metrics.val_mae DESC' tiebreak prefers the larger
        # MAE among equal-loss runs — confirm this is intended.
        best_run = client.search_runs(
                experiment_ids=experiment_id,
                filter_string=f"tags.mlflow.parentRunId = '{run_id}'",
                order_by=['metrics.val_loss','metrics.val_mae DESC', 'start_time DESC'],
                max_results=1,
                )[0]

        run_results ={
            'metrics':best_run.data.metrics ,
            'params' :  best_run.data.params  
        }

    return run_results


##================================ catboost 超参数搜索 ============================ ##  
def cat_reg_search_space(n_min, n_max, d_min, d_max,
                         lr_min, lr_max, l2_reg_min, l2_reg_max):
    """Build a hyperopt search space for CatBoostRegressor.

    Integer ranges (n_estimators, max_depth) use quantized uniforms wrapped
    in ``scope.int``; learning_rate is sampled log-uniformly; l2_leaf_reg is
    a continuous uniform. eval_metric is fixed to 'RMSE' and random_strength
    is searched over a fixed [0.1, 10] range.

    Raises:
        ValueError: if any (min, max) bound pair is inverted.
    """
    # Validate with explicit exceptions: `assert` is stripped under `python -O`.
    for name, lo, hi in (('n', n_min, n_max),
                         ('d', d_min, d_max),
                         ('lr', lr_min, lr_max),
                         ('l2_reg', l2_reg_min, l2_reg_max)):
        if lo > hi:
            raise ValueError(f"{name}_min must be <= {name}_max (got {lo} > {hi})")

    search_space = {
        'n_estimators': scope.int(hp.quniform('n_estimators', n_min, n_max, 1)),
        'max_depth': scope.int(hp.quniform('max_depth', d_min, d_max, 1)),
        'learning_rate': hp.loguniform('learning_rate', np.log(lr_min), np.log(lr_max)),
        'l2_leaf_reg': hp.uniform('l2_leaf_reg', l2_reg_min, l2_reg_max),
        'eval_metric': 'RMSE',
        'random_strength': hp.uniform('random_strength', 0.1, 10),
    }

    return search_space



def cal_fmin_catboost_reg(X_train, y_train, search_space, experiment_name, run_name, X_val=None, y_val=None, max_evals=20, cv=5):
    """
    Hyperparameter optimization for a CatBoost regression model using hyperopt,
    with every trial tracked as a nested MLflow run.

    Parameters:
        X_train: training features (pandas DataFrame)
        y_train: training target (pandas Series)
        search_space: search space in hyperopt format (see cat_reg_search_space)
        experiment_name: MLflow experiment name
        run_name: MLflow run name; trials become "<run_name>_trial_<i>" child runs
        X_val: validation features (optional)
        y_val: validation target (optional)
        max_evals: maximum number of search evaluations
        cv: number of cross-validation folds (cv <= 1 switches to hold-out validation)

    Returns:
        dict with 'metrics' and 'params' of the best nested MLflow run,
        selected by 'val_loss' among this search's child runs.
    """
    # NOTE(review): mlflow.sklearn.autolog() may not instrument CatBoost —
    # the model is logged explicitly via mlflow.catboost.log_model below;
    # confirm whether the autolog call is needed at all.
    mlflow.sklearn.autolog()
    try:
        mlflow.create_experiment(experiment_name)
    except mlflow.exceptions.MlflowException:
        # Experiment most likely exists already; only MLflow errors are ignored.
        pass
    mlflow.set_experiment(experiment_name)

    # hyperopt objective: fit/evaluate one sampled configuration.
    def objective(params):
        # Coerce integer-valued hyperparameters (hyperopt may return floats).
        # BUG FIX: 'l2_leaf_reg' was previously coerced to int here, silently
        # truncating the continuous hp.uniform('l2_leaf_reg', ...) sample from
        # cat_reg_search_space; CatBoost accepts a float l2_leaf_reg, so it is
        # no longer converted.
        int_params = ['depth', 'n_estimators', 'min_data_in_leaf']
        for param in int_params:
            if param in params:
                params[param] = int(params[param])

        run_name_tmp = f"{run_name}_trial_{len(trials.trials)}"
        with mlflow.start_run(nested=True, run_name=run_name_tmp):
            # Initialize the CatBoost model for this trial.
            model = CatBoostRegressor(
                **params,
                random_state=42,
                thread_count=8,  # worker threads (CatBoost's analogue of n_jobs)
                verbose=0  # silence per-iteration training output
            )

            if cv > 1:
                # Cross-validation evaluation (sklearn reports negated losses).
                scoring = {
                    'mse': 'neg_mean_squared_error',
                    'mae': 'neg_mean_absolute_error',
                    'r2': 'r2'
                }
                results = cross_validate(model, X_train, y_train, cv=cv, scoring=scoring)

                mse_score = np.mean(-results['test_mse'])
                mae_score = np.mean(-results['test_mae'])
                rmse_score = np.sqrt(mse_score)
                r2 = np.mean(results['test_r2'])

                # Refit on the full training data so a final model is logged.
                model.fit(X_train, y_train)
            else:
                # Hold-out evaluation on the validation set.
                model.fit(X_train, y_train)
                preds = model.predict(X_val)
                mse_score, mae_score, rmse_score, r2 = calculate_metrics_reg(y_val, preds)
                # After evaluation, refit on all available data to use it fully.
                model.fit(pd.concat([X_train, X_val], axis=0), pd.concat([y_train, y_val], axis=0))

            # Log evaluation metrics for this trial.
            mlflow.log_metrics({
                "val_loss": rmse_score,
                "val_r2": r2,
                "val_mse": mse_score,
                "val_mae": mae_score
            })

            # Log the fitted model with an inferred input/output signature.
            mlflow.catboost.log_model(
                cb_model=model,
                artifact_path="model",
                signature=mlflow.models.infer_signature(X_train, model.predict(X_train))
            )

            params['features'] = json.dumps(X_train.columns.tolist())
            params['target'] = y_train.name
            params['task_type'] = 'regression'
            mlflow.log_params(params)

        # hyperopt minimizes 'loss'; MSE is used as the optimization target.
        return {'loss': mse_score, 'status': STATUS_OK}

    # Data handling: with CV, fold the hold-out set back into the training
    # data; without CV and without a provided validation set, carve one out.
    if cv > 1 and (X_val is not None and y_val is not None):
        X_train = pd.concat([X_train, X_val], axis=0)
        y_train = pd.concat([y_train, y_val], axis=0)
    elif cv <= 1 and (X_val is None or y_val is None):
        X_train, X_val, y_train, y_val = train_test_split(
            X_train, y_train, test_size=0.2, random_state=42
        )

    # Run the hyperopt search under a parent MLflow run.
    with mlflow.start_run(run_name=run_name, nested=True) as run:
        trials = Trials()
        best_params = fmin(
            fn=objective,
            space=search_space,
            algo=tpe.suggest,
            max_evals=max_evals,
            trials=trials,
            rstate=np.random.default_rng(42)  # fixed seed for reproducibility
        )

        run_id = run.info.run_id
        run = mlflow.get_run(run_id)
        experiment_id = run.info.experiment_id

        # Pick the best child run of this parent, ranked by val_loss.
        # NOTE(review): the 'metrics.val_mae DESC' tiebreak prefers the larger
        # MAE among equal-loss runs — confirm this is intended.
        best_run = client.search_runs(
            experiment_ids=experiment_id,
            filter_string=f"tags.mlflow.parentRunId = '{run_id}'",
            order_by=['metrics.val_loss', 'metrics.val_mae DESC', 'start_time DESC'],
            max_results=1,
        )[0]

        run_results = {
            'metrics': best_run.data.metrics,
            'params': best_run.data.params
        }

    return run_results


