import pandas as pd
import numpy as np 
import streamlit as st 
from utils.ml_clf import * 
from utils.ml_reg import *  



def _rf_search_params(col_min, col_max, key_prefix, criterion_options, criterion_default):
    """Render random-forest range widgets and return kwargs for the RF search-space builder.

    criterion_options/criterion_default differ between classification and regression;
    key_prefix keeps the Streamlit widget keys unique per task type.
    """
    cfg = {}
    with col_min:
        cfg["n_min"] = st.number_input("n_estimators最小值", 10, 1000, 50, key=f"{key_prefix}n_min")
        cfg["leaf_min"] = st.number_input("min_samples_leaf最小值", 1, 100, 1, key=f"{key_prefix}leaf_min")
        cfg["d_min"] = st.number_input("max_depth最小值", 1, 50, 3, key=f"{key_prefix}d_min")
        cfg["criterion_list"] = st.multiselect("criterion选项", criterion_options, criterion_default, key=f"{key_prefix}criterion")
    with col_max:
        # Each *_max widget uses the matching *_min value as its lower bound so the
        # user cannot configure an empty/inverted search range.
        cfg["n_max"] = st.number_input("n_estimators最大值", cfg["n_min"], 2000, 200, key=f"{key_prefix}n_max")
        cfg["leaf_max"] = st.number_input("min_samples_leaf最大值", cfg["leaf_min"], 200, 20, key=f"{key_prefix}leaf_max")
        cfg["d_max"] = st.number_input("max_depth最大值", cfg["d_min"], 100, 10, key=f"{key_prefix}d_max")
        cfg["max_features_list"] = st.multiselect("max_features选项", ["sqrt", "log2"], ["sqrt"], key=f"{key_prefix}max_features")
    return cfg


def _xgb_search_params(col_min, col_max, key_prefix):
    """Render XGBoost range widgets and return kwargs for the XGB search-space builder."""
    cfg = {}
    with col_min:
        cfg["n_min"] = st.number_input("n_estimators最小值", 10, 1000, 50, key=f"{key_prefix}n_min")
        cfg["d_min"] = st.number_input("max_depth最小值", 1, 20, 3, key=f"{key_prefix}d_min")
        cfg["lr_min"] = st.number_input("learning_rate最小值", 0.0001, 0.1, 0.01, format="%.4f", key=f"{key_prefix}lr_min")
        cfg["gamma_min"] = st.number_input("gamma最小值", 0.0, 10.0, 0.0, key=f"{key_prefix}gamma_min")
        cfg["ss_min"] = st.number_input("subsample最小值", 0.1, 1.0, 0.5, key=f"{key_prefix}ss_min")
        cfg["csbt_min"] = st.number_input("colsample_bytree最小值", 0.1, 1.0, 0.5, key=f"{key_prefix}csbt_min")
        cfg["reg_lambda_min"] = st.number_input("reg_lambda最小值", 0.0, 10.0, 0.0, key=f"{key_prefix}reg_lambda_min")
    with col_max:
        # Lower bound of every *_max widget is the chosen *_min (see _rf_search_params).
        cfg["n_max"] = st.number_input("n_estimators最大值", cfg["n_min"], 2000, 200, key=f"{key_prefix}n_max")
        cfg["d_max"] = st.number_input("max_depth最大值", cfg["d_min"], 50, 10, key=f"{key_prefix}d_max")
        cfg["lr_max"] = st.number_input("learning_rate最大值", cfg["lr_min"], 1.0, 0.3, format="%.4f", key=f"{key_prefix}lr_max")
        cfg["gamma_max"] = st.number_input("gamma最大值", cfg["gamma_min"], 20.0, 5.0, key=f"{key_prefix}gamma_max")
        cfg["ss_max"] = st.number_input("subsample最大值", cfg["ss_min"], 1.0, 1.0, key=f"{key_prefix}ss_max")
        cfg["csbt_max"] = st.number_input("colsample_bytree最大值", cfg["csbt_min"], 1.0, 1.0, key=f"{key_prefix}csbt_max")
        cfg["reg_lambda_max"] = st.number_input("reg_lambda最大值", cfg["reg_lambda_min"], 20.0, 5.0, key=f"{key_prefix}reg_lambda_max")
    return cfg


def _lgb_search_params(col_min, col_max, key_prefix):
    """Render LightGBM range widgets and return kwargs for the LGB search-space builder."""
    cfg = {}
    with col_min:
        cfg["n_min"] = st.number_input("n_estimators最小值", 10, 1000, 50, key=f"{key_prefix}n_min")
        cfg["d_min"] = st.number_input("max_depth最小值", 1, 20, 3, key=f"{key_prefix}d_min")
        cfg["lr_min"] = st.number_input("learning_rate最小值", 0.0001, 0.1, 0.01, format="%.4f", key=f"{key_prefix}lr_min")
        cfg["leaves_min"] = st.number_input("num_leaves最小值", 2, 100, 10, key=f"{key_prefix}leaves_min")
        cfg["min_child_samples_min"] = st.number_input("min_child_samples最小值", 1, 100, 1, key=f"{key_prefix}mcs_min")
        cfg["ss_min"] = st.number_input("subsample最小值", 0.1, 1.0, 0.5, key=f"{key_prefix}ss_min")
        cfg["csbt_min"] = st.number_input("colsample_bytree最小值", 0.1, 1.0, 0.5, key=f"{key_prefix}csbt_min")
        cfg["reg_lambda_min"] = st.number_input("reg_lambda最小值", 0.0, 10.0, 0.0, key=f"{key_prefix}reg_lambda_min")
    with col_max:
        # Lower bound of every *_max widget is the chosen *_min (see _rf_search_params).
        cfg["n_max"] = st.number_input("n_estimators最大值", cfg["n_min"], 2000, 200, key=f"{key_prefix}n_max")
        cfg["d_max"] = st.number_input("max_depth最大值", cfg["d_min"], 50, 10, key=f"{key_prefix}d_max")
        cfg["lr_max"] = st.number_input("learning_rate最大值", cfg["lr_min"], 1.0, 0.3, format="%.4f", key=f"{key_prefix}lr_max")
        cfg["leaves_max"] = st.number_input("num_leaves最大值", cfg["leaves_min"], 1000, 100, key=f"{key_prefix}leaves_max")
        cfg["min_child_samples_max"] = st.number_input("min_child_samples最大值", cfg["min_child_samples_min"], 200, 20, key=f"{key_prefix}mcs_max")
        cfg["ss_max"] = st.number_input("subsample最大值", cfg["ss_min"], 1.0, 1.0, key=f"{key_prefix}ss_max")
        cfg["csbt_max"] = st.number_input("colsample_bytree最大值", cfg["csbt_min"], 1.0, 1.0, key=f"{key_prefix}csbt_max")
        cfg["reg_lambda_max"] = st.number_input("reg_lambda最大值", cfg["reg_lambda_min"], 20.0, 5.0, key=f"{key_prefix}reg_lambda_max")
    return cfg


def _cat_search_params(col_min, col_max, key_prefix):
    """Render CatBoost range widgets and return kwargs for the CatBoost search-space builder."""
    cfg = {}
    with col_min:
        cfg["n_min"] = st.number_input("n_estimators最小值", 10, 1000, 50, key=f"{key_prefix}n_min")
        cfg["d_min"] = st.number_input("max_depth最小值", 1, 20, 3, key=f"{key_prefix}d_min")
        cfg["lr_min"] = st.number_input("learning_rate最小值", 0.0001, 0.1, 0.01, format="%.4f", key=f"{key_prefix}lr_min")
        cfg["l2_reg_min"] = st.number_input("l2_leaf_reg最小值", 0.0, 10.0, 0.0, key=f"{key_prefix}l2_min")
    with col_max:
        # Lower bound of every *_max widget is the chosen *_min (see _rf_search_params).
        cfg["n_max"] = st.number_input("n_estimators最大值", cfg["n_min"], 2000, 200, key=f"{key_prefix}n_max")
        cfg["d_max"] = st.number_input("max_depth最大值", cfg["d_min"], 50, 10, key=f"{key_prefix}d_max")
        cfg["lr_max"] = st.number_input("learning_rate最大值", cfg["lr_min"], 1.0, 0.3, format="%.4f", key=f"{key_prefix}lr_max")
        cfg["l2_reg_max"] = st.number_input("l2_leaf_reg最大值", cfg["l2_reg_min"], 20.0, 5.0, key=f"{key_prefix}l2_max")
    return cfg


def get_search_params(model_type, task_type):
    """Render hyperparameter-range widgets for the chosen model and build its search space.

    Args:
        model_type: one of "rf", "xgb", "lgb", "catboost".
        task_type: "binary"/"multiclass" selects the classification widgets and
            *_clf_search_space builders; any other value selects regression.

    Returns:
        The search-space object produced by the matching *_search_space helper
        from utils.ml_clf / utils.ml_reg.

    Raises:
        ValueError: if model_type is not one of the supported models.
        (The original code raised UnboundLocalError in that case.)
    """
    col3, col4 = st.columns(2)
    is_classification = task_type in ["binary", "multiclass"]

    if model_type == "rf":
        if is_classification:
            cfg = _rf_search_params(col3, col4, "rf_", ["gini", "entropy"], ["gini"])
            return rf_clf_search_space(**cfg)
        cfg = _rf_search_params(col3, col4, "rf_reg_", ["squared_error", "absolute_error", "poisson"], ["squared_error"])
        return rf_reg_search_space(**cfg)

    if model_type == "xgb":
        cfg = _xgb_search_params(col3, col4, "xgb_" if is_classification else "xgb_reg_")
        return xgb_clf_search_space(**cfg) if is_classification else xgb_reg_search_space(**cfg)

    if model_type == "lgb":
        cfg = _lgb_search_params(col3, col4, "lgb_" if is_classification else "lgb_reg_")
        return lgb_clf_search_space(**cfg) if is_classification else lgb_reg_search_space(**cfg)

    if model_type == "catboost":
        cfg = _cat_search_params(col3, col4, "cat_" if is_classification else "cat_reg_")
        return cat_clf_search_space(**cfg) if is_classification else cat_reg_search_space(**cfg)

    raise ValueError(f"Unsupported model_type: {model_type!r}")



def opt_model(data_dict, task_type, model_type, search_space, max_evals, cv):
    """Run hyperparameter optimization for the chosen model/task combination.

    Args:
        data_dict: dict holding 'X_train', 'y_train', 'X_val', 'y_val'.
        task_type: "binary"/"multiclass" selects the classification optimizers
            (and is forwarded as class_type); any other value selects regression.
        model_type: one of "rf", "xgb", "lgb", "catboost".
        search_space: search space built by get_search_params.
        max_evals: number of optimization trials.
        cv: cross-validation setting forwarded to the optimizer.

    Returns:
        The best_params result of the matching cal_fmin_* optimizer.

    Raises:
        ValueError: if model_type is not one of the supported models.
        (The original code raised UnboundLocalError in that case.)
    """
    X_train = data_dict['X_train']
    y_train = data_dict['y_train']
    X_val = data_dict['X_val']
    y_val = data_dict['y_val']

    # Keyword arguments common to every cal_fmin_* optimizer.
    shared_kwargs = {
        "experiment_name": st.session_state.experiment_name,
        "run_name": st.session_state.run_name,
        "X_val": X_val,
        "y_val": y_val,
        "max_evals": max_evals,
        "cv": cv,
    }

    if task_type in ["binary", "multiclass"]:
        optimizers = {
            "rf": cal_fmin_rf_clf,
            "xgb": cal_fmin_xgb_clf,
            "lgb": cal_fmin_lgb_clf,
            "catboost": cal_fmin_catboost_clf,
        }
        # Classification optimizers additionally take the class type.
        shared_kwargs["class_type"] = task_type
    else:  # regression task
        optimizers = {
            "rf": cal_fmin_rf_reg,
            "xgb": cal_fmin_xgb_reg,
            "lgb": cal_fmin_lgb_reg,
            "catboost": cal_fmin_catboost_reg,
        }

    try:
        optimize = optimizers[model_type]
    except KeyError:
        raise ValueError(f"Unsupported model_type: {model_type!r}") from None

    return optimize(X_train, y_train, search_space, **shared_kwargs)



#### Fetch the list of registered models
def get_rg_models():
    """Collect every registered MLflow model version into a summary DataFrame.

    For each model version, looks up its MLflow run to get the run name and the
    'val_loss' metric (rounded to 4 decimals). Lookup failures are recorded
    per-row rather than aborting the whole listing.

    NOTE(review): relies on `mlflow` being brought into scope by the star
    imports at the top of this file — confirm, or import it explicitly.

    Returns:
        pd.DataFrame with columns
        ['模型名称', 'run_name', 'version_id', 'run_id', 'val_loss'].
    """
    rows = []
    for version_info in mlflow.search_model_versions():
        try:
            # Resolve the run this model version was logged from.
            run = mlflow.get_run(version_info.run_id)
            run_name = run.info.run_name
            # Metric key depends on how training logged it; may be absent.
            metric = run.data.metrics.get('val_loss')
            val_loss = "未记录" if metric is None else round(metric, 4)
        except Exception as e:
            # Best-effort: record the failure in the row instead of raising.
            run_name = "未知"
            val_loss = f"获取失败: {str(e)}"

        rows.append([
            version_info.name,
            run_name,
            version_info.version,
            version_info.run_id,
            val_loss,
        ])

    return pd.DataFrame(
        data=rows,
        columns=['模型名称', 'run_name', 'version_id', 'run_id', 'val_loss'],
    )
