from sklearn.metrics import roc_auc_score, roc_curve, precision_score, recall_score, f1_score
import numpy as np
from DataProcess import process_final_data, union_with_sample, split_and_merge_data, evaluate_score_var_rst
from sklearn.model_selection import train_test_split
from DataProcess import calculate_bad_ratio
import lightgbm as lgb
from sklearn.model_selection import GridSearchCV
from lightgbm import LGBMClassifier
from sklearn.metrics import make_scorer
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import RobustScaler

def ks_error(preds, dtrain):
    """Custom KS (Kolmogorov-Smirnov) evaluation metric for LightGBM.

    Args:
        preds: predicted probabilities produced by the booster.
        dtrain: LightGBM Dataset carrying the ground-truth labels.

    Returns:
        A ``(metric_name, ks_value, is_higher_better)`` tuple, as required
        by LightGBM's ``feval`` protocol.
    """
    y_true = dtrain.get_label()
    false_pos_rate, true_pos_rate, _ = roc_curve(y_true, preds)
    ks_value = np.max(np.abs(true_pos_rate - false_pos_rate))
    return 'ks-value', ks_value, True

def train_and_evaluate_lgb_model(X_train, y_train, X_val, y_val, X_time_test, y_time_test):
    """
    Train a LightGBM binary classifier and evaluate it on train/val/test sets.

    Args:
        X_train, y_train: training features and labels.
        X_val, y_val: validation features and labels; used for early stopping
            and for selecting the classification threshold.
        X_time_test, y_time_test: out-of-time test features and labels.

    Returns:
        (lgb_model, train_metrics, val_metrics, test_metrics), where each
        metrics value is a tuple (auc, ks, precision, recall, f1).
    """
    # 1. Build the LightGBM datasets (validation references train so both
    # share the same feature binning)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_val = lgb.Dataset(X_val, y_val, reference=lgb_train)

    # 2. Model parameters
    params = {
        'task': 'train',
        'boosting_type': 'gbdt',
        'objective': 'binary',  # binary classification task
        'metric': ['auc', 'binary_logloss'],
        'num_leaves': 31,
        'learning_rate': 0.05,
        'feature_fraction': 0.9,
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
        'verbose': 100,
        'is_unbalance': True,  # re-weight classes for imbalanced data
        'early_stopping_round': 50
    }
    
    # 3. Train the model; the custom KS metric is tracked alongside the
    # built-in AUC/logloss metrics
    lgb_model = lgb.train(
        params,
        lgb_train,
        num_boost_round=1000,
        valid_sets=[lgb_train, lgb_val],
        valid_names=['train', 'valid'],
        feval=ks_error
    )
    
    # Print feature importances (top 10 by split count)
    print("\n特征重要性：")
    importance = lgb_model.feature_importance()
    feature_names = lgb_model.feature_name()
    importance_dict = dict(zip(feature_names, importance))
    sorted_importance = sorted(importance_dict.items(), key=lambda x: x[1], reverse=True)
    for feat, imp in sorted_importance[:10]:  # top-10 most important features
        print(f"{feat}: {imp}")
    
    # 4. Predicted probabilities for all three splits
    train_pred_proba = lgb_model.predict(X_train)
    val_pred_proba = lgb_model.predict(X_val)
    test_pred_proba = lgb_model.predict(X_time_test)
    
    # Print the predicted-probability range of each split
    print("\n预测概率分布：")
    print(f"训练集概率范围: [{np.min(train_pred_proba):.4f}, {np.max(train_pred_proba):.4f}]")
    print(f"验证集概率范围: [{np.min(val_pred_proba):.4f}, {np.max(val_pred_proba):.4f}]")
    print(f"测试集概率范围: [{np.min(test_pred_proba):.4f}, {np.max(test_pred_proba):.4f}]")
    
    # 5. Grid-search the validation set for the threshold that maximises KS.
    # NOTE(review): KS here is computed on the *binarized* predictions, so it
    # collapses to |tpr - fpr| at a single cut-off rather than the usual
    # curve-wide KS — confirm this is intended.
    thresholds = np.arange(0.1, 0.9, 0.05)
    best_ks = 0
    best_threshold = 0.5
    
    for threshold in thresholds:
        val_pred = (val_pred_proba >= threshold).astype(int)
        fpr, tpr, _ = roc_curve(y_val, val_pred)
        ks = max(abs(tpr - fpr))
        if ks > best_ks:
            best_ks = ks
            best_threshold = threshold
    
    print(f"\n最优阈值: {best_threshold:.4f}")
    
    # Binarize every split with the selected threshold
    train_pred = (train_pred_proba >= best_threshold).astype(int)
    val_pred = (val_pred_proba >= best_threshold).astype(int)
    test_pred = (test_pred_proba >= best_threshold).astype(int)
    
    # 6. Compute the evaluation metrics for one split
    def calculate_metrics(y_true, y_pred, y_pred_proba):
        # AUC/KS use probabilities; precision/recall/F1 use hard predictions
        auc = roc_auc_score(y_true, y_pred_proba)
        fpr, tpr, _ = roc_curve(y_true, y_pred_proba)
        ks = max(abs(tpr - fpr))
        precision = precision_score(y_true, y_pred)
        recall = recall_score(y_true, y_pred)
        f1 = f1_score(y_true, y_pred)
        return auc, ks, precision, recall, f1
    
    # Training-set metrics
    train_metrics = calculate_metrics(y_train, train_pred, train_pred_proba)
    # Validation-set metrics
    val_metrics = calculate_metrics(y_val, val_pred, val_pred_proba)
    # Out-of-time test-set metrics
    test_metrics = calculate_metrics(y_time_test, test_pred, test_pred_proba)
    
    # 7. Pretty-print the evaluation results
    def print_metrics(dataset_name, metrics):
        auc, ks, precision, recall, f1 = metrics
        print(f"\n{dataset_name} 评估指标:")
        print(f"AUC: {auc:.4f}")
        print(f"KS: {ks:.4f}")
        print(f"查准率 (Precision): {precision:.4f}")
        print(f"查全率 (Recall): {recall:.4f}")
        print(f"F1 Score: {f1:.4f}")
    
    print("\n=== LightGBM模型评估结果 ===")
    print_metrics("训练集", train_metrics)
    print_metrics("验证集", val_metrics)
    print_metrics("测试集", test_metrics)
    
    return lgb_model, train_metrics, val_metrics, test_metrics

def ks_score(y_true, y_pred):
    """Return the KS statistic (max |TPR - FPR|) for GridSearchCV scoring."""
    false_positive_rate, true_positive_rate, _ = roc_curve(y_true, y_pred)
    rate_gap = abs(true_positive_rate - false_positive_rate)
    return max(rate_gap)

def grid_search_lgb(X_train, y_train, X_val, y_val):
    """Exhaustive hyperparameter search for LightGBM, scored by KS.

    Runs a 5-fold GridSearchCV over a large LightGBM parameter grid using
    the KS statistic as the selection criterion, then reports the KS of the
    best estimator on the held-out validation set.

    Args:
        X_train, y_train: data the cross-validated search fits on.
        X_val, y_val: held-out data used only for the final KS report.

    Returns:
        (best_params, best_model): the winning parameter dict and the
        refitted best estimator.
    """
    # Search space for the exhaustive grid
    search_space = {
        'num_leaves': [15, 31, 50, 100],
        'max_depth': [3, 5, 7, 9],
        'learning_rate': [0.01, 0.05, 0.1],
        'n_estimators': [100, 200, 500, 1000],
        'min_child_samples': [20, 50, 100],
        'subsample': [0.6, 0.8, 1.0],
        'colsample_bytree': [0.6, 0.8, 1.0],
        'reg_alpha': [0, 1, 10],
        'reg_lambda': [0, 1, 10]
    }

    # Base classifier; built-in metrics disabled in favour of the KS scorer
    estimator = LGBMClassifier(
        objective='binary',
        is_unbalance=True,
        metric='None',
        verbose=-1,
        random_state=42
    )

    # KS scorer fed with predicted probabilities.
    # NOTE(review): needs_proba is deprecated in newer scikit-learn releases
    # (use response_method="predict_proba") — confirm the installed version.
    scorer = make_scorer(ks_score, needs_proba=True)

    searcher = GridSearchCV(
        estimator=estimator,
        param_grid=search_space,
        scoring=scorer,
        cv=5,
        n_jobs=-1,
        verbose=2
    )

    # Run the search
    print("开始网格搜索...")
    searcher.fit(X_train, y_train)

    # Report the winning configuration
    print("\n=== 网格搜索结果 ===")
    print(f"最优参数: {searcher.best_params_}")
    print(f"最优KS得分: {searcher.best_score_:.4f}")

    # Score the refitted best estimator on the held-out validation set
    tuned_model = searcher.best_estimator_
    val_pred_proba = tuned_model.predict_proba(X_val)[:, 1]
    fpr, tpr, _ = roc_curve(y_val, val_pred_proba)
    val_ks = max(abs(tpr - fpr))
    print(f"\n验证集KS得分: {val_ks:.4f}")

    return searcher.best_params_, tuned_model

# Script entry point: data prep, feature engineering, and hyperparameter search
if __name__ == "__main__":
    # Data preparation (kept as in the original pipeline)
    final_df = process_final_data()
    merged_df = union_with_sample(final_df)
    train_df, val_df = split_and_merge_data(final_df, merged_df)
    
    # Evaluate the external (建信金科) score on the validation set
    intersection_df, ks, auc = evaluate_score_var_rst(val_df)

    # Check whether any missing values remain; fill with a sentinel if so
    if train_df.isnull().any().any():
        print("警告：数据中仍存在缺失值，将使用-999999填充")
        train_df = train_df.fillna(-999999)

    if val_df.isnull().any().any():
        print("警告：数据中仍存在缺失值，将使用-999999填充")
        val_df = val_df.fillna(-999999)

    # Drop identifier / label-adjacent columns that must not be model features
    cols_to_drop = ['app_num', 'APPLY_NO', 'APP_DT', 'modPro', 'APPLY_STS',
                    'host_cust_id', 'ACCT_NUM', 'CREDIT_TYPE_TWO',
                    'GUAR_MODE_CD', 'DISTR_DT', 'ODUE_AMT', 'LATEST_OWE_DAYS',
                    'LONGEST_OVDUE_DAYS', 'ACCUM_OVDUE_CNT', 'jx_score', 'score_new',
                    'SCORE_ANTI_FRAUD', 'Unnamed: 13']
    train_df = train_df.drop(columns=cols_to_drop, errors='ignore')
    val_df = val_df.drop(columns=cols_to_drop, errors='ignore')
    
    # Carve a validation split out of the training data
    X_train = train_df.drop('flag_all', axis=1)
    y_train = train_df['flag_all']
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)
    
    # The original validation dataframe becomes the out-of-time test set
    X_time_test = val_df.drop('flag_all', axis=1)
    y_time_test = val_df['flag_all']
    
    # Print dataset sizes and bad-sample ratios
    print("\n=== 数据集统计信息 ===")
    print(f"训练集大小: {len(X_train)}, 坏样本数: {int(y_train.sum())}, 坏样本比例: {calculate_bad_ratio(y_train):.2f}%")
    print(f"验证集大小: {len(X_val)}, 坏样本数: {int(y_val.sum())}, 坏样本比例: {calculate_bad_ratio(y_val):.2f}%")
    print(f"测试集大小: {len(X_time_test)}, 坏样本数: {int(y_time_test.sum())}, 坏样本比例: {calculate_bad_ratio(y_time_test):.2f}%")
    print("=====================\n")
    
    # Cast everything to float32
    X_train = X_train.astype('float32')
    X_val = X_val.astype('float32')
    X_time_test = X_time_test.astype('float32')
    y_train = y_train.astype('float32')
    y_val = y_val.astype('float32')
    y_time_test = y_time_test.astype('float32')
    
    # Feature selection: gentle variance threshold
    selector = VarianceThreshold(threshold=0.001)
    
    # Keep the original feature names (the transformed arrays lose labels)
    feature_names = X_train.columns.tolist()
    
    # Fit the selector on train; apply the same mask to val/test
    X_train_selected = selector.fit_transform(X_train)
    X_val_selected = selector.transform(X_val)
    X_time_test_selected = selector.transform(X_time_test)
    
    # Boolean mask of the retained features
    selected_features_mask = selector.get_support()
    selected_features = [feature_names[i] for i in range(len(feature_names)) if selected_features_mask[i]]
    
    # Report the feature-selection outcome
    print(f"\n特征选择后的特征数量: {X_train_selected.shape[1]}")
    print(f"\n特征选择后的特征: {selected_features}")
    
    # Robust scaling (median/IQR) — less sensitive to outliers than z-scoring
    scaler = RobustScaler()
    X_train = scaler.fit_transform(X_train_selected)
    X_val = scaler.transform(X_val_selected)
    X_time_test = scaler.transform(X_time_test_selected)
    
    # Hyperparameter grid search.
    # NOTE(review): train_and_evaluate_lgb_model is defined above but never
    # invoked in this chunk — confirm whether it should run after the search.
    best_params, best_model = grid_search_lgb(X_train, y_train, X_val, y_val)
    
    # Persist the best model
    print("\n保存最优模型...")
    best_model.booster_.save_model('best_lightgbm_model.txt')
    
    # Persist the best parameters as plain text
    print("最优参数已保存到文件...")
    with open('best_lightgbm_params.txt', 'w') as f:
        for param, value in best_params.items():
            f.write(f"{param}: {value}\n")