from sklearn.metrics import roc_auc_score, roc_curve, precision_score, recall_score, f1_score
import numpy as np
from DataProcess import (
    process_final_data,
    split_data, 
    union_with_sample, 
    split_data, 
    evaluate_score_var_rst, 
    undersample_by_date,
    calculate_psi2
)
from sklearn.model_selection import train_test_split
from DataProcess import calculate_bad_ratio
import lightgbm as lgb
import pandas as pd
from sklearn.utils.class_weight import compute_class_weight
from imblearn.over_sampling import SMOTE
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import LabelEncoder
import sys
from datetime import datetime
import os
import pickle
import joblib
import logging
import json

def setup_logger():
    """Redirect stdout to both a timestamped log file and the console.

    Creates the log directory if needed, opens a log file named after the
    current timestamp, and replaces ``sys.stdout`` with a tee object that
    writes (and immediately flushes) every chunk to both destinations.

    Returns:
        tuple: ``(log_file, original_stdout)`` so the caller can close the
        file and restore ``sys.stdout`` when finished.
    """
    timestamp = datetime.now().strftime('%Y%m%d%H%M%S')

    # Build the log file path; exist_ok avoids the check-then-create race
    # the original os.path.exists/makedirs pair had.
    log_dir = "DNNFraud/dataOut"
    os.makedirs(log_dir, exist_ok=True)

    log_filename = f"LightgbmFruad无建模样本抽样py{timestamp}.log"
    log_path = os.path.join(log_dir, log_filename)

    # Caller owns this handle (it backs sys.stdout until restored).
    log_file = open(log_path, 'w', encoding='utf-8')

    # Keep the original stdout so it can be restored later.
    original_stdout = sys.stdout

    class Logger:
        """File-and-console tee that flushes after every write."""

        def __init__(self, file, stdout):
            self.file = file
            self.stdout = stdout

        def write(self, text):
            self.file.write(text)
            self.stdout.write(text)
            # Flush immediately so the file tracks the console in real time.
            self.file.flush()
            self.stdout.flush()

        def flush(self):
            self.file.flush()
            self.stdout.flush()

    # Swap in the tee.
    sys.stdout = Logger(log_file, original_stdout)

    print(f"日志将保存到: {log_path}")
    return log_file, original_stdout

def ks_error(preds, dtrain):
    """Custom KS (Kolmogorov-Smirnov) eval metric for LightGBM.

    Returns the LightGBM ``feval`` triple ``(name, value, is_higher_better)``
    where the value is the maximum gap between TPR and FPR.
    """
    y_true = dtrain.get_label()
    fpr, tpr, _ = roc_curve(y_true, preds)
    ks_stat = np.max(np.abs(tpr - fpr))
    return 'ks-value', ks_stat, True

def train_and_evaluate_lgb_model(X_train, y_train, X_val, y_val, X_test, y_test):
    """
    Train a LightGBM binary classifier and evaluate it on train/val/test.

    After training, the decision threshold is tuned to maximise
    0.7*precision + 0.3*F1 on the test set, subject to validation
    recall < 0.8, test recall > 0.6 and rejection rate < 5%.

    Returns:
        tuple: (model, train_metrics, val_metrics, test_metrics,
        train_pred_proba, val_pred_proba, test_pred_proba), where each
        ``*_metrics`` is (auc, ks, precision, recall, f1).
    """
    # 1. Conservative parameters chosen to favour precision and limit overfitting
    params = {
        'task': 'train',
        'boosting_type': 'gbdt',
        'objective': 'binary',
        'metric': ['auc', 'binary_logloss'],
        'num_leaves': 10,          # few leaves -> stronger generalization
        'max_depth': 4,           
        'learning_rate': 0.003,    # very low learning rate
        'feature_fraction': 0.7,   # subsample features to curb overfitting
        'bagging_fraction': 0.7,   # subsample rows
        'bagging_freq': 5,
        'min_child_samples': 600,  # large minimum leaf size
        'lambda_l1': 0.8,         # L1 regularization
        'lambda_l2': 0.8,         # L2 regularization
        'min_split_gain': 0.2,    # minimum gain required to split
        'verbose': -1,
        'scale_pos_weight': 12    # up-weight the positive class
    }
    
    # 2. Build datasets with per-sample class weights
    lgb_train = lgb.Dataset(X_train, y_train, weight=calculate_sample_weights(y_train))
    lgb_val = lgb.Dataset(X_val, y_val, weight=calculate_sample_weights(y_val), reference=lgb_train)
    
    # 3. Train with early stopping on the validation set
    callbacks = [lgb.early_stopping(stopping_rounds=300)]  # generous patience
    
    lgb_model = lgb.train(
        params,
        lgb_train,
        num_boost_round=25000,    # large cap; early stopping picks the real count
        valid_sets=[lgb_train, lgb_val],
        valid_names=['train', 'valid'],
        callbacks=callbacks
    )
    
    # 4. Predicted probabilities for all three splits
    train_pred_proba = lgb_model.predict(X_train)
    val_pred_proba = lgb_model.predict(X_val)
    test_pred_proba = lgb_model.predict(X_test)
    
    # 5. Search for the threshold best satisfying the val/test constraints
    def find_optimal_threshold(val_true, val_pred_proba, test_true, test_pred_proba):
        # Scan thresholds in [0.2, 0.8); returns best_metrics=None when no
        # threshold satisfies all constraints.
        thresholds = np.arange(0.2, 0.8, 0.001)
        best_score = 0
        best_threshold = 0.5
        best_metrics = None
        
        for threshold in thresholds:
            # Validation-set predictions
            val_pred = (val_pred_proba >= threshold).astype(int)
            val_recall = recall_score(val_true, val_pred, zero_division=0)
            
            # Test-set predictions
            test_pred = (test_pred_proba >= threshold).astype(int)
            test_precision = precision_score(test_true, test_pred, zero_division=0)
            test_recall = recall_score(test_true, test_pred, zero_division=0)
            test_f1 = f1_score(test_true, test_pred, zero_division=0)
            
            # Share of test samples below the threshold (treated as rejected)
            rejection_rate = (test_pred_proba < threshold).mean()
            
            # Constraints: val recall < 0.8, test recall > 0.6, rejection < 5%
            if val_recall < 0.8 and test_recall > 0.6 and rejection_rate < 0.05:
                score = 0.7 * test_precision + 0.3 * test_f1
                if score > best_score:
                    best_score = score
                    best_threshold = threshold
                    best_metrics = (test_precision, test_recall, test_f1, rejection_rate)
        
        if best_metrics:
            print(f"\n最优阈值下的拒绝率: {best_metrics[3]:.2%}")
        
        return best_threshold, best_metrics
    
    # NOTE(review): the threshold search reads test-set labels, which leaks
    # test information into the decision rule — confirm this is intended.
    best_threshold, test_metrics = find_optimal_threshold(y_val, val_pred_proba, y_test, test_pred_proba)
    print(f"\n最优阈值: {best_threshold:.4f}")
    if test_metrics:
        print(f"测试集指标 (在最优阈值下):")
        print(f"Precision: {test_metrics[0]:.4f}")
        print(f"Recall: {test_metrics[1]:.4f}")
        print(f"F1: {test_metrics[2]:.4f}")
    
    # Final hard predictions at the chosen threshold
    train_pred = (train_pred_proba >= best_threshold).astype(int)
    val_pred = (val_pred_proba >= best_threshold).astype(int)
    test_pred = (test_pred_proba >= best_threshold).astype(int)
    
    # 6. Final evaluation metrics: (auc, ks, precision, recall, f1)
    def calculate_metrics(y_true, y_pred, y_pred_proba):
        auc = roc_auc_score(y_true, y_pred_proba)
        fpr, tpr, _ = roc_curve(y_true, y_pred_proba)
        ks = max(abs(tpr - fpr))
        precision = precision_score(y_true, y_pred, zero_division=0)
        recall = recall_score(y_true, y_pred, zero_division=0)
        f1 = f1_score(y_true, y_pred, zero_division=0)
        return auc, ks, precision, recall, f1
    
    # test_metrics is rebound here from the search tuple to the metric tuple.
    train_metrics = calculate_metrics(y_train, train_pred, train_pred_proba)
    val_metrics = calculate_metrics(y_val, val_pred, val_pred_proba)
    test_metrics = calculate_metrics(y_test, test_pred, test_pred_proba)
    
    return lgb_model, train_metrics, val_metrics, test_metrics, train_pred_proba, val_pred_proba, test_pred_proba

def calculate_sample_weights(y):
    """Compute per-sample weights that up-weight the minority class.

    Each class receives ``2 * n_samples / (n_classes * class_count)`` —
    twice the standard "balanced" weight — and every sample is assigned
    its class's weight.
    """
    total = len(y)
    labels = np.unique(y)
    # Twice the sklearn-style "balanced" weight for each class.
    weight_by_class = {
        label: (total / (len(labels) * np.sum(y == label))) * 2
        for label in labels
    }
    return np.array([weight_by_class[label] for label in y])

def train_with_cv(X_train, y_train, X_val, y_val, params):
    """Train a LightGBM model using 5-fold CV to choose the round count.

    Runs ``lgb.cv`` with the custom KS feval, retrains a final model with
    the resulting number of rounds, tunes the decision threshold on the
    validation set by maximising F1, and prints train/validation metrics.

    Note: mutates ``params`` (sets ``verbose`` to -1).

    Returns:
        The trained LightGBM Booster.
    """
    params['verbose'] = -1
    early_stopping = lgb.early_stopping(stopping_rounds=50)
    
    cv_results = lgb.cv(
        params,
        lgb.Dataset(X_train, y_train),
        num_boost_round=1000,
        nfold=5,
        stratified=True,
        callbacks=[early_stopping],
        metrics=['ks_error'],
        feval=ks_error
    )
    
    # Show which metric keys lgb.cv actually produced
    print("\nCV结果中的指标:", cv_results.keys())
    
    # Best round count = length of the CV metric history
    # NOTE(review): this key format depends on the LightGBM version —
    # confirm it matches the keys printed above.
    metric_name = 'valid ks-value-mean'
    best_rounds = len(cv_results[metric_name])
    print(f"\n最佳轮数: {best_rounds}")
    
    # Retrain the final model with the chosen round count
    final_model = lgb.train(
        params,
        lgb.Dataset(X_train, y_train),
        num_boost_round=best_rounds,
        valid_sets=[lgb.Dataset(X_val, y_val)],
        feval=ks_error
    )
    
    # Predicted probabilities
    train_pred_proba = final_model.predict(X_train)
    val_pred_proba = final_model.predict(X_val)
    
    # Tune the threshold on the validation set (maximising F1, not KS)
    thresholds = np.arange(0.1, 0.9, 0.01)  # fine-grained grid
    best_f1 = 0
    best_threshold = 0.3  # default if no threshold beats f1 = 0
    
    for threshold in thresholds:
        val_pred = (val_pred_proba >= threshold).astype(int)
        f1 = f1_score(y_val, val_pred)
        if f1 > best_f1:
            best_f1 = f1
            best_threshold = threshold
    
    print(f"\n最优阈值: {best_threshold:.4f}")
    
    # Hard predictions at the chosen threshold
    train_pred = (train_pred_proba >= best_threshold).astype(int)
    val_pred = (val_pred_proba >= best_threshold).astype(int)
    
    # (auc, ks, precision, recall, f1) for one split
    def calculate_metrics(y_true, y_pred, y_pred_proba):
        auc = roc_auc_score(y_true, y_pred_proba)
        fpr, tpr, _ = roc_curve(y_true, y_pred_proba)
        ks = max(abs(tpr - fpr))
        precision = precision_score(y_true, y_pred)
        recall = recall_score(y_true, y_pred)
        f1 = f1_score(y_true, y_pred)
        return auc, ks, precision, recall, f1
    
    # Pretty-print one split's metrics
    def print_metrics(dataset_name, metrics):
        auc, ks, precision, recall, f1 = metrics
        print(f"\n{dataset_name} 评估指标:")
        print(f"AUC: {auc:.4f}")
        print(f"KS: {ks:.4f}")
        print(f"查准率 (Precision): {precision:.4f}")
        print(f"查全率 (Recall): {recall:.4f}")
        print(f"F1 Score: {f1:.4f}")
    
    print("\n=== 最终模型评估结果 ===")
    print_metrics("训练集", calculate_metrics(y_train, train_pred, train_pred_proba))
    # NOTE(review): this prints the *validation* split under the "测试集" label.
    print_metrics("测试集", calculate_metrics(y_val, val_pred, val_pred_proba))
    
    return final_model

def find_best_threshold(y_true, y_pred_proba):
    """Grid-search the probability threshold that maximises F1.

    Scans 1000 evenly spaced thresholds in [0.1, 0.9] and returns
    ``(best_threshold, best_f1)``.
    """
    best_threshold, best_f1 = 0.5, 0
    for candidate in np.linspace(0.1, 0.9, 1000):
        hard_labels = (y_pred_proba >= candidate).astype(int)
        score = f1_score(y_true, hard_labels)
        if score > best_f1:
            best_f1, best_threshold = score, candidate
    return best_threshold, best_f1

def sample_balance(data, target_col='flag_all', dest_nega_posi_multiples=10, sample_method='upsample'):
    """
    Balance the sample via simple-duplication upsampling.

    Args:
        data: input DataFrame.
        target_col: name of the binary target column (labels 0/1).
        dest_nega_posi_multiples: desired negative:positive ratio, default 10:1.
        sample_method: sampling method; only 'upsample' is implemented.

    Returns:
        The (possibly enlarged) DataFrame.
    """
    # NOTE(review): assumes negatives are the majority class so that
    # value_counts().index[0] is the negative label — confirm for new data.
    origin_labels_value = {}
    origin_labels_value['nega'] = pd.Series(data[target_col]).value_counts().index[0]  # 0
    origin_labels_value['posi'] = pd.Series(data[target_col]).value_counts().index[1]  # 1
    nega_posi_ratio = int(pd.Series(data[target_col]).value_counts()[0] / pd.Series(data[target_col]).value_counts()[1])
    
    print('\n* 上采样前，正样本标签 = %s, 负正样本比 = %s' % (origin_labels_value['posi'], nega_posi_ratio))
    
    # Split positives and negatives
    posi_data = data[data[target_col] == origin_labels_value['posi']]
    nega_data = data[data[target_col] == origin_labels_value['nega']]
    
    if sample_method == 'upsample':
        print('正在进行上采样...')
        posi_upsample_times = int(nega_posi_ratio / dest_nega_posi_multiples)
        print('原始正样本数量 = %s, 目标正样本数量 = %s ' % (posi_data.shape[0], posi_data.shape[0] * posi_upsample_times))
        
        # Append all extra copies in one concat. The original concatenated
        # inside a loop, which copies the growing frame each iteration
        # (quadratic); a single concat is equivalent and linear.
        if posi_upsample_times > 1:
            data = pd.concat([data] + [posi_data] * (posi_upsample_times - 1))
    
    # Ratio after sampling
    final_nega_posi_ratio = int(pd.Series(data[target_col]).value_counts()[0] / pd.Series(data[target_col]).value_counts()[1])
    print('* 上采样后，负正样本比 = %s' % final_nega_posi_ratio)
    
    return data

def calculate_iv(data, feature, target='flag_all', bins=10, epsilon=1e-10):
    """Compute the Information Value (IV) of a single feature.

    Numeric features are quantile-binned (falling back to equal-width bins
    when quantiles collapse); other dtypes are used as-is. ``epsilon``
    smooths empty cells so the WOE log never sees a zero.

    Args:
        data: DataFrame holding the feature and target columns.
        feature: name of the feature column.
        target: name of the binary (0/1) target column.
        bins: number of bins for numeric features.
        epsilon: additive smoothing for per-bin counts.

    Returns:
        float: the IV of ``feature`` against ``target``.
    """
    # Bin continuous variables
    if data[feature].dtype in ['float64', 'float32', 'int64', 'int32']:
        try:
            x = pd.qcut(data[feature], bins, duplicates='drop')
        except Exception:  # qcut can fail on degenerate distributions
            x = pd.cut(data[feature], bins, duplicates='drop')
    else:
        x = data[feature]
    
    # (the original also built a per-bin WOE dict, but never used it)
    iv = 0
    total_pos = (data[target] == 1).sum()
    total_neg = (data[target] == 0).sum()
    
    for cat in x.unique():
        pos = ((data[target] == 1) & (x == cat)).sum()
        neg = ((data[target] == 0) & (x == cat)).sum()
        
        # Smooth zero counts so the WOE log stays finite.
        pos = pos + epsilon
        neg = neg + epsilon
        
        woe = np.log((pos/total_pos)/(neg/total_neg))
        iv += ((pos/total_pos) - (neg/total_neg)) * woe
    
    return iv

def save_model_results(model, feature_map, threshold, metrics, timestamp):
    """
    Persist the trained model and its associated results as one pickle.

    Args:
        model: trained LightGBM model.
        feature_map: feature name mapping dict (original -> processed).
        threshold: optimal decision threshold.
        metrics: [train_metrics, val_metrics, test_metrics].
        timestamp: timestamp string used in the file name.

    Returns:
        str: path of the written pickle file.
    """
    # Ensure the output directory exists (exist_ok avoids the
    # check-then-create race of the original exists()/makedirs pair).
    save_dir = "DNNFraud/dataOut/models"
    os.makedirs(save_dir, exist_ok=True)
    
    # Build the output file name
    model_filename = f"LightgbmFruad无建模样本抽样_{timestamp}.pkl"
    model_path = os.path.join(save_dir, model_filename)
    
    # Bundle everything needed to reuse the model later
    model_data = {
        'model': model,
        'feature_map': feature_map,
        'threshold': threshold,
        'train_metrics': metrics[0],
        'val_metrics': metrics[1],
        'test_metrics': metrics[2],
        'reverse_feature_map': {v: k for k, v in feature_map.items()},
        'timestamp': timestamp
    }
    
    # Write the bundle
    with open(model_path, 'wb') as f:
        pickle.dump(model_data, f)
    
    print(f"\n模型及结果已保存到: {model_path}")
    return model_path


# 添加加载模型的函数
def load_model_results(model_path):
    """
    Load a model bundle previously written by ``save_model_results``.

    Args:
        model_path: path to the pickle file.

    Returns:
        dict: the stored bundle (model, feature map, threshold, metrics, ...).
    """
    # NOTE: pickle.load can execute arbitrary code — only load trusted files.
    with open(model_path, 'rb') as fh:
        bundle = pickle.load(fh)

    print(f"\n成功加载模型: {model_path}")
    print(f"模型时间戳: {bundle['timestamp']}")
    return bundle

def save_selection_results(results_dict, prefix='feature_selection'):
    """Serialize feature-selection results to a timestamped JSON file.

    numpy arrays and scalars are converted to native Python types so the
    dictionary is JSON-serializable.

    Args:
        results_dict: nested results dictionary (may contain numpy objects).
        prefix: file-name prefix for the output JSON.
    """
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    filename = f'{prefix}_{timestamp}.json'
    
    # Convert numpy objects for JSON serialization
    def convert_numpy(obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, (np.floating, np.integer)):
            # Covers np.float32/float64/int32/int64 — the original only
            # handled np.float32, so e.g. np.int64 values crashed json.dumps.
            return obj.item()
        return obj
    
    # Round-trip through json to recursively convert every numpy object
    results_dict_converted = json.loads(
        json.dumps(results_dict, default=convert_numpy)
    )
    
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(results_dict_converted, f, indent=4, ensure_ascii=False)
    
    # Original logged a literal placeholder instead of the file name.
    logging.info(f"结果已保存到文件: {filename}")

def get_feature_importance(model, feature_names):
    """Blend LightGBM 'split' and 'gain' importances into one ranking.

    Each importance vector is normalised to sum to 1, then the two are
    averaged 50/50; the blended series is returned sorted descending.
    """
    split_imp = pd.Series(
        model.feature_importance(importance_type='split'),
        index=feature_names
    )
    gain_imp = pd.Series(
        model.feature_importance(importance_type='gain'),
        index=feature_names
    )

    # Normalise each view to a probability-like vector
    split_imp = split_imp / split_imp.sum()
    gain_imp = gain_imp / gain_imp.sum()

    # Equal-weight blend of the two views
    blended = (split_imp + gain_imp) / 2
    return blended.sort_values(ascending=False)

def select_features(X_train, y_train, X_val, y_val, X_test, y_test, feature_names):
    """
    Five-stage feature-selection pipeline.

    Stage 1: importance pre-screen to the top 50 features.
    Stage 2: RFE-style shrinking from 50 features down to 1.
    Stage 3: pick the best combination meeting the recall constraints.
    Stage 4: greedy feature removal (backward elimination).
    Stage 5: stability check over 5 repeated trainings.

    Returns:
        tuple: (final feature list, results dict), or (None, results dict)
        when stage 3 finds no combination meeting the constraints.

    NOTE(review): the reporting section at the end references names that
    are never defined in this scope (``reverse_feature_map``,
    ``X_test_selected``, ``feature_name_map``) and will raise NameError
    when reached — see inline notes below.
    """
    # Log to both a timestamped file and the console
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[
            logging.FileHandler(f'feature_selection_{datetime.now().strftime("%Y%m%d_%H%M%S")}.log'),
            logging.StreamHandler()
        ]
    )
    
    # Accumulates per-stage results; serialized to JSON at the end
    results_dict = {
        'timestamp': datetime.now().strftime('%Y%m%d_%H%M%S'),
        'initial_features_count': len(feature_names),
        'stages': {
            'feature_importance': {},
            'iterative_selection': {},
            'forward_selection': {},
            'stability_test': {}
        }
    }
    
    logging.info(f"开始特征选择过程，初始特征数量: {len(feature_names)}")
    
    # Stage 1: pre-screen by feature importance
    logging.info("第一阶段：基于特征重要性的初步筛选")
    
    # Train the baseline model on all features
    base_model, train_metrics, val_metrics, test_metrics, train_pred_proba, val_pred_proba, test_pred_proba = train_and_evaluate_lgb_model(
        X_train, y_train, X_val, y_val, X_test, y_test
    )
    
    # Blended split/gain importance ranking
    feature_importance = get_feature_importance(base_model, feature_names)
    
    # Keep the 50 most important features
    top_50_features = feature_importance.head(50).index.tolist()
    
    # Record stage-1 results
    results_dict['stages']['feature_importance'] = {
        'all_features_importance': dict(feature_importance),
        'selected_features': top_50_features,
        'base_model_metrics': {
            'train': train_metrics,
            'val': val_metrics,
            'test': test_metrics
        }
    }
    
    logging.info(f"特征重要性初筛完成，选择了top 50个特征")
    
    # Stage 2: iterative (RFE-style) selection
    logging.info("第二阶段：迭代式特征选择（RFE）")
    
    # Shrink from 50 features down to 1 — this trains 50 models, so it is slow
    iterative_selection_results = []
    for n_features in range(50, 0, -1):
        logging.info(f"正在尝试 {n_features} 个特征")
        
        # Current subset = prefix of the importance ranking
        current_features = top_50_features[:n_features]
        
        # Train and evaluate on the subset
        model, train_metrics, val_metrics, test_metrics, train_pred_proba, val_pred_proba, test_pred_proba = train_and_evaluate_lgb_model(
            X_train[current_features], y_train,
            X_val[current_features], y_val,
            X_test[current_features], y_test
        )
        
        # Record this iteration
        iterative_selection_results.append({
            'n_features': n_features,
            'features': current_features,
            'performance': {
                'train': train_metrics,
                'val': val_metrics,
                'test': test_metrics
            }
        })
    
    # Record stage-2 results
    results_dict['stages']['iterative_selection'] = {
        'iterations': iterative_selection_results
    }
    
    logging.info("迭代式特征选择完成")
    
    # Stage 3: choose the best feature combination
    logging.info("第三阶段：最优特征组合确定")
    
    # Keep only combinations satisfying the recall constraints.
    # Metric tuples are (auc, ks, precision, recall, f1).
    valid_combinations = []
    for result in iterative_selection_results:
        val_recall = result['performance']['val'][3]
        test_recall = result['performance']['test'][3]
        test_precision = result['performance']['test'][2]
        test_f1 = result['performance']['test'][4]
        
        if val_recall < 0.8 and test_recall > 0.6:
            score = 0.7 * test_precision + 0.3 * test_f1
            valid_combinations.append((result, score))
    
    # Pick the highest-scoring valid combination
    if valid_combinations:
        best_combination, best_score = max(valid_combinations, key=lambda x: x[1])
        logging.info(f"最优特征组合: {best_combination['n_features']} 个特征，得分: {best_score:.4f}")
    else:
        logging.info("未找到满足条件的特征组合")
        return None, results_dict
    
    # Record stage-3 results
    results_dict['stages']['optimal_combination'] = {
        'best_combination': best_combination,
        'best_score': best_score
    }
    
    # Stage 4: greedy feature removal
    # NOTE(review): despite the log text, this loop *removes* features
    # (backward elimination), not forward selection.
    logging.info("第四阶段：前向特征选择")
    
    # Start from the best combination and try deleting features.
    # NOTE(review): current_features aliases best_combination['features'],
    # so the removals below also mutate the stage-3 record.
    forward_selection_results = []
    current_features = best_combination['features']
    current_score = best_score
    
    while True:
        best_improvement = 0
        best_feature_to_remove = None
        
        for feature in current_features:
            logging.info(f"尝试删除特征: {feature}")
            
            # Candidate subset without this feature
            remaining_features = [f for f in current_features if f != feature]
            
            # Train and evaluate without it
            model, train_metrics, val_metrics, test_metrics, train_pred_proba, val_pred_proba, test_pred_proba = train_and_evaluate_lgb_model(
                X_train[remaining_features], y_train,
                X_val[remaining_features], y_val,
                X_test[remaining_features], y_test
            )
            
            # Score improvement from removing this feature
            test_precision = test_metrics[2]
            test_f1 = test_metrics[4]
            score = 0.7 * test_precision + 0.3 * test_f1
            improvement = score - current_score
            
            if improvement > best_improvement:
                best_improvement = improvement
                best_feature_to_remove = feature
        
        if best_feature_to_remove is None:
            break
        
        # Commit the best removal
        current_features.remove(best_feature_to_remove)
        current_score += best_improvement
        
        # Record the step.
        # NOTE(review): 'features' stores a reference to the live list, so
        # every recorded step ends up holding the *final* feature list; the
        # metrics stored here come from the last inner-loop trial, not
        # necessarily the model without the removed feature.
        forward_selection_results.append({
            'n_features': len(current_features),
            'features': current_features,
            'performance': {
                'train': train_metrics,
                'val': val_metrics,
                'test': test_metrics
            }
        })
    
    # Record stage-4 results
    results_dict['stages']['forward_selection'] = {
        'results': forward_selection_results
    }
    
    logging.info("前向特征选择完成")
    
    # Stage 5: stability check
    logging.info("第五阶段：稳定性验证")
    
    # Retrain the final combination several times
    stability_test_results = []
    for i in range(5):
        logging.info(f"稳定性验证第 {i+1} 次")
        
        # Train and evaluate
        model, train_metrics, val_metrics, test_metrics, train_pred_proba, val_pred_proba, test_pred_proba = train_and_evaluate_lgb_model(
            X_train[current_features], y_train,
            X_val[current_features], y_val,
            X_test[current_features], y_test
        )
        
        # Record this run
        stability_test_results.append({
            'train': train_metrics,
            'val': val_metrics,
            'test': test_metrics
        })
    
    # Standard deviation of each metric across the repeated runs
    stability_stats = {
        'train': {
            'precision': np.std([r['train'][2] for r in stability_test_results]),
            'recall': np.std([r['train'][3] for r in stability_test_results]),
            'f1': np.std([r['train'][4] for r in stability_test_results])
        },
        'val': {
            'precision': np.std([r['val'][2] for r in stability_test_results]),
            'recall': np.std([r['val'][3] for r in stability_test_results]),
            'f1': np.std([r['val'][4] for r in stability_test_results])
        },
        'test': {
            'precision': np.std([r['test'][2] for r in stability_test_results]),
            'recall': np.std([r['test'][3] for r in stability_test_results]),
            'f1': np.std([r['test'][4] for r in stability_test_results])
        }
    }
    
    # Record stage-5 results
    results_dict['stages']['stability_test'] = {
        'results': stability_test_results,
        'stability_stats': stability_stats
    }
    
    logging.info("稳定性验证完成")
    
    # Persist all stage results to JSON
    save_selection_results(results_dict)
    
    # Final evaluation / reporting
    print("\n=== 特征选择结果评估 ===")
    
    
    
    # Print the final features with their importance.
    # NOTE(review): reverse_feature_map is not defined anywhere in this
    # function — the next statement raises NameError at runtime.
    print("\n重要特征（重要性）：")
    feature_importance = pd.DataFrame({
        'processed_name': current_features,
        'original_name': [reverse_feature_map[col] for col in current_features],
        'importance': model.feature_importance()
    })
    print(feature_importance[['original_name', 'importance']].to_string(index=False))
    
    print("\n=== 重要特征数据集信息 ===")
    print(f"训练集形状: {X_train.shape}")
    print(f"验证集形状: {X_val.shape}")
    # NOTE(review): X_test_selected is not defined here — NameError.
    print(f"测试集形状: {X_test_selected.shape}")
    
    # NOTE(review): index 3 of the metrics tuple is the validation *recall*,
    # not a probability threshold — confirm the intended value.
    best_threshold = results_dict['stages']['optimal_combination']['best_combination']['performance']['val'][3]

    # Print final metrics per split
    print("\n=== 模型评估结果 ===")
    def print_metrics(dataset_name, metrics):
        auc, ks, precision, recall, f1 = metrics
        print(f"\n{dataset_name}评估指标:")
        print(f"AUC: {auc:.4f}")
        print(f"KS: {ks:.4f}")
        print(f"查准率 (Precision): {precision:.4f}")
        print(f"查全率 (Recall): {recall:.4f}")
        print(f"F1 Score: {f1:.4f}")
    
    print_metrics("训练集", train_metrics)
    print_metrics("验证集", val_metrics)
    print_metrics("测试集", test_metrics)
    
    # Population Stability Index of the score distributions
    print("\n=== PSI评估结果 ===")
    train_pred_df = pd.DataFrame({'score': train_pred_proba})
    val_pred_df = pd.DataFrame({'score': val_pred_proba})
    test_pred_df = pd.DataFrame({'score': test_pred_proba})
    
    val_psi = calculate_psi2(train_pred_df, val_pred_df, 'score')
    test_psi = calculate_psi2(train_pred_df, test_pred_df, 'score')
    
    print(f"验证集相对训练集的PSI: {val_psi:.4f}")
    print(f"测试集相对训练集的PSI: {test_psi:.4f}")
    
    # Persist the model bundle.
    # NOTE(review): feature_name_map is not defined in this scope — NameError.
    model_path = save_model_results(
        model=model,
        feature_map=feature_name_map,
        threshold=best_threshold,
        metrics=[train_metrics, val_metrics, test_metrics],
        timestamp=results_dict['timestamp']
    )
    
    return current_features, results_dict

def evaluate_feature_combination(X_train, y_train, X_val, y_val, X_test, y_test, selected_features):
    """Train on a feature subset and score it under the project constraints.

    The score is ``0.7 * test precision + 0.3 * test F1`` when the
    constraints hold (validation recall < 0.8 and test recall > 0.6),
    otherwise 0.

    Returns:
        dict: features, feature count, whether constraints were met, the
        final score, and per-split metric tuples (auc, ks, precision,
        recall, f1).
    """
    subset_train = X_train[selected_features]
    subset_val = X_val[selected_features]
    subset_test = X_test[selected_features]

    model, train_metrics, val_metrics, test_metrics, *_ = train_and_evaluate_lgb_model(
        subset_train, y_train,
        subset_val, y_val,
        subset_test, y_test
    )

    # Metric tuples are (auc, ks, precision, recall, f1)
    val_recall = val_metrics[3]
    test_precision = test_metrics[2]
    test_recall = test_metrics[3]
    test_f1 = test_metrics[4]

    # Both recall constraints must hold for the combination to count
    constraints_met = (val_recall < 0.8) and (test_recall > 0.6)
    final_score = 0.7 * test_precision + 0.3 * test_f1 if constraints_met else 0

    return {
        'features': selected_features,
        'feature_count': len(selected_features),
        'constraints_met': constraints_met,
        'final_score': final_score,
        'metrics': {
            'train': train_metrics,
            'val': val_metrics,
            'test': test_metrics
        }
    }

def iterative_feature_selection(X_train, y_train, X_val, y_val, X_test, y_test, 
                              initial_features, results_dict):
    """Iterative (RFE-style) feature selection from 50 down to 25 features.

    At each step the least-important features (by blended importance from a
    throwaway model) are dropped, the remaining subset is evaluated, and
    the best-scoring combination is tracked.

    Returns:
        tuple: (best feature list, updated results_dict).
    """
    logging.info("第二阶段：开始迭代式特征选择")
    
    current_features = initial_features.copy()
    all_results = []
    best_combination = None
    best_score = 0
    
    # Shrink from 50 features down to 25
    for n_features in range(50, 24, -1):
        logging.info(f"测试特征数量: {n_features}")
        
        # If we still hold more features than the target, drop the least important
        if len(current_features) > n_features:
            # Rank current features with a throwaway model
            X_current = X_train[current_features]
            temp_model, _, _, _, _, _, _ = train_and_evaluate_lgb_model(
                X_current, y_train, 
                X_val[current_features], y_val,
                X_test[current_features], y_test
            )
            feature_importance = get_feature_importance(temp_model, current_features)
            
            # Keep the n_features most important ones
            current_features = feature_importance.head(n_features).index.tolist()
        
        # Evaluate the current subset
        result = evaluate_feature_combination(
            X_train, y_train, X_val, y_val, X_test, y_test, current_features
        )
        
        all_results.append(result)
        
        # Track the best-scoring combination
        if result['final_score'] > best_score:
            best_score = result['final_score']
            best_combination = result
        
        logging.info(f"特征数量 {n_features} 评估完成，得分: {result['final_score']:.4f}")
    
    # Fallback: if every combination scored 0 (constraints never met),
    # keep the last evaluated one. The original left best_combination as
    # None in that case and crashed on the len()/indexing below.
    if best_combination is None:
        best_combination = all_results[-1]
    
    # Record stage-2 results
    results_dict['stages']['iterative_selection'] = {
        'all_combinations': all_results,
        'best_combination': best_combination
    }
    
    logging.info(f"迭代式特征选择完成，最佳特征数量: {len(best_combination['features'])}")
    logging.info(f"最佳得分: {best_combination['final_score']:.4f}")
    
    return best_combination['features'], results_dict

def forward_feature_selection(X_train, y_train, X_val, y_val, X_test, y_test, 
                            initial_features, results_dict):
    """Greedy backward elimination of features (stage three).

    NOTE(review): despite the historical name, this routine *removes*
    features.  Each round it scores every single-feature removal from the
    surviving set, applies the one removal that most improves the score,
    and stops when no removal helps or only 25 features remain.  The
    outcome is recorded under ``results_dict['stages']['forward_selection']``.

    Returns:
        Tuple of (surviving feature list, updated ``results_dict``).
    """
    logging.info("第三阶段：开始前向特征选择")

    surviving = initial_features.copy()
    score_now = evaluate_feature_combination(
        X_train, y_train, X_val, y_val, X_test, y_test, surviving
    )['final_score']

    elimination_log = []

    while len(surviving) > 25:
        round_best_score = score_now
        round_best_feature = None

        # Try dropping each feature in turn; keep the strictly best removal.
        for candidate in surviving:
            trial = [f for f in surviving if f != candidate]
            trial_score = evaluate_feature_combination(
                X_train, y_train, X_val, y_val, X_test, y_test, trial
            )['final_score']

            if trial_score > round_best_score:
                round_best_score = trial_score
                round_best_feature = candidate

        # No single removal improved the score: stop eliminating.
        if round_best_feature is None:
            break

        surviving.remove(round_best_feature)
        score_now = round_best_score
        elimination_log.append({
            'feature': round_best_feature,
            'new_score': round_best_score
        })
        logging.info(f"删除特征 {round_best_feature}, 新得分: {round_best_score:.4f}")

    # Record this stage's outcome.
    results_dict['stages']['forward_selection'] = {
        'removed_features': elimination_log,
        'final_features': surviving,
        'final_score': score_now
    }

    logging.info(f"前向特征选择完成，最终特征数量: {len(surviving)}")

    return surviving, results_dict

# Script entry point: end-to-end data preparation, feature screening,
# resampling and LightGBM dataset construction.
if __name__ == "__main__":
    # Timestamp used for naming output artifacts
    timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
    
    # Set up tee-style logging (file + console)
    log_file, original_stdout = setup_logger()
    
    # try:
    # Keep the original data-preparation code unchanged
    final_df = process_final_data()
    
    # Read and merge APP_0303_DATA.csv (currently disabled)
    # try:
        # app_data = pd.read_csv('DNNFraud/dataIn/杨承林宽表数据时点25年2月28日/APP_0303_DATA.csv', 
        #                       sep=',',  # field separator
        #                       encoding='utf-8')  # utf-8 encoding
        
        # print("\n=== APP_0303_DATA.csv数据信息 ===")
        # print(f"原始记录数: {len(app_data)}")
        # print(f"原始字段数: {len(app_data.columns)}")
        
        # # inner join
        # final_df = pd.merge(
        #     final_df,
        #     app_data,
        #     left_on='app_num',
        #     right_on='APPLY_NO',
        #     how='inner'
        # )
        
    # Report the (possibly merged) data shape
    print("\n=== 合并后的数据信息 ===")
    print(f"合并后记录数: {len(final_df)}")
    print(f"合并后字段数: {len(final_df.columns)}")
    
    # Detect duplicate column names (the _x/_y suffixes a merge can produce)
    duplicate_cols = [col for col in final_df.columns if col.endswith('_x') or col.endswith('_y')]
    if duplicate_cols:
        print("\n警告: 发现重复列名，需要处理:")
        for col in duplicate_cols:
            print(f"- {col}")
    
    # Resolve the duplicate column names found above
    if duplicate_cols:
        print("\n处理重复列名...")
        for col in duplicate_cols:
            base_col = col[:-2]  # strip the _x or _y suffix
            # If both columns hold identical values, collapse them to one
            if col.endswith('_x') and f"{base_col}_y" in final_df.columns:
                if final_df[col].equals(final_df[f"{base_col}_y"]):
                    final_df[base_col] = final_df[col]
                    final_df = final_df.drop([col, f"{base_col}_y"], axis=1)
                    print(f"- 合并相同的列: {base_col}")
                else:
                    # Values differ: keep both columns under clearer names
                    final_df = final_df.rename(columns={
                        col: f"{base_col}_final",
                        f"{base_col}_y": f"{base_col}_app"
                    })
                    print(f"- 重命名不同的列: {base_col}")
        
    # except Exception as e:
    #     print(f"\n错误: 读取或合并APP_0303_DATA.csv时出错: {e}")
    #     print("继续使用原始final_df数据")
    
    
    # Union with sampled data (disabled)
    # merged_df = union_with_sample(final_df)
    train_df, test_df = split_data(final_df)

    if test_df.isnull().any().any():
        print("警告：测试数据中存在缺失值，使用-999999填充")
        test_df = test_df.fillna(-999999)
    
    # First evaluate the vendor (CCB Fintech) score on the untouched test set
    intersection_df, ks, auc = evaluate_score_var_rst(test_df)
    
    
    # Then drop identifier / leakage / post-hoc outcome columns not used for modelling
    cols_to_drop = ['app_num', 'APPLY_NO', 'APP_DT', 'modPro', 'APPLY_STS',
                    'host_cust_id', 'ACCT_NUM', 'CREDIT_TYPE_TWO',
                    'GUAR_MODE_CD', 'DISTR_DT', 'ODUE_AMT', 'LATEST_OWE_DAYS',
                    'LONGEST_OVDUE_DAYS', 'ACCUM_OVDUE_CNT', 'jx_score', 'score_new',
                    'SCORE_ANTI_FRAUD', 'Unnamed: 13',
                    'APPLY_DATE','SHENPI_NIANFEN','SHENPI_YUEFEN','APPLY_STS_app','APPLY_STS_final','BP_CUST_NO','分行','支行','审批金额','排序','SOCI_UNIFIED_CRDT_CD','FIRST_DISTR_DT','TAG','APP_NUM_A','MAX_LONGEST_OVDUE_DAYS','SUM_ACCUM_OVDUE_CNT','放款金额','最早放款日期','放款笔数','贷款余额','逾期额','不良额','执行利率','目前逾期天数','tag0','tag3','tag7','tag15','tag30','tag7_N','tag15_N','tag30_N','BUSSNO'
                    ,'融合评分档位']
    
    train_df = train_df.drop(columns=cols_to_drop, errors='ignore')
    test_df = test_df.drop(columns=cols_to_drop, errors='ignore')
    
    

    # Fill any remaining missing values, keeping train/test dtypes aligned
    if train_df.isnull().any().any() or test_df.isnull().any().any():
        print("\n=== 处理缺失值 ===")
        # Column dtypes are decided from the training set only
        train_numeric_cols = train_df.select_dtypes(include=['int64', 'float64']).columns
        train_categorical_cols = train_df.select_dtypes(include=['object', 'category']).columns
        
        # print("\n分类特征列:")
        # for col in train_categorical_cols:
        #     print(f"- {col}")
        
        # Force the test set's column types to match the training set's
        for col in train_df.columns:
            if col in train_categorical_cols:
                # Categorical feature: process both datasets identically
                # print(f"\n处理分类特征: {col}")
                # Cast the column to string in both datasets
                train_df[col] = train_df[col].astype(str)
                test_df[col] = test_df[col].astype(str)
                
                # Normalise null markers and special values
                for df in [train_df, test_df]:
                    # Map 'nan'/'None'/'-999999'/'null' to the unified '-99999.0'
                    df[col] = df[col].replace({
                        'nan': '-99999.0',
                        'None': '-99999.0',
                        '-999999': '-99999.0',
                        'null': '-99999.0'
                    })
                    # Fill whatever missing values remain
                    df[col] = df[col].fillna('-99999.0')
                
            elif col in train_numeric_cols:
                # Numeric feature: fill missing values with a single sentinel
                # print(f"\n处理数值特征: {col}")
                train_df[col] = train_df[col].fillna(-999999)
                test_df[col] = test_df[col].fillna(-999999)
                # Ensure float dtype on both sides
                train_df[col] = train_df[col].astype('float64')
                test_df[col] = test_df[col].astype('float64')

    # Assemble features and labels ('flag_all' is the target)
    X = train_df.drop('flag_all', axis=1)
    y = train_df['flag_all']
    X_test = test_df.drop('flag_all', axis=1)
    y_test = test_df['flag_all']

    # 1. Compute missing-value ratios and filter features on them
    def calculate_missing_ratio(df):
        """Return each column's fraction of missing values.

        Counts genuine NaNs plus the sentinel fill values used earlier in
        this script: '-99999.0' (categorical fill, in numeric and string
        spellings) and -999999 (the numeric fill applied via
        fillna(-999999) above), which the previous version failed to
        count — filled numeric columns always looked fully populated.
        """
        missing_mask = (
            df.isnull() |  # genuine missing values
            (df == -99999.0) |  # float sentinel
            (df == "-99999.0") |  # string sentinel
            (df == -99999) |  # integer sentinel
            (df == "-99999") |  # alternate string sentinel
            (df == -999999) |  # numeric fill sentinel (six nines)
            (df == "-999999")  # its string spelling
        )
        return missing_mask.mean()
    
    # 计算训练集的缺失值比例
    missing_ratio = calculate_missing_ratio(X)
    valid_features_missing = missing_ratio[missing_ratio < 0.95].index.tolist()
    
    print("\n=== 缺失值分析 ===")
    print(f"删除缺失值比例>=95%的特征前: {X.shape[1]}个特征")
    # 打印每个被删除特征的缺失值比例（静默处理）
    dropped_features = set(X.columns) - set(valid_features_missing)
    # if dropped_features:
        # print("\n被删除的特征及其缺失值比例:")
        # for feature in dropped_features:
            # print(f"- {feature}: {missing_ratio[feature]:.2%}")
    
    # 保留缺失值比例小于95%的特征
    X = X[valid_features_missing]
    X_test = X_test[valid_features_missing]
    print(f"删除缺失值比例>=95%的特征后: {X.shape[1]}个特征")

    # 2. 计算IV值并筛选（静默处理）
    iv_values = {}
    for feature in X.columns:
        iv_values[feature] = calculate_iv(train_df, feature)
    
    valid_features_iv = [feat for feat, iv in iv_values.items() if iv >= 0.01]
    print(f"\n删除IV<0.01的特征前: {X.shape[1]}个特征")
    X = X[valid_features_iv]
    X_test = X_test[valid_features_iv]
    print(f"删除IV<0.01的特征后: {X.shape[1]}个特征")
    
    # 对训练集进行上采样（静默处理）
    train_df = sample_balance(train_df, target_col='flag_all', dest_nega_posi_multiples=10, sample_method='upsample')
    
    # 重新准备采样后的特征和标签
    X = train_df.drop('flag_all', axis=1)
    y = train_df['flag_all']
    
    # 识别分类特征（静默处理）
    categorical_columns = X.select_dtypes(include=['object', 'category']).columns.tolist()
    numeric_columns = X.select_dtypes(include=['int64', 'float64']).columns
    
    # 确保只处理训练集和测试集都有的分类特征
    common_categorical_columns = [col for col in categorical_columns if col in X_test.columns]
    categorical_columns = common_categorical_columns
    
    # 获取共同的数值型列（静默处理）
    train_numeric_cols = X.select_dtypes(include=['int64', 'float64', 'float32']).columns
    test_numeric_cols = X_test.select_dtypes(include=['int64', 'float64', 'float32']).columns
    common_numeric_cols = list(set(train_numeric_cols) & set(test_numeric_cols))
    
    # 只使用共同的数值型列进行特征选择
    X_for_selection = X[common_numeric_cols]
    X_test_for_selection = X_test[common_numeric_cols]
    
    # 特征选择
    selector = VarianceThreshold(threshold=0.01)
    X_selected = selector.fit_transform(X_for_selection)
    X_test_selected = selector.transform(X_test_for_selection)
    
    # 获取被选择的特征索引
    selected_features = np.array(common_numeric_cols)[selector.get_support()].tolist()
    
    # 将数值特征转换为DataFrame并重置索引
    X_selected_df = pd.DataFrame(X_selected, columns=selected_features).reset_index(drop=True)
    X_test_selected_df = pd.DataFrame(X_test_selected, columns=selected_features).reset_index(drop=True)
    
    # 如果有分类特征，将它们添加回来
    if len(categorical_columns) > 0:
        # 创建一个新的DataFrame来存储所有特征
        X_final = X_selected_df.copy()
        X_test_final = X_test_selected_df.copy()
        
        # 添加分类特征
        for col in categorical_columns:
            try:
                unique_values = np.union1d(X[col].unique(), X_test[col].unique())
                le = LabelEncoder()
                le.fit(unique_values)
                # 转换训练集和测试集
                X_final[col] = le.transform(X[col].reset_index(drop=True))
                X_test_final[col] = le.transform(X_test[col].reset_index(drop=True))
            except Exception as e:
                raise
        
        # 更新选择的特征列表
        selected_features.extend(categorical_columns)
        
        # 使用处理后的DataFrame
        X_selected = X_final
        X_test_selected = X_test_final
        
    print(f"\n特征选择后的特征数量: {len(selected_features)}")
    
    # 在创建LightGBM数据集之前，处理特征名称
    def process_feature_name(name):
        """
        Map an arbitrary feature name to a LightGBM-safe identifier.

        The original name is MD5-hashed and the first 8 hex digits become
        a deterministic suffix, so the same input always yields the same
        output (uniqueness up to hash-prefix collisions, which the
        duplicate check further down guards against).
        """
        # Local import keeps this helper self-contained
        import hashlib
        digest = hashlib.md5(str(name).encode()).hexdigest()
        return f"feat_{digest[:8]}"

    # Build the original-name <-> hashed-name maps
    feature_name_map = {}
    reverse_feature_map = {}  # reverse lookup to recover original feature names
    
    print("\n=== 特征名称处理 ===")
    for col in X_selected.columns:
        processed_name = process_feature_name(col)
        feature_name_map[col] = processed_name
        reverse_feature_map[processed_name] = col
    
    # Rename the features in both datasets
    X_selected = X_selected.rename(columns=feature_name_map)
    X_test_selected = X_test_selected.rename(columns=feature_name_map)
    
    # Update the categorical feature names to their hashed forms
    categorical_columns = [feature_name_map[col] for col in categorical_columns]
    
    # Guard against hash-prefix collisions producing duplicate names
    if len(X_selected.columns) != len(set(X_selected.columns)):
        duplicates = X_selected.columns[X_selected.columns.duplicated()].tolist()
        raise ValueError(f"发现重复的特征名: {duplicates}")
    
    # Stratified split of the training data into train and validation sets
    X_train, X_val, y_train, y_val = train_test_split(
        X_selected, y,
        test_size=0.3,
        random_state=42,
        stratify=y
    )
    
    # Build LightGBM datasets, declaring the categorical features
    lgb_train = lgb.Dataset(X_train, y_train, 
                            feature_name=list(X_train.columns),
                            categorical_feature=categorical_columns,
                            weight=calculate_sample_weights(y_train))
    lgb_val = lgb.Dataset(X_val, y_val,
                            feature_name=list(X_val.columns),
                            categorical_feature=categorical_columns,
                            weight=calculate_sample_weights(y_val),
                            reference=lgb_train)
    
    # Dataset statistics
    print("\n=== 数据集统计信息 ===")
    print(f"训练集大小: {len(X_train)}, 坏样本数: {int(y_train.sum())}, 坏样本比例: {calculate_bad_ratio(y_train):.2f}%")
    print(f"验证集大小: {len(X_val)}, 坏样本数: {int(y_val.sum())}, 坏样本比例: {calculate_bad_ratio(y_val):.2f}%")
    print(f"测试集大小: {len(X_test)}, 坏样本数: {int(y_test.sum())}, 坏样本比例: {calculate_bad_ratio(y_test):.2f}%")
    print("=====================\n")

    # Feature selection (RFE + CV) over the prepared datasets
    final_features, results = select_features(
        X_train, y_train, X_val, y_val, X_test_selected, y_test, list(X_train.columns)
    )
