from sklearn.metrics import roc_auc_score, roc_curve, precision_score, recall_score, f1_score
import numpy as np
from DataProcess import process_final_data, union_with_sample, split_and_merge_data, evaluate_score_var_rst
from sklearn.model_selection import train_test_split
from DataProcess import calculate_bad_ratio
import lightgbm as lgb
import pandas as pd
from sklearn.utils.class_weight import compute_class_weight
from imblearn.over_sampling import SMOTE

def f1_error(preds, dtrain):
    """Custom LightGBM eval metric: F1 score at a fixed 0.5 cut-off.

    Returns the ``(name, value, is_higher_better)`` triple that LightGBM
    expects from a ``feval`` callable.
    """
    y_true = dtrain.get_label()
    # Binarise the raw probabilities at the 0.5 starting threshold.
    y_hat = np.where(preds >= 0.5, 1, 0)
    return 'f1-score', f1_score(y_true, y_hat), True

def feature_engineering(train_df, val_df):
    """Simplified feature engineering: add decile-rank features.

    For every numeric column (float64/int64, except the target 'flag_all')
    a ``<col>_quantile`` column is added holding the 0-based decile index.

    Bin edges are learned on the TRAINING set only and then applied to the
    validation set, so both frames are binned consistently.  (The original
    code ran ``pd.qcut`` independently on each frame, which produced
    incompatible bins and let the validation distribution shape its own
    features.)

    Note: both DataFrames are modified in place and also returned.

    Parameters
    ----------
    train_df, val_df : pd.DataFrame
        Training / validation frames containing the numeric features.

    Returns
    -------
    tuple[pd.DataFrame, pd.DataFrame]
        The mutated ``train_df`` and ``val_df``.
    """
    numeric_features = train_df.select_dtypes(include=['float64', 'int64']).columns
    numeric_features = [col for col in numeric_features if col != 'flag_all']

    for col in numeric_features:
        # Learn decile edges on the training data only.
        train_df[f'{col}_quantile'], bins = pd.qcut(
            train_df[col], q=10, labels=False, duplicates='drop', retbins=True
        )
        # Widen the outer edges so validation values outside the training
        # range fall into the first/last bin instead of becoming NaN.
        bins = bins.copy()
        bins[0] = -np.inf
        bins[-1] = np.inf
        val_df[f'{col}_quantile'] = pd.cut(val_df[col], bins=bins, labels=False)

    return train_df, val_df

def calculate_sample_weights(y):
    """Build per-sample weights that up-weight the minority class.

    Each class gets weight ``2 * n_samples / (n_classes * class_count)``
    (the standard 'balanced' scheme, doubled), and every sample receives
    the weight of its class.
    """
    total = len(y)
    classes = np.unique(y)
    # Per-class weight table: rarer classes receive larger weights.
    weight_of = {
        cls: (total / (len(classes) * np.sum(y == cls))) * 2
        for cls in classes
    }
    # Broadcast class weights onto the individual samples.
    return np.array([weight_of[label] for label in y])

def train_with_cv(X_train, y_train, X_time_test, y_time_test, params):
    """Cross-validate to pick the boosting-round count, train the final
    model, tune the classification threshold on the out-of-time test set,
    and print a full evaluation report.

    Parameters
    ----------
    X_train, y_train : training features / labels.
    X_time_test, y_time_test : out-of-time test features / labels (also
        used as the validation set during final training and for the
        threshold search).
    params : dict of LightGBM parameters; mutated ('verbose' forced to -1).

    Returns
    -------
    The trained LightGBM Booster.
    """
    params['verbose'] = -1
    early_stopping = lgb.early_stopping(stopping_rounds=50)
    
    # 1. Cross-validate to find the best number of boosting rounds.
    # NOTE(review): 'f1' is not a LightGBM built-in metric name — the actual
    # F1 metric comes from feval=f1_error. Confirm this `metrics` value does
    # not raise "Unknown metric" on the installed LightGBM version.
    cv_results = lgb.cv(
        params,
        lgb.Dataset(X_train, y_train),
        num_boost_round=1000,
        nfold=5,
        stratified=True,
        callbacks=[early_stopping],
        metrics=['f1'],
        feval=f1_error
    )
    
    # 2. Best round count = length of the CV metric history (early stopping
    # truncates it). NOTE(review): the result key name ('valid f1-score-mean')
    # is LightGBM-version-dependent — verify against the installed version.
    metric_name = 'valid f1-score-mean'
    best_rounds = len(cv_results[metric_name])
    print(f"\n最佳轮数: {best_rounds}")
    
    # 3. Retrain on the full training set for the chosen number of rounds,
    # monitoring the out-of-time test set.
    final_model = lgb.train(
        params,
        lgb.Dataset(X_train, y_train),
        num_boost_round=best_rounds,
        valid_sets=[lgb.Dataset(X_time_test, y_time_test)],
        feval=f1_error
    )
    
    # 4. Predicted probabilities for both splits.
    train_pred_proba = final_model.predict(X_train)
    test_pred_proba = final_model.predict(X_time_test)
    
    # 5. Grid-search the probability threshold that maximises F1 on the
    # test set. NOTE(review): tuning the threshold on the same split that is
    # evaluated below optimistically biases the reported test metrics.
    thresholds = np.arange(0.1, 0.9, 0.01)
    best_f1 = 0
    best_threshold = 0.5
    
    for threshold in thresholds:
        test_pred = (test_pred_proba >= threshold).astype(int)
        f1 = f1_score(y_time_test, test_pred)
        if f1 > best_f1:
            best_f1 = f1
            best_threshold = threshold
    
    print(f"\n最优阈值 (基于F1): {best_threshold:.4f}")
    
    # 6. Binarise both splits' predictions with the tuned threshold.
    train_pred = (train_pred_proba >= best_threshold).astype(int)
    test_pred = (test_pred_proba >= best_threshold).astype(int)
    
    # 7. Compute and print the evaluation metrics.
    def calculate_metrics(y_true, y_pred, y_pred_proba):
        # KS statistic = max vertical gap between the TPR and FPR curves.
        auc = roc_auc_score(y_true, y_pred_proba)
        fpr, tpr, _ = roc_curve(y_true, y_pred_proba)
        ks = max(abs(tpr - fpr))
        precision = precision_score(y_true, y_pred)
        recall = recall_score(y_true, y_pred)
        f1 = f1_score(y_true, y_pred)
        return auc, ks, precision, recall, f1
    
    def print_metrics(dataset_name, metrics):
        # Pretty-print one split's (auc, ks, precision, recall, f1) tuple.
        auc, ks, precision, recall, f1 = metrics
        print(f"\n{dataset_name}评估指标:")
        print(f"AUC: {auc:.4f}")
        print(f"KS: {ks:.4f}")
        print(f"查准率 (Precision): {precision:.4f}")
        print(f"查全率 (Recall): {recall:.4f}")
        print(f"F1 Score: {f1:.4f}")
    
    print("\n=== 最终模型评估结果 ===")
    print_metrics("训练集", calculate_metrics(y_train, train_pred, train_pred_proba))
    print_metrics("测试集", calculate_metrics(y_time_test, test_pred, test_pred_proba))
    
    # 8. Top-10 features by (split-count) importance.
    print("\n特征重要性（前10个）：")
    importance = final_model.feature_importance()
    feature_names = final_model.feature_name()
    importance_dict = dict(zip(feature_names, importance))
    sorted_importance = sorted(importance_dict.items(), key=lambda x: x[1], reverse=True)
    for feat, imp in sorted_importance[:10]:
        print(f"{feat}: {imp}")
    
    # 9. Echo the final parameter set and tuned values for reproducibility.
    print("\n=== 最优模型参数 ===")
    for param_name, param_value in params.items():
        print(f"{param_name}: {param_value}")
    print(f"best_rounds: {best_rounds}")
    print(f"best_threshold: {best_threshold:.4f}")
    
    return final_model

# Model parameters are defined ahead of the main program.
def get_lgb_params():
    """Return the LightGBM parameter dict used by this script.

    The settings deliberately favour a small, heavily regularised model
    (few leaves, shallow trees, low learning rate, strong L1/L2, feature
    and row subsampling) and up-weight the positive class to counter the
    class imbalance.
    """
    return dict(
        task='train',
        boosting_type='gbdt',
        objective='binary',
        metric=['auc', 'binary_logloss'],
        # keep model complexity low
        num_leaves=8,           # few leaf nodes
        max_depth=3,            # shallow trees
        learning_rate=0.01,     # slow learning
        # strengthen regularisation
        feature_fraction=0.5,   # use half the features per tree
        bagging_fraction=0.5,   # use half the rows per iteration
        bagging_freq=5,         # re-bag every 5 iterations
        min_child_samples=200,  # large minimum leaf size
        min_child_weight=0.1,   # larger minimum hessian per leaf
        lambda_l1=2.0,          # strong L1
        lambda_l2=2.0,          # strong L2
        verbose=-1,
        # class-imbalance handling
        scale_pos_weight=3,     # up-weight positive samples
        # additional regularisation knobs
        min_split_gain=0.1,     # minimum gain to split
        min_data_in_bin=100,    # minimum samples per histogram bin
        max_bin=50,             # coarser feature binning
    )

def find_best_threshold(y_true, y_pred_proba):
    """Finer-grained threshold search.

    Scans 1000 evenly spaced cut-offs in [0.1, 0.9] and returns the
    ``(threshold, f1)`` pair with the highest F1 score.  Ties keep the
    earliest (smallest) threshold; the default is (0.5, 0) if nothing
    scores above zero.
    """
    best = (0.5, 0)
    for cut in np.linspace(0.1, 0.9, 1000):  # finer grid than the CV search
        score = f1_score(y_true, (y_pred_proba >= cut).astype(int))
        if score > best[1]:
            best = (cut, score)
    return best

# Script entry point: data prep, feature engineering, resampling, training.
if __name__ == "__main__":
    # Load and split the modelling data (project-specific helpers).
    final_df = process_final_data()
    # merged_df = union_with_sample(final_df)
    train_df, val_df = split_and_merge_data(final_df)
    
    

    # Fill any remaining missing values with a sentinel value.
    if train_df.isnull().any().any():
        print("警告：数据中仍存在缺失值，将使用-999999填充")
        train_df = train_df.fillna(-999999)

    if val_df.isnull().any().any():
        print("警告：数据中仍存在缺失值，将使用-999999填充")
        val_df = val_df.fillna(-999999)
    
    # Benchmark: evaluate the external vendor score on the validation set.
    # (Return values are not used further in this script.)
    intersection_df, ks, auc = evaluate_score_var_rst(val_df)

    # Drop identifiers, outcome/leakage-prone columns, competing scores,
    # and a stray export artefact ('Unnamed: 13').
    cols_to_drop = ['app_num', 'APPLY_NO', 'APP_DT', 'modPro', 'APPLY_STS',
                    'host_cust_id', 'ACCT_NUM', 'CREDIT_TYPE_TWO',
                    'GUAR_MODE_CD', 'DISTR_DT', 'ODUE_AMT', 'LATEST_OWE_DAYS',
                    'LONGEST_OVDUE_DAYS', 'ACCUM_OVDUE_CNT', 'jx_score', 'score_new',
                    'SCORE_ANTI_FRAUD', 'Unnamed: 13']
    train_df = train_df.drop(columns=cols_to_drop, errors='ignore')
    val_df = val_df.drop(columns=cols_to_drop, errors='ignore')
    
    # Add quantile-bin features (mutates both frames in place).
    train_df, val_df = feature_engineering(train_df, val_df)
    
    # Split features from the binary target 'flag_all'.
    X_train = train_df.drop('flag_all', axis=1)
    y_train = train_df['flag_all']
    X_time_test = val_df.drop('flag_all', axis=1)
    y_time_test = val_df['flag_all']
    
    # Print dataset sizes and bad-sample ratios.
    print("\n=== 数据集统计信息 ===")
    print(f"训练集大小: {len(X_train)}, 坏样本数: {int(y_train.sum())}, 坏样本比例: {calculate_bad_ratio(y_train):.2f}%")
    print(f"验证集大小: {len(X_time_test)}, 坏样本数: {int(y_time_test.sum())}, 坏样本比例: {calculate_bad_ratio(y_time_test):.2f}%")
    print("=====================\n")
    
    # Cast everything to float32 for LightGBM.
    X_train = X_train.astype('float32')
    X_time_test = X_time_test.astype('float32')
    y_train = y_train.astype('float32')
    y_time_test = y_time_test.astype('float32')
    
    # Feature selection: drop near-constant features by variance threshold.
    from sklearn.feature_selection import VarianceThreshold
    selector = VarianceThreshold(threshold=0.01)
    
    # Keep the original column names (fit_transform returns a bare ndarray).
    feature_names = X_train.columns.tolist()
    
    # Fit the selector on train; apply the same mask to the validation set.
    X_train_selected = selector.fit_transform(X_train)
    X_time_test_selected = selector.transform(X_time_test)
    
    # Recover which columns survived the threshold.
    selected_features_mask = selector.get_support()
    selected_features = [feature_names[i] for i in range(len(feature_names)) if selected_features_mask[i]]
    
    # Report the selection result.
    print(f"\n特征选择后的特征数量: {X_train_selected.shape[1]}")
    print(f"\n特征选择后的特征: {selected_features}")
    
    # Outlier-robust scaling (median/IQR), fit on train only.
    # NOTE(review): after this step X_train / X_time_test are ndarrays, so
    # the model loses column names (feature importances print as generic
    # names) — confirm this is acceptable.
    from sklearn.preprocessing import RobustScaler
    scaler = RobustScaler()
    X_train = scaler.fit_transform(X_train_selected)
    X_time_test = scaler.transform(X_time_test_selected)
    
    # Model hyperparameters.
    params = get_lgb_params()
    
    # Conservative SMOTE: oversample the minority class up to 30% of the
    # majority class (raises if the minority ratio already exceeds 0.3).
    smote = SMOTE(random_state=42, sampling_strategy=0.3)
    X_train_resampled, y_train_resampled = smote.fit_resample(X_train, y_train)
    
    # Train on the resampled data; the validation set stays untouched.
    final_model = train_with_cv(X_train_resampled, y_train_resampled, X_time_test, y_time_test, params)