from sklearn.metrics import roc_auc_score, roc_curve, precision_score, recall_score, f1_score
import numpy as np
from DataProcess import (
    process_final_data, 
    union_with_sample, 
    split_and_merge_data, 
    evaluate_score_var_rst,
    calculate_psi2
)
from sklearn.model_selection import train_test_split
from DataProcess import calculate_bad_ratio
import lightgbm as lgb
import pandas as pd
from sklearn.utils.class_weight import compute_class_weight
from imblearn.over_sampling import SMOTE
from sklearn.feature_selection import VarianceThreshold

def ks_error(preds, dtrain):
    """Custom KS (Kolmogorov-Smirnov) evaluation metric for LightGBM.

    Computes KS = max |TPR - FPR| over the ROC curve.

    Args:
        preds: predicted probabilities for the positive class.
        dtrain: lgb.Dataset holding the true labels.

    Returns:
        Tuple (metric_name, value, is_higher_better) as LightGBM expects.
    """
    y_true = dtrain.get_label()
    fpr, tpr, _ = roc_curve(y_true, preds)
    ks_stat = np.max(np.abs(tpr - fpr))
    return 'ks-value', ks_stat, True

def train_and_evaluate_lgb_model(X_train, y_train, X_val, y_val, X_time_test, y_time_test):
    """Train a LightGBM binary classifier and evaluate AUC/KS/Precision/Recall/F1.

    Training samples are re-weighted toward the minority class, early stopping
    is driven by the custom KS feval on the validation set, and the final
    classification threshold is chosen by maximizing F1 on the validation set.

    Args:
        X_train, y_train: training features / labels.
        X_val, y_val: validation features / labels (early stopping + threshold search).
        X_time_test, y_time_test: out-of-time test features / labels.

    Returns:
        Tuple (model, train_metrics, val_metrics, test_metrics); each metrics
        entry is (auc, ks, precision, recall, f1).
    """
    # 1. Build LightGBM datasets; up-weight the minority (bad) class.
    lgb_train = lgb.Dataset(X_train, y_train, weight=calculate_sample_weights(y_train))
    lgb_val = lgb.Dataset(X_val, y_val, reference=lgb_train)

    # 2. Model parameters.
    # 'metric' must be the literal string 'None' to disable built-in metrics and
    # evaluate only the custom feval (KS). The previous value 'ks_error' is not
    # a built-in metric name and makes LightGBM raise "Unknown metric type name".
    params = {
        'task': 'train',
        'boosting_type': 'gbdt',
        'objective': 'binary',
        'metric': 'None',  # rely solely on the custom KS feval
        'num_leaves': 31,
        'max_depth': 4,
        'learning_rate': 0.01,
        'feature_fraction': 0.7,
        'bagging_fraction': 0.4,
        'bagging_freq': 5,
        'min_child_samples': 300,
        'lambda_l1': 0.1,
        'lambda_l2': 0.1,
        'verbose': -1,
        'scale_pos_weight': 20,
        'early_stopping_round': 500
    }

    # 3. Train with early stopping on the validation KS.
    lgb_model = lgb.train(
        params,
        lgb_train,
        num_boost_round=5000,  # upper bound; early stopping picks the best round
        valid_sets=[lgb_train, lgb_val],
        valid_names=['train', 'valid'],
        feval=ks_error
    )

    # 4. Predicted probabilities (predict() automatically uses best_iteration
    #    when early stopping fired).
    train_pred_proba = lgb_model.predict(X_train)
    val_pred_proba = lgb_model.predict(X_val)
    test_pred_proba = lgb_model.predict(X_time_test)

    # 5. Grid-search the probability threshold maximizing F1 on the validation set.
    thresholds = np.arange(0.1, 0.9, 0.01)
    best_f1 = 0
    best_threshold = 0.3  # fallback threshold if nothing beats F1 = 0

    for threshold in thresholds:
        val_pred = (val_pred_proba >= threshold).astype(int)
        f1 = f1_score(y_val, val_pred)
        if f1 > best_f1:
            best_f1 = f1
            best_threshold = threshold

    print(f"\n最优阈值: {best_threshold:.4f}")

    # Binarize all three splits with the chosen threshold.
    train_pred = (train_pred_proba >= best_threshold).astype(int)
    val_pred = (val_pred_proba >= best_threshold).astype(int)
    test_pred = (test_pred_proba >= best_threshold).astype(int)

    # 6. Metric helper: returns (auc, ks, precision, recall, f1).
    def calculate_metrics(y_true, y_pred, y_pred_proba):
        auc = roc_auc_score(y_true, y_pred_proba)
        fpr, tpr, _ = roc_curve(y_true, y_pred_proba)
        ks = max(abs(tpr - fpr))
        precision = precision_score(y_true, y_pred)
        recall = recall_score(y_true, y_pred)
        f1 = f1_score(y_true, y_pred)
        return auc, ks, precision, recall, f1

    train_metrics = calculate_metrics(y_train, train_pred, train_pred_proba)
    val_metrics = calculate_metrics(y_val, val_pred, val_pred_proba)
    test_metrics = calculate_metrics(y_time_test, test_pred, test_pred_proba)

    # 7. Report all three splits.
    def print_metrics(dataset_name, metrics):
        auc, ks, precision, recall, f1 = metrics
        print(f"\n{dataset_name} 评估指标:")
        print(f"AUC: {auc:.4f}")
        print(f"KS: {ks:.4f}")
        print(f"查准率 (Precision): {precision:.4f}")
        print(f"查全率 (Recall): {recall:.4f}")
        print(f"F1 Score: {f1:.4f}")

    print("\n=== LightGBM模型评估结果 ===")
    print_metrics("训练集", train_metrics)
    print_metrics("验证集", val_metrics)
    print_metrics("测试集", test_metrics)

    return lgb_model, train_metrics, val_metrics, test_metrics

def feature_engineering(train_df, val_df):
    """Add decile-rank features for every numeric column (except the label).

    Bug fix: the original binned the validation set with its own `qcut`, so the
    same raw value could land in different deciles on train vs. val. Decile
    edges are now fitted on the training data only and re-applied to the
    validation data, keeping the transformation consistent.

    Args:
        train_df: training DataFrame (modified in place, also returned).
        val_df: validation DataFrame (modified in place, also returned).

    Returns:
        Tuple (train_df, val_df) with `<col>_quantile` columns added.
    """
    numeric_features = train_df.select_dtypes(include=['float64', 'int64']).columns
    numeric_features = [col for col in numeric_features if col != 'flag_all']

    for col in numeric_features:
        # Fit decile edges on training data only.
        train_df[f'{col}_quantile'], edges = pd.qcut(
            train_df[col], q=10, labels=False, duplicates='drop', retbins=True
        )
        # Apply the same edges to the validation data; values outside the
        # training range become NaN rather than being forced into a bin.
        val_df[f'{col}_quantile'] = pd.cut(
            val_df[col], bins=edges, labels=False, include_lowest=True
        )

    return train_df, val_df

def calculate_sample_weights(y):
    """Compute per-sample weights, doubled to emphasize the minority class.

    Each class c gets weight 2 * n_samples / (n_classes * count_c), i.e. the
    standard balanced inverse-frequency weight scaled by 2.

    Args:
        y: array-like of class labels.

    Returns:
        np.ndarray of one weight per sample, aligned with `y`.
    """
    labels = np.asarray(y)
    classes, counts = np.unique(labels, return_counts=True)
    total, n_classes = labels.size, classes.size
    # weight_c = 2 * total / (n_classes * count_c)
    weight_of = {
        cls: (total / (n_classes * cnt)) * 2
        for cls, cnt in zip(classes, counts)
    }
    return np.array([weight_of[label] for label in labels])

def train_with_cv(X_train, y_train, X_val, y_val, params):
    """Pick the boosting-round count via 5-fold CV, then train a final model.

    Bug fixes vs. the original:
    - `metrics=['ks_error']` was passed to lgb.cv, but 'ks_error' is not a
      built-in LightGBM metric and raises "Unknown metric type name"; the
      built-in metrics are now disabled (`'metric': 'None'`) and evaluation
      comes solely from the custom KS feval.
    - The caller's `params` dict was mutated; it is copied first.
    - The final report labeled validation-set metrics as "测试集"; fixed.

    Args:
        X_train, y_train: training features / labels.
        X_val, y_val: validation features / labels.
        params: base LightGBM parameter dict (not modified).

    Returns:
        The final trained lgb.Booster.
    """
    # Copy so the caller's dict is left untouched.
    params = dict(params)
    params['verbose'] = -1
    params['metric'] = 'None'  # rely solely on the custom KS feval
    early_stopping = lgb.early_stopping(stopping_rounds=50)

    cv_results = lgb.cv(
        params,
        lgb.Dataset(X_train, y_train),
        num_boost_round=1000,
        nfold=5,
        stratified=True,
        callbacks=[early_stopping],
        feval=ks_error
    )

    # Show which metric keys the CV produced.
    print("\nCV结果中的指标:", cv_results.keys())

    # Locate the mean-KS series without hard-coding the exact key name.
    metric_name = next(k for k in cv_results if k.endswith('ks-value-mean'))
    best_rounds = len(cv_results[metric_name])
    print(f"\n最佳轮数: {best_rounds}")

    # Train the final model for the CV-selected number of rounds.
    final_model = lgb.train(
        params,
        lgb.Dataset(X_train, y_train),
        num_boost_round=best_rounds,
        valid_sets=[lgb.Dataset(X_val, y_val)],
        feval=ks_error
    )

    # Predicted probabilities for both splits.
    train_pred_proba = final_model.predict(X_train)
    val_pred_proba = final_model.predict(X_val)

    # Grid-search the threshold maximizing F1 on the validation set.
    thresholds = np.arange(0.1, 0.9, 0.01)
    best_f1 = 0
    best_threshold = 0.3  # fallback threshold if nothing beats F1 = 0

    for threshold in thresholds:
        val_pred = (val_pred_proba >= threshold).astype(int)
        f1 = f1_score(y_val, val_pred)
        if f1 > best_f1:
            best_f1 = f1
            best_threshold = threshold

    print(f"\n最优阈值: {best_threshold:.4f}")

    # Binarize with the chosen threshold.
    train_pred = (train_pred_proba >= best_threshold).astype(int)
    val_pred = (val_pred_proba >= best_threshold).astype(int)

    # Metric helper: returns (auc, ks, precision, recall, f1).
    def calculate_metrics(y_true, y_pred, y_pred_proba):
        auc = roc_auc_score(y_true, y_pred_proba)
        fpr, tpr, _ = roc_curve(y_true, y_pred_proba)
        ks = max(abs(tpr - fpr))
        precision = precision_score(y_true, y_pred)
        recall = recall_score(y_true, y_pred)
        f1 = f1_score(y_true, y_pred)
        return auc, ks, precision, recall, f1

    def print_metrics(dataset_name, metrics):
        auc, ks, precision, recall, f1 = metrics
        print(f"\n{dataset_name} 评估指标:")
        print(f"AUC: {auc:.4f}")
        print(f"KS: {ks:.4f}")
        print(f"查准率 (Precision): {precision:.4f}")
        print(f"查全率 (Recall): {recall:.4f}")
        print(f"F1 Score: {f1:.4f}")

    print("\n=== 最终模型评估结果 ===")
    print_metrics("训练集", calculate_metrics(y_train, train_pred, train_pred_proba))
    # Correct label: these are validation-set metrics.
    print_metrics("验证集", calculate_metrics(y_val, val_pred, val_pred_proba))

    return final_model

# Model parameters shared by the training entry points
def get_lgb_params():
    """Return the default LightGBM parameter dict for binary classification.

    Bug fix: 'metric' is the literal string 'None' so LightGBM disables its
    built-in metrics and evaluates only the custom KS feval passed to
    train/cv. The previous value 'ks_error' is not a built-in metric name and
    makes LightGBM raise "Unknown metric type name".
    """
    return {
        'task': 'train',
        'boosting_type': 'gbdt',
        'objective': 'binary',
        'metric': 'None',  # rely solely on the custom KS feval
        'num_leaves': 31,
        'max_depth': 4,
        'learning_rate': 0.01,
        'feature_fraction': 0.7,
        'bagging_fraction': 0.4,
        'bagging_freq': 5,
        'min_child_samples': 300,
        'lambda_l1': 0.1,
        'lambda_l2': 0.1,
        'verbose': -1,
        'scale_pos_weight': 20,
        'early_stopping_round': 500
    }

def find_best_threshold(y_true, y_pred_proba):
    """Grid-search the probability cut-off that maximizes F1.

    Scans 1000 evenly spaced thresholds in [0.1, 0.9]; falls back to 0.5 if
    no threshold produces a positive F1.

    Args:
        y_true: ground-truth binary labels.
        y_pred_proba: predicted positive-class probabilities.

    Returns:
        Tuple (best_threshold, best_f1).
    """
    best_threshold, best_f1 = 0.5, 0
    for cutoff in np.linspace(0.1, 0.9, 1000):
        score = f1_score(y_true, (y_pred_proba >= cutoff).astype(int))
        if score > best_f1:
            best_f1, best_threshold = score, cutoff
    return best_threshold, best_f1

def calculate_iv(df, feature, target='flag_all', bins=10, show_woe=False):
    """Compute the Information Value (IV) of a single feature.

    Numeric features are equal-frequency binned into `bins` buckets;
    categorical features are grouped by their raw values.

    Bug fixes vs. the original: the caller's DataFrame is no longer mutated
    (the original added a 'bins' column and replaced inf values in place), and
    the bare `except` is narrowed to the exceptions `float()` can raise.

    Args:
        df: source DataFrame (left untouched).
        feature: column whose IV is computed.
        target: binary label column (1 = event).
        bins: number of equal-frequency bins for numeric features.
        show_woe: if True, print the per-bin WOE/IV table.

    Returns:
        The total IV (float). NOTE: a bin with zero events or zero non-events
        still yields an infinite WOE term, as in the original formula.
    """

    def convert_numeric(x):
        try:
            return float(x)
        except (TypeError, ValueError):
            # Non-numeric value: keep as-is so the feature is treated as categorical.
            return x

    # Work on a two-column copy so the caller's DataFrame is not modified.
    work = df[[feature, target]].copy()

    # Numeric features get binned; categorical features are used directly.
    is_numeric = all(isinstance(convert_numeric(x), (int, float)) for x in work[feature].dropna())

    if is_numeric:
        # Neutralize extreme values before binning.
        work[feature] = work[feature].replace([np.inf, -np.inf], np.nan)
        # Equal-frequency binning.
        work['bins'] = pd.qcut(work[feature], bins, duplicates='drop')
    else:
        work['bins'] = work[feature]

    # Per-bin WOE and IV.
    grouped = work.groupby('bins')[target].agg(['count', 'sum'])
    grouped['non_event'] = grouped['count'] - grouped['sum']
    grouped['event_rate'] = grouped['sum'] / grouped['sum'].sum()
    grouped['non_event_rate'] = grouped['non_event'] / grouped['non_event'].sum()
    grouped['woe'] = np.log(grouped['event_rate'] / grouped['non_event_rate'])
    grouped['iv'] = (grouped['event_rate'] - grouped['non_event_rate']) * grouped['woe']

    if show_woe:
        print(f"\n=== {feature} WOE值 ===")
        print(grouped[['count', 'sum', 'woe', 'iv']])

    return grouped['iv'].sum()

def feature_selection(train_df, test_df, iv_threshold=0.01, missing_threshold=0.9):
    """Screen features by IV and missing rate; PSI is computed for reporting only.

    A feature is kept when its IV is at least `iv_threshold` AND its worst
    (train vs. test) missing rate is below `missing_threshold`. PSI values are
    printed but do not affect the selection.

    Args:
        train_df: training DataFrame containing the label column 'flag_all'.
        test_df: test DataFrame with the same columns.
        iv_threshold: minimum IV to keep a feature.
        missing_threshold: maximum tolerated missing rate.

    Returns:
        List of selected feature names.
    """
    features = [col for col in train_df.columns if col not in ['flag_all', 'APP_DT']]

    # --- IV per feature ---
    iv_values = {}
    print("\n=== IV值计算结果 ===")
    for feat in features:
        iv_values[feat] = calculate_iv(train_df, feat)
        print(f"{feat}: {iv_values[feat]:.4f}")

    # --- worst-case missing rate per feature ---
    missing_rates = {}
    print("\n=== 缺失值分析 ===")
    for feat in features:
        rate_train = train_df[feat].isnull().mean()
        rate_test = test_df[feat].isnull().mean()
        missing_rates[feat] = max(rate_train, rate_test)
        print(f"{feat} - 训练集缺失率: {rate_train:.2%}, 测试集缺失率: {rate_test:.2%}")

    # --- PSI (reported only, not used for filtering) ---
    print("\n=== PSI值计算结果 ===")
    psi_values = {}
    for feat in features:
        psi_values[feat] = calculate_psi2(train_df, test_df, feat)
        print(f"{feat}: {psi_values[feat]:.4f}")

    # --- apply both filters ---
    selected_features = [
        feat for feat in features
        if iv_values[feat] >= iv_threshold and missing_rates[feat] < missing_threshold
    ]

    n_low_iv = sum(1 for v in iv_values.values() if v < iv_threshold)
    n_high_missing = sum(1 for v in missing_rates.values() if v >= missing_threshold)
    print("\n=== 特征筛选结果 ===")
    print(f"原始特征数: {len(features)}")
    print(f"IV值<{iv_threshold}的特征数: {n_low_iv}")
    print(f"缺失率>={missing_threshold*100}%的特征数: {n_high_missing}")
    print(f"保留特征数: {len(selected_features)}")

    return selected_features

# Entry point: end-to-end data prep, feature screening, training, evaluation.
if __name__ == "__main__":
    # Data preparation (project helpers; behavior not visible here).
    final_df = process_final_data()
    train_df, test_df = split_and_merge_data(final_df)
    
    # Evaluate the external benchmark score on the test set first,
    # before its score columns are dropped below.
    intersection_df, ks, auc = evaluate_score_var_rst(test_df)
    
    # Drop identifiers, dates, post-loan outcome fields and external scores
    # so they cannot enter the model as features.
    cols_to_drop = ['app_num', 'APPLY_NO', 'APP_DT', 'modPro', 'APPLY_STS',
                    'host_cust_id', 'ACCT_NUM', 'CREDIT_TYPE_TWO',
                    'GUAR_MODE_CD', 'DISTR_DT', 'ODUE_AMT', 'LATEST_OWE_DAYS',
                    'LONGEST_OVDUE_DAYS', 'ACCUM_OVDUE_CNT', 'jx_score', 'score_new',
                    'SCORE_ANTI_FRAUD', 'Unnamed: 13']
    
    train_df = train_df.drop(columns=cols_to_drop, errors='ignore')
    test_df = test_df.drop(columns=cols_to_drop, errors='ignore')
    
    # Fill any remaining missing values with a sentinel value.
    if train_df.isnull().any().any():
        print("警告：训练数据中存在缺失值，使用-999999填充")
        train_df = train_df.fillna(-999999)
    if test_df.isnull().any().any():
        print("警告：测试数据中存在缺失值，使用-999999填充")
        test_df = test_df.fillna(-999999)
    
    # Feature screening (IV + missing rate) before any feature engineering.
    selected_features = feature_selection(train_df, test_df)
    
    # Keep only the selected features plus the label.
    train_df = train_df[selected_features + ['flag_all']]
    test_df = test_df[selected_features + ['flag_all']]
    
    # Feature engineering (currently disabled).
    # train_df, test_df = feature_engineering(train_df, test_df)
    
    # Split features / label.
    X_train = train_df.drop('flag_all', axis=1)
    y_train = train_df['flag_all']
    X_test = test_df.drop('flag_all', axis=1)
    y_test = test_df['flag_all']
    
    # Dataset statistics.
    print("\n=== 数据集统计信息 ===")
    print(f"训练集大小: {len(X_train)}, 坏样本数: {int(y_train.sum())}, 坏样本比例: {calculate_bad_ratio(y_train):.2f}%")
    print(f"测试集大小: {len(X_test)}, 坏样本数: {int(y_test.sum())}, 坏样本比例: {calculate_bad_ratio(y_test):.2f}%")
    print("=====================\n")
    
    # Cast to float32 for training.
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    y_train = y_train.astype('float32')
    y_test = y_test.astype('float32')
    
    # Low-variance feature removal.
    # NOTE(review): fit_transform returns a plain numpy array, so column names
    # are lost — the feature importances printed at the bottom will show
    # generic Column_0, Column_1, ... names.
    selector = VarianceThreshold(threshold=0.01)
    feature_names = X_train.columns.tolist()  # NOTE(review): unused — rebound from the model below
    
    X_train_selected = selector.fit_transform(X_train)
    X_test_selected = selector.transform(X_test)
    
    # Train the model.
    # NOTE(review): the training set is passed as both the training and the
    # validation split, so early stopping and the F1 threshold search are tuned
    # on training data — consider a proper held-out validation set.
    lgb_model, train_metrics, _, test_metrics = train_and_evaluate_lgb_model(
        X_train_selected, y_train,
        X_train_selected, y_train,
        X_test_selected, y_test
    )
    
    # Report using the metric tuples returned by the trainer.
    print("\n=== 模型评估结果 ===")
    def print_metrics(dataset_name, metrics):
        # metrics is (auc, ks, precision, recall, f1)
        auc, ks, precision, recall, f1 = metrics
        print(f"\n{dataset_name}评估指标:")
        print(f"AUC: {auc:.4f}")
        print(f"KS: {ks:.4f}")
        print(f"查准率 (Precision): {precision:.4f}")
        print(f"查全率 (Recall): {recall:.4f}")
        print(f"F1 Score: {f1:.4f}")
    
    print_metrics("训练集", train_metrics)
    print_metrics("测试集", test_metrics)
    
    # Top-10 feature importances (split counts by default).
    print("\n特征重要性（前10个）：")
    importance = lgb_model.feature_importance()
    feature_names = lgb_model.feature_name()
    importance_dict = dict(zip(feature_names, importance))
    sorted_importance = sorted(importance_dict.items(), key=lambda x: x[1], reverse=True)
    for feat, imp in sorted_importance[:10]:
        print(f"{feat}: {imp}")