from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, roc_curve, precision_score, recall_score, f1_score
import numpy as np
from DataProcess import process_final_data, union_with_sample, split_and_merge_data,evaluate_score_var_rst
from sklearn.model_selection import train_test_split
from DataProcess import calculate_bad_ratio

def _find_best_ks_threshold(y_val, val_pred_proba):
    """Grid-search a probability cutoff that maximizes KS on the validation set.

    Scans thresholds in [0.1, 0.85] with step 0.05 and keeps the one whose
    binarized predictions yield the largest KS (max |TPR - FPR|).

    Args:
        y_val: validation labels (0/1).
        val_pred_proba: predicted positive-class probabilities for the
            validation set.

    Returns:
        float: the best-KS threshold, or 0.5 if no candidate beats a KS of 0.
    """
    best_ks = 0
    best_threshold = 0.5
    for threshold in np.arange(0.1, 0.9, 0.05):
        val_pred = (val_pred_proba >= threshold).astype(int)
        # roc_curve on hard 0/1 predictions yields at most 3 points; the
        # max |TPR - FPR| over them is the KS of the binarized prediction.
        fpr, tpr, _ = roc_curve(y_val, val_pred)
        ks = max(abs(tpr - fpr))
        if ks > best_ks:
            best_ks = ks
            best_threshold = threshold
    return best_threshold


def _calculate_metrics(y_true, y_pred, y_pred_proba):
    """Compute (AUC, KS, precision, recall, F1) for one dataset split.

    AUC and KS are computed from the probabilities; precision/recall/F1 from
    the thresholded 0/1 predictions.
    """
    auc = roc_auc_score(y_true, y_pred_proba)
    fpr, tpr, _ = roc_curve(y_true, y_pred_proba)
    ks = max(abs(tpr - fpr))
    precision = precision_score(y_true, y_pred)
    recall = recall_score(y_true, y_pred)
    f1 = f1_score(y_true, y_pred)
    return auc, ks, precision, recall, f1


def _print_metrics(dataset_name, metrics):
    """Pretty-print one split's (AUC, KS, precision, recall, F1) tuple."""
    auc, ks, precision, recall, f1 = metrics
    print(f"\n{dataset_name} 评估指标:")
    print(f"AUC: {auc:.4f}")
    print(f"KS: {ks:.4f}")
    print(f"查准率 (Precision): {precision:.4f}")
    print(f"查全率 (Recall): {recall:.4f}")
    print(f"F1 Score: {f1:.4f}")


def train_and_evaluate_lr_model(X_train, y_train, X_val, y_val, X_time_test, y_time_test):
    """Train a logistic-regression model and report metrics on all three splits.

    Fits an L1-regularized LR on the training split, selects the KS-optimal
    classification threshold on the validation split, then prints AUC / KS /
    precision / recall / F1 for train, validation and out-of-time test.

    Args:
        X_train, y_train: training features and binary labels.
        X_val, y_val: validation features/labels used for threshold tuning.
        X_time_test, y_time_test: out-of-time test features/labels.

    Returns:
        tuple: (fitted model, train_metrics, val_metrics, test_metrics),
        where each metrics entry is (auc, ks, precision, recall, f1).
    """
    # 1. L1-regularized logistic regression.
    # NOTE(review): C=0.00001 is extremely strong regularization; combined
    # with an L1 penalty it may drive every coefficient to zero and make
    # predict_proba nearly constant — confirm this is intended.
    lr_model = LogisticRegression(
        C=0.00001,
        solver='saga',  # saga supports the L1 penalty and scales to large data
        class_weight='balanced',
        max_iter=2000,
        random_state=42,
        penalty='l1',
    )

    # 2. Fit on the training split.
    lr_model.fit(X_train, y_train)

    # 3. Pick the classification threshold maximizing KS on the validation split.
    val_pred_proba = lr_model.predict_proba(X_val)[:, 1]
    best_threshold = _find_best_ks_threshold(y_val, val_pred_proba)

    # 4. Binarize every split at the chosen threshold.
    train_pred_proba = lr_model.predict_proba(X_train)[:, 1]
    test_pred_proba = lr_model.predict_proba(X_time_test)[:, 1]

    train_pred = (train_pred_proba >= best_threshold).astype(int)
    val_pred = (val_pred_proba >= best_threshold).astype(int)
    test_pred = (test_pred_proba >= best_threshold).astype(int)

    # 5. Compute metrics per split.
    train_metrics = _calculate_metrics(y_train, train_pred, train_pred_proba)
    val_metrics = _calculate_metrics(y_val, val_pred, val_pred_proba)
    test_metrics = _calculate_metrics(y_time_test, test_pred, test_pred_proba)

    # 6. Report.
    print("\n=== 逻辑回归模型评估结果 ===")
    _print_metrics("训练集", train_metrics)
    _print_metrics("验证集", val_metrics)
    _print_metrics("测试集", test_metrics)

    return lr_model, train_metrics, val_metrics, test_metrics

# 在主程序中调用
# Script entry point: build the datasets, preprocess, then train and
# evaluate the logistic-regression model.
if __name__ == "__main__":
    # Build the raw train/validation frames via the project's DataProcess helpers.
    final_df = process_final_data()
    merged_df = union_with_sample(final_df)
    train_df, val_df = split_and_merge_data(final_df, merged_df)

    # Benchmark the external vendor score on the validation frame.
    # NOTE(review): intersection_df/ks/auc are never used again in this
    # script — presumably evaluate_score_var_rst prints its own report; verify.
    intersection_df, ks, auc = evaluate_score_var_rst(val_df)

    # Fall back to a sentinel value for any missing data still present.
    if train_df.isnull().any().any():
        print("警告：数据中仍存在缺失值，将使用-999999填充")
        train_df = train_df.fillna(-999999)

    if val_df.isnull().any().any():
        print("警告：数据中仍存在缺失值，将使用-999999填充")
        val_df = val_df.fillna(-999999)

    # Drop identifier, date, outcome and external-score columns before
    # modeling; errors='ignore' tolerates columns absent from either frame.
    cols_to_drop = ['app_num', 'APPLY_NO', 'APP_DT', 'modPro', 'APPLY_STS',
                    'host_cust_id', 'ACCT_NUM', 'CREDIT_TYPE_TWO',
                    'GUAR_MODE_CD', 'DISTR_DT', 'ODUE_AMT', 'LATEST_OWE_DAYS',
                    'LONGEST_OVDUE_DAYS', 'ACCUM_OVDUE_CNT', 'jx_score', 'score_new',
                    'SCORE_ANTI_FRAUD', 'Unnamed: 13']
    train_df = train_df.drop(columns=cols_to_drop, errors='ignore')
    val_df = val_df.drop(columns=cols_to_drop, errors='ignore')

    # Random 80/20 split of the training frame into train/validation;
    # 'flag_all' is the binary target column.
    X_train = train_df.drop('flag_all', axis=1)
    y_train = train_df['flag_all']
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)

    # The original validation frame is repurposed as the out-of-time test set.
    X_time_test = val_df.drop('flag_all', axis=1)
    y_time_test = val_df['flag_all']

    # Print the size and bad-sample ratio of each split.
    print("\n=== 数据集统计信息 ===")
    print(f"训练集大小: {len(X_train)}, 坏样本数: {int(y_train.sum())}, 坏样本比例: {calculate_bad_ratio(y_train):.2f}%")
    print(f"验证集大小: {len(X_val)}, 坏样本数: {int(y_val.sum())}, 坏样本比例: {calculate_bad_ratio(y_val):.2f}%")
    print(f"测试集大小: {len(X_time_test)}, 坏样本数: {int(y_time_test.sum())}, 坏样本比例: {calculate_bad_ratio(y_time_test):.2f}%")
    print("=====================\n")

    # Cast everything to float32 to keep dtypes uniform across splits.
    X_train = X_train.astype('float32')
    X_val = X_val.astype('float32')
    X_time_test = X_time_test.astype('float32')
    y_train = y_train.astype('float32')
    y_val = y_val.astype('float32')
    y_time_test = y_time_test.astype('float32')

    # Feature selection: drop near-constant features. The selector is fit on
    # the training split only, then applied to val/test (no leakage).
    from sklearn.feature_selection import VarianceThreshold
    selector = VarianceThreshold(threshold=0.01)

    X_train = selector.fit_transform(X_train)
    X_val = selector.transform(X_val)
    X_time_test = selector.transform(X_time_test)

    # Standardize features; the scaler is likewise fit on the training split only.
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_val = scaler.transform(X_val)
    X_time_test = scaler.transform(X_time_test)

    # Train the model and print the evaluation report.
    lr_model, train_metrics, val_metrics, test_metrics = train_and_evaluate_lr_model(
        X_train, y_train, X_val, y_val, X_time_test, y_time_test
    )