# 导入必要的库
import pandas as pd
import numpy as np
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
import lightgbm as lgb
from sklearn.preprocessing import StandardScaler
from itertools import combinations
import random
from scipy import stats

# Load the two raw CSV parts and concatenate them.
# NOTE(review): sep='\\x7f\\x5e' with engine='python' is interpreted by pandas
# as a regex matching the two-byte delimiter 0x7F 0x5E (DEL + '^') — confirm
# this matches the actual export's field separator.
df1 = pd.read_csv('part-00000-bec7c3f5-6b55-4911-9150-08745df40bae-c000.csv',sep='\\x7f\\x5e',engine='python')
df2 = pd.read_csv('part-00001-bec7c3f5-6b55-4911-9150-08745df40bae-c000.csv',sep='\\x7f\\x5e',engine='python')
df=pd.concat([df1, df2], ignore_index=True)
# Drop rows whose SCORE_557 is missing or zero. NOTE(review): because of this
# filter, no SCORE_557 NaNs remain anywhere downstream (the later fillna and
# "missing SCORE_557" listings operate on an already-clean column).
df = df[~((df['SCORE_557'].isna()) | (df['SCORE_557'] == 0))]

# Model-score fields used as input features.
score_fields = ['QYZXMODEL', 'HNDGMODEL', 'FICOMODEL', 'GRZXMODEL', 'HNGRMODEL', 'GSMODEL']

# Normalize APPLY_DT to datetime for the date-based splits below.
df['APPLY_DT'] = pd.to_datetime(df['APPLY_DT'])

# Target label: 1 = 30+ days past due, 0 = not past due, 2 = indeterminate.
df['Y'] = 2  # default: indeterminate (PFLAG_30D neither 0 nor 1)
df.loc[df['PFLAG_30D'] == 1, 'Y'] = 1  # 30+ days overdue
df.loc[df['PFLAG_30D'] == 0, 'Y'] = 0   # not overdue

# Drop indeterminate samples.
df = df[df['Y'] != 2]

# Feature matrix with mean imputation of missing scores.
# NOTE(review): the imputation means and the StandardScaler below are fit on
# the FULL dataset (train + OOT), which leaks out-of-time information into
# training — consider fitting both on the pre-cutoff data only. TODO confirm.
X = df[score_fields].fillna(df[score_fields].mean())
y = df['Y']

# Standardize features to zero mean / unit variance.
scaler = StandardScaler()
X_scaled = pd.DataFrame(
    scaler.fit_transform(X),
    columns=score_fields,
    index=X.index  # keep the original row index so boolean masks align
)


# Chronological split: applications on or before the train cutoff form the
# train/validation pool; the four months after it form the OOT window.
train_cutoff_date = pd.to_datetime('2024-02-29')
oot_cutoff_date = pd.to_datetime('2024-06-30')

# Boolean masks over df's rows (shared index with X_scaled / y).
train_val_mask = df['APPLY_DT'].le(train_cutoff_date)
oot_mask = df['APPLY_DT'].gt(train_cutoff_date) & df['APPLY_DT'].le(oot_cutoff_date)

X_train_val = X_scaled.loc[train_val_mask]
y_train_val = y.loc[train_val_mask]
X_oot = X_scaled.loc[oot_mask]
y_oot = y.loc[oot_mask]

# Random 60/40 split of the pre-cutoff pool into train and validation sets.
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(
    X_train_val, y_train_val, train_size=0.6, random_state=42
)


# Report basic label statistics for one data split.
def print_sample_stats(y_data, dataset_name):
    """Print the sample count, bad-sample count and bad rate of a label series."""
    n_total = len(y_data)
    n_bad = sum(y_data == 1)
    rate = (n_bad / n_total * 100) if n_total > 0 else 0
    print(f"\n{dataset_name}统计信息:")
    print(f"样本总量: {n_total:,}")
    print(f"坏客户数: {n_bad:,}")
    print(f"坏客户占比: {rate:.2f}%")

# Print label statistics for every split.
print("\n=== 样本统计信息 ===")
for split_labels, split_name in ((y_train, "训练集"), (y_val, "验证集"), (y_oot, "OOT集")):
    print_sample_stats(split_labels, split_name)


# Candidate models, all seeded for reproducibility.
models = {
    'LogisticRegression': LogisticRegression(random_state=42),
    'DecisionTree': DecisionTreeClassifier(random_state=42),
    'XGBoost': xgb.XGBClassifier(random_state=42),
    'LightGBM': lgb.LGBMClassifier(random_state=42)
}

# KS results keyed by model name.
results = {}


def _score_dataset(model, X, y):
    """Return (fpr, tpr, ks) for the model's positive-class scores on (X, y)."""
    proba = model.predict_proba(X)[:, 1]
    fpr, tpr, _ = roc_curve(y, proba)
    return fpr, tpr, max(abs(tpr - fpr))


def _plot_roc_panel(position, fpr, tpr, title):
    """Draw one ROC panel (in a 1x2 grid) with the diagonal reference line."""
    plt.subplot(1, 2, position)
    plt.plot(fpr, tpr)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.title(title)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.grid(True)


# Train each model and evaluate it on the validation and OOT sets.
for name, model in models.items():
    print(f'\n训练 {name} 模型...')

    model.fit(X_train, y_train)

    # Extra diagnostics for the linear model: multicollinearity (VIF) and
    # coefficient / odds-ratio tables.
    if name == 'LogisticRegression':
        from statsmodels.stats.outliers_influence import variance_inflation_factor

        vif_data = pd.DataFrame()
        vif_data["变量"] = X_train.columns
        vif_data["VIF"] = [variance_inflation_factor(X_train.values, i)
                           for i in range(X_train.shape[1])]

        coef_data = pd.DataFrame({
            '变量': X_train.columns,
            '系数': model.coef_[0],
            'Odds比': np.exp(model.coef_[0])
        })

        print("\n逻辑回归模型详细结果:")
        print("\n变量VIF值:")
        print(vif_data.to_string(index=False))
        print("\n变量系数和Odds比:")
        print(coef_data.to_string(index=False))
        print(f"\n截距项: {model.intercept_[0]:.4f}")

    # Evaluate on validation and OOT sets (shared helper instead of the
    # previously duplicated roc_curve/KS code).
    fpr_val, tpr_val, ks_val = _score_dataset(model, X_val, y_val)
    fpr_oot, tpr_oot, ks_oot = _score_dataset(model, X_oot, y_oot)

    results[name] = {
        'validation_ks': ks_val,
        'oot_ks': ks_oot
    }

    print(f'{name} 验证集 KS: {ks_val:.3f}')
    print(f'{name} OOT集 KS: {ks_oot:.3f}')

    # Side-by-side ROC curves for the two evaluation sets.
    plt.figure(figsize=(12, 5))
    _plot_roc_panel(1, fpr_val, tpr_val, f'{name} - 验证集 ROC (KS={ks_val:.3f})')
    _plot_roc_panel(2, fpr_oot, tpr_oot, f'{name} - OOT集 ROC (KS={ks_oot:.3f})')
    plt.tight_layout()
    plt.show()

# Report the model with the highest OOT-set KS.
best_name, best_metrics = max(results.items(), key=lambda item: item[1]['oot_ks'])
print('\n最终结果:')
print(f'OOT集上表现最好的模型是: {best_name}')
print(f'验证集 KS: {best_metrics["validation_ks"]:.3f}')
print(f'OOT集 KS: {best_metrics["oot_ks"]:.3f}')

def evaluate_variable_combinations():
    """Search logistic-regression models over subsets of the score variables.

    For every subset size (1 .. all variables), fits a LogisticRegression on
    the training split, records validation KS plus VIF and coefficient
    tables, then reports the best model per subset size and the overall best.

    Relies on module-level globals: score_fields, X_train, X_val, y_train,
    y_val. Prints its results; returns None.
    """
    # Local import so this function does not depend on the model-comparison
    # loop having imported variance_inflation_factor as a side effect.
    from statsmodels.stats.outliers_influence import variance_inflation_factor

    combo_results = []

    # Iterate over every possible subset size, including the full variable
    # set. (The previous `range(1, len(score_fields))` silently skipped the
    # all-variable model.)
    for n_vars in range(1, len(score_fields) + 1):
        print(f"\n测试{n_vars}个变量的组合...")

        all_combinations = list(combinations(score_fields, n_vars))

        # Cap the search at 100 randomly sampled combinations per size.
        if len(all_combinations) > 100:
            all_combinations = random.sample(all_combinations, 100)

        for vars_combo in all_combinations:
            X_train_subset = X_train[list(vars_combo)]
            X_val_subset = X_val[list(vars_combo)]

            lr_model = LogisticRegression(random_state=42)
            lr_model.fit(X_train_subset, y_train)

            # Validation KS from the positive-class probabilities.
            val_proba = lr_model.predict_proba(X_val_subset)[:, 1]
            fpr_val, tpr_val, _ = roc_curve(y_val, val_proba)
            ks_val = max(abs(tpr_val - fpr_val))

            # VIF is only meaningful with at least two regressors.
            if n_vars > 1:
                vif_data = pd.DataFrame()
                vif_data["变量"] = X_train_subset.columns
                vif_data["VIF"] = [variance_inflation_factor(X_train_subset.values, i)
                                   for i in range(X_train_subset.shape[1])]
            else:
                vif_data = pd.DataFrame({
                    "变量": X_train_subset.columns,
                    "VIF": ["NA"]  # VIF undefined for a single variable
                })

            combo_results.append({
                'n_vars': n_vars,
                'variables': vars_combo,
                'ks': ks_val,
                'model': lr_model,
                'vif_data': vif_data,
                'X_train_subset': X_train_subset,  # kept for the p-value step
                'coef_data': pd.DataFrame({
                    '变量': X_train_subset.columns,
                    '系数': lr_model.coef_[0],
                    'Odds比': np.exp(lr_model.coef_[0])
                })
            })

    # Best model for each subset size.
    best_models = []
    for n_vars in range(1, len(score_fields) + 1):
        group = [r for r in combo_results if r['n_vars'] == n_vars]
        if group:
            best_models.append(max(group, key=lambda r: r['ks']))

    print("\n=== 各种变量数量的最优模型结果 ===")
    for result in best_models:
        print(f"\n使用 {result['n_vars']} 个变量的最优模型:")
        print(f"入模变量: {', '.join(result['variables'])}")
        print(f"验证集 KS: {result['ks']:.4f}")

        print("\nVIF值:")
        print(result['vif_data'].to_string(index=False))

        # Wald-test p-values for logistic regression: standard errors come
        # from the inverse Fisher information inv(X' W X) with
        # W = diag(p * (1 - p)). (The previous version applied the OLS
        # residual-variance formula to hard 0/1 class predictions, which is
        # not valid for a logistic model.)
        # NOTE(review): sklearn's LogisticRegression is L2-regularized by
        # default, so these p-values are approximate.
        X_subset = result['X_train_subset']
        X_design = np.column_stack([np.ones(len(X_subset)), X_subset])
        probs = result['model'].predict_proba(X_subset)[:, 1]
        weights = probs * (1 - probs)
        cov_matrix = np.linalg.inv(X_design.T @ (X_design * weights[:, None]))
        standard_errors = np.sqrt(np.diag(cov_matrix))[1:]  # drop intercept
        z_scores = result['model'].coef_[0] / standard_errors
        p_values = np.round(2 * (1 - stats.norm.cdf(np.abs(z_scores))), decimals=8)

        coef_data_with_p = result['coef_data'].copy()
        coef_data_with_p['P值'] = p_values

        print("\n变量系数、Odds比和P值:")
        print(coef_data_with_p.to_string(index=False))
        print(f"截距项: {result['model'].intercept_[0]:.4f}")
        print("-" * 50)

    # Overall winner across all subset sizes.
    overall_best = max(best_models, key=lambda r: r['ks'])
    print("\n=== 总体最优模型 ===")
    print(f"变量数量: {overall_best['n_vars']}")
    print(f"入模变量: {', '.join(overall_best['variables'])}")
    print(f"验证集 KS: {overall_best['ks']:.4f}")

# Run the variable-combination search.
evaluate_variable_combinations()

# KS of the raw SCORE_557 variable on the post-cutoff (OOT) sample.
def calculate_557score_ootks():
    """Compute and plot the KS / ROC of SCORE_557 on the out-of-time sample.

    NOTE(review): "OOT" here is everything after the train/val cutoff
    (~train_val_mask), which also includes dates beyond oot_cutoff_date —
    unlike the oot_mask used for model evaluation. Confirm this is intended.
    Prints its results and shows a plot; returns None.
    """
    oot_data = df.loc[~train_val_mask, ['SCORE_557', 'Y']].copy()

    # Count missing values BEFORE imputing; the previous version counted
    # after fillna, so it always reported 0. (df is pre-filtered at load time
    # to drop missing SCORE_557 rows, so 0 is the expected value anyway.)
    n_missing = oot_data['SCORE_557'].isna().sum()

    # Sentinel-fill any missing scores so roc_curve receives finite inputs.
    oot_data['SCORE_557'] = oot_data['SCORE_557'].fillna(-999999)

    oot_score = oot_data['SCORE_557']
    oot_y = oot_data['Y']

    print(f"\n原始OOT样本数量: {len(oot_data)}")
    print(f"SCORE_557缺失值数量: {n_missing}")

    # KS from the raw score used directly as a ranking statistic.
    fpr, tpr, _ = roc_curve(oot_y, oot_score)
    ks = max(abs(tpr - fpr))

    print(f"\nSCORE_557变量在OOT样本上的KS值: {ks:.4f}")

    # ROC curve with the diagonal reference line.
    plt.figure(figsize=(8, 6))
    plt.plot(fpr, tpr)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.title(f'SCORE_557 - OOT样本 ROC曲线 (KS={ks:.4f})')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.grid(True)
    plt.show()

# Run the SCORE_557 OOT analysis.
calculate_557score_ootks()

# List up to 100 samples whose SCORE_557 is missing.
# NOTE(review): rows with missing SCORE_557 were dropped when the data was
# loaded, so this selection appears to always be empty — confirm intent.
print(df[df['SCORE_557'].isna()].head(100))
