# 导入必要的库
import pandas as pd
import numpy as np
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
import lightgbm as lgb
from sklearn.preprocessing import StandardScaler
from itertools import combinations
import random
from scipy import stats
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.metrics import roc_auc_score
import os

# Resolve paths relative to this script's location
import os

# Directory containing this file
current_dir = os.path.dirname(os.path.abspath(__file__))

# Build full data-file paths for the two-part export (currently unused)
# file1_path = os.path.join(current_dir, 'part-00000-bec7c3f5-6b55-4911-9150-08745df40bae-c000.csv')
# file2_path = os.path.join(current_dir, 'part-00001-bec7c3f5-6b55-4911-9150-08745df40bae-c000.csv')
# print("File 1 path:", file1_path)
# print("File 2 path:", file2_path)

# # 读取文件
# df1 = pd.read_csv(file1_path, sep='\\x7f\\x5e', engine='python')
# df2 = pd.read_csv(file2_path, sep='\\x7f\\x5e', engine='python')
# df = pd.concat([df1, df2], ignore_index=True)

# Load the raw export. The sep string is interpreted as a regex by the
# python parser engine; it appears to match the 0x7f 0x5e byte pair used
# as the field delimiter — TODO confirm against the export format.
df = pd.read_csv(r'557CalKS/557全部变量part-00000-106b4f3f-9f81-495b-9510-8eaf4cec6622-c000.csv',sep='\\x7f\\x5e',engine='python')
# print(df.head(1))

# Upper-case all column names for consistent access below
df.columns = df.columns.str.upper()

print("\n=== 数据框的实际列名 ===")
print(df.columns.tolist())
# Drop rows where SCORE_557 is missing or zero
df = df[~((df['SCORE_557'].isna()) | (df['SCORE_557'] == 0))]

# Candidate model-score fields to analyse
score_fields = ['QYZXMODEL', 'HNDGMODEL', 'FICOMODEL', 'GRZXMODEL', 'HNGRMODEL', 'GSMODEL']
# score_fields = ['FICOMODEL','PERPBCOVERDUECNTLAST6M', 'PERPBCSELFQRYCNTLAST1M', 'PERPBCNOTBANKLOANCNT6M', 'PERPBCACCTTIMEM', 'PERPBCMORTNUM', 'PERPBCALLLOANPDTOVERDUECNT', 'PERPBCMAXOVERDUE', 'PERPBCSPCTRSSINCEM', 'PERPBCLOANBALCOLLBALANCE', 'PERPBCMAXCREDITCARDLIMIT', 'PERPBCSCORE', 'PERPBCNBLOANNORMALCNT', 'PERPBCLOANAMTNONPBLNEWLYOPEN', 'PERPBCCREDITCARDQRYORGNUM', 'PERPBCPCTAVGUSELIMITCCLAST6M', 'ENTLEGPERCHANGENUMLAST6M', 'ENTINDUSTRYSTOCKRIGHTCHGCOUNTS', 'ENTOPERATIONENTSINCENUMMON', 'ENTINDUSTRYPYHCODE', 'ENTAICCONTRIBUTIONMON', 'ENTRODNETINTEREST1M', 'ENTRNTRODEMPNUM', 'ENTRODPRIVATEENT', 'ENTRODDAYAVGDEPO', 'PERRODDBANKCUST', 'PERRODCURRACCTPROFIT', 'PERRODAGE', 'PERRODHOUSECOLLLOANBALANCE', 'PERRODOVERDUEAMT', 'PERRODOVERDUEDAY', 'PERRODBASELIMIT', 'PERRODDAYAVGCURRBALANCE', 'PERRODBUSILOANCNT']

# Ensure APPLY_DT is parsed as datetime (needed for the date-based split)
df['APPLY_DT'] = pd.to_datetime(df['APPLY_DT'])

# Build the target label Y from the 30-day overdue flag
df['Y'] = 2  # default 2 = indeterminate (PFLAG_30D neither 0 nor 1)
df.loc[df['PFLAG_30D'] == 1, 'Y'] = 1  # overdue 30+ days -> bad (1)
df.loc[df['PFLAG_30D'] == 0, 'Y'] = 0   # not overdue -> good (0)

# Drop samples with an indeterminate label
df = df[df['Y'] != 2]

# 1. Data preparation: keep only the fields needed downstream
needed_fields = score_fields + ['APPLY_STS', 'SCORE_557', 'APPLY_DT', 'Y']
df = df[needed_fields].copy()

# 2. Split into approved vs. rejected applications
accepted_df = df[df['APPLY_STS'] == 'COMP'].copy()  # approved samples
rejected_df = df[df['APPLY_STS'] != 'COMP'].copy()  # rejected samples

print("\n=== 样本分布 ===")
print(f"通过样本数量: {len(accepted_df)}")
print(f"拒绝样本数量: {len(rejected_df)}")

# 3. Train an inference model on the approved samples only
# Features and labels for approved samples
X_accepted = accepted_df[score_fields].copy()
y_accepted = accepted_df['Y'].copy()

# Mean-impute missing values (means computed on approved samples)
for field in score_fields:
    field_mean = X_accepted[field].mean()
    X_accepted[field] = X_accepted[field].fillna(field_mean)

# Standardize features
scaler = StandardScaler()
X_accepted_scaled = scaler.fit_transform(X_accepted)

# Fit a logistic regression used only for reject inference
model_for_inference = LogisticRegression(random_state=42)
model_for_inference.fit(X_accepted_scaled, y_accepted)

# 4. Infer labels for the rejected samples
# Features for rejected samples
X_rejected = rejected_df[score_fields].copy()

# Impute with approved-sample means (X_accepted was already mean-filled
# above, so this mean equals the original approved-sample mean)
for field in score_fields:
    X_rejected[field] = X_rejected[field].fillna(X_accepted[field].mean())

# Standardize with the scaler fitted on approved samples
X_rejected_scaled = scaler.transform(X_rejected)

# Predicted bad probability for each rejected sample
rejected_probs = model_for_inference.predict_proba(X_rejected_scaled)[:, 1]

# Hard-label rejected samples by probability threshold
threshold = 0.5
rejected_df['Y'] = (rejected_probs >= threshold).astype(int)

# Recombine approved and (now inferred-label) rejected samples
df_with_inference = pd.concat([accepted_df, rejected_df])

# Report reject-inference results
print("\n=== 拒绝推断结果 ===")
print("通过样本的好坏比例：")
print(accepted_df['Y'].value_counts(normalize=True))
print("\n推断后拒绝样本的好坏比例：")
print(rejected_df['Y'].value_counts(normalize=True))
print("\n整体样本的好坏比例：")
print(df_with_inference['Y'].value_counts(normalize=True))

# Missing-value statistics for each score field
# NOTE(review): the modeling below uses df directly — the df_with_inference
# built by the reject-inference step is never used for training; confirm
# that is intended.
print("\n=== 各评分字段缺失值统计 ===")
missing_stats = pd.DataFrame({
    '缺失值数量': df[score_fields].isnull().sum(),
    '缺失值占比(%)': (df[score_fields].isnull().sum() / len(df) * 100).round(2)
})
print(missing_stats)

# Separate numeric and categorical variables
categorical_fields = [
    # 'ENTINDUSTRYPYHCODE', 'ENTRODPRIVATEENT'
    ]  # add categorical variables here (currently empty)
numeric_fields = [field for field in score_fields if field not in categorical_fields]

# Impute numeric and categorical variables separately
X = df[score_fields].copy()

# Numeric variables: mean imputation
for field in numeric_fields:
    field_mean = df[field].mean()
    X[field] = X[field].fillna(field_mean)

# Categorical variables: mode imputation (no-op while categorical_fields is empty)
for field in categorical_fields:
    field_mode = df[field].mode()[0]  # first mode value
    X[field] = X[field].fillna(field_mode)

# Split back into numeric and categorical parts
X_numeric = X[numeric_fields].copy()
X_categorical = X[categorical_fields].copy()

# Standardize numeric variables.
# NOTE(review): this re-fits (and overwrites) the scaler used for reject
# inference above — harmless only because that scaler is not used again.
scaler = StandardScaler()
X_scaled_numeric = pd.DataFrame(
    scaler.fit_transform(X_numeric),
    columns=numeric_fields,
    index=X.index
)

# One-hot encode categorical variables (disabled)
# X_encoded_categorical = pd.get_dummies(X_categorical, columns=categorical_fields)

# Combine processed features
# X_scaled = pd.concat([X_scaled_numeric, X_encoded_categorical], axis=1)
X_scaled = pd.concat([X_scaled_numeric, X_categorical], axis=1)

y = df['Y']

# Split by application date: train/validation up to the cutoff, OOT after
train_cutoff_date = pd.to_datetime('2024-02-29')
oot_cutoff_date = pd.to_datetime('2024-06-30')

# Masks for the train+validation window and the out-of-time (OOT) window
train_val_mask = df['APPLY_DT'] <= train_cutoff_date
oot_mask = (df['APPLY_DT'] > train_cutoff_date) & (df['APPLY_DT'] <= oot_cutoff_date)

X_train_val = X_scaled[train_val_mask]
y_train_val = y[train_val_mask]
X_oot = X_scaled[oot_mask]
y_oot = y[oot_mask]



# Random 60/40 split of the train+validation window
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train_val, y_train_val, train_size=0.6, random_state=42)


# 打印样本统计信息
def print_sample_stats(y_data, dataset_name):
    """Print sample size, bad-sample count and bad rate for one dataset.

    Args:
        y_data: Label series/array where 1 marks a bad sample.
        dataset_name: Display name used in the printed header.
    """
    n_total = len(y_data)
    n_bad = sum(y_data == 1)
    if n_total > 0:
        pct_bad = n_bad / n_total * 100
    else:
        pct_bad = 0
    print(f"\n{dataset_name}统计信息:")
    print(f"样本总量: {n_total:,}")
    print(f"坏客户数: {n_bad:,}")
    print(f"坏客户占比: {pct_bad:.2f}%")

print("\n=== 样本统计信息 ===")
print_sample_stats(y_train, "训练集")
print_sample_stats(y_val, "验证集")
print_sample_stats(y_oot, "OOT集")

# The imports below duplicate the top-of-file imports; they are harmless
# no-ops kept as-is.
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.metrics import roc_auc_score

def calculate_vif(X):
    """Return a DataFrame with the variance inflation factor of each column.

    Args:
        X: DataFrame of numeric features.

    Returns:
        DataFrame with columns "变量" (feature name) and "VIF".
    """
    design = X.values
    vifs = [variance_inflation_factor(design, col_idx) for col_idx in range(X.shape[1])]
    return pd.DataFrame({"变量": X.columns, "VIF": vifs})

def calculate_psi(expected, actual, bins=10):
    """Compute the Population Stability Index between two score samples.

    Quantile bin edges are derived from ``expected`` (the baseline) and the
    SAME edges are applied to ``actual``.  The previous implementation binned
    each sample by its own quantiles, which makes every baseline bin contain
    ~1/bins of its sample by construction and can also mis-align bin indices
    when ``duplicates='drop'`` removes a different number of edges per side.

    Args:
        expected: Baseline scores (array-like/Series), e.g. train predictions.
        actual: Comparison scores, e.g. validation or OOT predictions.
        bins: Number of quantile bins computed from ``expected``.

    Returns:
        PSI as a float, or None if inputs are missing or the computation fails.
    """
    def to_1d_array(data):
        # Accept Series / DataFrame / ndarray / list and flatten to 1-D
        if hasattr(data, 'values'):
            data = data.values
        return np.asarray(data).ravel()

    if expected is None or actual is None:
        print("PSI计算失败: 输入数据为空")
        return None

    try:
        expected_arr = to_1d_array(expected)
        actual_arr = to_1d_array(actual)

        # Quantile edges from the baseline sample only
        _, edges = pd.qcut(expected_arr, q=bins, duplicates='drop', retbins=True)
        # Open the outer edges so every actual value falls into some bin
        edges[0] = -np.inf
        edges[-1] = np.inf

        # Same edges for both sides -> identical, comparable categories
        expected_dist = pd.Series(pd.cut(expected_arr, bins=edges)).value_counts(normalize=True).sort_index()
        actual_dist = pd.Series(pd.cut(actual_arr, bins=edges)).value_counts(normalize=True).sort_index()

        # Guard against log(0) / division by zero in empty bins
        epsilon = 1e-10
        expected_dist = expected_dist.clip(lower=epsilon)
        actual_dist = actual_dist.clip(lower=epsilon)

        psi = float(((actual_dist - expected_dist) * np.log(actual_dist / expected_dist)).sum())
        return psi
    except Exception as e:
        print(f"PSI计算失败: {str(e)}")
        return None

def prepare_data_for_stepwise(X):
    """Return a copy of X containing only clean float columns for stepwise logit.

    Object/bool columns are converted to numeric where possible; columns that
    cannot be converted, and columns containing any nulls, are dropped (each
    drop is logged). The result is cast to float throughout.

    Args:
        X: DataFrame of candidate features.

    Returns:
        DataFrame with only fully-populated float64 columns.
    """
    X_numeric = X.copy()

    # Coerce object/bool columns to numeric; drop any that cannot convert
    for col in X_numeric.columns:
        if X_numeric[col].dtype == 'object' or X_numeric[col].dtype == 'bool':
            try:
                X_numeric[col] = pd.to_numeric(X_numeric[col])
            except (ValueError, TypeError):
                # Narrowed from a bare except: only conversion failures
                # should cause a column drop.
                print(f"删除非数值列: {col}")
                X_numeric = X_numeric.drop(columns=[col])

    # Drop columns still containing nulls (Logit cannot handle NaN)
    null_cols = X_numeric.columns[X_numeric.isnull().any()].tolist()
    if null_cols:
        print(f"删除含空值的列: {null_cols}")
        X_numeric = X_numeric.dropna(axis=1)

    # Uniform float dtype for statsmodels
    X_numeric = X_numeric.astype(float)

    return X_numeric

def stepwise_selection(X, y, initial_features=None, threshold_in=0.05, threshold_out=0.05):
    """Bidirectional stepwise feature selection for a logistic regression.

    Alternates forward selection (add the excluded variable with the smallest
    p-value when it is below ``threshold_in``) and backward elimination (drop
    the included variable with the largest p-value when it is above
    ``threshold_out``) until neither step changes the feature set.

    Fixes over the previous version: the candidate p-value series is created
    with an explicit float dtype (instead of an object-dtype all-NaN series),
    the forward step is skipped when no candidates remain (avoids min() on an
    empty series), a dead pre-assignment in the backward step is removed, and
    bare excepts are narrowed to ``Exception``.

    Args:
        X: DataFrame of candidate features (coerced to float).
        y: Binary target (coerced to float).
        initial_features: Optional list of features to start from.
        threshold_in: p-value below which a variable is added.
        threshold_out: p-value above which a variable is removed.

    Returns:
        List of selected feature names.
    """
    # Coerce to float so statsmodels receives purely numeric input
    X = X.astype(float)
    y = y.astype(float)

    included = list(initial_features) if initial_features is not None else []
    excluded = list(set(X.columns) - set(included))

    while True:
        changed = False

        # Forward selection: p-value of each candidate when added to the model
        excluded_pvalues = pd.Series(1.0, index=excluded, dtype=float)
        for new_column in excluded:
            try:
                model = sm.Logit(y, sm.add_constant(X[included + [new_column]])).fit(disp=0)
                excluded_pvalues[new_column] = model.pvalues[new_column]
            except Exception:
                # Fitting can fail (e.g. perfect separation); treat as p=1
                print(f"跳过变量 {new_column} - 模型拟合失败")
                excluded_pvalues[new_column] = 1

        if len(excluded_pvalues) > 0:
            best_pvalue = excluded_pvalues.min()
            if best_pvalue < threshold_in:
                best_feature = excluded_pvalues.idxmin()
                included.append(best_feature)
                excluded.remove(best_feature)
                changed = True
                print(f'加入变量: {best_feature} (p-value: {best_pvalue:.6f})')

        # Backward elimination: drop the weakest included variable if any
        if len(included) > 0:
            try:
                model = sm.Logit(y, sm.add_constant(X[included])).fit(disp=0)
                included_pvalues = model.pvalues[included]
                worst_pvalue = included_pvalues.max()

                if worst_pvalue > threshold_out:
                    worst_feature = included_pvalues.idxmax()
                    included.remove(worst_feature)
                    excluded.append(worst_feature)
                    changed = True
                    print(f'移除变量: {worst_feature} (p-value: {worst_pvalue:.6f})')
            except Exception:
                print("后向剔除过程中发生错误，跳过此轮")

        if not changed:
            break

    return included

# Stepwise selection uses numeric variables only
print("\n=== 准备数据用于逐步回归 ===")
X_train_prepared = prepare_data_for_stepwise(X_train[numeric_fields])
print(f"处理后的特征数量: {len(X_train_prepared.columns)}")
print(f"处理后的特征类型:\n{X_train_prepared.dtypes}")

# Run stepwise selection on the training set
selected_features = stepwise_selection(X_train_prepared, y_train)

# Fit the final logistic regression on the selected variables
X_train_selected = sm.add_constant(X_train_prepared[selected_features])
X_val_selected = sm.add_constant(X_val[selected_features])  # same features as training
X_oot_selected = sm.add_constant(X_oot[selected_features])  # same features as training

model = sm.Logit(y_train, X_train_selected).fit()

# Model summary
print("\n=== 模型统计信息 ===")
print(model.summary())

# VIF of the selected variables (multicollinearity check)
print("\n=== VIF值 ===")
vif_data = calculate_vif(X_train_prepared[selected_features])
print(vif_data)

# KS and GINI on each sample set
def calculate_metrics(X, y, dataset_name):
    """Score a dataset with the fitted module-level `model`; print KS and GINI.

    Args:
        X: Feature matrix (with constant column) matching the fitted model.
        y: True binary labels.
        dataset_name: Display name used in the printed output.

    Returns:
        The clipped predicted probabilities, or None if anything fails.
    """
    try:
        features = np.asarray(X)
        # Predict and clamp into [0, 1] so downstream metrics are well-defined
        scores = np.clip(model.predict(features), 0, 1)

        # ROC-derived metrics
        fpr, tpr, _ = roc_curve(y, scores)
        ks_stat = max(abs(tpr - fpr))
        gini_coef = 2 * auc(fpr, tpr) - 1

        print(f"\n{dataset_name}:")
        print(f"KS: {ks_stat:.4f}")
        print(f"GINI: {gini_coef:.4f}")

        return scores

    except Exception as e:
        print(f"\n{dataset_name} 计算指标时发生错误:")
        print(f"错误类型: {type(e).__name__}")
        print(f"错误信息: {str(e)}")
        return None

print("\n=== 模型表现 ===")
train_pred = calculate_metrics(X_train_selected, y_train, "训练集")
val_pred = calculate_metrics(X_val_selected, y_val, "验证集")
oot_pred = calculate_metrics(X_oot_selected, y_oot, "OOT集")

# Score-distribution stability: train vs. validation and train vs. OOT
print("\n=== PSI值 ===")
if train_pred is not None and val_pred is not None:
    val_psi = calculate_psi(train_pred, val_pred)
    if val_psi is not None:
        print(f"验证集 PSI: {val_psi:.4f}")
    else:
        print("验证集 PSI: 计算失败")

if train_pred is not None and oot_pred is not None:
    oot_psi = calculate_psi(train_pred, oot_pred)
    if oot_psi is not None:
        print(f"OOT集 PSI: {oot_psi:.4f}")
    else:
        print("OOT集 PSI: 计算失败")

# KS of the existing SCORE_557 on the train / validation / OOT samples
def calculate_557score_ks():
    """Compute and plot KS of SCORE_557 on the train/val/OOT sample splits.

    Uses the same row indices as the model datasets so the comparison with
    the new model is apples-to-apples. Missing scores are filled with the
    sentinel -999999 so roc_curve can rank every row. Reads module-level
    globals: df, X_train, X_val, X_oot.

    Fix: missing-score counts are now captured BEFORE imputation — the
    previous version counted nulls after fillna, so it always printed 0.
    """
    # Use the same sample indices as model training
    train_data = df.loc[X_train.index, ['SCORE_557', 'Y']].copy()
    val_data = df.loc[X_val.index, ['SCORE_557', 'Y']].copy()
    oot_data = df.loc[X_oot.index, ['SCORE_557', 'Y']].copy()

    # Record missing counts before imputation (see docstring)
    train_missing = train_data['SCORE_557'].isna().sum()
    val_missing = val_data['SCORE_557'].isna().sum()
    oot_missing = oot_data['SCORE_557'].isna().sum()

    # Fill missing SCORE_557 with a sentinel so all rows can be ranked
    train_data['SCORE_557'] = train_data['SCORE_557'].fillna(-999999)
    val_data['SCORE_557'] = val_data['SCORE_557'].fillna(-999999)
    oot_data['SCORE_557'] = oot_data['SCORE_557'].fillna(-999999)

    # KS on the training set
    fpr_train, tpr_train, _ = roc_curve(train_data['Y'], train_data['SCORE_557'])
    ks_train = max(abs(tpr_train - fpr_train))

    # KS on the validation set
    fpr_val, tpr_val, _ = roc_curve(val_data['Y'], val_data['SCORE_557'])
    ks_val = max(abs(tpr_val - fpr_val))

    # KS on the OOT set
    fpr_oot, tpr_oot, _ = roc_curve(oot_data['Y'], oot_data['SCORE_557'])
    ks_oot = max(abs(tpr_oot - fpr_oot))

    # Print sample sizes, missing counts and KS values
    print("\n=== SCORE_557 KS分析 ===")
    print(f"训练样本数量: {len(train_data)}")
    print(f"训练集SCORE_557缺失值数量: {train_missing}")
    print(f"训练集KS值: {ks_train:.4f}")

    print(f"\n验证集样本数量: {len(val_data)}")
    print(f"验证集SCORE_557缺失值数量: {val_missing}")
    print(f"验证集KS值: {ks_val:.4f}")

    print(f"\nOOT样本数量: {len(oot_data)}")
    print(f"OOT集SCORE_557缺失值数量: {oot_missing}")
    print(f"OOT集KS值: {ks_oot:.4f}")

    # Plot ROC curves for the train and OOT sets
    plt.figure(figsize=(10, 5))

    # Training-set ROC
    plt.subplot(1, 2, 1)
    plt.plot(fpr_train, tpr_train)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.title(f'训练集 ROC曲线 (KS={ks_train:.4f})')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.grid(True)

    # OOT-set ROC
    plt.subplot(1, 2, 2)
    plt.plot(fpr_oot, tpr_oot)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.title(f'OOT集 ROC曲线 (KS={ks_oot:.4f})')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.grid(True)

    plt.tight_layout()
    plt.show()

# Run the SCORE_557 KS analysis
calculate_557score_ks()

# Show up to 100 samples with missing SCORE_557.
# NOTE(review): rows with missing SCORE_557 were dropped at load time, so
# this is expected to print an empty frame — confirm intent.
print(df[df['SCORE_557'].isna()].head(100))
