import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, roc_curve
import datetime
import pickle
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import EarlyStopping
from DataProcess import process_final_data, union_with_sample, split_and_merge_data,evaluate_score_var_rst

# 1. 数据预处理和划分
def split_by_time_and_bad_ratio(data, date_col='APPLY_DATE', bad_flag_col='flag_all', bad_ratio=0.75):
    """
    Split a dataset chronologically so the earlier slice holds a target
    share of the bad cases.

    Args:
        data: input DataFrame; ``date_col`` holds '%d%b%Y'-formatted strings
              (e.g. '01JAN2020') and ``bad_flag_col`` holds 0/1 labels.
        date_col: name of the application-date column.
        bad_flag_col: name of the binary bad-customer flag column.
        bad_ratio: fraction of bad cases that should fall into the first slice.

    Returns:
        (data_a, data_b): rows on/before the cut date, and rows after it.
        The returned frames have ``date_col`` parsed to datetime; the
        caller's frame is left untouched.

    Raises:
        ValueError: if the data contains no bad cases to split on.
    """
    # Work on a copy so parsing the date column doesn't mutate the caller.
    data = data.copy()
    data[date_col] = pd.to_datetime(data[date_col], format='%d%b%Y')

    bad_cases = data[data[bad_flag_col] == 1]
    bad_count = len(bad_cases)
    if bad_count == 0:
        raise ValueError("no bad cases (flag == 1) to split on")

    # Bad cases the early slice should contain: at least 1 so a cut date
    # always exists, at most bad_count so bad_ratio=1.0 stays in bounds.
    bad_count_a = min(max(int(bad_count * bad_ratio), 1), bad_count)

    sorted_bad_cases = bad_cases.sort_values(by=date_col)
    # iloc[bad_count_a - 1] (not [bad_count_a]): with '<=' below, data_a
    # then contains exactly bad_count_a bad cases when dates are distinct,
    # and bad_ratio=1.0 no longer raises IndexError.
    split_date = sorted_bad_cases.iloc[bad_count_a - 1][date_col]

    data_a = data[data[date_col] <= split_date]
    data_b = data[data[date_col] > split_date]

    return data_a, data_b

def preprocess_data(data):
    """
    Clean and normalize raw application data for model training.

    Steps: keep only rows with a 0/1 label, drop identifier columns, parse
    the application date, coerce all remaining columns to numeric, then per
    feature: impute the median, winsorize to the 1.5*IQR fences, and z-score
    standardize.

    Args:
        data: raw DataFrame; must contain 'flag_all' and 'APPLY_DATE'
              ('%d%b%Y'-formatted, e.g. '01JAN2020').

    Returns:
        A new, cleaned DataFrame; the caller's frame is not mutated.
    """
    # Keep only rows with a valid binary label. Copy so the later column
    # assignments neither raise SettingWithCopyWarning nor touch the
    # caller's frame.
    data = data[data['flag_all'].isin([0, 1])].copy()
    print(f"清理异常标签后的样本量: {len(data)}")

    # Identifier / administrative columns carry no modeling signal.
    cols_to_drop = ['app_num', 'host_cust_id', 'APPLY_STS',
                    'CREDIT_TYPE_TWO', 'ACCT_NUM', 'IC_RGST_NUM',
                    'SOCI_UNIFIED_CRDT_CD']
    # errors='ignore': don't raise KeyError when a column is absent from
    # this particular extract.
    data = data.drop(columns=cols_to_drop, errors='ignore')

    # Parse the application date once, up front.
    data['APPLY_DATE'] = pd.to_datetime(data['APPLY_DATE'], format='%d%b%Y')

    numerical_cols = data.select_dtypes(include=[np.number]).columns

    # Force every remaining non-numeric column to numeric; unparseable
    # values become NaN and are imputed below.
    for col in data.columns:
        if col not in numerical_cols and col != 'APPLY_DATE':
            data[col] = pd.to_numeric(data[col], errors='coerce')

    # Refresh the numeric-column list after the coercion above.
    numerical_cols = data.select_dtypes(include=[np.number]).columns

    # Treat +/-inf as missing from here on.
    data = data.replace([np.inf, -np.inf], np.nan)

    for col in numerical_cols:
        if col == 'flag_all':
            continue  # never transform the label

        # Median imputation. An all-NaN column has a NaN median, so fall
        # back to 0 to guarantee no NaN survives this step.
        median = data[col].median()
        data[col] = data[col].fillna(0 if pd.isna(median) else median)

        # Winsorize to the 1.5*IQR fences to tame outliers.
        q1 = data[col].quantile(0.25)
        q3 = data[col].quantile(0.75)
        iqr = q3 - q1
        data[col] = data[col].clip(q1 - 1.5 * iqr, q3 + 1.5 * iqr)

        # Z-score standardize; a constant column is mapped to all zeros.
        std = data[col].std()
        if std != 0:
            data[col] = (data[col] - data[col].mean()) / std
        else:
            data[col] = 0

    return data

def create_model(input_dim):
    """
    Build and compile a small, heavily regularized binary classifier.

    Architecture: a 32 -> 16 -> 1 dense stack with ReLU hidden units,
    batch normalization, dropout after the first hidden layer and strong
    L2 weight penalties; sigmoid output yields a probability of the
    positive (bad) class.

    Args:
        input_dim: number of input features.

    Returns:
        A compiled tf.keras Sequential model (Adam @ 1e-3,
        binary cross-entropy loss, AUC metric).
    """
    # Fixed seed so repeated builds start from identical initial weights.
    tf.random.set_seed(42)

    model = Sequential()
    model.add(Dense(32, input_dim=input_dim, activation='relu',
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(0.1)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Dense(16, activation='relu',
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(0.1)))
    model.add(BatchNormalization())
    model.add(Dense(1, activation='sigmoid'))

    model.compile(optimizer=Adam(learning_rate=0.001),
                  loss='binary_crossentropy',
                  metrics=['AUC'])
    return model

def calculate_bad_ratio(y):
    """
    Return the share of bad samples (label == 1) in *y*, as a percentage.
    """
    n_bad = (y == 1).sum()
    n_total = len(y)
    return n_bad / n_total * 100

# 主程序
def _ks_auc(y_true, y_score):
    """
    Compute (KS, AUC) for binary labels against predicted scores.

    KS is the maximum vertical gap between the TPR and FPR curves.
    """
    fpr, tpr, _ = roc_curve(y_true, y_score)
    return max(abs(tpr - fpr)), roc_auc_score(y_true, y_score)


def main():
    """
    End-to-end training pipeline: load and split the data, clean it,
    train the Keras classifier, and report AUC/KS on the train,
    validation, and out-of-time test sets.
    """
    # Obtain the training and later-in-time hold-out frames from the
    # data-processing module.
    final_df = process_final_data()
    merged_df = union_with_sample(final_df)
    train_df, val_df = split_and_merge_data(final_df, merged_df)

    # Baseline: evaluate the existing vendor score on the hold-out set and
    # report it (previously these results were computed but discarded).
    intersection_df, ks, auc = evaluate_score_var_rst(val_df)
    print(f'基准评分 验证集 AUC: {auc:.4f}, KS: {ks:.4f}')

    # Last-resort NaN fill so the network never sees missing values.
    if train_df.isnull().any().any():
        print("警告：数据中仍存在缺失值，将使用-999999填充")
        train_df = train_df.fillna(-999999)

    if val_df.isnull().any().any():
        print("警告：数据中仍存在缺失值，将使用-999999填充")
        val_df = val_df.fillna(-999999)

    # Identifier and leakage-prone columns (scores, overdue outcomes) must
    # not reach the model.
    cols_to_drop = ['app_num', 'APPLY_NO', 'APP_DT', 'modPro', 'APPLY_STS',
                    'host_cust_id', 'ACCT_NUM', 'CREDIT_TYPE_TWO',
                    'GUAR_MODE_CD', 'DISTR_DT', 'ODUE_AMT', 'LATEST_OWE_DAYS',
                    'LONGEST_OVDUE_DAYS', 'ACCUM_OVDUE_CNT', 'jx_score', 'score_new',
                    'SCORE_ANTI_FRAUD', 'Unnamed: 13']
    train_df = train_df.drop(columns=cols_to_drop, errors='ignore')
    val_df = val_df.drop(columns=cols_to_drop, errors='ignore')

    # Random split of the training frame into train / validation folds.
    X_train = train_df.drop('flag_all', axis=1)
    y_train = train_df['flag_all']
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.4, random_state=42)

    # The original hold-out frame serves as an out-of-time test set.
    X_time_test = val_df.drop('flag_all', axis=1)
    y_time_test = val_df['flag_all']

    print("\n=== 数据集统计信息 ===")
    print(f"训练集大小: {len(X_train)}, 坏样本数: {int(y_train.sum())}, 坏样本比例: {calculate_bad_ratio(y_train):.2f}%")
    print(f"验证集大小: {len(X_val)}, 坏样本数: {int(y_val.sum())}, 坏样本比例: {calculate_bad_ratio(y_val):.2f}%")
    print(f"测试集大小: {len(X_time_test)}, 坏样本数: {int(y_time_test.sum())}, 坏样本比例: {calculate_bad_ratio(y_time_test):.2f}%")
    print("=====================\n")

    # Cast to float32 once so TensorFlow doesn't re-cast every batch.
    X_train = X_train.astype('float32')
    X_val = X_val.astype('float32')
    X_time_test = X_time_test.astype('float32')
    y_train = y_train.astype('float32')
    y_val = y_val.astype('float32')
    y_time_test = y_time_test.astype('float32')

    # Build and train the model; early stopping restores the best-epoch
    # weights when validation loss stalls.
    model = create_model(X_train.shape[1])

    early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

    model.fit(X_train, y_train,
              validation_data=(X_val, y_val),
              epochs=100,
              batch_size=32,
              callbacks=[early_stopping],
              verbose=1)

    # Score all three sets and report AUC / KS for each.
    train_pred = model.predict(X_train)
    val_pred = model.predict(X_val)
    test_pred = model.predict(X_time_test)

    train_ks, train_auc = _ks_auc(y_train, train_pred)
    val_ks, val_auc = _ks_auc(y_val, val_pred)
    test_ks, test_auc = _ks_auc(y_time_test, test_pred)

    print(f'训练集 AUC: {train_auc:.4f}, KS: {train_ks:.4f}')
    print(f'验证集 AUC: {val_auc:.4f}, KS: {val_ks:.4f}')
    print(f'测试集 AUC: {test_auc:.4f}, KS: {test_ks:.4f}')

if __name__ == "__main__":
    main()