import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import roc_auc_score, roc_curve, precision_score, recall_score, f1_score
import datetime
import pickle
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import EarlyStopping
from itertools import product
import matplotlib.pyplot as plt

# 1. Data preprocessing and splitting
def split_by_time_and_bad_ratio(data, date_col='APPLY_DATE', bad_flag_col='flag_all', bad_ratio=0.75):
    """
    Split a dataset into an earlier part A and a later part B by date,
    choosing the cut date so that roughly ``bad_ratio`` of the bad cases
    (rows with ``bad_flag_col == 1``) fall into A.

    Args:
        data: input DataFrame; NOT modified (a copy is taken).
        date_col: application-date column, parseable with format '%d%b%Y'
            (e.g. '01Jan2020') or already datetime.
        bad_flag_col: binary bad-customer flag column.
        bad_ratio: target fraction of bad cases for part A (0 < ratio <= 1).

    Returns:
        (data_a, data_b): rows on/before the cut date, rows after it.

    Raises:
        ValueError: if the dataset contains no bad cases (no cut date exists).
    """
    # Work on a copy so the to_datetime conversion does not mutate the
    # caller's frame (the original wrote back into `data` in place).
    data = data.copy()
    data[date_col] = pd.to_datetime(data[date_col], format='%d%b%Y')

    bad_cases = data[data[bad_flag_col] == 1]
    bad_count = len(bad_cases)
    if bad_count == 0:
        raise ValueError("cannot split by bad ratio: dataset contains no bad cases")

    # Clamp so bad_ratio=1.0 picks the last bad case instead of raising
    # IndexError (int(n * 1.0) == n would be out of bounds).
    split_idx = min(int(bad_count * bad_ratio), bad_count - 1)

    # The cut date is the date of the (split_idx+1)-th earliest bad case.
    split_date = bad_cases.sort_values(by=date_col).iloc[split_idx][date_col]

    data_a = data[data[date_col] <= split_date]
    data_b = data[data[date_col] > split_date]

    return data_a, data_b

def preprocess_data(data):
    """
    Clean and normalize the raw application data.

    Steps:
      1. keep only rows whose label ``flag_all`` is 0 or 1;
      2. drop identifier / bookkeeping columns;
      3. parse APPLY_DATE ('%d%b%Y', e.g. '01Jan2020');
      4. coerce every remaining non-numeric column to numeric (NaN on failure);
      5. for each numeric feature: median-fill, clip to 1.5*IQR, z-score;
      6. final fillna(0) safety net.

    Returns a new DataFrame; the caller's frame is not modified.
    """
    # Filter valid labels and COPY: the original assigned into the boolean
    # slice, which is a pandas view and triggers SettingWithCopyWarning
    # (and may silently fail to write, or mutate the caller's frame).
    data = data[data['flag_all'].isin([0, 1])].copy()
    print(f"清理异常标签后的样本量: {len(data)}")

    # Identifier columns carry no predictive signal.
    cols_to_drop = ['app_num', 'host_cust_id', 'APPLY_STS',
                    'CREDIT_TYPE_TWO', 'ACCT_NUM', 'IC_RGST_NUM',
                    'SOCI_UNIFIED_CRDT_CD']
    data = data.drop(columns=cols_to_drop)

    data['APPLY_DATE'] = pd.to_datetime(data['APPLY_DATE'], format='%d%b%Y')

    numerical_cols = data.select_dtypes(include=[np.number]).columns

    # Coerce the remaining non-numeric columns (except the date) to numeric;
    # unparseable cells become NaN and are imputed below.
    for col in data.columns:
        if col not in numerical_cols and col != 'APPLY_DATE':
            data[col] = pd.to_numeric(data[col], errors='coerce')

    # Refresh after coercion so newly-numeric columns are included.
    numerical_cols = data.select_dtypes(include=[np.number]).columns

    # +/-inf cannot be imputed or clipped; treat as missing.
    data = data.replace([np.inf, -np.inf], np.nan)

    for col in numerical_cols:
        if col == 'flag_all':
            continue  # never transform the label

        # Impute missing values with the column median (robust to outliers).
        data[col] = data[col].fillna(data[col].median())

        # Winsorize at 1.5 * IQR; skip constant columns (iqr == 0).
        q1 = data[col].quantile(0.25)
        q3 = data[col].quantile(0.75)
        iqr = q3 - q1
        if iqr != 0:
            data[col] = data[col].clip(q1 - 1.5 * iqr, q3 + 1.5 * iqr)

        # Standardize (z-score); a zero-variance column carries no
        # information, so collapse it to 0 rather than divide by zero.
        std_val = data[col].std()
        if std_val != 0:
            data[col] = (data[col] - data[col].mean()) / std_val
        else:
            data[col] = 0

    # Safety net: anything still missing becomes 0.
    data = data.fillna(0)

    return data

def calculate_ks(y_true, y_pred):
    """Kolmogorov-Smirnov statistic: the maximum gap between TPR and FPR
    over all score thresholds of the ROC curve."""
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    return np.abs(tpr - fpr).max()

def calculate_psi(expected, actual, bins=10):
    """Population Stability Index between a baseline score distribution
    (``expected``) and a new one (``actual``).

    Bin edges come from quantiles of the baseline; the two outermost edges
    are widened to +/-inf so every value lands in some bin.
    """
    edges = np.quantile(expected, np.linspace(0, 1, bins + 1))
    edges[0] = float('-inf')
    edges[-1] = float('inf')

    expected_percents = np.histogram(expected, edges)[0] / len(expected)
    actual_percents = np.histogram(actual, edges)[0] / len(actual)

    # Floor empty bins at a tiny proportion so the log ratio stays finite.
    expected_percents = np.where(expected_percents == 0, 0.0001, expected_percents)
    actual_percents = np.where(actual_percents == 0, 0.0001, actual_percents)

    contributions = (actual_percents - expected_percents) * np.log(actual_percents / expected_percents)
    return sum(contributions)

def grid_search_parameters():
    """Hyper-parameter grid focused on network structure and regularization."""
    # Each structure is a list of (hidden units, dropout rate) per layer.
    large = [(128, 0.3), (64, 0.2), (32, 0.1)]
    medium = [(64, 0.2), (32, 0.1)]
    small = [(32, 0.1)]
    return {
        'batch_size': [32, 64, 128],            # batch size affects optimization
        'network_structure': [large, medium, small],
        'l2_lambda': [0.01, 0.001, 0.0001],     # L2 regularization strength
    }

def create_model_with_params(input_dim, params):
    """Build and compile a feed-forward binary classifier.

    ``params['network_structure']`` is a list of (units, dropout_rate)
    pairs, one per hidden layer; ``params['l2_lambda']`` sets the L2
    penalty applied to every hidden layer's kernel.
    """
    model = Sequential()

    # Hidden stack: Dense(relu) -> BatchNorm -> Dropout per entry.
    for layer_idx, (units, dropout_rate) in enumerate(params['network_structure']):
        dense_kwargs = {
            'activation': 'relu',
            'kernel_initializer': 'he_normal',
            'kernel_regularizer': l2(params['l2_lambda']),
        }
        if layer_idx == 0:
            # Only the first layer declares the input dimensionality.
            dense_kwargs['input_dim'] = input_dim
        model.add(Dense(units, **dense_kwargs))
        model.add(BatchNormalization())
        model.add(Dropout(dropout_rate))

    # Sigmoid head for binary classification.
    model.add(Dense(1, activation='sigmoid'))

    # Fixed-learning-rate Adam; AUC tracked for early stopping.
    model.compile(optimizer=Adam(learning_rate=0.001),
                  loss='binary_crossentropy',
                  metrics=['AUC'])
    return model

def evaluate_model(y_true, y_pred_proba, threshold=0.5):
    """Score predicted probabilities against true labels.

    Precision/recall/F1 use labels obtained by thresholding the
    probabilities; KS and AUC are computed on the raw probabilities.
    Returns a dict keyed by metric name.
    """
    hard_labels = (y_pred_proba >= threshold).astype(int)

    return {
        'precision': precision_score(y_true, hard_labels),
        'recall': recall_score(y_true, hard_labels),
        'f1_score': f1_score(y_true, hard_labels),
        'ks': calculate_ks(y_true, y_pred_proba),
        'auc': roc_auc_score(y_true, y_pred_proba),
    }

# Main program
def calculate_bad_ratio(labels):
    """Percentage of bad samples (label == 1) in a label vector.

    The original script called this in its reporting block but never
    defined it, so `main()` crashed with NameError at the first print.
    """
    if len(labels) == 0:
        return 0.0
    return 100.0 * (labels == 1).mean()


def main():
    """End-to-end pipeline: load -> preprocess -> out-of-time split ->
    K-fold grid search -> final fit -> out-of-time evaluation + PSI."""
    # Load the raw modelling sample.
    data = pd.read_csv('/Users/zhd/developer/DNNFraud/SAMPLE_S4_MODEL.csv', low_memory=False)

    data = preprocess_data(data)

    # Sanity check: preprocessing should have removed every NaN.
    if data.isnull().any().any():
        print("警告：数据中仍存在NaN值")
        print(data.isnull().sum()[data.isnull().sum() > 0])

    # 1. Split off the out-of-time validation set (dataset B) by date.
    data_a, data_b = split_by_time_and_bad_ratio(data)

    # The date must not leak into the feature matrix.
    data_a = data_a.drop('APPLY_DATE', axis=1)
    data_b = data_b.drop('APPLY_DATE', axis=1)

    # 2. Features/label for dataset A.
    X = data_a.drop('flag_all', axis=1)
    y = data_a['flag_all']

    # Fill any residual missing values BEFORE splitting, so the fix
    # actually reaches the training matrices.
    if X.isnull().any().any():
        print("警告：数据中仍存在缺失值，将使用0填充")
        X = X.fillna(0)

    # Train/validation split within dataset A.
    # NOTE(fix): the original code performed a second train_test_split
    # (test_size=0.3) further down that silently overwrote X_train/y_train
    # and let X_val rows leak back into training; it has been removed.
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

    # Out-of-time test set from dataset B.
    X_time_test = data_b.drop('flag_all', axis=1)
    y_time_test = data_b['flag_all']

    print("\n=== 数据集统计信息 ===")
    print(f"训练集大小: {len(X_train)}, 坏样本比例: {calculate_bad_ratio(y_train):.2f}%")
    print(f"验证集大小: {len(X_val)}, 坏样本比例: {calculate_bad_ratio(y_val):.2f}%")
    print(f"跨时间验证集大小: {len(X_time_test)}, 坏样本比例: {calculate_bad_ratio(y_time_test):.2f}%")
    print("=====================\n")

    # K-fold cross-validation over the training portion only.
    n_splits = 5
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)

    # Grid search over structure/regularization parameters.
    param_grid = grid_search_parameters()
    best_score = 0
    best_params = None
    results = []  # every (params, mean CV score) pair, kept for inspection

    for params in (dict(zip(param_grid.keys(), v)) for v in product(*param_grid.values())):
        fold_scores = []

        for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):
            X_fold_train = X_train.iloc[train_idx]
            y_fold_train = y_train.iloc[train_idx]
            X_fold_val = X_train.iloc[val_idx]
            y_fold_val = y_train.iloc[val_idx]

            model = create_model_with_params(X_train.shape[1], params)

            # Stop when validation AUC plateaus; keep the best weights.
            early_stopping = EarlyStopping(
                monitor='val_auc',
                mode='max',
                patience=10,
                restore_best_weights=True
            )

            history = model.fit(
                X_fold_train, y_fold_train,
                epochs=100,
                batch_size=params['batch_size'],
                validation_data=(X_fold_val, y_fold_val),
                callbacks=[early_stopping],
                verbose=0
            )

            # Score this fold on its held-out part (F1 drives selection).
            val_pred = model.predict(X_fold_val)
            metrics = evaluate_model(y_fold_val, val_pred)
            fold_scores.append(metrics['f1_score'])

        avg_score = np.mean(fold_scores)
        results.append({
            'params': params,
            'score': avg_score
        })

        if avg_score > best_score:
            best_score = avg_score
            best_params = params

    print("\n=== 最佳参数 ===")
    for key, value in best_params.items():
        print(f"{key}: {value}")

    # Refit with the best configuration on the full training split.
    final_model = create_model_with_params(X_train.shape[1], best_params)

    early_stopping = EarlyStopping(
        monitor='val_auc',
        mode='max',
        patience=10,
        restore_best_weights=True
    )

    history = final_model.fit(
        X_train, y_train,
        epochs=100,
        batch_size=best_params['batch_size'],
        validation_data=(X_val, y_val),
        callbacks=[early_stopping],
        verbose=1
    )

    # Evaluate on the out-of-time set.
    time_test_pred = final_model.predict(X_time_test)

    # PSI between training-period scores and out-of-time scores
    # (distribution drift of the model output over time).
    psi = calculate_psi(final_model.predict(X_train), time_test_pred)

    print("\n=== 模型评估指标 ===")
    metrics = evaluate_model(y_time_test, time_test_pred)
    print(f"查准率 (Precision): {metrics['precision']:.4f}")
    print(f"查全率 (Recall): {metrics['recall']:.4f}")
    print(f"F1 Score: {metrics['f1_score']:.4f}")
    print(f"KS: {metrics['ks']:.4f}")
    print(f"AUC: {metrics['auc']:.4f}")
    print(f"PSI: {psi:.4f}")
    print("=====================\n")

    # Plot AUC over epochs for the final fit.
    plt.figure(figsize=(10, 6))
    plt.plot(history.history['auc'], label='训练集AUC')
    plt.plot(history.history['val_auc'], label='验证集AUC')
    plt.title('模型训练过程中的AUC变化')
    plt.xlabel('Epoch')
    plt.ylabel('AUC')
    plt.legend()
    plt.savefig('training_auc.png')
    plt.close()


if __name__ == "__main__":
    main()