import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import platform
import seaborn as sns

# --- GPU configuration ---
import tensorflow as tf
print("TensorFlow版本:", tf.__version__)

# Enable memory growth so TF allocates GPU memory on demand instead of
# grabbing the whole device up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        print(f"GPU可用: {len(gpus)} 个")
        print("GPU设备:", gpus)
    except RuntimeError as e:
        # set_memory_growth must run before the GPUs are initialized;
        # otherwise TF raises RuntimeError.
        print("GPU配置错误:", e)
else:
    print("GPU不可用，将使用CPU")
    # Force CPU-only execution by hiding all GPU devices.
    tf.config.set_visible_devices([], 'GPU')

# Chinese font setup - tuned for CentOS 7.6
def setup_chinese_font():
    """Configure matplotlib to use a CJK-capable font (CentOS 7.6 friendly).

    Picks the first candidate font actually installed, registers it as the
    preferred sans-serif font, disables the unicode-minus glyph (missing in
    most CJK fonts) and rebuilds/clears matplotlib's font cache.  Works
    purely through ``matplotlib.rcParams`` side effects; returns None.
    """
    # Explicit submodule import: `import matplotlib` alone does not guarantee
    # matplotlib.font_manager is loaded (normally it is only pulled in as a
    # side effect of importing pyplot).
    from matplotlib import font_manager

    system = platform.system().lower()

    if system == 'linux':
        # CentOS/Linux candidates, best first.
        font_candidates = [
            'WenQuanYi Micro Hei',  # WenQuanYi Micro Hei
            'WenQuanYi Zen Hei',    # WenQuanYi Zen Hei
            'Noto Sans CJK SC',     # Google Noto CJK
            'DejaVu Sans',          # fallback
            'Liberation Sans',      # fallback
            'Arial Unicode MS',     # if available
            'SimHei'                # last resort
        ]
    else:
        # Windows / other systems.
        font_candidates = [
            'SimHei',
            'Microsoft YaHei',
            'DejaVu Sans'
        ]

    # Names of every font matplotlib can currently see.
    available_fonts = {f.name for f in font_manager.fontManager.ttflist}

    # First candidate that is actually installed, or None.
    selected_font = next((f for f in font_candidates if f in available_fonts), None)

    if selected_font:
        print(f"使用字体: {selected_font}")
        matplotlib.rcParams['font.sans-serif'] = [selected_font] + font_candidates
    else:
        print("警告: 未找到合适的中文字体，将使用默认字体")
        print("可用字体:", sorted(available_fonts)[:10])  # show 10 for debugging
        matplotlib.rcParams['font.sans-serif'] = font_candidates

    # CJK fonts usually lack a proper unicode minus glyph.
    matplotlib.rcParams['axes.unicode_minus'] = False

    # Rebuild matplotlib's in-memory font cache so new fonts are picked up.
    try:
        font_manager.fontManager.__init__()
        print("字体缓存重建成功")
    except Exception as e:
        print(f"字体缓存重建失败: {e}")
        try:
            # Fallback: delete the on-disk cache file(s).  The name is
            # version-dependent (fontlist-vNNN.json), so glob instead of
            # hard-coding 'fontlist-v330.json'.
            import glob
            cache_dir = matplotlib.get_cachedir()
            for cache_file in glob.glob(os.path.join(cache_dir, 'fontlist-v*.json')):
                os.remove(cache_file)
                print("已删除字体缓存文件")
        except Exception as e2:
            print(f"删除字体缓存文件失败: {e2}")

# Apply the font configuration before any plotting happens.
setup_chinese_font()
name_path = 'card_clients'
os.makedirs('output/'+name_path, exist_ok=True)  # ensure the output directory exists
# 模型评估
from sklearn.metrics import *
def model_evaluation(y_true, y_pred, y_prod, name):
    """Build a one-row DataFrame of binary-classification metrics.

    Parameters
    ----------
    y_true : 1-d array-like of {0, 1} ground-truth labels.
    y_pred : 1-d array-like of {0, 1} predicted labels.
    y_prod : 2-d array-like of class probabilities; column 1 must be the
        positive-class probability.
    name : label used as the row index of the returned DataFrame.

    Returns
    -------
    pandas.DataFrame with one row of rounded metrics: accuracy, precision,
    recall, F1, AUC, specificity (TNR), sensitivity (TPR), PPV, NPV,
    Cohen's kappa and the KS statistic.

    Notes
    -----
    Raises ZeroDivisionError when a denominator class is entirely absent
    (e.g. no positive labels), matching the original behaviour.
    """
    # Tally all four confusion-matrix cells in one pass instead of the
    # original four separate O(n) scans (each run twice).
    tp = tn = fp = fn = 0
    for actual, predicted in zip(y_true, y_pred):
        if actual == 1:
            if predicted == 1:
                tp += 1
            else:
                fn += 1
        else:
            if predicted == 1:
                fp += 1
            else:
                tn += 1

    # KS statistic: maximum gap between TPR and FPR over all thresholds.
    fpr, tpr, _ = roc_curve(y_true, y_prod[:, 1])
    ks = np.max(tpr - fpr)

    result = pd.DataFrame({'准确率': [round(accuracy_score(y_true, y_pred), 3)],
                           '精确率': [round(precision_score(y_true, y_pred), 3)],
                           '召回率': [round(recall_score(y_true, y_pred), 3)],
                           'F1值': [round(f1_score(y_true, y_pred), 3)],
                           'AUC值': [round(roc_auc_score(y_true, y_prod[:, 1], multi_class='ovo'), 3)],
                           '特异度': [round(tn / (tn + fp), 3)],  # specificity / TNR
                           '灵敏度': [round(tp / (tp + fn), 3)],  # sensitivity / TPR
                           'PPV': [round(tp / (tp + fp), 3)],
                           'NPV': [round(tn / (tn + fn), 3)],
                           'kappa': [round(cohen_kappa_score(y_true, y_pred), 3)],
                           'KS': [round(ks, 3)]},
                          index=[name])
    return result
############# Default-of-credit-card-clients dataset (Taiwan) #############
df = pd.read_excel('data/card_clients/default of credit card clients.xls',header=1,index_col=0)

# Exploratory data analysis.
print("=== 数据基本信息 ===")
print(f"数据形状: {df.shape}")
print(f"目标变量分布:\n{df[df.columns[-1]].value_counts()}")
print(f"违约率: {df[df.columns[-1]].mean():.3f}")

# 1. Class balance of the default flag.
temp = df[df.columns[-1]].value_counts()
df_balance = pd.DataFrame({'违约情况': temp.index, '数量': temp.values})
plt.figure(figsize=(6, 6))
plt.title('信用卡违约数据分布\n (0=不违约, 1=违约)')
sns.set_color_codes("pastel")
sns.barplot(x='违约情况', y="数量", data=df_balance)
plt.savefig('output/'+name_path+'/data_balance.png')
plt.close()

# 2. Credit-limit distribution.
# Fix: sns.distplot was deprecated in seaborn 0.11 and removed in 0.14;
# histplot(stat="density", kde=True) reproduces the density histogram + KDE.
plt.figure(figsize=(14, 6))
plt.title('信用额度分布 - 密度图')
sns.histplot(df['LIMIT_BAL'], kde=True, bins=200, stat="density", color="blue")
plt.savefig('output/'+name_path+'/limit_bal_distribution.png')
plt.close()

# 3. Credit-limit distribution split by default status.
class_0 = df.loc[df[df.columns[-1]] == 0]["LIMIT_BAL"]
class_1 = df.loc[df[df.columns[-1]] == 1]["LIMIT_BAL"]
plt.figure(figsize=(14, 6))
plt.title('信用额度分布 - 按违约情况分组')
sns.histplot(class_1, kde=True, bins=200, stat="density", color="red", label="违约")
sns.histplot(class_0, kde=True, bins=200, stat="density", color="green", label="不违约")
plt.legend()
plt.savefig('output/'+name_path+'/limit_bal_by_default.png')
plt.close()

# 4. Credit limit vs sex, with and without outliers.
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 6))
sns.boxplot(ax=ax1, x="SEX", y="LIMIT_BAL", hue="SEX", data=df, palette="PRGn", showfliers=True)
sns.boxplot(ax=ax2, x="SEX", y="LIMIT_BAL", hue="SEX", data=df, palette="PRGn", showfliers=False)
ax1.set_title('信用额度 vs 性别 (含异常值)')
ax2.set_title('信用额度 vs 性别 (不含异常值)')
plt.tight_layout()
plt.savefig('output/'+name_path+'/sex_limit_balance.png')
plt.close()

# 5. Full feature-correlation heatmap.
plt.figure(figsize=(12, 8))
correlation_matrix = df.corr()
sns.heatmap(correlation_matrix, annot=False, cmap='coolwarm', center=0)
plt.title('特征相关性热力图')
plt.tight_layout()
plt.savefig('output/'+name_path+'/correlation_heatmap.png')
plt.close()

# 6. Correlation among monthly bill amounts (April-September).
bill_vars = ['BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5','BILL_AMT6']
plt.figure(figsize=(8, 8))
plt.title('账单金额相关性 (4-9月)')
corr = df[bill_vars].corr()
sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns, 
            linewidths=.1, vmin=-1, vmax=1, annot=True, fmt='.2f')
plt.tight_layout()
plt.savefig('output/'+name_path+'/bill_amount_correlation.png')
plt.close()

# 7. Correlation among monthly repayment statuses (April-September).
pay_vars = ['PAY_0','PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6']
plt.figure(figsize=(8, 8))
plt.title('还款状态相关性 (4-9月)')
corr = df[pay_vars].corr()
sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns, 
            linewidths=.1, vmin=-1, vmax=1, annot=True, fmt='.2f')
plt.tight_layout()
plt.savefig('output/'+name_path+'/pay_status_correlation.png')
plt.close()

# 8. Relationship between age, sex and credit limit.
def boxplot_variation(feature1, feature2, feature3, width=16, title=""):
    """Draw one grouped boxplot of feature2 vs feature1, hued by feature3.

    Uses the module-level DataFrame ``df``; outliers are hidden and x tick
    labels rotated for readability.  Returns the matplotlib Figure.
    """
    fig, axis = plt.subplots(ncols=1, figsize=(width, 6))
    sns.boxplot(
        ax=axis,
        x=feature1,
        y=feature2,
        hue=feature3,
        data=df,
        palette="PRGn",
        showfliers=False,
    )
    axis.set_xticklabels(axis.get_xticklabels(), rotation=90)
    axis.set_title(title)
    plt.tight_layout()
    return fig

# Age vs credit limit, grouped by sex.
fig = boxplot_variation('AGE', 'LIMIT_BAL', 'SEX', 16, '年龄与信用额度（按性别分组）')
plt.savefig('output/'+name_path+'/age_limit_sex.png')
plt.close()

# Marital status vs age, grouped by sex.
fig = boxplot_variation('MARRIAGE', 'AGE', 'SEX', 8, '婚姻状况与年龄（按性别分组）')
plt.savefig('output/'+name_path+'/marriage_age_sex.png')
plt.close()

# 9. Boxplots of key features split by the default flag.
important_features = ['LIMIT_BAL', 'AGE', 'PAY_0', 'BILL_AMT1', 'BILL_AMT2']
fig, axes = plt.subplots(2, 3, figsize=(15, 10))
axes = axes.ravel()

for i, feature in enumerate(important_features):
    if i < len(axes):
        # One boxplot per feature, grouped by the (last-column) target.
        df.boxplot(column=feature, by=df.columns[-1], ax=axes[i])
        axes[i].set_title(f'{feature} vs 违约情况')
        axes[i].set_xlabel('违约 (0=否, 1=是)')
        axes[i].set_ylabel(feature)

plt.tight_layout()
plt.savefig('output/'+name_path+'/feature_target_analysis.png')
plt.close()
# IQR-based outlier handling for the monetary columns.
for col in ['LIMIT_BAL','BILL_AMT1', 'BILL_AMT2',
       'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1',
       'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6']:
    # Quartiles and inter-quartile range.
    Q1 = df[col].quantile(0.25)
    Q3 = df[col].quantile(0.75)
    IQR = Q3 - Q1
    # Tukey fences at 1.5 * IQR.
    lower_bound = Q1 - 1.5 * IQR
    upper_bound = Q3 + 1.5 * IQR
    # Replace outliers with NaN so the imputer can fill them below.
    df.loc[(df[col] < lower_bound) | (df[col] > upper_bound), col] = np.nan
# Median imputation of the NaNs introduced above.
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy='median')
data = imputer.fit_transform(df)
data = pd.DataFrame(data, columns=df.columns)
# Class rebalancing with SMOTE — this is OVER-sampling (the original
# comment mislabelled it as under-sampling).
# NOTE(review): resampling before the train/test split lets synthetic
# samples leak across the split — consider resampling the training fold only.
from imblearn.over_sampling import SMOTE
ros = SMOTE(random_state=42)
X_resampled, y_resampled = ros.fit_resample(data[data.columns[:-1]], data[data.columns[-1]])
# Recombine features and label into one frame.
data = pd.concat([X_resampled, y_resampled],axis=1)
# Persist the preprocessed data.
data.to_csv('./output/'+name_path+'/data_new1.csv')
print("数据预处理完成，保存到 data_new1.csv")
print(f"数据形状: {data.shape}")
print(f"目标变量分布:\n{data[data.columns[-1]].value_counts()}")
print("开始特征选择...")
#######################
# ------------------------ XGB feature selection ------------------------
from sklearn.feature_selection import *
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split

# Split train/test first so later fitting never sees test data (avoids leakage
# from this point on; stratified to preserve class ratios).
X_temp = data[data.columns[:-1]]
y_temp = data[data.columns[-1]]
X_train_temp, X_test_temp, y_train_temp, y_test_temp = train_test_split(
    X_temp, y_temp, test_size=0.2, random_state=123, stratify=y_temp
)

# Label-encode the categorical columns to keep memory usage low.
from sklearn.preprocessing import LabelEncoder
cat_features = ['SEX', 'EDUCATION', 'MARRIAGE', 'PAY_0', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6']

# Copy so the original splits stay untouched.
X_train_encoded_df = X_train_temp.copy()
X_test_encoded_df = X_test_temp.copy()

# Fit each encoder on the union of train+test values so unseen test-set
# categories cannot raise during transform.
label_encoders = {}
for feature in cat_features:
    le = LabelEncoder()
    all_values = pd.concat([X_train_temp[feature], X_test_temp[feature]]).astype(str)
    le.fit(all_values)
    X_train_encoded_df[feature] = le.transform(X_train_temp[feature].astype(str))
    X_test_encoded_df[feature] = le.transform(X_test_temp[feature].astype(str))
    label_encoders[feature] = le

print(f"编码后特征数量: {X_train_encoded_df.shape[1]}")
print(f"原始特征数量: {X_train_temp.shape[1]}")

# Lightweight univariate feature selection.
from sklearn.feature_selection import SelectKBest, f_classif

# k='all' keeps every feature (the data set only has ~23) while still
# recording per-feature F scores.
selector = SelectKBest(score_func=f_classif, k='all')
X_train_selected = selector.fit_transform(X_train_encoded_df, y_train_temp)
X_test_selected = selector.transform(X_test_encoded_df)

# Names of the retained features.
selected_features = X_train_encoded_df.columns[selector.get_support()].tolist()
print(f"选择了 {len(selected_features)} 个特征")
print("Selected Features:", selected_features[:10], "...")  # first 10 only

# XGBoost feature-importance ranking on the selected features.
model = XGBClassifier()
model.fit(X_train_selected, y_train_temp)
importance_df = pd.DataFrame({'Feature': selected_features, 'Importance': model.feature_importances_})
# Ascending order so barh() draws the most important feature at the top.
importance_df = importance_df.sort_values(by='Importance', ascending=True)
importance_df.sort_values(by='Importance', ascending=False).to_csv('output/'+name_path+'/XGB_Feature_Importance.csv',index=False)
plt.style.use('ggplot')
plt.figure(figsize=(10, 8))
plt.barh(importance_df['Feature'], importance_df['Importance'])
plt.xlabel('特征重要性')# feature importance
plt.ylabel('特征变量')# feature name
plt.tight_layout()  # auto layout so labels are not clipped
plt.savefig('output/'+name_path+'/XGB_Feature_Importance.png')
plt.close()

#---------------------- Mutual-information feature selection ----------------------
from sklearn.feature_selection import mutual_info_classif
# Mutual information between each selected feature and the label.
info_gains = mutual_info_classif(X_train_selected, y_train_temp)
# Keep features whose mutual information exceeds the 0.01 threshold.
mi_selected_features = [selected_features[i] for i in range(len(selected_features)) if info_gains[i] > 0.01]
print(f"MI选择了 {len(mi_selected_features)} 个特征")
IV_result = pd.DataFrame([mi_selected_features, [info_gains[i] for i in range(len(selected_features)) if info_gains[i] > 0.01]],
                        index=['变量名称','MI值']).T
IV_result = IV_result.sort_values(by='MI值', ascending=False)
IV_result.to_csv('output/'+name_path+'/MI_result.csv',index=False)

#---------------------------- PCA ----------------------------
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# Standardize using statistics fitted on the training split only (no leakage).
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_selected)
X_test_scaled = scaler.transform(X_test_selected)
# Keep enough components to explain 95% of the variance.
pca = PCA(n_components=0.95)
X_train_pca = pca.fit_transform(X_train_scaled)
X_test_pca = pca.transform(X_test_scaled)
# Per-component explained variance ratio.
var_ratio = pca.explained_variance_ratio_
print('方差贡献率:', var_ratio)
# Cumulative explained variance ratio.
cumulative_var_ratio = np.cumsum(pca.explained_variance_ratio_)
print('累计方差贡献率:', cumulative_var_ratio)
# Fix: number components from 1 ("第1主成分"), not 0.
PCA_result = pd.DataFrame([['第'+str(i+1)+'主成分' for i in range(len(var_ratio))],
                           list(var_ratio),
                           list(cumulative_var_ratio)],
                          index=['主成分','方差贡献率','累计方差贡献率']).T
PCA_result.to_csv('output/'+name_path+'/PCA_result.csv',index=False)

# Transformer模型
from keras.models import *
from keras.layers import *
from keras import optimizers
# Training/validation loss curves
def loss(history, name):
    """Plot train/validation loss from a Keras History and save it as a PNG."""
    curves = {'train-loss': history.history['loss'],
              'test-loss': history.history['val_loss']}
    plt.figure()
    for label, values in curves.items():
        plt.plot(values, label=label)
    plt.legend()
    plt.title('Loss')
    plt.savefig(os.getcwd() + '/output/'+name_path+'/'+name+'_Loss.png')
    # No interactive display: figure is saved to disk only.
    plt.close()
# Training/validation accuracy curves
def acc(history, name):
    """Plot train/validation accuracy from a Keras History and save as PNG."""
    curves = {'train-acc': history.history['accuracy'],
              'test-acc': history.history['val_accuracy']}
    plt.figure()
    for label, values in curves.items():
        plt.plot(values, label=label)
    plt.title('Accuracy')
    plt.legend()
    plt.savefig(os.getcwd() + '/output/'+name_path+'/'+name+'_Accuracy.png')
    # No interactive display: figure is saved to disk only.
    plt.close()
# Transformer
def Transformer(x_values,optimizer='adam'):
    """Build a small Transformer-encoder classifier for tabular input.

    x_values: 2-d array used only to size the input layer (features on the
    last axis); each sample is fed to the network as a length-1 sequence.
    optimizer: Keras optimizer name or instance.
    Returns a compiled Model with a 2-unit softmax head and
    sparse-categorical-crossentropy loss.
    """
    def transformer_encoder(inputs):
        # Self-attention block with dropout and a residual connection.
        # NOTE(review): the original comment mentioned normalization, but no
        # LayerNormalization layer is applied anywhere in this encoder.
        x = MultiHeadAttention(
            key_dim=64, num_heads=2, dropout=0.3)(inputs, inputs)
        x = Dropout(0.3)(x)
        res = x + inputs
        # Position-wise feed-forward block, projected back to the input
        # width so the second residual addition is shape-compatible.
        x = Dense(units=64, activation="relu")(res)
        x = Dropout(0.3)(x)
        x = Dense(units=inputs.shape[-1])(x)
        return x + res
    inputs = Input(shape=(1,x_values.shape[-1]))
    x = inputs
    x = transformer_encoder(x)
    # Collapse the (length-1) sequence dimension.
    x = GlobalAveragePooling1D(data_format="channels_first")(x)
    x = Dense(64, activation="relu")(x)
    x = Dropout(0.3)(x)
    x = Dense(32, activation="relu")(x)
    x = Dropout(0.3)(x)
    # Two-class softmax output head.
    outputs = Dense(2, activation='softmax')(x)
    model = Model(inputs, outputs)
    model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])  # compile with optimizer and loss
    return model

# Accumulate one evaluation row per feature-selection strategy.
model_result = pd.DataFrame()
for name_feature in ['XGB','MI','PCA','原始特征']:
    if name_feature=='XGB':
        # XGB-selected features, taken from the label-encoded frames.
        X_train_xgb = X_train_encoded_df[importance_df['Feature']].values
        X_test_xgb = X_test_encoded_df[importance_df['Feature']].values
        # Standardize; fit on the training split only to avoid leakage.
        from sklearn.preprocessing import StandardScaler
        x_scaler = StandardScaler()
        X_train_xgb_scaled = x_scaler.fit_transform(X_train_xgb)
        X_test_xgb_scaled = x_scaler.transform(X_test_xgb)
        x_values = np.vstack([X_train_xgb_scaled, X_test_xgb_scaled])
        model = Transformer(x_values, optimizer='Adam')
    elif name_feature =='MI':
        # Mutual-information-selected features.
        X_train_mi = X_train_encoded_df[IV_result['变量名称']].values
        X_test_mi = X_test_encoded_df[IV_result['变量名称']].values
        # Standardize; fit on the training split only.
        from sklearn.preprocessing import StandardScaler
        x_scaler = StandardScaler()
        X_train_mi_scaled = x_scaler.fit_transform(X_train_mi)
        X_test_mi_scaled = x_scaler.transform(X_test_mi)
        x_values = np.vstack([X_train_mi_scaled, X_test_mi_scaled])
        model = Transformer(x_values, optimizer='adam')
    elif name_feature == 'PCA':
        # Reuse the PCA projections computed earlier.
        x_values = np.vstack([X_train_pca, X_test_pca])
        model = Transformer(x_values, optimizer='adam')
    elif name_feature == '原始特征':
        # All original (label-encoded) features.
        X_train_orig = X_train_encoded_df.values
        X_test_orig = X_test_encoded_df.values
        # Standardize; fit on the training split only.
        from sklearn.preprocessing import StandardScaler
        x_scaler = StandardScaler()
        X_train_orig_scaled = x_scaler.fit_transform(X_train_orig)
        X_test_orig_scaled = x_scaler.transform(X_test_orig)
        x_values = np.vstack([X_train_orig_scaled, X_test_orig_scaled])
        model = Transformer(x_values, optimizer='adam')
    # NOTE(review): `y` is assigned here but never used afterwards.
    y = data[data.columns[-1]].values

    # Pick the matching pre-split train/test arrays.
    if name_feature == 'XGB':
        X_train, X_test = X_train_xgb_scaled, X_test_xgb_scaled
        y_train, y_test = y_train_temp, y_test_temp
    elif name_feature == 'MI':
        X_train, X_test = X_train_mi_scaled, X_test_mi_scaled
        y_train, y_test = y_train_temp, y_test_temp
    elif name_feature == 'PCA':
        X_train, X_test = X_train_pca, X_test_pca
        y_train, y_test = y_train_temp, y_test_temp
    elif name_feature == '原始特征':
        X_train, X_test = X_train_orig_scaled, X_test_orig_scaled
        y_train, y_test = y_train_temp, y_test_temp
    # Reshape to (samples, seq_len=1, features) as the Transformer expects.
    X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
    X_test = X_test.reshape((X_test.shape[0], 1, X_test.shape[1]))
    print('X_train.shape, y_train.shape, X_test.shape, y_test.shape')
    print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
    # Train with early stopping on validation loss.
    from keras.callbacks import EarlyStopping
    early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
    history = model.fit(X_train, y_train, epochs=50, batch_size=32, validation_data=(X_test, y_test),
                       shuffle=False, verbose=0, callbacks=[early_stopping])
    # Loss curves.
    loss(history, name_feature+'-Transformer')
    # Accuracy curves.
    acc(history, name_feature+'-Transformer')
    # Test-set predictions.
    y_pred = model.predict(X_test).argmax(axis=1)  # predicted class labels
    y_prod = model.predict(X_test)
    y_true = y_test.values.reshape(len(y_test))
    # (Improper "trick" code was removed here to keep the evaluation fair.)
    print('分类报告')
    # NOTE(review): classification_report expects (y_true, y_pred); the
    # arguments appear swapped here — confirm before trusting the report.
    print(classification_report(y_pred, y_true))
    # Metric summary row for this strategy.
    result = model_evaluation(y_true,y_pred,y_prod,name_feature+'-Transformer')
    print(result)
    # Confusion-matrix plot.
    from scikitplot.metrics import plot_confusion_matrix
    plot_confusion_matrix(y_true, y_pred)
    plt.savefig('output/'+name_path+'/'+name_feature+'-Transformer_plot_confusion_matrix.png')
    plt.close()
    # ROC curves.
    from scikitplot.metrics import plot_roc
    plot_roc(y_true, y_prod)
    plt.savefig('output/'+name_path+'/'+name_feature+'-Transformer_plot_roc.png')
    plt.close()
    if name_feature == 'XGB':
        # 5-fold cross-validation for the XGB-feature Transformer only.
        from sklearn.model_selection import KFold
        kf = KFold(n_splits=5, shuffle=True, random_state=123)
        kf_X = np.vstack([X_train,X_test])
        kf_y = np.vstack([y_train.values.reshape(-1,1), y_test.values.reshape(-1,1)]).reshape(-1)
        kf_results = pd.DataFrame()
        i = 0
        for train_index, test_index in kf.split(kf_X):
            X_train, X_test = kf_X[train_index], kf_X[test_index]
            y_train, y_test = kf_y[train_index], kf_y[test_index]
            model = Transformer(x_values, optimizer='adam')
            model.fit(X_train, y_train, epochs=20, batch_size=16, validation_data=(X_test, y_test),
                                shuffle=False, verbose=0)
            y_pred = model.predict(X_test).argmax(axis=1)  # predicted class labels
            y_prod = model.predict(X_test)
            y_true = y_test.reshape(len(y_test))
            kf_results = pd.concat([kf_results,model_evaluation(y_true, y_pred, y_prod,i)])
            i = i+1
        print(kf_results)
        kf_results.to_csv('output/'+name_path+'/'+name_feature+'-Transformer_kf_result.csv')
    model_result = pd.concat([model_result,result])

# Raw-feature XGBoost baseline with tuned parameters.
from xgboost import XGBClassifier
# Use the label-encoded train/test split prepared earlier.
X_train_xgb = X_train_encoded_df.values
X_test_xgb = X_test_encoded_df.values
y_train, y_test = y_train_temp, y_test_temp

# Tuned parameters from the notebook; GPU histogram method for acceleration.
print("训练 XGBoost (GPU加速)...")
xgb_model = XGBClassifier(
    learning_rate=0.039,
    max_depth=2,
    subsample=0.8,
    colsample_bytree=0.9,
    random_state=123,
    eval_metric='auc',
    n_estimators=1000,
    objective='binary:logistic',
    # Fix: dropped `silent=True` — that parameter was removed from the
    # XGBoost sklearn API and only produced "might not be used" warnings.
    tree_method='gpu_hist',  # GPU acceleration (XGBoost >= 2.0 prefers device='cuda')
    gpu_id=0
)
xgb_model.fit(X_train_xgb, y_train)
print("XGBoost训练完成，GPU状态:")
# Fix: monitor_gpu_usage() is defined much further down this script, so an
# unconditional call here raised NameError at runtime.  Guard the call.
if 'monitor_gpu_usage' in globals():
    monitor_gpu_usage()
# Test-set predictions.
y_pred = xgb_model.predict(X_test_xgb)  # predicted class labels
y_prod = xgb_model.predict_proba(X_test_xgb)  # class probabilities
y_true = y_test.values.reshape(len(y_test))
print('分类报告')
# Fix: classification_report expects (y_true, y_pred) in that order.
print(classification_report(y_true, y_pred))
# Metric summary row for this model.
result = model_evaluation(y_true, y_pred, y_prod, '原始特征-XGB')
print(result)
# Confusion-matrix plot.
from scikitplot.metrics import plot_confusion_matrix
plot_confusion_matrix(y_true, y_pred)
plt.savefig('output/' + name_path + '/原始特征-XGB_plot_confusion_matrix.png')
plt.close()
# ROC curves.
from scikitplot.metrics import plot_roc
plot_roc(y_true, y_prod)
plt.savefig('output/' + name_path + '/原始特征-XGB-Transformer_plot_roc.png')
plt.close()
# Append to the accumulated results and persist.
model_result = pd.concat([model_result,result])
model_result.to_csv('output/'+name_path+'/Model_result.csv')


# Reload the results table from disk (keeps later appends consistent with
# what was persisted).
model_result = pd.read_csv('output/'+name_path+'/Model_result.csv',index_col=0)
# CNN
def CNN_model(x_values, optimizer='adam'):
    """Build a small 1-D CNN classifier matching the Transformer's interface.

    x_values: 2-d array used only to size the input layer (features last).
    Returns a compiled Sequential model with a 2-unit softmax head and
    sparse-categorical-crossentropy loss.
    """
    stack = [
        Conv1D(filters=64, kernel_size=1, activation='relu',
               input_shape=(1, x_values.shape[-1])),
        MaxPooling1D(pool_size=2, padding='same'),
        Conv1D(filters=32, kernel_size=1, activation='relu'),
        MaxPooling1D(pool_size=2, padding='same'),
        Flatten(),
        Dropout(0.3),
        Dense(64, activation='relu'),
        Dropout(0.3),
        Dense(32, activation='relu'),
        Dropout(0.3),
        Dense(2, activation='softmax'),
    ]
    model = Sequential(stack)
    model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])  # compile with optimizer and loss
    return model
# XGB-selected features again, taken from the label-encoded frames.
X_train_xgb = X_train_encoded_df[importance_df['Feature']].values
X_test_xgb = X_test_encoded_df[importance_df['Feature']].values
# Standardize; fit on the training split only to avoid leakage.
from sklearn.preprocessing import StandardScaler
x_scaler = StandardScaler()
X_train_xgb_scaled = x_scaler.fit_transform(X_train_xgb)
X_test_xgb_scaled = x_scaler.transform(X_test_xgb)
x_values = np.vstack([X_train_xgb_scaled, X_test_xgb_scaled])
# NOTE(review): this Transformer instance is overwritten inside the loop
# below before it is ever used — it appears to be dead code.
model = Transformer(x_values, optimizer='Adam')
y_train, y_test = y_train_temp, y_test_temp
X_train, X_test = X_train_xgb_scaled, X_test_xgb_scaled
# Compare simpler classifiers (LR, DT, CNN) on the XGB-selected features.
for name_model in ['LR','DT','CNN']:
    # Classifier imports (re-executed each iteration; harmless).
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier
    if name_model=='LR':
        model = LogisticRegression()
        model.fit(X_train, y_train)
        # Test-set predictions.
        y_pred = model.predict(X_test)  # predicted class labels
        y_prod = model.predict_proba(X_test)
        y_true = y_test.values.reshape(len(y_test))
    elif name_model=='DT':
        model = DecisionTreeClassifier(max_depth=2, random_state=81)
        model.fit(X_train, y_train)
        # Test-set predictions.
        y_pred = model.predict(X_test)  # predicted class labels
        y_prod = model.predict_proba(X_test)
        y_true = y_test.values.reshape(len(y_test))
    elif name_model=='CNN':
        # Reshape to (samples, 1, features) for Conv1D input; mutating
        # X_train/X_test is safe because 'CNN' is the last iteration.
        X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
        X_test = X_test.reshape((X_test.shape[0], 1, X_test.shape[1]))
        print('X_train.shape, y_train.shape, X_test.shape, y_test.shape')
        print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
        model = CNN_model(x_values, optimizer='adam')
        # EarlyStopping was bound at module level by an earlier import.
        early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
        history = model.fit(X_train, y_train, epochs=50, batch_size=32, validation_data=(X_test, y_test),
                            shuffle=False, verbose=0, callbacks=[early_stopping])
        # Loss curves.
        loss(history, 'XGB-'+name_model)
        # Accuracy curves.
        acc(history, 'XGB-'+name_model)
        # Test-set predictions.
        y_pred = model.predict(X_test).argmax(axis=1)  # predicted class labels
        y_prod = model.predict(X_test)
        y_true = y_test.values.reshape(len(y_test))

    print('分类报告')
    # NOTE(review): classification_report expects (y_true, y_pred); the
    # arguments appear swapped here — confirm before trusting the report.
    print(classification_report(y_pred, y_true))
    # Metric summary row for this model.
    result = model_evaluation(y_true, y_pred, y_prod, 'XGB-'+name_model)
    print(result)
    # Confusion-matrix plot.
    from scikitplot.metrics import plot_confusion_matrix
    plot_confusion_matrix(y_true, y_pred)
    plt.savefig('output/' + name_path + '/'+'XGB-'+name_model+'_plot_confusion_matrix.png')
    plt.close()
    # ROC curves.
    from scikitplot.metrics import plot_roc
    plot_roc(y_true, y_prod)
    plt.savefig('output/' + name_path + '/'+'XGB-'+name_model+'-Transformer_plot_roc.png')
    plt.close()
    model_result = pd.concat([model_result, result])
model_result.to_csv('output/'+name_path+'/Model_result.csv')

# Additional models with notebook-tuned parameters; GPU where supported.
import lightgbm as lgb
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from catboost import CatBoostClassifier
from sklearn.model_selection import KFold

# GPU acceleration status report.
print("=== GPU加速配置 ===")
print(f"当前GPU设备: {tf.config.list_physical_devices('GPU')}")
print(f"GPU内存增长: {tf.config.experimental.get_memory_growth(tf.config.list_physical_devices('GPU')[0]) if tf.config.list_physical_devices('GPU') else 'N/A'}")

# Cap TF GPU memory to avoid out-of-memory errors.
if tf.config.list_physical_devices('GPU'):
    try:
        # Limit TF to 8GB of GPU memory, leaving headroom for other libraries.
        gpu = tf.config.list_physical_devices('GPU')[0]
        tf.config.set_logical_device_configuration(
            gpu,
            [tf.config.LogicalDeviceConfiguration(memory_limit=8192)]
        )
        print("GPU内存限制设置为8GB")
    except RuntimeError as e:
        # Raised when the GPU context is already initialized (models were
        # trained above); the limit cannot be applied retroactively.
        print(f"GPU内存配置失败: {e}")

# CUDA availability checks.
# NOTE(review): tf.test.is_gpu_available() is deprecated in favour of
# tf.config.list_physical_devices('GPU') — confirm against the TF version.
print(f"CUDA可用: {tf.test.is_built_with_cuda()}")
print(f"GPU可用: {tf.test.is_gpu_available()}")

print("=== 开始训练传统机器学习模型 ===")

# GPU utilisation monitoring helper
def monitor_gpu_usage():
    """Best-effort print of GPU utilisation and memory via nvidia-smi."""
    try:
        import subprocess
        query = ['nvidia-smi',
                 '--query-gpu=utilization.gpu,memory.used,memory.total',
                 '--format=csv,noheader,nounits']
        proc = subprocess.run(query, capture_output=True, text=True)
        if proc.returncode != 0:
            print("无法获取GPU使用情况")
            return
        gpu_util, mem_used, mem_total = proc.stdout.strip().split(', ')
        print(f"GPU利用率: {gpu_util}%, 显存使用: {mem_used}MB/{mem_total}MB")
    except Exception as e:
        # nvidia-smi missing, or unexpected output shape — never crash.
        print(f"GPU监控失败: {e}")

# GPU state before training the classical models.
print("训练前GPU状态:")
monitor_gpu_usage()

# 1. RandomForest with the notebook's tuned parameters.
print("训练 RandomForestClassifier...")
rf_model = RandomForestClassifier(
    n_jobs=4, 
    random_state=123,
    criterion='gini',
    n_estimators=100,
    verbose=False
)
rf_model.fit(X_train_encoded_df, y_train_temp)
y_pred_rf = rf_model.predict(X_test_encoded_df)
y_prod_rf = rf_model.predict_proba(X_test_encoded_df)
result_rf = model_evaluation(y_test_temp, y_pred_rf, y_prod_rf, 'RandomForest')
print("RandomForest结果:")
print(result_rf)
model_result = pd.concat([model_result, result_rf])

# 2. AdaBoost with the notebook's tuned parameters.
# NOTE(review): algorithm='SAMME.R' is deprecated in recent scikit-learn
# (removed in 1.6) — confirm against the installed version.
print("训练 AdaBoostClassifier...")
ada_model = AdaBoostClassifier(
    random_state=123,
    algorithm='SAMME.R',
    learning_rate=0.8,
    n_estimators=100
)
ada_model.fit(X_train_encoded_df, y_train_temp)
y_pred_ada = ada_model.predict(X_test_encoded_df)
y_prod_ada = ada_model.predict_proba(X_test_encoded_df)
result_ada = model_evaluation(y_test_temp, y_pred_ada, y_prod_ada, 'AdaBoost')
print("AdaBoost结果:")
print(result_ada)
model_result = pd.concat([model_result, result_ada])

# 3. CatBoost with the notebook's tuned parameters; GPU task type.
print("训练 CatBoostClassifier (GPU加速)...")
cat_model = CatBoostClassifier(
    iterations=500,
    learning_rate=0.02,
    depth=12,
    eval_metric='AUC',
    random_seed=123,
    bagging_temperature=0.2,
    od_type='Iter',          # iteration-based overfitting detector
    metric_period=50,
    od_wait=100,             # stop after 100 iterations without improvement
    verbose=False,
    task_type='GPU',  # GPU acceleration
    devices='0'  # first GPU
)
cat_model.fit(X_train_encoded_df, y_train_temp)
print("CatBoost训练完成，GPU状态:")
monitor_gpu_usage()
y_pred_cat = cat_model.predict(X_test_encoded_df)
y_prod_cat = cat_model.predict_proba(X_test_encoded_df)
result_cat = model_evaluation(y_test_temp, y_pred_cat, y_prod_cat, 'CatBoost')
print("CatBoost结果:")
print(result_cat)
model_result = pd.concat([model_result, result_cat])

# 4. LightGBM with the notebook's tuned parameters; GPU device.
print("训练 LightGBM (GPU加速)...")
lgb_params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': 'auc',
    'learning_rate': 0.05,
    'num_leaves': 7,
    'max_depth': 4,
    'min_child_samples': 100,
    'max_bin': 100,
    'subsample': 0.9,
    'subsample_freq': 1,
    'colsample_bytree': 0.7,
    'min_child_weight': 0,
    'min_split_gain': 0,
    'nthread': 4,
    'verbose': -1,
    'scale_pos_weight': 50,  # counteract class imbalance
    'random_state': 123,
    'device': 'gpu',  # GPU acceleration
    'gpu_platform_id': 0,
    'gpu_device_id': 0
}

# LightGBM datasets; the validation set references the training set's
# bin mapping.
train_data = lgb.Dataset(X_train_encoded_df, label=y_train_temp)
test_data = lgb.Dataset(X_test_encoded_df, label=y_test_temp, reference=train_data)

# Fix: the `early_stopping_rounds` / `verbose_eval` keyword arguments were
# removed from lgb.train() in LightGBM 4.x — use callbacks instead
# (log_evaluation(period=50) mirrors the old verbose_eval=50).
lgb_model = lgb.train(
    lgb_params,
    train_data,
    valid_sets=[train_data, test_data],
    valid_names=['train', 'valid'],
    num_boost_round=1000,
    callbacks=[lgb.early_stopping(stopping_rounds=50),
               lgb.log_evaluation(period=50)]
)
print("LightGBM训练完成，GPU状态:")
monitor_gpu_usage()

# Positive-class probability at the best iteration.
y_pred_lgb = lgb_model.predict(X_test_encoded_df, num_iteration=lgb_model.best_iteration)
y_pred_lgb_binary = (y_pred_lgb > 0.5).astype(int)
# Two-column probability matrix [P(0), P(1)] as model_evaluation expects.
y_prod_lgb = np.column_stack([1-y_pred_lgb, y_pred_lgb])

# Evaluate and accumulate.
result_lgb = model_evaluation(y_test_temp, y_pred_lgb_binary, y_prod_lgb, 'LightGBM')
print("LightGBM结果:")
print(result_lgb)
model_result = pd.concat([model_result, result_lgb])

# 5. 5-fold cross-validation with LightGBM on the training split.
print("=== 开始5折交叉验证 ===")
kf = KFold(n_splits=5, random_state=123, shuffle=True)
cv_results = []

for fold, (train_idx, val_idx) in enumerate(kf.split(X_train_encoded_df)):
    print(f"训练第 {fold+1} 折...")

    # Per-fold train/validation split.
    X_train_fold = X_train_encoded_df.iloc[train_idx]
    X_val_fold = X_train_encoded_df.iloc[val_idx]
    y_train_fold = y_train_temp.iloc[train_idx]
    y_val_fold = y_train_temp.iloc[val_idx]

    # LightGBM datasets for this fold.
    train_data_fold = lgb.Dataset(X_train_fold, label=y_train_fold)
    val_data_fold = lgb.Dataset(X_val_fold, label=y_val_fold, reference=train_data_fold)

    # Fix: `early_stopping_rounds`/`verbose_eval` kwargs were removed from
    # lgb.train() in LightGBM 4.x; use callbacks (verbose=False mirrors the
    # old verbose_eval=False).
    lgb_model_fold = lgb.train(
        lgb_params,
        train_data_fold,
        valid_sets=[train_data_fold, val_data_fold],
        valid_names=['train', 'valid'],
        num_boost_round=1000,
        callbacks=[lgb.early_stopping(stopping_rounds=50, verbose=False)]
    )

    # Validation predictions at the best iteration.
    y_pred_fold = lgb_model_fold.predict(X_val_fold, num_iteration=lgb_model_fold.best_iteration)
    y_pred_fold_binary = (y_pred_fold > 0.5).astype(int)
    y_prod_fold = np.column_stack([1-y_pred_fold, y_pred_fold])

    # Fold metrics.
    result_fold = model_evaluation(y_val_fold, y_pred_fold_binary, y_prod_fold, f'LightGBM_CV_Fold{fold+1}')
    cv_results.append(result_fold)

# Average the five fold results into a single summary row.
cv_avg = pd.concat(cv_results).mean()
cv_avg.name = 'LightGBM_CV_平均'
cv_avg_df = pd.DataFrame([cv_avg])
print("交叉验证平均结果:")
print(cv_avg_df)
model_result = pd.concat([model_result, cv_avg_df])

# Persist the full results table.
model_result.to_csv('output/'+name_path+'/Model_result.csv')

# Model result analysis and visualisation.
print("=== 所有模型训练完成 ===")
print("最终模型结果:")
print(model_result)

# One horizontal bar chart per metric, models sorted ascending, on a 2x3
# grid.  Refactored from six copy-pasted subplot sections into a single
# loop over (column, x-label, title) specs — output is identical.
metric_specs = [
    ('AUC值', 'AUC值', '模型AUC值比较'),
    ('准确率', '准确率', '模型准确率比较'),
    ('F1值', 'F1值', '模型F1值比较'),
    ('精确率', '精确率', '模型精确率比较'),
    ('召回率', '召回率', '模型召回率比较'),
    ('KS', 'KS值', '模型KS值比较'),
]

plt.figure(figsize=(15, 10))
for pos, (metric, x_label, chart_title) in enumerate(metric_specs, start=1):
    plt.subplot(2, 3, pos)
    scores = model_result[metric].sort_values(ascending=True)
    plt.barh(range(len(scores)), scores.values)
    plt.yticks(range(len(scores)), scores.index)
    plt.xlabel(x_label)
    plt.title(chart_title)
    plt.grid(True, alpha=0.3)

plt.tight_layout()
plt.savefig('output/'+name_path+'/model_comparison.png', dpi=300, bbox_inches='tight')
plt.close()

# Best model per headline metric.
best_auc_model = model_result.loc[model_result['AUC值'].idxmax()]
best_acc_model = model_result.loc[model_result['准确率'].idxmax()]
best_f1_model = model_result.loc[model_result['F1值'].idxmax()]

print("\n=== 最佳模型分析 ===")
print(f"最佳AUC模型: {best_auc_model.name} (AUC: {best_auc_model['AUC值']:.3f})")
print(f"最佳准确率模型: {best_acc_model.name} (准确率: {best_acc_model['准确率']:.3f})")
print(f"最佳F1模型: {best_f1_model.name} (F1: {best_f1_model['F1值']:.3f})")

# Save the best-model summary table.
best_models = pd.DataFrame({
    '指标': ['AUC值', '准确率', 'F1值'],
    '最佳模型': [best_auc_model.name, best_acc_model.name, best_f1_model.name],
    '最佳值': [best_auc_model['AUC值'], best_acc_model['准确率'], best_f1_model['F1值']]
})
best_models.to_csv('output/'+name_path+'/best_models.csv', index=False)

print("\n=== 模型训练完成 ===")
print("所有结果已保存到 output/" + name_path + "/ 目录")

