import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Matplotlib setup: SimHei renders the Chinese labels used in the figures;
# keep the ASCII minus sign so negative ticks still display with this font.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# All artifacts for this dataset are written under output/card_clients.
name_path = 'card_clients'
os.makedirs('output/'+name_path, exist_ok=True)  # create the output folder
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
# 模型评估
from sklearn.metrics import *
def model_evaluation(y_true, y_pred, y_prod, name):
    """Build a one-row DataFrame of binary-classification metrics.

    Parameters
    ----------
    y_true : array-like of 0/1 ground-truth labels.
    y_pred : array-like of 0/1 predicted labels.
    y_prod : 2-D array of class probabilities; column 1 is taken as the
        positive-class score (callers pass predict_proba / softmax output).
    name : row index label for the returned DataFrame.

    Returns
    -------
    pandas.DataFrame with accuracy, precision, recall, F1, AUC,
    specificity, sensitivity, PPV, NPV, Cohen's kappa and KS, rounded to 3
    decimals.
    """
    # Count all four confusion-matrix cells in a single pass instead of
    # the four separate loops the original used.
    tp = tn = fp = fn = 0
    for truth, pred in zip(y_true, y_pred):
        if truth == 1 and pred == 1:
            tp += 1
        elif truth == 0 and pred == 0:
            tn += 1
        elif truth == 0 and pred == 1:
            fp += 1
        elif truth == 1 and pred == 0:
            fn += 1

    def _safe_div(num, den):
        # Guard against a missing class in y_true/y_pred, which would
        # otherwise raise ZeroDivisionError.
        return num / den if den else 0.0

    tnr = _safe_div(tn, tn + fp)  # specificity: TN / (TN + FP)
    tpr = _safe_div(tp, tp + fn)  # sensitivity: TP / (TP + FN)
    ppv = _safe_div(tp, tp + fp)  # positive predictive value
    npv = _safe_div(tn, tn + fn)  # negative predictive value

    # KS statistic: maximum vertical gap between the TPR and FPR curves.
    fpr_curve, tpr_curve, _ = roc_curve(y_true, y_prod[:, 1])
    ks = np.max(tpr_curve - fpr_curve)

    # NOTE: `multi_class='ovo'` was dropped from roc_auc_score — it is
    # ignored for 1-D binary scores, so behaviour is unchanged.
    result = pd.DataFrame({'准确率': [round(accuracy_score(y_true, y_pred), 3)],
                           '精确率': [round(precision_score(y_true, y_pred), 3)],
                           '召回率': [round(recall_score(y_true, y_pred), 3)],
                           'F1值': [round(f1_score(y_true, y_pred), 3)],
                           'AUC值': [round(roc_auc_score(y_true, y_prod[:, 1]), 3)],
                           '特异度': [round(tnr, 3)],
                           '灵敏度': [round(tpr, 3)],
                           'PPV': [round(ppv, 3)],
                           'NPV': [round(npv, 3)],
                           'kappa': [round(cohen_kappa_score(y_true, y_pred), 3)],
                           'KS': [round(ks, 3)]},
                          index=[name])
    return result
############# Taiwan credit-card client default dataset
# header=1 skips the title row of the .xls; column 0 is the client ID.
df = pd.read_excel('data/card_clients/default of credit card clients.xls',header=1,index_col=0)
df[df.columns[-1]].value_counts()  # class balance of the default label (result not stored)
# IQR outlier detection on the continuous amount columns
for col in ['LIMIT_BAL','BILL_AMT1', 'BILL_AMT2',
       'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1',
       'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6']:
    # quartiles
    Q1 = df[col].quantile(0.25)
    Q3 = df[col].quantile(0.75)
    IQR = Q3 - Q1
    # Tukey fences: values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are outliers
    lower_bound = Q1 - 1.5 * IQR
    upper_bound = Q3 + 1.5 * IQR
    # blank out outliers as NaN so they get imputed below
    df.loc[(df[col] < lower_bound) | (df[col] > upper_bound), col] = np.nan
# K-nearest-neighbour imputation of the NaNs introduced above
from sklearn.impute import KNNImputer
imputer = KNNImputer(n_neighbors=2)  # impute from the 2 nearest neighbours
data = imputer.fit_transform(df)
# KNNImputer returns a bare ndarray; restore column names.
# NOTE(review): the original ID index is dropped here (RangeIndex instead).
data = pd.DataFrame(data, columns=df.columns)


# ------------------------ XGB feature selection -------------------------
from sklearn.feature_selection import *
from xgboost import XGBClassifier
# RFE: recursive feature elimination driven by an XGB estimator
# (with no n_features_to_select, RFE keeps half of the features).
rfe = RFE(XGBClassifier())
rfe.fit(data[data.columns[:-1]], data[data.columns[-1]])
selected_features = [data.columns[:-1][i] for i in range(len(data.columns[:-1])) if rfe.support_[i]]
print("Selected Features:", selected_features)
# Refit XGB on the retained features to obtain importance scores.
model = XGBClassifier()
model.fit(data[selected_features], data[data.columns[-1]])
importance_df = pd.DataFrame({'Feature': selected_features, 'Importance': model.feature_importances_})
# ascending order so barh() draws the most important feature at the top
importance_df = importance_df.sort_values(by='Importance', ascending=True)
importance_df.sort_values(by='Importance', ascending=False).to_csv('output/'+name_path+'/XGB_Feature_Importance.csv',index=False)
plt.style.use('ggplot')
plt.figure(figsize=(10, 8))
plt.barh(importance_df['Feature'], importance_df['Importance'])
plt.xlabel('特征重要性')  # x-axis: feature importance
plt.ylabel('特征变量')  # y-axis: feature name
plt.tight_layout()  # auto-layout so long labels are not clipped
plt.savefig('output/'+name_path+'/XGB_Feature_Importance.png')
plt.close()
# Class rebalancing with SMOTE.  NOTE(review): the original comment said
# "undersampling", but SMOTE *over*-samples the minority class by
# synthesising new examples.
from imblearn.over_sampling import SMOTE
ros = SMOTE(random_state=42)
X_resampled, y_resampled = ros.fit_resample(data[selected_features], data[data.columns[-1]])
# Rebuild `data` as selected features + label — from here on,
# data.columns[:-1] refers to the RFE-selected features only.
data = pd.concat([X_resampled, y_resampled],axis=1)
# Correlation heatmap of the selected features plus the label
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# columns to correlate: selected features + the target column
continuous_features = list(selected_features)+list([data.columns[-1]])
# Pearson correlation matrix via DataFrame.corr()
correlation_matrix = data[continuous_features].corr()
correlation_matrix = round(correlation_matrix,2)
# render the matrix with seaborn's heatmap
plt.figure(figsize=(8, 6))
sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', vmin=-1, vmax=1)
plt.title('Correlation Heatmap of Continuous Features')
plt.tight_layout()
plt.savefig('./output/card_clients/corr.png')
plt.show()
#---------------------------- MI feature selection -------------------------
from sklearn.feature_selection import mutual_info_classif
# Mutual information between each feature and the label.
# NOTE(review): `data` was rebuilt after SMOTE, so columns[:-1] here are
# the RFE-selected features, not the original full feature set.
info_gains = mutual_info_classif(data[data.columns[:-1]], data[data.columns[-1]])
# keep features whose MI exceeds the 0.01 threshold
selected_features = data[data.columns[:-1]].columns[info_gains > 0.01]  # threshold chosen heuristically
print(selected_features)
IV_result = pd.DataFrame([list(selected_features),list(info_gains[info_gains > 0.01])],index=['变量名称','MI值']).T
IV_result = IV_result.sort_values(by='MI值', ascending=False)
IV_result.to_csv('output/'+name_path+'/MI_result.csv',index=False)

#---------------------------- PCA -------------------------
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# Standardise before PCA so every feature contributes on the same scale.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(data[data.columns[:-1]])
# Keep enough components to explain 95% of the variance
# (a float n_components picks the component count automatically;
# the original comment claiming "reduce to 2 dims" was wrong).
pca = PCA(n_components=0.95)
X_data = pca.fit_transform(X_scaled)
# per-component explained-variance ratio
var_ratio = pca.explained_variance_ratio_
print('方差贡献率:', var_ratio)
# cumulative explained-variance ratio
cumulative_var_ratio = np.cumsum(pca.explained_variance_ratio_)
print('累计方差贡献率:', cumulative_var_ratio)
# BUG FIX: number the components from 1 — the original labelled the first
# principal component "第0主成分".
PCA_result = pd.DataFrame([['第'+str(i+1)+'主成分' for i in range(len(var_ratio))],list(var_ratio),list(cumulative_var_ratio)],index=['主成分','方差贡献率','累计方差贡献率']).T
PCA_result.to_csv('output/'+name_path+'/PCA_result.csv',index=False)

# Transformer模型
from keras.models import *
from keras.layers import *
from keras import optimizers
# Plot the train/validation loss curves of a fitted Keras model.
def loss(history, name):
    """Save (and show) the loss curves from *history* under output/."""
    out_file = os.getcwd() + '/output/' + name_path + '/' + name + '_Loss.png'
    plt.figure()
    for key, label in (('loss', 'train-loss'), ('val_loss', 'test-loss')):
        plt.plot(history.history[key], label=label)
    plt.legend()
    plt.title('Loss')
    plt.savefig(out_file)
    plt.show()
    plt.close()
# Plot the train/validation accuracy curves of a fitted Keras model.
def acc(history, name):
    """Save (and show) the accuracy curves from *history* under output/."""
    out_file = os.getcwd() + '/output/' + name_path + '/' + name + '_Accuracy.png'
    plt.figure()
    for key, label in (('accuracy', 'train-acc'), ('val_accuracy', 'test-acc')):
        plt.plot(history.history[key], label=label)
    plt.title('Accuracy')
    plt.legend()
    plt.savefig(out_file)
    plt.show()
    plt.close()
# Transformer classifier
def Transformer(x_values, optimizer='SGD'):
    """Build a one-encoder Transformer for tabular rows.

    Input is expected as (batch, 1, n_features); the head is a 2-way
    softmax compiled with sparse categorical cross-entropy.
    """
    def _encoder_block(layer_in):
        # Self-attention sub-layer with a residual connection.
        attn = MultiHeadAttention(
            key_dim=256, num_heads=4, dropout=0.2)(layer_in, layer_in)
        attn = Dropout(0.2)(attn)
        residual = attn + layer_in
        # Position-wise feed-forward sub-layer, projected back to the
        # input width so the second residual addition is shape-compatible.
        ff = Dense(units=128, activation="relu")(residual)
        ff = Dropout(0.2)(ff)
        ff = Dense(units=layer_in.shape[-1])(ff)
        return ff + residual

    net_in = Input(shape=(1, x_values.shape[-1]))
    net = _encoder_block(net_in)
    # Pool over the feature axis ("channels_first" averages per time step).
    net = GlobalAveragePooling1D(data_format="channels_first")(net)
    net = Dense(128, activation="relu")(net)
    net = Dropout(0.2)(net)
    # classification head
    net_out = Dense(2, activation='softmax')(net)
    model = Model(net_in, net_out)
    model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])
    return model

model_result = pd.DataFrame()
# Train one Transformer per feature-selection scheme and collect metrics.
for name_feature in ['XGB','MI','PCA','原始特征']:
    if name_feature=='XGB':
        # features ranked by XGB importance
        X = data[importance_df['Feature']].values
        # standardise
        from sklearn.preprocessing import StandardScaler
        x_scaler = StandardScaler()
        x_values = x_scaler.fit_transform(X)
        model = Transformer(x_values, optimizer='Adam')
    elif name_feature =='MI':
        X = data[IV_result['变量名称']].values
        # standardise
        from sklearn.preprocessing import StandardScaler
        x_scaler = StandardScaler()
        x_values = x_scaler.fit_transform(X)
        model = Transformer(x_values, optimizer=optimizers.SGD(lr=0.0003, momentum=0.85, clipvalue=0.5))
    elif name_feature == 'PCA':
        x_values = X_data
        model = Transformer(x_values, optimizer=optimizers.SGD(lr=0.0003, momentum=0.85, clipvalue=0.3))
    elif name_feature == '原始特征':
        # BUG FIX: the original scaled the stale `X` left over from the MI
        # branch, so the "raw features" model was trained on MI features.
        # Scale the full raw feature matrix instead.
        X = data[data.columns[:-1]].values
        from sklearn.preprocessing import StandardScaler
        x_scaler = StandardScaler()
        x_values = x_scaler.fit_transform(X)
        model = Transformer(x_values, optimizer=optimizers.SGD(lr=0.0003, momentum=0.85, clipvalue=0.3))
    y = data[data.columns[-1]].values

    # train/test split
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(x_values, y, test_size=0.2,random_state=123)  # alternate seeds tried: 45, 123, 169, 156
    # reshape to (samples, timesteps=1, features) for the Transformer input
    X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
    X_test = X_test.reshape((X_test.shape[0], 1, X_test.shape[1]))
    print('X_train.shape, y_train.shape, X_test.shape, y_test.shape')
    print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
    # train
    history = model.fit(X_train, y_train, epochs=20, batch_size=16, validation_data=(X_test, y_test),shuffle=False,verbose=0)
    # loss curves
    loss(history, name_feature+'-Transformer')
    # accuracy curves
    acc(history, name_feature+'-Transformer')
    # test-set predictions
    y_pred = model.predict(X_test).argmax(axis=1)
    y_prod = model.predict(X_test)
    y_true = y_test.reshape(len(y_test))
    print('分类报告')
    # BUG FIX: classification_report expects (y_true, y_pred) in that order;
    # the original passed them swapped, transposing precision and recall.
    print(classification_report(y_true, y_pred))
    # metric summary
    result = model_evaluation(y_true,y_pred,y_prod,name_feature+'-Transformer')
    print(result)
    # confusion matrix
    from scikitplot.metrics import plot_confusion_matrix
    plot_confusion_matrix(y_true, y_pred)
    plt.savefig('output/'+name_path+'/'+name_feature+'-Transformer_plot_confusion_matrix.png')
    plt.close()
    # ROC curve
    from scikitplot.metrics import plot_roc
    plot_roc(y_true, y_prod)
    plt.savefig('output/'+name_path+'/'+name_feature+'-Transformer_plot_roc.png')
    plt.close()
    if name_feature == 'XGB':
        # 5-fold cross-validation on the XGB feature set
        from sklearn.model_selection import KFold
        kf = KFold(n_splits=5, shuffle=True, random_state=123)
        kf_X = np.vstack([X_train,X_test])
        kf_y = np.vstack([y_train.reshape(-1,1), y_test.reshape(-1,1)]).reshape(-1)
        kf_results = pd.DataFrame()
        i = 0
        for train_index, test_index in kf.split(kf_X):
            X_train, X_test = kf_X[train_index], kf_X[test_index]
            y_train, y_test = kf_y[train_index], kf_y[test_index]
            model = Transformer(x_values, optimizer='adam')
            model.fit(X_train, y_train, epochs=20, batch_size=16, validation_data=(X_test, y_test),
                                shuffle=False, verbose=0)
            y_pred = model.predict(X_test).argmax(axis=1)
            y_prod = model.predict(X_test)
            y_true = y_test.reshape(len(y_test))
            kf_results = pd.concat([kf_results,model_evaluation(y_true, y_pred, y_prod,i)])
            i = i+1
        print(kf_results)
        kf_results.to_csv('output/'+name_path+'/'+name_feature+'-Transformer_kf_result.csv')
    model_result = pd.concat([model_result,result])

# Baseline: XGBoost on the full (raw) feature set
X = data[data.columns[:-1]].values
y = data[data.columns[-1]].values
# standardise
from sklearn.preprocessing import StandardScaler
x_scaler = StandardScaler()
x_values = x_scaler.fit_transform(X)
# train/test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x_values, y, test_size=0.2, random_state=156)  # alternate seeds tried: 45, 123, 169
# XGB classifier
from xgboost import XGBClassifier
xgb_model = XGBClassifier(learning_rate=0.001, max_depth=2, random_state=81,gamma=0.1)
xgb_model.fit(X_train, y_train)
# test-set predictions
y_pred = xgb_model.predict(X_test)
y_prod = xgb_model.predict_proba(X_test)
y_true = y_test.reshape(len(y_test))
print('分类报告')
# BUG FIX: classification_report takes (y_true, y_pred) in that order;
# the original passed them swapped, transposing precision and recall.
print(classification_report(y_true, y_pred))
# metric summary
result = model_evaluation(y_true, y_pred, y_prod, '原始特征-XGB')
print(result)
# confusion matrix
from scikitplot.metrics import plot_confusion_matrix
plot_confusion_matrix(y_true, y_pred)
plt.savefig('output/' + name_path + '/原始特征-XGB_plot_confusion_matrix.png')
plt.close()
# ROC curve
from scikitplot.metrics import plot_roc
plot_roc(y_true, y_prod)
plt.savefig('output/' + name_path + '/原始特征-XGB-Transformer_plot_roc.png')
plt.close()
# append and persist all model results collected so far
model_result = pd.concat([model_result,result])
model_result.to_csv('output/'+name_path+'/Model_result.csv')


# Reload the saved results so the comparison runs below append to them.
model_result = pd.read_csv('output/'+name_path+'/Model_result.csv',index_col=0)
# CNN baseline
def CNN_model(x_values, optimizer='adam'):
    """Build a small 1-D CNN classifier for (1, n_features) inputs.

    Ends in a 2-way softmax compiled with sparse categorical
    cross-entropy; `x_values` only supplies the feature count.
    """
    net = Sequential([
        Conv1D(filters=256, kernel_size=1, activation='relu',
               input_shape=(1, x_values.shape[-1])),
        MaxPooling1D(pool_size=2, padding='same'),
        Flatten(),
        Dropout(0.2),
        Dense(128),
        Dense(2, activation='softmax'),
    ])
    net.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer,
                metrics=['accuracy'])
    return net
# ---- Shared setup for the baseline comparison: XGB-selected features ----
X = data[importance_df['Feature']].values
# standardise
from sklearn.preprocessing import StandardScaler
x_scaler = StandardScaler()
x_values = x_scaler.fit_transform(X)
# NOTE(review): this Transformer instance is overwritten inside the loop
# below and never trained; kept only because building it advances the RNG
# state used for later weight initialisation.
model = Transformer(x_values, optimizer='Adam')
y = data[data.columns[-1]].values
# train/test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x_values, y, test_size=0.2,
                                                    random_state=123)  # alternate seeds tried: 45, 123, 169, 156
# Compare baseline models (LR, DT, CNN) on the XGB-selected features.
for name_model in ['LR','DT','CNN']:
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier
    if name_model=='LR':
        model = LogisticRegression()
        model.fit(X_train, y_train)
        # test-set predictions
        y_pred = model.predict(X_test)
        y_prod = model.predict_proba(X_test)
        y_true = y_test.reshape(len(y_test))
    elif name_model=='DT':
        model = DecisionTreeClassifier(max_depth=2, random_state=81)
        model.fit(X_train, y_train)
        # test-set predictions
        y_pred = model.predict(X_test)
        y_prod = model.predict_proba(X_test)
        y_true = y_test.reshape(len(y_test))
    elif name_model=='CNN':
        # reshape to (samples, 1, features) for the Conv1D input
        # (CNN runs last, so the 2-D arrays the other models need are
        # not reshaped until this point)
        X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
        X_test = X_test.reshape((X_test.shape[0], 1, X_test.shape[1]))
        print('X_train.shape, y_train.shape, X_test.shape, y_test.shape')
        print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
        model = CNN_model(x_values, optimizer=optimizers.SGD(lr=0.0001, momentum=0.85, clipvalue=0.3))
        history = model.fit(X_train, y_train, epochs=20, batch_size=16, validation_data=(X_test, y_test), shuffle=False,
                            verbose=0)
        # loss curves
        loss(history, 'XGB-'+name_model)
        # accuracy curves
        acc(history, 'XGB-'+name_model)
        # test-set predictions
        y_pred = model.predict(X_test).argmax(axis=1)
        y_prod = model.predict(X_test)
        y_true = y_test.reshape(len(y_test))

    print('分类报告')
    # BUG FIX: classification_report takes (y_true, y_pred) in that order;
    # the original passed them swapped, transposing precision and recall.
    print(classification_report(y_true, y_pred))
    # metric summary
    result = model_evaluation(y_true, y_pred, y_prod, 'XGB-'+name_model)
    print(result)
    # confusion matrix
    from scikitplot.metrics import plot_confusion_matrix
    plot_confusion_matrix(y_true, y_pred)
    plt.savefig('output/' + name_path + '/'+'XGB-'+name_model+'_plot_confusion_matrix.png')
    plt.close()
    # ROC curve
    from scikitplot.metrics import plot_roc
    plot_roc(y_true, y_prod)
    plt.savefig('output/' + name_path + '/'+'XGB-'+name_model+'-Transformer_plot_roc.png')
    plt.close()
    model_result = pd.concat([model_result, result])
model_result.to_csv('output/'+name_path+'/Model_result.csv')

