import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import minmax_scale
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier


def read_total_data(DataFrame):
    """Split a DataFrame into a feature matrix and a label vector.

    The last column is taken as the label; every other column is a feature.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Rows are samples; the final column holds the label.

    Returns
    -------
    (list[list], list)
        ``feature`` — one inner list per row (per-column dtypes preserved),
        ``label`` — the last column as a plain list.
    """
    # itertuples(index=False) walks rows in one C-level pass instead of the
    # original per-cell DataFrame[col][row] lookups, and iloc-based selection
    # also works when the index is not the default RangeIndex.
    feature = [list(row) for row in DataFrame.iloc[:, :-1].itertuples(index=False)]
    label = DataFrame.iloc[:, -1].tolist()
    return feature, label


def read_label_data(DataFrame):
    """Partition rows by the binary label stored in the last column.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Rows are samples; the final column holds a 0/1 label.

    Returns
    -------
    (list[list], list[list], list, list)
        ``feature_1`` / ``label_1`` — rows whose label equals 1,
        ``feature_0`` / ``label_0`` — every other row (labelled 0).

    Note: any label other than 1 falls into the 0 bucket and is recorded
    as 0, mirroring the original if/else behaviour.
    """
    feature_1 = []
    feature_0 = []
    label_1 = []
    label_0 = []

    # One vectorized pass; the original duplicated the same column-copy loop
    # in both branches and did a per-cell DataFrame[col][row] lookup.
    labels = DataFrame.iloc[:, -1]
    features = DataFrame.iloc[:, :-1]
    for row_values, y in zip(features.itertuples(index=False), labels):
        if y == 1:
            feature_1.append(list(row_values))
            label_1.append(1)
        else:
            feature_0.append(list(row_values))
            label_0.append(0)

    return feature_1, feature_0, label_1, label_0


# Load the Australian credit dataset (space-separated, no header, last column
# is the 0/1 label) and the generated samples; both files must sit next to
# this script.
data = pd.read_csv("australian.dat", sep=" ", header=None)
G_data = pd.read_csv("G_data_1.csv", header=None)
total_feature, total_label = read_total_data(data)
G_feature, G_label = read_total_data(G_data)
# read_label_data returns the label-1 rows first; label 1 is treated as the
# "bad" class throughout this script.
feature_bad, feature_good, label_bad, label_good = read_label_data(data)

# Min-max scaling to [0, 1]; the original note says it noticeably improves results.
# NOTE(review): each subset is scaled independently with its own per-column
# min/max, so feature_bad_01 / feature_good_01 are NOT on the same scale as
# total_feature_01 (the training scale) — confirm this is intended.
total_feature_01 = minmax_scale(total_feature)
feature_good_01 = minmax_scale(feature_good)
feature_bad_01 = minmax_scale(feature_bad)

# Plotting data: x_axis holds the injection ratios; for every model family we
# track the mean accuracy on the label-1 ("bad") subset and on the held-out
# test set, one value per ratio.
x_axis = []

y_bad_DNN = []
y_test_DNN = []

y_bad_DT = []
y_test_DT = []

y_bad_RF = []
y_test_RF = []

y_bad_LR = []
y_test_LR = []

y_bad_SVM = []
y_test_SVM = []

y_bad_KNN = []
y_test_KNN = []

y_bad_xgb = []
y_test_xgb = []

y_bad_lgb = []
y_test_lgb = []

# Sweep the ratio of injected generated label-1 samples: 0.0 .. 2.0 in 0.1
# steps (ratio is relative to the number of positives in the training split).
for rate in np.arange(0.0, 2.1, 0.1):
    x_axis.append(round(rate, 2))

    # Per-run accuracy collectors for this ratio; averaged after the inner loop.
    acc_bad_DNN = []
    acc_test_DNN = []

    acc_bad_DT = []
    acc_test_DT = []

    acc_bad_RF = []
    acc_test_RF = []

    acc_bad_LR = []
    acc_test_LR = []

    acc_bad_SVM = []
    acc_test_SVM = []

    acc_bad_KNN = []
    acc_test_KNN = []

    acc_bad_xgb = []
    acc_test_xgb = []

    acc_bad_lgb = []
    acc_test_lgb = []

    # Repeat training and average the scores.
    # NOTE(review): the original comment says 30 repetitions, but the loop
    # runs 10 times — confirm which is intended.
    for i in range(10):
        # Number of generated label-1 samples injected so far this run.
        attack_num = 0
        # Re-split train/test on every run; this (plus the models' own
        # unseeded randomness) is the source of run-to-run variation.
        X_train, X_test, Y_train, Y_test = train_test_split(total_feature_01,
                                                            total_label,
                                                            train_size=0.7,
                                                            test_size=0.3,
                                                            )
        X_in = X_train
        Y_in = Y_train
        # Inject the generated label-1 samples into this run's training set.
        # The 40000 bound is just "large enough"; the break below stops the
        # loop once the target ratio is reached.
        for j in range(40000):
            # Append one generated sample (feature row + its label).
            # NOTE(review): the feature index is offset by 1750 while the
            # label index is offset by 1 — features and labels appear to be
            # taken from different positions in G_data; confirm this pairing
            # is intentional.  Also assumes G_feature is already on the same
            # [0, 1] scale as the min-max-scaled training data — verify.
            X_in = np.r_[X_in, [G_feature[len(G_feature) - attack_num - 1750]]]
            Y_in = np.append(Y_in, G_label[len(G_label) - attack_num - 1])
            attack_num += 1
            # Stop once injected / (# positives in the training split) == rate.
            if round(attack_num / Y_train.count(1), 2) == round(rate, 2):
                break
        # Re-train every model from scratch each run; no random_state is set
        # on purpose so the models contribute their own randomness.
        # DNN-to-DNN transfer (requires normalized features).
        NN = MLPClassifier().fit(X_in, Y_in)
        acc_bad_DNN.append(NN.score(feature_bad_01, label_bad)*100)
        acc_test_DNN.append(NN.score(X_test, Y_test)*100)

        # DNN attacks do not transfer to decision trees (per the literature;
        # trees do not need normalization).
        DT = DecisionTreeClassifier().fit(X_in, Y_in)
        acc_bad_DT.append(DT.score(feature_bad_01, label_bad)*100)
        acc_test_DT.append(DT.score(X_test, Y_test)*100)

        # DNN transfer to random forest — gauges ensemble-method robustness.
        RF = RandomForestClassifier().fit(X_in, Y_in)
        acc_bad_RF.append(RF.score(feature_bad_01, label_bad)*100)
        acc_test_RF.append(RF.score(X_test, Y_test)*100)

        # DNN transfers to logistic regression (original note: in theory this
        # needs normalization and converges to ~20% accuracy, but without it
        # the result matches expectations, converging to ~50%).
        LR = LogisticRegression().fit(X_in, Y_in)
        acc_bad_LR.append(LR.score(feature_bad_01, label_bad)*100)
        acc_test_LR.append(LR.score(X_test, Y_test)*100)

        # DNN-to-SVM transfer works well (requires normalization).
        SVM = SVC().fit(X_in, Y_in)
        acc_bad_SVM.append(SVM.score(feature_bad_01, label_bad)*100)
        acc_test_SVM.append(SVM.score(X_test, Y_test)*100)

        # DNN-to-KNN transfer is poor, as noted in the literature
        # (requires normalization).
        KNN = KNeighborsClassifier().fit(X_in, Y_in)
        acc_bad_KNN.append(KNN.score(feature_bad_01, label_bad)*100)
        acc_test_KNN.append(KNN.score(X_test, Y_test)*100)

        # XGBoost
        xgb = XGBClassifier().fit(X_in, Y_in)
        acc_bad_xgb.append(xgb.score(feature_bad_01, label_bad) * 100)
        acc_test_xgb.append(xgb.score(X_test, Y_test) * 100)

        # LightGBM
        lgb = LGBMClassifier().fit(X_in, Y_in)
        acc_bad_lgb.append(lgb.score(feature_bad_01, label_bad) * 100)
        acc_test_lgb.append(lgb.score(X_test, Y_test) * 100)

    # Record the per-ratio means for plotting.
    y_bad_DNN.append(np.mean(acc_bad_DNN))
    y_test_DNN.append(np.mean(acc_test_DNN))

    y_bad_DT.append(np.mean(acc_bad_DT))
    y_test_DT.append(np.mean(acc_test_DT))

    y_bad_RF.append(np.mean(acc_bad_RF))
    y_test_RF.append(np.mean(acc_test_RF))

    y_bad_LR.append(np.mean(acc_bad_LR))
    y_test_LR.append(np.mean(acc_test_LR))

    y_bad_SVM.append(np.mean(acc_bad_SVM))
    y_test_SVM.append(np.mean(acc_test_SVM))

    y_bad_KNN.append(np.mean(acc_bad_KNN))
    y_test_KNN.append(np.mean(acc_test_KNN))

    y_bad_xgb.append(np.mean(acc_bad_xgb))
    y_test_xgb.append(np.mean(acc_test_xgb))

    y_bad_lgb.append(np.mean(acc_bad_lgb))
    y_test_lgb.append(np.mean(acc_test_lgb))


# Figure: a 4x2 grid, one panel per target model.  The eight panels were
# byte-identical except for the data series and the title, so they are drawn
# in a single loop instead of eight copy-pasted stanzas.
# Each entry: (panel title, label-1 accuracy series, test accuracy series).
_panels = [
    ("(a) attack against target model DNN", y_bad_DNN, y_test_DNN),
    ("(b) attack against target model DT", y_bad_DT, y_test_DT),
    ("(c) attack against target model RF", y_bad_RF, y_test_RF),
    ("(d) attack against target model LR", y_bad_LR, y_test_LR),
    ("(e) attack against target model SVM", y_bad_SVM, y_test_SVM),
    ("(f) attack against target model KNN", y_bad_KNN, y_test_KNN),
    ("(g) attack against target model XGBoost", y_bad_xgb, y_test_xgb),
    ("(h) attack against target model LightGBM", y_bad_lgb, y_test_lgb),
]

fig = plt.figure(figsize=(10, 16))
for pos, (title, y_bad, y_test) in enumerate(_panels, start=1):
    # subplot(4, 2, pos) is equivalent to the original subplot(42<pos>) form.
    plt.subplot(4, 2, pos)
    plt.plot(x_axis, y_bad, marker=".", color="b", label="Positive Accuracy", linewidth=0.5, markersize='8')
    plt.plot(x_axis, y_test, marker="s", color="k", label="Accuracy", linewidth=0.5, markersize='4')
    plt.xlabel(chr(948), fontsize=14)  # chr(948) is the Greek letter delta
    plt.ylabel('Accuracy (%)', fontsize=14)
    plt.ylim(0, 100)
    plt.xticks(np.linspace(0, 2, 11), fontsize=14)
    plt.yticks(fontsize=14)
    plt.title(title, fontsize=14)
    plt.legend(fontsize=14, loc=3)

fig.tight_layout()
plt.subplots_adjust(wspace=0.2, hspace=0.3)

plt.savefig("australian.svg", dpi=300, format="svg")

plt.show()

# Dump the raw mean-accuracy series for every model (same order as before:
# bad-subset series then test series, per model) so the numbers behind the
# figure can be copied from the console.
for series in (
    y_bad_DNN, y_test_DNN,
    y_bad_DT, y_test_DT,
    y_bad_RF, y_test_RF,
    y_bad_LR, y_test_LR,
    y_bad_SVM, y_test_SVM,
    y_bad_KNN, y_test_KNN,
    y_bad_xgb, y_test_xgb,
    y_bad_lgb, y_test_lgb,
):
    print(series)
