import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import minmax_scale, StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from mlxtend.plotting import plot_decision_regions
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier


def read_total_data(DataFrame):
    """Split a DataFrame into a feature matrix and a label vector.

    The last column is treated as the label; every other column is a
    feature.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Input data; one sample per row, label in the final column.

    Returns
    -------
    tuple[list[list], list]
        (feature rows, labels), one entry per DataFrame row.
    """
    # Positional indexing (.iloc) is correct for any index; the original
    # column-label lookup `DataFrame[col][row]` only worked with the
    # default RangeIndex and did O(rows x cols) scalar lookups.
    feature = DataFrame.iloc[:, :-1].values.tolist()
    label = DataFrame.iloc[:, -1].tolist()
    return feature, label


def read_label_data(DataFrame):
    """Partition a DataFrame's rows by their binary label.

    The last column is the label: rows whose label equals 1 go into the
    "1" lists, every other row into the "0" lists.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Input data; one sample per row, label in the final column.

    Returns
    -------
    tuple[list[list], list[list], list, list]
        (feature_1, feature_0, label_1, label_0).
    """
    feature_1, feature_0 = [], []
    label_1, label_0 = [], []

    # Extract all features/labels once with positional indexing; the
    # original duplicated the feature-extraction loop in both branches
    # and relied on the default RangeIndex.
    features = DataFrame.iloc[:, :-1].values.tolist()
    labels = DataFrame.iloc[:, -1].tolist()
    for feat, lab in zip(features, labels):
        if lab == 1:
            feature_1.append(feat)
            label_1.append(1)
        else:
            feature_0.append(feat)
            label_0.append(0)

    return feature_1, feature_0, label_1, label_0


# data = pd.read_csv("australian.dat", sep=" ", header=None)
# Load the encoded dataset and the generated (GAN) samples.
data = pd.read_csv("data_encoder_2.csv")
G_data = pd.read_csv("G_data_2.csv", header=None)
total_feature, total_label = read_total_data(data)

G_feature, G_label = read_total_data(G_data)
feature_bad, feature_good, label_bad, label_good = read_label_data(data)

# Min-max scale the features to [0, 1]; this markedly improves results.
total_feature_01 = minmax_scale(total_feature)
feature_good_01 = minmax_scale(feature_good)
feature_bad_01 = minmax_scale(feature_bad)

# Running count of generated class-1 samples appended so far.
attack_num = 0
# Re-split train/test on each run to introduce randomness.
# NOTE(review): random_state=0 makes this split deterministic, which
# contradicts the comment above — confirm the fixed seed is intended.
X_train, X_test, Y_train, Y_test = train_test_split(total_feature_01,
                                                    total_label,
                                                    train_size=0.7,
                                                    test_size=0.3,
                                                    random_state=0
                                                    )
X_in = X_train
Y_in = Y_train

# Standardize (zero mean / unit variance).
X_in_before = StandardScaler().fit_transform(X_in)

# PCA to 2 components; data must be standardized before PCA.
pca = PCA(n_components=2)
X_in_before = pca.fit_transform(X_in_before)

# Rescale the 2-D projection to [0, 1] for plotting.
X_in_before = minmax_scale(X_in_before)

Y_in_before = Y_in

# Decision boundary before any adversarial samples are added (delta = 0).
# Alternative classifiers kept for experimentation:
# DNN = MLPClassifier().fit(X_in_before, Y_in_before)
# DT = DecisionTreeClassifier().fit(X_in_before, Y_in_before)
# RF = RandomForestClassifier().fit(X_in_before, Y_in_before)
# LR = LogisticRegression().fit(X_in_before, Y_in_before)
# SVM = SVC().fit(X_in_before, Y_in_before)
# KNN = KNeighborsClassifier().fit(X_in_before, Y_in_before)
# xgb = XGBClassifier().fit(X_in_before, Y_in_before)
lgb = LGBMClassifier().fit(X_in_before, Y_in_before)

# Subplot (a): boundary on the 2-D PCA projection.
fig_1 = plt.figure(figsize=(8, 3))
plt.subplot(131)
fig = plot_decision_regions(X=X_in_before, y=np.array(Y_in_before), clf=lgb)
plt.xlim(-0.1, 1.1)
plt.ylim(-0.1, 1.1)
# SimHei font so the Chinese axis labels and titles render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.xlabel("第一主成分", fontsize=10)
plt.ylabel("第二主成分", fontsize=10)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
# chr(948) is the Greek letter delta.
plt.title("(a) "+chr(948)+"= 0 时的决策边界", fontsize=10)

# Append generated 1-samples until their count equals the number of
# 1-samples in the training set (delta = 1).
for j in range(1000000000):
    # Append one adversarial sample to the in-loop training set.
    # NOTE(review): features are taken at offset -1300 but labels at -1 —
    # confirm this asymmetry is intentional.
    X_in = np.r_[X_in, [G_feature[len(G_feature) - attack_num - 1300]]]
    Y_in = np.append(Y_in, int(G_label[len(G_label) - attack_num - 1]))
    attack_num += 1
    if round(attack_num / Y_train.count(1), 2) == 1.0:
        break

# Standardize the augmented training set.
X_in_after = StandardScaler().fit_transform(X_in)

# PCA to 2 components; data must be standardized before PCA.
pca = PCA(n_components=2)
X_in_after = pca.fit_transform(X_in_after)

# Rescale to [0, 1].
X_in_after = minmax_scale(X_in_after)

Y_in_after = Y_in

# Refit on the delta = 1 augmented data so subplot (b) shows the boundary
# *after* adding the adversarial samples.
# Alternative classifiers kept for experimentation:
# DNN = MLPClassifier().fit(X_in_after, Y_in_after)
# DT = DecisionTreeClassifier().fit(X_in_after, Y_in_after)
# RF = RandomForestClassifier().fit(X_in_after, Y_in_after)
# LR = LogisticRegression().fit(X_in_after, Y_in_after)
# SVM = SVC().fit(X_in_after, Y_in_after)
# KNN = KNeighborsClassifier().fit(X_in_after, Y_in_after)
# xgb = XGBClassifier().fit(X_in_after, Y_in_after)
# BUG FIX: was fit on (X_in_before, Y_in_before), which made subplot (b)
# redraw the delta = 0 boundary instead of the delta = 1 one.
lgb = LGBMClassifier().fit(X_in_after, Y_in_after)

# Subplot (b): boundary after adding delta = 1 adversarial samples.
plt.subplot(132)
fig = plot_decision_regions(X=X_in_after, y=np.array(Y_in_after), clf=lgb)
plt.xlim(-0.1, 1.1)
plt.ylim(-0.1, 1.1)
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.xlabel("第一主成分", fontsize=10)
plt.ylabel("第二主成分", fontsize=10)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.title("(b) "+chr(948)+"= 1 时的决策边界", fontsize=10)

# Keep appending until the generated 1-samples number twice the
# training-set 1-samples (delta = 2); attack_num carries over from above.
for j in range(100000000):
    # Append one adversarial sample to the in-loop training set.
    X_in = np.r_[X_in, [G_feature[len(G_feature) - attack_num - 1300]]]
    Y_in = np.append(Y_in, int(G_label[len(G_label) - attack_num - 1]))
    attack_num += 1
    if round(attack_num / Y_train.count(1), 2) == 2.0:
        break

# Standardize the twice-augmented training set.
X_in_later = StandardScaler().fit_transform(X_in)

# PCA to 2 components; data must be standardized before PCA.
pca = PCA(n_components=2)
X_in_later = pca.fit_transform(X_in_later)

# Rescale to [0, 1].
X_in_later = minmax_scale(X_in_later)

Y_in_later = Y_in

# Refit on the delta = 2 augmented data so subplot (c) shows the boundary
# after doubling the generated 1-samples.
# Alternative classifiers kept for experimentation:
# DNN = MLPClassifier().fit(X_in_later, Y_in_later)
# DT = DecisionTreeClassifier().fit(X_in_later, Y_in_later)
# RF = RandomForestClassifier().fit(X_in_later, Y_in_later)
# LR = LogisticRegression().fit(X_in_later, Y_in_later)
# SVM = SVC().fit(X_in_later, Y_in_later)
# KNN = KNeighborsClassifier().fit(X_in_later, Y_in_later)
# xgb = XGBClassifier().fit(X_in_later, Y_in_later)
# BUG FIX: was fit on (X_in_before, Y_in_before), which made subplot (c)
# redraw the delta = 0 boundary instead of the delta = 2 one.
lgb = LGBMClassifier().fit(X_in_later, Y_in_later)

# Subplot (c): boundary after adding delta = 2 adversarial samples.
plt.subplot(133)
fig = plot_decision_regions(X=X_in_later, y=np.array(Y_in_later), clf=lgb)
plt.xlim(-0.1, 1.1)
plt.ylim(-0.1, 1.1)
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.xlabel("第一主成分", fontsize=10)
plt.ylabel("第二主成分", fontsize=10)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.title("(c) "+chr(948)+"= 2 时的决策边界", fontsize=10)

fig_1.tight_layout()
plt.subplots_adjust()

# Export the figure as SVG, then display it.
plt.savefig("111.svg", dpi=300, format="svg")

plt.show()

