import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import minmax_scale


def read_total_data(DataFrame):
    """Split a DataFrame into feature rows and labels.

    The last column is treated as the label; every other column is a feature.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Table whose final column holds the class label.

    Returns
    -------
    (list[list], list)
        ``feature`` — one list of feature values per row;
        ``label`` — the last-column value for each row.
    """
    # Vectorized slicing replaces the original per-element
    # DataFrame[col][row] loop, which paid pandas indexing overhead
    # O(rows * cols) times.
    feature = DataFrame.iloc[:, :-1].values.tolist()
    label = DataFrame.iloc[:, -1].tolist()
    return feature, label


def read_label_data(DataFrame):
    """Partition a DataFrame's rows by their label (last column).

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Table whose final column holds the class label; rows with label
        ``1`` go to the first group, every other row to the second.

    Returns
    -------
    (list[list], list[list], list, list)
        ``feature_1, feature_0, label_1, label_0`` — features and labels
        of the label-1 rows and of all remaining rows, respectively.
    """
    feature_1 = []
    feature_0 = []
    label_1 = []
    label_0 = []

    # Hoisted out of the loop: the label column and the feature columns.
    label_col = DataFrame.columns[-1]
    feature_cols = DataFrame.columns[:-1]

    for row in range(len(DataFrame)):
        # Build the feature vector once — the original duplicated this
        # extraction loop verbatim in both branches of the if/else.
        temp_feature = [DataFrame[col][row] for col in feature_cols]
        if DataFrame[label_col][row] == 1:
            feature_1.append(temp_feature)
            label_1.append(1)
        else:
            feature_0.append(temp_feature)
            label_0.append(0)

    return feature_1, feature_0, label_1, label_0


# Load the Australian credit dataset (space-separated, no header) and the
# generated (adversarial) samples; both files must sit next to this script.
data = pd.read_csv("australian.dat", sep=" ", header=None)
G_data = pd.read_csv("G_data.csv", header=None)
total_feature, total_label = read_total_data(data)
G_feature, G_label = read_total_data(G_data)
feature_bad, feature_good, label_bad, label_good = read_label_data(data)

# Min-max normalization noticeably improves results.
total_feature_01 = minmax_scale(total_feature)
feature_good_01 = minmax_scale(feature_good)
feature_bad_01 = minmax_scale(feature_bad)

# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(total_feature_01,
                                                    total_label,
                                                    train_size=0.7,
                                                    test_size=0.3,
                                                    random_state=0)

# Plot data: x = poisoning rate, y = accuracy on the label-1 subset.
x_axis = []
y_axis = []

# The number of injected generated label-1 samples is capped at the number
# of label-1 samples already present in the training set (rate 0.0 .. 1.0).
for rate in np.arange(0.0, 1.1, 0.1):
    x_axis.append(round(rate, 2))
    # Count of generated label-1 samples injected so far.
    attack_num = 0
    # Re-initialize the inner training set on every outer iteration.
    X_in = X_train
    Y_in = Y_train
    # Inject at most as many generated samples as there are label-1 samples
    # in the training set, drawn from the END of G_feature/G_label.
    for i in range(Y_train.count(1)):
        # Append one adversarial sample to the inner training set.
        X_in = np.r_[X_in, [G_feature[len(G_feature)-attack_num-1]]]
        Y_in = np.append(Y_in, G_label[len(G_label)-attack_num-1])
        attack_num += 1
        # NOTE(review): at rate 0.0 at least one sample is still injected,
        # because the break can only fire AFTER an append, once
        # attack_num/count rounds to the target rate — confirm intended.
        if round(attack_num / Y_train.count(1), 2) == round(rate, 2):
            break
    # Retrain the model from scratch at every poisoning rate.
    NN = MLPClassifier(random_state=0).fit(X_in, Y_in)
    # Score on the (normalized) label-1 subset of the full dataset.
    y_axis.append(NN.score(feature_bad_01, label_bad))

    # Alternative victim model (decision tree), kept for experimentation:
    # DT = DecisionTreeClassifier(random_state=0).fit(X_in, Y_in)
    # y_axis.append(DT.score(feature_bad_01, label_bad))

plt.title("poisoning attack")
plt.xlabel('rate')
plt.ylabel('acc')
plt.ylim(0.5, 1)
plt.plot(x_axis, y_axis, marker="*", color="c")
plt.legend("s")

plt.show()
