import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import minmax_scale
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier


def read_total_data(DataFrame):
    """Split a DataFrame into features and labels.

    The last column is treated as the label; every other column is a feature.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Rows of samples; label in the final column.

    Returns
    -------
    feature : list[list]
        One feature row per sample (all columns except the last).
    label : list
        The last-column value of each sample.
    """
    # Vectorized extraction via iloc: positional, so it also works when the
    # DataFrame does not have the default RangeIndex (the original per-cell
    # DataFrame[col][row] lookup relied on labels matching positions).
    feature = DataFrame.iloc[:, :-1].values.tolist()
    label = DataFrame.iloc[:, -1].tolist()
    return feature, label


def read_label_data(DataFrame):
    """Split a DataFrame into positive (label == 1) and negative samples.

    The last column is treated as the label; every other column is a feature.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Rows of samples; 0/1 label in the final column.

    Returns
    -------
    feature_1 : list[list]
        Feature rows whose label equals 1.
    feature_0 : list[list]
        Feature rows whose label does not equal 1.
    label_1 : list[int]
        A 1 for every positive row.
    label_0 : list[int]
        A 0 for every other row.
    """
    # The original duplicated the feature-extraction loop in both branches of
    # the if/else; a boolean mask expresses the split once and positionally
    # (iloc), so it is also safe for non-default indexes.
    features = DataFrame.iloc[:, :-1]
    positive = DataFrame.iloc[:, -1] == 1

    feature_1 = features[positive].values.tolist()
    feature_0 = features[~positive].values.tolist()
    label_1 = [1] * len(feature_1)
    label_0 = [0] * len(feature_0)

    return feature_1, feature_0, label_1, label_0


# Alternative datasets tried in earlier experiments (kept for reference):
# data = pd.read_csv("australian.dat", sep=" ", header=None)
# data = pd.read_excel("信用卡交易数据.xlsx")
# data = pd.read_csv("data_encoder.csv")
# data = pd.read_excel("default of credit card clients.xls")
# Real dataset: features plus a 0/1 label in the last column (headerless CSV).
data = pd.read_csv("UKtomas.csv", header=None)
# Generated samples with the same column layout — presumably adversarial/GAN
# output used for poisoning; verify against how G_data.csv was produced.
G_data = pd.read_csv("G_data.csv", header=None)
total_feature, total_label = read_total_data(data)
G_feature, G_label = read_total_data(G_data)
# label 1 -> "bad", label 0 -> "good" (read_label_data returns 1s first).
feature_bad, feature_good, label_bad, label_good = read_label_data(data)

# Min-max scale features to [0, 1]; normalization noticeably improves results.
total_feature_01 = minmax_scale(total_feature)
feature_good_01 = minmax_scale(feature_good)
feature_bad_01 = minmax_scale(feature_bad)

# Plotting containers: x is the poison ratio, y curves are mean accuracies.
x_axis = []
y_good = []
y_bad = []
y_test = []

# The ratio of added generated samples is capped at 1.5x; the original note
# says "at most as many as the training set's 1-samples".
for rate in np.arange(0.0, 1.6, 0.1):
    x_axis.append(round(rate, 2))
    acc_good = []
    acc_bad = []
    acc_test = []
    # Train 10 times and average the accuracies.
    for i in range(10):
        # Count of generated poison samples added so far in this run.
        attack_num = 0
        # Re-split train/test every run — this is the source of randomness.
        X_train, X_test, Y_train, Y_test = train_test_split(total_feature_01,
                                                            total_label,
                                                            train_size=0.7,
                                                            test_size=0.3,
                                                            )
        X_in = X_train
        Y_in = Y_train
        # Add generated samples up to the target ratio.
        # NOTE(review): the original comment says the cap equals the number of
        # 1-samples, but the code counts label-0 samples (Y_train.count(0)) —
        # confirm which is intended.
        for j in range(Y_train.count(0)):
            # Append one adversarial sample to the in-loop training set.
            # NOTE(review): the -6400 offset selects a fixed region of G_data;
            # magic number — verify against how G_data.csv was generated.
            X_in = np.r_[X_in, [G_feature[len(G_feature) - attack_num - 6400]]]
            Y_in = np.append(Y_in, G_label[len(G_label) - attack_num - 6400])
            attack_num += 1
            # Stop once the poison ratio reaches the target rate.
            # NOTE(review): equality of two round(..., 2) values may never hold
            # for some counts (and matches once at rate 0.0 only after one
            # sample is already added) — confirm this exit condition.
            if round(attack_num / Y_train.count(0), 2) == round(rate, 2):
                break
        # Retrain a fresh model each inner iteration (10 models averaged);
        # random_state is left unset so the model itself adds randomness.
        # DNN-to-DNN transfer.
        NN = MLPClassifier().fit(X_in, Y_in)
        acc_good.append(NN.score(feature_good_01, label_good))
        acc_bad.append(NN.score(feature_bad_01, label_bad))
        acc_test.append(NN.score(X_test, Y_test))

        # DNN does not transfer to decision trees (reported in the literature).
        # DT = DecisionTreeClassifier().fit(X_in, Y_in)
        # acc_good.append(DT.score(feature_good_01, label_good))
        # acc_bad.append(DT.score(feature_bad_01, label_bad))
        # acc_test.append(DT.score(X_test, Y_test))

        # DNN can transfer to logistic regression.
        # LR = LogisticRegression().fit(X_in, Y_in)
        # acc_good.append(LR.score(feature_good_01, label_good))
        # acc_bad.append(LR.score(feature_bad_01, label_bad))
        # acc_test.append(LR.score(X_test, Y_test))

        # DNN transfers poorly to SVM (also supported by the literature).
        # SVM = SVC().fit(X_in, Y_in)
        # acc_good.append(SVM.score(feature_good_01, label_good))
        # acc_bad.append(SVM.score(feature_bad_01, label_bad))
        # acc_test.append(SVM.score(X_test, Y_test))

        # DNN transfers poorly to KNN as well.
        # KNN = KNeighborsClassifier().fit(X_in, Y_in)
        # acc_good.append(KNN.score(feature_good_01, label_good))
        # acc_bad.append(KNN.score(feature_bad_01, label_bad))
        # acc_test.append(KNN.score(X_test, Y_test))

    y_good.append(np.mean(acc_good))
    y_bad.append(np.mean(acc_bad))
    y_test.append(np.mean(acc_test))

# Plot mean accuracy on the good/bad/test sets against the poison ratio.
plt.title("poisoning attack")
plt.xlabel('rate')
plt.ylabel('acc')
# plt.ylim(0.5, 1)
plt.plot(x_axis, y_good, marker="*", color="c", label="good")
plt.plot(x_axis, y_bad, marker="*", color="r", label="bad")
# Bug fix: legend label was misspelled "tset".
plt.plot(x_axis, y_test, marker="*", color="b", label="test")
plt.legend()

plt.show()
