import pandas as pd
import numpy as np
import random
from lightgbm import LGBMClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import minmax_scale, MinMaxScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import tensorflow
from keras.layers import Input, Dense
from keras.models import Sequential, Model


def read_label_data(DataFrame):
    """Split a DataFrame into positive/negative feature rows and labels.

    The last column is treated as the binary class label; all preceding
    columns are features.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Feature columns followed by a trailing 0/1 label column.

    Returns
    -------
    tuple[list, list, list, list]
        (feature_1, feature_0, label_1, label_0): feature rows and labels
        of the positive (label == 1) and negative classes, in row order.
    """
    feature_1 = []
    feature_0 = []
    label_1 = []
    label_0 = []

    # Positional access (.iloc) replaces the original per-cell
    # column/label lookups: it is robust to non-default indexes (e.g.
    # after slicing) and avoids O(rows * cols) scalar lookups.
    for row in range(len(DataFrame)):
        features = DataFrame.iloc[row, :-1].tolist()
        if DataFrame.iloc[row, -1] == 1:
            feature_1.append(features)
            label_1.append(1)
        else:
            feature_0.append(features)
            label_0.append(0)

    return feature_1, feature_0, label_1, label_0


def get_list(list_list, index):
    """Select elements of *list_list* at the positions given in *index*.

    Parameters
    ----------
    list_list : sequence
        Source sequence.
    index : iterable of int
        Positions to pick, in the order they should appear in the result.

    Returns
    -------
    list
        The selected elements.
    """
    # The original no-op self-assignments (x = x) were removed and the
    # append loop collapsed into a comprehension.
    return [list_list[i] for i in index]


def read_total_data(DataFrame):
    """Split a DataFrame into feature rows and a label list.

    The last column is treated as the label; all preceding columns are
    features.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Feature columns followed by a trailing label column.

    Returns
    -------
    tuple[list, list]
        (feature, label): one feature list per row, and the label of each
        row, both in row order.
    """
    feature = []
    label = []
    # Positional .iloc access replaces the original per-cell
    # column/label lookups (robust to non-default indexes, far fewer
    # scalar lookups per row).
    for row in range(len(DataFrame)):
        feature.append(DataFrame.iloc[row, :-1].tolist())
        label.append(DataFrame.iloc[row, -1])
    return feature, label


# Split the data into attacker-held and enterprise-held portions.
def split_enterprise_attacker(DataFrame, phi):
    """Randomly partition rows between an attacker and the enterprise.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Full dataset (features + label column).
    phi : float
        Fraction of rows (0..1) assigned to the attacker.

    Returns
    -------
    tuple[pandas.DataFrame, pandas.DataFrame]
        (attacker_data, enterprise_data), each with a fresh RangeIndex;
        rows keep their original relative order.
    """
    attacker_data = []
    enterprise_data = []
    total_index = list(range(len(DataFrame)))
    # Same random.sample call as before (seeded runs stay reproducible);
    # wrapping the result in a set turns the per-row membership test from
    # O(k) into O(1).
    index = set(random.sample(total_index, int(len(DataFrame) * phi)))
    for row in range(len(DataFrame)):
        # Both branches previously built the identical row list; hoist it.
        row_values = DataFrame.iloc[row].tolist()
        if row in index:
            attacker_data.append(row_values)
        else:
            enterprise_data.append(row_values)

    return pd.DataFrame(attacker_data), pd.DataFrame(enterprise_data)


# Build a training set with a target class-imbalance ratio.
def imbalance_sample(feature, label, theta):
    """Down-sample the majority class toward #negatives / #positives == theta.

    Parameters
    ----------
    feature : numpy.ndarray
        Sample rows (each row must support ``.tolist()``).
    label : sequence of int
        Binary labels aligned with *feature* (0 = negative, 1 = positive).
    theta : float
        Desired negative-to-positive ratio. Must be non-zero.

    Returns
    -------
    tuple[numpy.ndarray, list]
        Resampled features and labels: all minority-class rows first, then
        the kept majority-class rows in original row order.

    Raises
    ------
    ZeroDivisionError
        If there are no positive samples (original behavior, kept).
    """
    positive_index = []
    negative_index = []
    positive_feature = []
    negative_feature = []
    positive_label = []
    negative_label = []
    for i, y in enumerate(label):
        if y == 0:
            negative_index.append(i)
            negative_feature.append(feature[i].tolist())
            negative_label.append(y)
        else:
            positive_index.append(i)
            positive_feature.append(feature[i].tolist())
            positive_label.append(y)

    if len(negative_index) / len(positive_index) > theta:
        # Too many negatives: keep every positive, sample negatives down.
        # Copy instead of aliasing so the class lists are not mutated.
        feature_tr = list(positive_feature)
        label_tr = list(positive_label)
        keep = set(random.sample(negative_index, int(len(positive_label) * theta)))
    else:
        # Too many positives: keep every negative, sample positives down.
        feature_tr = list(negative_feature)
        label_tr = list(negative_label)
        keep = set(random.sample(positive_index, int(len(negative_label) / theta)))

    # One pass in original row order; set membership makes this O(n)
    # instead of the original O(n * k) list scan.
    for i in range(len(feature)):
        if i in keep:
            feature_tr.append(feature[i].tolist())
            label_tr.append(label[i])

    return np.array(feature_tr), label_tr


# Collect the samples that the model misclassifies.
def select_error_sample(X_test, Y_test, model):
    """Return the misclassified samples as a DataFrame.

    Parameters
    ----------
    X_test : array-like
        Feature rows to query the model with.
    Y_test : sequence
        True labels aligned with *X_test*.
    model : object
        Fitted classifier exposing ``predict``.

    Returns
    -------
    pandas.DataFrame
        One row per misclassified sample: feature values followed by the
        true label in the last column.
    """
    predictions = model.predict(X_test).tolist()
    wrong_features = []
    wrong_labels = []
    for pos, (truth, guess) in enumerate(zip(Y_test, predictions)):
        if truth != guess:
            wrong_features.append(X_test[pos])
            wrong_labels.append(truth)

    return pd.concat(
        [pd.DataFrame(wrong_features), pd.DataFrame(wrong_labels)],
        axis=1,
        ignore_index=True,
        sort=False,
    )


# Per-model accuracy on the positive-class samples of the test set
test_positive_acc_DNN = []
test_positive_acc_DT = []
test_positive_acc_RF = []
test_positive_acc_LR = []
test_positive_acc_SVM = []
test_positive_acc_KNN = []
test_positive_acc_xgb = []
test_positive_acc_lgb = []

# Per-model accuracy on the GAN-generated adversarial samples
test_adv_acc_DNN = []
test_adv_acc_DT = []
test_adv_acc_RF = []
test_adv_acc_LR = []
test_adv_acc_SVM = []
test_adv_acc_KNN = []
test_adv_acc_xgb = []
test_adv_acc_lgb = []

# Load the dataset (alternative datasets kept commented for reference)
# data = pd.read_csv("australian.dat", sep=" ", header=None)
# data = pd.read_csv("data_encoder_2.csv")
data = pd.read_excel("default of credit card clients.xls")
# data = pd.read_csv("UKtomas.csv", header=None)
# data = pd.read_csv("PAKDD.csv")
# data = pd.read_csv("2016leadingclub.csv")

# Experiment parameters
# NOTE(review): "eposilon" looks like a typo of "epsilon"; kept because it
# is referenced by name inside the GAN training loop below.
eposilon = 1  # L2 acceptance radius for generated adversarial samples
phi = 0.05  # fraction of rows held by the attacker
theta = 2  # training-set imbalance ratio: negatives / positives

# Repeat the experiment 10 times and average the results
for xx in range(10):
    class GAN(object):
        """GAN that perturbs positive (label-1) samples into adversarial rows.

        NOTE(review): this class reads module-level globals assigned later
        in the loop body (`data`, `feature_bad`, `label_bad`, `GAN_sample`,
        `eposilon`) instead of receiving them as constructor arguments —
        it must be instantiated only after those are set.
        """
        def __init__(self):
            # Feature dimensionality: every column except the trailing label
            self.feature_shape = len(data.columns) - 1
            # Optimizer: Adam was selected — best observed performance
            optimizer = tensorflow.keras.optimizers.Adam(
                                                         learning_rate=0.001,
                                                         # beta_1=0.9,
                                                         # beta_2=0.999,
                                                         # epsilon=1e-07,
                                                         )
            # Build the discriminator (D) and configure its training
            self.discriminator = self.build_discriminator()
            self.discriminator.compile(loss='BinaryFocalCrossentropy',
                                       optimizer=optimizer,
                                       )
            # Build the generator (G)
            self.generator = self.build_generator()
            # Assemble the stacked GAN: G feeds D, with D frozen so that
            # only G is updated when the combined model is trained
            gan_input = Input(shape=(self.feature_shape, ))
            generated_feature = self.generator(gan_input)
            self.discriminator.trainable = False
            validity = self.discriminator(generated_feature)
            self.gan = Model(gan_input, validity)
            self.gan.compile(loss='BinaryFocalCrossentropy',
                             optimizer=optimizer,
                             )

        def build_generator(self):
            """Return a Model mapping a feature vector to a perturbed feature vector of the same size."""
            model = Sequential()
            model.add(Dense(self.feature_shape, activation="LeakyReLU"))
            model.add(Dense(32, activation="LeakyReLU"))
            model.add(Dense(64, activation="LeakyReLU"))
            model.add(Dense(32, activation="LeakyReLU"))
            model.add(Dense(self.feature_shape))

            noise = Input(shape=(self.feature_shape, ))
            generated_feature = model(noise)

            return Model(noise, generated_feature)

        def build_discriminator(self):
            """Return a Model mapping a feature vector to a real/fake probability in (0, 1)."""
            model = Sequential()
            model.add(Dense(self.feature_shape, activation="LeakyReLU"))
            model.add(Dense(int(self.feature_shape / 2), activation="LeakyReLU"))
            # model.add(Dense(32, activation="LeakyReLU"))
            model.add(Dense(1, activation="sigmoid"))

            feature = Input(shape=(self.feature_shape,))
            labels = model(feature)

            return Model(feature, labels)

        def train_GAN(self, epochs, batch_size, sample_interval, mumber_GAN_sample):
            """Adversarially train G and D, accumulating accepted adversarial rows.

            Appends each generated row (labelled 1) to the global
            `GAN_sample` DataFrame and stops once it holds at least
            `mumber_GAN_sample` rows, so `epochs` acts as an upper bound.
            NOTE(review): `mumber_GAN_sample` looks like a typo of
            "number_GAN_sample"; kept for interface compatibility.
            """
            global GAN_sample
            number = 0  # NOTE(review): assigned but never used afterwards

            # Pre-train D on the real positive samples
            self.discriminator.fit(np.array(feature_bad), np.array(label_bad), epochs=100)  # optional

            for epoch in range(epochs):
                real_feature_index = random.sample(range(0, len(feature_bad)), batch_size)
                real_feature = get_list(feature_bad, real_feature_index)
                real_label = get_list(label_bad, real_feature_index)

                # NOTE(review): these are aliases, not copies — appending to
                # mix_* below also grows real_feature/real_label this epoch.
                mix_feature = real_feature
                mix_label = real_label  # mix_label = []
                GAN_feature = []
                GAN_label = []
                latent_space_sample = []

                for x in range(batch_size):
                    latent_space_sample.append(real_feature[random.sample(range(0, batch_size), 1)[0]])
                    # Use G to perturb the original sample
                    mix_feature.append(self.generator.predict(np.array(latent_space_sample))[0].tolist())
                    mix_label.append(int(np.zeros(1)))
                    # Feed the original sample back as G's training input
                    GAN_feature.append(latent_space_sample[0])
                    GAN_label.append(int(np.ones(1)))

                # Train D: real 1-samples predicted as 1, generated samples as 0
                D_loss = self.discriminator.train_on_batch(np.array(mix_feature), np.array(mix_label))

                # Train G: make the stacked GAN classify generated samples as 1
                GAN_loss = self.gan.train_on_batch(np.array(GAN_feature), np.array(GAN_label))

                # if epoch % sample_interval == 0:
                #     print("epochs:{}".format(epoch))

                # if epoch >= 0.01*epochs:
                GAN_result = self.generator.predict(np.array(latent_space_sample))
                # print(np.sqrt(np.sum(np.square(latent_space_sample[0] - GAN_result))))
                # Alternatively, re-admit samples the discriminator misjudges:
                # if self.discriminator.predict(GAN_result)[0][0] > 0.5:
                # Accept only perturbations within L2 distance `eposilon`
                if np.sqrt(np.sum(np.square(latent_space_sample[0] - GAN_result))) < eposilon:  # epsilon parameter
                    # Undo the min-max normalization
                    # GAN_result = mm.inverse_transform(GAN_result)
                    # Emit the generated sample labelled as class 1
                    G_sample = np.append(GAN_result[0], 1)  # G_sample = np.append(GAN_result[0])
                    # print(G_sample)
                    # print(D_loss)
                    # print(GAN_loss)
                    new_data = pd.DataFrame([G_sample])
                    GAN_sample = pd.concat([GAN_sample, new_data], axis=0, ignore_index=True, sort=False)
                    # new_data.to_csv('evasion_Lendingclub_DT_GAN.csv',
                    #                 mode='a',
                    #                 header=False,
                    #                 float_format="%.5f",
                    #                 index=False
                    #                 )
                if len(GAN_sample) >= mumber_GAN_sample:
                    break


    # Split the data into attacker-held and enterprise-held portions
    attacker_data, enterprise_data = split_enterprise_attacker(data, phi)

    # --- Enterprise-side modeling ---
    """企业建模"""
    total_feature_enterprise, total_label_enterprise = read_total_data(enterprise_data)

    total_feature_01_enterprise = minmax_scale(total_feature_enterprise)

    X_train, X_test, Y_train, Y_test = train_test_split(total_feature_01_enterprise,
                                                        total_label_enterprise,
                                                        train_size=0.7,
                                                        test_size=0.3,
                                                        # random_state=0
                                                        )

    # Resample the training set to the imbalance ratio theta = negatives / positives
    X_train, Y_train = imbalance_sample(X_train, Y_train, theta)

    # Fit the enterprise's candidate classifiers on the resampled training set
    DNN = MLPClassifier().fit(X_train, Y_train)
    DT = DecisionTreeClassifier().fit(X_train, Y_train)
    RF = RandomForestClassifier().fit(X_train, Y_train)
    LR = LogisticRegression().fit(X_train, Y_train)
    SVM = SVC().fit(X_train, Y_train)
    KNN = KNeighborsClassifier().fit(X_train, Y_train)
    xgb = XGBClassifier().fit(X_train, Y_train)
    lgb = LGBMClassifier().fit(X_train, Y_train)

    # Separate the positive and negative samples of the test set
    data_test = pd.concat([pd.DataFrame(X_test), pd.DataFrame(Y_test)], axis=1, ignore_index=True, sort=False)
    feature_bad_test, feature_good_test, label_bad_test, label_good_test = read_label_data(data_test)

    # print("正类准确率：{}".format(RF.score(feature_bad_test, label_bad_test)))
    # print("负类准确率：{}".format(RF.score(feature_good_test, label_good_test)))
    # print("整体准确率：{}".format(RF.score(X_test, Y_test)))

    # --- Attacker queries the enterprise model ---
    """攻击者查询"""
    total_feature_attacker, total_label_attacker = read_total_data(attacker_data)

    total_feature_01_attacker = minmax_scale(total_feature_attacker)

    # Query the target model (RF) and keep the samples it misclassifies
    error_sample = select_error_sample(total_feature_01_attacker, total_label_attacker, RF)

    # --- GAN generates adversarial samples from the misclassified set ---
    """GAN生成对抗样本"""
    total_feature, total_label = read_total_data(error_sample)
    feature_bad, feature_good, label_bad, label_good = read_label_data(error_sample)
    # Min-max normalization fitted on the full misclassified feature set
    mm = MinMaxScaler()
    mm.fit(total_feature)
    total_feature = mm.transform(total_feature)
    feature_good = mm.transform(feature_good)
    feature_bad = mm.transform(feature_bad)

    # Generate as many adversarial rows as there are positive test samples;
    # the huge `epochs` value is an upper bound — train_GAN breaks early
    # once enough rows have been collected.
    n_Y_test_1 = Y_test.count(1)
    GAN_sample = pd.DataFrame()
    gan = GAN()
    gan.train_GAN(epochs=10000000000000, batch_size=1, sample_interval=10, mumber_GAN_sample=n_Y_test_1)

    # Evaluate security: accuracy on the real positive test samples
    test_positive_acc_DNN.append(DNN.score(feature_bad_test, label_bad_test))
    test_positive_acc_DT.append(DT.score(feature_bad_test, label_bad_test))
    test_positive_acc_RF.append(RF.score(feature_bad_test, label_bad_test))
    test_positive_acc_LR.append(LR.score(feature_bad_test, label_bad_test))
    test_positive_acc_SVM.append(SVM.score(feature_bad_test, label_bad_test))
    test_positive_acc_KNN.append(KNN.score(feature_bad_test, label_bad_test))
    test_positive_acc_xgb.append(xgb.score(feature_bad_test, label_bad_test))
    test_positive_acc_lgb.append(lgb.score(feature_bad_test, label_bad_test))

    total_feature_GAN, total_label_GAN = read_total_data(GAN_sample)

    # Accuracy on the GAN-generated adversarial samples (lower = more evasive)
    test_adv_acc_DNN.append(DNN.score(total_feature_GAN, total_label_GAN))
    test_adv_acc_DT.append(DT.score(total_feature_GAN, total_label_GAN))
    test_adv_acc_RF.append(RF.score(total_feature_GAN, total_label_GAN))
    test_adv_acc_LR.append(LR.score(total_feature_GAN, total_label_GAN))
    test_adv_acc_SVM.append(SVM.score(total_feature_GAN, total_label_GAN))
    test_adv_acc_KNN.append(KNN.score(total_feature_GAN, total_label_GAN))
    test_adv_acc_xgb.append(xgb.score(total_feature_GAN, total_label_GAN))
    test_adv_acc_lgb.append(lgb.score(total_feature_GAN, total_label_GAN))

    # Progress indicator: which repetition just finished
    print(xx)

# Report averaged results across all repetitions. The 16 copy-pasted
# print(sum(x)/len(x)) lines were collapsed into loops; model order and
# the printed values are unchanged.
print("测试集正类准确率：")
for scores in (test_positive_acc_DNN, test_positive_acc_DT,
               test_positive_acc_RF, test_positive_acc_LR,
               test_positive_acc_SVM, test_positive_acc_KNN,
               test_positive_acc_xgb, test_positive_acc_lgb):
    print(sum(scores) / len(scores))

print("对抗样本准确率：")
for scores in (test_adv_acc_DNN, test_adv_acc_DT,
               test_adv_acc_RF, test_adv_acc_LR,
               test_adv_acc_SVM, test_adv_acc_KNN,
               test_adv_acc_xgb, test_adv_acc_lgb):
    print(sum(scores) / len(scores))
