import pandas as pd
from lightgbm import LGBMClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import minmax_scale
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier


def read_total_data(DataFrame):
    """Split a DataFrame into feature rows and labels.

    The last column is treated as the class label; every preceding
    column is a feature.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Data whose final column holds the label.

    Returns
    -------
    tuple[list[list], list]
        (feature, label) — ``feature`` is one list per row containing
        that row's feature values; ``label`` is a flat list of labels.
    """
    # Vectorized positional slicing replaces the original per-cell
    # DataFrame[col][row] lookups: far faster, and correct even when the
    # DataFrame does not carry the default RangeIndex (label-based row
    # lookup would raise or mis-select there).
    feature = DataFrame.iloc[:, :-1].values.tolist()
    label = DataFrame.iloc[:, -1].tolist()
    return feature, label


def read_label_data(DataFrame):
    """Split a DataFrame into positive (label == 1) and negative rows.

    The last column is treated as the class label; rows whose label
    equals 1 go to the ``*_1`` outputs, every other row to ``*_0``.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Data whose final column holds the label.

    Returns
    -------
    tuple[list[list], list[list], list, list]
        (feature_1, feature_0, label_1, label_0) — feature lists are one
        list per row; label_1 is all 1s, label_0 all 0s, matching the
        lengths of their feature lists.
    """
    # One boolean mask replaces the original duplicated per-branch loops
    # and the O(n*m) per-cell label-based lookups; iloc keeps this
    # correct for non-default indexes as well.
    features = DataFrame.iloc[:, :-1]
    is_positive = DataFrame.iloc[:, -1] == 1

    feature_1 = features[is_positive].values.tolist()
    feature_0 = features[~is_positive].values.tolist()
    label_1 = [1] * len(feature_1)
    label_0 = [0] * len(feature_0)

    return feature_1, feature_0, label_1, label_0


# Alternative datasets used in earlier runs:
# data = pd.read_csv("australian.dat", sep=" ", header=None)
# data = pd.read_csv("data_encoder_2.csv")
# data = pd.read_excel("default of credit card clients.xls")
# data = pd.read_csv("UKtomas.csv", header=None)
# data = pd.read_csv("PAKDD.csv")
data = pd.read_csv("2016leadingclub.csv")

total_feature, total_label = read_total_data(data)
feature_bad, feature_good, label_bad, label_good = read_label_data(data)

# Min-max normalisation of the features to [0, 1].
total_feature_01 = minmax_scale(total_feature)
feature_bad_01 = minmax_scale(feature_bad)

# 70/30 split with a fixed seed so runs are reproducible.
X_train, X_test, Y_train, Y_test = train_test_split(
    total_feature_01, total_label, train_size=0.7, test_size=0.3, random_state=0)
# feature_bad, feature_good, label_bad, label_good = read_label_data(test_Data)

# Fit one model of each family on the same training split.
DNN = MLPClassifier(random_state=0).fit(X_train, Y_train)
DT = DecisionTreeClassifier(random_state=0).fit(X_train, Y_train)
RF = RandomForestClassifier(random_state=0).fit(X_train, Y_train)
LR = LogisticRegression(random_state=0).fit(X_train, Y_train)
SVM = SVC(random_state=0).fit(X_train, Y_train)
KNN = KNeighborsClassifier().fit(X_train, Y_train)
xgb = XGBClassifier(random_state=0).fit(X_train, Y_train)
lgb = LGBMClassifier(random_state=0).fit(X_train, Y_train)

# Evaluation order matters for the printed output — keep it fixed.
models = [DNN, DT, RF, LR, SVM, KNN, xgb, lgb]

# Accuracy of each model on the (normalised) bad-loan rows only.
for clf in models:
    print(clf.score(feature_bad_01, label_bad))
print("--------------------------------")

# Accuracy of each model on adversarial examples produced by the GAN.
GAN_data = pd.read_csv("evasion_Lendingclub_DT_GAN.csv")
total_feature_GAN, total_label_GAN = read_total_data(GAN_data)

for clf in models:
    print(clf.score(total_feature_GAN, total_label_GAN))