import os
import numpy as np
import pandas as pd
import joblib
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_squared_error, roc_curve, auc, classification_report
from sklearn.preprocessing import StandardScaler, LabelBinarizer, LabelEncoder
from sklearn.model_selection import train_test_split, cross_val_score
import matplotlib.pyplot as plt
"""
二分类
分类器更全面
"""
HOUSING_PATH = "train/datasets/kddcup"
save_file_name = "train/test3/"


def load_dataset(housing_path=HOUSING_PATH):
    """Read the KDD Cup 10%-sample file under *housing_path* into a DataFrame."""
    data_file = os.path.join(housing_path, "kddcup.data_10_percent_corrected")
    return pd.read_csv(data_file)


class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Pipeline step that extracts the *attribute_names* column(s) of a DataFrame.

    transform() returns the underlying NumPy values so downstream sklearn
    estimators receive plain arrays instead of DataFrames.  A single string
    selector yields a 1-D array; a list of names yields a 2-D array.
    """

    def __init__(self, attribute_names):
        # Column label or list of labels to select.
        self.attribute_names = attribute_names

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X, y=None):
        return X[self.attribute_names].values

    # NOTE: the previous hand-written fit_transform override (which skipped
    # fit entirely) was removed; TransformerMixin already provides an
    # equivalent fit-then-transform with the same result, so callers see
    # no behavioral change.


# One-hot encode the cat_attrib column of `data`: same number of rows, one
# output column per distinct category.  The binarizer is fitted on the same
# column of `whole_dataset` so train/test splits share a single category
# vocabulary.
def getOneHotCode(data, cat_attrib, whole_dataset):
    column = DataFrameSelector(cat_attrib)
    binarizer = LabelBinarizer()
    binarizer.fit(column.transform(whole_dataset))
    return binarizer.transform(column.transform(data))



def getLabelEncode(data, cat_attrib, whole_dataset):
    """Integer-encode the *cat_attrib* column of *data*.

    The LabelEncoder is fitted on the same column of *whole_dataset* so every
    split shares one label vocabulary.

    Returns:
        (codes, normal_idx): the encoded 1-D array, and the integer code
        assigned to the "normal." class — or -1 if that class is absent.
        (Previously the function raised NameError in the absent case because
        the result variable was only bound inside the loop.)
    """
    selector = DataFrameSelector(cat_attrib)
    cat_values = selector.transform(data)
    encoder = LabelEncoder()
    encoder.fit(selector.transform(whole_dataset).ravel())
    codes = encoder.transform(cat_values.ravel())
    # -1 sentinel matches the caller's NORMAL_NUM default instead of crashing
    # when "normal." does not appear in the fitted classes.
    normal_idx = -1
    for idx, cls in enumerate(encoder.classes_):
        if cls == "normal.":
            normal_idx = idx
            break
    return codes, normal_idx



def full_pipeline(data, num_attribs, cat_attribs, whole_dataset):
    """Build the feature matrix: scaled numeric columns + one-hot categoricals.

    Fix: the imputer and scaler were previously fitted on *data* itself, so
    the train and test splits were standardized with *different* medians and
    means/stds (while the one-hot encoders were already fitted on
    *whole_dataset*).  Numeric preprocessing is now fitted on *whole_dataset*
    too, giving every split a consistent feature space.

    Returns a 2-D NumPy array with len(num_attribs) scaled columns followed by
    one one-hot block per entry of cat_attribs.
    """
    selector = DataFrameSelector(num_attribs)
    num_all = selector.transform(whole_dataset)
    num_data = selector.transform(data)

    imputer = SimpleImputer(strategy="median")
    imputer.fit(num_all)  # medians from the full dataset
    num_imputed = imputer.transform(num_data)

    std_scaler = StandardScaler()
    std_scaler.fit(imputer.transform(num_all))  # mean/std from the full dataset
    features = std_scaler.transform(num_imputed)

    # Append one one-hot block per categorical attribute.
    for attrib in cat_attribs:
        features = np.c_[features, getOneHotCode(data, attrib, whole_dataset)]
    return features


def model(clf, data_prepared, data_labels, test_prepared, test_labels, save_name="my_model.pkl"):
    """Fit *clf*, report test accuracy plus 3-fold cross-validated metrics,
    and persist the fitted estimator under save_file_name + save_name."""
    from sklearn.model_selection import cross_val_predict
    from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score

    print("\n", save_name)

    clf.fit(data_prepared, data_labels)
    predictions = clf.predict(test_prepared)
    accuracy = sum(predictions == test_labels) / len(predictions)
    print("准确率：\t", accuracy)

    # Out-of-fold predictions on the training set drive the metrics below.
    oof_pred = cross_val_predict(clf, data_prepared, data_labels, cv=3)
    print("混淆矩阵：\t", confusion_matrix(data_labels, oof_pred))
    print("精度：\t", precision_score(data_labels, oof_pred))
    print("召回率：\t", recall_score(data_labels, oof_pred))
    print("F1分数：\t", f1_score(data_labels, oof_pred))

    # Persist the model; reload later with joblib.load(save_file_name + save_name).
    joblib.dump(clf, save_file_name + save_name)

def plot_ROC(q_y_test, q_y_score):
    """Plot the ROC curve for the binary task.

    q_y_test and q_y_score are 2-D (one column per class); with
    q_n_classes == 1 only column 0 is used.
    """
    print(q_y_test.shape)
    q_n_classes = 1  # binary problem: a single ROC curve
    # Compute the ROC curve and AUC for each class (here only class 0).
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(q_n_classes):
        fpr[i], tpr[i], _ = roc_curve(q_y_test[:, i], q_y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    plt.figure()
    lw = 2
    # BUG FIX: the loop above only fills index 0, but the original code
    # plotted fpr[2]/tpr[2]/roc_auc[2], which raised KeyError.
    plt.plot(fpr[0], tpr[0], color='darkorange',
             lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[0])
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.show()


def my_model(clf, data_prepared, data_labels, test_prepared, test_labels, save_name="my_model.pkl"):
    """Fit *clf* on the training data, print its test-set accuracy, and save
    the fitted estimator under save_file_name + save_name.

    Lighter-weight sibling of model(): no cross-validated metrics here.
    """
    print("\n", save_name)

    clf.fit(data_prepared, data_labels)
    predictions = clf.predict(test_prepared)
    hit_rate = sum(predictions == test_labels) / len(predictions)
    print("准确率：\t", hit_rate)

    # Persist the estimator; reload with joblib.load(save_file_name + save_name).
    joblib.dump(clf, save_file_name + save_name)



def my_plot_roc(q_labels, q_predict_prob):
    """Draw a binary ROC curve; column 1 of q_predict_prob holds the
    positive-class probabilities."""
    print(q_predict_prob.shape)
    fpr, tpr, _ = roc_curve(q_labels, q_predict_prob[:, 1])
    area = auc(fpr, tpr)
    plt.title('ROC')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.4f' % area)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.ylabel('TPR')
    plt.xlabel('FPR')
    plt.show()




def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Overlay precision and recall as functions of the decision threshold.

    precisions/recalls carry one more entry than thresholds (sklearn's
    precision_recall_curve convention), hence the [:-1] trim.
    """
    for curve, fmt, name in ((precisions, "b--", "Precision"),
                             (recalls, "g-", "Recall")):
        plt.plot(thresholds, curve[:-1], fmt, label=name)
    plt.xlabel("Threshold")
    plt.legend(loc="upper left")
    plt.ylim([0, 1])


def save_info_file(file_name, content):
    """Write str(content) plus a trailing newline to the file "kddcup_<file_name>".

    Fix: the original swapped sys.stdout globally with no try/finally, so an
    exception during printing left the process with a closed stdout.  Printing
    straight to the file handle avoids the global swap entirely and produces
    byte-identical output.
    """
    with open("kddcup_" + file_name, 'w') as output_file:
        print(content, file=output_file)

def save_labels(dataset):
    """Dump the schema and a sample row of *dataset* to the file "kddcup_labels".

    DataFrame.info() writes directly to sys.stdout, so its output is captured
    with contextlib.redirect_stdout instead of swapping sys.stdout by hand —
    the original swap had no try/finally and left stdout broken if any print
    raised.  Output is byte-identical to the original.
    """
    from contextlib import redirect_stdout

    with open("kddcup_labels", 'w') as output_file, redirect_stdout(output_file):
        # info() prints the schema itself and returns None (also printed).
        print(dataset.info())
        print("\n\nexample:\n")
        print(dataset.loc[0, :])
        print("\n\nexample:\n")
        first_row = dataset.loc[0, :].values
        print(first_row.shape)
        print(type(first_row))
        print(first_row)
        print("\n\nexample:\n")
        print(dict(dataset.loc[0, :]))

if __name__ == '__main__':
    # Sentinel: integer code of the "normal." class, filled in by getLabelEncode.
    NORMAL_NUM = -1
    kddcup = load_dataset()  # <class 'pandas.core.frame.DataFrame'>
    # print(kddcup.info())  # (494021, 42)
    # print(kddcup["class"].value_counts())
    # Dump schema and a sample row to "kddcup_labels" for offline inspection.
    save_labels(kddcup)

    # Integer-encode the target column in place.
    labels, NORMAL_NUM = getLabelEncode(kddcup, ["class"], kddcup)
    kddcup["class"] = labels

    # 3 categorical feature columns; the remaining 38 are numeric.
    cat_attribs = ["protocol_type", "service", "flag"]  # 3
    num_attribs = list(kddcup)  # 42 - 3 - 1 = 38
    num_attribs.remove("class")
    for ele in cat_attribs:
        num_attribs.remove(ele)
    # print(kddcup["class"].value_counts())

    # Stratified sampling: preserve the class distribution in both splits.
    from sklearn.model_selection import StratifiedShuffleSplit
    split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
    strat_train_set, strat_test_set = [], []
    # n_splits=1, so this loop body runs exactly once.
    for train_index, test_index in split.split(kddcup, kddcup["class"]):
        strat_train_set = kddcup.loc[train_index]
        strat_test_set = kddcup.loc[test_index]

    train_set, test_set = strat_train_set, strat_test_set
    # print(train_set["class"].value_counts())
    # print(test_set["class"].value_counts())
    # (395216, 42) (98805, 42)
    # train_set, test_set = train_test_split(kddcup, test_size=0.2, random_state=42)
    # print(train_set["class"].value_counts() / len(train_set["class"]))
    # print(test_set["class"].value_counts() / len(test_set["class"]))
    train_set_labels = train_set["class"].copy()
    train_set = train_set.drop("class", axis=1)  # (395216, 41)
    test_set_labels = test_set["class"].copy()
    test_set = test_set.drop("class", axis=1)  # (98805, 41)

    # Numeric scaling + one-hot encoding -> (395216, 118) (98805, 118)
    train_set_prepared = full_pipeline(train_set, num_attribs, cat_attribs, kddcup)
    test_set_prepared = full_pipeline(test_set, num_attribs, cat_attribs, kddcup)

    # Classifiers
    # Binary target: True for "normal." traffic, False for every attack class.
    print(NORMAL_NUM)
    # NOTE(review): "noraml" is a typo for "normal", but renaming would touch
    # every call below, so it is kept as-is.
    y_train_set_labels_noraml = (train_set_labels == NORMAL_NUM)
    y_test_set_labels_normal = (test_set_labels == NORMAL_NUM)

    # KNN Classifier
    from sklearn.neighbors import KNeighborsClassifier
    knb_clf = KNeighborsClassifier()
    my_model(knb_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal, save_name="knb_clf.pkl")

    # Logistic Regression Classifier
    # ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
    # from sklearn.linear_model import LogisticRegression
    # lgr_clf = LogisticRegression(penalty='l2')
    # my_model(lgr_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal, save_name="lgr_clf.pkl")

    # Random Forest Classifier
    from sklearn.ensemble import RandomForestClassifier
    rdm_forest_clf = RandomForestClassifier(n_estimators=8)
    my_model(rdm_forest_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="rdm_forest_clf.pkl")
    #
    # # Decision Tree Classifier
    from sklearn import tree
    tree_decision_clf = tree.DecisionTreeClassifier()
    my_model(tree_decision_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="tree_decision_clf.pkl")

    # GBDT(Gradient Boosting Decision Tree) Classifier
    # NOTE(review): `model` (full CV metrics) and `my_model` (accuracy only)
    # are mixed below — presumably intentional; confirm which estimators
    # really need the extra 3-fold evaluation.
    from sklearn.ensemble import GradientBoostingClassifier
    GBDT_clf = GradientBoostingClassifier(n_estimators=200)
    model(GBDT_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="GBDT_clf.pkl")

    # AdaBoost Classifier
    from sklearn.ensemble import AdaBoostClassifier
    AdaBoost_clf = AdaBoostClassifier()
    model(AdaBoost_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="AdaBoost_clf.pkl")

    # GaussianNB
    from sklearn.naive_bayes import GaussianNB
    bayes_clf = GaussianNB()
    my_model(bayes_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="bayes_clf.pkl")

    # Linear Discriminant Analysis
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    line_dis_clf = LinearDiscriminantAnalysis()
    model(line_dis_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="line_dis_clf.pkl")

    # Quadratic Discriminant Analysis
    # UserWarning: Variables are collinear warnings.warn("Variables are collinear")
    # from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
    # qd_clf = QuadraticDiscriminantAnalysis()
    # model(qd_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          # save_name="qd_clf.pkl")

    # SVM Classifier
    from sklearn.svm import SVC
    svm_rbf_clf = SVC(kernel='rbf', probability=True)
    model(svm_rbf_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="svm_rbf_clf.pkl")

