import os
import numpy as np
import pandas as pd
import joblib
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_squared_error, roc_curve, auc, classification_report
from sklearn.preprocessing import StandardScaler, LabelBinarizer, LabelEncoder
from sklearn.model_selection import train_test_split, cross_val_score
import matplotlib.pyplot as plt

"""
二分类
# 二元分类器
y_train_set_labels_noraml = (train_set_labels == 11)
y_test_set_labels_normal = (test_set_labels == 11)

from sklearn.linear_model import SGDClassifier

# 使用朴素贝叶斯进行训练
from sklearn.naive_bayes import GaussianNB

# 决策树
from sklearn.tree import DecisionTreeClassifier

# 支持向量机
from sklearn import svm

from sklearn.svm import LinearSVC

# KNN Classifier
from sklearn.neighbors import KNeighborsClassifier

# Logistic Regression Classifier
from sklearn.linear_model import LogisticRegression
"""

# Directory that holds the KDD Cup 99 data file.
HOUSING_PATH = "datasets/kddcup"
# NOTE(review): defined but never used anywhere in this file — candidate for removal.
save_file_name = "test2/"

def load_dataset(housing_path=HOUSING_PATH):
    """Load the KDD Cup 99 10%% sample and return it as a DataFrame."""
    data_file = os.path.join(housing_path, "kddcup.data_10_percent_corrected")
    return pd.read_csv(data_file)


class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Sklearn-style transformer that pulls the given columns out of a
    DataFrame and returns them as a plain NumPy array."""

    def __init__(self, attribute_names):
        # A single column name (str) or a list of column names.
        self.attribute_names = attribute_names

    def fit(self, X, y=None):
        # Stateless: there is nothing to learn.
        return self

    def transform(self, X, y=None):
        # Column selection followed by conversion to ndarray.
        return X[self.attribute_names].values

    def fit_transform(self, X, y=None):
        # fit() is a no-op, so chain it for conventional sklearn flavour.
        return self.fit(X, y).transform(X)


# One-hot encode the cat_attrib column of data: same number of rows,
# one output column per distinct category.
def getOneHotCode(data, cat_attrib, whole_dataset):
    """Return a one-hot matrix for column ``cat_attrib`` of ``data``.

    The binarizer is fitted on ``whole_dataset`` so that every category
    present in the full data gets a column, even if it never occurs in
    this particular subset.
    """
    selector = DataFrameSelector(cat_attrib)
    encoder = LabelBinarizer()
    encoder.fit(selector.transform(whole_dataset))
    return encoder.transform(selector.transform(data))


def getLabelEncode(data, cat_attrib, whole_dataset):
    """Return integer codes for column(s) ``cat_attrib`` of ``data``.

    The encoder is fitted on ``whole_dataset`` so that codes stay
    consistent across different subsets of the same data. The result is
    a flat 1-D array of ints.
    """
    selector = DataFrameSelector(cat_attrib)
    encoder = LabelEncoder()
    encoder.fit(selector.transform(whole_dataset).ravel())
    return encoder.transform(selector.transform(data).ravel())


def full_pipeline(data, num_attribs, cat_attribs, whole_dataset):
    """Build the prepared feature matrix for ``data``.

    Numeric columns are median-imputed and standardized; each column in
    ``cat_attribs`` is appended as a one-hot block (fitted on
    ``whole_dataset`` so category columns line up across subsets).

    NOTE(review): the imputer and scaler are fitted on ``data`` itself,
    so train and test sets get *different* scalings — TODO confirm this
    is intended; fitting them on the training set only would be the
    usual practice.
    """
    num = DataFrameSelector(num_attribs).transform(data)
    num = SimpleImputer(strategy="median").fit_transform(num)
    num = StandardScaler().fit_transform(num)

    # Collect every block first and concatenate once: the original
    # np.c_ inside the loop copied the whole growing matrix on each
    # iteration (accidentally quadratic in the number of columns).
    parts = [num]
    parts.extend(getOneHotCode(data, ele, whole_dataset) for ele in cat_attribs)
    return np.hstack(parts)


def model(clf, data_prepared, data_labels, test_prepared, test_labels, save_name="my_model.pkl"):
    """Fit ``clf``, report test accuracy plus cross-validated training
    metrics, and persist the fitted model.

    Parameters:
        clf: any sklearn estimator with fit/predict.
        data_prepared, data_labels: training features / binary labels.
        test_prepared, test_labels: held-out features / binary labels.
        save_name: path the fitted model is dumped to via joblib.
    """
    # Imports hoisted to the top of the function (they used to sit in
    # the middle of the body, after the first prints).
    from sklearn.model_selection import cross_val_predict
    from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score

    print("\n", save_name)
    clf.fit(data_prepared, data_labels)
    test_pred = clf.predict(test_prepared)
    # Fraction of held-out rows predicted correctly.
    n_correct = sum(test_pred == test_labels)
    print("准确率：\t", (n_correct / len(test_pred)))

    # 3-fold cross-validated predictions on the training set, so the
    # metrics below are out-of-fold rather than resubstitution scores.
    train_pred = cross_val_predict(clf, data_prepared, data_labels, cv=3)
    print("混淆矩阵：\t", confusion_matrix(data_labels, train_pred))
    print("精度：\t", precision_score(data_labels, train_pred))
    print("召回率：\t", recall_score(data_labels, train_pred))
    print("F1分数：\t", f1_score(data_labels, train_pred))

    # Save the fitted model (reload later with joblib.load(save_name)).
    joblib.dump(clf, save_name)


def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Plot precision and recall as functions of the decision threshold."""
    # precision_recall_curve returns one more precision/recall value
    # than thresholds, hence the [:-1] to align the arrays.
    for values, fmt, name in ((precisions, "b--", "Precision"),
                              (recalls, "g-", "Recall")):
        plt.plot(thresholds, values[:-1], fmt, label=name)
    plt.xlabel("Threshold")
    plt.legend(loc="upper left")
    plt.ylim([0, 1])


def plot_roc_curve(fpr, tpr, label=None):
    """Draw a ROC curve together with the chance diagonal."""
    plt.plot(fpr, tpr, linewidth=2, label=label)
    # The diagonal is the performance of a random classifier.
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.axis([0, 1, 0, 1])


def plot_roc(data_labels, predict_prob):
    """Plot a ROC curve for the given scores, annotated with its AUC."""
    fpr, tpr, _ = roc_curve(data_labels, predict_prob)
    plt.title('ROC')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.4f' % auc(fpr, tpr))
    plt.legend(loc='lower right')
    # Chance diagonal for reference.
    plt.plot([0, 1], [0, 1], 'r--')
    plt.ylabel('TPR')
    plt.xlabel('FPR')


def my_model(clf, data_prepared, data_labels, test_prepared, test_labels, save_name="my_model.pkl"):
    """Fit ``clf`` and print its held-out accuracy and per-class report.

    Parameters mirror :func:`model`; ``save_name`` is only used for the
    banner line (this variant does not persist the model).
    """
    print("\n", save_name)
    clf.fit(data_prepared, data_labels)
    test_pred = clf.predict(test_prepared)

    # score() is the mean accuracy on the held-out set.
    # NOTE(review): the message names "Naive Bayes" regardless of clf.
    print('The Accuracy of Naive Bayes Classifier is:', clf.score(test_prepared, test_labels))
    # BUG FIX: the original passed target_names=kddcup["class"] — the
    # entire encoded label column of the global DataFrame — but
    # classification_report requires exactly one display name per class
    # and would raise a length-mismatch error. Derive one name per
    # label actually present in y_true or y_pred instead.
    target_names = [str(c) for c in sorted(set(test_labels) | set(test_pred))]
    print(classification_report(test_labels, test_pred, target_names=target_names))


if __name__ == '__main__':
    kddcup = load_dataset()  # <class 'pandas.core.frame.DataFrame'>
    # print(kddcup.info())  # (494021, 42)
    # Replace the textual attack-class column with integer codes.
    labels = getLabelEncode(kddcup, ["class"], kddcup)
    kddcup["class"] = labels

    # Categorical columns are one-hot encoded; everything else except
    # the "class" target is treated as numeric.
    cat_attribs = ["protocol_type", "service", "flag"]  # 3
    num_attribs = list(kddcup)  # 42 - 3 - 1= 38
    num_attribs.remove("class")
    for ele in cat_attribs:
        num_attribs.remove(ele)

    # Plain random split (no stratification)
    # (395216, 42) (98805, 42)
    train_set, test_set = train_test_split(kddcup, test_size=0.2, random_state=42)
    # print(train_set["class"].value_counts() / len(train_set["class"]))
    # print(test_set["class"].value_counts() / len(test_set["class"]))
    train_set_labels = train_set["class"].copy()
    train_set = train_set.drop("class", axis=1)  # (395216, 41)
    test_set_labels = test_set["class"].copy()
    test_set = test_set.drop("class", axis=1)  # (98805, 41)

    # Prepared feature matrices: standardized numerics + one-hot blocks.
    # (395216, 118) (98805, 118)
    train_set_prepared = full_pipeline(train_set, num_attribs, cat_attribs, kddcup)
    test_set_prepared = full_pipeline(test_set, num_attribs, cat_attribs, kddcup)

    # Classifiers

    # Binary target: True where the encoded class equals 11
    # (presumably the "normal." label under LabelEncoder's alphabetical
    # ordering — TODO confirm against encoder.classes_).
    y_train_set_labels_noraml = (train_set_labels == 11)
    y_test_set_labels_normal = (test_set_labels == 11)

    from sklearn.linear_model import SGDClassifier

    sgd_clf = SGDClassifier(random_state=42)
    model(sgd_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="sgd_clf.pkl")

    # Train with Gaussian naive Bayes
    from sklearn.naive_bayes import GaussianNB

    mnb_clf = GaussianNB()
    model(mnb_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="mnb_clf.pkl")

    # Decision tree
    from sklearn.tree import DecisionTreeClassifier

    tree_clf = DecisionTreeClassifier()
    model(tree_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="tree_clf.pkl")

    # Support vector machine
    from sklearn import svm

    # svc_clf = svm.SVC(kernel='linear', probability=True, random_state=42)
    svc_clf = svm.SVC()
    model(svc_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="svc_clf.pkl")

    from sklearn.svm import LinearSVC

    linear_svc_clf = LinearSVC()
    model(linear_svc_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="linear_svc_clf.pkl")

    # KNN Classifier
    from sklearn.neighbors import KNeighborsClassifier

    knb_clf = KNeighborsClassifier()
    model(knb_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="knb_clf.pkl")

    # Logistic Regression Classifier
    from sklearn.linear_model import LogisticRegression

    lgr_clf = LogisticRegression(penalty='l2')
    model(lgr_clf, train_set_prepared, y_train_set_labels_noraml, test_set_prepared, y_test_set_labels_normal,
          save_name="lgr_clf.pkl")










