import math
import os
import numpy as np
import pandas as pd
import joblib
from sklearn import metrics
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_squared_error, roc_curve, auc, classification_report
from sklearn.preprocessing import StandardScaler, LabelBinarizer, LabelEncoder
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
import matplotlib.pyplot as plt
"""
1.二分类
2.分类器更全面
3.增量学习
"""

HOUSING_PATH = "datasets/kddcup"
save_file_name = "test5/"



def load_dataset(housing_path=HOUSING_PATH):
    """Read the KDD Cup 10% corrected dataset from *housing_path* as a DataFrame."""
    csv_file = os.path.join(housing_path, "kddcup.data_10_percent_corrected")
    return pd.read_csv(csv_file)

class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Transformer that selects the given DataFrame columns as a NumPy array."""

    def __init__(self, attribute_names):
        # Column name(s) to extract in transform().
        self.attribute_names = attribute_names

    def fit(self, X, y=None):
        # Stateless: nothing to learn from the data.
        return self

    def transform(self, X, y=None):
        return X[self.attribute_names].values

    def fit_transform(self, X, y=None):
        # fit() is a no-op, so this is equivalent to transform().
        return self.fit(X, y).transform(X)


# One-hot encode data's cat_attrib column: same number of rows, one output
# column per category of that attribute.
def getOneHotCode(data, cat_attrib, whole_dataset):
    """Binarize *cat_attrib* of *data*, fitting on *whole_dataset* so the
    column layout covers every category present anywhere in the dataset."""
    picker = DataFrameSelector(cat_attrib)
    binarizer = LabelBinarizer()
    # Fit on the full dataset to get a stable, complete set of classes.
    binarizer.fit(picker.transform(whole_dataset))
    return binarizer.transform(picker.transform(data))



def getLabelEncode(data, cat_attrib, whole_dataset):
    """Label-encode *cat_attrib* of *data*, fitting on *whole_dataset*.

    Returns a tuple ``(codes, normal_idx)`` where *codes* are the integer
    labels for *data* and *normal_idx* is the encoded value of the
    ``"normal."`` class, or -1 if that class is absent.
    """
    selector = DataFrameSelector(cat_attrib)
    cat_1 = selector.transform(data)
    encoder = LabelEncoder()
    # Fit on the whole dataset so codes are consistent across subsets.
    encoder.fit(selector.transform(whole_dataset).ravel())
    codes = encoder.transform(cat_1.ravel())
    # Default to -1: the original left q_NORMAL_NUM unbound (NameError at
    # the return) whenever "normal." did not appear in the classes.
    normal_idx = -1
    for idx, cls in enumerate(encoder.classes_):
        if cls == "normal.":
            normal_idx = idx
            break
    return codes, normal_idx



def full_pipeline(data, num_attribs, cat_attribs, whole_dataset):
    """Prepare a feature matrix: median-impute and standardize the numeric
    columns, then append one one-hot block per categorical attribute."""
    numeric = DataFrameSelector(num_attribs).transform(data)
    numeric = SimpleImputer(strategy="median").fit_transform(numeric)
    prepared = StandardScaler().fit_transform(numeric)

    # Append the one-hot columns of each categorical attribute on the right.
    for attrib in cat_attribs:
        one_hot = getOneHotCode(data, attrib, whole_dataset)
        prepared = np.c_[prepared, one_hot]
    return prepared


def model(clf, data_prepared, data_labels, test_prepared, test_labels, save_name="my_model.pkl"):
    """Fit *clf* on the training data, print held-out accuracy plus 3-fold
    cross-validated confusion matrix / precision / recall / F1 on the
    training set, then persist the fitted model with joblib.

    The model is written to ``save_file_name + save_name``; reload it later
    with ``joblib.load``.
    """
    # These metrics are not imported at module level, so import them here.
    from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score

    print("\n", save_name)

    clf.fit(data_prepared, data_labels)
    test_pred = clf.predict(test_prepared)
    n_correct = sum(test_pred == test_labels)
    print("准确率：\t", (n_correct / len(test_pred)))

    # cross_val_predict is already imported at module level (no re-import).
    train_pred = cross_val_predict(clf, data_prepared, data_labels, cv=3)
    print("混淆矩阵：\t", confusion_matrix(data_labels, train_pred))
    print("精度：\t", precision_score(data_labels, train_pred))
    print("召回率：\t", recall_score(data_labels, train_pred))
    print("F1分数：\t", f1_score(data_labels, train_pred))

    # Save the fitted model.
    joblib.dump(clf, save_file_name + save_name)

def plot_ROC(q_y_test, q_y_score):
    """Plot a per-class ROC curve from column-wise labels and scores.

    *q_y_test* and *q_y_score* are 2-D arrays with one column per class;
    a curve and AUC are computed for each of the first ``q_n_classes``
    columns and the last computed one is drawn.
    """
    print(q_y_test.shape)
    q_n_classes = 1  # number of class columns to evaluate
    # ROC curve and AUC for each class column.
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(q_n_classes):
        fpr[i], tpr[i], _ = roc_curve(q_y_test[:, i], q_y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    plt.figure()
    lw = 2
    # BUG FIX: the original plotted fpr[2]/tpr[2]/roc_auc[2], which only
    # exist when q_n_classes >= 3; with q_n_classes == 1 that raised
    # KeyError. Plot the last computed class instead.
    last = q_n_classes - 1
    plt.plot(fpr[last], tpr[last], color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[last])
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.show()


def my_model(clf, data_prepared, data_labels, test_prepared, test_labels, save_name="my_model.pkl"):
    """Train *clf*, print test-set accuracy and 3-fold CV metrics on the
    training set, then save the fitted model to ``save_file_name + save_name``."""
    from sklearn.model_selection import cross_val_predict
    from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score

    print("\n", save_name)

    clf.fit(data_prepared, data_labels)
    predictions = clf.predict(test_prepared)
    correct = sum(predictions == test_labels)
    print("准确率：\t", (correct / len(predictions)))

    # Cross-validated predictions on the training set for the metrics below.
    cv_pred = cross_val_predict(clf, data_prepared, data_labels, cv=3)
    print("混淆矩阵：\t", confusion_matrix(data_labels, cv_pred))
    print("精度：\t", precision_score(data_labels, cv_pred))
    print("召回率：\t", recall_score(data_labels, cv_pred))
    print("F1分数：\t", f1_score(data_labels, cv_pred))

    # Persist the model; reload later with joblib.load.
    joblib.dump(clf, save_file_name + save_name)



def my_plot_roc(q_labels, q_predict_prob):
    """Plot a binary ROC curve from true labels and predicted probabilities.

    Column 1 of *q_predict_prob* is taken as the positive-class probability
    (the layout returned by sklearn's predict_proba for binary problems).
    """
    print(q_predict_prob.shape)
    fpr, tpr, _ = roc_curve(q_labels, q_predict_prob[:, 1])
    area = auc(fpr, tpr)
    plt.title('ROC')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.4f' % area)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')  # chance diagonal
    plt.ylabel('TPR')
    plt.xlabel('FPR')
    plt.show()




def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Plot precision and recall as functions of the decision threshold.

    The last precision/recall value is dropped because those arrays are one
    element longer than *thresholds* (as returned by precision_recall_curve).
    """
    for values, fmt, name in ((precisions, "b--", "Precision"),
                              (recalls, "g-", "Recall")):
        plt.plot(thresholds, values[:-1], fmt, label=name)
    plt.xlabel("Threshold")
    plt.legend(loc="upper left")
    plt.ylim([0, 1])



# def iter_minibatches(data_stream, minibatch_size=1000):
#     '''
#     迭代器
#     给定文件流（比如一个大文件），每次输出minibatch_size行，默认选择1k行
#     将输出转化成numpy输出，返回X, y
#     '''
#     X = []
#     y = []
#     cur_line_num = 0
#
#     csvfile = file(data_stream, 'rb')
#     reader = csv.reader(csvfile)
#     for line in reader:
#         y.append(float(line[0]))
#         X.append(line[1:])  # 这里要将数据转化成float类型
#
#         cur_line_num += 1
#         if cur_line_num >= minibatch_size:
#             X, y = np.array(X), np.array(y)  # 将数据转成numpy的array类型并返回
#             yield X, y
#             X, y = [], []
#             cur_line_num = 0
#     csvfile.close()
# 生成测试文件
# minibatch_test_iterators = iter_minibatches(test_file, minibatch_size=5000)
# X_test, y_test = minibatch_test_iterators.next()  # 得到一份测试文件



def inc_model(clf, data_prepared, data_labels, test_prepared, test_labels, save_name="my_inc_model.pkl"):
    """Incrementally train *clf* with ``partial_fit`` over 1000 mini-batches.

    Per-batch progress (shapes and test accuracy) is logged to the file
    ``"kddcup_" + save_name`` by temporarily redirecting stdout; the final
    test accuracy is printed to the real stdout and the fitted model is
    saved to ``save_file_name + save_name``.
    """
    import sys

    print(save_name)
    original_stdout = sys.stdout
    output_file = open("kddcup_" + save_name, 'w')
    sys.stdout = output_file
    # try/finally guarantees stdout is restored and the log file closed even
    # if training raises (the original leaked both on any exception).
    try:
        print(save_name)
        print(data_prepared.shape, data_labels.shape, test_prepared.shape, test_labels.shape)
        inc_length = len(data_prepared)
        n = 1000  # number of mini-batches
        for i in range(n):
            # Evenly partition the training data into n consecutive slices.
            start = math.floor(i / n * inc_length)
            stop = math.floor((i + 1) / n * inc_length)
            X_train_part = data_prepared[start:stop]
            Y_train_part = data_labels[start:stop]
            print(X_train_part.shape)
            print(Y_train_part.shape)
            # NOTE(review): the labels passed in are booleans (== NORMAL_NUM
            # comparisons); they compare equal to 0/1 so these classes
            # should match, but confirm against the callers.
            clf.partial_fit(X_train_part, Y_train_part, classes=np.array([0, 1]))
            print(i, "time. accuracy: {} ".format(clf.score(test_prepared, test_labels)))
    finally:
        output_file.close()
        sys.stdout = original_stdout

    print("\n", save_name)
    print("accuracy: {} ".format(clf.score(test_prepared, test_labels)))

    # Save the fitted model.
    joblib.dump(clf, save_file_name + save_name)


def plot_roc_curve(fpr, tpr, label=None):
    """Draw an ROC curve and the chance diagonal; the caller shows the figure."""
    plt.plot(fpr, tpr, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'b--')  # chance line
    plt.axis([0, 1, 0, 1])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')

def plot_roc(labels, predict_prob):
    """Plot a binary ROC curve (with AUC in the legend) and show it."""
    fpr, tpr, _ = roc_curve(labels, predict_prob)
    area = auc(fpr, tpr)
    plt.title('ROC')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.4f' % area)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')  # chance diagonal
    plt.ylabel('TPR')
    plt.xlabel('FPR')
    plt.show()


if __name__ == '__main__':
    NORMAL_NUM = -1
    kddcup = load_dataset()  # pandas DataFrame, (494021, 42)
    # Encode the target column; NORMAL_NUM becomes the integer code of
    # the "normal." class (-1 if absent).
    labels, NORMAL_NUM = getLabelEncode(kddcup, ["class"], kddcup)
    kddcup["class"] = labels

    cat_attribs = ["protocol_type", "service", "flag"]  # 3 categorical columns
    # Numeric attributes = every column minus the target and the categoricals.
    num_attribs = list(kddcup)  # 42 - 3 - 1 = 38
    num_attribs.remove("class")
    for ele in cat_attribs:
        num_attribs.remove(ele)

    # Stratified split so train/test keep the original class distribution.
    from sklearn.model_selection import StratifiedShuffleSplit
    split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
    strat_train_set, strat_test_set = None, None  # set in the single split below
    for train_index, test_index in split.split(kddcup, kddcup["class"]):
        strat_train_set = kddcup.loc[train_index]
        strat_test_set = kddcup.loc[test_index]

    train_set, test_set = strat_train_set, strat_test_set  # (395216, 42) / (98805, 42)
    train_set_labels = train_set["class"].copy()
    train_set = train_set.drop("class", axis=1)  # (395216, 41)
    test_set_labels = test_set["class"].copy()
    test_set = test_set.drop("class", axis=1)  # (98805, 41)

    # (395216, 118) / (98805, 118) after imputing, scaling and one-hot encoding.
    train_set_prepared = full_pipeline(train_set, num_attribs, cat_attribs, kddcup)
    test_set_prepared = full_pipeline(test_set, num_attribs, cat_attribs, kddcup)

    # Binary targets: True when the record is "normal." traffic.
    # (typo "noraml" fixed; local to this block only)
    print(NORMAL_NUM)
    y_train_set_labels_normal = (train_set_labels == NORMAL_NUM)
    y_test_set_labels_normal = (test_set_labels == NORMAL_NUM)

    # Incremental (partial_fit-capable) linear classifiers.
    from sklearn.linear_model import SGDClassifier, Perceptron, PassiveAggressiveClassifier
    # NOTE(review): loss="log" was renamed "log_loss" in sklearn >= 1.1 and
    # removed in 1.3 -- confirm against the installed sklearn version.
    sgd_clf = SGDClassifier(loss="log")
    inc_model(sgd_clf, train_set_prepared, y_train_set_labels_normal, test_set_prepared,
              y_test_set_labels_normal, save_name="sgd_clf_inc_model.pkl")

    prt_clf = Perceptron()
    inc_model(prt_clf, train_set_prepared, y_train_set_labels_normal, test_set_prepared,
              y_test_set_labels_normal, save_name="prt_clf_inc_model.pkl")

    pac_clf = PassiveAggressiveClassifier()
    inc_model(pac_clf, train_set_prepared, y_train_set_labels_normal, test_set_prepared,
              y_test_set_labels_normal, save_name="pac_clf_inc_model.pkl")

    # Naive Bayes variants that also support partial_fit.
    from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
    gub_clf = GaussianNB()
    inc_model(gub_clf, train_set_prepared, y_train_set_labels_normal, test_set_prepared,
              y_test_set_labels_normal, save_name="gub_clf_inc_model.pkl")

    # MultinomialNB is disabled: StandardScaler produces negative values and
    # MultinomialNB rejects them ("ValueError: Negative values in data ...").
    # mnb_clf = MultinomialNB()
    # inc_model(mnb_clf, train_set_prepared, y_train_set_labels_normal, test_set_prepared,
    #           y_test_set_labels_normal, save_name="mnb_clf_inc_model.pkl")

    bnb_clf = BernoulliNB()
    inc_model(bnb_clf, train_set_prepared, y_train_set_labels_normal, test_set_prepared,
              y_test_set_labels_normal, save_name="bnb_clf_inc_model.pkl")
