import os
import warnings

from sklearn.model_selection import train_test_split

from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn import naive_bayes
from sklearn.preprocessing import label_binarize
from sklearn import metrics
from sklearn.metrics import confusion_matrix, precision_score, classification_report, cohen_kappa_score, roc_auc_score
from sklearn.exceptions import ConvergenceWarning
from features import FormattedData
# from sklearn.utils import shuffle
# import pickle

# from features import get_features
from features.get_features import ds

import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import seaborn as sns


def draw_auc_curve(y_true, y_prob):
    """Plot the ROC curve and report its AUC for a binary classifier.

    Only the binary case is handled: both arrays are flattened with
    ``ravel`` before the curve is computed.

    y_true -- ground-truth labels
    y_prob -- predicted scores/probabilities for the positive class
    """
    flat_true = y_true.ravel()
    flat_prob = y_prob.ravel()
    fpr, tpr, thresholds = metrics.roc_curve(flat_true, flat_prob)
    auc = metrics.auc(fpr, tpr)
    # Debug output of the raw curve points and the final score.
    print(fpr, tpr, thresholds)
    print(auc)
    line_width = 2
    plt.figure()  # figsize=(10,10)
    # x axis: false positive rate; y axis: true positive rate.
    plt.plot(fpr, tpr, color='darkorange', lw=line_width, label='ROC curve(AUC=%0.4f)' % auc)
    # Diagonal reference line for a random classifier.
    plt.plot([0, 1], [0, 1], color='navy', lw=line_width, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC')
    plt.legend(loc="lower right")
    plt.show()


def draw_confusion_matrix(cm, label_names, path, name):
    """Render a confusion matrix as a heatmap, save it, and display it.

    cm          -- confusion matrix of integer counts
    label_names -- tick labels for both axes (one per class)
    path        -- directory the figure is saved into
    name        -- classifier name; used in the title and as the file name
    """
    sns.set()
    # Use Times New Roman and keep the minus sign rendering sane.
    mpl.rcParams['font.sans-serif'] = 'Times New Roman'
    mpl.rcParams['axes.unicode_minus'] = False
    fig, axis = plt.subplots()
    sns.heatmap(cm, annot=True, ax=axis, fmt="d")
    axis.set(xticklabels=label_names, yticklabels=label_names)
    axis.set_title('Confusion Matrix of {}'.format(name), fontsize=16, fontweight='bold')
    axis.set_xlabel('Pred Class', fontsize=14, fontweight='bold')
    axis.set_ylabel('True Class', fontsize=14, fontweight='bold')
    plt.savefig(os.path.join(path, name))
    plt.show()


def get_metric(y_true, y_pred, y_prob, path, name):
    """Print a battery of classification metrics and draw the confusion matrix.

    y_true -- ground-truth labels
    y_pred -- predicted labels
    y_prob -- predicted class probabilities (used for the ROC-AUC score)
    path   -- directory the confusion-matrix figure is saved into
    name   -- classifier name, forwarded to the figure title/file name
    """
    cf_matrix = confusion_matrix(y_true, y_pred)
    draw_confusion_matrix(cf_matrix, ["DT", "IEE", "NORMAL"], path, name)
    print("confusion matrix: \n", cf_matrix)

    # Precision under the three standard averaging schemes.
    micro_p, macro_p, weighted_p = (
        precision_score(y_true, y_pred, average=avg)
        for avg in ("micro", "macro", "weighted")
    )
    print(
        "micro precision score: {}\nmacro precision score: {}\nweighted precision score: {}\n".format(micro_p,
                                                                                                      macro_p,
                                                                                                      weighted_p))

    # NOTE(review): assumes the class labels are exactly 1, 2, 3 in the
    # order DT / IEE / NORMAL — confirm against the dataset's encoding.
    print("classification report:\n", classification_report(y_true, y_pred, labels=np.array([1, 2, 3]),
                                                            target_names=np.array(["DT", "IEE", "NORMAL"])))

    print("cohen kappa score: ", cohen_kappa_score(y_true, y_pred))

    # One-vs-one multi-class ROC-AUC from the predicted probabilities.
    print("roc-ovo: ", roc_auc_score(y_true, y_prob, multi_class="ovo"))


def logistic_regression():
    """Train a logistic-regression classifier on the fuzzy feature set and report metrics."""
    fuzzy_set = ds.get_data("fuzzy")
    x_train, x_test, y_train, y_test = train_test_split(
        fuzzy_set.data, fuzzy_set.label, test_size=0.2, shuffle=True, random_state=666)

    # Original note said the default multi-class mode is OVR; newer sklearn
    # releases resolve it automatically — verify against the installed version.
    model = LogisticRegression()
    model.fit(x_train, y_train)

    get_metric(y_test, model.predict(x_test), model.predict_proba(x_test), "../results", "LR")


def nb():
    """Train a Gaussian naive-Bayes classifier on the fuzzy feature set and report metrics."""
    fuzzy_set = ds.get_data("fuzzy")
    x_train, x_test, y_train, y_test = train_test_split(
        fuzzy_set.data, fuzzy_set.label, test_size=0.2, shuffle=True, random_state=666)

    # Gaussian variant; Multinomial/Bernoulli were also tried in earlier runs.
    model = naive_bayes.GaussianNB()
    model.fit(x_train, y_train)

    get_metric(y_test, model.predict(x_test), model.predict_proba(x_test), "../results", "NB")


def svm():
    """Train an RBF-kernel SVM on the fuzzy feature set and report metrics."""
    fuzzy_set = ds.get_data("fuzzy")
    x_train, x_test, y_train, y_test = train_test_split(
        fuzzy_set.data, fuzzy_set.label, test_size=0.2, shuffle=True, random_state=666)

    # probability=True is required so get_metric can compute ROC-AUC.
    model = SVC(C=1, kernel="rbf", gamma=1, decision_function_shape='ovo', probability=True)
    model.fit(x_train, y_train)

    get_metric(y_test, model.predict(x_test), model.predict_proba(x_test), "../results", "SVM")


def decision_tree():
    """Train a depth-limited decision tree on the fuzzy feature set and report metrics."""
    fuzzy_set = ds.get_data("fuzzy")
    x_train, x_test, y_train, y_test = train_test_split(
        fuzzy_set.data, fuzzy_set.label, test_size=0.2, shuffle=True, random_state=666)

    # Shallow tree (depth 3) to keep the model interpretable.
    model = DecisionTreeClassifier(max_depth=3)
    model.fit(x_train, y_train)

    get_metric(y_test, model.predict(x_test), model.predict_proba(x_test), "../results", "DT")


# Hyper-parameter settings for the MLP learning-strategy comparison in
# plot_for_mlp(): six SGD variants (constant/inv-scaling learning rate,
# with/without plain or Nesterov momentum) plus Adam.
params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
           'learning_rate_init': 0.2},
          {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
           'nesterovs_momentum': False, 'learning_rate_init': 0.2},
          {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
           'nesterovs_momentum': True, 'learning_rate_init': 0.2},
          {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
           'learning_rate_init': 0.2},
          {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
           'nesterovs_momentum': True, 'learning_rate_init': 0.2},
          {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
           'nesterovs_momentum': False, 'learning_rate_init': 0.2},
          {'solver': 'adam', 'learning_rate_init': 0.01}]

# Legend labels, one per entry in `params` (same order).
params_labels = ["constant learning-rate", "constant with momentum",
                 "constant with Nesterov's momentum",
                 "inv-scaling learning-rate", "inv-scaling with momentum",
                 "inv-scaling with Nesterov's momentum", "adam"]

# Line color/style for each strategy's loss curve (same order as `params`).
plot_args = [{'c': 'red', 'linestyle': '-'},
             {'c': 'green', 'linestyle': '-'},
             {'c': 'blue', 'linestyle': '-'},
             {'c': 'red', 'linestyle': '--'},
             {'c': 'green', 'linestyle': '--'},
             {'c': 'blue', 'linestyle': '--'},
             {'c': 'black', 'linestyle': '-'}]


def mlp():
    """Train an MLP (Nesterov-momentum SGD settings from `params`) on the fuzzy feature set."""
    fuzzy_set = ds.get_data("fuzzy")
    x_train, x_test, y_train, y_test = train_test_split(
        fuzzy_set.data, fuzzy_set.label, test_size=0.2, shuffle=True, random_state=666)

    # params[2]: constant learning rate with Nesterov momentum.
    model = MLPClassifier(**params[2], random_state=666)
    model.fit(x_train, y_train)

    get_metric(y_test, model.predict(x_test), model.predict_proba(x_test), "../results", "MLP")


def plot_for_mlp():
    """Compare MLP learning strategies by plotting their training-loss curves.

    Trains one MLPClassifier per entry in the module-level `params` list on
    the fuzzy feature set, then plots every loss curve on a single figure
    and saves it to ../results/MLP_loss.png.
    """
    # For each dataset, plot learning for each learning strategy.
    fig, ax = plt.subplots(1, 1, figsize=(15, 12))
    print("\nlearning on dataset %s" % "MLP for PVA")
    ax.set_title("MLP for PVA")

    dataset = ds.get_data("fuzzy")
    samples, labels = dataset.data, dataset.label
    x_train, x_test, y_train, y_test = train_test_split(samples, labels, test_size=0.2, shuffle=True, random_state=666)

    mlps = []

    max_iter = 400

    for label, param in zip(params_labels, params):
        print("training: %s" % label)
        mlp = MLPClassifier(random_state=0,
                            max_iter=max_iter, **param)

        # Some parameter combinations will not converge within max_iter, as
        # the plots show, so convergence warnings are suppressed here.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=ConvergenceWarning,
                                    module="sklearn")
            mlp.fit(x_train, y_train)

        mlps.append(mlp)
        print("Training set score: %f" % mlp.score(x_train, y_train))
        print("Training set loss: %f" % mlp.loss_)
    for mlp, label, args in zip(mlps, params_labels, plot_args):
        ax.plot(mlp.loss_curve_, label=label, **args)
    fig.legend(params_labels, ncol=3, loc="upper center")
    # BUG FIX: the original joined "../results" with "../results/MLP_loss.png",
    # producing the redundant path ../results/../results/MLP_loss.png.
    # Join the directory with just the file name instead.
    plt.savefig(os.path.join("../results", "MLP_loss.png"))
    plt.show()


if __name__ == '__main__':
    # Entry point: run exactly one experiment at a time; the others are
    # kept commented out for quick switching between classifiers.
    # logistic_regression()
    # svm()
    # decision_tree()
    mlp()
    # plot_for_mlp()
    # nb()
