import argparse
import timeit
import warnings
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

from sklearn.datasets import load_wine, load_digits
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split


def get_arguments(argv=None):
    """Parse command-line options for the MLP experiment.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, which
            makes argparse read ``sys.argv[1:]`` — existing callers are
            unaffected.

    Returns:
        argparse.Namespace with the parsed options.
    """
    def layer_sizes(text):
        # "100,50" -> (100, 50). The original used type=dict, which raises on
        # any value passed from the command line; only the default ever worked.
        return tuple(int(part) for part in text.split(',') if part.strip())

    parser = argparse.ArgumentParser(description='MLP')
    parser.add_argument('--dataset', type=int, default=2, choices=(1, 2),
                        help='the type of dataset: '
                             '1: the wine dataset, '
                             '2: the digits dataset')
    parser.add_argument('--test_size', type=float, default=0.33,
                        help='the proportion of test data')
    parser.add_argument('--random_state', type=int, default=42,
                        help='the random seed of dataset split')
    parser.add_argument('--hidden_layer_sizes', type=layer_sizes, default=(100,),
                        help='comma-separated hidden layer sizes of MLP, e.g. "100,50"')
    parser.add_argument('--activation', type=str, default='tanh',
                        choices=('identity', 'logistic', 'tanh', 'relu'),
                        help='the activation function of MLP')
    parser.add_argument('--alpha', type=float, default=0.0001,
                        help='the strength of the regularization term')
    # BUG FIX: sklearn's MLPClassifier accepts 'invscaling', not 'incscaling';
    # the typo made a valid option impossible to select.
    parser.add_argument('--learning_rate', type=str, default='adaptive',
                        choices=('constant', 'invscaling', 'adaptive'),
                        help='the learning rate schedule of MLP')
    parser.add_argument('--max_iter', type=int, default=10000,
                        help='the number of max iterations in MLP')

    return parser.parse_args(argv)


class MyPreprocessing:
    """Load one of the toy sklearn datasets and split it into train/test sets."""

    def __init__(self, parser):
        # `parser` is the argparse.Namespace produced by get_arguments().
        self.dataset = parser.dataset            # 1 = wine, 2 = digits
        self.test_size = parser.test_size        # fraction held out for testing
        self.random_state = parser.random_state  # seed for the train/test split

    def load_dataset(self):
        """Return ``(datas, target)`` for the configured dataset.

        Dataset 1 (wine) keeps only the first two feature columns so the
        decision boundary can later be drawn in 2-D; dataset 2 (digits) also
        pops up one sample image as a visual sanity check.

        Raises:
            ValueError: if ``self.dataset`` is neither 1 nor 2.
        """
        with warnings.catch_warnings():
            # Silence dataset-loading warnings; they are irrelevant to the demo.
            warnings.filterwarnings("ignore")
            if self.dataset == 1:
                dataset = load_wine()
                # Keep only two features so a 2-D decision boundary is plottable.
                datas = dataset.data[:, :2]
                target = dataset.target
            elif self.dataset == 2:
                dataset = load_digits()
                datas = dataset.data
                target = dataset.target
                # Show one sample digit (8x8 grayscale) as a sanity check.
                plt.imshow(datas[4].reshape(8, 8), cmap='gray')
                plt.title('label = ' + str(target[4]))
                plt.show()
            else:
                raise ValueError("Please choose right dataset~", self.dataset)
        return datas, target

    def split_dataset(self, datas, labels):
        """Split the data; note the return order (X_train, y_train, X_test, y_test).

        Raises:
            ValueError: if ``test_size`` is not strictly between 0 and 1.
                (Explicit raise instead of ``assert``, which is stripped
                when Python runs with ``-O``.)
        """
        if not 0 < self.test_size < 1:
            raise ValueError("Please choose right test size between 0 and 1~")
        X_train, X_test, y_train, y_test = train_test_split(
            datas, labels, test_size=self.test_size, random_state=self.random_state)
        # Flatten targets so downstream code always sees 1-D label arrays.
        return X_train, y_train.reshape(-1), X_test, y_test.reshape(-1)


class SklearnMLP:
    """Thin wrapper around sklearn's MLPClassifier: fit, score, and visualize."""

    def __init__(self, parser):
        # Hyper-parameters come straight from the argparse.Namespace.
        self.hidden_layer_sizes = parser.hidden_layer_sizes
        self.activation = parser.activation
        self.alpha = parser.alpha
        self.max_iter = parser.max_iter
        self.random_state = parser.random_state
        self.learning_rate = parser.learning_rate

    def MLP(self, X_train, y_train, X_test, y_test):
        """Fit an MLPClassifier and print its train/test accuracy.

        Returns:
            The fitted MLPClassifier instance.
        """
        clf = MLPClassifier(hidden_layer_sizes=self.hidden_layer_sizes,
                            activation=self.activation,
                            alpha=self.alpha,
                            max_iter=self.max_iter,
                            learning_rate=self.learning_rate,
                            random_state=self.random_state)
        clf.fit(X_train, y_train)
        print("The train scores of MLP is {}".format(clf.score(X_train, y_train)))
        print("The test scores of MLP is {}".format(clf.score(X_test, y_test)))
        return clf

    def visualization(self, clf, X_train, y_train, X_test, y_test):
        """Plot training points (left) and the decision boundary (right).

        NOTE(review): assumes X_train has exactly two feature columns (the
        2-D wine subset) — confirm before calling with other data. X_test and
        y_test are accepted for interface symmetry but are not drawn.
        """
        figure = plt.figure(figsize=(17, 9))
        # Pad the plot limits slightly beyond the training-data range.
        x_min = X_train[:, 0].min() - 0.5
        x_max = X_train[:, 0].max() + 0.5
        y_min = X_train[:, 1].min() - 0.5
        y_max = X_train[:, 1].max() + 0.5
        h = 0.02    # step size in the mesh
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

        # Three bright colors, one per class of the wine dataset.
        cm_bright = ListedColormap(["#FF0000", "#FFFF00", "#0000FF"])

        # Left panel: raw training points.
        ax = plt.subplot(1, 2, 1)
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
                   alpha=0.6, edgecolors="black")
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())

        # Right panel: decision boundary with the training points overlaid.
        ax = plt.subplot(1, 2, 2)
        DecisionBoundaryDisplay.from_estimator(
            clf, X_train, response_method="predict", cmap=plt.cm.RdYlBu, ax=ax)
        ax.scatter(
            X_train[:, 0],
            X_train[:, 1],
            c=y_train,
            cmap=cm_bright,
            edgecolors="black",
            s=25,
        )
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title('MLP')

        figure.subplots_adjust(left=0.02, right=0.98)
        plt.show()


if __name__ == '__main__':
    args = get_arguments()

    # Use distinct instance names: the original rebound the class names
    # (MyPreprocessing = MyPreprocessing(parser)), shadowing them.
    preprocessing = MyPreprocessing(args)
    datas, target = preprocessing.load_dataset()
    X_train, y_train, X_test, y_test = preprocessing.split_dataset(datas, target)

    mlp = SklearnMLP(args)
    clf = mlp.MLP(X_train, y_train, X_test, y_test)

    # The decision-boundary plot needs exactly two features, which only the
    # wine dataset (--dataset 1) provides here. The original crashed on
    # --dataset 1 via `assert parser.dataset == 2`; both choices now work.
    if args.dataset == 1:
        mlp.visualization(clf, X_train, y_train, X_test, y_test)






