import argparse
import warnings

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris, load_breast_cancer, make_blobs, make_classification, make_circles, make_moons
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import train_test_split, cross_val_score, cross_validate, validation_curve, GridSearchCV, \
    RandomizedSearchCV, HalvingGridSearchCV, HalvingRandomSearchCV
from sklearn.svm import LinearSVC, SVC
from sklearn.inspection import DecisionBoundaryDisplay
import timeit


def get_arguments(argv=None):
    """Build and parse the command-line arguments for the SVM experiments.

    Args:
        argv: optional list of argument strings (handy for testing); None
            means read sys.argv as before, so existing callers are unaffected.

    Returns:
        argparse.Namespace holding all experiment settings.
    """
    parser = argparse.ArgumentParser(description='SVM')
    parser.add_argument('--data_name', type=int, default=2, choices=(1, 2),
                        help='choose datasets,'
                             '1: iris,'
                             '2: breast_cancer')
    # BUG FIX: type=list split the argument string into single characters
    # ("--iris_features f1" -> ['f', '1']); nargs='+' parses a real list.
    parser.add_argument('--iris_features', type=str, nargs='+', default=['f1', 'f2'],
                        help="the features of iris datasets for regression, "
                             "element of parameter should be 'f0', 'f1', 'f2' or 'f3'")
    # BUG FIX: same type=list problem, and the elements must be ints so they
    # compare equal to the integer labels in the DataFrame.
    parser.add_argument('--iris_classes', type=int, nargs='+', default=[1, 2],
                        help='the classes of iris datasets for classify, element of parameter should be 0, 1, 2')
    parser.add_argument('--normalization', type=int, default=0, choices=(0, 1, 2, 3),
                        help='select the type of data normalization,'
                             '0: no normalization,'
                             '1: rescale the data to [0, 1],'
                             '2: rescale the data to [-1, 1],'
                             '3: z-score normalization')
    parser.add_argument('--test_size', type=float, default=0.33, help='the proportion of test data')
    parser.add_argument('--random_state', type=int, default=42, help='the random seed of dataset split')
    parser.add_argument('--C', type=float, default=1.0, help='penalty coefficient, should be positive float')
    parser.add_argument('--kernel', type=str, default='linear',
                        choices=('linear', 'poly', 'rbf', 'sigmoid'),
                        help='the type of kernel function')
    parser.add_argument('--cv', type=int, default=5, help='the value of partitions in cross validation')
    parser.add_argument('--cross_validation', type=int, default=3,
                        help='the type of cross validation,'
                             '0: no cross validation,'
                             '1: cross_val_score,'
                             '2: cross_validate,'
                             '3: validation_curve')
    parser.add_argument('--param_name', type=str, default='C', help='the parameter in validation_curve')
    # BUG FIX: the range used to start at 0, but SVC requires C > 0, so the
    # default validation_curve run (cross_validation=3, param_name='C')
    # crashed on the very first candidate value.
    parser.add_argument('--param_range', default=np.arange(1, 21, 1),
                        help='the range of parameter in validation_curve')

    parser.add_argument('--make_data', type=int, default=3,
                        help='the type of making dataset,'
                             '1: make_blobs,'
                             '2: make_classification,'
                             '3: make_circles,'
                             '4: make_moons')
    parser.add_argument('--n_samples', type=int, default=500, help='the size of making dataset')

    parser.add_argument('--grid_search', type=int, default=2,
                        help='the type of gridsearch,'
                             '1: GridSearchCV,'
                             '2: RandomizedSearchCV,'
                             '3: HalvingGridSearchCV,'
                             '4: HalvingRandomSearchCV')
    # NOTE(review): type=dict was removed -- argparse would call dict(<string>)
    # and crash on any CLI value; the grid is effectively code-configured.
    parser.add_argument('--param_grid',
                        default={
                            'gamma': ('scale', 'auto'),
                            'kernel': ('linear', 'poly', 'rbf', 'sigmoid'),
                            'degree': np.arange(1, 16, 1)
                        },
                        help='the parameter and its values in grid search')

    return parser.parse_args(argv)


class MyPreprocessing:
    """Load, normalize, filter, visualize and split the built-in sklearn datasets."""

    def __init__(self, parser):
        """Keep the preprocessing-related settings from the parsed CLI arguments."""
        self.data_name = parser.data_name
        self.iris_features = parser.iris_features
        self.iris_classes = parser.iris_classes
        self.test_size = parser.test_size
        self.random_state = parser.random_state
        self.normalization = parser.normalization

    def load_dataset(self):
        """Load the configured dataset.

        Returns:
            data_name == 1: a pandas DataFrame with columns 'f0'..'f3' and 'label'.
            data_name == 2: an (datas, target) ndarray tuple.
            NOTE(review): the two branches intentionally return different shapes;
            callers must dispatch on data_name.

        Raises:
            ValueError: for any other data_name.
        """
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            if self.data_name == 1:
                dataset = load_iris()
                print("The iris datasets is loaded successfully!")
                datas = dataset.data.astype(float)
                target = dataset.target.astype(int)
                df = pd.DataFrame({'f0': datas[:, 0],
                                   'f1': datas[:, 1],
                                   'f2': datas[:, 2],
                                   'f3': datas[:, 3],
                                   'label': target})
                return df
            elif self.data_name == 2:
                dataset = load_breast_cancer()
                print("The breast_cancer datasets is loaded successfully!")
                print("The description of datasets is: ", end="")
                print(dataset.DESCR)
                print("The feature names of datasets is: ", end="")
                print(*dataset.feature_names)
                print("The target names of datasets is: ", end="")
                print(*dataset.target_names)
                print("The shape of dataset is: ", end="")
                print(dataset.data.shape)
                return dataset.data, dataset.target
            else:
                raise ValueError("Please choose right dataset~", self.data_name)

    def normalize_dataset(self, X):
        """Apply the configured feature scaling to X and return the scaled array.

        0: identity, 1: MinMaxScaler -> [0, 1], 2: MaxAbsScaler -> [-1, 1],
        3: StandardScaler (z-score).

        Raises:
            ValueError: for any other normalization code.
        """
        if self.normalization == 0:
            # No scaling requested.
            return X
        if self.normalization == 1:
            # Rescale each feature to [0, 1].
            return MinMaxScaler().fit_transform(X)
        if self.normalization == 2:
            # Rescale each feature to [-1, 1] by its maximum absolute value.
            return MaxAbsScaler().fit_transform(X)
        if self.normalization == 3:
            # Z-score standardization (zero mean, unit variance).
            return StandardScaler().fit_transform(X)
        raise ValueError("Please choose right normalization type~", self.normalization)

    def choose_dataset(self, df):
        """Filter the iris DataFrame to the configured classes and features.

        Returns:
            (datas, labels): feature matrix and a (n, 1) label column array.
        """
        df = df[df.label.isin(self.iris_classes)]  # keep only the requested labels
        datas = df[self.iris_features].values      # select the requested feature columns
        labels = df[['label']].values              # label column, shape (n, 1)
        return datas, labels

    def draw_dataset(self, df):
        """Scatter-plot the two selected features, colored by class label."""
        datas, labels = self.choose_dataset(df)
        plt.scatter(datas[:, 0], datas[:, 1], c=labels)
        plt.xlabel(self.iris_features[0])
        plt.ylabel(self.iris_features[1])
        plt.title("Dataset scatter plot")
        plt.show()

    def split_dataset(self, datas, labels):
        """Split into train/test sets and flatten the label arrays to 1-D.

        Returns:
            (X_train, y_train, X_test, y_test) -- note the order.

        Raises:
            ValueError: if test_size is not strictly between 0 and 1.
                (Previously an assert, which silently disappears under `python -O`.)
        """
        if not 0 < self.test_size < 1:
            raise ValueError("Please choose right test size between 0 and 1")
        X_train, X_test, y_train, y_test = train_test_split(
            datas, labels, test_size=self.test_size, random_state=self.random_state)
        return X_train, y_train.ravel(), X_test, y_test.ravel()


class MakeDataset(MyPreprocessing):
    """Generate, plot and split a synthetic 2-D classification dataset."""

    def __init__(self, parser):
        """Keep the synthetic-dataset settings in addition to the base settings."""
        super(MakeDataset, self).__init__(parser)
        self.make_data = parser.make_data
        self.n_samples = parser.n_samples

    def makedataset(self):
        """Create the configured synthetic dataset, plot it, and split it.

        Returns:
            (X_train, y_train, X_test, y_test).

        Raises:
            ValueError: for an unknown make_data code.
        """
        if self.make_data == 1:
            X, y = make_blobs(n_samples=self.n_samples, centers=2, cluster_std=0.4, random_state=0)
        elif self.make_data == 2:
            X, y = make_classification(n_samples=self.n_samples, n_features=2, n_informative=2, n_redundant=0, random_state=20)
        elif self.make_data == 3:
            X, y = make_circles(n_samples=self.n_samples, noise=0.2, factor=0.2, random_state=0)
        elif self.make_data == 4:
            # BUG FIX: n_samples was hard-coded to 500 here, ignoring --n_samples.
            X, y = make_moons(n_samples=self.n_samples, noise=0.2, random_state=0)
        else:
            raise ValueError("Please choose right type of making dataset")
        self.draw(X, y)
        # BUG FIX: split_dataset returns (X_train, y_train, X_test, y_test);
        # the old code unpacked it as (X_train, X_test, y_train, y_test),
        # silently swapping the training labels with the test features.
        X_train, y_train, X_test, y_test = self.split_dataset(X, y)
        return X_train, y_train, X_test, y_test

    def draw(self, X, y):
        """Scatter-plot the generated 2-D points colored by class."""
        plt.title(" {} datasets, n_samples = {}".format(self.make_data, self.n_samples))
        plt.scatter(X[:, 0], X[:, 1], marker="o", c=y, s=25, edgecolors='k')
        plt.show()


class SklearnModelSelection:
    """Thin wrappers around sklearn model-selection utilities: CV scoring,
    validation curves, and the four grid/random search variants."""

    def __init__(self, parser):
        """Keep the model-selection settings from the parsed CLI arguments."""
        self.cv = parser.cv
        self.param_name = parser.param_name
        self.grid_search = parser.grid_search
        self.param_range = parser.param_range
        self.param_grid = parser.param_grid

    def No_Cross_Validate(self, clf, X_train, y_train):
        """Report the plain training score of an already-fitted classifier."""
        print("The train scores of LinearSVC is {}".format(clf.score(X_train, y_train)))

    def Cross_Validation(self, clf, X_train, y_train):
        """Report the per-fold scores from cross_val_score over self.cv folds."""
        scores = cross_val_score(clf, X_train, y_train, cv=self.cv)
        print("The train scores of LinearSVC by 'cross_validation' split {} partitions is {}".format(self.cv, scores))

    def Cross_Validate(self, clf, X_train, y_train):
        """Report per-fold scores plus fit/score timings from cross_validate."""
        scores = cross_validate(clf, X_train, y_train, cv=self.cv)
        print("The train scores of LinearSVC by 'cross_validate' split {} partitions is {},\n"
              "the fit_time is {}, \n"
              "the score_time is {}"
              .format(self.cv, scores['test_score'], scores['fit_time'], scores['score_time']))

    def Validation_Curve(self, clf, X_train, y_train):
        """Sweep self.param_name over self.param_range, then print and plot
        train/validation accuracy with mean +/- std bands."""
        train_scores, test_scores = validation_curve(clf, X_train, y_train, scoring="accuracy",
                                                     cv=self.cv, param_name=self.param_name, param_range=self.param_range)
        # BUG FIX: this message previously said 'cross_validate' (copy-paste);
        # this method runs validation_curve.
        print("The train scores of LinearSVC by 'validation_curve' split {} partitions is {}, "
              "valid scores is {}"
              .format(self.cv, train_scores, test_scores))
        train_scores_mean = np.mean(train_scores, axis=1)
        train_scores_std = np.std(train_scores, axis=1)
        test_scores_mean = np.mean(test_scores, axis=1)
        test_scores_std = np.std(test_scores, axis=1)

        plt.title("Validation Curve with SVM")
        plt.xlabel(self.param_name)
        plt.ylabel("Score")
        plt.ylim(0.0, 1.1)
        lw = 2
        # NOTE(review): semilogx requires strictly positive param_range values.
        plt.semilogx(
            self.param_range, train_scores_mean, label="Training score", color="darkorange", lw=lw
        )
        plt.fill_between(
            self.param_range,
            train_scores_mean - train_scores_std,
            train_scores_mean + train_scores_std,
            alpha=0.2,
            color="darkorange",
            lw=lw,
        )
        plt.semilogx(
            self.param_range, test_scores_mean, label="Cross-validation score", color="navy", lw=lw
        )
        plt.fill_between(
            self.param_range,
            test_scores_mean - test_scores_std,
            test_scores_mean + test_scores_std,
            alpha=0.2,
            color="navy",
            lw=lw,
        )
        plt.legend(loc="best")
        plt.show()

    def Grid_Search_CV(self, svc, X_train, y_train):
        """Exhaustive grid search over self.param_grid; returns the best params."""
        grid_search = GridSearchCV(estimator=svc, param_grid=self.param_grid, cv=self.cv)
        grid_search.fit(X_train, y_train)
        print("Best: %f using %s" % (grid_search.best_score_, grid_search.best_params_))
        self.draw_param_search(grid_search)
        return grid_search.best_params_

    def Randomized_SearchCV(self, svc, X_train, y_train):
        """Randomized search over self.param_grid; returns the best params."""
        grid_search = RandomizedSearchCV(estimator=svc, param_distributions=self.param_grid, cv=self.cv)
        grid_search.fit(X_train, y_train)
        print("Best: %f using %s" % (grid_search.best_score_, grid_search.best_params_))
        self.draw_param_search(grid_search)
        return grid_search.best_params_

    def Halving_Grid_SearchCV(self, svc, X_train, y_train):
        """Successive-halving grid search; returns the best params."""
        grid_search = HalvingGridSearchCV(estimator=svc, param_grid=self.param_grid, cv=self.cv)
        grid_search.fit(X_train, y_train)
        print("Best: %f using %s" % (grid_search.best_score_, grid_search.best_params_))
        self.draw_param_search(grid_search)
        return grid_search.best_params_

    def Halving_Random_SearchCV(self, svc, X_train, y_train):
        """Successive-halving randomized search; returns the best params."""
        grid_search = HalvingRandomSearchCV(estimator=svc, param_distributions=self.param_grid, cv=self.cv)
        grid_search.fit(X_train, y_train)
        print("Best: %f using %s" % (grid_search.best_score_, grid_search.best_params_))
        self.draw_param_search(grid_search)
        return grid_search.best_params_

    def draw_param_search(self, grid_search):
        """Plot mean test score vs. degree for each (kernel, gamma) combination.

        NOTE(review): this assumes param_grid contains exactly the keys
        'kernel', 'gamma' and 'degree' (true for the default grid) -- it will
        raise KeyError for any other grid; confirm before changing param_grid.
        """
        means = grid_search.cv_results_['mean_test_score']
        params = grid_search.cv_results_['params']

        # Group the (degree, score) pairs by "kernel_gamma".
        results = {}
        for mean, param in zip(means, params):
            key = param['kernel'] + '_' + param['gamma']
            if key in results.keys():
                results[key].append([param['degree'], mean])
            else:
                results[key] = [[param['degree'], mean]]

        for key in results.keys():
            results[key].sort()
            degree_mean = np.array(results[key])
            # Exhaustive searches (1, 3) cover every degree, so a line makes
            # sense; randomized searches (2, 4) get scatter points instead.
            if self.grid_search == 1 or self.grid_search == 3:
                plt.plot(degree_mean[:, 0], degree_mean[:, 1], label=key)
            else:
                plt.scatter(degree_mean[:, 0], degree_mean[:, 1], label=key)
        plt.legend()
        plt.xlabel('degree')
        plt.ylabel('mean test score')
        plt.show()


class SklearnSVM(SklearnModelSelection):
    """Train an sklearn SVC with the configured kernel/C, run the selected
    cross-validation scheme, and plot decision boundaries for 2-D data."""

    def __init__(self, parser):
        """Keep the SVM hyper-parameters from the parsed CLI arguments."""
        super(SklearnSVM, self).__init__(parser)
        self.C = parser.C
        self.kernel = parser.kernel
        self.cv = parser.cv
        self.cross_validation = parser.cross_validation

    def SVC(self, X_train, y_train, X_test, y_test):
        """Fit an SVC, report coefficients/plots, cross-validate, then test.

        NOTE(review): the method name shadows sklearn's SVC only as a class
        attribute; the bare `SVC(...)` call below still resolves to the
        imported sklearn.svm.SVC.
        """
        start_time = timeit.default_timer()
        clf = SVC(random_state=0, tol=1e-5, C=self.C, kernel=self.kernel)
        clf.fit(X_train, y_train)
        if self.kernel == 'linear':
            # coef_/intercept_ are only available for a linear kernel.
            print("The coefficient of SVM is: {}".format(clf.coef_))
            print("The intercept of SVM is: {}".format(clf.intercept_))
        if np.shape(X_train)[1] == 2:
            # Boundary plots only make sense in a 2-D feature space.
            self.draw_result(clf, X_train, y_train, 'train')

        # Cross validation
        if self.cross_validation == 0:
            self.No_Cross_Validate(clf, X_train, y_train)
        elif self.cross_validation == 1:
            self.Cross_Validation(clf, X_train, y_train)
        elif self.cross_validation == 2:
            self.Cross_Validate(clf, X_train, y_train)
        elif self.cross_validation == 3:
            self.Validation_Curve(clf, X_train, y_train)
        else:
            raise ValueError("Please choose right type of cross validation~")
        finish_time = timeit.default_timer()
        # NOTE(review): '{:.24f}' prints far more digits than the timer can
        # resolve; a handful of decimals would suffice.
        print("Running time of SVM: {:.24f} seconds".format(finish_time - start_time))

        # Testing
        self.test(clf, X_test, y_test)

    def draw_result(self, clf, X, y, type):
        """Plot decision/margin boundaries and support vectors for 2-D data.

        Args:
            clf: fitted SVC.
            X, y: 2-D feature matrix and labels to scatter.
            type: 'train' or 'test'; controls how support vectors are chosen.
                  NOTE(review): the parameter name shadows the builtin `type`.
        """
        ax = plt.subplot(1, 1, 1)
        if self.kernel == 'linear':
            # For a linear kernel, draw each separating line analytically from
            # the learned weights: y = w*x + b, with the two margin lines at
            # decision-function values +1 and -1.
            for i in range(len(clf.intercept_)):
                w = -clf.coef_[i, 0] / clf.coef_[i, 1]
                b0 = -clf.intercept_[i] / clf.coef_[i, 1]
                b1 = -1 * (clf.intercept_[i] + 1) / clf.coef_[i, 1]
                b2 = -1 * (clf.intercept_[i] - 1) / clf.coef_[i, 1]
                x = np.arange(np.min(X[:, 0]),
                              np.max(X[:, 0]), 0.1)
                y0 = w * x + b0  # decision boundary
                y1 = w * x + b1  # margin boundary
                y2 = w * x + b2  # margin boundary
                plt.plot(x, y0, c='r', label='decision boundary')
                plt.plot(x, y1, '--', c='g', label='margin boundary')
                plt.plot(x, y2, '--', c='g')
        else:
            # Non-linear kernels: let sklearn shade the predicted regions.
            DecisionBoundaryDisplay.from_estimator(clf, X, response_method="predict", cmap=plt.cm.RdYlBu, ax=ax)

        # Original data points
        plt.scatter(X[:, 0], X[:, 1], c=y)
        sv = []
        if type == 'train':
            sv = clf.support_vectors_  # support vectors known from training
        else:
            # For test data, approximate "support vectors" as the points lying
            # on or inside the margin (|decision function| <= 1).
            dist = clf.decision_function(X)
            for i in range(len(dist)):
                if np.abs(np.min(dist[i])) <= 1.0:
                    sv.append(X[i])
            sv = np.array(sv)
        if np.shape(sv)[0] != 0:
            plt.scatter(sv[:, 0], sv[:, 1], c="w", s=300, alpha=0.5, label='support vector')
        plt.title("The decision boundary, margin boundary and support vector of {}".format(type))
        plt.legend()
        plt.show()

    def test(self, clf, X_test, y_test):
        """Report the test-set score and, for 2-D data, plot the boundaries."""
        print("The test scores of LinearSVC is {}".format(clf.score(X_test, y_test)))
        if np.shape(X_test)[1] == 2:
            self.draw_result(clf, X_test, y_test, 'test')


class SklearnGridSearch(SklearnSVM):
    """SVM whose hyper-parameters are chosen by one of the four search strategies."""

    def __init__(self, parser):
        """Keep the grid-search strategy selector in addition to the SVM settings."""
        super(SklearnGridSearch, self).__init__(parser)
        self.grid_search = parser.grid_search

    def GridSearch(self, X_train, y_train):
        """Run the configured search strategy and return the best parameter dict.

        Raises:
            ValueError: for an unknown grid_search code.
        """
        svc = SVC()
        if self.grid_search == 1:
            best_params = self.Grid_Search_CV(svc, X_train, y_train)
        elif self.grid_search == 2:
            best_params = self.Randomized_SearchCV(svc, X_train, y_train)
        elif self.grid_search == 3:
            best_params = self.Halving_Grid_SearchCV(svc, X_train, y_train)
        elif self.grid_search == 4:
            best_params = self.Halving_Random_SearchCV(svc, X_train, y_train)
        else:
            raise ValueError("Please choose right type of grid search~")
        return best_params

    def SVC(self, X_train, y_train, X_test, y_test):
        """Search for the best hyper-parameters, refit an SVC with them,
        report train results, and evaluate on the test set.

        NOTE(review): C is not part of the search grid, so the refit uses
        sklearn's default C=1.0, matching the original behavior.
        """
        start_time = timeit.default_timer()
        best_params = self.GridSearch(X_train, y_train)
        clf = SVC(kernel=best_params['kernel'], gamma=best_params['gamma'], degree=best_params['degree'])
        clf.fit(X_train, y_train)
        if best_params['kernel'] == 'linear':
            # BUG FIX: these messages said "LinearRegression" (copied from a
            # regression script); the model here is an SVC.
            print("The coefficient of SVC is: {}".format(clf.coef_))
            print("The intercept of SVC is: {}".format(clf.intercept_))
        # BUG FIX: likewise, the score message mislabeled the model as LinearSVC.
        print("The train scores of SVC is {}".format(clf.score(X_train, y_train)))
        self.draw_result(clf, X_train, y_train, 'train')
        finish_time = timeit.default_timer()
        # BUG FIX: '{:.24f}' printed meaningless digits far beyond timer resolution.
        print("Running time of GridSearch of SVM: {:.4f} seconds".format(finish_time - start_time))

        self.test(clf, X_test, y_test)


if __name__ == "__main__":
    # Load the command-line arguments.
    args = get_arguments()

    # -----------Problem 1-------------
    args.data_name = 1
    # ---------Data preprocessing-------------
    # BUG FIX: the original rebound the class names themselves
    # (e.g. `MyPreprocessing = MyPreprocessing(parser)`), shadowing the classes
    # and breaking any later re-instantiation (see Problem 3 below).
    preprocessor = MyPreprocessing(args)
    # Load the dataset (iris returns a DataFrame).
    df = preprocessor.load_dataset()
    # Visualize the dataset to judge separability.
    preprocessor.draw_dataset(df)
    # Filter to the configured classes/features.
    datas, labels = preprocessor.choose_dataset(df)
    # Split into train and test sets.
    X_train, y_train, X_test, y_test = preprocessor.split_dataset(datas, labels)
    # ---------Model training-------------
    svm_model = SklearnSVM(args)
    # Train/evaluate on the iris dataset.
    svm_model.SVC(X_train, y_train, X_test, y_test)

    # # -----------Problem 2-------------
    # # Synthetic random dataset.
    # maker = MakeDataset(args)
    # X_random_train, y_random_train, X_random_test, y_random_test = maker.makedataset()
    # searcher = SklearnGridSearch(args)
    # searcher.SVC(X_random_train, y_random_train, X_random_test, y_random_test)

    # # -----------Problem 3-------------
    # args.data_name = 2
    # preprocessor = MyPreprocessing(args)
    # datas, target = preprocessor.load_dataset()
    # datas = preprocessor.normalize_dataset(datas)
    # X_train, y_train, X_test, y_test = preprocessor.split_dataset(datas, target)
    # svm_model = SklearnSVM(args)
    # svm_model.SVC(X_train, y_train, X_test, y_test)
