import argparse
import timeit
import warnings
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.datasets import load_wine, fetch_california_housing
from sklearn.model_selection import train_test_split, validation_curve, KFold, cross_val_score, GridSearchCV
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingRegressor, RandomForestRegressor


def get_arguments():
    """Build and parse the command-line options for the AdaBoost/GBDT experiments.

    Returns:
        argparse.Namespace: the parsed options. Several defaults are non-CLI
        objects (a KFold splitter, numpy ranges, and param-grid dicts) and are
        only meaningful when the corresponding flag is left at its default.
    """
    arg_parser = argparse.ArgumentParser(description='AdaBoost')
    # Dataset / split configuration.
    arg_parser.add_argument('--dataset', type=int, default=2, choices=(1, 2),
                            help='the type of dataset'
                                 '1: the wine dataset,'
                                 '2: the california dataset')
    arg_parser.add_argument('--test_size', type=float, default=0.33, help='the proportion of test data')
    arg_parser.add_argument('--random_state', type=int, default=42, help='the random seed of dataset split')
    # Estimator / model-selection configuration.
    arg_parser.add_argument('--algorithm', type=str, default="SAMME.R", choices=("SAMME", "SAMME.R"), help='the algorithm of AdaBoostClassifier')
    arg_parser.add_argument('--cv', type=int, default=KFold(n_splits=5, shuffle=True, random_state=1111),
                            help='the value of partitions in cross validation')
    arg_parser.add_argument('--grid_search', type=int, default=2, choices=(1, 2),
                            help='the type of gridsearch,'
                                 '1: validation_curve,'
                                 '2: GridSearchCV')
    # AdaBoost hyper-parameter search space.
    arg_parser.add_argument('--adaboost_param_name', type=str, default='n_estimators',
                            help='the parameter in validation_curve')
    arg_parser.add_argument('--adaboost_param_range', default=np.arange(1, 201, 1),
                            help='the range of parameter in validation_curve')
    arg_parser.add_argument('--adaboost_param_grid',
                            default={
                                'learning_rate': np.array([0.125, 0.25, 0.5, 1, 2]),
                                'n_estimators': np.arange(1, 201, 1)
                            },
                            help='the parameter and its values in grid search')
    # GBDT hyper-parameter search space (coarser n_estimators step of 5).
    arg_parser.add_argument('--GBDT_param_name', type=str, default='n_estimators',
                            help='the parameter in validation_curve')
    arg_parser.add_argument('--GBDT_param_range', default=np.arange(1, 201, 5),
                            help='the range of parameter in validation_curve')
    arg_parser.add_argument('--GBDT_param_grid',
                            default={
                                'learning_rate': np.array([0.125, 0.25, 0.5, 1, 2]),
                                'n_estimators': np.arange(1, 201, 5)
                            },
                            help='the parameter and its values in grid search')
    return arg_parser.parse_args()


class MyPreprocessing:
    """Load one of two sklearn datasets and split it into train/test arrays."""

    def __init__(self, parser):
        # Keep only the parsed options this class actually uses.
        self.dataset = parser.dataset
        self.random_state = parser.random_state
        self.test_size = parser.test_size

    def load_dataset(self):
        """Return (features, targets) for the configured dataset (1=wine, 2=california)."""
        with warnings.catch_warnings():
            # Silence sklearn's dataset-loading warnings.
            warnings.filterwarnings("ignore")
            if self.dataset == 1:
                bundle = load_wine()
            elif self.dataset == 2:
                bundle = fetch_california_housing()
            else:
                raise ValueError("Please choose right dataset", self.dataset)
        return bundle.data, bundle.target

    def split_dataset(self, datas, labels):
        """Split into (X_train, y_train, X_test, y_test) with 1-D target vectors."""
        assert 0 < self.test_size < 1, "Please choose right test size between 0 and 1"
        X_tr, X_te, y_tr, y_te = train_test_split(
            datas, labels, test_size=self.test_size, random_state=self.random_state)
        # Flatten the targets so downstream estimators receive shape (n,).
        return X_tr, y_tr.reshape(np.shape(y_tr)[0]), X_te, y_te.reshape(np.shape(y_te)[0])


class SklearnModelSelection:
    """Model-selection helpers (cross-validation, validation curves, grid search).

    Shared base for the estimator drivers below. ``cv`` may be an integer fold
    count or a splitter object such as ``KFold``.
    """

    def __init__(self, parser):
        # cv comes straight from the parsed arguments (int or KFold by default).
        self.cv = parser.cv

    def Cross_Validation(self, clf, X_train, y_train):
        """Print the per-fold and mean cross-validation scores of ``clf``."""
        scores = cross_val_score(clf, X_train, y_train, cv=self.cv)
        print("The train scores of Adaboost by 'Cross_Validation' split {} partitions is {},\n"
              "and the average scores is {}"
              .format(self.cv, scores, np.mean(scores)))

    def Validation_Curve(self, clf, X_train, y_train, param_name, param_range, scoring=None):
        """Sweep one hyper-parameter, plot the curve, and return its best value.

        Args:
            clf: unfitted estimator to evaluate.
            param_name: name of the hyper-parameter to sweep.
            param_range: iterable of candidate values.
            scoring: sklearn scoring string; ``None`` (the default) uses the
                estimator's own default scorer. This fixes a bug where
                ``scoring="accuracy"`` was hard-coded and raised for the
                regression (GBDT) path; for classifiers the default scorer is
                accuracy, so classifier results are unchanged.

        Returns:
            The value from ``param_range`` with the highest mean validation score.
        """
        train_scores, test_scores = validation_curve(clf, X_train, y_train, scoring=scoring,
                                                     cv=self.cv, param_name=param_name, param_range=param_range)
        print("The train scores of AdaBoost by 'cross_validate' split {} partitions is {}, "
              "valid scores is {}"
              .format(self.cv, np.mean(train_scores), np.mean(test_scores)))
        # Aggregate across folds (axis=1) for the plot and for best-value selection.
        train_scores_mean = np.mean(train_scores, axis=1)
        train_scores_std = np.std(train_scores, axis=1)
        test_scores_mean = np.mean(test_scores, axis=1)
        test_scores_std = np.std(test_scores, axis=1)
        self.draw_validation_curve(train_scores_mean, train_scores_std, test_scores_mean, test_scores_std, param_name, param_range)
        best_arg = np.argmax(test_scores_mean)
        best_para = param_range[best_arg]
        print("The best {} is {}".format(param_name, best_para))
        return best_para

    def draw_validation_curve(self, train_scores_mean, train_scores_std, test_scores_mean, test_scores_std, param_name, param_range):
        """Plot mean train/validation scores with a +/- one-std band."""
        plt.title("Validation Curve with AdaBoost")
        plt.xlabel(param_name)
        plt.ylabel("Score")
        plt.ylim(0.0, 1.1)
        lw = 2
        plt.plot(
            param_range, train_scores_mean, label="Training score", color="darkorange", lw=lw
        )
        plt.fill_between(
            param_range,
            train_scores_mean - train_scores_std,
            train_scores_mean + train_scores_std,
            alpha=0.2,
            color="darkorange",
            lw=lw,
        )
        plt.plot(
            param_range, test_scores_mean, label="Cross-validation score", color="navy", lw=lw
        )
        plt.fill_between(
            param_range,
            test_scores_mean - test_scores_std,
            test_scores_mean + test_scores_std,
            alpha=0.2,
            color="navy",
            lw=lw,
        )
        plt.legend(loc="best")
        plt.show()

    def Grid_Search_CV(self, svc, X_train, y_train, param_grid):
        """Exhaustive grid search over ``param_grid``; plot and return the best params."""
        grid_search = GridSearchCV(estimator=svc, param_grid=param_grid, cv=self.cv)
        grid_search.fit(X_train, y_train)
        print("Best: %f using %s" % (grid_search.best_score_, grid_search.best_params_))
        self.draw_param_search(grid_search)
        return grid_search.best_params_

    def draw_param_search(self, grid_search):
        """Plot mean test score vs n_estimators, one line per learning rate.

        NOTE(review): assumes the grid contained exactly the keys
        'learning_rate' and 'n_estimators' — true for the defaults in
        get_arguments; verify if the grid is customised.
        """
        means = grid_search.cv_results_['mean_test_score']
        params = grid_search.cv_results_['params']

        # Group (n_estimators, mean score) pairs by learning rate.
        results = {}
        for mean, param in zip(means, params):
            key = param['learning_rate']
            if key in results.keys():
                results[key].append([param['n_estimators'], mean])
            else:
                results[key] = [[param['n_estimators'], mean]]

        for key in results.keys():
            # Sort by n_estimators so each line is drawn left-to-right.
            results[key].sort()
            max_depth_mean = np.array(results[key])
            plt.plot(max_depth_mean[:, 0], max_depth_mean[:, 1], label='lr = ' + str(key))
        plt.legend()
        plt.xlabel('n_estimators')
        plt.ylabel('mean test score')
        plt.show()


class SklearnAdaBoost(SklearnModelSelection):
    """AdaBoost (classification) and gradient-boosting (regression) drivers
    built on the model-selection helpers of the base class."""

    def __init__(self, parser):
        super().__init__(parser)
        # AdaBoost settings.
        self.algorithm = parser.algorithm
        self.grid_search = parser.grid_search
        self.adaboost_param_name = parser.adaboost_param_name
        self.adaboost_param_range = parser.adaboost_param_range
        self.adaboost_param_grid = parser.adaboost_param_grid
        # GBDT settings.
        self.GBDT_param_name = parser.GBDT_param_name
        self.GBDT_param_range = parser.GBDT_param_range
        self.GBDT_param_grid = parser.GBDT_param_grid

    def adaboost(self, X_train, y_train, X_test, y_test):
        """Fit a plain AdaBoostClassifier, plot staged accuracy, CV-score and test it."""
        model = AdaBoostClassifier(algorithm=self.algorithm)
        model.fit(X_train, y_train)
        self.stage(model, X_train, y_train, X_test, y_test)
        self.Cross_Validation(model, X_train, y_train)
        self.test(model, X_test, y_test)

    def GBDT(self, X_train, y_train, X_test, y_test):
        """Fit a GradientBoostingRegressor initialised from a fixed random forest."""
        base_rf = RandomForestRegressor(max_depth=22, max_features=8, min_impurity_decrease=0,
                                        n_estimators=89, random_state=1412)
        booster = GradientBoostingRegressor(init=base_rf, random_state=1412)
        booster.fit(X_train, y_train)
        self.Cross_Validation(booster, X_train, y_train)
        self.test(booster, X_test, y_test)

    def adaboost_param_select(self, X_train, y_train, X_test, y_test):
        """Tune AdaBoost via validation curve (1) or grid search (2), then evaluate."""
        model = AdaBoostClassifier()
        if self.grid_search == 1:
            best = self.Validation_Curve(model, X_train, y_train, self.adaboost_param_name, self.adaboost_param_range)
            model = AdaBoostClassifier(n_estimators=best)
        elif self.grid_search == 2:
            best = self.Grid_Search_CV(model, X_train, y_train, self.adaboost_param_grid)
            model = AdaBoostClassifier(learning_rate=best['learning_rate'], n_estimators=best['n_estimators'])
            print("The best learning rate is {}, the best estimators is {}."
                  .format(best['learning_rate'], best['n_estimators']))
        else:
            raise ValueError("Please choose right type of grid search~")
        model.fit(X_train, y_train)
        self.Cross_Validation(model, X_train, y_train)
        self.test(model, X_test, y_test)

    def GBDT_param_select(self, X_train, y_train, X_test, y_test):
        """Tune the GBDT via validation curve (1) or grid search (2), then evaluate."""
        base_rf = RandomForestRegressor(max_depth=22, max_features=14, min_impurity_decrease=0,
                                        n_estimators=89, random_state=1412)
        booster = GradientBoostingRegressor(init=base_rf, random_state=1412)
        if self.grid_search == 1:
            best = self.Validation_Curve(booster, X_train, y_train, self.GBDT_param_name, self.GBDT_param_range)
            booster = GradientBoostingRegressor(init=base_rf, random_state=1412, n_estimators=best)
        elif self.grid_search == 2:
            best = self.Grid_Search_CV(booster, X_train, y_train, self.GBDT_param_grid)
            booster = GradientBoostingRegressor(init=base_rf, random_state=1412,
                                                learning_rate=best['learning_rate'],
                                                n_estimators=best['n_estimators'])
        else:
            raise ValueError("Please choose right type of grid search~")
        booster.fit(X_train, y_train)
        self.Cross_Validation(booster, X_train, y_train)
        self.test(booster, X_test, y_test)

    def test(self, clf, X_test, y_test):
        """Print the held-out score of a fitted estimator."""
        print("The test scores of Adaboost is {}.".format(clf.score(X_test, y_test)))

    def stage(self, clf, X_train, y_train, X_test, y_test):
        """Plot train/test accuracy as a function of the number of boosting stages."""
        train_curve = list(clf.staged_score(X_train, y_train))
        test_curve = [np.mean(pred == y_test) for pred in clf.staged_predict(X_test)]
        plt.plot(train_curve, label='train')
        plt.plot(test_curve, label='test')
        plt.xlabel('Number of Estimators')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.show()


if __name__ == '__main__':
    args = get_arguments()

    # Load and split the configured dataset.
    # NOTE: the originals assigned the instances to the CLASS names
    # (MyPreprocessing = MyPreprocessing(...)), shadowing the classes; the
    # instances now have distinct names.
    preprocessing = MyPreprocessing(args)
    datas, target = preprocessing.load_dataset()
    X_train, y_train, X_test, y_test = preprocessing.split_dataset(datas, target)

    # Wine dataset (AdaBoost classification) — enable as needed.
    # assert args.dataset == 1, "Please choose wine dataset"
    # booster = SklearnAdaBoost(args)
    # booster.adaboost(X_train, y_train, X_test, y_test)
    # booster.adaboost_param_select(X_train, y_train, X_test, y_test)

    # California dataset (GradientBoostingRegressor).
    assert args.dataset == 2, "Please choose california dataset"
    booster = SklearnAdaBoost(args)
    # booster.GBDT(X_train, y_train, X_test, y_test)
    booster.GBDT_param_select(X_train, y_train, X_test, y_test)



