import argparse
import timeit
import warnings
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

from sklearn.datasets import fetch_california_housing
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import cross_validate, train_test_split, validation_curve, RandomizedSearchCV, \
    HalvingGridSearchCV, KFold
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor

from MyDecisionTree import CART_decision_tree


def get_arguments():
    """Build and parse the command-line arguments for the experiment.

    Returns:
        argparse.Namespace with the attributes: cv, test_size, random_state,
        param_name, param_range, param_grid, factor, grid_search, n_estimators.
    """
    parser = argparse.ArgumentParser(description='RandomForest')
    # NOTE: the default is a KFold splitter object; a value given on the
    # command line is parsed as int. Both forms are accepted by sklearn's
    # `cv` parameter, so the mixed types are intentional.
    parser.add_argument('--cv', type=int, default=KFold(n_splits=5, shuffle=True, random_state=1111),
                        help='the value of partitions in cross validation')
    parser.add_argument('--test_size', type=float, default=0.90, help='the proportion of test data')
    parser.add_argument('--random_state', type=int, default=42, help='the random seed of dataset split')
    parser.add_argument('--param_name', type=str, default='n_estimators', help='the parameter in validation_curve')
    # No `type=` here: the range is only usable via its default (an ndarray);
    # a command-line override would arrive as a plain string.
    parser.add_argument('--param_range', default=np.arange(1, 11, 1), help='the range of parameter in validation_curve')
    # Bug fix: `type=dict` removed. argparse applies `type` to the raw CLI
    # string, and dict('...') raises, so any command-line value crashed the
    # parser. The grid is effectively default-only.
    parser.add_argument('--param_grid',
                        default={
                            'ccp_alpha': np.arange(0, 1, 0.1),
                            'n_estimators': np.arange(1, 16, 1)
                        },
                        help='the parameter and its values in grid search')
    parser.add_argument('--factor', type=int, default=3,
                        help='The proportion of sample size added in each iteration is also the proportion of parameter'
                             'combination retained in each iteration')
    parser.add_argument('--grid_search', type=int, default=2, choices=(1, 2),
                        help='the type of gridsearch,'
                             '1: Randomized_SearchCV,'
                             '2: Halving_Grid_SearchCV')
    parser.add_argument('--n_estimators', type=int, default=5, help='the number of estimators in my random forest')

    args = parser.parse_args()
    return args


class MyPreprocessing:
    """Load the California housing dataset and split it into train/test sets."""

    def __init__(self, parser):
        # Only the two split-related settings are needed from the arguments.
        self.test_size = parser.test_size
        self.random_state = parser.random_state

    def load_dataset(self):
        """Fetch the dataset, print a summary of it, and return (data, target)."""
        # Silence the deprecation/download chatter sklearn may emit here.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            dataset = fetch_california_housing()
        data = dataset.data
        target = dataset.target
        print("----------------------The information of dataset start----------------------")
        print("The description of datasets is: ", end="")
        print(dataset.DESCR)
        print("The feature names of datasets is: ", end="")
        print(*dataset.feature_names)
        print("The target names of datasets is: ", end="")
        print(*dataset.target_names)
        print("The shape of dataset is: ", end="")
        print(data.shape)
        print("The type of features of dataset is: ", end="")
        print(data.dtype)
        print("----------------------The information of dataset end----------------------")
        return data, target

    def split_dataset(self, datas, labels):
        """Split into train/test and return X_train, y_train, X_test, y_test with 1-D targets."""
        assert 0 < self.test_size < 1, "Please choose right test size between 0 and 1"
        X_train, X_test, y_train, y_test = train_test_split(
            datas, labels, test_size=self.test_size, random_state=self.random_state)
        # Targets are returned flattened to 1-D vectors.
        return X_train, y_train.reshape(-1), X_test, y_test.reshape(-1)


class SklearnModelSelection:
    """Model-selection helpers: cross-validation, a validation curve, and two
    flavours of hyper-parameter search (randomized and successive-halving)."""

    def __init__(self, parser):
        self.cv = parser.cv
        self.param_name = parser.param_name
        self.param_range = parser.param_range
        self.param_grid = parser.param_grid
        self.factor = parser.factor

    def Cross_Validate(self, clf, X_train, y_train, type):
        """Run cross_validate with r2 scoring and report the mean held-out score.

        `type` is a display label for the estimator (e.g. 'DecisionTree').
        Returns the full scores dict from sklearn.cross_validate.
        """
        scores = cross_validate(clf, X_train, y_train, cv=self.cv,
                                scoring="r2",
                                return_train_score=True)
        # Bug fix: the printed mean is of scores['test_score'] (the held-out
        # folds), but the old message mislabelled it as "train scores".
        print("The validation scores of {} by 'cross_validate' split {} partitions is {}."
              .format(type, self.cv, np.mean(scores['test_score'])))
        return scores

    def Validation_Curve(self, clf, X_train, y_train):
        """Sweep self.param_name over self.param_range, plot the curve, and
        return the parameter value with the best mean validation score."""
        train_scores, test_scores = validation_curve(clf, X_train, y_train, scoring="r2",
                                                     cv=self.cv, param_name=self.param_name,
                                                     param_range=self.param_range)
        # Bug fix: the message previously said 'cross_validate' although this
        # method runs validation_curve.
        print("The train scores of RandomForest by 'validation_curve' split {} partitions is {}, "
              "valid scores is {}"
              .format(self.cv, np.mean(train_scores), np.mean(test_scores)))
        # Aggregate per parameter value (axis=1 averages over the CV folds).
        train_scores_mean = np.mean(train_scores, axis=1)
        train_scores_std = np.std(train_scores, axis=1)
        test_scores_mean = np.mean(test_scores, axis=1)
        test_scores_std = np.std(test_scores, axis=1)
        self.draw_validation_curve(train_scores_mean, train_scores_std, test_scores_mean, test_scores_std)
        best_arg = np.argmax(test_scores_mean)
        best_para = self.param_range[best_arg]
        print("The best {} is {}".format(self.param_name, best_para))
        return best_para

    def draw_validation_curve(self, train_scores_mean, train_scores_std, test_scores_mean, test_scores_std):
        """Plot training/validation score means with a +/- one-std band."""
        plt.title("Validation Curve with RandomForest")
        plt.xlabel(self.param_name)
        plt.ylabel("Score")
        plt.ylim(0.0, 1.1)
        lw = 2
        plt.plot(
            self.param_range, train_scores_mean, label="Training score", color="darkorange", lw=lw
        )
        plt.fill_between(
            self.param_range,
            train_scores_mean - train_scores_std,
            train_scores_mean + train_scores_std,
            alpha=0.2,
            color="darkorange",
            lw=lw,
        )
        plt.plot(
            self.param_range, test_scores_mean, label="Cross-validation score", color="navy", lw=lw
        )
        plt.fill_between(
            self.param_range,
            test_scores_mean - test_scores_std,
            test_scores_mean + test_scores_std,
            alpha=0.2,
            color="navy",
            lw=lw,
        )
        plt.legend(loc="best")
        plt.show()

    def Randomized_SearchCV(self, svc, X_train, y_train):
        """Randomized search over self.param_grid; returns the best params dict."""
        grid_search = RandomizedSearchCV(estimator=svc, param_distributions=self.param_grid, cv=self.cv)
        grid_search.fit(X_train, y_train)
        print("Best: %f using %s" % (grid_search.best_score_, grid_search.best_params_))
        self.draw_param_search(grid_search, 'Randomized_SearchCV')
        return grid_search.best_params_

    def Halving_Grid_SearchCV(self, svc, X_train, y_train):
        """Successive-halving grid search; `factor` controls how aggressively
        candidates are pruned each iteration. Returns the best params dict."""
        grid_search = HalvingGridSearchCV(estimator=svc, param_grid=self.param_grid, cv=self.cv, factor=self.factor)
        grid_search.fit(X_train, y_train)
        print("Best: %f using %s" % (grid_search.best_score_, grid_search.best_params_))
        self.draw_param_search(grid_search, 'Halving_Grid_SearchCV')
        return grid_search.best_params_

    def draw_param_search(self, grid_search, type):
        """Plot mean test score against n_estimators, one series per ccp_alpha.

        NOTE(review): assumes every entry of cv_results_['params'] contains the
        keys 'ccp_alpha' and 'n_estimators' — i.e. the default param_grid.
        """
        means = grid_search.cv_results_['mean_test_score']
        params = grid_search.cv_results_['params']

        # Group (n_estimators, mean score) pairs by their ccp_alpha value.
        results = {}
        for mean, param in zip(means, params):
            key = param['ccp_alpha']
            if key in results:
                results[key].append([param['n_estimators'], mean])
            else:
                results[key] = [[param['n_estimators'], mean]]

        for key in results:
            # Sort by n_estimators so the line plot is monotone on the x axis.
            results[key].sort()
            degree_mean = np.array(results[key])
            if type == 'Halving_Grid_SearchCV':
                plt.plot(degree_mean[:, 0], degree_mean[:, 1], label='ccp_alpha: '+str(key))
            else:
                plt.scatter(degree_mean[:, 0], degree_mean[:, 1], label='ccp_alpha: '+str(key))
        plt.legend()
        plt.xlabel('n_estimators')
        plt.ylabel('mean test score')
        plt.show()


class SklearnDecisionTree(SklearnModelSelection):
    """A single sklearn decision-tree regressor, evaluated via the inherited
    model-selection utilities."""

    def __init__(self, parser):
        super().__init__(parser)

    def decision_tree(self, X_train, y_train, X_test, y_test):
        """Fit a DecisionTreeRegressor, cross-validate it, score it on the
        test split, and return the cross-validation scores dict."""
        model = DecisionTreeRegressor()
        model.fit(X_train, y_train)
        cv_scores = self.Cross_Validate(model, X_train, y_train, 'DecisionTree')
        self.test(model, X_test, y_test, 'DecisionTree')
        return cv_scores

    def test(self, clf, X_test, y_test, type):
        """Print the fitted estimator's score on the held-out test set."""
        test_score = clf.score(X_test, y_test)
        print("The test scores of {} is {}".format(type, test_score))

    def select_param(self, X_train, y_train):
        """Draw a validation curve for a RandomForestRegressor over the
        configured parameter range (return value is intentionally dropped)."""
        model = RandomForestRegressor()
        self.Validation_Curve(model, X_train, y_train)


class SklearnRandomForest(SklearnDecisionTree):
    """A sklearn random-forest regressor plus a configurable grid search."""

    def __init__(self, parser):
        super().__init__(parser)
        # 1 -> RandomizedSearchCV, 2 -> HalvingGridSearchCV
        self.grid_search = parser.grid_search
        self.n_estimators = parser.n_estimators

    def random_forest(self, X_train, y_train, X_test, y_test):
        """Fit a RandomForestRegressor, cross-validate it, score it on the
        test split, and return the cross-validation scores dict."""
        model = RandomForestRegressor(n_estimators=self.n_estimators)
        model.fit(X_train, y_train)
        cv_scores = self.Cross_Validate(model, X_train, y_train, 'RandomForest')
        self.test(model, X_test, y_test, 'RandomForest')
        return cv_scores

    def Grid_Search(self, X_train, y_train):
        """Dispatch to the configured search strategy and return best params."""
        model = RandomForestRegressor()
        if self.grid_search == 1:
            return self.Randomized_SearchCV(model, X_train, y_train)
        if self.grid_search == 2:
            return self.Halving_Grid_SearchCV(model, X_train, y_train)
        # Unreachable when args come from argparse (choices=(1, 2)), kept as a
        # guard for direct programmatic use.
        raise ValueError("Please choose right type of grid search~")


class MyResultVisualization:
    """Plot helpers for comparing per-fold cross-validation results."""

    def __init__(self, parser):
        # No configuration is needed; the parser is accepted for interface
        # symmetry with the other classes.
        pass

    def TreeVSForest(self, scores_DecisionTree, scores_RandomForest):
        """Plot the per-fold 'test_score' series of both models on one chart."""
        tree_series = scores_DecisionTree['test_score']
        forest_series = scores_RandomForest['test_score']
        plt.plot(tree_series, label='the test_score of DecisionTree')
        plt.plot(forest_series, label='the test_score of RandomForest')
        plt.title("RandomForest VS DecisionTree")
        plt.ylabel('test_score')
        plt.xlabel('cross_validate')
        plt.legend()
        plt.show()


class MyRandomForest:
    """A hand-written random forest: an ensemble of CART regression trees,
    each fit on a random subsample, with predictions averaged across trees."""

    # Feature column labels expected by CART_decision_tree's DataFrame input.
    _COLUMNS = ['1', '2', '3', '4', '5', '6', '7', '8']

    def __init__(self, parser):
        self.n_estimators = parser.n_estimators

    def random_choose_data(self, X_train, y_train, random_state=1111):
        """Draw a random subsample of size ~1/n_estimators of the data.

        `random_state` was added (default keeps the old value) so callers can
        vary the seed per tree.
        """
        _, X_sub, _, y_sub = train_test_split(
            X_train, y_train, test_size=1 / self.n_estimators, random_state=random_state)
        return X_sub, y_sub

    def decision_tree(self, x_train, y_train):
        """Fit and return one CART regression tree (depth-limited to 3)."""
        tree = CART_decision_tree(tree='mse', criterion='mse', max_depth=3)
        tree.fit(x_train, y_train)
        return tree

    def fit(self, X_train, y_train):
        """Train n_estimators trees, each on its own subsample; return the list."""
        forest = []
        for i in range(self.n_estimators):
            # Bug fix: the seed used to be fixed at 1111 for every iteration,
            # so all trees were trained on the identical subsample. Varying it
            # per tree restores the diversity a random forest relies on.
            X, y = self.random_choose_data(X_train, y_train, random_state=1111 + i)
            X = pd.DataFrame(X, columns=self._COLUMNS)
            y = pd.DataFrame(y, columns=['target']).values
            forest.append(self.decision_tree(X, y))
        return forest

    def predict(self, forest, X_test):
        """Average per-sample predictions over the trees; returns an array of
        length len(X_test)."""
        X_test = pd.DataFrame(X_test, columns=self._COLUMNS)
        pre = [tree.predict(X_test) for tree in forest]
        # Bug fix: np.mean(pre) collapsed ALL predictions to a single scalar;
        # axis=0 averages across trees and keeps one value per sample.
        return np.mean(pre, axis=0)

    def score(self, pre, y_test):
        """Print and return the RMSE between predictions and targets.

        Bug fix: the old code printed the MSE while labelling it RMSE; the
        square root is now taken.
        """
        score = np.sqrt(np.power(pre - y_test, 2).sum() / len(y_test))
        print("The RMSE of MyRandomForest is {}".format(score))
        return score







if __name__ == '__main__':
    # Parse command-line arguments.
    args = get_arguments()

    # Load and split the dataset.
    preprocessing = MyPreprocessing(args)
    features, labels = preprocessing.load_dataset()
    X_train, y_train, X_test, y_test = preprocessing.split_dataset(features, labels)

    # Fit and evaluate the sklearn models.
    sk_forest = SklearnRandomForest(args)
    tree_scores = sk_forest.decision_tree(X_train, y_train, X_test, y_test)
    forest_scores = sk_forest.random_forest(X_train, y_train, X_test, y_test)

    # Compare decision tree vs. random forest per CV fold.
    visual = MyResultVisualization(args)
    visual.TreeVSForest(tree_scores, forest_scores)

    # Choose a suitable n_estimators via the validation curve.
    sk_forest.select_param(X_train, y_train)

    # Hyper-parameter grid search.
    sk_forest.Grid_Search(X_train, y_train)

    # The hand-written random forest.
    my_forest = MyRandomForest(args)
    trees = my_forest.fit(X_train, y_train)
    predictions = my_forest.predict(trees, X_test)
    my_forest.score(predictions, y_test)
