import argparse
import timeit
import warnings
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import graphviz
from sklearn import tree

from sklearn.datasets import load_wine, fetch_kddcup99
from sklearn.impute import SimpleImputer
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split, cross_val_score, cross_validate, validation_curve, GridSearchCV
from sklearn.preprocessing import LabelEncoder, OrdinalEncoder
from sklearn.tree import DecisionTreeClassifier, export_text


def _int_or_float(value):
    """argparse type: parse *value* as int when possible, otherwise float.

    Replaces the original ``type=int or float``, which evaluates to plain
    ``int`` and therefore rejected fractional values such as ``0.5`` even
    though sklearn accepts floats for min_samples_leaf/min_samples_split.
    """
    try:
        return int(value)
    except ValueError:
        return float(value)


def get_arguments(argv=None):
    """Build the CLI parser and parse the arguments for this script.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse. ``None`` (the default) keeps the original
        behaviour of reading ``sys.argv[1:]``, so existing callers work
        unchanged; tests can pass an explicit list.

    Returns
    -------
    argparse.Namespace
        The parsed options, including the non-CLI defaults ``param_range``
        and ``param_grid``.
    """
    parser = argparse.ArgumentParser(description='DecisionTree')
    parser.add_argument('--dataset', type=int, default=2, choices=(1, 2),
                        help='the type of dataset for decision tree'
                             '1: the wine dataset,'
                             '2: the kddcup99 dataset')
    parser.add_argument('--missing', type=int, default=0, choices=(0, 1, 2),
                        help='the type of method for dealing with missing values,'
                             '0: do not deal with missing values,'
                             '1: will drop all rows that have any missing values,'
                             '2: Impute the values using scikit-learn SimpleImpute Class')
    parser.add_argument('--strategy', type=str, default='mean', choices=('mean', 'median', 'most_frequent', 'constant'),
                        help='the strategy of SimpleImputer')
    parser.add_argument('--test_size', type=float, default=0.33, help='the proportion of test data')
    parser.add_argument('--random_state', type=int, default=42, help='the random seed of dataset split')
    parser.add_argument('--criterion', type=str, default='gini', choices=('gini', 'entropy'),
                        help='the criteria of feature selection in decision tree')
    parser.add_argument('--splitter', type=str, default='best', choices=('best', 'random'),
                        help='the criteria of feature splitter in decision')
    parser.add_argument('--max_depth', type=int, default=None, help='maximum depth of the tree(excluding the root node)')
    # BUG FIX: was ``type=int or float`` (== int), which rejected float inputs.
    parser.add_argument('--min_samples_leaf', type=_int_or_float, default=1, help='minimum number of leaf node samples')
    parser.add_argument('--min_samples_split', type=_int_or_float, default=2,
                        help='minimum sample size required for internal nodes redividing')
    parser.add_argument('--draw', type=int, default=1, choices=(1, 2, 3),
                        help='the type of tree visualization,'
                             '1: plot_tree,'
                             '2: graphviz,'
                             '3: export_text')
    parser.add_argument('--cv', type=int, default=5, help='the value of partitions in cross validation')
    parser.add_argument('--param_name', type=str, default='max_depth', help='the parameter in validation_curve')
    parser.add_argument('--param_range', default=np.arange(3, 21, 1), help='the range of parameter in validation_curve')
    parser.add_argument('--cross_validation', type=int, default=0, choices=(0, 1, 2),
                        help='the type of cross validation,'
                             '0: no cross validation,'
                             '1: cross_val_score,'
                             '2: cross_validate')
    parser.add_argument('--grid_search', type=int, default=1, choices=(1, 2),
                        help='the type of gridsearch,'
                             '1: validation_curve,'
                             '2: GridSearchCV')
    # BUG FIX: the original passed ``type=dict``, so any command-line value
    # crashed with "dictionary update sequence ..." — dict('x') is invalid.
    # The grid is a code-level default; override it here, not on the CLI.
    parser.add_argument('--param_grid',
                        default={
                            'max_depth': np.arange(3, 20, 1),
                            'min_samples_leaf': np.arange(1, 3, 1),
                            'min_samples_split': np.arange(2, 4, 1)
                        },
                        help='the parameter and its values in grid search')

    args = parser.parse_args(argv)
    return args


class MyPreprocessing:
    """Load, encode, clean and split one of the two supported datasets.

    Configured entirely from the parsed CLI options: ``--dataset``,
    ``--missing``, ``--strategy``, ``--test_size``, ``--random_state``.
    """

    def __init__(self, parser):
        # *parser* is the argparse.Namespace returned by get_arguments().
        self.dataset = parser.dataset
        self.random_state = parser.random_state
        self.test_size = parser.test_size
        self.missing = parser.missing
        self.strategy = parser.strategy

    def load_dataset(self):
        """Fetch the selected dataset.

        Returns (data, target, feature_names, target_names).
        dataset == 1: sklearn's wine dataset (numeric ndarrays, no cleaning);
        dataset == 2: the 10% kddcup99 subset as a DataFrame, which is then
        encoded and run through the missing-value handling.
        NOTE(review): any other value of self.dataset falls through and
        returns None implicitly.
        """
        # Suppress warnings emitted while fetching/parsing the datasets.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            if self.dataset == 1:
                dataset = load_wine()
                description = dataset.DESCR
                feature_names = dataset.feature_names
                target_names = dataset.target_names
                datas = dataset.data
                target = dataset.target
                print("----------------------The information of dataset start----------------------")
                print("The description of datasets is: ", end="")
                print(description)
                print("The feature names of datasets is: ", end="")
                print(*feature_names)
                print("The target names of datasets is: ", end="")
                print(*target_names)
                print("The shape of dataset is: ", end="")
                print(datas.shape)
                print("The type of features of dataset is: ", end="")
                print(datas.dtype)
                print("----------------------The information of dataset end----------------------")
                return datas, target, feature_names, target_names
            elif self.dataset == 2:
                # as_frame=True: datas/target come back as pandas objects,
                # which the encode/missing-value steps below rely on.
                dataset = fetch_kddcup99(subset=None, data_home=None, shuffle=False, random_state=None, percent10=True,
                                         download_if_missing=True, return_X_y=False, as_frame=True)
                feature_names = dataset.feature_names
                datas = dataset.data
                target = dataset.target
                datas, target, target_names = self.encode_dataset(datas, target)
                datas, target = self.deal_missing_data(datas, target)
                return datas, target, feature_names, target_names

    def encode_dataset(self, datas, target):
        """Label-encode the target and ordinal-encode the string feature columns.

        Returns (datas, target, target_names) where target is a one-column
        DataFrame of integer class codes and target_names the decoded labels.
        """
        le = LabelEncoder()
        le.fit(target)
        print("The target names of datasets is: {}".format(list(le.classes_)))
        target = pd.DataFrame(le.transform(target))

        # Columns 1:4 are the non-numeric kddcup99 fields (presumably
        # protocol_type / service / flag — confirm against the dataset docs).
        enc = OrdinalEncoder()
        datas[datas.columns[1:4]] = enc.fit_transform(datas[datas.columns[1:4]])
        return datas, target, np.array(list(le.classes_), dtype=str)

    def deal_missing_data(self, datas, target):
        """Apply the selected missing-value strategy, then split into arrays.

        The target is temporarily appended as a 'target' column so that row
        dropping (missing == 1) keeps features and labels aligned.
        Returns (data, label) as float ndarrays.
        Raises ValueError for an unrecognised self.missing value.
        """
        datas.insert(loc=len(datas.columns), column='target', value=target)
        if self.missing == 0:
            pass  # leave NaNs untouched
        elif self.missing == 1:
            datas.dropna(inplace=True)  # drop every row with any missing value
        elif self.missing == 2:
            imputer = SimpleImputer(missing_values=np.nan, strategy=self.strategy)
            # NOTE(review): this loop imputes column-by-column and also covers
            # the appended 'target' column — confirm that is intended.
            for k in range(len(datas.columns)):
                datas[[datas.columns[k]]] = imputer.fit_transform(datas[[datas.columns[k]]])
        else:
            raise ValueError('Please choose right method for dealing with missing datas', self.missing)
        # Split the combined frame back: all but the last column are features,
        # the last column is the label.
        data = np.array(datas.values[:, :-1], dtype=float)
        label = np.array(datas.to_numpy()[:, -1], dtype=float)
        return data, label

    def split_dataset(self, datas, labels):
        """Train/test split; labels are flattened to 1-D for sklearn.

        Note the return order is (X_train, y_train, X_test, y_test), which
        differs from train_test_split's own (X_train, X_test, y_train, y_test).
        """
        assert 0 < self.test_size < 1, "Please choose right test size between 0 and 1"
        X_train, X_test, y_train, y_test = train_test_split(
            datas, labels, test_size=self.test_size, random_state=self.random_state)
        return X_train, y_train.reshape(np.shape(y_train)[0]), X_test, y_test.reshape(np.shape(y_test)[0])


class SklearnModelSelection:
    """Validation and hyper-parameter-search helpers for the tree model.

    Holds the cross-validation fold count plus the parameter name, range and
    grid used by validation_curve and GridSearchCV.
    """

    def __init__(self, parser):
        # *parser* is the argparse.Namespace returned by get_arguments().
        self.cv = parser.cv
        self.param_name = parser.param_name
        self.param_range = parser.param_range
        self.param_grid = parser.param_grid

    def No_Cross_Validate(self, clf, X_train, y_train):
        """Print the plain training-set accuracy of an already-fitted classifier."""
        print("The train scores of DecisionTree by 'No_Cross_Validate' validation is {}".format(clf.score(X_train, y_train)))

    def Cross_Validation(self, clf, X_train, y_train):
        """Print the per-fold and mean accuracy from cross_val_score."""
        scores = cross_val_score(clf, X_train, y_train, cv=self.cv)
        print("The train scores of DecisionTree by 'Cross_Validation' split {} partitions is {},\n"
              "and the average scores is {}"
              .format(self.cv, scores, np.mean(scores)))

    def Cross_Validate(self, clf, X_train, y_train):
        """Print per-fold scores plus fit/score timings from cross_validate."""
        scores = cross_validate(clf, X_train, y_train, cv=self.cv)
        print("The train scores of DecisionTree by 'cross_validate' split {} partitions is {},\n"
              "the fit_time is {}, \n"
              "the score_time is {}"
              .format(self.cv, scores['test_score'], scores['fit_time'], scores['score_time']))

    def Validation_Curve(self, clf, X_train, y_train):
        """Sweep self.param_name over self.param_range with validation_curve.

        Plots the train/validation curves and returns the parameter value with
        the highest mean validation accuracy.
        """
        train_scores, test_scores = validation_curve(clf, X_train, y_train, scoring="accuracy",
                                                     cv=self.cv, param_name=self.param_name, param_range=self.param_range)
        # BUG FIX: the original message mislabelled this step as 'cross_validate'.
        print("The train scores of DecisionTree by 'validation_curve' split {} partitions is {}, "
              "valid scores is {}"
              .format(self.cv, np.mean(train_scores), np.mean(test_scores)))
        train_scores_mean = np.mean(train_scores, axis=1)
        train_scores_std = np.std(train_scores, axis=1)
        test_scores_mean = np.mean(test_scores, axis=1)
        test_scores_std = np.std(test_scores, axis=1)
        self.draw_validation_curve(train_scores_mean, train_scores_std, test_scores_mean, test_scores_std)
        best_arg = np.argmax(test_scores_mean)
        best_para = self.param_range[best_arg]
        print("The best {} is {}".format(self.param_name, best_para))
        return best_para

    def Grid_Search_CV(self, svc, X_train, y_train):
        """Run GridSearchCV over self.param_grid and return the best params."""
        grid_search = GridSearchCV(estimator=svc, param_grid=self.param_grid, cv=self.cv)
        grid_search.fit(X_train, y_train)
        print("Best: %f using %s" % (grid_search.best_score_, grid_search.best_params_))
        self.draw_param_search(grid_search)
        return grid_search.best_params_

    def draw_validation_curve(self, train_scores_mean, train_scores_std, test_scores_mean, test_scores_std):
        """Plot mean +/- std bands of train and validation scores vs the swept parameter."""
        plt.title("Validation Curve with DecisionTree")
        plt.xlabel(self.param_name)
        plt.ylabel("Score")
        plt.ylim(0.0, 1.1)
        lw = 2
        plt.plot(
            self.param_range, train_scores_mean, label="Training score", color="darkorange", lw=lw
        )
        plt.fill_between(
            self.param_range,
            train_scores_mean - train_scores_std,
            train_scores_mean + train_scores_std,
            alpha=0.2,
            color="darkorange",
            lw=lw,
        )
        plt.plot(
            self.param_range, test_scores_mean, label="Cross-validation score", color="navy", lw=lw
        )
        plt.fill_between(
            self.param_range,
            test_scores_mean - test_scores_std,
            test_scores_mean + test_scores_std,
            alpha=0.2,
            color="navy",
            lw=lw,
        )
        plt.legend(loc="best")
        plt.show()

    def draw_param_search(self, grid_search):
        """Plot mean test score vs max_depth, one curve per leaf/split combo.

        NOTE(review): this assumes param_grid contains exactly the keys
        'max_depth', 'min_samples_leaf' and 'min_samples_split' (the CLI
        default grid) — a custom grid with other keys would raise KeyError.
        """
        means = grid_search.cv_results_['mean_test_score']
        params = grid_search.cv_results_['params']

        # Group (max_depth, mean_score) points by the other two parameters.
        results = {}
        for mean, param in zip(means, params):
            key = 'min_samples_leaf: ' + str(param['min_samples_leaf']) + '_' \
                  + 'min_samples_split:' + str(param['min_samples_split'])
            results.setdefault(key, []).append([param['max_depth'], mean])

        for key in results:
            results[key].sort()  # order points by max_depth for a clean line
            max_depth_mean = np.array(results[key])
            plt.plot(max_depth_mean[:, 0], max_depth_mean[:, 1], label=key)
        plt.legend()
        plt.xlabel('max_depth')
        plt.ylabel('mean test score')
        plt.show()


class SklearnDecisionTree(SklearnModelSelection):
    """Train, validate, visualize and hyper-parameter-search a DecisionTreeClassifier."""

    def __init__(self, parser):
        super(SklearnDecisionTree, self).__init__(parser)
        self.criterion = parser.criterion
        self.splitter = parser.splitter
        self.max_depth = parser.max_depth
        self.min_samples_leaf = parser.min_samples_leaf
        self.min_samples_split = parser.min_samples_split
        self.cross_validation = parser.cross_validation
        self.draw = parser.draw
        self.grid_search = parser.grid_search
        # self.param_name is already assigned by SklearnModelSelection.__init__;
        # the original duplicate assignment was removed.

    def decision_tree(self, X_train, y_train, X_test, y_test, feature_names, target_names):
        """Fit a tree with the CLI hyper-parameters, validate, draw and test it.

        Raises ValueError for an unrecognised self.cross_validation value.
        """
        clf = DecisionTreeClassifier(criterion=self.criterion,
                                     splitter=self.splitter,
                                     max_depth=self.max_depth,
                                     min_samples_leaf=self.min_samples_leaf,
                                     min_samples_split=self.min_samples_split)
        clf.fit(X_train, y_train)
        print("The feature importance of every features is {}".format(clf.feature_importances_))

        # Cross validation (0 = plain training score, 1 = cross_val_score,
        # 2 = cross_validate).
        if self.cross_validation == 0:
            self.No_Cross_Validate(clf, X_train, y_train)
        elif self.cross_validation == 1:
            self.Cross_Validation(clf, X_train, y_train)
        elif self.cross_validation == 2:
            self.Cross_Validate(clf, X_train, y_train)
        else:
            raise ValueError("Please choose right type of cross validation~", self.cross_validation)
        self.draw_tree(clf, feature_names, target_names)

        self.test(clf, X_test, y_test)

    def draw_tree(self, clf, feature_names, target_names):
        """Visualize the fitted tree via plot_tree, graphviz, or export_text.

        Raises ValueError for an unrecognised self.draw value.
        """
        if self.draw == 1:
            tree.plot_tree(clf)
            plt.show()
        elif self.draw == 2:
            dot_data = tree.export_graphviz(clf,
                                            feature_names=feature_names,
                                            class_names=target_names,
                                            out_file=None,
                                            filled=True,
                                            rounded=True,
                                            special_characters=True)
            graph = graphviz.Source(dot_data)
            graph.view()
        elif self.draw == 3:
            r = export_text(clf, feature_names=feature_names)
            print(r)
        else:
            raise ValueError('Please choose right type of tree visualization~', self.draw)

    def test(self, clf, X_test, y_test):
        """Print the test-set accuracy of the fitted classifier."""
        print("The test scores of DecisionTree is {}".format(clf.score(X_test, y_test)))

    def GridSearch(self, X_train, y_train, X_test, y_test, feature_names, target_names):
        """Run the selected hyper-parameter search, then refit, draw and test.

        Raises ValueError for an unrecognised self.grid_search value.
        """
        clf = DecisionTreeClassifier()
        if self.grid_search == 1:
            best_param = self.Validation_Curve(clf, X_train, y_train)
            # BUG FIX: the original hard-coded max_depth here even though the
            # swept parameter is configurable via --param_name.
            clf = DecisionTreeClassifier(**{self.param_name: best_param})
        elif self.grid_search == 2:
            best_params = self.Grid_Search_CV(clf, X_train, y_train)
            clf = DecisionTreeClassifier(max_depth=best_params['max_depth'],
                                         min_samples_leaf=best_params['min_samples_leaf'],
                                         min_samples_split=best_params['min_samples_split'])
        else:
            raise ValueError("Please choose right grid search method~", self.grid_search)
        # Common tail: refit with the best parameters, then visualize and test.
        clf.fit(X_train, y_train)
        self.draw_tree(clf, feature_names, target_names)
        self.test(clf, X_test, y_test)


if __name__ == '__main__':
    # Parse the command-line arguments.
    args = get_arguments()

    # BUG FIX (shadowing): the original rebound the class names
    # MyPreprocessing / SklearnDecisionTree to their instances, making the
    # classes unreachable afterwards; use distinct instance names instead.
    preprocessing = MyPreprocessing(args)
    datas, target, feature_names, target_names = preprocessing.load_dataset()
    X_train, y_train, X_test, y_test = preprocessing.split_dataset(datas, target)

    model = SklearnDecisionTree(args)
    model.decision_tree(X_train, y_train, X_test, y_test, feature_names, target_names)
    model.GridSearch(X_train, y_train, X_test, y_test, feature_names, target_names)
