# -*- coding: utf-8 -*-

import logger.logger as logger
from sklearn.externals import joblib
from sklearn.tree import DecisionTreeClassifier


class DecisionTree:
    """Thin wrapper around sklearn's DecisionTreeClassifier.

    Stores the constructor hyper-parameters in a dict and forwards them to
    sklearn when ``train`` is called; models are persisted/restored with
    joblib.

    Constructor parameters (forwarded verbatim to DecisionTreeClassifier):
      criterion: split-quality measure, "gini" (Gini impurity) or "entropy"
        (information gain).
      splitter: split-selection strategy at each node, "best" or "random".
      max_depth: maximum tree depth (None = grow until leaves are pure).
      min_samples_split: minimum samples required to split an internal node.
      min_samples_leaf: minimum samples required at a leaf node.
      min_weight_fraction_leaf: minimum weighted fraction of the input
        samples required at a leaf.
      max_features: number of features considered when looking for a split.
      random_state: RNG seed (set for reproducible trees).
      max_leaf_nodes: grow a tree with at most this many leaves, best-first
        (usually combined with max_depth).
      min_impurity_decrease: split only if impurity decreases by at least
        this amount.
      min_impurity_split: threshold below which a node stops splitting.
      class_weight: weights associated with the class labels.
      presort: whether to presort data to speed up best-split search.

    NOTE(review): ``from sklearn.externals import joblib`` (top of file) is
    deprecated and removed in sklearn >= 0.23 — migrate to ``import joblib``.
    """

    def __init__(self, criterion="entropy",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 class_weight=None,
                 presort=False):
        # Fitted estimator; stays None until train()/load_model() is called.
        self.model = None
        # Collected so train() can expand them with ** into the sklearn ctor.
        self.parameter = {
            'criterion': criterion,
            'splitter': splitter,
            'max_depth': max_depth,
            'min_samples_split': min_samples_split,
            'min_samples_leaf': min_samples_leaf,
            'min_weight_fraction_leaf': min_weight_fraction_leaf,
            'max_features': max_features,
            'random_state': random_state,
            'max_leaf_nodes': max_leaf_nodes,
            'min_impurity_decrease': min_impurity_decrease,
            'min_impurity_split': min_impurity_split,
            'class_weight': class_weight,
            'presort': presort,
        }

    def train(self, x_train, y_train, x_test, y_test, model_path):
        """Fit a tree on the training split, persist it, log test accuracy."""
        logger.info("training...")
        self.model = DecisionTreeClassifier(**self.parameter)
        self.model.fit(x_train, y_train)
        joblib.dump(self.model, model_path)
        acc = self.model.score(x_test, y_test)
        logger.info("acc: {}".format(str(acc)))

    def cross_validation(self, train_x, train_y, test_vecs, y_test, model_path, param_grid=None):
        """Grid-search tree hyper-parameters with 5-fold CV, keep the best model.

        BUG FIX: the original searched SVM parameters (kernel/C/gamma) and then
        built ``DecisionTreeClassifier(kernel='rbf', C=..., gamma=...,
        probability=True)`` — none of which DecisionTreeClassifier accepts, so
        the method crashed at runtime. The grid now contains decision-tree
        parameters, a caller-supplied ``param_grid`` is honoured instead of
        being overwritten, and the best estimator found by the search is
        persisted directly.

        Args:
          train_x, train_y: training features / labels for the search.
          test_vecs, y_test: held-out features / labels used only for the
            final accuracy log.
          model_path: destination path for joblib.dump of the best model.
          param_grid: optional sklearn-style grid; defaults to a small
            decision-tree grid when None.
        """
        from sklearn.model_selection import GridSearchCV
        model = DecisionTreeClassifier()
        if param_grid is None:
            param_grid = {
                'criterion': ['gini', 'entropy'],
                'max_depth': [None, 5, 10, 20, 40],
                'min_samples_split': [2, 5, 10],
                'min_samples_leaf': [1, 2, 5],
            }
        grid_search = GridSearchCV(model, param_grid, n_jobs=4, verbose=1, cv=5)
        grid_search.fit(train_x, train_y)
        best_parameters = grid_search.best_estimator_.get_params()

        for para, val in best_parameters.items():
            print(para, val)

        # GridSearchCV refits the winning configuration on the full training
        # set by default (refit=True), so best_estimator_ is ready to use.
        self.model = grid_search.best_estimator_
        joblib.dump(self.model, model_path)
        acc = self.model.score(test_vecs, y_test)
        logger.info("acc: {}".format(str(acc)))

    def load_model(self, model_path):
        """Restore a previously persisted model from disk."""
        self.model = joblib.load(model_path)

    def predict(self, x_vecs):
        """Return predicted labels for x_vecs, or None when no model is loaded."""
        # Explicit None check: estimator truthiness is not a reliable
        # "is fitted" signal.
        if self.model is None:
            logger.info("please load decision tree model first!!!")
            return None
        y_pred = self.model.predict(x_vecs)
        return y_pred


if __name__ == '__main__':
    # No CLI/demo behaviour yet; this module is intended to be imported.
    pass
